diff --git a/repos/ealanos/src/app/echo_server/config.h b/repos/ealanos/src/app/echo_server/config.h new file mode 100644 index 0000000000..026c610a96 --- /dev/null +++ b/repos/ealanos/src/app/echo_server/config.h @@ -0,0 +1,9 @@ +#pragma once + +namespace application::blinktree_server::network { +class config +{ +public: + static constexpr auto max_connections() noexcept { return 64U; } +}; +} // namespace application::blinktree_server::network \ No newline at end of file diff --git a/repos/ealanos/src/app/echo_server/main.cpp b/repos/ealanos/src/app/echo_server/main.cpp new file mode 100644 index 0000000000..545aa09920 --- /dev/null +++ b/repos/ealanos/src/app/echo_server/main.cpp @@ -0,0 +1,116 @@ + +#include "ealanos/memory/hamstraaja.h" +#include "mx/memory/global_heap.h" +#include "mx/system/topology.h" +#include "mxnic_netif.h" +#include "server.h" +#include +#include +#include +#include +#include +#include +#include +#include + +namespace application::echo_server +{ + class Server + { + public: + + Server(std::uint64_t port, mx::util::core_set &&cores, std::uint16_t prefetch_distance, + mx::synchronization::isolation_level node_isolation_level, + mx::synchronization::protocol preferred_synchronization_method); + + void run(); + + private: + + const std::uint64_t _port; + + const std::uint16_t _prefetch_distance; + + /// Cores. + mx::util::core_set _cores; + + // The synchronization mechanism to use for tree nodes. + const mx::synchronization::isolation_level _node_isolation_level; + + // Preferred synchronization method. 
+ const mx::synchronization::protocol _preferred_synchronization_method; + }; +} // namespace application::echo_server + +using namespace application::echo_server; + +Server::Server(const std::uint64_t port, mx::util::core_set &&cores, + const std::uint16_t prefetch_distance, + const mx::synchronization::isolation_level node_isolation_level, + const mx::synchronization::protocol preferred_synchronization_method) + : _port(port), _cores(std::move(cores)), _prefetch_distance(prefetch_distance), + _node_isolation_level(node_isolation_level), + _preferred_synchronization_method(preferred_synchronization_method) +{ +} + +void Server::run() +{ + network::Server *server; + + Libc::Env &env = mx::system::Environment::env(); + mx::tasking::runtime::init(env, this->_cores, this->_prefetch_distance, + /* use mx tasking's task allocator*/ false); + + static mx::memory::dynamic::Allocator *alloc = new ( + mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(mx::memory::dynamic::Allocator))) + mx::memory::dynamic::Allocator(); + + static Timer::Connection timer{env}; + + static Genode::Heap _alloc{env.ram(), env.rm()}; + + mx::memory::GlobalHeap::_alloc->reserve_superblocks(64, 128, 6); + + Mxip::mxip_init(*mx::memory::GlobalHeap::_alloc, timer); + Ealan::Memory::Hamstraaja<128, 4 * 4096> *task_alloc = + new (_alloc) Ealan::Memory::Hamstraaja<128, 4 * 4096>(env.pd(), env.rm()); + task_alloc->reserve_superblocks(48, 128, 6); + Mxip::Nic_netif::Payload_allocator *palloc = new (_alloc) Mxip::Nic_netif::Payload_allocator(env.pd(), env.rm()); + server = new network::Server{env, this->_port, mx::tasking::runtime::channels(), timer, _alloc, task_alloc, palloc}; + + std::cout << "Waiting for requests on port :" << this->_port << std::endl; + auto network_thread = std::thread{[server]() { + server->listen(); + }}; + + std::cout << "Task sizes are: " << std::endl; + std::cout << "MxIP::Receive_task: " << sizeof(Mxip::Receive_task) << std::endl; + std::cout << "MxIP::Tx_ready_task: " 
<< sizeof(Mxip::Tx_ready_task) << std::endl; + std::cout << "MxIP::Link_state_task: " << sizeof(Mxip::Link_state_task) << std::endl; + + std::cout << "Server receive task: " << sizeof(network::ReceiveTask) << std::endl; + mx::tasking::runtime::start_and_wait(); + + network_thread.join(); + + // delete server; +} + +void Libc::Component::construct(Libc::Env &env) +{ + mx::system::Environment::set_env(&env); + + auto sys_cores = mx::util::core_set::build(64); + mx::system::Environment::set_cores(&sys_cores); + Libc::with_libc([&]() { + auto cores = mx::util::core_set::build(64); + + + auto *server = new Server(12345, std::move(cores), 3, + mx::synchronization::isolation_level::ExclusiveWriter, + mx::synchronization::protocol::OLFIT); + + server->run(); + }); +} diff --git a/repos/ealanos/src/app/echo_server/server.cpp b/repos/ealanos/src/app/echo_server/server.cpp new file mode 100644 index 0000000000..5629e9f805 --- /dev/null +++ b/repos/ealanos/src/app/echo_server/server.cpp @@ -0,0 +1,378 @@ +#include "server.h" +#include "ealanos/memory/hamstraaja.h" +#include "lwip/err.h" +#include "lwip/pbuf.h" +#include "lwip/tcp.h" +#include "lwip/tcpbase.h" +#include "mx/memory/global_heap.h" +#include "mx/tasking/task.h" +#include "mxnic_netif.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace application::echo_server::network; + +Server *Server::_myself; + +ReceiveTask *Server::_receive_tasks = nullptr; + +Server::Server(Libc::Env &env, + const std::uint64_t port, + const std::uint16_t count_channels, Timer::Connection &timer, Genode::Heap &alloc, Ealan::Memory::Hamstraaja<128, 4*4096> *talloc, Mxip::Nic_netif::Payload_allocator *palloc) noexcept + : _port(port), _socket(nullptr), + _count_channels(count_channels), _env{env}, _config(env, "config"), _alloc(alloc), _timer(timer), _netif(env, _alloc, _config.xml(), _wakeup_scheduler, talloc, palloc) +{ + Server::_myself = this; + 
this->_buffer.fill('\0'); + + _wakeup_scheduler.set_nic(&_netif); + + _receive_tasks = static_cast(mx::memory::GlobalHeap::allocate_cache_line_aligned(65536 * sizeof(ReceiveTask))); + +} + +Server::~Server() { +} + +bool Server::listen() +{ + _socket = tcp_new(); + + if (!_socket) { + Genode::error("Failed to create server socket"); + return false; + } + + err_t rc = tcp_bind(_socket, &ip_addr_any, _port); + if (rc != ERR_OK) { + Genode::error("Failed to bind server socket to port ", _port); + return false; + } + + _socket = tcp_listen_with_backlog(_socket, 64); + tcp_accept(_socket, &Server::_handle_tcp_connect); + + return true; +} + +class Send_task : public mx::tasking::TaskInterface +{ + private: + struct Server::state *_s; + std::string _message; + + public: + Send_task(Server::state *s, std::string message) : _s(s), _message(message) {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) override + { + using namespace Lwip; + + if (_s->state == Server::CLOSED || _s->state == Server::CLOSING) { + Genode::warning("Tried to send over socket that is to be closed"); + //Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_remove(); + } + + pbuf *ptr = pbuf_alloc(PBUF_TRANSPORT, _message.length(), PBUF_RAM); + + if (!(_s->pcb) || !_s) { + Genode::error("Tried sending over invalid pcb"); + //Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_remove(); + } + + ptr->payload = static_cast(const_cast(_message.c_str())); + ptr->len = _message.length(); + + if (ptr->len > tcp_sndbuf(_s->pcb)) { + Genode::warning("Not enough space in send buffer"); + //Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_remove(); + } + + err_t rc = ERR_OK; + { + rc = tcp_write(_s->pcb, ptr->payload, ptr->len, TCP_WRITE_FLAG_COPY); + } + if (rc == ERR_OK) + { + tcp_output(_s->pcb); + pbuf_free(ptr); + } else { + Genode::warning("LWIP tcp_write error ", static_cast(rc)); + if (_s->state == 
Server::CLOSED || _s->state == Server::CLOSING) + return mx::tasking::TaskResult::make_remove(); + + pbuf_free(ptr); + /*if (_s->tx == nullptr) + _s->tx = ptr; + else { + pbuf_cat(_s->tx, ptr); + }*/ + } + //Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_remove(); + } +}; + +void +Server::send(Server::state *s, std::string &&message) +{ + + const auto length = std::uint64_t(message.size()); + auto response = std::string(length + sizeof(length), '\0'); + + // Write header + std::memcpy(response.data(), static_cast(&length), sizeof(length)); + + // Write data + std::memmove(response.data() + sizeof(length), message.data(), length); + + auto task = mx::tasking::runtime::new_task(0, s, response);//new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(Send_task))) Send_task(s, response); + task->annotate(static_cast(s->channel_id)); + mx::tasking::runtime::spawn(*task); +} + +void Server::stop() noexcept +{ + this->_is_running = false; +} + +class Close_task : public mx::tasking::TaskInterface +{ + private: + Server::state &_s; + + public: + Close_task(Server::state &s) : _s(s) {} + + mx::tasking::TaskResult execute(std::uint16_t, std::uint16_t) + { + Genode::log("Closing connection for ", static_cast(_s.pcb) , " and state object ", static_cast(&_s)); + Server::tcpbtree_close(_s.pcb, &_s); + _s.state = Server::CLOSED; + //Server::free_task(static_cast(this)); + Genode::log("Closed connection"); + return mx::tasking::TaskResult::make_remove(); + } +}; + +/*********** + * LWIP callback function definitions + ***********/ +err_t Server::_handle_tcp_connect(void *arg, struct tcp_pcb *newpcb, err_t err) +{ + + struct state *s; + + static uint64_t count_connections = 0; + + LWIP_UNUSED_ARG(arg); + + if ((err != ERR_OK) || (newpcb == NULL)) { return ERR_VAL; } + + s = new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(struct state))) + state(); // static_cast(mem_malloc(sizeof(struct state))); + + if (!s) { + 
Genode::error("Failed to allocate state object for new connection."); + return ERR_MEM; + } + //Genode::log("New connection #", count_connections, ": arg=", arg, " pcb=", newpcb, " s=", s, " &s=", static_cast(&s)); + + s->state = states::ACCEPTED; + s->pcb = newpcb; + s->retries = 0; + s->p = nullptr; + s->tx = nullptr; + s->channel_id = 0; //count_connections % Server::get_instance()->_count_channels; + + tcp_backlog_accepted(newpcb); + /* Register callback functions */ + tcp_arg(newpcb, s); + tcp_recv(newpcb, &Server::_handle_tcp_recv); + tcp_err(newpcb, &Server::_handle_tcp_error); + tcp_poll(newpcb, &Server::_handle_tcp_poll, 50); + tcp_sent(newpcb, &Server::_handle_tcp_sent); + newpcb->flags |= TF_NODELAY; + + return ERR_OK; +} + +err_t Server::_handle_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err) +{ + static std::uint16_t next_receive_task = 0; + struct state *s; + err_t rc = ERR_OK; + + std::uint16_t next_channel_id = 0; + + s = static_cast(arg); + + if (!s) { + pbuf_free(p); + return ERR_ARG; + } + + if (err != ERR_OK) { + pbuf_free(p); + return err; + } + + if (p == nullptr) { + s->state = CLOSING; + auto task = mx::tasking::runtime::new_task(0, *s); + //auto task = new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(Close_task))) Close_task(*s); + if (!task) { + Genode::warning("Failed to allocate close task"); + return ERR_MEM; + } + task->annotate(static_cast(s->channel_id)); + mx::tasking::runtime::spawn(*task); + return ERR_OK; + } else if (err != ERR_OK) { + rc = err; + } else if (s->state == states::ACCEPTED) { + //s->state = states::RECEIVED; + + rc = ERR_OK; + { + auto task = mx::tasking::runtime::new_task(0, s, p, p->len); + //ReceiveTask *task = new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(ReceiveTask))) ReceiveTask(s, payload, p->len); + if (!task) { + Genode::warning("Could not allocate request handler task"); + return ERR_MEM; + } + task->annotate(static_cast(s->channel_id)); + 
mx::tasking::runtime::spawn(*task); + } + tcp_recved(s->pcb, p->len); + //pbuf_free(p); + + //Server::get_instance()->send(s, "Nope"); + } else if (s->state == states::RECEIVED) { + //void *payload = mx::memory::GlobalHeap::allocate_cache_line_aligned(p->len); + //std::memcpy(payload, p->payload, p->len); + + auto task = mx::tasking::runtime::new_task(0, s, p, p->len); + //ReceiveTask *task = new (mx::memory::GlobalHeap::allocate_cache_line_aligned(sizeof(ReceiveTask))) ReceiveTask(s, payload, p->len); + if (!task) { + Genode::warning("Could not allocate request handler task"); + return ERR_MEM; + } + task->annotate(static_cast(s->channel_id)); + mx::tasking::runtime::spawn(*task); + + tcp_recved(s->pcb, p->len); + //pbuf_free(p); + + rc = ERR_OK; + } + else + { + pbuf_free(p); + tcp_recved(tpcb, p->tot_len); + rc = ERR_OK; + } + + + return rc; +} + +err_t Server::_handle_tcp_poll(void *arg, struct tcp_pcb *tpcb) +{ + err_t rc; + struct state *s; + + //GENODE_LOG_TSC(1); + s = static_cast(arg); + + if (s) { + if (s->tx) { + rc = tcp_write(tpcb, s->tx->payload, s->tx->len, 1); + if (rc == ERR_OK) { + tcp_output(tpcb); + pbuf *ptr = s->tx; + if (ptr->next) { + s->tx = ptr->next; + pbuf_ref(s->tx); + } + tcp_recved(tpcb, ptr->len); + pbuf_free(ptr); + } + // TODO: process remaning pbuf entry + } else { + /*if (s->state == states::CLOSING) { + Server::tcpbtree_close(tpcb, s); + }*/ + } + rc = ERR_OK; + } else { + tcp_abort(tpcb); + rc = ERR_ABRT; + } + + return ERR_OK; +} + + +err_t Server::_handle_tcp_sent(void *arg, struct tcp_pcb *tpcb, std::uint16_t len) +{ + //GENODE_LOG_TSC(1); + struct state *s = static_cast(arg); + + if (!s) + return ERR_ARG; + + s->retries = 0; + + if (s->tx) { + err_t rc = tcp_write(tpcb, s->tx->payload, s->tx->len, 1); + if (rc == ERR_OK) { + tcp_output(tpcb); + pbuf *ptr = s->tx; + if (ptr->next) { + s->tx = ptr->next; + pbuf_ref(s->tx); + } + tcp_recved(tpcb, ptr->len); + pbuf_free(ptr); + } + tcp_sent(tpcb, &Server::_handle_tcp_sent); // 
Genode::log("In _handle_tcp_sent"); + } + + return ERR_OK; +} + +mx::tasking::TaskResult application::echo_server::network::ReceiveTask::execute(std::uint16_t core_id, std::uint16_t channel_id) +{ + err_t rc = ERR_OK; + + if (!_payload || !_payload->payload) + return mx::tasking::TaskResult::make_remove(); + + std::string request = std::string(static_cast(_payload->payload), _length); + auto key = 0ULL; + auto index = 2U; // Skip request type and comma. + while (request[index] >= '0' && request[index] <= '9') + { + key = key * 10 + (request[index++] - '0'); + } + Server::get_instance()->send(_state, std::to_string(key)); + + pbuf_free(_payload); + //mx::memory::GlobalHeap::free(_payload); + + //Server::free_task(static_cast(this)); + return mx::tasking::TaskResult::make_remove(); +} \ No newline at end of file diff --git a/repos/ealanos/src/app/echo_server/server.h b/repos/ealanos/src/app/echo_server/server.h new file mode 100644 index 0000000000..0ee127d19c --- /dev/null +++ b/repos/ealanos/src/app/echo_server/server.h @@ -0,0 +1,251 @@ +#pragma once + +#include "config.h" +#include "ealanos/memory/hamstraaja.h" +#include "lwip/pbuf.h" +#include "mx/memory/global_heap.h" +#include +#include +#include +#include +#include +#include + +/* B-link tree includes */ +#include +#include + +/* lwIP wrapper for Genode's NIC session */ +#include +#include +#include + +/* Genode includes */ +#include +#include +#include + +/* MxTasking includes*/ +#include +#include +#include +#include +#include + +/* lwIP includes */ +namespace Lwip { + extern "C" { + #include + #include + #include + } +} + +namespace application::echo_server::network { + + class ResponseHandler; + class RequestTask; + class ReceiveTask; + + using namespace Lwip; + + class Server + { + public: + enum states + { + NONE = 0, + ACCEPTED, + RECEIVED, + CLOSING, + CLOSED + }; + + struct Wakeup_scheduler : Mxip::Nic_netif::Wakeup_scheduler { + Mxip::Nic_netif *nic{nullptr}; + + void schedule_nic_server_wakeup() 
override { nic->wakeup_nic_server(); } + + void set_nic(Mxip::Nic_netif *nic) { this->nic = nic; } + + Wakeup_scheduler() = default; + } _wakeup_scheduler; + + struct state + { + std::uint8_t state; + std::uint8_t retries; + struct tcp_pcb *pcb; + struct pbuf *p; + struct pbuf *tx; + std::uint16_t channel_id; + std::uint64_t id; + }; + Server(Libc::Env &env, std::uint64_t port, + std::uint16_t count_channels, Timer::Connection &timer, Genode::Heap &alloc, Ealan::Memory::Hamstraaja<128, 4*4096> *talloc, Mxip::Nic_netif::Payload_allocator *palloc) noexcept; + ~Server(); + + [[nodiscard]] std::uint16_t port() const noexcept { return _port; } + void stop() noexcept; + void send(struct Server::state *s, std::string &&message); + bool listen(); + void parse(struct Server::state *s, std::string &message); + + [[nodiscard]] bool is_running() const noexcept { return _is_running; } + + static void tcp_send(struct tcp_pcb *tpcb, struct state *s) + { + using namespace Lwip; + struct pbuf *ptr; + err_t rc = ERR_OK; + + if (!s) + return; + + while ((rc == ERR_OK) && (s->tx != nullptr) /* && (s->tx->len <= tcp_sndbuf(tpcb) */) + { + ptr = s->tx; + // Genode::log("Sending response"); + rc = tcp_write(tpcb, ptr->payload, ptr->len, 1); + if (rc == ERR_OK) + { + std::uint16_t plen; + + plen = ptr->len; + + s->tx = ptr->next; + if (s->tx != nullptr) + { + pbuf_ref(s->tx); + } + tcp_output(tpcb); + pbuf_free(ptr); + } + else if (rc == ERR_MEM) + { + Genode::warning("Low on memory. 
Defering to poll()"); + s->tx = ptr; + } + else + { + Genode::warning("An error ", static_cast(rc), " occured."); + } + } + } + + static void tcpbtree_close(struct tcp_pcb *tpcb, struct state *s) + { + if (!s || s->pcb != tpcb) { + Genode::error("Tried closing connection with invalid session state"); + return; + } + tcp_arg(tpcb, NULL); + tcp_sent(tpcb, NULL); + tcp_recv(tpcb, NULL); + tcp_poll(tpcb, NULL, 0); + tcp_err(tpcb, nullptr); + + Genode::log("Unregistered handlers"); + + tcp_close(tpcb); + Server::tcp_free(s); + } + + /* tcp_recv */ + static err_t _handle_tcp_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err); + + /* tcp_err */ + static void _handle_tcp_error(void *arg, err_t err) + { + struct state *s; + LWIP_UNUSED_ARG(err); + + s = static_cast(arg); + + Server::tcp_free(s); + } + + /* tcp_poll */ + static err_t _handle_tcp_poll(void *arg, struct tcp_pcb *tpcb); + + /* tcp_sent */ + static err_t _handle_tcp_sent(void *arg, struct tcp_pcb *tpcb, std::uint16_t len); + + /* helper function for free */ + static void tcp_free(struct state *s) + { + // Genode::log("Freeing state obj s=", s); + if (s) + { + if (s->p) + pbuf_free(s->p); + if (s->tx) pbuf_free(s->tx); + Genode::log("Freeing state object ", s); + mx::memory::GlobalHeap::free(s); // mem_free(s); + Genode::log("Freed state object"); + } + } + + static Server *get_instance() { return _myself; } + + static void free_handler_task(std::uint16_t core_id, void* task) + { + mx::memory::GlobalHeap::free(task); + } + + static void free_task(void* task) + { + mx::memory::GlobalHeap::free(task); + } + + private: + static Server *_myself; + const std::uint64_t _port; + struct tcp_pcb *_socket; + Libc::Env &_env; + + std::array _buffer; + static ReceiveTask *_receive_tasks; + + alignas(64) bool _is_running = true; + alignas(64) std::atomic_uint64_t _next_worker_id{0U}; + const std::uint16_t _count_channels; + + std::uint16_t add_client(tcp_pcb *client_socket); + + /* Genode environment for NIC 
session */ + Genode::Attached_rom_dataspace _config; + Genode::Heap &_alloc; + Timer::Connection &_timer; + + /* lwIP network device (NIC session wrapper) */ + Mxip::Nic_netif _netif; + + /************************************************ + * lwIP callback API: TCP callback functions + ************************************************/ + + /* tcp_accept */ + static err_t + _handle_tcp_connect(void *arg, struct tcp_pcb *newpcb, err_t err); + + + + /* helper function for close() */ + +}; + +class alignas(64) ReceiveTask final : public mx::tasking::TaskInterface +{ + public: + ReceiveTask(Server::state *state, pbuf *pb, std::size_t len) : _state(state), _payload(pb), _length(len) {} + + mx::tasking::TaskResult execute(std::uint16_t core_id, std::uint16_t channel_id) override; + + private: + Server::state *_state; + pbuf *_payload; + std::size_t _length; +}; + +} // namespace application::echo_server::network \ No newline at end of file diff --git a/repos/ealanos/src/app/echo_server/target.mk b/repos/ealanos/src/app/echo_server/target.mk new file mode 100644 index 0000000000..ed1594c5e0 --- /dev/null +++ b/repos/ealanos/src/app/echo_server/target.mk @@ -0,0 +1,32 @@ +MXINC_DIR=$(REP_DIR)/src/app/echo_server +MXINC_DIR+=-I$(REP_DIR)/src/app/blinktree +GENODE_GCC_TOOLCHAIN_DIR ?= /usr/local/genode/tool/23.05 +MXBENCH_DIR=$(REP_DIR)/src/lib +
+TARGET = blinktree_daemon +# source file for benchmark framework + +# source files for blinktree benchmark +SRC_BTREE += main.cpp +SRC_BTREE += server.cpp + +INC_DIR += /usr/local/genode/tool/lib/clang/14.0.5/include/ +INC_DIR += $(REP_DIR)/src/lib +INC_DIR += $(REP_DIR)/include +INC_DIR += $(REP_DIR)/include/ealanos/util +INC_DIR += $(call select_from_repositories,src/lib/libc) +INC_DIR += $(call select_from_repositories,src/lib/libc)/spec/x86_64 +vpath %.h ${INC_DIR} +LD_OPT += --allow-multiple-definition + +SRC_CC = ${SRC_MXBENCH} ${SRC_BTREE} +LIBS += base libc stdcxx mxtasking mxip +EXT_OBJECTS += /usr/local/genode/tool/lib/libatomic.a 
/usr/local/genode/tool/23.05/lib/gcc/x86_64-pc-elf/12.3.0/libgcc_eh.a /usr/local/genode/tool/lib/clang/14.0.5/lib/linux/libclang_rt.builtins-x86_64.a +CUSTOM_CC = /usr/local/genode/tool/bin/clang +CUSTOM_CXX = /usr/local/genode/tool/bin/clang++ +CC_OPT := --target=x86_64-genode --sysroot=/does/not/exist --gcc-toolchain=$(GENODE_GCC_TOOLCHAIN_DIR) -Wno-error -g -DNDEBUG -I$(MXINC_DIR) -std=c++20 #-D_GLIBCXX_ATOMIC_BUILTINS_8 -D__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 +CC_OPT += -I$(MXBENCH_DIR) +CC_OLEVEL = -O3 +CC_CXX_WARN_STRICT = +CUSTOM_CXX_LIB := $(CROSS_DEV_PREFIX)g++ +#CXX_LD += $(CROSS_DEV_PREFIX)g++