Diffstat (limited to 'xmrstak/net')
-rw-r--r--  xmrstak/net/jpsock.cpp     620
-rw-r--r--  xmrstak/net/jpsock.hpp      98
-rw-r--r--  xmrstak/net/msgstruct.hpp  136
-rw-r--r--  xmrstak/net/socket.cpp     366
-rw-r--r--  xmrstak/net/socket.hpp      57
-rw-r--r--  xmrstak/net/socks.hpp       97
6 files changed, 1374 insertions, 0 deletions
diff --git a/xmrstak/net/jpsock.cpp b/xmrstak/net/jpsock.cpp
new file mode 100644
index 0000000..d287375
--- /dev/null
+++ b/xmrstak/net/jpsock.cpp
@@ -0,0 +1,620 @@
+/*
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Additional permission under GNU GPL version 3 section 7
+ *
+ * If you modify this Program, or any covered work, by linking or combining
+ * it with OpenSSL (or a modified version of that library), containing parts
+ * covered by the terms of OpenSSL License and SSLeay License, the licensors
+ * of this Program grant you additional permission to convey the resulting work.
+ *
+ */
+
+#include <stdarg.h>
+#include <assert.h>
+
+#include "jpsock.h"
+#include "executor.h"
+#include "jconf.h"
+
+#include "rapidjson/document.h"
+#include "jext.h"
+#include "socks.h"
+#include "socket.h"
+#include "version.h"
+
+#define AGENTID_STR XMR_STAK_NAME "/" XMR_STAK_VERSION
+
+using namespace rapidjson;
+
+struct jpsock::call_rsp
+{
+ bool bHaveResponse;
+ uint64_t iCallId;
+ Value* pCallData;
+ std::string sCallErr;
+
+ call_rsp(Value* val) : pCallData(val)
+ {
+ bHaveResponse = false;
+ iCallId = 0;
+ sCallErr.clear();
+ }
+};
+
+typedef GenericDocument<UTF8<>, MemoryPoolAllocator<>, MemoryPoolAllocator<>> MemDocument;
+
+/*
+ *
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ * ASSUMPTION - only one calling thread. Multiple calling threads would require better
+ * thread safety. The calling thread is assumed to be the executor thread.
+ * If there is a reason to call the pool outside of the executor context, consider
+ * doing it via an executor event.
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ *
+ * Call values and allocators are for the calling thread (executor). When processing
+ * a call, the recv thread will make a copy of the call response and then erase its copy.
+ */
+
+struct jpsock::opaque_private
+{
+ Value oCallValue;
+
+ MemoryPoolAllocator<> callAllocator;
+ MemoryPoolAllocator<> recvAllocator;
+ MemoryPoolAllocator<> parseAllocator;
+ MemDocument jsonDoc;
+ call_rsp oCallRsp;
+
+ opaque_private(uint8_t* bCallMem, uint8_t* bRecvMem, uint8_t* bParseMem) :
+ callAllocator(bCallMem, jpsock::iJsonMemSize),
+ recvAllocator(bRecvMem, jpsock::iJsonMemSize),
+ parseAllocator(bParseMem, jpsock::iJsonMemSize),
+ jsonDoc(&recvAllocator, jpsock::iJsonMemSize, &parseAllocator),
+ oCallRsp(nullptr)
+ {
+ }
+};
+
+struct jpsock::opq_json_val
+{
+ const Value* val;
+ opq_json_val(const Value* val) : val(val) {}
+};
+
+jpsock::jpsock(size_t id, bool tls) : pool_id(id)
+{
+ sock_init();
+
+ bJsonCallMem = (uint8_t*)malloc(iJsonMemSize);
+ bJsonRecvMem = (uint8_t*)malloc(iJsonMemSize);
+ bJsonParseMem = (uint8_t*)malloc(iJsonMemSize);
+
+ prv = new opaque_private(bJsonCallMem, bJsonRecvMem, bJsonParseMem);
+
+#ifndef CONF_NO_TLS
+ if(tls)
+ sck = new tls_socket(this);
+ else
+ sck = new plain_socket(this);
+#else
+ sck = new plain_socket(this);
+#endif
+
+ oRecvThd = nullptr;
+ bRunning = false;
+ bLoggedIn = false;
+ iJobDiff = 0;
+
+ memset(&oCurrentJob, 0, sizeof(oCurrentJob));
+}
+
+jpsock::~jpsock()
+{
+ delete prv;
+ prv = nullptr;
+
+ free(bJsonCallMem);
+ free(bJsonRecvMem);
+ free(bJsonParseMem);
+}
+
+std::string&& jpsock::get_call_error()
+{
+ return std::move(prv->oCallRsp.sCallErr);
+}
+
+bool jpsock::set_socket_error(const char* a)
+{
+ if(!bHaveSocketError)
+ {
+ bHaveSocketError = true;
+ sSocketError.assign(a);
+ }
+
+ return false;
+}
+
+bool jpsock::set_socket_error(const char* a, const char* b)
+{
+ if(!bHaveSocketError)
+ {
+ bHaveSocketError = true;
+ size_t ln_a = strlen(a);
+ size_t ln_b = strlen(b);
+
+ sSocketError.reserve(ln_a + ln_b + 2);
+ sSocketError.assign(a, ln_a);
+ sSocketError.append(b, ln_b);
+ }
+
+ return false;
+}
+
+bool jpsock::set_socket_error(const char* a, size_t len)
+{
+ if(!bHaveSocketError)
+ {
+ bHaveSocketError = true;
+ sSocketError.assign(a, len);
+ }
+
+ return false;
+}
+
+bool jpsock::set_socket_error_strerr(const char* a)
+{
+ char sSockErrText[512];
+ return set_socket_error(a, sock_strerror(sSockErrText, sizeof(sSockErrText)));
+}
+
+bool jpsock::set_socket_error_strerr(const char* a, int res)
+{
+ char sSockErrText[512];
+ return set_socket_error(a, sock_gai_strerror(res, sSockErrText, sizeof(sSockErrText)));
+}
+
+void jpsock::jpsock_thread()
+{
+ jpsock_thd_main();
+ executor::inst()->push_event(ex_event(std::move(sSocketError), pool_id));
+
+ // If a call is waiting, send an error to end it
+ bool bCallWaiting = false;
+ std::unique_lock<std::mutex> mlock(call_mutex);
+ if(prv->oCallRsp.pCallData != nullptr)
+ {
+ prv->oCallRsp.bHaveResponse = true;
+ prv->oCallRsp.iCallId = 0;
+ prv->oCallRsp.pCallData = nullptr;
+ bCallWaiting = true;
+ }
+ mlock.unlock();
+
+ if(bCallWaiting)
+ call_cond.notify_one();
+
+ bRunning = false;
+ bLoggedIn = false;
+
+ std::unique_lock<std::mutex> lck(job_mutex);
+ memset(&oCurrentJob, 0, sizeof(oCurrentJob));
+}
+
+bool jpsock::jpsock_thd_main()
+{
+ if(!sck->connect())
+ return false;
+
+ executor::inst()->push_event(ex_event(EV_SOCK_READY, pool_id));
+
+ char buf[iSockBufferSize];
+ size_t datalen = 0;
+ while (true)
+ {
+ int ret = sck->recv(buf + datalen, sizeof(buf) - datalen);
+
+ if(ret <= 0)
+ return false;
+
+ datalen += ret;
+
+ if (datalen >= sizeof(buf))
+ {
+ sck->close(false);
+ return set_socket_error("RECEIVE error: data overflow");
+ }
+
+ char* lnend;
+ char* lnstart = buf;
+ while ((lnend = (char*)memchr(lnstart, '\n', datalen)) != nullptr)
+ {
+ lnend++;
+ int lnlen = lnend - lnstart;
+
+ if (!process_line(lnstart, lnlen))
+ {
+ sck->close(false);
+ return false;
+ }
+
+ datalen -= lnlen;
+ lnstart = lnend;
+ }
+
+ //Got leftover data? Move it to the front
+ if (datalen > 0 && buf != lnstart)
+ memmove(buf, lnstart, datalen);
+ }
+}
+
+bool jpsock::process_line(char* line, size_t len)
+{
+ prv->jsonDoc.SetNull();
+ prv->parseAllocator.Clear();
+ prv->callAllocator.Clear();
+
+ /*NULL terminate the line instead of '\n', parsing will add some more NULLs*/
+ line[len-1] = '\0';
+
+ //printf("RECV: %s\n", line);
+
+ if (prv->jsonDoc.ParseInsitu(line).HasParseError())
+ return set_socket_error("PARSE error: Invalid JSON");
+
+ if (!prv->jsonDoc.IsObject())
+ return set_socket_error("PARSE error: Invalid root");
+
+ const Value* mt;
+ if (prv->jsonDoc.HasMember("method"))
+ {
+ mt = GetObjectMember(prv->jsonDoc, "method");
+
+ if(!mt->IsString())
+ return set_socket_error("PARSE error: Protocol error 1");
+
+ if(strcmp(mt->GetString(), "job") != 0)
+ return set_socket_error("PARSE error: Unsupported server method ", mt->GetString());
+
+ mt = GetObjectMember(prv->jsonDoc, "params");
+ if(mt == nullptr || !mt->IsObject())
+ return set_socket_error("PARSE error: Protocol error 2");
+
+ opq_json_val v(mt);
+ return process_pool_job(&v);
+ }
+ else
+ {
+ uint64_t iCallId;
+ mt = GetObjectMember(prv->jsonDoc, "id");
+ if (mt == nullptr || !mt->IsUint64())
+ return set_socket_error("PARSE error: Protocol error 3");
+
+ iCallId = mt->GetUint64();
+
+ mt = GetObjectMember(prv->jsonDoc, "error");
+
+ const char* sError = nullptr;
+ size_t iErrorLn = 0;
+ if (mt == nullptr || mt->IsNull())
+ {
+ /* If there was no error we need a result */
+ if ((mt = GetObjectMember(prv->jsonDoc, "result")) == nullptr)
+ return set_socket_error("PARSE error: Protocol error 7");
+ }
+ else
+ {
+ if(!mt->IsObject())
+ return set_socket_error("PARSE error: Protocol error 5");
+
+ const Value* msg = GetObjectMember(*mt, "message");
+
+ if(msg == nullptr || !msg->IsString())
+ return set_socket_error("PARSE error: Protocol error 6");
+
+ iErrorLn = msg->GetStringLength();
+ sError = msg->GetString();
+ }
+
+ std::unique_lock<std::mutex> mlock(call_mutex);
+ if (prv->oCallRsp.pCallData == nullptr)
+ {
+ /*Server sent us a call reply without us making a call*/
+ mlock.unlock();
+ return set_socket_error("PARSE error: Unexpected call response");
+ }
+
+ prv->oCallRsp.bHaveResponse = true;
+ prv->oCallRsp.iCallId = iCallId;
+
+ if(sError != nullptr)
+ {
+ prv->oCallRsp.pCallData = nullptr;
+ prv->oCallRsp.sCallErr.assign(sError, iErrorLn);
+ }
+ else
+ prv->oCallRsp.pCallData->CopyFrom(*mt, prv->callAllocator);
+
+ mlock.unlock();
+ call_cond.notify_one();
+
+ return true;
+ }
+}
+
+bool jpsock::process_pool_job(const opq_json_val* params)
+{
+ if (!params->val->IsObject())
+ return set_socket_error("PARSE error: Job error 1");
+
+ const Value * blob, *jobid, *target;
+ jobid = GetObjectMember(*params->val, "job_id");
+ blob = GetObjectMember(*params->val, "blob");
+ target = GetObjectMember(*params->val, "target");
+
+ if (jobid == nullptr || blob == nullptr || target == nullptr ||
+ !jobid->IsString() || !blob->IsString() || !target->IsString())
+ {
+ return set_socket_error("PARSE error: Job error 2");
+ }
+
+ if (jobid->GetStringLength() >= sizeof(pool_job::sJobID)) // Note >=
+ return set_socket_error("PARSE error: Job error 3");
+
+ uint32_t iWorkLn = blob->GetStringLength() / 2;
+ if (iWorkLn > sizeof(pool_job::bWorkBlob))
+ return set_socket_error("PARSE error: Invalid job legth. Are you sure you are mining the correct coin?");
+
+ pool_job oPoolJob;
+ if (!hex2bin(blob->GetString(), iWorkLn * 2, oPoolJob.bWorkBlob))
+ return set_socket_error("PARSE error: Job error 4");
+
+ oPoolJob.iWorkLen = iWorkLn;
+ memset(oPoolJob.sJobID, 0, sizeof(pool_job::sJobID));
+ memcpy(oPoolJob.sJobID, jobid->GetString(), jobid->GetStringLength()); //Bounds checking at proto error 3
+
+ size_t target_slen = target->GetStringLength();
+ if(target_slen <= 8)
+ {
+ uint32_t iTempInt = 0;
+ char sTempStr[] = "00000000"; // Little-endian CPU FTW
+ memcpy(sTempStr, target->GetString(), target_slen);
+ if(!hex2bin(sTempStr, 8, (unsigned char*)&iTempInt) || iTempInt == 0)
+ return set_socket_error("PARSE error: Invalid target");
+
+
+ oPoolJob.iTarget = t32_to_t64(iTempInt);
+ oPoolJob.iTarget32 = iTempInt;
+
+ }
+ else if(target_slen <= 16)
+ {
+ oPoolJob.iTarget = 0;
+ char sTempStr[] = "0000000000000000";
+ memcpy(sTempStr, target->GetString(), target_slen);
+ if(!hex2bin(sTempStr, 16, (unsigned char*)&oPoolJob.iTarget) || oPoolJob.iTarget == 0)
+ return set_socket_error("PARSE error: Invalid target");
+ }
+ else
+ return set_socket_error("PARSE error: Job error 5");
+
+ iJobDiff = t64_to_diff(oPoolJob.iTarget);
+
+ executor::inst()->push_event(ex_event(oPoolJob, pool_id));
+
+ std::unique_lock<std::mutex> lck(job_mutex);
+ oCurrentJob = oPoolJob;
+ return true;
+}
+
+bool jpsock::connect(const char* sAddr, std::string& sConnectError)
+{
+ bHaveSocketError = false;
+ sSocketError.clear();
+ iJobDiff = 0;
+
+ if(sck->set_hostname(sAddr))
+ {
+ bRunning = true;
+ oRecvThd = new std::thread(&jpsock::jpsock_thread, this);
+ return true;
+ }
+
+ sConnectError = std::move(sSocketError);
+ return false;
+}
+
+void jpsock::disconnect()
+{
+ sck->close(false);
+
+ if(oRecvThd != nullptr)
+ {
+ oRecvThd->join();
+ delete oRecvThd;
+ oRecvThd = nullptr;
+ }
+
+ sck->close(true);
+}
+
+bool jpsock::cmd_ret_wait(const char* sPacket, opq_json_val& poResult)
+{
+ //printf("SEND: %s\n", sPacket);
+
+ /*Set up the call rsp for the call reply*/
+ prv->oCallValue.SetNull();
+ prv->callAllocator.Clear();
+
+ std::unique_lock<std::mutex> mlock(call_mutex);
+ prv->oCallRsp = call_rsp(&prv->oCallValue);
+ mlock.unlock();
+
+ if(!sck->send(sPacket))
+ {
+ disconnect(); //This will join the other thread;
+ return false;
+ }
+
+ //Success is true if the server approves, result is true if there was no socket error
+ bool bSuccess;
+ mlock.lock();
+ bool bResult = call_cond.wait_for(mlock, std::chrono::seconds(jconf::inst()->GetCallTimeout()),
+ [&]() { return prv->oCallRsp.bHaveResponse; });
+
+ bSuccess = prv->oCallRsp.pCallData != nullptr;
+ prv->oCallRsp.pCallData = nullptr;
+ mlock.unlock();
+
+ if(bHaveSocketError)
+ return false;
+
+ //This means that there was no socket error, but the server is not talking to us
+ if(!bResult)
+ {
+ set_socket_error("CALL error: Timeout while waiting for a reply");
+ disconnect();
+ return false;
+ }
+
+ if(bSuccess)
+ poResult.val = &prv->oCallValue;
+
+ return bSuccess;
+}
+
+bool jpsock::cmd_login(const char* sLogin, const char* sPassword)
+{
+ char cmd_buffer[1024];
+
+ snprintf(cmd_buffer, sizeof(cmd_buffer), "{\"method\":\"login\",\"params\":{\"login\":\"%s\",\"pass\":\"%s\",\"agent\":\"" AGENTID_STR "\"},\"id\":1}\n",
+ sLogin, sPassword);
+
+ opq_json_val oResult(nullptr);
+
+ /*Normal error conditions (failed login etc..) will end here*/
+ if (!cmd_ret_wait(cmd_buffer, oResult))
+ return false;
+
+ if (!oResult.val->IsObject())
+ {
+ set_socket_error("PARSE error: Login protocol error 1");
+ disconnect();
+ return false;
+ }
+
+ const Value* id = GetObjectMember(*oResult.val, "id");
+ const Value* job = GetObjectMember(*oResult.val, "job");
+
+ if (id == nullptr || job == nullptr || !id->IsString())
+ {
+ set_socket_error("PARSE error: Login protocol error 2");
+ disconnect();
+ return false;
+ }
+
+ if (id->GetStringLength() >= sizeof(sMinerId))
+ {
+ set_socket_error("PARSE error: Login protocol error 3");
+ disconnect();
+ return false;
+ }
+
+ memset(sMinerId, 0, sizeof(sMinerId));
+ memcpy(sMinerId, id->GetString(), id->GetStringLength());
+
+ opq_json_val v(job);
+ if(!process_pool_job(&v))
+ {
+ disconnect();
+ return false;
+ }
+
+ bLoggedIn = true;
+
+ return true;
+}
+
+bool jpsock::cmd_submit(const char* sJobId, uint32_t iNonce, const uint8_t* bResult)
+{
+ char cmd_buffer[1024];
+ char sNonce[9];
+ char sResult[65];
+
+ bin2hex((unsigned char*)&iNonce, 4, sNonce);
+ sNonce[8] = '\0';
+
+ bin2hex(bResult, 32, sResult);
+ sResult[64] = '\0';
+
+ snprintf(cmd_buffer, sizeof(cmd_buffer), "{\"method\":\"submit\",\"params\":{\"id\":\"%s\",\"job_id\":\"%s\",\"nonce\":\"%s\",\"result\":\"%s\"},\"id\":1}\n",
+ sMinerId, sJobId, sNonce, sResult);
+
+ opq_json_val oResult(nullptr);
+ return cmd_ret_wait(cmd_buffer, oResult);
+}
+
+bool jpsock::get_current_job(pool_job& job)
+{
+ std::unique_lock<std::mutex> lck(job_mutex);
+
+ if(oCurrentJob.iWorkLen == 0)
+ return false;
+
+ oCurrentJob.iResumeCnt++;
+ job = oCurrentJob;
+ return true;
+}
+
+inline unsigned char hf_hex2bin(char c, bool &err)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ else if (c >= 'a' && c <= 'f')
+ return c - 'a' + 0xA;
+ else if (c >= 'A' && c <= 'F')
+ return c - 'A' + 0xA;
+
+ err = true;
+ return 0;
+}
+
+bool jpsock::hex2bin(const char* in, unsigned int len, unsigned char* out)
+{
+ bool error = false;
+ for (unsigned int i = 0; i < len; i += 2)
+ {
+ out[i / 2] = (hf_hex2bin(in[i], error) << 4) | hf_hex2bin(in[i + 1], error);
+ if (error) return false;
+ }
+ return true;
+}
+
+inline char hf_bin2hex(unsigned char c)
+{
+ if (c <= 0x9)
+ return '0' + c;
+ else
+ return 'a' - 0xA + c;
+}
+
+void jpsock::bin2hex(const unsigned char* in, unsigned int len, char* out)
+{
+ for (unsigned int i = 0; i < len; i++)
+ {
+ out[i * 2] = hf_bin2hex((in[i] & 0xF0) >> 4);
+ out[i * 2 + 1] = hf_bin2hex(in[i] & 0x0F);
+ }
+}
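The single-calling-thread assumption documented near the top of this file boils down to a condition-variable handshake: cmd_ret_wait() parks the executor thread on call_cond with a timeout while the recv thread fills in the response in process_line() and notifies. The following minimal, self-contained sketch of that pattern is illustrative only (not part of this patch; all names are made up):

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <string>
#include <thread>

// Stand-in for jpsock's call_rsp / call_mutex / call_cond trio.
struct pending_call
{
    std::mutex mtx;
    std::condition_variable cond;
    bool have_response = false;
    std::string response;
};

int main()
{
    pending_call call;

    // Stand-in for the recv thread: it "receives" a reply and wakes the caller.
    std::thread recv_thd([&call]()
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        {
            std::lock_guard<std::mutex> lck(call.mtx);
            call.response = "{\"status\":\"OK\"}";
            call.have_response = true;
        }
        call.cond.notify_one();
    });

    // Stand-in for cmd_ret_wait(): wait with a timeout so a dead socket
    // cannot park the calling thread forever.
    std::unique_lock<std::mutex> lck(call.mtx);
    bool ok = call.cond.wait_for(lck, std::chrono::seconds(10),
        [&call]() { return call.have_response; });

    if(ok)
        printf("got reply: %s\n", call.response.c_str());
    else
        printf("timed out waiting for a reply\n");

    recv_thd.join();
    return 0;
}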
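The target handling in process_pool_job() can also be seen in a standalone worked sketch (not part of this patch) that reuses the t32_to_t64()/t64_to_diff() formulas declared in jpsock.hpp. "b88d0600" is a hypothetical 8-character pool target; decoded little-endian, as the parser above assumes ("Little-endian CPU FTW"), it works out to difficulty 10000:

#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t t32_to_t64(uint32_t t) { return 0xFFFFFFFFFFFFFFFFULL / (0xFFFFFFFFULL / ((uint64_t)t)); }
static uint64_t t64_to_diff(uint64_t t) { return 0xFFFFFFFFFFFFFFFFULL / t; }

// Local re-implementation of jpsock::hex2bin for a self-contained example.
static bool hex2bin(const char* in, unsigned int len, unsigned char* out)
{
    auto nibble = [](char c) -> int {
        if(c >= '0' && c <= '9') return c - '0';
        if(c >= 'a' && c <= 'f') return c - 'a' + 10;
        if(c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
    };

    for(unsigned int i = 0; i < len; i += 2)
    {
        int hi = nibble(in[i]), lo = nibble(in[i + 1]);
        if(hi < 0 || lo < 0)
            return false;
        out[i / 2] = (unsigned char)((hi << 4) | lo);
    }
    return true;
}

int main()
{
    const char* sTarget = "b88d0600"; // hypothetical job target sent by the pool

    uint32_t iTarget32 = 0;
    char sTempStr[] = "00000000"; // pad short targets, exactly as the parser does
    memcpy(sTempStr, sTarget, strlen(sTarget));

    if(!hex2bin(sTempStr, 8, (unsigned char*)&iTarget32) || iTarget32 == 0)
        return 1;

    uint64_t iTarget = t32_to_t64(iTarget32);
    printf("target32 = 0x%08x, target64 = 0x%016llx, difficulty = %llu\n",
        (unsigned)iTarget32, (unsigned long long)iTarget,
        (unsigned long long)t64_to_diff(iTarget));
    return 0;
}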
diff --git a/xmrstak/net/jpsock.hpp b/xmrstak/net/jpsock.hpp
new file mode 100644
index 0000000..4baaade
--- /dev/null
+++ b/xmrstak/net/jpsock.hpp
@@ -0,0 +1,98 @@
+#pragma once
+#include <mutex>
+#include <atomic>
+#include <condition_variable>
+#include <thread>
+#include <string>
+
+#include "msgstruct.h"
+
+/* Our pool can have two kinds of errors:
+ - Parsing or connection error
+ Those are fatal errors (we drop the connection if we encounter them).
+ They are constructed from const char* strings in various places
+ (which can point to read-only memory), and we pass them in an executor message
+ once the recv thread exits.
+ - Call error
+ This error happens when the "server says no". Usually because the job was
+ outdated, or we somehow got the hash wrong. It isn't fatal.
+ We parse it in-situ in the network buffer, after that we copy it to a
+ std::string. Executor will move the buffer via an r-value ref.
+*/
+class base_socket;
+
+class jpsock
+{
+public:
+ jpsock(size_t id, bool tls);
+ ~jpsock();
+
+ bool connect(const char* sAddr, std::string& sConnectError);
+ void disconnect();
+
+ bool cmd_login(const char* sLogin, const char* sPassword);
+ bool cmd_submit(const char* sJobId, uint32_t iNonce, const uint8_t* bResult);
+
+ static bool hex2bin(const char* in, unsigned int len, unsigned char* out);
+ static void bin2hex(const unsigned char* in, unsigned int len, char* out);
+
+ inline bool is_running() { return bRunning; }
+ inline bool is_logged_in() { return bLoggedIn; }
+
+ std::string&& get_call_error();
+ bool have_sock_error() { return bHaveSocketError; }
+
+ inline static uint64_t t32_to_t64(uint32_t t) { return 0xFFFFFFFFFFFFFFFFULL / (0xFFFFFFFFULL / ((uint64_t)t)); }
+ inline static uint64_t t64_to_diff(uint64_t t) { return 0xFFFFFFFFFFFFFFFFULL / t; }
+ inline static uint64_t diff_to_t64(uint64_t d) { return 0xFFFFFFFFFFFFFFFFULL / d; }
+
+ inline uint64_t get_current_diff() { return iJobDiff; }
+
+ bool get_current_job(pool_job& job);
+
+ size_t pool_id;
+
+ bool set_socket_error(const char* a);
+ bool set_socket_error(const char* a, const char* b);
+ bool set_socket_error(const char* a, size_t len);
+ bool set_socket_error_strerr(const char* a);
+ bool set_socket_error_strerr(const char* a, int res);
+
+private:
+ std::atomic<bool> bRunning;
+ std::atomic<bool> bLoggedIn;
+
+ uint8_t* bJsonRecvMem;
+ uint8_t* bJsonParseMem;
+ uint8_t* bJsonCallMem;
+
+ static constexpr size_t iJsonMemSize = 4096;
+ static constexpr size_t iSockBufferSize = 4096;
+
+ struct call_rsp;
+ struct opaque_private;
+ struct opq_json_val;
+
+ void jpsock_thread();
+ bool jpsock_thd_main();
+ bool process_line(char* line, size_t len);
+ bool process_pool_job(const opq_json_val* params);
+ bool cmd_ret_wait(const char* sPacket, opq_json_val& poResult);
+
+ char sMinerId[64];
+ std::atomic<uint64_t> iJobDiff;
+
+ std::string sSocketError;
+ std::atomic<bool> bHaveSocketError;
+
+ std::mutex call_mutex;
+ std::condition_variable call_cond;
+ std::thread* oRecvThd;
+
+ std::mutex job_mutex;
+ pool_job oCurrentJob;
+
+ opaque_private* prv;
+ base_socket* sck;
+};
+
diff --git a/xmrstak/net/msgstruct.hpp b/xmrstak/net/msgstruct.hpp
new file mode 100644
index 0000000..f3a39b2
--- /dev/null
+++ b/xmrstak/net/msgstruct.hpp
@@ -0,0 +1,136 @@
+#pragma once
+#include <string>
+#include <string.h>
+#include <assert.h>
+
+// Structures that we use to pass info between threads. Constructors are here just to make
+// the stack allocation take up less space; the heap is a shared resource that needs locks too, of course.
+
+struct pool_job
+{
+ char sJobID[64];
+ uint8_t bWorkBlob[112];
+ uint64_t iTarget;
+ // \todo remove workaround needed for amd
+ uint32_t iTarget32;
+ uint32_t iWorkLen;
+ uint32_t iResumeCnt;
+
+ pool_job() : iWorkLen(0), iResumeCnt(0) {}
+ pool_job(const char* sJobID, uint64_t iTarget, const uint8_t* bWorkBlob, uint32_t iWorkLen) :
+ iTarget(iTarget), iWorkLen(iWorkLen), iResumeCnt(0)
+ {
+ assert(iWorkLen <= sizeof(pool_job::bWorkBlob));
+ memcpy(this->sJobID, sJobID, sizeof(pool_job::sJobID));
+ memcpy(this->bWorkBlob, bWorkBlob, iWorkLen);
+ }
+};
+
+struct job_result
+{
+ uint8_t bResult[32];
+ char sJobID[64];
+ uint32_t iNonce;
+
+ job_result() {}
+ job_result(const char* sJobID, uint32_t iNonce, const uint8_t* bResult) : iNonce(iNonce)
+ {
+ memcpy(this->sJobID, sJobID, sizeof(job_result::sJobID));
+ memcpy(this->bResult, bResult, sizeof(job_result::bResult));
+ }
+};
+
+
+enum ex_event_name { EV_INVALID_VAL, EV_SOCK_READY, EV_SOCK_ERROR,
+ EV_POOL_HAVE_JOB, EV_MINER_HAVE_RESULT, EV_PERF_TICK, EV_RECONNECT,
+ EV_SWITCH_POOL, EV_DEV_POOL_EXIT, EV_USR_HASHRATE, EV_USR_RESULTS, EV_USR_CONNSTAT,
+ EV_HASHRATE_LOOP, EV_HTML_HASHRATE, EV_HTML_RESULTS, EV_HTML_CONNSTAT, EV_HTML_JSON };
+
+/*
+ This is how I learned to stop worrying and love C++11 =).
+ Ghosts of endless heap allocations have finally been exorcised. Thanks
+ to the nifty magic of move semantics, the string will only be allocated
+ once on the heap. Considering that it makes a journey across the stack,
+ a heap-allocated queue, and another stack before finally being processed,
+ I think that is kind of nifty, don't you?
+ Also note that for non-arg events we only copy two qwords.
+*/
+
+struct ex_event
+{
+ ex_event_name iName;
+ size_t iPoolId;
+
+ union
+ {
+ pool_job oPoolJob;
+ job_result oJobResult;
+ std::string sSocketError;
+ };
+
+ ex_event() { iName = EV_INVALID_VAL; iPoolId = 0;}
+ ex_event(std::string&& err, size_t id) : iName(EV_SOCK_ERROR), iPoolId(id), sSocketError(std::move(err)) { }
+ ex_event(job_result dat, size_t id) : iName(EV_MINER_HAVE_RESULT), iPoolId(id), oJobResult(dat) {}
+ ex_event(pool_job dat, size_t id) : iName(EV_POOL_HAVE_JOB), iPoolId(id), oPoolJob(dat) {}
+ ex_event(ex_event_name ev, size_t id = 0) : iName(ev), iPoolId(id) {}
+
+ // Delete the copy operators to make sure we are moving only what is needed
+ ex_event(ex_event const&) = delete;
+ ex_event& operator=(ex_event const&) = delete;
+
+ ex_event(ex_event&& from)
+ {
+ iName = from.iName;
+ iPoolId = from.iPoolId;
+
+ switch(iName)
+ {
+ case EV_SOCK_ERROR:
+ new (&sSocketError) std::string(std::move(from.sSocketError));
+ break;
+ case EV_MINER_HAVE_RESULT:
+ oJobResult = from.oJobResult;
+ break;
+ case EV_POOL_HAVE_JOB:
+ oPoolJob = from.oPoolJob;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ex_event& operator=(ex_event&& from)
+ {
+ assert(this != &from);
+
+ if(iName == EV_SOCK_ERROR)
+ sSocketError.~basic_string();
+
+ iName = from.iName;
+ iPoolId = from.iPoolId;
+
+ switch(iName)
+ {
+ case EV_SOCK_ERROR:
+ new (&sSocketError) std::string();
+ sSocketError = std::move(from.sSocketError);
+ break;
+ case EV_MINER_HAVE_RESULT:
+ oJobResult = from.oJobResult;
+ break;
+ case EV_POOL_HAVE_JOB:
+ oPoolJob = from.oPoolJob;
+ break;
+ default:
+ break;
+ }
+
+ return *this;
+ }
+
+ ~ex_event()
+ {
+ if(iName == EV_SOCK_ERROR)
+ sSocketError.~basic_string();
+ }
+};
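What the move-only union buys can be shown with a small usage sketch (not part of this patch): the error string is allocated once and then only moved as the event crosses a queue from the producing thread to the consumer.

#include <cstdint>
#include <cstdio>
#include <queue>
#include <string>
#include <utility>

#include "msgstruct.hpp"

int main()
{
    std::queue<ex_event> event_queue;

    // Producer side (e.g. the recv thread): the string is moved into the event.
    std::string sError("CONNECT error: example");
    event_queue.emplace(std::move(sError), size_t(0));

    // Consumer side (e.g. the executor): move the event back off the queue.
    ex_event ev = std::move(event_queue.front());
    event_queue.pop();

    if(ev.iName == EV_SOCK_ERROR)
        printf("pool %zu reported: %s\n", ev.iPoolId, ev.sSocketError.c_str());

    return 0;
}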
diff --git a/xmrstak/net/socket.cpp b/xmrstak/net/socket.cpp
new file mode 100644
index 0000000..52f46b5
--- /dev/null
+++ b/xmrstak/net/socket.cpp
@@ -0,0 +1,366 @@
+/*
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Additional permission under GNU GPL version 3 section 7
+ *
+ * If you modify this Program, or any covered work, by linking or combining
+ * it with OpenSSL (or a modified version of that library), containing parts
+ * covered by the terms of OpenSSL License and SSLeay License, the licensors
+ * of this Program grant you additional permission to convey the resulting work.
+ *
+ */
+
+#include "socket.h"
+#include "jpsock.h"
+#include "jconf.h"
+#include "console.h"
+#include "executor.h"
+
+#ifndef CONF_NO_TLS
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/opensslconf.h>
+
+#ifndef OPENSSL_THREADS
+#error OpenSSL was compiled without thread support
+#endif
+#endif
+
+plain_socket::plain_socket(jpsock* err_callback) : pCallback(err_callback)
+{
+ hSocket = INVALID_SOCKET;
+ pSockAddr = nullptr;
+}
+
+bool plain_socket::set_hostname(const char* sAddr)
+{
+ char sAddrMb[256];
+ char *sTmp, *sPort;
+
+ size_t ln = strlen(sAddr);
+ if (ln >= sizeof(sAddrMb))
+ return pCallback->set_socket_error("CONNECT error: Pool address overflow.");
+
+ memcpy(sAddrMb, sAddr, ln);
+ sAddrMb[ln] = '\0';
+
+ if ((sTmp = strstr(sAddrMb, "//")) != nullptr)
+ {
+ sTmp += 2;
+ memmove(sAddrMb, sTmp, strlen(sTmp) + 1);
+ }
+
+ if ((sPort = strchr(sAddrMb, ':')) == nullptr)
+ return pCallback->set_socket_error("CONNECT error: Pool port number not specified, please use format <hostname>:<port>.");
+
+ sPort[0] = '\0';
+ sPort++;
+
+ addrinfo hints = { 0 };
+ hints.ai_family = AF_UNSPEC;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_protocol = IPPROTO_TCP;
+
+ pAddrRoot = nullptr;
+ int err;
+ if ((err = getaddrinfo(sAddrMb, sPort, &hints, &pAddrRoot)) != 0)
+ return pCallback->set_socket_error_strerr("CONNECT error: GetAddrInfo: ", err);
+
+ addrinfo *ptr = pAddrRoot;
+ std::vector<addrinfo*> ipv4;
+ std::vector<addrinfo*> ipv6;
+
+ while (ptr != nullptr)
+ {
+ if (ptr->ai_family == AF_INET)
+ ipv4.push_back(ptr);
+ if (ptr->ai_family == AF_INET6)
+ ipv6.push_back(ptr);
+ ptr = ptr->ai_next;
+ }
+
+ if (ipv4.empty() && ipv6.empty())
+ {
+ freeaddrinfo(pAddrRoot);
+ pAddrRoot = nullptr;
+ return pCallback->set_socket_error("CONNECT error: I found some DNS records but no IPv4 or IPv6 addresses.");
+ }
+ else if (!ipv4.empty() && ipv6.empty())
+ pSockAddr = ipv4[rand() % ipv4.size()];
+ else if (ipv4.empty() && !ipv6.empty())
+ pSockAddr = ipv6[rand() % ipv6.size()];
+ else if (!ipv4.empty() && !ipv6.empty())
+ {
+ if(jconf::inst()->PreferIpv4())
+ pSockAddr = ipv4[rand() % ipv4.size()];
+ else
+ pSockAddr = ipv6[rand() % ipv6.size()];
+ }
+
+ hSocket = socket(pSockAddr->ai_family, pSockAddr->ai_socktype, pSockAddr->ai_protocol);
+
+ if (hSocket == INVALID_SOCKET)
+ {
+ freeaddrinfo(pAddrRoot);
+ pAddrRoot = nullptr;
+ return pCallback->set_socket_error_strerr("CONNECT error: Socket creation failed ");
+ }
+
+ return true;
+}
+
+bool plain_socket::connect()
+{
+ int ret = ::connect(hSocket, pSockAddr->ai_addr, (int)pSockAddr->ai_addrlen);
+
+ freeaddrinfo(pAddrRoot);
+ pAddrRoot = nullptr;
+
+ if (ret != 0)
+ return pCallback->set_socket_error_strerr("CONNECT error: ");
+ else
+ return true;
+}
+
+int plain_socket::recv(char* buf, unsigned int len)
+{
+ int ret = ::recv(hSocket, buf, len, 0);
+
+ if(ret == 0)
+ pCallback->set_socket_error("RECEIVE error: socket closed");
+ if(ret == SOCKET_ERROR || ret < 0)
+ pCallback->set_socket_error_strerr("RECEIVE error: ");
+
+ return ret;
+}
+
+bool plain_socket::send(const char* buf)
+{
+ int pos = 0, slen = strlen(buf);
+ while (pos != slen)
+ {
+ int ret = ::send(hSocket, buf + pos, slen - pos, 0);
+ if (ret == SOCKET_ERROR)
+ {
+ pCallback->set_socket_error_strerr("SEND error: ");
+ return false;
+ }
+ else
+ pos += ret;
+ }
+
+ return true;
+}
+
+void plain_socket::close(bool free)
+{
+ if(hSocket != INVALID_SOCKET)
+ {
+ sock_close(hSocket);
+ hSocket = INVALID_SOCKET;
+ }
+}
+
+#ifndef CONF_NO_TLS
+tls_socket::tls_socket(jpsock* err_callback) : pCallback(err_callback)
+{
+}
+
+void tls_socket::print_error()
+{
+ BIO* err_bio = BIO_new(BIO_s_mem());
+ ERR_print_errors(err_bio);
+
+ char *buf = nullptr;
+ size_t len = BIO_get_mem_data(err_bio, &buf);
+
+ pCallback->set_socket_error(buf, len);
+
+ BIO_free(err_bio);
+}
+
+void tls_socket::init_ctx()
+{
+ const SSL_METHOD* method = SSLv23_method();
+
+ if(method == nullptr)
+ return;
+
+ ctx = SSL_CTX_new(method);
+ if(ctx == nullptr)
+ return;
+
+ if(jconf::inst()->TlsSecureAlgos())
+ {
+ SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_COMPRESSION);
+ }
+}
+
+bool tls_socket::set_hostname(const char* sAddr)
+{
+ if(ctx == nullptr)
+ {
+ init_ctx();
+ if(ctx == nullptr)
+ {
+ print_error();
+ return false;
+ }
+ }
+
+ if((bio = BIO_new_ssl_connect(ctx)) == nullptr)
+ {
+ print_error();
+ return false;
+ }
+
+ if(BIO_set_conn_hostname(bio, sAddr) != 1)
+ {
+ print_error();
+ return false;
+ }
+
+ BIO_get_ssl(bio, &ssl);
+ if(ssl == nullptr)
+ {
+ print_error();
+ return false;
+ }
+
+ if(jconf::inst()->TlsSecureAlgos())
+ {
+ if(SSL_set_cipher_list(ssl, "HIGH:!aNULL:!kRSA:!PSK:!SRP:!MD5:!RC4:!SHA1") != 1)
+ {
+ print_error();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool tls_socket::connect()
+{
+ if(BIO_do_connect(bio) != 1)
+ {
+ print_error();
+ return false;
+ }
+
+ if(BIO_do_handshake(bio) != 1)
+ {
+ print_error();
+ return false;
+ }
+
+ /* Step 1: verify a server certificate was presented during the negotiation */
+ X509* cert = SSL_get_peer_certificate(ssl);
+ if(cert == nullptr)
+ {
+ print_error();
+ return false;
+ }
+
+ const EVP_MD* digest;
+ unsigned char md[EVP_MAX_MD_SIZE];
+ unsigned int dlen;
+
+ digest = EVP_get_digestbyname("sha256");
+ if(digest == nullptr)
+ {
+ print_error();
+ return false;
+ }
+
+ if(X509_digest(cert, digest, md, &dlen) != 1)
+ {
+ X509_free(cert);
+ print_error();
+ return false;
+ }
+
+ if(pCallback->pool_id != executor::dev_pool_id)
+ {
+ //Base64 encode digest
+ BIO *bmem, *b64;
+ b64 = BIO_new(BIO_f_base64());
+ bmem = BIO_new(BIO_s_mem());
+
+ BIO_puts(bmem, "SHA256:");
+ b64 = BIO_push(b64, bmem);
+ BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);
+ BIO_write(b64, md, dlen);
+ BIO_flush(b64);
+
+ const char* conf_md = jconf::inst()->GetTlsFingerprint();
+ char *b64_md = nullptr;
+ size_t b64_len = BIO_get_mem_data(bmem, &b64_md);
+
+ if(strlen(conf_md) == 0)
+ {
+ printer::inst()->print_msg(L1, "Server fingerprint: %.*s", (int)b64_len, b64_md);
+ }
+ else if(strncmp(b64_md, conf_md, b64_len) != 0)
+ {
+ printer::inst()->print_msg(L0, "FINGERPRINT FAILED CHECK: %.*s was given, %s was configured",
+ (int)b64_len, b64_md, conf_md);
+
+ pCallback->set_socket_error("FINGERPRINT FAILED CHECK");
+ BIO_free_all(b64);
+ X509_free(cert);
+ return false;
+ }
+
+ BIO_free_all(b64);
+ }
+
+ X509_free(cert);
+ return true;
+}
+
+int tls_socket::recv(char* buf, unsigned int len)
+{
+ int ret = BIO_read(bio, buf, len);
+
+ if(ret == 0)
+ pCallback->set_socket_error("RECEIVE error: socket closed");
+ if(ret < 0)
+ print_error();
+
+ return ret;
+}
+
+bool tls_socket::send(const char* buf)
+{
+ return BIO_puts(bio, buf) > 0;
+}
+
+void tls_socket::close(bool free)
+{
+ if(bio == nullptr || ssl == nullptr)
+ return;
+
+ if(!free)
+ {
+ sock_close(BIO_get_fd(bio, nullptr));
+ }
+ else
+ {
+ BIO_free_all(bio);
+ ssl = nullptr;
+ bio = nullptr;
+ }
+}
+#endif
+
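tls_socket::connect() pins the server certificate by comparing a "SHA256:<base64>" string against jconf's GetTlsFingerprint(). The standalone sketch below (not part of this patch) prints a certificate's fingerprint in that exact format so it can be pasted into the config; "pool_cert.pem" is a placeholder for the server certificate saved in PEM form (for example copied out of the output of openssl s_client -connect host:port).

#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pem.h>
#include <openssl/x509.h>
#include <cstdio>

int main()
{
    FILE* fp = fopen("pool_cert.pem", "r");
    if(fp == nullptr)
        return 1;

    X509* cert = PEM_read_X509(fp, nullptr, nullptr, nullptr);
    fclose(fp);
    if(cert == nullptr)
        return 1;

    unsigned char md[EVP_MAX_MD_SIZE];
    unsigned int dlen;
    if(X509_digest(cert, EVP_sha256(), md, &dlen) != 1)
    {
        X509_free(cert);
        return 1;
    }

    // Same construction as tls_socket::connect(): "SHA256:" prefix, then the
    // raw digest pushed through a base64 filter BIO with newlines disabled.
    BIO* bmem = BIO_new(BIO_s_mem());
    BIO* b64 = BIO_new(BIO_f_base64());
    BIO_puts(bmem, "SHA256:");
    b64 = BIO_push(b64, bmem);
    BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);
    BIO_write(b64, md, dlen);
    BIO_flush(b64);

    char* out = nullptr;
    long len = BIO_get_mem_data(bmem, &out);
    printf("%.*s\n", (int)len, out);

    BIO_free_all(b64);
    X509_free(cert);
    return 0;
}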
diff --git a/xmrstak/net/socket.hpp b/xmrstak/net/socket.hpp
new file mode 100644
index 0000000..94bbf03
--- /dev/null
+++ b/xmrstak/net/socket.hpp
@@ -0,0 +1,57 @@
+#pragma once
+#include "socks.h"
+class jpsock;
+
+class base_socket
+{
+public:
+ virtual bool set_hostname(const char* sAddr) = 0;
+ virtual bool connect() = 0;
+ virtual int recv(char* buf, unsigned int len) = 0;
+ virtual bool send(const char* buf) = 0;
+ virtual void close(bool free) = 0;
+};
+
+class plain_socket : public base_socket
+{
+public:
+ plain_socket(jpsock* err_callback);
+
+ bool set_hostname(const char* sAddr);
+ bool connect();
+ int recv(char* buf, unsigned int len);
+ bool send(const char* buf);
+ void close(bool free);
+
+private:
+ jpsock* pCallback;
+ addrinfo *pSockAddr;
+ addrinfo *pAddrRoot;
+ SOCKET hSocket;
+};
+
+typedef struct ssl_ctx_st SSL_CTX;
+typedef struct bio_st BIO;
+typedef struct ssl_st SSL;
+
+class tls_socket : public base_socket
+{
+public:
+ tls_socket(jpsock* err_callback);
+
+ bool set_hostname(const char* sAddr);
+ bool connect();
+ int recv(char* buf, unsigned int len);
+ bool send(const char* buf);
+ void close(bool free);
+
+private:
+ void init_ctx();
+ void print_error();
+
+ jpsock* pCallback;
+
+ SSL_CTX* ctx = nullptr;
+ BIO* bio = nullptr;
+ SSL* ssl = nullptr;
+};
diff --git a/xmrstak/net/socks.hpp b/xmrstak/net/socks.hpp
new file mode 100644
index 0000000..82bfa2f
--- /dev/null
+++ b/xmrstak/net/socks.hpp
@@ -0,0 +1,97 @@
+#pragma once
+#ifdef _WIN32
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x0601 /* Windows 7 */
+#endif
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+
+inline void sock_init()
+{
+ static bool bWSAInit = false;
+
+ if (!bWSAInit)
+ {
+ WSADATA wsaData;
+ WSAStartup(MAKEWORD(2, 2), &wsaData);
+ bWSAInit = true;
+ }
+}
+
+inline void sock_close(SOCKET s)
+{
+ shutdown(s, SD_BOTH);
+ closesocket(s);
+}
+
+inline const char* sock_strerror(char* buf, size_t len)
+{
+ buf[0] = '\0';
+
+ FormatMessageA(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK,
+ NULL, WSAGetLastError(),
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)buf, len, NULL);
+
+ return buf;
+}
+
+inline const char* sock_gai_strerror(int err, char* buf, size_t len)
+{
+ buf[0] = '\0';
+
+ FormatMessageA(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK,
+ NULL, (DWORD)err,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)buf, len, NULL);
+
+ return buf;
+}
+
+#else
+
+/* Assume that any non-Windows platform uses POSIX-style sockets instead. */
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netdb.h> /* Needed for getaddrinfo() and freeaddrinfo() */
+#include <unistd.h> /* Needed for close() */
+#include <errno.h>
+#include <string.h>
+#if defined(__FreeBSD__)
+#include <netinet/in.h> /* Needed for IPPROTO_TCP */
+#endif
+
+inline void sock_init() {}
+typedef int SOCKET;
+
+#define INVALID_SOCKET (-1)
+#define SOCKET_ERROR (-1)
+
+inline void sock_close(SOCKET s)
+{
+ shutdown(s, SHUT_RDWR);
+ close(s);
+}
+
+inline const char* sock_strerror(char* buf, size_t len)
+{
+ buf[0] = '\0';
+
+#if defined(__APPLE__) || defined(__FreeBSD__) || !defined(_GNU_SOURCE) || !defined(__GLIBC__)
+
+ strerror_r(errno, buf, len);
+ return buf;
+#else
+ return strerror_r(errno, buf, len);
+#endif
+}
+
+inline const char* sock_gai_strerror(int err, char* buf, size_t len)
+{
+ buf[0] = '\0';
+ return gai_strerror(err);
+}
+#endif
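A minimal sketch (not part of this patch) exercising the shim above: a lookup of a reserved, non-resolvable name is reported through sock_gai_strerror(), and an errno/WSAGetLastError-style failure through sock_strerror(). The hostname and port are placeholders.

#include <cstdio>

#include "socks.hpp"

int main()
{
    sock_init(); // WSAStartup on Windows, a no-op elsewhere

    char buf[512];
    addrinfo hints = {};
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;

    addrinfo* res = nullptr;
    int err = getaddrinfo("pool.does-not-exist.invalid", "4444", &hints, &res);
    if(err != 0)
        printf("getaddrinfo failed: %s\n", sock_gai_strerror(err, buf, sizeof(buf)));
    else
        freeaddrinfo(res);

    SOCKET s = socket(-1, SOCK_STREAM, 0); // invalid address family to force an error
    if(s == INVALID_SOCKET)
        printf("socket failed: %s\n", sock_strerror(buf, sizeof(buf)));
    else
        sock_close(s);

    return 0;
}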