#ifndef __MINER_H__
#define __MINER_H__

#include "config.h"

#include <stdbool.h>
#include <stdint.h>
#include <sys/time.h>
#include <pthread.h>
#include <jansson.h>
#ifdef HAVE_LIBCURL
#include <curl/curl.h>
#else
typedef char CURL;
extern char *curly;
#define curl_easy_init(curl) (curly)
#define curl_easy_cleanup(curl) {}
#define curl_global_cleanup() {}
#define CURL_GLOBAL_ALL 0
#define curl_global_init(X) (0)
#endif
#include <sched.h>

#include "elist.h"
#include "uthash.h"
#include "logging.h"
#include "util.h"
#include <sys/types.h>
#ifndef WIN32
# include <sys/socket.h>
# include <netdb.h>
#endif

#ifdef USE_USBUTILS
#include <semaphore.h>
#endif

#ifdef STDC_HEADERS
# include <stdlib.h>
# include <stddef.h>
#else
# ifdef HAVE_STDLIB_H
#  include <stdlib.h>
# endif
#endif

#ifdef HAVE_ALLOCA_H
# include <alloca.h>
#elif defined __GNUC__
# ifndef WIN32
#  define alloca __builtin_alloca
# else
#  include <malloc.h>
# endif
#elif defined _AIX
# define alloca __alloca
#elif defined _MSC_VER
# include <malloc.h>
# define alloca _alloca
#else
# ifndef HAVE_ALLOCA
#  ifdef __cplusplus
extern "C"
#  endif
void *alloca (size_t);
# endif
#endif

#ifdef __MINGW32__
#include <windows.h>
#include <io.h>
static inline int fsync (int fd)
{
	return (FlushFileBuffers ((HANDLE) _get_osfhandle (fd))) ? 0 : -1;
}

#ifndef EWOULDBLOCK
# define EWOULDBLOCK EAGAIN
#endif

#ifndef MSG_DONTWAIT
# define MSG_DONTWAIT 0x1000000
#endif
#endif /* __MINGW32__ */

#if defined (__linux)
#ifndef LINUX
#define LINUX
#endif
#endif

#ifdef WIN32
#ifndef timersub
#define timersub(a, b, result) \
	do { \
		(result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \
		(result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \
		if ((result)->tv_usec < 0) { \
			--(result)->tv_sec; \
			(result)->tv_usec += 1000000; \
		} \
	} while (0)
#endif

#ifndef timeradd
# define timeradd(a, b, result) \
	do { \
		(result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
		(result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
		if ((result)->tv_usec >= 1000000) \
		{ \
			++(result)->tv_sec; \
			(result)->tv_usec -= 1000000; \
		} \
	} while (0)
#endif
#endif
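
/*
 * Illustrative sketch (not part of the original header): the timeval helpers
 * above are used like the POSIX macros, e.g. to measure how long a network
 * round trip took. Variable names below are hypothetical; gettimeofday() is
 * the standard sys/time.h call.
 *
 *	struct timeval tv_start, tv_end, tv_elapsed;
 *
 *	gettimeofday(&tv_start, NULL);
 *	// ... perform the request ...
 *	gettimeofday(&tv_end, NULL);
 *	timersub(&tv_end, &tv_start, &tv_elapsed);
 *	// tv_elapsed now holds the elapsed wall-clock time
 */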
#ifdef USE_USBUTILS
#include <libusb.h>
#endif

#ifdef USE_USBUTILS
#include "usbutils.h"
#endif

#if (!defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
    || (defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
#ifndef bswap_16
#define bswap_16 __builtin_bswap16
#define bswap_32 __builtin_bswap32
#define bswap_64 __builtin_bswap64
#endif
#else
#if HAVE_BYTESWAP_H
#include <byteswap.h>
#elif defined(USE_SYS_ENDIAN_H)
#include <sys/endian.h>
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#define bswap_16 OSSwapInt16
#define bswap_32 OSSwapInt32
#define bswap_64 OSSwapInt64
#else
#define bswap_16(value) \
	((((value) & 0xff) << 8) | ((value) >> 8))
#define bswap_32(value) \
	(((uint32_t)bswap_16((uint16_t)((value) & 0xffff)) << 16) | \
	 (uint32_t)bswap_16((uint16_t)((value) >> 16)))
#define bswap_64(value) \
	(((uint64_t)bswap_32((uint32_t)((value) & 0xffffffff)) << 32) | \
	 (uint64_t)bswap_32((uint32_t)((value) >> 32)))
#endif
#endif /* bswap_16/32/64 definitions */
/* This assumes htobe32 is a macro in endian.h, and if it doesn't exist, then
 * htobe64 also won't exist */
#ifndef htobe32
# if __BYTE_ORDER == __LITTLE_ENDIAN
#  define htole16(x) (x)
#  define le16toh(x) (x)
#  define htole32(x) (x)
#  define htole64(x) (x)
#  define le32toh(x) (x)
#  define le64toh(x) (x)
#  define be32toh(x) bswap_32(x)
#  define be64toh(x) bswap_64(x)
#  define htobe16(x) bswap_16(x)
#  define htobe32(x) bswap_32(x)
#  define htobe64(x) bswap_64(x)
# elif __BYTE_ORDER == __BIG_ENDIAN
#  define htole16(x) bswap_16(x)
#  define le16toh(x) bswap_16(x)
#  define htole32(x) bswap_32(x)
#  define le32toh(x) bswap_32(x)
#  define le64toh(x) bswap_64(x)
#  define htole64(x) bswap_64(x)
#  define be32toh(x) (x)
#  define be64toh(x) (x)
#  define htobe16(x) (x)
#  define htobe32(x) (x)
#  define htobe64(x) (x)
# else
#  error UNKNOWN BYTE ORDER
# endif
#endif
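
/*
 * Illustrative sketch (not part of the original header): these conversions are
 * used wherever a value must be in a fixed byte order regardless of the host,
 * e.g. when a 32-bit field is laid out big-endian on the wire:
 *
 *	uint32_t value = 0x12345678;
 *	uint32_t be_value = htobe32(value);   // big-endian representation
 *	uint32_t host_value = be32toh(be_value); // back to host order
 */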
#undef unlikely
#undef likely
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#define unlikely(expr) (__builtin_expect(!!(expr), 0))
#define likely(expr) (__builtin_expect(!!(expr), 1))
#else
#define unlikely(expr) (expr)
#define likely(expr) (expr)
#endif
#define __maybe_unused __attribute__((unused))

#define uninitialised_var(x) x = x

#if defined(__i386__)
#define WANT_CRYPTOPP_ASM32
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif

/* No semtimedop on apple so ignore timeout till we implement one */
#ifdef __APPLE__
#define semtimedop(SEM, SOPS, VAL, TIMEOUT) semop(SEM, SOPS, VAL)
#endif

#ifndef MIN
#define MIN(x, y) ((x) > (y) ? (y) : (x))
#endif
#ifndef MAX
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif
/* Put avalon last so it is the last device type probed during detection, to
 * prevent it claiming the same chip as a different driver. Adding a device
 * here will update all macros in the code that use the *_PARSE_COMMANDS
 * macros for each listed driver. */
#define FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	DRIVER_ADD_COMMAND(bitforce) \
	DRIVER_ADD_COMMAND(modminer)

#define ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	DRIVER_ADD_COMMAND(ants1) \
	DRIVER_ADD_COMMAND(ants2) \
	DRIVER_ADD_COMMAND(avalon) \
	DRIVER_ADD_COMMAND(avalon2) \
	DRIVER_ADD_COMMAND(bflsc) \
	DRIVER_ADD_COMMAND(bitfury) \
	DRIVER_ADD_COMMAND(blockerupter) \
	DRIVER_ADD_COMMAND(cointerra) \
	DRIVER_ADD_COMMAND(hashfast) \
	DRIVER_ADD_COMMAND(hashratio) \
	DRIVER_ADD_COMMAND(icarus) \
	DRIVER_ADD_COMMAND(klondike) \
	DRIVER_ADD_COMMAND(knc) \
	DRIVER_ADD_COMMAND(bitmineA1) \
	DRIVER_ADD_COMMAND(drillbit) \
	DRIVER_ADD_COMMAND(bab) \
	DRIVER_ADD_COMMAND(minion) \
	DRIVER_ADD_COMMAND(sp10) \
	DRIVER_ADD_COMMAND(sp30) \
	DRIVER_ADD_COMMAND(gridseed) \
	DRIVER_ADD_COMMAND(lketc) \
	DRIVER_ADD_COMMAND(zeus)

#define DRIVER_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \
	ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND)

#define DRIVER_ENUM(X) DRIVER_##X,
#define DRIVER_PROTOTYPE(X) struct device_drv X##_drv;

/* Create drv_driver enum from DRIVER_PARSE_COMMANDS macro */
enum drv_driver {
	DRIVER_PARSE_COMMANDS(DRIVER_ENUM)
	DRIVER_MAX
};

/* Use DRIVER_PARSE_COMMANDS to generate extern device_drv prototypes */
DRIVER_PARSE_COMMANDS(DRIVER_PROTOTYPE)
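
/*
 * Illustrative sketch (not part of the original header) of how the X-macro
 * expands for one listed driver, e.g. avalon:
 *
 *	DRIVER_PARSE_COMMANDS(DRIVER_ENUM)      -> ... DRIVER_avalon, ...
 *	DRIVER_PARSE_COMMANDS(DRIVER_PROTOTYPE) -> ... struct device_drv avalon_drv; ...
 *
 * so a single DRIVER_ADD_COMMAND(name) entry above produces both the
 * DRIVER_name enum value and the name_drv device_drv prototype.
 */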
enum alive {
	LIFE_WELL,
	LIFE_SICK,
	LIFE_DEAD,
	LIFE_NOSTART,
	LIFE_INIT,
};

enum pool_strategy {
	POOL_FAILOVER,
	POOL_ROUNDROBIN,
	POOL_ROTATE,
	POOL_LOADBALANCE,
	POOL_BALANCE,
};

#define TOP_STRATEGY (POOL_BALANCE)

struct strategies {
	const char *s;
};

struct cgpu_info;

extern void blank_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info __maybe_unused *cgpu);

struct api_data;
struct thr_info;
struct work;

struct device_drv {
	enum drv_driver drv_id;

	char *dname;
	char *name;

	// DRV-global functions
	void (*drv_detect)(bool);

	// Device-specific functions
	void (*reinit_device)(struct cgpu_info *);
	void (*get_statline_before)(char *, size_t, struct cgpu_info *);
	void (*get_statline)(char *, size_t, struct cgpu_info *);
	struct api_data *(*get_api_stats)(struct cgpu_info *);
	bool (*get_stats)(struct cgpu_info *);
	void (*identify_device)(struct cgpu_info *); // e.g. to flash a led
	char *(*set_device)(struct cgpu_info *, char *option, char *setting, char *replybuf);

	// Thread-specific functions
	bool (*thread_prepare)(struct thr_info *);
	uint64_t (*can_limit_work)(struct thr_info *);
	bool (*thread_init)(struct thr_info *);
	bool (*prepare_work)(struct thr_info *, struct work *);

	/* Which hash work loop this driver uses. */
	void (*hash_work)(struct thr_info *);

	/* Two variants depending on whether the device divides work up into
	 * small pieces or works with whole work items and may or may not have
	 * a queue of its own. */
	int64_t (*scanhash)(struct thr_info *, struct work *, int64_t);
	int64_t (*scanwork)(struct thr_info *);

	/* Used to extract work from the hash table of queued work and tell
	 * the main loop that it should not add any further work to the table.
	 */
	bool (*queue_full)(struct cgpu_info *);
	/* Tell the driver of a block change */
	void (*flush_work)(struct cgpu_info *);
	/* Tell the driver of an updated work template for eg. stratum */
	void (*update_work)(struct cgpu_info *);

	void (*hw_error)(struct thr_info *);
	void (*thread_shutdown)(struct thr_info *);
	void (*thread_enable)(struct thr_info *);

	/* What should be zeroed in this device when global zero stats is sent */
	void (*zero_stats)(struct cgpu_info *);

	// Does it need to be free()d?
	bool copy;

	/* Highest target diff the device supports */
	double max_diff;

	/* Lowest diff the controller can safely run at */
	double min_diff;
};
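
/*
 * Illustrative sketch (not part of the original header): a driver normally
 * provides a device_drv definition filling in only the callbacks it needs.
 * The names below are hypothetical; real drivers use their own DRIVER_<name>
 * id and callbacks.
 *
 *	static void example_detect(bool hotplug) { ... }
 *	static int64_t example_scanwork(struct thr_info *thr) { ... }
 *
 *	struct device_drv example_drv = {
 *		.drv_id = DRIVER_MAX,	// placeholder; real drivers use DRIVER_<name>
 *		.dname = "example",
 *		.name = "EXA",
 *		.drv_detect = example_detect,
 *		.hash_work = hash_driver_work,	// declared later in this header
 *		.scanwork = example_scanwork,
 *	};
 */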
extern struct device_drv *copy_drv(struct device_drv*);

enum dev_enable {
	DEV_ENABLED,
	DEV_DISABLED,
	DEV_RECOVER,
};

enum dev_reason {
	REASON_THREAD_FAIL_INIT,
	REASON_THREAD_ZERO_HASH,
	REASON_THREAD_FAIL_QUEUE,
	REASON_DEV_SICK_IDLE_60,
	REASON_DEV_DEAD_IDLE_600,
	REASON_DEV_NOSTART,
	REASON_DEV_OVER_HEAT,
	REASON_DEV_THERMAL_CUTOFF,
	REASON_DEV_COMMS_ERROR,
	REASON_DEV_THROTTLE,
};

#define REASON_NONE "None"
#define REASON_THREAD_FAIL_INIT_STR "Thread failed to init"
#define REASON_THREAD_ZERO_HASH_STR "Thread got zero hashes"
#define REASON_THREAD_FAIL_QUEUE_STR "Thread failed to queue work"
#define REASON_DEV_SICK_IDLE_60_STR "Device idle for 60s"
#define REASON_DEV_DEAD_IDLE_600_STR "Device dead - idle for 600s"
#define REASON_DEV_NOSTART_STR "Device failed to start"
#define REASON_DEV_OVER_HEAT_STR "Device over heated"
#define REASON_DEV_THERMAL_CUTOFF_STR "Device reached thermal cutoff"
#define REASON_DEV_COMMS_ERROR_STR "Device comms error"
#define REASON_DEV_THROTTLE_STR "Device throttle"
#define REASON_UNKNOWN_STR "Unknown reason - code bug"

#define MIN_SEC_UNSET 99999999

struct cgminer_stats {
	uint32_t getwork_calls;
	struct timeval getwork_wait;
	struct timeval getwork_wait_max;
	struct timeval getwork_wait_min;
};

// Just the actual network getworks to the pool
struct cgminer_pool_stats {
	uint32_t getwork_calls;
	uint32_t getwork_attempts;
	struct timeval getwork_wait;
	struct timeval getwork_wait_max;
	struct timeval getwork_wait_min;
	double getwork_wait_rolling;
	bool hadrolltime;
	bool canroll;
	bool hadexpire;
	uint32_t rolltime;
	double min_diff;
	double max_diff;
	double last_diff;
	uint32_t min_diff_count;
	uint32_t max_diff_count;
	uint64_t times_sent;
	uint64_t bytes_sent;
	uint64_t net_bytes_sent;
	uint64_t times_received;
	uint64_t bytes_received;
	uint64_t net_bytes_received;
};

struct cgpu_info {
	int cgminer_id;
	struct device_drv *drv;
	int device_id;
	char *name;
	char *device_path;
	void *device_data;
	void *dup_data;
	char *unique_id;
#ifdef USE_USBUTILS
	struct cg_usb_device *usbdev;
	struct cg_usb_info usbinfo;
	bool blacklisted;
#endif
#if defined(USE_AVALON) || defined(USE_AVALON2)
	struct work **works;
	int work_array;
	int queued;
	int results;
#endif
#ifdef USE_MODMINER
	char fpgaid;
	unsigned char clock;
	pthread_mutex_t *modminer_mutex;
#endif
#ifdef USE_BITFORCE
	struct timeval work_start_tv;
	unsigned int wait_ms;
	unsigned int sleep_ms;
	double avg_wait_f;
	unsigned int avg_wait_d;
	uint32_t nonces;
	bool nonce_range;
	bool polling;
	bool flash_led;
#endif /* USE_BITFORCE */
#if defined(USE_BITFORCE) || defined(USE_BFLSC)
	pthread_mutex_t device_mutex;
#endif /* USE_BITFORCE || USE_BFLSC */
	enum dev_enable deven;
	int accepted;
	int rejected;
	int hw_errors;
	double rolling;
	double rolling1;
	double rolling5;
	double rolling15;
	double total_mhashes;
	double utility;
	enum alive status;
	char init[40];
	struct timeval last_message_tv;

	int threads;
	struct thr_info **thr;

	int64_t max_hashes;

	const char *kname;

	bool new_work;

	double temp;
	int cutofftemp;

	int64_t diff1;
	double diff_accepted;
	double diff_rejected;
	int last_share_pool;
	time_t last_share_pool_time;
	double last_share_diff;
	time_t last_device_valid_work;

	time_t device_last_well;
	time_t device_last_not_well;
	enum dev_reason device_not_well_reason;
	int thread_fail_init_count;
	int thread_zero_hash_count;
	int thread_fail_queue_count;
	int dev_sick_idle_60_count;
	int dev_dead_idle_600_count;
	int dev_nostart_count;
	int dev_over_heat_count;	// It's a warning but worth knowing
	int dev_thermal_cutoff_count;
	int dev_comms_error_count;
	int dev_throttle_count;

	struct cgminer_stats cgminer_stats;

	pthread_rwlock_t qlock;
	struct work *queued_work;
	struct work *unqueued_work;
	unsigned int queued_count;

	bool shutdown;

	struct timeval dev_start_tv;

	/* For benchmarking only */
	int hidiff;
	int lodiff;
	int direction;
};

extern bool add_cgpu(struct cgpu_info*);

struct thread_q {
	struct list_head q;
	bool frozen;

	pthread_mutex_t mutex;
	pthread_cond_t cond;
};

struct thr_info {
	int id;
	int device_thread;
	bool primary_thread;

	pthread_t pth;
	cgsem_t sem;
	struct thread_q *q;
	struct cgpu_info *cgpu;
	void *cgpu_data;
	struct timeval last;
	struct timeval sick;

	bool pause;
	bool getwork;

	bool work_restart;
	bool work_update;
};

struct string_elist {
	char *string;
	bool free_me;

	struct list_head list;
};

static inline void string_elist_add(const char *s, struct list_head *head)
{
	struct string_elist *n;

	n = calloc(1, sizeof(*n));
	n->string = strdup(s);
	n->free_me = true;
	list_add_tail(&n->list, head);
}

static inline void string_elist_del(struct string_elist *item)
{
	if (item->free_me)
		free(item->string);
	list_del(&item->list);
}
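
/*
 * Illustrative sketch (not part of the original header): string_elist entries
 * hang off a struct list_head, assuming elist.h provides the usual
 * kernel-style list initializer (the list name here is hypothetical):
 *
 *	struct list_head my_strings;
 *	INIT_LIST_HEAD(&my_strings);
 *	string_elist_add("first", &my_strings);
 *	string_elist_add("second", &my_strings);
 *	// later, for an entry *e obtained by walking the list:
 *	// string_elist_del(e); free(e);
 */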
static inline uint32_t swab32(uint32_t v)
{
	return bswap_32(v);
}

static inline void swap256(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;

	dest[0] = src[7];
	dest[1] = src[6];
	dest[2] = src[5];
	dest[3] = src[4];
	dest[4] = src[3];
	dest[5] = src[2];
	dest[6] = src[1];
	dest[7] = src[0];
}

static inline void swab256(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;

	dest[0] = swab32(src[7]);
	dest[1] = swab32(src[6]);
	dest[2] = swab32(src[5]);
	dest[3] = swab32(src[4]);
	dest[4] = swab32(src[3]);
	dest[5] = swab32(src[2]);
	dest[6] = swab32(src[1]);
	dest[7] = swab32(src[0]);
}

static inline void flip12(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = swab32(src[i]);
}

static inline void flip32(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;
	int i;

	for (i = 0; i < 8; i++)
		dest[i] = swab32(src[i]);
}

static inline void flip64(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;
	int i;

	for (i = 0; i < 16; i++)
		dest[i] = swab32(src[i]);
}

static inline void flip80(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;
	int i;

	for (i = 0; i < 20; i++)
		dest[i] = swab32(src[i]);
}

static inline void flip128(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;
	int i;

	for (i = 0; i < 32; i++)
		dest[i] = swab32(src[i]);
}

/* For flipping to the correct endianness if necessary */
#if defined(__BIG_ENDIAN__) || defined(MIPSEB)
static inline void endian_flip32(void *dest_p, const void *src_p)
{
	flip32(dest_p, src_p);
}

static inline void endian_flip128(void *dest_p, const void *src_p)
{
	flip128(dest_p, src_p);
}
#else
static inline void
endian_flip32(void __maybe_unused *dest_p, const void __maybe_unused *src_p)
{
}

static inline void
endian_flip128(void __maybe_unused *dest_p, const void __maybe_unused *src_p)
{
}
#endif
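
/*
 * Illustrative sketch (not part of the original header): the flipNN helpers
 * byte-swap NN bytes as 32-bit words. For example, flip80() can swap the
 * 80-byte block header held at the start of work->data into a separate buffer
 * before hashing (buffer name below is hypothetical):
 *
 *	unsigned char swapped[80];
 *	flip80(swapped, work->data);
 */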
extern double cgpu_runtime(struct cgpu_info *cgpu);
extern double tsince_restart(void);
extern double tsince_update(void);
extern void __quit(int status, bool clean);
extern void _quit(int status);
/*
 * Set this to non-zero to enable lock tracking.
 * Use the API lockstats command to see the locking status on stderr
 * i.e. in your log file if you 2> log.log - but not on the screen.
 * API lockstats is privileged but will always exist and will return
 * success if LOCK_TRACKING is enabled and a warning if disabled.
 * In production code this should never be enabled since it will slow down all locking,
 * so use it e.g. to track down a deadlock - after a reproducible deadlock occurs.
 * ... Of course if the API code itself deadlocks, it won't help :)
 */
#define LOCK_TRACKING 0

#if LOCK_TRACKING
enum cglock_typ {
	CGLOCK_MUTEX,
	CGLOCK_RW,
	CGLOCK_UNKNOWN
};

extern uint64_t api_getlock(void *lock, const char *file, const char *func, const int line);
extern void api_gotlock(uint64_t id, void *lock, const char *file, const char *func, const int line);
extern uint64_t api_trylock(void *lock, const char *file, const char *func, const int line);
extern void api_didlock(uint64_t id, int ret, void *lock, const char *file, const char *func, const int line);
extern void api_gunlock(void *lock, const char *file, const char *func, const int line);
extern void api_initlock(void *lock, enum cglock_typ typ, const char *file, const char *func, const int line);

#define GETLOCK(_lock, _file, _func, _line) uint64_t _id1 = api_getlock((void *)(_lock), _file, _func, _line)
#define GOTLOCK(_lock, _file, _func, _line) api_gotlock(_id1, (void *)(_lock), _file, _func, _line)
#define TRYLOCK(_lock, _file, _func, _line) uint64_t _id2 = api_trylock((void *)(_lock), _file, _func, _line)
#define DIDLOCK(_ret, _lock, _file, _func, _line) api_didlock(_id2, _ret, (void *)(_lock), _file, _func, _line)
#define GUNLOCK(_lock, _file, _func, _line) api_gunlock((void *)(_lock), _file, _func, _line)
#define INITLOCK(_lock, _typ, _file, _func, _line) api_initlock((void *)(_lock), _typ, _file, _func, _line)
#else
#define GETLOCK(_lock, _file, _func, _line)
#define GOTLOCK(_lock, _file, _func, _line)
#define TRYLOCK(_lock, _file, _func, _line)
#define DIDLOCK(_ret, _lock, _file, _func, _line)
#define GUNLOCK(_lock, _file, _func, _line)
#define INITLOCK(_lock, _typ, _file, _func, _line)
#endif

#define mutex_lock(_lock) _mutex_lock(_lock, __FILE__, __func__, __LINE__)
#define mutex_unlock_noyield(_lock) _mutex_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define mutex_unlock(_lock) _mutex_unlock(_lock, __FILE__, __func__, __LINE__)
#define mutex_trylock(_lock) _mutex_trylock(_lock, __FILE__, __func__, __LINE__)
#define wr_lock(_lock) _wr_lock(_lock, __FILE__, __func__, __LINE__)
#define wr_trylock(_lock) _wr_trylock(_lock, __FILE__, __func__, __LINE__)
#define rd_lock(_lock) _rd_lock(_lock, __FILE__, __func__, __LINE__)
#define rw_unlock(_lock) _rw_unlock(_lock, __FILE__, __func__, __LINE__)
#define rd_unlock_noyield(_lock) _rd_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define wr_unlock_noyield(_lock) _wr_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define rd_unlock(_lock) _rd_unlock(_lock, __FILE__, __func__, __LINE__)
#define wr_unlock(_lock) _wr_unlock(_lock, __FILE__, __func__, __LINE__)
#define mutex_init(_lock) _mutex_init(_lock, __FILE__, __func__, __LINE__)
#define rwlock_init(_lock) _rwlock_init(_lock, __FILE__, __func__, __LINE__)
#define cglock_init(_lock) _cglock_init(_lock, __FILE__, __func__, __LINE__)
#define cg_rlock(_lock) _cg_rlock(_lock, __FILE__, __func__, __LINE__)
#define cg_ilock(_lock) _cg_ilock(_lock, __FILE__, __func__, __LINE__)
#define cg_uilock(_lock) _cg_uilock(_lock, __FILE__, __func__, __LINE__)
#define cg_ulock(_lock) _cg_ulock(_lock, __FILE__, __func__, __LINE__)
#define cg_wlock(_lock) _cg_wlock(_lock, __FILE__, __func__, __LINE__)
#define cg_dwlock(_lock) _cg_dwlock(_lock, __FILE__, __func__, __LINE__)
#define cg_dwilock(_lock) _cg_dwilock(_lock, __FILE__, __func__, __LINE__)
#define cg_dlock(_lock) _cg_dlock(_lock, __FILE__, __func__, __LINE__)
#define cg_runlock(_lock) _cg_runlock(_lock, __FILE__, __func__, __LINE__)
#define cg_ruwlock(_lock) _cg_ruwlock(_lock, __FILE__, __func__, __LINE__)
#define cg_wunlock(_lock) _cg_wunlock(_lock, __FILE__, __func__, __LINE__)

static inline void _mutex_lock(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	GETLOCK(lock, file, func, line);
	if (unlikely(pthread_mutex_lock(lock)))
		quitfrom(1, file, func, line, "WTF MUTEX ERROR ON LOCK! errno=%d", errno);
	GOTLOCK(lock, file, func, line);
}

static inline void _mutex_unlock_noyield(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	if (unlikely(pthread_mutex_unlock(lock)))
		quitfrom(1, file, func, line, "WTF MUTEX ERROR ON UNLOCK! errno=%d", errno);
	GUNLOCK(lock, file, func, line);
}

static inline void _mutex_unlock(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	_mutex_unlock_noyield(lock, file, func, line);
	selective_yield();
}

static inline int _mutex_trylock(pthread_mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line)
{
	TRYLOCK(lock, file, func, line);
	int ret = pthread_mutex_trylock(lock);
	DIDLOCK(ret, lock, file, func, line);

	return ret;
}

static inline void _wr_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	GETLOCK(lock, file, func, line);
	if (unlikely(pthread_rwlock_wrlock(lock)))
		quitfrom(1, file, func, line, "WTF WRLOCK ERROR ON LOCK! errno=%d", errno);
	GOTLOCK(lock, file, func, line);
}

static inline int _wr_trylock(pthread_rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line)
{
	TRYLOCK(lock, file, func, line);
	int ret = pthread_rwlock_trywrlock(lock);
	DIDLOCK(ret, lock, file, func, line);

	return ret;
}

static inline void _rd_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	GETLOCK(lock, file, func, line);
	if (unlikely(pthread_rwlock_rdlock(lock)))
		quitfrom(1, file, func, line, "WTF RDLOCK ERROR ON LOCK! errno=%d", errno);
	GOTLOCK(lock, file, func, line);
}

static inline void _rw_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	if (unlikely(pthread_rwlock_unlock(lock)))
		quitfrom(1, file, func, line, "WTF RWLOCK ERROR ON UNLOCK! errno=%d", errno);
	GUNLOCK(lock, file, func, line);
}

static inline void _rd_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
}

static inline void _wr_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
}

static inline void _rd_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
	selective_yield();
}

static inline void _wr_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	_rw_unlock(lock, file, func, line);
	selective_yield();
}

static inline void _mutex_init(pthread_mutex_t *lock, const char *file, const char *func, const int line)
{
	if (unlikely(pthread_mutex_init(lock, NULL)))
		quitfrom(1, file, func, line, "Failed to pthread_mutex_init errno=%d", errno);
	INITLOCK(lock, CGLOCK_MUTEX, file, func, line);
}

static inline void mutex_destroy(pthread_mutex_t *lock)
{
	/* Ignore return code. This only invalidates the mutex on linux but
	 * releases resources on windows. */
	pthread_mutex_destroy(lock);
}

static inline void _rwlock_init(pthread_rwlock_t *lock, const char *file, const char *func, const int line)
{
	if (unlikely(pthread_rwlock_init(lock, NULL)))
		quitfrom(1, file, func, line, "Failed to pthread_rwlock_init errno=%d", errno);
	INITLOCK(lock, CGLOCK_RW, file, func, line);
}

static inline void rwlock_destroy(pthread_rwlock_t *lock)
{
	pthread_rwlock_destroy(lock);
}

static inline void _cglock_init(cglock_t *lock, const char *file, const char *func, const int line)
{
	_mutex_init(&lock->mutex, file, func, line);
	_rwlock_init(&lock->rwlock, file, func, line);
}

static inline void cglock_destroy(cglock_t *lock)
{
	rwlock_destroy(&lock->rwlock);
	mutex_destroy(&lock->mutex);
}

/* Read lock variant of cglock. Cannot be promoted. */
static inline void _cg_rlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_mutex_lock(&lock->mutex, file, func, line);
	_rd_lock(&lock->rwlock, file, func, line);
	_mutex_unlock_noyield(&lock->mutex, file, func, line);
}

/* Intermediate variant of cglock - behaves as a read lock but can be promoted
 * to a write lock or demoted to read lock. */
static inline void _cg_ilock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_mutex_lock(&lock->mutex, file, func, line);
}

/* Unlock intermediate variant without changing to read or write version */
static inline void _cg_uilock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_mutex_unlock(&lock->mutex, file, func, line);
}

/* Upgrade intermediate variant to a write lock */
static inline void _cg_ulock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_wr_lock(&lock->rwlock, file, func, line);
}

/* Write lock variant of cglock */
static inline void _cg_wlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_mutex_lock(&lock->mutex, file, func, line);
	_wr_lock(&lock->rwlock, file, func, line);
}

/* Downgrade write variant to a read lock */
static inline void _cg_dwlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_wr_unlock_noyield(&lock->rwlock, file, func, line);
	_rd_lock(&lock->rwlock, file, func, line);
	_mutex_unlock_noyield(&lock->mutex, file, func, line);
}

/* Demote a write variant to an intermediate variant */
static inline void _cg_dwilock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_wr_unlock(&lock->rwlock, file, func, line);
}

/* Downgrade intermediate variant to a read lock */
static inline void _cg_dlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_rd_lock(&lock->rwlock, file, func, line);
	_mutex_unlock_noyield(&lock->mutex, file, func, line);
}

static inline void _cg_runlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_rd_unlock(&lock->rwlock, file, func, line);
}

/* This drops the read lock and grabs a write lock. It does NOT protect data
 * between the two locks! */
static inline void _cg_ruwlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_rd_unlock_noyield(&lock->rwlock, file, func, line);
	_cg_wlock(lock, file, func, line);
}

static inline void _cg_wunlock(cglock_t *lock, const char *file, const char *func, const int line)
{
	_wr_unlock_noyield(&lock->rwlock, file, func, line);
	_mutex_unlock(&lock->mutex, file, func, line);
}
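
/*
 * Illustrative sketch (not part of the original header) of the typical cglock
 * usage patterns implied by the comments above, for some cglock_t lock:
 *
 *	// plain read access (cannot be promoted)
 *	cg_rlock(&lock);
 *	... read shared data ...
 *	cg_runlock(&lock);
 *
 *	// intermediate lock, promoted to a write lock only if needed
 *	cg_ilock(&lock);
 *	... decide whether a write is required ...
 *	cg_ulock(&lock);	// promote to write
 *	... modify shared data ...
 *	cg_wunlock(&lock);	// releases both rwlock and mutex
 *
 *	// write, then downgrade to read while publishing results
 *	cg_wlock(&lock);
 *	... modify shared data ...
 *	cg_dwlock(&lock);	// now holds only the read lock
 *	... read shared data ...
 *	cg_runlock(&lock);
 */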
struct pool;

#define API_LISTEN_ADDR "0.0.0.0"
#define API_MCAST_CODE "FTW"
#define API_MCAST_ADDR "224.0.0.75"

extern bool opt_work_update;
extern bool opt_protocol;
extern bool have_longpoll;
extern char *opt_kernel_path;
extern char *opt_socks_proxy;
extern int opt_suggest_diff;
extern char *cgminer_path;
extern bool opt_fail_only;
extern bool opt_lowmem;
extern bool opt_autofan;
extern bool opt_autoengine;
extern bool use_curses;
extern char *opt_api_allow;
extern bool opt_api_mcast;
extern char *opt_api_mcast_addr;
extern char *opt_api_mcast_code;
extern char *opt_api_mcast_des;
extern int opt_api_mcast_port;
extern char *opt_api_groups;
extern char *opt_api_description;
extern int opt_api_port;
extern char *opt_api_host;
extern bool opt_api_listen;
extern bool opt_api_network;
extern bool opt_delaynet;
extern time_t last_getwork;
extern bool opt_restart;
#ifdef USE_ICARUS
extern char *opt_icarus_options;
extern char *opt_icarus_timing;
extern float opt_anu_freq;
extern float opt_rock_freq;
#endif
extern bool opt_worktime;
#ifdef USE_AVALON
extern char *opt_avalon_options;
extern char *opt_bitburner_fury_options;
#endif
#ifdef USE_KLONDIKE
extern char *opt_klondike_options;
#endif
#ifdef USE_DRILLBIT
extern char *opt_drillbit_options;
extern char *opt_drillbit_auto;
#endif
#ifdef USE_BAB
extern char *opt_bab_options;
#endif
#ifdef USE_BITMINE_A1
extern char *opt_bitmine_a1_options;
#endif
#ifdef USE_ANT_S1
extern char *opt_bitmain_options;
extern bool opt_bitmain_hwerror;
#endif
#ifdef USE_ANT_S2
extern char *opt_bitmain_dev;
extern char *opt_bitmain_options;
extern bool opt_bitmain_hwerror;
extern bool opt_bitmain_checkall;
extern bool opt_bitmain_checkn2diff;
extern bool opt_bitmain_beeper;
extern bool opt_bitmain_tempoverctrl;
#endif
#ifdef USE_MINION
extern int opt_minion_chipreport;
extern char *opt_minion_cores;
extern bool opt_minion_extra;
extern char *opt_minion_freq;
extern int opt_minion_freqchange;
extern int opt_minion_freqpercent;
extern bool opt_minion_idlecount;
extern int opt_minion_ledcount;
extern int opt_minion_ledlimit;
extern bool opt_minion_noautofreq;
extern bool opt_minion_overheat;
extern int opt_minion_spidelay;
extern char *opt_minion_spireset;
extern int opt_minion_spisleep;
extern int opt_minion_spiusec;
extern char *opt_minion_temp;
#endif
#ifdef USE_USBUTILS
extern char *opt_usb_select;
extern int opt_usbdump;
extern bool opt_usb_list_all;
extern cgsem_t usb_resource_sem;
#endif
#ifdef USE_BITFORCE
extern bool opt_bfl_noncerange;
#endif
#ifdef USE_GRIDSEED
extern char *opt_gridseed_options;
extern char *opt_gridseed_freq;
extern char *opt_gridseed_override;
#endif
#ifdef USE_ZEUS
extern bool opt_zeus_debug;
extern int opt_zeus_chips_count;
extern int opt_zeus_chip_clk;
extern bool opt_zeus_nocheck_golden;
extern char *opt_zeus_options;
#endif
#ifdef USE_LKETC
extern bool opt_lketc_debug;
extern int opt_lketc_chips_count;
extern int opt_lketc_chip_clk;
extern bool opt_lketc_nocheck_golden;
extern char *opt_lketc_options;
#endif
extern int swork_id;

#if LOCK_TRACKING
extern pthread_mutex_t lockstat_lock;
#endif

extern pthread_rwlock_t netacc_lock;

extern const uint32_t sha256_init_state[];
#ifdef HAVE_LIBCURL
extern json_t *json_web_config(const char *url);
extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass,
			     const char *rpc_req, bool, bool, int *,
			     struct pool *pool, bool);
#endif
extern const char *proxytype(proxytypes_t proxytype);
extern char *get_proxy(char *url, struct pool *pool);
extern void __bin2hex(char *s, const unsigned char *p, size_t len);
extern char *bin2hex(const unsigned char *p, size_t len);
extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len);

typedef bool (*sha256_func)(struct thr_info*, const unsigned char *pmidstate,
			    unsigned char *pdata,
			    unsigned char *phash1, unsigned char *phash,
			    const unsigned char *ptarget,
			    uint32_t max_nonce,
			    uint32_t *last_nonce,
			    uint32_t nonce);

extern bool fulltest(const unsigned char *hash, const unsigned char *target);

extern int opt_queue;
extern int opt_scantime;
extern int opt_expiry;

extern cglock_t control_lock;
extern pthread_mutex_t hash_lock;
extern pthread_mutex_t console_lock;
extern cglock_t ch_lock;
extern pthread_rwlock_t mining_thr_lock;
extern pthread_rwlock_t devices_lock;

extern pthread_mutex_t restart_lock;
extern pthread_cond_t restart_cond;

extern void clear_stratum_shares(struct pool *pool);
extern void clear_pool_work(struct pool *pool);
extern void set_target(unsigned char *dest_target, double diff);
#if defined (USE_AVALON2) || defined (USE_HASHRATIO)
bool submit_nonce2_nonce(struct thr_info *thr, struct pool *pool, struct pool *real_pool,
			 uint32_t nonce2, uint32_t nonce);
#endif

extern int restart_wait(struct thr_info *thr, unsigned int mstime);
extern void kill_work(void);
extern void reinit_device(struct cgpu_info *cgpu);

extern void api(int thr_id);

extern struct pool *current_pool(void);
extern int enabled_pools;
extern void get_intrange(char *arg, int *val1, int *val2);
extern bool detect_stratum(struct pool *pool, char *url);
extern void print_summary(void);
extern void adjust_quota_gcd(void);
extern struct pool *add_pool(void);
extern bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass);

#define MAX_DEVICES 4096

extern bool hotplug_mode;
extern int hotplug_time;
extern struct list_head scan_devices;
extern int nDevs;
extern int num_processors;
extern int hw_errors;
extern bool use_syslog;
extern bool opt_quiet;
extern struct thr_info *control_thr;
extern struct thr_info **mining_thr;
#ifdef USE_SCRYPT
extern bool opt_scrypt;
#else
#define opt_scrypt (0)
#endif
extern double total_secs;
extern int mining_threads;
extern int total_devices;
extern int zombie_devs;
extern struct cgpu_info **devices;
extern int total_pools;
extern struct pool **pools;
extern struct strategies strategies[];
extern enum pool_strategy pool_strategy;
extern int opt_rotate_period;
extern double rolling1, rolling5, rolling15;
extern double total_rolling;
extern double total_mhashes_done;
extern unsigned int new_blocks;
extern unsigned int found_blocks;
extern int64_t total_accepted, total_rejected, total_diff1;
extern int64_t total_getworks, total_stale, total_discarded;
extern double total_diff_accepted, total_diff_rejected, total_diff_stale;
extern unsigned int local_work;
extern unsigned int total_go, total_ro;
extern const int opt_cutofftemp;
extern int opt_log_interval;
extern unsigned long long global_hashrate;
extern char current_hash[68];
extern double current_diff;
extern uint64_t best_diff;
extern struct timeval block_timeval;
extern char *workpadding;

struct curl_ent {
	CURL *curl;
	struct list_head node;
	struct timeval tv;
};
/* Disabled needs to be the lowest enum as a freshly calloced value will then
 * equal disabled */
enum pool_enable {
	POOL_DISABLED,
	POOL_ENABLED,
	POOL_REJECTING,
};

struct stratum_work {
	char *job_id;
	unsigned char **merkle_bin;
	bool clean;

	double diff;
};

#define RBUFSIZE 8192
#define RECVSIZE (RBUFSIZE - 4)

struct pool {
	int pool_no;
	int prio;
	int64_t accepted, rejected;
	int seq_rejects;
	int seq_getfails;
	int solved;
	int64_t diff1;
	char diff[8];
	int quota;
	int quota_gcd;
	int quota_used;
	int works;

	double diff_accepted;
	double diff_rejected;
	double diff_stale;

	bool submit_fail;
	bool idle;
	bool lagging;
	bool probed;
	enum pool_enable enabled;
	bool submit_old;
	bool removed;
	bool lp_started;
	bool blocking;

	char *hdr_path;
	char *lp_url;

	unsigned int getwork_requested;
	unsigned int stale_shares;
	unsigned int discarded_work;
	unsigned int getfail_occasions;
	unsigned int remotefail_occasions;
	struct timeval tv_idle;

	double utility;
	int last_shares, shares;

	char *rpc_req;
	char *rpc_url;
	char *rpc_userpass;
	char *rpc_user, *rpc_pass;
	proxytypes_t rpc_proxytype;
	char *rpc_proxy;

	pthread_mutex_t pool_lock;
	cglock_t data_lock;

	struct thread_q *submit_q;
	struct thread_q *getwork_q;

	pthread_t longpoll_thread;
	pthread_t test_thread;
	bool testing;

	int curls;
	pthread_cond_t cr_cond;
	struct list_head curlring;

	time_t last_share_time;
	double last_share_diff;
	uint64_t best_diff;

	struct cgminer_stats cgminer_stats;
	struct cgminer_pool_stats cgminer_pool_stats;

	/* The last block this particular pool knows about */
	char prev_block[32];

	/* Stratum variables */
	char *stratum_url;
	bool extranonce_subscribe;
	char *stratum_port;
	SOCKETTYPE sock;
	char *sockbuf;
	size_t sockbuf_size;
	char *sockaddr_url; /* stripped url used for sockaddr */
	char *sockaddr_proxy_url;
	char *sockaddr_proxy_port;

	char *nonce1;
	unsigned char *nonce1bin;
	uint64_t nonce2;
	int n2size;
	char *sessionid;
	bool has_stratum;
	bool stratum_active;
	bool stratum_init;
	bool stratum_notify;
	struct stratum_work swork;
	pthread_t stratum_sthread;
	pthread_t stratum_rthread;
	pthread_mutex_t stratum_lock;
	struct thread_q *stratum_q;
	int sshares; /* stratum shares submitted waiting on response */

	/* GBT variables */
	bool has_gbt;
	cglock_t gbt_lock;
	unsigned char previousblockhash[32];
	unsigned char gbt_target[32];
	char *coinbasetxn;
	char *longpollid;
	char *gbt_workid;
	int gbt_expires;
	uint32_t gbt_version;
	uint32_t curtime;
	uint32_t gbt_bits;
	unsigned char *txn_hashes;
	int gbt_txns;
	int height;

	bool gbt_solo;
	unsigned char merklebin[16 * 32];
	int transactions;
	char *txn_data;
	unsigned char scriptsig_base[100];
	unsigned char script_pubkey[25 + 3];
	int nValue;
	CURL *gbt_curl;
	bool gbt_curl_inuse;

	/* Shared by both stratum & GBT */
	size_t n1_len;
	unsigned char *coinbase;
	int coinbase_len;
	int nonce2_offset;
	unsigned char header_bin[128];
	int merkles;
	char prev_hash[68];
	char bbversion[12];
	char nbit[12];
	char ntime[12];
	double next_diff;
	double sdiff;

	struct timeval tv_lastwork;
};

#define GETWORK_MODE_TESTPOOL 'T'
#define GETWORK_MODE_POOL 'P'
#define GETWORK_MODE_LP 'L'
#define GETWORK_MODE_BENCHMARK 'B'
#define GETWORK_MODE_STRATUM 'S'
#define GETWORK_MODE_GBT 'G'
#define GETWORK_MODE_SOLO 'C'
struct work {
	unsigned char data[128];
	unsigned char midstate[32];
	unsigned char target[32];
	unsigned char hash[32];

	/* This is the diff the device is currently aiming for and must be
	 * the minimum of work_difficulty & drv->max_diff */
	double device_diff;
	uint64_t share_diff;

	int rolls;
	int drv_rolllimit; /* How much the driver can roll ntime */

	uint32_t nonce; /* For devices that hash sole work */

	struct thr_info *thr;
	int thr_id;
	struct pool *pool;
	struct timeval tv_staged;

	bool mined;
	bool clone;
	bool cloned;
	int rolltime;
	bool longpoll;
	bool stale;
	bool mandatory;
	bool block;

	bool stratum;
	char *job_id;
	uint64_t nonce2;
	size_t nonce2_len;
	char *ntime;
	double sdiff;
	char *nonce1;

	bool gbt;
	char *coinbase;
	int gbt_txns;

	unsigned int work_block;
	uint32_t id;
	UT_hash_handle hh;

	/* This is the diff work we're aiming to submit and should match the
	 * work->target binary */
	double work_difficulty;

	// Allow devices to identify work if multiple sub-devices
	int subid;
	// Allow devices to flag work for their own purposes
	bool devflag;
	// Allow devices to timestamp work for their own purposes
	struct timeval tv_stamp;

	struct timeval tv_getwork;
	struct timeval tv_getwork_reply;
	struct timeval tv_cloned;
	struct timeval tv_work_start;
	struct timeval tv_work_found;
	char getwork_mode;
};
#ifdef USE_MODMINER
struct modminer_fpga_state {
	bool work_running;
	struct work running_work;
	struct timeval tv_workstart;
	uint32_t hashes;

	char next_work_cmd[46];
	char fpgaid;

	bool overheated;
	bool new_work;

	uint32_t shares;
	uint32_t shares_last_hw;
	uint32_t hw_errors;
	uint32_t shares_to_good;
	uint32_t timeout_fail;
	uint32_t success_more;
	struct timeval last_changed;
	struct timeval last_nonce;
	struct timeval first_work;
	bool death_stage_one;
	bool tried_two_byte_temp;
	bool one_byte_temp;
};
#endif

#define TAILBUFSIZ 64

#define tailsprintf(buf, bufsiz, fmt, ...) do { \
	char tmp13[TAILBUFSIZ]; \
	size_t len13, buflen = strlen(buf); \
	snprintf(tmp13, sizeof(tmp13), fmt, ##__VA_ARGS__); \
	len13 = strlen(tmp13); \
	if ((buflen + len13) >= bufsiz) \
		quit(1, "tailsprintf buffer overflow in %s %s line %d", __FILE__, __func__, __LINE__); \
	strcat(buf, tmp13); \
} while (0)
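
/*
 * Illustrative sketch (not part of the original header): tailsprintf() appends
 * formatted text to an existing string and aborts on overflow; it is typically
 * used to build up a device status line piece by piece (values below are
 * hypothetical):
 *
 *	char buf[256] = "";
 *	tailsprintf(buf, sizeof(buf), "%dMHz ", 200);
 *	tailsprintf(buf, sizeof(buf), "%.1fC", 45.0);
 *	// buf now contains "200MHz 45.0C"
 */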
extern void get_datestamp(char *, size_t, struct timeval *);
extern void inc_hw_errors(struct thr_info *thr);
extern bool test_nonce(struct work *work, uint32_t nonce);
extern bool test_nonce_diff(struct work *work, uint32_t nonce, double diff);
extern bool submit_tested_work(struct thr_info *thr, struct work *work);
extern bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce);
extern bool submit_noffset_nonce(struct thr_info *thr, struct work *work, uint32_t nonce,
				 int noffset);
extern int share_work_tdiff(struct cgpu_info *cgpu);
extern struct work *get_work(struct thr_info *thr, const int thr_id);
extern void __add_queued(struct cgpu_info *cgpu, struct work *work);
extern struct work *get_queued(struct cgpu_info *cgpu);
extern struct work *__get_queued(struct cgpu_info *cgpu);
extern void add_queued(struct cgpu_info *cgpu, struct work *work);
extern struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id);
extern struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern struct work *__find_work_byid(struct work *que, uint32_t id);
extern struct work *find_queued_work_byid(struct cgpu_info *cgpu, uint32_t id);
extern void __work_completed(struct cgpu_info *cgpu, struct work *work);
extern int age_queued_work(struct cgpu_info *cgpu, double secs);
extern void work_completed(struct cgpu_info *cgpu, struct work *work);
extern struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen);
extern void flush_queue(struct cgpu_info *cgpu);
extern void hash_driver_work(struct thr_info *mythr);
extern void hash_queued_work(struct thr_info *mythr);
extern void _wlog(const char *str);
extern void _wlogprint(const char *str);
extern int curses_int(const char *query);
extern char *curses_input(const char *query);
extern void kill_work(void);
extern void switch_pools(struct pool *selected);
extern void _discard_work(struct work *work);
#define discard_work(WORK) do { \
	_discard_work(WORK); \
	WORK = NULL; \
} while (0)
extern void remove_pool(struct pool *pool);
extern void write_config(FILE *fcfg);
extern void zero_bestshare(void);
extern void zero_stats(void);
extern void default_save_file(char *filename);
extern bool log_curses_only(int prio, const char *datetime, const char *str);
extern void clear_logwin(void);
extern void logwin_update(void);
extern bool pool_tclear(struct pool *pool, bool *var);
extern void stratum_resumed(struct pool *pool);
extern void pool_died(struct pool *pool);
extern struct thread_q *tq_new(void);
extern void tq_free(struct thread_q *tq);
extern bool tq_push(struct thread_q *tq, void *data);
extern void *tq_pop(struct thread_q *tq, const struct timespec *abstime);
extern void tq_freeze(struct thread_q *tq);
extern void tq_thaw(struct thread_q *tq);
extern bool successful_connect;
extern void adl(void);
extern void app_restart(void);
extern void roll_work(struct work *work);
extern struct work *make_clone(struct work *work);
extern void clean_work(struct work *work);
extern void _free_work(struct work *work);
#define free_work(WORK) do { \
	_free_work(WORK); \
	WORK = NULL; \
} while (0)
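
/*
 * Illustrative sketch (not part of the original header): free_work() and
 * discard_work() take the pointer variable itself so it can be cleared, which
 * guards against accidental reuse of a freed work item:
 *
 *	struct work *work = get_work(thr, thr_id);
 *	// ... hash it, submit nonces, etc ...
 *	free_work(work);	// frees the work and sets work = NULL
 */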
extern void set_work_ntime(struct work *work, int ntime);
extern struct work *copy_work_noffset(struct work *base_work, int noffset);
#define copy_work(work_in) copy_work_noffset(work_in, 0)
extern uint64_t share_diff(const struct work *work);

extern struct thr_info *get_thread(int thr_id);
extern struct cgpu_info *get_devices(int id);

enum api_data_type {
	API_ESCAPE,
	API_STRING,
	API_CONST,
	API_UINT8,
	API_SHORT,
	API_INT16,
	API_UINT16,
	API_INT,
	API_UINT,
	API_UINT32,
	API_HEX32,
	API_UINT64,
	API_INT64,
	API_DOUBLE,
	API_ELAPSED,
	API_BOOL,
	API_TIMEVAL,
	API_TIME,
	API_MHS,
	API_KHS,
	API_MHTOTAL,
	API_TEMP,
	API_UTILITY,
	API_FREQ,
	API_VOLTS,
	API_HS,
	API_DIFF,
	API_PERCENT,
	API_AVG
};

struct api_data {
	enum api_data_type type;
	char *name;
	void *data;
	bool data_was_malloc;
	struct api_data *prev;
	struct api_data *next;
};

extern struct api_data *api_add_escape(struct api_data *root, char *name, char *data, bool copy_data);
extern struct api_data *api_add_string(struct api_data *root, char *name, char *data, bool copy_data);
extern struct api_data *api_add_const(struct api_data *root, char *name, const char *data, bool copy_data);
extern struct api_data *api_add_uint8(struct api_data *root, char *name, uint8_t *data, bool copy_data);
extern struct api_data *api_add_short(struct api_data *root, char *name, short *data, bool copy_data);
extern struct api_data *api_add_int16(struct api_data *root, char *name, uint16_t *data, bool copy_data);
extern struct api_data *api_add_uint16(struct api_data *root, char *name, uint16_t *data, bool copy_data);
extern struct api_data *api_add_int(struct api_data *root, char *name, int *data, bool copy_data);
extern struct api_data *api_add_uint(struct api_data *root, char *name, unsigned int *data, bool copy_data);
extern struct api_data *api_add_uint32(struct api_data *root, char *name, uint32_t *data, bool copy_data);
extern struct api_data *api_add_hex32(struct api_data *root, char *name, uint32_t *data, bool copy_data);
extern struct api_data *api_add_uint64(struct api_data *root, char *name, uint64_t *data, bool copy_data);
extern struct api_data *api_add_double(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_elapsed(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_bool(struct api_data *root, char *name, bool *data, bool copy_data);
extern struct api_data *api_add_timeval(struct api_data *root, char *name, struct timeval *data, bool copy_data);
extern struct api_data *api_add_time(struct api_data *root, char *name, time_t *data, bool copy_data);
extern struct api_data *api_add_mhs(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_khs(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_mhstotal(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_temp(struct api_data *root, char *name, float *data, bool copy_data);
extern struct api_data *api_add_utility(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_freq(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_volts(struct api_data *root, char *name, float *data, bool copy_data);
extern struct api_data *api_add_hs(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_diff(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_percent(struct api_data *root, char *name, double *data, bool copy_data);
extern struct api_data *api_add_avg(struct api_data *root, char *name, float *data, bool copy_data);
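
/*
 * Illustrative sketch (not part of the original header): a driver's
 * get_api_stats callback builds a doubly linked api_data list by chaining
 * api_add_* calls, starting from a NULL root (function and field names below
 * are hypothetical):
 *
 *	static struct api_data *example_api_stats(struct cgpu_info *cgpu)
 *	{
 *		struct api_data *root = NULL;
 *		int chips = 4;
 *		float temp = 45.0f;
 *
 *		root = api_add_int(root, "Chips", &chips, true);
 *		root = api_add_temp(root, "Temperature", &temp, true);
 *		return root;
 *	}
 */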
extern void dupalloc(struct cgpu_info *cgpu, int timelimit);
extern void dupcounters(struct cgpu_info *cgpu, uint64_t *checked, uint64_t *dups);
extern bool isdupnonce(struct cgpu_info *cgpu, struct work *work, uint32_t nonce);

#endif /* __MINER_H__ */