diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..19efe46 --- /dev/null +++ b/.gitignore @@ -0,0 +1,86 @@ +nbproject +ccan/opt/.dirstamp +compat/jansson-2.6/jansson.pc +compat/jansson-2.6/libtool +compat/jansson-2.6/ltmain.sh +compat/jansson-2.6/m4/libtool.m4 +compat/jansson-2.6/m4/ltoptions.m4 +compat/jansson-2.6/m4/ltsugar.m4 +compat/jansson-2.6/m4/ltversion.m4 +compat/jansson-2.6/m4/lt~obsolete.m4 +compat/jansson-2.6/src/.libs/ +compat/jansson-2.6/src/dump.lo +compat/jansson-2.6/src/error.lo +compat/jansson-2.6/src/hashtable.lo +compat/jansson-2.6/src/jansson_config.h +compat/jansson-2.6/src/libjansson.la +compat/jansson-2.6/src/load.lo +compat/jansson-2.6/src/memory.lo +compat/jansson-2.6/src/pack_unpack.lo +compat/jansson-2.6/src/strbuffer.lo +compat/jansson-2.6/src/strconv.lo +compat/jansson-2.6/src/utf.lo +compat/jansson-2.6/src/value.lo +compat/libusb-1.0/libtool +compat/libusb-1.0/libusb-1.0.pc +compat/libusb-1.0/libusb/.libs/ +compat/libusb-1.0/libusb/os/.dirstamp +compat/libusb-1.0/ltmain.sh +libtool +ltmain.sh +m4/libtool.m4 +m4/ltoptions.m4 +m4/ltsugar.m4 +m4/ltversion.m4 +m4/lt~obsolete.m4 + + + +cgminer +cgminer.exe +minerd +minerd.exe +*.o +*.bin + +autom4te.cache +.deps + +Makefile +Makefile.in +INSTALL +aclocal.m4 +configure +depcomp +missing +install-sh +stamp-h1 +cpuminer-config.h* +compile +config.log +config.status +config.guess +config.sub + +mingw32-config.cache + +*~ + +ext_deps +config.h.in +config.h + + +ccan/libccan.a +lib/arg-nonnull.h +lib/c++defs.h +lib/libgnu.a +lib/signal.h +lib/string.h +lib/stdint.h +lib/warn-on-use.h + +mkinstalldirs + +*.swp +*.pre diff --git a/01-cgminer.rules b/01-cgminer.rules new file mode 100644 index 0000000..93c25a8 --- /dev/null +++ b/01-cgminer.rules @@ -0,0 +1,52 @@ +# Butterfly Labs FPGA and ASIC devices +ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6014", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# ModMinerQuad +ATTRS{idVendor}=="1fc9", 
ATTRS{idProduct}=="0003", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Lancelot and Avalon +ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Icarus +ATTRS{idVendor}=="067b", ATTRS{idProduct}=="2303", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" +ATTRS{idVendor}=="1fc9", ATTRS{idProduct}=="0083", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# AsicminerUSB and Antminer U1 +ATTRS{idVendor}=="10c4", ATTRS{idProduct}=="ea60", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Cairnsmore1 +ATTRS{idVendor}=="067b", ATTRS{idProduct}=="0230", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Cairnsmore1-2 +ATTRS{idVendor}=="0403", ATTRS{idProduct}=="8350", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Ztex +ATTRS{idVendor}=="221a", ATTRS{idProduct}=="0100", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# BF1 +ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="204b", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Klondike +ATTRS{idVendor}=="04d8", ATTRS{idProduct}=="f60a", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# HashFast +ATTRS{idVendor}=="297c", ATTRS{idProduct}=="0001", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" +ATTRS{idVendor}=="297c", ATTRS{idProduct}=="8001", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# BXF +ATTRS{idVendor}=="198c", ATTRS{idProduct}=="b1f1", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# NF1 +ATTRS{idVendor}=="04d8", ATTRS{idProduct}=="00de", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# ANT_S1 +ATTRS{idVendor}=="4254", ATTRS{idProduct}=="4153", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Cointerra +ATTRS{idVendor}=="1cbe", ATTRS{idProduct}=="0003", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + 
+# Drillbit Thumb +ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="2404", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" + +# Avalon4 +ATTRS{idVendor}=="29f1", ATTRS{idProduct}=="33f2", SUBSYSTEM=="usb", ACTION=="add", MODE="0666", GROUP="plugdev" diff --git a/A1-board-selector-CCD.c b/A1-board-selector-CCD.c new file mode 100644 index 0000000..d0e173f --- /dev/null +++ b/A1-board-selector-CCD.c @@ -0,0 +1,117 @@ +/* + * board selector support for TCA9535 used in Bitmine's CoinCraft Desk + * + * Copyright 2014 Zefir Kurtisi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "miner.h" + +#include "A1-board-selector.h" +#include "i2c-context.h" + +static struct board_selector ccd_selector; + +struct i2c_ctx *U1_tca9535; +uint8_t chain_mask = 0xff; +uint8_t active_chain = 255; +pthread_mutex_t lock; + + +#define UNUSED_BITS 0xe0 + +static void ccd_unlock(void) +{ + mutex_unlock(&lock); +} + +static void ccd_exit(void) +{ + if (U1_tca9535 != NULL) + U1_tca9535->exit(U1_tca9535); +} +uint8_t retval = 0; + +extern struct board_selector *ccd_board_selector_init(void) +{ + mutex_init(&lock); + U1_tca9535 = i2c_slave_open(I2C_BUS, 0x27); + if (U1_tca9535 == NULL) + return NULL; + bool retval = U1_tca9535->write(U1_tca9535, 0x06, 0xe0) && + U1_tca9535->write(U1_tca9535, 0x07, 0xe0) && + U1_tca9535->write(U1_tca9535, 0x02, 0x1f) && + U1_tca9535->write(U1_tca9535, 0x03, 0x00); + if (retval) + return &ccd_selector; + ccd_exit(); + return NULL; +} + +static bool ccd_select(uint8_t chain) +{ + if (chain >= CCD_MAX_CHAINS) + return false; + + mutex_lock(&lock); + if (active_chain == chain) + return true; 
+ + active_chain = chain; + chain_mask = 1 << active_chain; + return U1_tca9535->write(U1_tca9535, 0x02, ~chain_mask); +} + +static bool __ccd_board_selector_reset(uint8_t mask) +{ + if (!U1_tca9535->write(U1_tca9535, 0x03, mask)) + return false; + cgsleep_ms(RESET_LOW_TIME_MS); + if (!U1_tca9535->write(U1_tca9535, 0x03, 0x00)) + return false; + cgsleep_ms(RESET_HI_TIME_MS); + return true; +} +// we assume we are already holding the mutex +static bool ccd_reset(void) +{ + return __ccd_board_selector_reset(chain_mask); +} + +static bool ccd_reset_all(void) +{ + mutex_lock(&lock); + bool retval = __ccd_board_selector_reset(0xff & ~UNUSED_BITS); + mutex_unlock(&lock); + return retval; +} + + +static struct board_selector ccd_selector = { + .select = ccd_select, + .release = ccd_unlock, + .exit = ccd_exit, + .reset = ccd_reset, + .reset_all = ccd_reset_all, + /* don't have a temp sensor dedicated to chain */ + .get_temp = dummy_get_temp, +}; + diff --git a/A1-board-selector-CCR.c b/A1-board-selector-CCR.c new file mode 100644 index 0000000..1752d3c --- /dev/null +++ b/A1-board-selector-CCR.c @@ -0,0 +1,186 @@ +/* + * board selector support for TCA9535 used in Bitmine's CoinCraft Desk + * + * Copyright 2014 Zefir Kurtisi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + + +#include "miner.h" + +#include "A1-board-selector.h" +#include "i2c-context.h" + + +static struct board_selector ccr_selector; + +static struct i2c_ctx *U1_tca9548; +static struct i2c_ctx *U3_tca9535; +static struct i2c_ctx *U4_tca9535; +static uint8_t active_chain; +static pthread_mutex_t lock; + +struct chain_mapping { + uint8_t chain_id; + uint8_t U1; + uint8_t U3p0; + uint8_t U3p1; +}; + +static const struct chain_mapping chain_mapping[CCR_MAX_CHAINS] = { + { 0, 0x01, 0x01, 0x00, }, + { 1, 0x01, 0x00, 0x80, }, + { 2, 0x02, 0x02, 0x00, }, + { 3, 0x02, 0x00, 0x40, }, + { 4, 0x04, 0x04, 0x00, }, + { 5, 0x04, 0x00, 0x20, }, + { 6, 0x08, 0x08, 0x00, }, + { 7, 0x08, 0x00, 0x10, }, + { 8, 0x10, 0x10, 0x00, }, + { 9, 0x10, 0x00, 0x08, }, + { 10, 0x20, 0x20, 0x00, }, + { 11, 0x20, 0x00, 0x04, }, + { 12, 0x40, 0x40, 0x00, }, + { 13, 0x40, 0x00, 0x02, }, + { 14, 0x80, 0x80, 0x00, }, + { 15, 0x80, 0x00, 0x01, }, +}; + +static void ccr_unlock(void) +{ + mutex_unlock(&lock); +} + +static void ccr_exit(void) +{ + if (U1_tca9548 != NULL) + U1_tca9548->exit(U1_tca9548); + if (U3_tca9535 != NULL) + U3_tca9535->exit(U3_tca9535); + if (U4_tca9535 != NULL) + U4_tca9535->exit(U4_tca9535); +} + + +extern struct board_selector *ccr_board_selector_init(void) +{ + mutex_init(&lock); + applog(LOG_INFO, "ccr_board_selector_init()"); + + /* detect all i2c slaves */ + U1_tca9548 = i2c_slave_open(I2C_BUS, 0x70); + U3_tca9535 = i2c_slave_open(I2C_BUS, 0x23); + U4_tca9535 = i2c_slave_open(I2C_BUS, 0x22); + if (U1_tca9548 == NULL || U3_tca9535 == NULL || U4_tca9535 == NULL) + goto fail; + + /* init I2C multiplexer */ + bool res = U1_tca9548->write(U1_tca9548, 0x00, 0x00) && + /* init reset selector */ + U3_tca9535->write(U3_tca9535, 0x06, 0x00) && + U3_tca9535->write(U3_tca9535, 0x07, 0x00) && + U3_tca9535->write(U3_tca9535, 0x02, 0x00) && + U3_tca9535->write(U3_tca9535, 0x03, 0x00) && + /* init chain selector */ + U4_tca9535->write(U4_tca9535, 0x06, 0x00) && + 
U4_tca9535->write(U4_tca9535, 0x07, 0x00) && + U4_tca9535->write(U4_tca9535, 0x02, 0x00) && + U4_tca9535->write(U4_tca9535, 0x03, 0x00); + + if (!res) + goto fail; + + return &ccr_selector; + +fail: + ccr_exit(); + return NULL; +} + +static bool ccr_select(uint8_t chain) +{ + if (chain >= CCR_MAX_CHAINS) + return false; + + mutex_lock(&lock); + if (active_chain == chain) + return true; + + active_chain = chain; + const struct chain_mapping *cm = &chain_mapping[chain]; + + if (!U1_tca9548->write(U1_tca9548, cm->U1, cm->U1)) + return false; + + if (!U4_tca9535->write(U4_tca9535, 0x02, cm->U3p0) || + !U4_tca9535->write(U4_tca9535, 0x03, cm->U3p1)) + return false; + + /* sanity check: ensure i2c command has been written before we leave */ + uint8_t tmp; + if (!U4_tca9535->read(U4_tca9535, 0x02, &tmp) || tmp != cm->U3p0) { + applog(LOG_ERR, "ccr_select: wrote 0x%02x, read 0x%02x", + cm->U3p0, tmp); + } + applog(LOG_DEBUG, "selected chain %d", chain); + return true; +} + +static bool __ccr_board_selector_reset(uint8_t p0, uint8_t p1) +{ + if (!U3_tca9535->write(U3_tca9535, 0x02, p0) || + !U3_tca9535->write(U3_tca9535, 0x03, p1)) + return false; + cgsleep_ms(RESET_LOW_TIME_MS); + if (!U3_tca9535->write(U3_tca9535, 0x02, 0x00) || + !U3_tca9535->write(U3_tca9535, 0x03, 0x00)) + return false; + cgsleep_ms(RESET_HI_TIME_MS); + return true; +} +// we assume we are already holding the mutex +static bool ccr_reset(void) +{ + const struct chain_mapping *cm = &chain_mapping[active_chain]; + applog(LOG_DEBUG, "resetting chain %d", cm->chain_id); + bool retval = __ccr_board_selector_reset(cm->U3p0, cm->U3p1); + return retval; +} + +static bool ccr_reset_all(void) +{ + mutex_lock(&lock); + bool retval = __ccr_board_selector_reset(0xff, 0xff); + mutex_unlock(&lock); + return retval; +} + +static uint8_t ccr_get_temp(uint8_t sensor_id) +{ + if ((active_chain & 1) != 0 || sensor_id != 0) + return 0; + + struct i2c_ctx *U7 = i2c_slave_open(I2C_BUS, 0x4c); + if (U7 == NULL) + return 0; + 
+ uint8_t retval = 0; + if (!U7->read(U7, 0, &retval)) + retval = 0; + U7->exit(U7); + return retval; +} + +static struct board_selector ccr_selector = { + .select = ccr_select, + .release = ccr_unlock, + .exit = ccr_exit, + .reset = ccr_reset, + .reset_all = ccr_reset_all, + .get_temp = ccr_get_temp, +}; + diff --git a/A1-board-selector.h b/A1-board-selector.h new file mode 100644 index 0000000..bcadf7d --- /dev/null +++ b/A1-board-selector.h @@ -0,0 +1,50 @@ +#ifndef A1_BOARD_SELECTOR_H +#define A1_BOARD_SELECTOR_H + +#include +#include + +#define RESET_LOW_TIME_MS 200 +#define RESET_HI_TIME_MS 100 + +struct board_selector { + /* destructor */ + void (*exit)(void); + /* select board and chip chain for given chain index*/ + bool (*select)(uint8_t chain); + /* release access to selected chain */ + void (*release)(void); + /* reset currently selected chain */ + bool (*reset)(void); + /* reset all chains on board */ + bool (*reset_all)(void); + /* get temperature for selected chain at given sensor */ + uint8_t (*get_temp)(uint8_t sensor); + /* prepare board (voltage) for given sys_clock */ + bool (*prepare_clock)(int clock_khz); +}; + +static bool dummy_select(uint8_t b) { (void)b; return true; } +static void dummy_void(void) { }; +static bool dummy_bool(void) { return true; } +//static uint8_t dummy_u8(void) { return 0; } +static uint8_t dummy_get_temp(uint8_t s) { (void)s; return 0; } +static bool dummy_prepare_clock(int c) { (void)c; return true; } + +static const struct board_selector dummy_board_selector = { + .exit = dummy_void, + .select = dummy_select, + .release = dummy_void, + .reset = dummy_bool, + .reset_all = dummy_bool, + .get_temp = dummy_get_temp, + .prepare_clock = dummy_prepare_clock, +}; + +/* CoinCraft Desk and Rig board selector constructors */ +#define CCD_MAX_CHAINS 5 +#define CCR_MAX_CHAINS 16 +extern struct board_selector *ccd_board_selector_init(void); +extern struct board_selector *ccr_board_selector_init(void); + +#endif /* 
A1_BOARD_SELECTOR_H */ diff --git a/A1-common.h b/A1-common.h new file mode 100644 index 0000000..b048a99 --- /dev/null +++ b/A1-common.h @@ -0,0 +1,91 @@ +#ifndef A1_COMMON_H +#define A1_COMMON_H + +#include +#include +#include + +/********** work queue */ +struct work_ent { + struct work *work; + struct list_head head; +}; + +struct work_queue { + int num_elems; + struct list_head head; +}; + +/********** chip and chain context structures */ +/* the WRITE_JOB command is the largest (2 bytes command, 56 bytes payload) */ +#define WRITE_JOB_LENGTH 58 +#define MAX_CHAIN_LENGTH 64 +/* + * For commands to traverse the chain, we need to issue dummy writes to + * keep SPI clock running. To reach the last chip in the chain, we need to + * write the command, followed by chain-length words to pass it through the + * chain and another chain-length words to get the ACK back to host + */ +#define MAX_CMD_LENGTH (WRITE_JOB_LENGTH + MAX_CHAIN_LENGTH * 2 * 2) + +struct A1_chip { + int num_cores; + int last_queued_id; + struct work *work[4]; + /* stats */ + int hw_errors; + int stales; + int nonces_found; + int nonce_ranges_done; + + /* systime in ms when chip was disabled */ + int cooldown_begin; + /* number of consecutive failures to access the chip */ + int fail_count; + /* mark chip disabled, do not try to re-enable it */ + bool disabled; +}; + +struct A1_chain { + int chain_id; + struct cgpu_info *cgpu; + struct mcp4x *trimpot; + int num_chips; + int num_cores; + int num_active_chips; + int chain_skew; + uint8_t spi_tx[MAX_CMD_LENGTH]; + uint8_t spi_rx[MAX_CMD_LENGTH]; + struct spi_ctx *spi_ctx; + struct A1_chip *chips; + pthread_mutex_t lock; + + struct work_queue active_wq; + + /* mark chain disabled, do not try to re-enable it */ + bool disabled; + uint8_t temp; + int last_temp_time; +}; + +#define MAX_CHAINS_PER_BOARD 2 +struct A1_board { + int board_id; + int num_chains; + struct A1_chain *chain[MAX_CHAINS_PER_BOARD]; +}; + +/********** config paramters */ +struct 
A1_config_options { + int ref_clk_khz; + int sys_clk_khz; + int spi_clk_khz; + /* limit chip chain to this number of chips (testing only) */ + int override_chip_num; + int wiper; +}; + +/* global configuration instance */ +extern struct A1_config_options A1_config_options; + +#endif /* A1_COMMON_H */ diff --git a/A1-desk-board-selector.c b/A1-desk-board-selector.c new file mode 100644 index 0000000..fdc1a6d --- /dev/null +++ b/A1-desk-board-selector.c @@ -0,0 +1,158 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "miner.h" + +struct pcf8575_ctx { + uint8_t addr; + uint8_t p0; + uint8_t p1; + int file; + uint8_t active_board; + pthread_mutex_t lock; +}; + +static struct pcf8575_ctx board_ctx = { 0x27, 0xff, 0xff, -1, .active_board = 255,}; + + +#define UNUSED_BITS 0xe0 +#define SLEEP_MS_AFTER_CS 0 +static bool pcf8575_write(void) +{ + union i2c_smbus_data data; + data.byte = board_ctx.p1 | UNUSED_BITS; + + struct i2c_smbus_ioctl_data args; + __s32 err; + + args.read_write = I2C_SMBUS_WRITE; + args.command = board_ctx.p0 | UNUSED_BITS; + args.size = I2C_SMBUS_BYTE_DATA; + args.data = &data; + + err = ioctl(board_ctx.file, I2C_SMBUS, &args); + if (err == -1) { + fprintf(stderr, + "Error: Failed to write: %s\n", + strerror(errno)); + err = -errno; + } else { + applog(LOG_DEBUG, "written: 0x%02x, 0x%02x", board_ctx.p0, board_ctx.p1); +// usleep(25000); + cgsleep_ms(SLEEP_MS_AFTER_CS); + } + return err == 0; +} + +void lock_board_selector(void) +{ +// applog(LOG_WARNING, "lock_board_selector()"); + mutex_lock(&board_ctx.lock); +} + +void unlock_board_selector(void) +{ +// applog(LOG_WARNING, "unlock_board_selector()"); + mutex_unlock(&board_ctx.lock); +} + +bool a1_board_selector_init(void) +{ + mutex_init(&board_ctx.lock); + applog(LOG_WARNING, "a1_board_selector_init()"); + + board_ctx.file = open("/dev/i2c-1", O_RDWR); + if (board_ctx.file < 0) { + fprintf(stderr, + "Error: Could not open 
i2c-1: %s\n", + board_ctx.addr, strerror(errno)); + return false; + } + + if (ioctl(board_ctx.file, I2C_SLAVE, board_ctx.addr) < 0) { + fprintf(stderr, + "Error: Could not set address to 0x%02x: %s\n", + board_ctx.addr, strerror(errno)); + return false; + } + return pcf8575_write(); +} + +void a1_board_selector_exit(void) +{ + close(board_ctx.file); + board_ctx.file = -1; +} + +bool a1_board_selector_select_board(uint8_t board) +{ + if (board > 7) + return false; + +// applog(LOG_WARNING, "board_selector_select_board(%d)", board); + lock_board_selector(); + if (board_ctx.active_board == board) + return true; + + board_ctx.active_board = board; + board_ctx.p0 = 1 << board_ctx.active_board; + board_ctx.p1 = 0xff; + bool retval = pcf8575_write(); + return retval; +} + +static bool __board_selector_reset(void) +{ + board_ctx.p1 = ~board_ctx.p0; + if (!pcf8575_write()) + return false; + usleep(1000000); + board_ctx.p1 = 0xff; + if (!pcf8575_write()) + return false; + usleep(1000000); + return true; +} +// we assume we are already holding the mutex +bool a1_board_selector_reset_board(void) +{ +// lock_board_selector(); + bool retval = __board_selector_reset(); +// unlock_board_selector(); + return retval; +} + +bool a1_board_selector_reset_all_boards(void) +{ + lock_board_selector(); + board_ctx.p1 = 0; + bool retval = __board_selector_reset(); + unlock_board_selector(); + return retval; +} + +#if 0 +int main(void) +{ + if (init_pcf8575(&board_ctx)) { + if (!pcf8575_write(&g_ctx)) { + fprintf(stderr, + "Error: Failed to write: %s\n", + strerror(errno)); + } + a1_board_selector_exit(&g_ctx); + } + return 0; +} +#endif +///////////////////////////////////////////////////////////////////////////// diff --git a/A1-trimpot-mcp4x.c b/A1-trimpot-mcp4x.c new file mode 100644 index 0000000..a9244fd --- /dev/null +++ b/A1-trimpot-mcp4x.c @@ -0,0 +1,116 @@ +/* + * support for MCP46x digital trimpot used in Bitmine's products + * + * Copyright 2014 Zefir Kurtisi + * + * This program 
is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "miner.h" + +#include "A1-trimpot-mcp4x.h" + + +static bool mcp4x_check_status(int file) +{ + union i2c_smbus_data data; + struct i2c_smbus_ioctl_data args; + + args.read_write = I2C_SMBUS_READ; + args.command = ((5 & 0x0f) << 4) | 0x0c; + args.size = I2C_SMBUS_WORD_DATA; + args.data = &data; + + return ioctl(file, I2C_SMBUS, &args) >= 0; +} + +static uint16_t mcp4x_get_wiper(struct mcp4x *me, uint8_t id) +{ + assert(id < 2); + union i2c_smbus_data data; + struct i2c_smbus_ioctl_data args; + + args.read_write = I2C_SMBUS_READ; + args.command = ((id & 0x0f) << 4) | 0x0c; + args.size = I2C_SMBUS_WORD_DATA; + args.data = &data; + + if (ioctl(me->file, I2C_SMBUS, &args) < 0) { + applog(LOG_ERR, "Failed to read id %d: %s\n", id, + strerror(errno)); + return 0xffff; + } + return htobe16(data.word & 0xffff); +} + +static bool mcp4x_set_wiper(struct mcp4x *me, uint8_t id, uint16_t w) +{ + assert(id < 2); + union i2c_smbus_data data; + data.word = w; + + struct i2c_smbus_ioctl_data args; + + args.read_write = I2C_SMBUS_WRITE; + args.command = (id & 0x0f) << 4; + args.size = I2C_SMBUS_WORD_DATA; + args.data = &data; + + if (ioctl(me->file, I2C_SMBUS, &args) < 0) { + applog(LOG_ERR, "Failed to read id %d: %s\n", id, + strerror(errno)); + return false; + } + return me->get_wiper(me, id) == w; +} + +void mcp4x_exit(struct mcp4x *me) +{ + close(me->file); + free(me); +} + +struct mcp4x *mcp4x_init(uint8_t addr) +{ + struct mcp4x *me; + int file = open("/dev/i2c-1", O_RDWR); + if (file < 0) { + applog(LOG_INFO, "Failed to open i2c-1: %s\n", strerror(errno)); + return NULL; + 
} + + if (ioctl(file, I2C_SLAVE, addr) < 0) + return NULL; + + if (!mcp4x_check_status(file)) + return NULL; + + me = malloc(sizeof(*me)); + assert(me != NULL); + + me->addr = addr; + me->file = file; + me->exit = mcp4x_exit; + me->get_wiper = mcp4x_get_wiper; + me->set_wiper = mcp4x_set_wiper; + return me; +} + diff --git a/A1-trimpot-mcp4x.h b/A1-trimpot-mcp4x.h new file mode 100644 index 0000000..b7ff600 --- /dev/null +++ b/A1-trimpot-mcp4x.h @@ -0,0 +1,19 @@ +#ifndef TRIMPOT_MPC4X_H +#define TRIMPOT_MPC4X_H + +#include +#include + + +struct mcp4x { + uint16_t (*get_wiper)(struct mcp4x *me, uint8_t id); + bool (*set_wiper)(struct mcp4x *me, uint8_t id, uint16_t w); + void (*exit)(struct mcp4x *me); + uint8_t addr; + int file; +}; + +/* constructor */ +extern struct mcp4x *mcp4x_init(uint8_t addr); + +#endif /* TRIMPOT_MPC4X_H */ diff --git a/API-README b/API-README new file mode 100644 index 0000000..78564b0 --- /dev/null +++ b/API-README @@ -0,0 +1,1852 @@ + +This README contains details about the cgminer RPC API + +It also includes some detailed information at the end, +about using miner.php + + +If you start cgminer with the "--api-listen" option, it will listen on a +simple TCP/IP socket for single string API requests from the same machine +running cgminer and reply with a string and then close the socket each time +If you add the "--api-network" option, it will accept API requests from any +network attached computer. + +You can only access the comands that reply with data in this mode. +By default, you cannot access any privileged command that affects the miner - +you will receive an access denied status message see --api-allow below. + +You can specify IP addresses/prefixes that are only allowed to access the API +with the "--api-allow" option e.g. 
--api-allow W:192.168.0.1,10.0.0/24 +will allow 192.168.0.1 or any address matching 10.0.0.*, but nothing else +IP addresses are automatically padded with extra '.0's as needed +Without a /prefix is the same as specifying /32 +0/0 means all IP addresses. +The 'W:' on the front gives that address/subnet privileged access to commands +that modify cgminer (thus all API commands) +Without it those commands return an access denied status. +See --api-groups below to define other groups like W: +Privileged access is checked in the order the IP addresses were supplied to +"--api-allow" +The first match determines the privilege level. +Using the "--api-allow" option overides the "--api-network" option if they +are both specified +With "--api-allow", 127.0.0.1 is not by default given access unless specified + +If you start cgminer also with the "--api-mcast" option, it will listen for +a multicast message and reply to it with a message containing it's API port +number, but only if the IP address of the sender is allowed API access + +More groups (like the privileged group W:) can be defined using the +--api-groups command +Valid groups are only the letters A-Z (except R & W are predefined) and are +not case sensitive +The R: group is the same as not privileged access +The W: group is (as stated) privileged access (thus all API commands) +To give an IP address/subnet access to a group you use the group letter +in front of the IP address instead of W: e.g. 
P:192.168.0/32 +An IP address/subnet can only be a member of one group +A sample API group would be: + --api-groups + P:switchpool:enablepool:addpool:disablepool:removepool:poolpriority:* +This would create a group 'P' that can do all current pool commands and all +non-priviliged commands - the '*' means all non-priviledged commands +Without the '*' the group would only have access to the pool commands +Defining multiple groups example: + --api-groups Q:quit:restart:*,S:save +This would define 2 groups: + Q: that can 'quit' and 'restart' as well as all non-priviledged commands + S: that can only 'save' and no other commands + +The RPC API request can be either simple text or JSON. + +If the request is JSON (starts with '{'), it will reply with a JSON formatted +response, otherwise it replies with text formatted as described further below. + +The JSON request format required is '{"command":"CMD","parameter":"PARAM"}' +(though of course parameter is not required for all requests) +where "CMD" is from the "Request" column below and "PARAM" would be e.g. +the ASC/PGA number if required. 
+ +An example request in both formats to disable Hotplug: + hotplug|0 + {"command":"hotplug","parameter":"0"} + +The format of each reply (unless stated otherwise) is a STATUS section +followed by an optional detail section + +From API version 1.7 onwards, reply strings in JSON and Text have the +necessary escaping as required to avoid ambiguity - they didn't before 1.7 +For JSON the 2 characters '"' and '\' are escaped with a '\' before them +For Text the 4 characters '|' ',' '=' and '\' are escaped the same way + +Only user entered information will contain characters that require being +escaped, such as Pool URL, User and Password or the Config save filename, +when they are returned in messages or as their values by the API + +For API version 1.4 and later: + +The STATUS section is: + + STATUS=X,When=NNN,Code=N,Msg=string,Description=string| + + STATUS=X Where X is one of: + W - Warning + I - Informational + S - Success + E - Error + F - Fatal (code bug) + + When=NNN + Standard long time of request in seconds + + Code=N + Each unique reply has a unique Code (See api.c - #define MSG_NNNNNN) + + Msg=string + Message matching the Code value N + + Description=string + This defaults to the cgminer version but is the value of --api-description + if it was specified at runtime. + +With API V3.1 you can also request multiple report replies in a single command +request +e.g. to request both summary and devs, the command would be summary+devs + +This is only available for report commands that don't need parameters, +and is not available for commands that change anything +Any parameters supplied will be ignored + +The extra formatting of the result is to have a section for each command +e.g. CMD=summary|STATUS=....|CMD=devs|STATUS=... +With JSON, each result is within a section of the command name +e.g. 
{"summary":{"STATUS":[{"STATUS":"S"...}],"SUMMARY":[...],"id":1}, + "devs":{"STATUS":[{"STATUS:"S"...}],"DEVS":[...],"id":1},"id":1} + +As before, if you supply bad JSON you'll just get a single 'E' STATUS section +in the old format, since it doesn't switch to using the new format until it +correctly processes the JSON and can match a '+' in the command + +If you request a command multiple times, e.g. devs+devs +you'll just get it once +If this results in only one command, it will still use the new layout +with just the one command + +If you request a command that can't be used due to requiring parameters, +a command that isn't a report, or an invalid command, you'll get an 'E' STATUS +for that one but it will still attempt to process all other commands supplied + +Blank/missing commands are ignored e.g. +devs++ +will just show 'devs' using the new layout + +For API version 1.10 and later: + +The list of requests - a (*) means it requires privileged access - and replies: + + Request Reply Section Details + ------- ------------- ------- + version VERSION CGMiner=cgminer, version + API=API| version + + config CONFIG Some miner configuration information: + ASC Count=N, <- the number of ASCs + PGA Count=N, <- the number of PGAs + Pool Count=N, <- the number of Pools + Strategy=Name, <- the current pool strategy + Log Interval=N, <- log interval (--log N) + Device Code=ICA , <- spaced list of compiled + device drivers + OS=Linux/Apple/..., <- operating System + Failover-Only=true/false, <- failover-only setting + ScanTime=N, <- --scan-time setting + Queue=N, <- --queue setting + Expiry=N| <- --expiry setting + + summary SUMMARY The status summary of the miner + e.g. Elapsed=NNN,Found Blocks=N,Getworks=N,...| + + pools POOLS The status of each pool e.g. + Pool=0,URL=http://pool.com:6311,Status=Alive,...| + + devs DEVS Each available PGA and ASC with their details + e.g. 
ASC=0,Accepted=NN,MHS av=NNN,...,Intensity=D| + Last Share Time=NNN, <- standand long time in sec + (or 0 if none) of last accepted share + Last Share Pool=N, <- pool number (or -1 if none) + Last Valid Work=NNN, <- standand long time in sec + of last work returned that wasn't an HW: + Will not report PGAs if PGA mining is disabled + Will not report ASCs if ASC mining is disabled + + edevs[|old] DEVS The same as devs, except it ignores blacklisted + devices and zombie devices + If you specify the optional 'old' parameter, then + the output will include zombie devices that became + zombies less than 'old' seconds ago + A value of zero for 'old', which is the default, + means ignore all zombies + It will return an empty list of devices if all + devices are blacklisted or zombies + + pga|N PGA The details of a single PGA number N in the same + format and details as for DEVS + This is only available if PGA mining is enabled + Use 'pgacount' or 'config' first to see if there + are any + + pgacount PGAS Count=N| <- the number of PGAs + Always returns 0 if PGA mining is disabled + + switchpool|N (*) + none There is no reply section just the STATUS section + stating the results of switching pool N to the + highest priority (the pool is also enabled) + The Msg includes the pool URL + + enablepool|N (*) + none There is no reply section just the STATUS section + stating the results of enabling pool N + The Msg includes the pool URL + + addpool|URL,USR,PASS (*) + none There is no reply section just the STATUS section + stating the results of attempting to add pool N + The Msg includes the pool number and URL + Use '\\' to get a '\' and '\,' to include a comma + inside URL, USR or PASS + + poolpriority|N,... 
(*) + none There is no reply section just the STATUS section + stating the results of changing pool priorities + See usage below + + poolquota|N,Q (*) + none There is no reply section just the STATUS section + stating the results of changing pool quota to Q + + disablepool|N (*) + none There is no reply section just the STATUS section + stating the results of disabling pool N + The Msg includes the pool URL + + removepool|N (*) + none There is no reply section just the STATUS section + stating the results of removing pool N + The Msg includes the pool URL + N.B. all details for the pool will be lost + + save|filename (*) + none There is no reply section just the STATUS section + stating success or failure saving the cgminer + config to filename + The filename is optional and will use the cgminer + default if not specified + + quit (*) none Status is a single "BYE" reply before cgminer + quits + + notify NOTIFY The last status and history count of each devices + problem + This lists all devices including those not + supported by the 'devs' command e.g. 
+ NOTIFY=0,Name=ASC,ID=0,Last Well=1332432290,...|
+
+ privileged (*)
+ none There is no reply section just the STATUS section
+ stating an error if you do not have privileged
+ access to the API and success if you do have
+ privilege
+ The command doesn't change anything in cgminer
+
+ pgaenable|N (*)
+ none There is no reply section just the STATUS section
+ stating the results of the enable request
+ You cannot enable a PGA if its status is not WELL
+ This is only available if PGA mining is enabled
+
+ pgadisable|N (*)
+ none There is no reply section just the STATUS section
+ stating the results of the disable request
+ This is only available if PGA mining is enabled
+
+ pgaidentify|N (*)
+ none There is no reply section just the STATUS section
+ stating the results of the identify request
+ This is only available if PGA mining is enabled
+ and currently only BFL singles and Cairnsmore1's
+ with the appropriate firmware support this command
+ On a BFL single it will flash the led on the front
+ of the device for approximately 4s
+ All other non BFL,ICA PGA devices will return a
+ warning status message stating that they don't
+ support it. Non-CMR ICAs will ignore the command.
+ This adds a 4s delay to the BFL share being
+ processed so you may get a message stating that
+ processing took longer than 7000ms if the request
+ was sent towards the end of the timing of any work
+ being worked on
+ e.g.: BFL0: took 8438ms - longer than 7000ms
+ You should ignore this
+
+ devdetails DEVDETAILS Each device with a list of their static details
+ This lists all devices including those not
+ supported by the 'devs' command
+ e.g.
DEVDETAILS=0,Name=ASC,ID=0,Driver=yuu,...| + + restart (*) none Status is a single "RESTART" reply before cgminer + restarts + + stats STATS Each device or pool that has 1 or more getworks + with a list of stats regarding getwork times + The values returned by stats may change in future + versions thus would not normally be displayed + Device drivers are also able to add stats to the + end of the details returned + + estats[|old] STATS The same as stats, except it ignores blacklisted + devices, zombie devices and pools + If you specify the optional 'old' parameter, then + the output will include zombie devices that became + zombies less than 'old' seconds ago + A value of zero for 'old', which is the default, + means ignore all zombies + It will return an empty list of devices if all + devices are blacklisted or zombies + + check|cmd CHECK Exists=Y/N, <- 'cmd' exists in this version + Access=Y/N| <- you have access to use 'cmd' + + failover-only|true/false (*) + none There is no reply section just the STATUS section + stating what failover-only was set to + + coin COIN Coin mining information: + Hash Method=sha256/scrypt, + Current Block Time=N.N, <- 0 means none + Current Block Hash=XXXX..., <- blank if none + LP=true/false, <- LP is in use on at least 1 pool + Network Difficulty=NN.NN| + + debug|setting (*) + DEBUG Debug settings + The optional commands for 'setting' are the same + as the screen curses debug settings + You can only specify one setting + Only the first character is checked - case + insensitive: + Silent, Quiet, Verbose, Debug, RPCProto, + PerDevice, WorkTime, Normal + The output fields are (as above): + Silent=true/false, + Quiet=true/false, + Verbose=true/false, + Debug=true/false, + RPCProto=true/false, + PerDevice=true/false, + WorkTime=true/false| + + setconfig|name,N (*) + none There is no reply section just the STATUS section + stating the results of setting 'name' to N + The valid values for name are currently: + queue, scantime, expiry + N 
is an integer in the range 0 to 9999 + + usbstats USBSTATS Stats of all LIBUSB mining devices except ztex + e.g. Name=MMQ,ID=0,Stat=SendWork,Count=99,...| + + pgaset|N,opt[,val] (*) + none There is no reply section just the STATUS section + stating the results of setting PGA N with + opt[,val] + This is only available if PGA mining is enabled + + If the PGA does not support any set options, it + will always return a WARN stating pgaset isn't + supported + + If opt=help it will return an INFO status with a + help message about the options available + + The current options are: + MMQ opt=clock val=160 to 230 (a multiple of 2) + CMR opt=clock val=100 to 220 + + zero|Which,true/false (*) + none There is no reply section just the STATUS section + stating that the zero, and optional summary, was + done + If Which='all', all normal cgminer and API + statistics will be zeroed other than the numbers + displayed by the usbstats and stats commands + If Which='bestshare', only the 'Best Share' values + are zeroed for each pool and the global + 'Best Share' + The true/false option determines if a full summary + is shown on the cgminer display like is normally + displayed on exit. 
+
+ hotplug|N (*) none There is no reply section just the STATUS section
+ stating that the hotplug setting succeeded
+ If the code is not compiled with hotplug in it,
+ then the warning reply will be
+ 'Hotplug is not available'
+ If N=0 then hotplug will be disabled
+ If N>0 && <=9999, then hotplug will check for new
+ devices every N seconds
+
+ asc|N ASC The details of a single ASC number N in the same
+ format and details as for DEVS
+ This is only available if ASC mining is enabled
+ Use 'asccount' or 'config' first to see if there
+ are any
+
+ ascenable|N (*)
+ none There is no reply section just the STATUS section
+ stating the results of the enable request
+ You cannot enable an ASC if its status is not WELL
+ This is only available if ASC mining is enabled
+
+ ascdisable|N (*)
+ none There is no reply section just the STATUS section
+ stating the results of the disable request
+ This is only available if ASC mining is enabled
+
+ ascidentify|N (*)
+ none There is no reply section just the STATUS section
+ stating the results of the identify request
+ This is only available if ASC mining is enabled
+ and currently only BFL ASICs support this command
+ On a BFL single it will flash the led on the front
+ of the device for approximately 4s
+ All other non BFL ASIC devices will return a
+ warning status message stating that they don't
+ support it
+
+ asccount ASCS Count=N| <- the number of ASCs
+ Always returns 0 if ASC mining is disabled
+
+ ascset|N,opt[,val] (*)
+ none There is no reply section just the STATUS section
+ stating the results of setting ASC N with
+ opt[,val]
+ This is only available if ASC mining is enabled
+
+ If the ASC does not support any set options, it
+ will always return a WARN stating ascset isn't
+ supported
+
+ If opt=help it will return an INFO status with a
+ help message about the options available
+
+ The current options are:
+ AVA+BTB opt=freq val=256 to 1024 - chip frequency
+ BTB opt=millivolts val=1000 to 1400 - corevoltage
+ MBA opt=reset val=0 to chipcount - reset a chip + BMA opt=volt val=0-9 opt=clock val=0-15 + MBA opt=freq val=0-chip:100-1400 - set chip freq + MBA opt=ledcount val=0-100 - chip count for led + MBA opt=ledlimit val=0-200 - led off below GHs + MBA opt=spidelay val=0-9999 - SPI per I/O delay + MBA opt=spireset i|s0-9999 - SPI regular reset + MBA opt=spisleep val=0-9999 - SPI reset sleep ms + + lcd LCD An all-in-one short status summary of the miner + e.g. Elapsed,GHS av,GHS 5m,GHS 5s,Temp, + Last Share Difficulty,Last Share Time, + Best Share,Last Valid Work,Found Blocks, + Pool,User| + + lockstats (*) none There is no reply section just the STATUS section + stating the results of the request + A warning reply means lock stats are not compiled + into cgminer + The API writes all the lock stats to stderr + +When you enable, disable or restart a PGA or ASC, you will also get +Thread messages in the cgminer status window + +The 'poolpriority' command can be used to reset the priority order of multiple +pools with a single command - 'switchpool' only sets a single pool to first +priority +Each pool should be listed by id number in order of preference (first = most +preferred) +Any pools not listed will be prioritised after the ones that are listed, in the +priority order they were originally +If the priority change affects the miner's preference for mining, it may switch +immediately + +When you switch to a different pool to the current one (including by priority +change), you will get a 'Switching to URL' message in the cgminer status +windows + +Obviously, the JSON format is simply just the names as given before the '=' +with the values after the '=' + +If you enable cgminer debug (-D or --debug) or, when cgminer debug is off, +turn on debug with the API command 'debug|debug' you will also get messages +showing some details of the requests received and the replies + +There are included 4 program examples for accessing the API: + +api-example.php - a php script to 
access the API
+ usage: php api-example.php command
+ by default it sends a 'summary' request to the miner at 127.0.0.1:4028
+ If you specify a command it will send that request instead
+ You must modify the line "$socket = getsock('127.0.0.1', 4028);" at the
+ beginning of "function request($cmd)" to change where it looks for cgminer
+
+api-example.rb - a Ruby script to access the API.
+ usage: ruby api-example.rb command[:parameter] [HOST [PORT]]
+This script prints the parsed cgminer API response
+
+API.java/API.class
+ a java program to access the API (with source code)
+ usage is: java API command address port
+ Any missing or blank parameters are replaced as if you entered:
+ java API summary 127.0.0.1 4028
+
+api-example.c - a 'C' program to access the API (with source code)
+ usage: api-example [command [ip/host [port]]]
+ again, as above, missing or blank parameters are replaced as if you entered:
+ api-example summary 127.0.0.1 4028
+
+miner.php - an example web page to access the API
+ This includes buttons and inputs to attempt access to the privileged commands
+ See the end of this API-README for details of how to tune the display
+ and also to use the option to display a multi-rig summary
+
+----------
+
+Feature Changelog for external applications using the API:
+
+---------
+
+API V3.5 (cgminer v4.7.0)
+
+- Made quit and restart return valid JSON as a STATUS mirroring the request.
+- Made addpool return what pool number the added pool is.
+
+---------
+
+API V3.4 (cgminer v4.3.?)
+ +Added API commands: + 'lcd' - An all-in-one short status summary of the miner + +--------- + +API V3.3 (cgminer v4.2.0) + +Added API commands: + 'edevs' - Only enabled devices, for 'devs' + 'estats' - Only enabled devices, for 'stats' + +--------- + +API V3.2 (cgminer v4.1.0) + +Fix for: +HEX32 data type in the API version v3.1 JSON - since cgminer v3.12.1 - +returns an incorrect formatted json data element for the API stats command +for HashFast hardware + +--------- + +API V3.1 (cgminer v3.12.1) + +Multiple report request command with '+' e.g. summary+devs + +--------- + +API V3.0 (cgminer v3.11.0) + +Allow unlimited size replies + +--------- + +API V2.0 (cgminer v3.8.0) + +Removed all GPU related commands and information from the replies + +--------- + +API V1.32 (cgminer v3.6.5) + +Modified API commands: + 'devs' 'gpu' 'pga' and 'asc' - add 'Device Elapsed' + +--------- + +API V1.31 (cgminer v3.6.3) + +Added API command: + 'lockstats' - display cgminer dev lock stats if compiled in + +Modified API command: + 'summary' - add 'MHS %ds' (where %d is the log interval) + +--------- + +API V1.30 (cgminer v3.4.3) + +Added API command: + 'poolquota' - Set pool quota for load-balance strategy. 
+ +Modified API command: + 'pools' - add 'Quota' + +--------- + +API V1.29 (cgminer v3.4.1) + +Muticast identification added to the API + +---------- + +API V1.28 (cgminer v3.3.4) + +Modified API commands: + 'devs', 'pga', 'asc', 'gpu' - add 'Device Hardware%' and 'Device Rejected%' + 'pools' - add 'Pool Rejected%' and 'Pool Stale%' + 'summary' - add 'Device Hardware%', 'Device Rejected%', 'Pool Rejected%', + 'Pool Stale%' + +---------- + +API V1.27 (cgminer v3.3.2) + +Added API commands: + 'ascset' - with: BTB opt=millivolts val=1000 to 1310 - core voltage + AVA+BTB opt=freq val=256 to 450 - chip frequency + +---------- + +API V1.26 (cgminer v3.2.3) + +Remove all CPU support (cgminer v3.0.0) + +Added API commands: + 'asc' + 'ascenable' + 'ascdisable' + 'ascidentify|N' (only works for BFL ASICs so far) + 'asccount' + +Various additions to the debug 'stats' command + +---------- + +API V1.25 + +Added API commands: + 'hotplug' + +Modified API commands: + 'devs' 'gpu' and 'pga' - add 'Last Valid Work' + 'devs' - list ASIC devices + 'config' - add 'Hotplug', 'ASC Count' + 'coin' - add 'Network Difficulty' + +---------- + +API V1.24 (cgminer v2.11.0) + +Added API commands: + 'zero' + +Modified API commands: + 'pools' - add 'Best Share' + 'devs' and 'pga' - add 'No Device' for PGAs if MMQ or BFL compiled + 'stats' - add pool: 'Net Bytes Sent', 'Net Bytes Recv' + +---------- + +API V1.23 (cgminer v2.10.2) + +Added API commands: + 'pgaset' - with: MMQ opt=clock val=160 to 230 (and a multiple of 2) + +---------- + +API V1.22 (cgminer v2.10.1) + +Enforced output limitation: + all extra records beyond the output limit of the API (~64k) are ignored + and chopped off at the record boundary before the limit is reached + however, JSON brackets will be correctly closed and the JSON id will be + set to 0 (instead of 1) if any data was truncated + +Modified API commands: + 'stats' - add 'Times Sent', 'Bytes Sent', 'Times Recv', 'Bytes Recv' + +---------- + +API V1.21 (cgminer 
v2.10.0) + +Added API commands: + 'usbstats' + +Modified API commands: + 'summary' - add 'Best Share' + +Modified output: + each MMQ shows up as 4 devices, each with it's own stats + +---------- + +API V1.20 (cgminer v2.8.5) + +Modified API commands: + 'pools' - add 'Has Stratum', 'Stratum Active', 'Stratum URL' + +---------- + +API V1.19 (cgminer v2.7.6) + +Added API commands: + 'debug' + 'pgaidentify|N' (only works for BFL Singles so far) + 'setconfig|name,N' + +Modified API commands: + 'devs' - add 'Diff1 Work', 'Difficulty Accepted', 'Difficulty Rejected', + 'Last Share Difficulty' to all devices + 'gpu|N' - add 'Diff1 Work', 'Difficulty Accepted', + 'Difficulty Rejected', 'Last Share Difficulty' + 'pga|N' - add 'Diff1 Work', 'Difficulty Accepted', + 'Difficulty Rejected', 'Last Share Difficulty' + 'notify' - add '*Dev Throttle' (for BFL Singles) + 'pools' - add 'Proxy Type', 'Proxy', 'Difficulty Accepted', + 'Difficulty Rejected', 'Difficulty Stale', + 'Last Share Difficulty' + 'config' - add 'Queue', 'Expiry' + 'stats' - add 'Work Diff', 'Min Diff', 'Max Diff', 'Min Diff Count', + 'Max Diff Count' to the pool stats + +---------- + +API V1.18 (cgminer v2.7.4) + +Modified API commands: + 'stats' - add 'Work Had Roll Time', 'Work Can Roll', 'Work Had Expire', + 'Work Roll Time' to the pool stats + 'config' - include 'ScanTime' + +---------- + +API V1.17 (cgminer v2.7.1) + +Added API commands: + 'coin' + +Modified API commands: + 'summary' - add 'Work Utility' + 'pools' - add 'Diff1 Shares' + +---------- + +API V1.16 (cgminer v2.6.5) + +Added API commands: + 'failover-only' + +Modified API commands: + 'config' - include failover-only state + +---------- + +API V1.15 (cgminer v2.6.1) + +Added API commands: + 'poolpriority' + +---------- + +API V1.14 (cgminer v2.5.0) + +Modified API commands: + 'stats' - more icarus timing stats added + 'notify' - include new device comms error counter + +The internal code for handling data was rewritten (~25% of the code) 
+Completely backward compatible + +---------- + +API V1.13 (cgminer v2.4.4) + +Added API commands: + 'check' + +Support was added to cgminer for API access groups with the --api-groups option +It's 100% backward compatible with previous --api-access commands + +---------- + +API V1.12 (cgminer v2.4.3) + +Modified API commands: + 'stats' - more pool stats added + +Support for the ModMinerQuad FPGA was added + +---------- + +API V1.11 (cgminer v2.4.2) + +Modified API commands: + 'save' no longer requires a filename (use default if not specified) + +'save' incorrectly returned status E (error) on success before. +It now correctly returns S (success) + +---------- + +API V1.10 (cgminer v2.4.1) + +Added API commands: + 'stats' + +N.B. the 'stats' command can change at any time so any specific content +present should not be relied upon. +The data content is mainly used for debugging purposes or hidden options +in cgminer and can change as development work requires + +Modified API commands: + 'pools' added "Last Share Time" + +---------- + +API V1.9 (cgminer v2.4.0) + +Added API commands: + 'restart' + +Modified API commands: + 'notify' corrected invalid JSON + +---------- + +API V1.8 (cgminer v2.3.5) + +Added API commands: + 'devdetails' + +Support for the ZTex FPGA was added + +---------- + +API V1.7 (cgminer v2.3.4) + +Added API commands: + 'removepool' + +Modified API commands: + 'pools' added "User" + +From API version 1.7 onwards, reply strings in JSON and Text have the +necessary escaping as required to avoid ambiguity +For JSON the 2 characters '"' and '\' are escaped with a '\' before them +For Text the 4 characters '|' ',' '=' and '\' are escaped the same way + +---------- + +API V1.6 (cgminer v2.3.2) + +Added API commands: + 'pga' + 'pgaenable' + 'pgadisable' + 'pgacount' + +Modified API commands: + 'devs' now includes Icarus and Bitforce FPGA devices + 'notify' added "*" to the front of the name of all numeric error fields + 'config' correct "Log Interval" to 
use numeric (not text) type for JSON + +Support for Icarus and Bitforce FPGAs was added + +---------- + +API V1.5 was not released + +---------- + +API V1.4 (Kano's interim release of cgminer v2.3.1) + +Added API commands: + 'notify' + +Modified API commands: + 'config' added "Device Code" and "OS" + +Added "When" to the STATUS reply section of all commands + +---------- + +API V1.3 (cgminer v2.3.1-2) + +Added API commands: + 'addpool' + +Modified API commands: + 'devs'/'gpu' added "Total MH" for each device + 'summary' added "Total MH" + +---------- + +API V1.2 (cgminer v2.3.0) + +Added API commands: + 'enablepool' + 'disablepool' + 'privileged' + +Modified API commands: + 'config' added "Log Interval" + +Starting with API V1.2, any attempt to access a command that requires +privileged security, from an IP address that does not have privileged +security, will return an "Access denied" Error Status + +---------- + +API V1.1 (cgminer v2.2.4) + +There were no changes to the API commands in cgminer v2.2.4, +however support was added to cgminer for IP address restrictions +with the --api-allow option + +---------- + +API V1.1 (cgminer v2.2.2) + +Prior to V1.1, devs/gpu incorrectly reported GPU0 Intensity for all GPUs + +Modified API commands: + 'devs'/'gpu' added "Last Share Pool" and "Last Share Time" for each device + +---------- + +API V1.0 (cgminer v2.2.0) + +Remove default CPU support + +Added API commands: + 'config' + 'gpucount' + 'cpucount' + 'switchpool' + 'gpuintensity' + 'gpumem' + 'gpuengine' + 'gpufan' + 'gpuvddc' + 'save' + +---------- + +API V0.7 (cgminer v2.1.0) + +Initial release of the API in the main cgminer git + +Commands: + 'version' + 'devs' + 'pools' + 'summary' + 'gpuenable' + 'gpudisable' + 'gpurestart' + 'gpu' + 'cpu' + 'gpucount' + 'cpucount' + 'quit' + +---------------------------------------- + +miner.php +========= + +miner.php is a PHP based interface to the cgminer RPC API +(referred to simply as the API below) + +It can show rig 
details, summaries and input fields to allow you to change +cgminer +You can also create custom summary pages with it + +It has two levels to the security: +1) cgminer can be configured to allow or disallow API access and access level + security for miner.php +2) miner.php can be configured to allow or disallow privileged cgminer + access, if cgminer is configured to allow privileged access for miner.php + +--------- + +To use miner.php requires a web server with PHP + +Basics: On xubuntu 11.04, to install apache2 and php, the commands are: + sudo apt-get install apache2 + sudo apt-get install php5 + sudo /etc/init.d/apache2 reload + +On Fedora 17: + yum install httpd php + systemctl restart httpd.service + systemctl enable httpd.service --system + +On windows there are a few options. +Try one of these (apparently the first one is easiest - thanks jborkl) + http://www.easyphp.org/ + http://www.apachefriends.org/en/xampp.html + http://www.wampserver.com/en/ + +--------- + +The basic cgminer option to enable the API is: + + --api-listen + +or in your cgminer.conf + + "api-listen" : true, + +(without the ',' on the end if it is the last item) + +If the web server is running on the cgminer computer, the above +is the only change required to give miner.php basic access to +the cgminer API + +- + +If the web server runs on a different computer to cgminer, +you will also need to tell cgminer to allow the web server +to access cgminer's API and tell miner.php where cgminer is + +Assuming a.b.c.d is the IP address of the web server, you +would add the following to cgminer: + + --api-listen --api-allow a.b.c.d + +or in your cgminer.conf + + "api-listen" : true, + "api-allow" : "a.b.c.d", + +to tell cgminer to give the web server read access to the API + +You also need to tell miner.php where cgminer is. 
+Assuming cgminer is at IP address e.f.g.h, then you would +edit miner.php and change the line + + $rigs = array('127.0.0.1:4028'); + +to + + $rigs = array('e.f.g.h:4028'); + +See --api-network or --api-allow for more access details +and how to give write access + +You can however, also tell miner.php to find your cgminer rigs automatically +on the local subnet + +Add the following to each cgminer: + + --api-mcast + +or in your cgminer.conf + + "api-mcast" : true, + +And in miner.php set $mcast = true; + +This will ignore the value of $rigs and overwrite it with the list of zero or +more rigs found on the network in the timeout specified +A rig will not reply if the API settings would mean it would also ignore an +API request from the web server running miner.php + +--------- + +Once you have a web server with PHP running + + copy your miner.php to the main web folder + +On Xubuntu 11.04 + /var/www/ + +On Fedora 17 + /var/www/html/ + +On Windows + see your windows Web/PHP documentation + +Assuming the IP address of the web server is a.b.c.d +Then in your web browser go to: + + http://a.b.c.d/miner.php + +Done :) + +--------- + +The rest of this documentation deals with the more complex +functions of miner.php, using myminer.php, creaing custom +summaries and displaying multiple cgminer rigs + +--------- + +If you create a file called myminer.php in the same web folder +where you put miner.php, miner.php will load it when it runs + +This is useful, to put any changes you need to make to miner.php +instead of changing miner.php +Thus if you update/get a new miner.php, you won't lose the changes +you have made if you put all your changes in myminer.php +(and don't change miner.php at all) + +A simple example myminer.php that defines 2 rigs +(that I will keep referring to further below) is: + + + +Changes in myminer.php superscede what is in miner.php +However, this is only valid for variables in miner.php before the +2 lines where myminer.php is included by miner.php: 
+
+ if (file_exists('myminer.php'))
+ include_once('myminer.php');
+
+Every variable in miner.php above those 2 lines, can be changed by
+simply defining them in your myminer.php
+
+So although miner.php originally contains the line
+
+ $rigs = array('127.0.0.1:4028');
+
+if you created the example myminer.php given above, it would actually
+change the value of $rigs that is used when miner.php is running
+i.e. you don't have to remove or comment out the $rigs line in miner.php
+It will be superseded by myminer.php
+
+---------
+
+The example myminer.php above also shows how to define more than one rig
+to be shown by miner.php
+
+Each rig string is 2 or 3 values separated by colons ':'
+They are simply an IP address or host name, followed by the
+port number (usually 4028) and an optional Name string
+
+miner.php displays rig buttons that will show the details of a single
+rig when you click on it - the button shows either the rig number,
+or the 'Name' string if you provide it
+
+PHP arrays contain each string separated by a comma, but no comma after
+the last one
+
+So an example for 3 rigs would be:
+
+ $rigs = array('192.168.0.100:4028:A', '192.168.0.102:4028:B',
+ '192.168.0.110:4028:C');
+
+Of course each of the rigs listed would also have to have the API
+running and be set to allow the web server to access the API - as
+explained before
+
+---------
+
+So basically, any variable explained below can be put in myminer.php
+if you wanted to set it to something different to its default value
+and did not want to change miner.php itself every time you updated it
+
+Below is each variable that can be changed and an explanation of each
+
+---------
+
+Default:
+ $dfmt = 'H:i:s j-M-Y \U\T\CP';
+
+Define the date format used to print full length dates
+If you get the string 'UTCP' on the end of your dates shown, that
+means you are using an older version of PHP and you can instead use:
+ $dfmt = 'H:i:s j-M-Y \U\T\CO';
+
+The PHP documentation on the date format is
here: + http://us.php.net/manual/en/function.date.php + +--------- + +Default: + $title = 'Mine'; + +Web page title +If you know PHP you can of course use code to define it e.g. + $title = 'My Rig at: '.date($dfmt); + +Which would set the web page title to something like: + My Rig at: 10:34:00 22-Aug-2012 UTC+10:00 + +--------- + +Default: + $readonly = false; + +Set $readonly to true to force miner.php to be readonly +This means it won't allow you to change cgminer even if the cgminer API +options allow it to + +If you set $readonly to false then it will check cgminer 'privileged' +and will show input fields and buttons on the single rig page +allowing you to change devices, pools and even quit or restart +cgminer + +However, if the 'privileged' test fails, the code will set $readonly to +true + +--------- + +Default: + $userlist = null; + +Define password checking and default access + null means there is no password checking + +$userlist is an array of 3 arrays e.g. +$userlist = array('sys' => array('boss' => 'bpass'), + 'usr' => array('user' => 'upass', 'pleb' => 'ppass'), + 'def' => array('Pools')); + +'sys' is an array of system users and passwords (full access) +'usr' is an array of user level users and passwords (readonly access) +'def' is an array of custompages that anyone not logged in can view + +Any of the 3 can be null, meaning there are none of that item + +All validated 'usr' users are given $readonly = true; access +All validated 'sys' users are given the $readonly access you defined + +If 'def' has one or more values, and allowcustompages is true, then +anyone without a password can see the list of custompage buttons given +in 'def' and will see the first one when they go to the web page, with +a login button at the top right + +From the login page, if you login with no username or password, it will +show the first 'def' custompage (if there are any) + +If you are logged in, it will show a logout button at the top right + +--------- + +Default: + 
$notify = true; + +Set $notify to false to NOT attempt to display the notify command +table of data + +Set $notify to true to attempt to display the notify command on +the single rig page +If your older version of cgminer returns an 'Invalid command' +coz it doesn't have notify - it just shows the error status table + +--------- + +Default: + $checklastshare = true; + +Set $checklastshare to true to do the following checks: +If a device's last share is 12x expected ago then display as an error +If a device's last share is 8x expected ago then display as a warning +If either of the above is true, also display the whole line highlighted +This assumes shares are 1 difficulty shares + +Set $checklastshare to false to not do the above checks + +'expected' is calculated from the device MH/s value +So for example, a device that hashes at 380MH/s should (on average) +find a share every 11.3s +If the last share was found more than 11.3 x 12 seconds (135.6s) ago, +it is considered an error and highlighted +If the last share was found more than 11.3 x 8 seconds (90.4s) ago, +it is considered a warning and highlighted + +The default highlighting is very subtle + +--------- + +Default: + $poolinputs = false; + +Set $poolinputs to true to show the input fields for adding a pool +and changing the pool priorities on a single rig page +However, if $readonly is true, it will not display them + +--------- + +Default: + $rigport = 4028; + +Default port to use if any $rigs entries don't specify the port number + +--------- + +Default: + $rigs = array('127.0.0.1:4028'); + +Set $rigs to an array of your cgminer rigs that are running + format: 'IP' or 'Host' or 'IP:Port' or 'Host:Port' or 'Host:Port:Name' +If you only have one rig, it will just show the detail of that rig +If you have more than one rig it will show a summary of all the rigs + with buttons to show the details of each rig - + the button contents will be 'Name' rather than rig number, if you + specify 'Name' +If Port is 
missing or blank, it will try $rigport +e.g. $rigs = array('127.0.0.1:4028','myrig.com:4028:Sugoi'); + +--------- + +Default: + $rignames = false; + +Set $rignames to false to not affect the display. +Set $rignames to one of 'ip' or 'ipx' to alter the name displayed +if the rig doesn't have a 'name' in $rigs +Currently: + 'ip' means use the 4th byte of the rig IP address as an integer + 'ipx' means use the 4th byte of the rig IP address as 2 hex bytes + +--------- + +Default: + $rigbuttons = true; + +Set $rigbuttons to false to display a link rather than a button on +the left of any summary table with rig buttons, in order to reduce +the height of the table cells + +--------- + +Default: + $mcast = false; + +Set $mcast to true to look for your rigs and ignore $rigs + +--------- + +Default: + $mcastexpect = 0; + +The minimum number of rigs expected to be found when $mcast is true +If fewer are found, an error will be included at the top of the page + +--------- + +Default: + $mcastaddr = '224.0.0.75'; + +API Multicast address all cgminers are listening on + +--------- + +Default: + $mcastport = 4028; + +API Multicast UDP port all cgminers are listening on + +--------- + +Default: + $mcastcode = 'FTW'; + +The code all cgminers expect in the Multicast message sent +The message sent is "cgm-code-listport" +Don't use the '-' character if you change it + +--------- + +Default: + $mcastlistport = 4027; + +UDP port number that is added to the broadcast message sent +that specifies to the cgminers the port to reply on + +--------- + +Default: + $mcasttimeout = 1.5; + +Set $mcasttimeout to the number of seconds (floating point) +to wait for replies to the Multicast message +N.B. 
the accuracy of the timing used to wait for the replies is +~0.1s so there's no point making it more than one decimal place + +--------- + +Default: + $mcastretries = 0; + +Set $mcastretries to the number of times to retry the multicast + +If $mcastexpect is 0, this is simply the number of extra times +that it will send the multicast request +N.B. cgminer doesn't listen for multicast requests for 1000ms after +each one it hears + +If $mcastexpect is > 0, it will stop looking for replies once it +has found at least $mcastexpect rigs, but it only checks this rig +limit each time it reaches the $mcasttimeout limit, thus it can find +more than $mcastexpect rigs if more exist +It will send the multicast message up to $mcastretries extra times or +until it has found at least $mcastexpect rigs +However, when using $mcastretries, it is possible for it to sometimes +ignore some rigs on the network if $mcastexpect is less than the +number of rigs on the network and some rigs are too slow to reply + +--------- + +Default: + $allowgen = false; + +Set $allowgen to true to allow customsummarypages to use 'gen' and 'bgen' +false means ignore any 'gen' or 'bgen' options +This is disabled by default due to the possible security risk of using it +See the end of this document for an explanation + +--------- + +Default: + $rigipsecurity = true; + +Set $rigipsecurity to false to show the IP/Port of the rig +in the socket error messages and also show the full socket message + +--------- + +Default: + $rigtotals = true; + $forcerigtotals = false; + +Set $rigtotals to true to display totals on the single rig page +'false' means no totals (and ignores $forcerigtotals) + +If $rigtotals is true, all data is also right aligned +With false, it's as before, left aligned + +This option is just here to allow people to set it to false +if they prefer the old non-total display when viewing a single rig + +Also, if there is only one line shown in any section, then no +total will be shown (to save 
screen space) +You can force it to always show rig totals on the single rig page, +even if there is only one line, by setting $forcerigtotals = true; + +--------- + +Default: + $socksndtimeoutsec = 10; + $sockrcvtimeoutsec = 40; + +The numbers are integer seconds + +The defaults should be OK for most cases +However, the longer SND is, the longer you have to wait while +php hangs if the target cgminer isn't runnning or listening + +RCV should only ever be relevant if cgminer has hung but the +API thread is still running, RCV would normally be >= SND + +Feel free to increase SND if your network is very slow +or decrease RCV if that happens often to you + +Also, on some windows PHP, apparently the $usec is ignored +(so usec can't be specified) + +--------- + +Default: + $hidefields = array(); + +List of fields NOT to be displayed +You can use this to hide data you don't want to see or don't want +shown on a public web page +The list of sections are: + SUMMARY, POOL, PGA, GPU, NOTIFY, CONFIG, DEVDETAILS, DEVS +See the web page for the list of field names (the table headers) +It is an array of 'SECTION.Field Name' => 1 + +This example would hide the slightly more sensitive pool information: +Pool URL and pool username: + $hidefields = array('POOL.URL' => 1, 'POOL.User' => 1); + +If you just want to hide the pool username: + $hidefields = array('POOL.User' => 1); + +--------- + +Default: + $ignorerefresh = false; + $changerefresh = true; + $autorefresh = 0; + +Auto-refresh of the page (in seconds) - integers only + +$ignorerefresh = true/false always ignore refresh parameters +$changerefresh = true/false show buttons to change the value +$autorefresh = default value, 0 means dont auto-refresh + +--------- + +Default: + $miner_font_family = 'verdana,arial,sans'; + $miner_font_size = '13pt'; + +Change these to set the font and font size used on the web page + +--------- + +Default: + $add_css_names = array(); + +List of CSS names to add to the CSS style object + e.g. 
array('td.cool' => false); +true/false to not include the default $miner_font +The CSS name/value pairs must be defined in $colouroverride below + +This allows you to create multiple complete CSS styles, optionally +using a different font to the default used/specified for all other +styles, and then when using the class name in a custom formatting +function (fmt) in a customsummarypage, it can use this style + +--------- + +Default: + $colouroverride = array(); + +Use this to change the web page colour scheme + +See $colourtable in miner.php for the list of possible names to change + +Simply put in $colouroverride, just the colours you wish to change + +e.g. to change the colour of the header font and background +you could do the following: + + $colouroverride = array( + 'td.h color' => 'green', + 'td.h background' => 'blue' + ); + +You can also add your own CSS styles to be used by a customsummarypage +custom format function, if you specify the class name in $add_css_names +and put the class styles in $colouroverride + +--------- + +Default: + $placebuttons = 'top'; + +Where to place the Refresh, Summary, Custom Pages, Quit, etc. buttons + +Valid values are: 'top' 'bot' 'both' + anything else means don't show them - case sensitive + +--------- + +Default: + $allowcustompages = true; + +Should we allow custom pages? +(or just completely ignore them and don't display the buttons) + +--------- + +OK this part is more complex: Custom Summary Pages + +A custom summary page in an array of 'section' => array('FieldA','FieldB'...) 
+ +The section defines what data you want in the summary table and the Fields +define what data you want shown from that section + +Standard sections are: + SUMMARY, POOL, PGA, GPU, NOTIFY, CONFIG, DEVDETAILS, DEVS, EDEVS, STATS, + ESTATS, COIN + +Fields are the names as shown on the headers on the normal pages + +There is a special field name '#' that will total to the number of rows +displayed in the custom summary page +In the actual row output it is a row counter per rig + +Fields can be 'name=new name' to display 'name' with a different heading +'new name' + +There are also now joined sections: + SUMMARY+POOL, SUMMARY+DEVS, SUMMARY+EDEVS, DEVS+STATS, EDEVS+ESTATS, + POOL+STATS plus many more +See the miner.php function joinsections() for the full list + +These sections are an SQL join of the two sections and the fields in them +are named section.field where section. is the section the field comes from +See the example further down + +Also note: +- empty tables are not shown +- empty columns (e.g. 
an unknown field) are not shown
+- missing field data shows as blank
+- the field name '*' matches all fields except in joined sections
+ (useful for STATS and COIN)
+
+There are 2 hard coded sections:
+ DATE - displays a date table like at the start of 'Summary'
+ RIGS - displays a rig table like at the start of 'Summary'
+
+Each custom summary requires a second array, that can be empty, listing fields
+to be totaled for each section
+If there is no matching total data, no total will show
+
+---------
+
+Looking at the Mobile example:
+
+ $mobilepage = array(
+ 'DATE' => null,
+ 'RIGS' => null,
+ 'SUMMARY' => array('Elapsed', 'MHS av', 'Found Blocks=Blks',
+ 'Accepted', 'Rejected=Rej', 'Utility'),
+ 'DEVS+NOTIFY' => array('DEVS.Name=Name', 'DEVS.ID=ID', 'DEVS.Status=Status',
+ 'DEVS.Temperature=Temp', 'DEVS.MHS av=MHS av',
+ 'DEVS.Accepted=Accept', 'DEVS.Rejected=Rej',
+ 'DEVS.Utility=Utility', 'NOTIFY.Last Not Well=Not Well'),
+ 'POOL' => array('POOL', 'Status', 'Accepted', 'Rejected=Rej',
+ 'Last Share Time'));
+
+ $mobilesum = array(
+ 'SUMMARY' => array('MHS av', 'Found Blocks', 'Accepted', 'Rejected',
+ 'Utility'),
+ 'DEVS+NOTIFY' => array('DEVS.MHS av', 'DEVS.Accepted', 'DEVS.Rejected',
+ 'DEVS.Utility'),
+ 'POOL' => array('Accepted', 'Rejected'));
+
+ $customsummarypages = array('Mobile' => array($mobilepage, $mobilesum));
+
+This will show 5 tables (according to $mobilepage)
+Each table will have the chosen details for all the rigs specified in $rigs
+
+ DATE
+ A single box with the web server's current date and time
+
+ RIGS
+ A table of the rigs: description, time, versions etc
+
+ SUMMARY
+
+ This will use the API 'summary' command and show the selected fields:
+ Elapsed, MHS av, Found Blocks, Accepted, Rejected and Utility
+ However, 'Rejected=Rej' means that the header displayed for the 'Rejected'
+ field will be 'Rej', instead of 'Rejected' (to save space)
+ Same for 'Found Blocks=Blks' - to save space
+
+ DEVS+NOTIFY
+
+ This will list each of the 
devices on each rig and display the list of + fields as shown + It will also include the 'Last Not Well' field from the 'notify' command + so you know when the device was last not well + + You will notice that you need to rename each field e.g. 'DEVS.Name=Name' + since each field name in the join between DEVS and NOTIFY is actually + section.fieldname, not just fieldname + + The join code automatically adds 2 fields to each GPU device: 'Name' and 'ID' + They don't exist in the API 'devs' output but I can correctly calculate + them from the GPU device data + These two fields are used to join DEVS to NOTIFY i.e. find the NOTIFY + record that has the same Name and ID as the DEVS record and join them + + POOL + + This will use the API 'pools' command and show the selected fields: + POOL, Status, Accepted, Rejected, Last Share Time + Again, I renamed the 'Rejected' field using 'Rejected=Rej', to save space + +$mobilesum lists the sections and fields that should have a total +You can't define them for 'DATE' or 'RIGS' since they are hard coded tables +The example given: + + SUMMARY + Show a total at the bottom of the columns for: + MHS av, Found Blocks, Accepted, Rejected, Utility + + Firstly note that you use the original name i.e. 
for 'Rejected=Rej'
+ you use 'Rejected', not 'Rej' and not 'Rejected=Rej'
+
+ Secondly note that it simply adds up the fields
+ If you ask for a total of a string field you will get the numerical
+ sum of the string data
+
+ DEVS+NOTIFY
+
+ Simply note in this join example that you must use the original field
+ names which are section.fieldname, not just fieldname
+
+ POOL
+ Show a total at the bottom of the columns for:
+ Accepted and Rejected
+
+ Again remember to use the original field name 'Rejected'
+
+---------
+
+With cgminer 2.10.2 and later, miner.php includes an extension to
+the custom pages that allows you to apply SQL style commands to
+the data: where, group, and having
+cgminer 3.4.2 and later also includes another option 'gen'
+cgminer 4.2.0 and later also includes another option 'fmt'
+cgminer 4.2.1 and later also includes another option 'bgen'
+
+An example of an 'ext' section in a more complex custom summary page:
+
+$poolsext = array(
+ 'POOL+STATS' => array(
+ 'where' => null,
+ 'group' => array('POOL.URL', 'POOL.Has Stratum',
+ 'POOL.Stratum Active', 'POOL.Has GBT'),
+ 'calc' => array('POOL.Difficulty Accepted' => 'sum',
+ 'POOL.Difficulty Rejected' => 'sum',
+ 'STATS.Times Sent' => 'sum',
+ 'STATS.Bytes Sent' => 'sum',
+ 'STATS.Times Recv' => 'sum',
+ 'STATS.Bytes Recv' => 'sum'),
+ 'gen' => array('AvShr', 'POOL.Difficulty Accepted/max(POOL.Accepted,1)'),
+ 'having' => array(array('STATS.Bytes Recv', '>', 0)),
+ 'fmt' => 'myfmtfunc'));
+
+function myfmtfunc($section, $name, $value, $when, $alldata,
+ $warnclass, $errorclass, $hiclass, $loclass, $totclass)
+{
+ $ret = '';
+ $class = '';
+ switch ($section.'.'.$name)
+ {
+ case 'GEN.AvShr':
+ $ret = number_format((float)$value, 2);
+ if ($value == 0)
+ $class = $errorclass;
+ break;
+ // Nonsense example :) since total would show the sum of the averages
+ case 'total.AvShr':
+ $ret = $value;
+ if ($value == 0)
+ $class = $warnclass;
+ break;
+ }
+ return array($ret, $class);
+}
+
+This allows 
you to group records together from one or more rigs
+In the example, you'll get each Pool (with the same URL+Stratum+GBT settings)
+listed once for all rigs and a sum of each of the fields listed in 'calc'
+
+
+'where' and 'having' are an array of fields and restrictions to apply
+
+In the above example, it will only display the rows where it contains the
+'STATS.Bytes Recv' field with a value greater than zero
+If the row doesn't have the field, it will always be included
+All restrictions must be true in order for the row to be included
+Any restriction that is invalid or unknown is true
+An empty array, or null, means there are no restrictions
+
+A restriction is formatted as: array('Field', 'restriction', 'value')
+Field is the simple field name as normally displayed, or SECTION.Field
+if it is a joined section (as in this case 'POOL+STATS')
+The list of restrictions are:
+'set' - true if the row contains the 'Field' ('value' is not required or used)
+'=', '<', '<=', '>', '>=' - a numerical comparison
+'eq', 'lt', 'le', 'gt', 'ge' - a case insensitive string comparison
+
+You can have multiple restrictions on a 'Field' - but all must be true to
+include the row containing the 'Field'
+e.g. 
a number range between 0 and 10 would be: +array('STATS.Bytes Recv', '>', 0), array('STATS.Bytes Recv', '<', 10) + +The difference between 'where' and 'having' is that 'where' is applied to the +data before grouping it and 'having' is applied to the data after grouping it +- otherwise they work the same + + +'group' lists the fields to group over and 'calc' lists the function to apply +to other fields that are not part of 'group' + +You can only see fields listed in 'group' and 'calc' + +A 'calc' is formatted as: 'Field' => 'function' +The current list of operations available for 'calc' are: +'sum', 'avg', 'min', 'max', 'lo', 'hi', 'count', 'any' +The first 4 are as expected - the numerical sum, average, minimum or maximum +'lo' is the first string of the list, sorted ignoring case +'hi' is the last string of the list, sorted ignoring case +'count' is the number of rows in the section specified in the calc e.g. + ('DEVS.Name' => 'count') would be the number of DEVS selected in the 'where' + of course any valid 'DEVS.Xyz' would give the same 'count' value +'any' is effectively random: the field value in the 1st row of the grouped data +An unrecognised 'function' uses 'any' + + +A 'fmt' allows you to specify a function to be called by miner.php to format +data to be displayed in the output html +If the function doesn't exist in miner.php or myminer.php, then it will be +ignored +If the function returns a $ret value (see the example 'myfmtfunc' above) then +that will be displayed, however if $ret is empty, then the normal formatting +code will process the data to be displayed +Thus, if there is no formatting code in miner.php for the field value, then it +will be displayed as it was received from the API +i.e. 
this allows you to either supply some php code to format field values +that are not formatted by miner.php, or you can also override the formatting +done by miner.php itself for your chosen list of field data +You can return an ' ' if you wish to force it to display as blank +Use the example 'myfmtfunc' above as a template to write your own +Note that your provided function will be called for all data being displayed, +so you should use the 'case' layout as in the example to select the data fields +you wish to format, but return '' for fields you don't wish to change the way +they are formatted +The 2nd return field is the name of a CSS class in $colourtable or created in +your own $add_css_names and $colouroverride +The value you return can stay in effect even if you return an empty $ret, if +the default formatting function for the field doesn't set the $class variable +The fields passed to your function by miner.php: + $warnclass, $errorclass, $hiclass, $loclass, $totclass +contain the default class names used for formatting + + +A 'gen' or 'bgen' allows you to generate new fields from any php valid function +of any of the other fields + e.g. 
'gen' => array('AvShr', 'POOL.Difficulty Accepted/max(POOL.Accepted,1)), +will generate a new field called GEN.AvShr that is the function shown, which +in this case is the average difficulty of each share submitted + +The difference between 'bgen' and 'gen' is that 'bgen' is done before doing +the 'group' and 'calc', however 'gen' is done after doing 'group' and 'calc' +This means that 'group' and 'calc' can also use 'bgen' fields +As before, 'gen' fields act on the results of the 'group' and 'calc' +If there is no 'group' or 'calc' then they both will produce the same results +Note that 'gen' fields are called 'GEN.field' and 'bgen' fields, 'BGEN.field' + +THERE IS A SECURITY RISK WITH HOW GEN/BGEN WORKS +It simply replaces all the variables with their values and then requests PHP +to execute the formula - thus if a field value returned from a cgminer API +request contained PHP code, it could be executed by your web server +Of course cgminer doesn't do this, but if you do not control the cgminer that +returns the data in the API calls, someone could modify cgminer to return a +PHP string in a field you use in 'gen' or 'bgen' +Thus use 'gen' and 'bgen' at your own risk +If someone feels the urge to write a mathematical interpreter in PHP to get +around this risk, feel free to write one and submit it to the API author for +consideration diff --git a/API.class b/API.class new file mode 100644 index 0000000..0fdf734 Binary files /dev/null and b/API.class differ diff --git a/API.java b/API.java new file mode 100644 index 0000000..8ba0991 --- /dev/null +++ b/API.java @@ -0,0 +1,166 @@ +/* + * + * Copyright (C) Andrew Smith 2012-2013 + * + * Usage: java API command ip port + * + * If any are missing or blank they use the defaults: + * + * command = 'summary' + * ip = '127.0.0.1' + * port = '4028' + * + */ + +import java.net.*; +import java.io.*; + +class API +{ + static private final int MAXRECEIVESIZE = 65535; + + static private Socket socket = null; + + private void 
closeAll() throws Exception + { + if (socket != null) + { + socket.close(); + socket = null; + } + } + + public void display(String result) throws Exception + { + String value; + String name; + String[] sections = result.split("\\|", 0); + + for (int i = 0; i < sections.length; i++) + { + if (sections[i].trim().length() > 0) + { + String[] data = sections[i].split(",", 0); + + for (int j = 0; j < data.length; j++) + { + String[] nameval = data[j].split("=", 2); + + if (j == 0) + { + if (nameval.length > 1 + && Character.isDigit(nameval[1].charAt(0))) + name = nameval[0] + nameval[1]; + else + name = nameval[0]; + + System.out.println("[" + name + "] =>"); + System.out.println("("); + } + + if (nameval.length > 1) + { + name = nameval[0]; + value = nameval[1]; + } + else + { + name = "" + j; + value = nameval[0]; + } + + System.out.println(" ["+name+"] => "+value); + } + System.out.println(")"); + } + } + } + + public void process(String cmd, InetAddress ip, int port) throws Exception + { + StringBuffer sb = new StringBuffer(); + char buf[] = new char[MAXRECEIVESIZE]; + int len = 0; + +System.out.println("Attempting to send '"+cmd+"' to "+ip.getHostAddress()+":"+port); + + try + { + socket = new Socket(ip, port); + PrintStream ps = new PrintStream(socket.getOutputStream()); + ps.print(cmd.toCharArray()); + ps.flush(); + + InputStreamReader isr = new InputStreamReader(socket.getInputStream()); + while (0x80085 > 0) + { + len = isr.read(buf, 0, MAXRECEIVESIZE); + if (len < 1) + break; + sb.append(buf, 0, len); + if (buf[len-1] == '\0') + break; + } + + closeAll(); + } + catch (IOException ioe) + { + System.err.println(ioe.toString()); + closeAll(); + return; + } + + String result = sb.toString(); + + System.out.println("Answer='"+result+"'"); + + display(result); + } + + public API(String command, String _ip, String _port) throws Exception + { + InetAddress ip; + int port; + + try + { + ip = InetAddress.getByName(_ip); + } + catch (UnknownHostException uhe) + { + 
System.err.println("Unknown host " + _ip + ": " + uhe); + return; + } + + try + { + port = Integer.parseInt(_port); + } + catch (NumberFormatException nfe) + { + System.err.println("Invalid port " + _port + ": " + nfe); + return; + } + + process(command, ip, port); + } + + public static void main(String[] params) throws Exception + { + String command = "summary"; + String ip = "127.0.0.1"; + String port = "4028"; + + if (params.length > 0 && params[0].trim().length() > 0) + command = params[0].trim(); + + if (params.length > 1 && params[1].trim().length() > 0) + ip = params[1].trim(); + + if (params.length > 2 && params[2].trim().length() > 0) + port = params[2].trim(); + + new API(command, ip, port); + } +} diff --git a/ASIC-README b/ASIC-README new file mode 100644 index 0000000..f290032 --- /dev/null +++ b/ASIC-README @@ -0,0 +1,763 @@ +SUPPORTED DEVICES + +Currently supported devices include: +- Antminer U1/U2/U2+/U3 USB +- Antminer S1 +- ASICMINER block erupters +- ASICMINER Tube/Prisma +- Avalon (including BitBurner and Klondike) +- Avalon2/3 +- Avalon4 +- BFx2 USB +- Butterfly Labs SC 65/28nm range +- BF1 (bitfury) USB (red and blue) +- BlackArrow Bitfury +- BlackArrow Minion +- Bi*fury USB +- Cointerra +- Hashfast Babyjet and Sierra +- Hashratio +- Hexfury USB +- KnCminer Mercury, Saturn and Jupiter +- Nanofury USB +- Other bitfury USB devices +- Onestring miner USB +- Rockminer R-Box/RK-Box/T1/New R-Box +- Spondoolies SP10, SP30 + + +No COM ports on windows or TTY devices will be used by cgminer as it +communicates directly with them via USB so it is normal for them to not exist or +be disconnected when cgminer is running. + +The BFL devices should come up as one of the following: + +BAJ: BFL ASIC Jalapeño +BAL: BFL ASIC Little Single +BAS: BFL ASIC Single +BAM: BFL ASIC Minirig +BMA: BFL Monarch + +BFL devices need the --enable-bflsc option when compiling cgminer yourself. + +Avalon will come up as AVA. 
+
+Avalon devices need the --enable-avalon option when compiling cgminer.
+
+Avalon2/3 will come up as AV2.
+
+Avalon2/3 devices need the --enable-avalon2 option when compiling cgminer.
+
+Avalon4 will come up as AV4.
+
+Avalon4 devices need the --enable-avalon4 option when compiling cgminer.
+
+Klondike will come up as KLN.
+
+Klondike devices need the --enable-klondike option when compiling cgminer.
+
+ASICMINER block erupters will come up as AMU.
+
+ASICMINER devices need the --enable-icarus option when compiling cgminer.
+Also note that the AMU is managed by the Icarus driver which is detailed
+in the FPGA-README. Configuring them uses the same mechanism as outlined
+below for getting started with USB ASICs.
+
+ASICMINER BlockErupter Tube/Prisma will come up as BET.
+
+ASICMINER Tube/Prisma devices need the --enable-blockerupter option when
+compiling cgminer.
+
+BlackArrow Bitfury devices
+
+BlackArrow Bitfury devices need the --enable-bab option when compiling cgminer.
+
+The current BlackArrow Bitfury devices are similar to the Bitfury GPIO mining
+boards, with both V1 and V2 controllers, and come up as BaB.
+
+
+BlackArrow Minion devices
+
+BlackArrow Minion devices need the --enable-minion option when compiling
+cgminer.
+
+BlackArrow Minion devices are SPI/GPIO mining devices and come up as MBA
+
+
+BITFURY devices
+
+Bitfury devices need the --enable-bitfury option when compiling cgminer.
+
+Currently the BPMC/BGMC BF1 devices AKA redfury/bluefury are supported and
+come up as BF1, along with the Bi*fury USB devices which come up as BXF.
+Nanofury devices come up as NF1. BFx2 devices come up as BXM.
+
+Bitfury USB devices are also set up as per the USB ASICs below.
+
+
+COINTERRA devices
+
+Cointerra devices need the --enable-cointerra option when compiling cgminer.
+
+Cointerra devices come up as CTA devices and currently take only hidden command
+line arguments for power settings. 
+ +Cointerra USB devices are set up as per the USB ASIC instructions below. + + +HASHFAST devices + +Hashfast devices need the --enable-hashfast option when compiling cgminer. + +All current HFA devices are supported and are recognised with the name HFA +in the --usb commands. After initialisation, cgminer will determine what type +they are and give them the following names: + +HFB: Hashfast Babyjet +HFS: Hashfast Sierra +HFA: Hashfast non standard (eg. a Babyjet with an added board, Habanero) + + +HASHRATIO devices + +Hashratio devices need the --enable-hashratio option when compiling cgminer. + + +ANTMINER U1/U2+/U3 devices + +Antminer devices need the --enable-icarus option when compiling cgminer. + +Currently the U1/2/3 USB sticks are supported and come up as the following +devices: + +ANU: Antminer U1/U2/U2+ +AU3: Antminer U3 + +They are also set up as per the USB ASICs below. + +ANTMINER S1 devices + +Antminer S1 devices need the --enable-ants1 option when compiling cgminer. + +They are custom OpenWRT linux devices + +They are recognised with the name ANT + + +BITMINE A1 devices + +Bitmine A1 devices need the --enable-bitmine_A1 compile option set. + + +Rockminer R*Box + +Rockminer R*Box devices need the --enable-icarus compile option set. + +They appear with the following names: +LIN: R-Box +LIR: New R-Box + +--- +GETTING STARTED WITH USB ASICS + +Unlike other software, cgminer uses direct USB communication instead of the +ancient serial USB communication to be much faster, more reliable and use a +lot less CPU. For this reason, setting up for mining with cgminer on these +devices requires different drivers. + + +WINDOWS: + +On windows, the direct USB support requires the installation of a WinUSB +driver (NOT the ftdi_sio driver), and attach it to the chosen USB device. +When configuring your device, plug it in and wait for windows to attempt to +install a driver on its own. It may think it has succeeded or failed but wait +for it to finish regardless. 
This is NOT the driver you want installed. At this +point you need to associate your device with the WinUSB driver. The easiest +way to do this is to use the zadig utility which you must right click on and +run as administrator. Then once you plug in your device you can choose the +"list all devices" from the "option" menu and you should be able to see the +device as something like: "BitFORCE SHA256 SC". Choose the install or replace +driver option and select WinUSB. You can either google for zadig or download +it from the cgminer directory in the DOWNLOADS link above. + +When you first switch a device over to WinUSB with zadig and it shows that +correctly on the left of the zadig window, but it still gives permission +errors, you may need to unplug the USB miner and then plug it back in. Some +users may need to reboot at this point. + + +LINUX: + +On linux, the direct USB support requires no drivers at all. However due to +permissions issues, you may not be able to mine directly on the devices as a +regular user without giving the user access to the device or by mining as +root (administrator). In order to give your regular user access, you can make +him a member of the plugdev group with the following commands: + + sudo usermod -G plugdev -a `whoami` + +If your distribution does not have the plugdev group you can create it with: + + sudo groupadd plugdev + +In order for the BFL devices to instantly be owned by the plugdev group and +accessible by anyone from the plugdev group you can copy the file +"01-cgminer.rules" from the cgminer archive into the /etc/udev/rules.d +directory with the following command: + + sudo cp 01-cgminer.rules /etc/udev/rules.d/ + +After this you can either manually restart udev and re-login, or more easily +just reboot. + + +OSX: + +On OSX, like Linux, no drivers need to be installed. 
However some devices +like the bitfury USB sticks automatically load a driver thinking they're a +modem and the driver needs to be unloaded for cgminer to work: + + sudo kextunload -b com.apple.driver.AppleUSBCDC + sudo kextunload -b com.apple.driver.AppleUSBCDCACMData + +There may be a limit to the number of USB devices that you are allowed to start. +The following set of commands, followed by a reboot will increase that: + + sudo su + touch /etc/sysctl.conf + echo kern.sysv.semume=100 >> /etc/sysctl.conf + chown root:wheel /etc/sysctl.conf + chmod 0644 /etc/sysctl.conf + +Some devices need superuser access to mine on them so cgminer may need to +be started with sudo +i.e.: + sudo cgminer + + +--- + +ASIC SPECIFIC COMMANDS + +--anu-freq Set AntminerU1/2 frequency in MHz, range 125-500 (default: 250.0) +--au3-freq Set AntminerU3 frequency in MHz, range 100-250 (default: 225.0) +--au3-volt Set AntminerU3 voltage in mv, range 725-850, 0 to not set (default: 750) +--avalon-auto Adjust avalon overclock frequency dynamically for best hashrate +--avalon-cutoff Set avalon overheat cut off temperature (default: 60) +--avalon-fan Set fanspeed percentage for avalon, single value or range (default: 20-100) +--avalon-freq Set frequency range for avalon-auto, single value or range +--avalon-options Set avalon options baud:miners:asic:timeout:freq:tech +--avalon-temp Set avalon target temperature (default: 50) +--avalon2-freq Set frequency range for Avalon2, single value or range +--avalon2-voltage Set Avalon2 core voltage, in millivolts +--avalon2-fan Set Avalon2 target fan speed +--avalon2-cutoff Set Avalon2 overheat cut off temperature (default: 88) +--avalon2-fixed-speed Set Avalon2 fan to fixed speed +--avalon4-automatic-voltage Automatic adjust voltage base on module DH +--avalon4-voltage Set Avalon4 core voltage, in millivolts, step: 125 +--avalon4-freq Set frequency for Avalon4, 1 to 3 values, example: 445:385:370 +--avalon4-fan Set Avalon4 target fan speed range 
+--avalon4-temp Set Avalon4 target temperature (default: 42) +--avalon4-cutoff Set Avalon4 overheat cut off temperature (default: 65) +--avalon4-polling-delay Set Avalon4 polling delay value (ms) (default: 20) +--avalon4-ntime-offset Set Avalon4 MM ntime rolling max offset (default: 4) +--avalon4-aucspeed Set Avalon4 AUC IIC bus speed (default: 400000) +--avalon4-aucxdelay Set Avalon4 AUC IIC xfer read delay, 4800 ~= 1ms (default: 9600) +--bab-options Set BaB options max:def:min:up:down:hz:delay:trf +--bet-clk Set clockspeed of ASICMINER Tube/Prisma to (arg+1)*10MHz (default: 23) +--bflsc-overheat Set overheat temperature where BFLSC devices throttle, 0 to disable (default: 90) +--bitburner-fury-options Override avalon-options for BitBurner Fury boards baud:miners:asic:timeout:freq +--bitburner-fury-voltage Set BitBurner Fury core voltage, in millivolts +--bitburner-voltage Set BitBurner (Avalon) core voltage, in millivolts +--bitmain-auto Adjust bitmain overclock frequency dynamically for best hashrate +--bitmain-cutoff Set bitmain overheat cut off temperature +--bitmain-fan Set fanspeed percentage for bitmain, single value or range (default: 20-100) +--bitmain-freq Set frequency range for bitmain-auto, single value or range +--bitmain-hwerror Set bitmain device detect hardware error +--bitmain-options Set bitmain options baud:miners:asic:timeout:freq +--bitmain-temp Set bitmain target temperature +--bxf-bits Set max BXF/HXF bits for overclocking (default: 54) +--bxf-temp-target Set target temperature for BXF/HXF devices (default: 82) +--bxm-bits Set BXM bits for overclocking (default: 54) +--hfa-hash-clock Set hashfast clock speed (default: 550) +--hfa-fail-drop Set how many MHz to drop clockspeed each failure on an overlocked hashfast device (default: 10) +--hfa-fan Set fanspeed percentage for hashfast, single value or range (default: 10-85) +--hfa-name Set a unique name for a single hashfast device specified with --usb or the first device found +--hfa-noshed 
Disable hashfast dynamic core disabling feature +--hfa-options Set hashfast options name:clock or name:clock@voltage (comma separated) +--hfa-temp-overheat Set the hashfast overheat throttling temperature (default: 95) +--hfa-temp-target Set the hashfast target temperature (0 to disable) (default: 88) +--hro-freq Set the hashratio clock frequency (default: 280) +--klondike-options Set klondike options clock:temptarget +--minion-chipreport Seconds to report chip 5min hashrate, range 0-100 (default: 0=disabled) +--minion-freq Set minion chip frequencies in MHz, single value or comma list, range 100-1400 (default: 1200) +--minion-freqchange Millisecond total time to do frequency changes (default: 1000) +--minion-freqpercent Percentage to use when starting up a chip (default: 70%) +--minion-idlecount Report when IdleCount is >0 or changes +--minion-ledcount Turn off led when more than this many chips below the ledlimit (default: 0) +--minion-ledlimit Turn off led when chips GHs are below this (default: 90) +--minion-idlecount Report when IdleCount is >0 or changes +--minion-noautofreq Disable automatic frequency adjustment +--minion-overheat Enable directly halting any chip when the status exceeds 100C +--minion-spidelay Add a delay in microseconds after each SPI I/O +--minion-spireset SPI regular reset: iNNN for I/O count or sNNN for seconds - 0 means none +--minion-spisleep Sleep time in milliseconds when doing an SPI reset +--minion-temp Set minion chip temperature threshold, single value or comma list, range 120-160 (default: 135C) +--nfu-bits Set nanofury bits for overclocking, range 32-63 (default: 50) +--rock-freq Set RockMiner frequency in MHz, range 125-500 (default: 270) + + +ANTMINER S1 DEVICES + +--bitmain-auto Adjust bitmain overclock frequency dynamically for best hashrate +--bitmain-cutoff Set bitmain overheat cut off temperature +--bitmain-fan Set fanspeed percentage for bitmain, single value or range (default: 20-100) +--bitmain-freq Set frequency 
range for bitmain-auto, single value or range +--bitmain-hwerror Set bitmain device detect hardware error +--bitmain-options Set bitmain options baud:miners:asic:timeout:freq +--bitmain-temp Set bitmain target temperature + +The Antminer S1 device comes with its own operating system and a preinstalled +version of cgminer as part of the flash firmware. No configuration should be +necessary. + + +ANTMINER U1/2/3 DEVICES + +--anu-freq Set AntminerU1 frequency in MHz, range 150-500 (default: 200) +--au3-freq Set AntminerU3 frequency in MHz, range 100-250 (default: 225.0) +--au3-volt Set AntminerU3 voltage in mv, range 725-850, 0 to not set (default: 750) + +By default, Antminer U1 devices run at a clockspeed of 200. This command allows +you to specify a chosen frequency to attempt to run all ANU devices at. Cgminer +will try to find the nearest frequency the device supports and will report if +the frequency is not exactly as requested. Note that cgminer reports hashrate +ONLY FROM VALID HASHES so if you increase the frequency but your hashrate does +not increase or it decreases and hardware errors start showing up, you have +overclocked it too much. In the worst case scenario it will fail to start at too +high a speed. Most will run happily up to 250. + +ASICMINER BlockErupter Tube/Prisma DEVICES + +--bet-clk Set clockspeed of ASICMINER Tube/Prisma to (arg+1)*10MHz (default: 23) + +Default clockspeed for Tube/Prisma is 240MHz. This command allows to set clockspeed +of on board BE200 chips in range from 200MHz to 320MHz. For Tube devices, you can +try overclocking to 270MHz or even higher, but NOT recommended for Prisma devices. +If you notice hash rate drops or board fails to start, restart cgminer with lower +clockspeed. 
+ + +AVALON AND BITBURNER DEVICES + +Currently all known Avalon devices come with their own operating system and +a preinstalled version of cgminer as part of the flash firmware, based on the +most current cgminer version so no configuration should be necessary. It is +possible to plug a USB cable from a PC into the Avalon device and mine using +cgminer as per any other device. It will autodetect and hotplug using default +options. You can customise the avalon behaviour by using the avalon-options +command, and adjust its fan control-temperature relationship with avalon-temp. +By default the avalon will also cut off when its temperature reaches 60 +degrees. + +All current BitBurner devices (BitBurner X, BitBurner XX and BitBurner Fury) +emulate Avalon devices, whether or not they use Avalon chips. + +Avalon commands: + +--avalon-auto Adjust avalon overclock frequency dynamically for best hashrate +--avalon-cutoff Set avalon overheat cut off temperature (default: 60) +--avalon-fan Set fanspeed percentage for avalon, single value or range (default: 20-100) +--avalon-freq Set frequency range for avalon-auto, single value or range +--avalon-options Set avalon options baud:miners:asic:timeout:freq:tech +--avalon-temp Set avalon target temperature (default: 50) +--bitburner-fury-options Override avalon-options for BitBurner Fury boards baud:miners:asic:timeout:freq +--bitburner-fury-voltage Set BitBurner Fury core voltage, in millivolts +--bitburner-voltage Set BitBurner (Avalon) core voltage, in millivolts + + +Avalon auto will enable dynamic overclocking gradually increasing and +decreasing the frequency till the highest hashrate that keeps hardware errors +under 2% is achieved. This WILL run your avalon beyond its normal specification +so the usual warnings apply. When avalon-auto is enabled, the avalon-options +for frequency and timeout are used as the starting point only. 
+ +eg: +--avalon-fan 50 +--avalon-fan 40-80 + +By default the avalon fans will be adjusted to maintain a target temperature +over a range from 20 to 100% fanspeed. avalon-fan allows you to limit the +range of fanspeeds to a single value or a range of values. + +eg: +--avalon-freq 300-350 + +In combination with the avalon-auto command, the avalon-freq command allows you +to limit the range of frequencies which auto will adjust to. + +eg: +--avalon-temp 55 + +This will adjust fanspeed to keep the temperature at or slightly below 55. +If you wish the fans to run at maximum speed, setting the target temperature +very low such as 0 will achieve this. This option can be added to the "More +options" entry in the web interface if you do not have a direct way of setting +it. + +eg: +--avalon-cutoff 65 + +This will cut off the avalon should it get up to 65 degrees and will then +re-enable it when it gets to the target temperature as specified by avalon-temp. + +eg: +--avalon-options 115200:24:10:D:1500:55 + +The values are baud : miners : asic count : timeout : frequency : technology. + +Baud: +The device is pretty much hard coded to emulate 115200 baud so you shouldn't +change this. + +Miners: +Most Avalons are 3 module devices, which come to 24 miners. 4 module devices +would use 32 here. + +For BitBurner X and BitBurner XX devices you should use twice the number of +boards in the stack. e.g. for a two-board stack you would use 4. For +BitBurner Fury devices you should use the total number of BitFury chips in the +stack (i.e. 16 times the number of boards). e.g. for a two-board stack you +would use 32. + +Asic count: +Virtually all have 10, so don't change this. BitBurner devices use 10 here +even if the boards have some other number of ASICs. + +Timeout: +This is how long the device will work on a work item before accepting new work +to replace it. It should be changed according to the frequency (last setting). 
+It is possible to set this a little lower if you are trying to tune for short +block mining (eg p2pool) but much lower and the device will start creating +duplicate shares. +A value of 'd' means cgminer will calculate it for you based on the frequency +and is highly recommended. + +Sample settings for valid different frequencies (last 3 values) for 110nm AVAs: +34:375:110 * +36:350:110 * +43:300:110 +45:282:110 (default) +50:256:110 + +Note that setting a value with an asterisk next to it will be using your +avalon outside its spec and you do so at your own risk. + +For 55nm AVAs, the usual values are 8:1500 + +Frequency: +This is the clock speed of the devices. For Avalon 110nm devices, values from +256 upwards are valid with the default being 282 and the maximum practical +being approximately 350. For 55nm devices values from 1000-2000 are valid with +1500 being the default. + +Technology: +What sized technology ASICs are in use in the avalon, choices are 55 or 110, +corresponding to the nm technology chips in use. + +The default frequency for BitBurner X and BitBurner XX boards is 282. The +default frequency for BitBurner Fury boards is 256. Overclocking is +possible - please consult the product documentation and/or manufacturer for +information on safe values. Values outside this range are used at your own +risk. Underclocking is also possible, at least with the X and XX boards. + +eg: +--bitburner-fury-options Override avalon-options for BitBurner Fury boards baud:miners:asic:timeout:freq + +This option takes the same format as --avalon-options. When specified, it +will be used for BitBurner Fury boards in preference to the values specified +in --avalon-options. (If not specified, BitBurner Fury boards will be +controlled by the values used in --avalon options.) See --avalon-options for +a detailed description of the fields. 
+
+This option is particularly useful when using a mixture of different BitBurner
+devices as BitBurner Fury devices generally require significantly different
+clock frequencies from Avalon-based devices. This option is only available
+for boards with recent firmware that are recognized by cgminer as BBF.
+
+eg:
+--bitburner-fury-voltage Set BitBurner Fury core voltage, in millivolts
+
+Sets the core voltage for the BitBurner Fury boards. The default value is
+900. Overvolting is possible - please consult the product documentation
+and/or the manufacturer about the safe range of values. Values outside this
+range are used at your own risk.
+
+This option is only available for boards with recent firmware that are
+recognized by cgminer as BBF. For boards recognized as BTB, see
+--bitburner-voltage
+
+eg:
+--bitburner-voltage Set BitBurner (Avalon) core voltage, in millivolts
+
+Sets the core voltage for the Avalon-based BitBurner X and BitBurner XX
+boards. The default value is 1200. Overvolting and undervolting are
+possible - please consult the product documentation and/or the manufacturer
+for information about the safe range. Values outside this range are used at
+your own risk.
+
+Older BitBurner Fury firmware emulates a BitBurner XX board and is identified
+by cgminer as BTB. On these devices, --bitburner-voltage is used to control
+the voltage of the BitBurner Fury board. The actual core voltage will be
+300mV less than the requested voltage, so to run a BitBurner Fury board at
+950mV use --bitburner-voltage 1250. The default value of 1200 therefore
+corresponds to the default core voltage of 900mV.
+
+
+If you use the full curses based interface with Avalons you will get this
+information:
+AVA 0: 22/ 46C 2400R
+
+The values are:
+ambient temp / highest device temp, lowest detected ASIC cooling fan RPM.
+
+Use the API for more detailed information than this.
+ + +Avalon2 Devices + +--avalon2-freq Set frequency range for Avalon2, single value or range +--avalon2-voltage Set Avalon2 core voltage, in millivolts +--avalon2-fan Set Avalon2 target fan speed +--avalon2-cutoff Set Avalon2 overheat cut off temperature (default: 88) +--avalon2-fixed-speed Set Avalon2 fan to fixed speed + + +Avalon4 Devices + +--avalon4-automatic-voltage Automatic adjust voltage base on module DH +--avalon4-voltage Set Avalon4 core voltage, in millivolts, step: 125 +--avalon4-freq Set frequency for Avalon4, 1 to 3 values, example: 445:385:370 +--avalon4-fan Set Avalon4 target fan speed range +--avalon4-temp Set Avalon4 target temperature (default: 42) +--avalon4-cutoff Set Avalon4 overheat cut off temperature (default: 65) +--avalon4-polling-delay Set Avalon4 polling delay value (ms) (default: 20) +--avalon4-ntime-offset Set Avalon4 MM ntime rolling max offset (default: 4) +--avalon4-aucspeed Set Avalon4 AUC IIC bus speed (default: 400000) +--avalon4-aucxdelay Set Avalon4 AUC IIC xfer read delay, 4800 ~= 1ms (default: 9600) + + +BFLSC Devices + +--bflsc-overheat Set overheat temperature where BFLSC devices throttle, 0 to disable (default: 90) + +This will allow you to change or disable the default temperature where cgminer +throttles BFLSC devices by allowing them to temporarily go idle. + + +BITFURY Devices + +--bxf-bits Set max BXF/HXF bits for overclocking (default: 54) + +In combination with the dynamic clocking on Bi*fury devices, this sets the +highest bit target that cgminer will aim for. + + +--bxf-temp-target Set target temperature for BXF/HXF devices (default: 82) + +Cgminer uses dynamic clocking on Bi*fury devices to try and maintain the +temperature just below an optimal target. This option allows you to change the +target temperature. When actively cooled below this, the devices will run at +maximum speed. + +--bxm-bits Set BXM bits for overclocking (default: 54) + +Choose the overclocking bits for BFx2 devices. 
+
+
+--nfu-bits Set nanofury bits for overclocking, range 32-63 (default: 50)
+
+Cgminer by default sets the clockspeed on nanofury devices to the highest that
+is still within USB2 spec. This value allows you to alter the clockspeed, with
+~54 being the optimal but requiring a higher power or USB3 port.
+
+
+Cointerra Devices
+
+--cta-load (0 - 255)
+--ps-load (0 - 100)
+
+These are undocumented.
+
+
+Drillbit Systems Devices
+
+--drillbit-options Set drillbit options int|ext:clock[:clock_divider][:voltage]
+
+* int/ext defines the clock source - default int. Not all boards support ext.
+* clock_divider must be 1 or 2 with a default of 1. Bitfury only,
+  ignored on Avalon.
+* clock is in MHz, on Drillbit range 80-250 with a default of 200,
+  recommended maximum 230. On Avalon range 500-1000 with a
+  recommended maximum of 800.
+* voltage is ASIC core voltage in millivolts, available values vary per board but
+  default is 850 and the recommended maximum is 950 (Bitfury) and 1000 (Avalon.)
+
+--drillbit-auto every[:gooderr[:baderr[:maxerr]]]
+
+If supported by firmware and device, this feature allows cgminer to
+automatically tweak each ASIC's clock rate up and down to achieve
+optimal performance.
+
+* every - only required param, check each ASIC after each block of
+  this many work units. Recommended value 100.
+* gooderr - the "Good" threshold is when less hardware errors than
+  this per "every" work units, the clock rate will be increased.
+  Default value 1.
+* baderr - the "Bad" threshold is when more hardware errors than
+  this per "every" work units, the clock rate will be decreased.
+  Default value 3.
+* maxerr - the "Max" threshold is when more hardware errors than
+  this per "every" work units (including pre-empting before
+  "every" work units is up), the clock rate will be decreased and
+  will not be increased again past this point. Default value 10.
+
+
+BlackArrow Bitfury devices
+
+--bab-options Set BaB options Max:Def:Min:Up:Down:Hz:Delay:Trf
+
+Any option left blank or starting with 'd' will use the default setting
+If there are not enough options, then the remaining will be left at their
+default value
+
+Max:Def:Min are the chip speed limits to allow, ranging from 52 to 57
+
+Up:Down are the HW error % used to tune the chip speed
+Up means if the HW error % is less than up, over a 5 minute period,
+then increase the chip speed
+Down means if the HW error % is greater than down, over 5 minutes,
+then decrease the chip speed
+
+Hz is the SPI clock speed to use
+
+Delay is the us delay used between bytes for the SPI I/O - default 0
+
+Trf is the us delay used between sends for the SPI I/O - default 0
+
+
+Hashfast devices
+
+--hfa-hash-clock Set hashfast clock speed (default: 550)
+
+This will change the initialisation clock speed on all attached hfa devices.
+Note that if instability is detected by cgminer and the device has to undergo
+a reset, cgminer will lower the clockspeed on resetting it each time till the
+value returns to the default of 550.
+
+--hfa-fail-drop Set how many MHz to drop clockspeed each failure on an overclocked hashfast device (default: 10)
+
+If you overclock your hashfast device with --hfa-hash-clock and cgminer detects
+it failing to return hashes, it will restart it at a lower clock speed if
+possible. Changing this value will allow you to choose how much it will lower
+the clock speed or to disable this function entirely.
+
+--hfa-fan Set fanspeed percentage for hashfast, single value or range (default: 10-85)
+
+This changes the range of fanspeeds used on hashfast devices with firmware that
+supports it. Note that the fanspeed will dynamically change to try and maintain
+a target temperature with --hfa-temp-target but if the target temperature is
+disabled, the fanspeed will remain static.
+eg: +--hfa-fan 25-100 + +--hfa-temp-overheat Set the hashfast overheat throttling temperature (default: 95) + +Cgminer will temporarily stop sending hashfast devices work once this +temperature is reached. Note that with the water cooling in these devices, +temperature recovery is likely to be very quick and the device will start +hashing again after only a very brief period. + +--hfa-temp-target Set the hashfast target temperature (0 to disable) (default: 88) + +On hashfast devices with firmware that supports dynamic fanspeed and die speeds, +cgminer will try to maintain temperature according to this target by adjusting +fanspeed and then if need be, throttle speeds on a die-by-die basis. Disabling +this feature will leave a constant fanspeed and die speed but will not disable +the temp-overheat feature. + +--hfa-name Set a unique name for a single hashfast device specified with --usb or the first device found + +This command allows you to specify the unique name stored in nvram on a single +hashfast device. This name can be queried from the API stats command and comes +up as "op name". Discrete names are used by cgminer to try to maintain settings +across restarts, unplugs/hotplugs and so on. If this command is used by itself, +the name will be given to the first hashfast device it encounters and then +cgminer will proceed to go back to regular mining. If you have multiple devices, +it is best to discretely choose the device you wish to use with the --usb +command. For example +'lsusb' on linux shows the following devices (297c:0001 is a hfa device): + Bus 001 Device 079: ID 297c:0001 + Bus 004 Device 042: ID 297c:0001 +If you wished to name the second device Slug you would add the commands: + --hfa-name Slug --usb 4:42 + +--hfa-noshed Disable hashfast dynamic core disabling feature + +Newer firmwares on hashfast devices dynamically disable cores that generate +invalid data. This command will disable this feature where possible. 
+
+--hfa-options Set hashfast options name:clock or name:clock@voltage (comma separated)
+
+This command allows you to set options for each discrete hashfast device by
+its name (if the firmware has naming support, i.e. version 0.3+). Currently
+this takes as option the clock speed alone or clock speed and voltage,
+although future options may be added.
+e.g.:
+--hfa-options "rabbit:650,turtle:550@800"
+
+Would set a device named rabbit to clock speed 650 MHz using default voltage
+and the one named turtle to 550 MHz using a voltage of 800 mv. Starting the
+device at a speed where it is most stable will give more reliable hashrates
+long term and prevent it interacting with other devices, rather than depending
+on the clockdown feature in cgminer.
+
+Note: Setting voltage causes a board reset and hotplug event on cgminer startup.
+
+Other undocumented hashfast command line options are for development purposes
+only at this stage and serve no useful purpose to end users.
+
+
+Hashratio Devices
+
+--hro-freq Set the hashratio clock frequency (default: 280)
+
+
+Bitmine A1 Devices
+
+--bitmine-a1-options ref_clk:sys_clk:spi_clk:max_chip
+ref_clk: reference clock in kHz (default: 16000)
+sys_clk: target system clock in kHz to be set in PLL (default: 250000)
+spi_clk: SPI clock in kHz (default: 800)
+max_chip: [debug/testing] limit chip chain
+
+Set 0 for fields you want to keep untouched to default, e.g.
+--bitmine-a1-options 0:0:400
+to only set SPI clock to 400kHz
+
+
+Rockminer R-Box Devices
+
+--rock-freq Set RockMiner frequency in MHz, range 125-500 (default: 270)
+
+Note that only a limited range is likely to be accepted (usually 200-290)
+
+---
+
+This code is provided entirely free of charge by the programmer in his spare
+time so donations would be greatly appreciated. Please consider donating to the
+address below.
+ +Con Kolivas +15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..3ec6174 --- /dev/null +++ b/AUTHORS @@ -0,0 +1,7 @@ +Current maintainers and active developers: +Main code+USB+ASIC+maintainer: Con Kolivas 15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ +API+USB+FPGA+ASIC: Andrew Smith 1Jjk2LmktEQKnv8r2cZ9MvLiZwZ9gxabKm + +Legacy: +Original CPU mining software: Jeff Garzik +BitFORCE FPGA mining and refactor: Luke Dashjr 1NbRmS6a4dniwHHoSS9v3tEYUpP1Z5VVdL diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..94a9ed0 --- /dev/null +++ b/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/ChangeLog b/ChangeLog new file mode 100644 index 0000000..c9f6227 --- /dev/null +++ b/ChangeLog @@ -0,0 +1,6 @@ +See git repository ('git log') for full changelog. + +Git repo can be found at: +https://github.com/ckolivas/cgminer + +The NEWS file contains most of the changelog diff --git a/FPGA-README b/FPGA-README new file mode 100644 index 0000000..48751d3 --- /dev/null +++ b/FPGA-README @@ -0,0 +1,271 @@ + +This README contains extended details about FPGA mining with cgminer + + +For ModMinerQuad (MMQ) BitForce (BFL) and Icarus (ICA, BLT, LLT, AMU, CMR) +-------------------------------------------------------------------------- + +When mining on windows, the driver being used will determine if mining will work. + +If the driver doesn't allow mining, you will get a "USB init," error message +i.e. one of: + open device failed, err %d, you need to install a WinUSB driver for the device +or + claim interface %d failed, err %d + +The best solution for this is to use a tool called Zadig to set the driver: + http://sourceforge.net/projects/libwdi/files/zadig/ + +This allows you set the driver for the device to be WinUSB which is usually +required to make it work if you're having problems + +With Zadig, you may need to run it as administrator and if your device is +plugged in but you cannot see it, use the Menu: Options -> List All Devices + +You must also make sure you are using the latest libusb-1.0.dll supplied +with cgminer (not the libusbx version) + +When you first switch a device over to WinUSB with Zadig and it shows that +correctly on the left of the Zadig window, but it still gives permission +errors, you may need to unplug the USB miner and then plug it back in + +- + +When mining on linux, but not using 'sudo' and not logged into 'root' you +may get a USB priviledge error (-3), so you may also need to do the following: + + sudo cp 01-cgminer.rules /etc/udev/rules.d/ + +And also: + sudo usermod -G plugdev -a `whoami` + +If your linux distro 
doesn't have the 'plugdev' group, you can create it like: + sudo groupadd plugdev + +Then reboot ... + +- + +There is a hidden option in cgminer to dump out a lot of information +about USB that will help the developers to assist you if you are having +problems: + + --usb-dump 0 + +It will only help if you have a working FPGA device listed above + + +ModMinerQuad (MMQ) +------------------ + +The mining bitstream does not survive a power cycle, so cgminer will upload +it, if it needs to, before it starts mining (approx 7min 40sec) + +The red LED also flashes while it is uploading the bitstream + +- + +If the MMQ doesn't respond to cgminer at all, or the red LED isn't flashing +then you will need to reset the MMQ + +The red LED should always be flashing when it is mining or ready to mine + +To reset the MMQ, you are best to press the left "RESET" button on the +backplane, then unplug and replug the USB cable + +If your MMQ doesn't have a button on the "RESET" pad, you need to join +the two left pads of the "RESET" pad with conductive wire to reset it. 
+Cutting a small (metal) paper-clip in half works well for this + +Then unplug the USB cable, wait for 5 seconds, then plug it back in + +After you press reset, the red LED near the USB port should blink continuously + +If it still wont work, power off, wait for 5 seconds, then power on the MMQ +This of course means it will upload the bitstream again when you start cgminer + +- + +Device 0 is on the power end of the board + +- + +You must make sure you have an approriate firmware in your MMQ +Read here for official details of changing the firmware: + http://wiki.btcfpga.com/index.php?title=Firmware + +The basics of changing the firmware are: + You need two short pieces of conductive wire if your MMQ doesn't have + buttons on the "RESET" and "ISP" pads on the backplane board + Cutting a small (metal) paper-clip in half works well for this + + Join the 2 left pads of the "RESET" pad with wire and the led will dim + Without disconnecting the "RESET", join the 2 left pads of the "ISP" pad + with a wire and it will stay dim + Release "RESET" then release "ISP" and is should still be dim + Unplug the USB and when you plug it back in it will show up as a mass + storage device + Linux: (as one single line): + mcopy -i /dev/disk/by-id/usb-NXP_LPC134X_IFLASH_ISP000000000-0:0 + modminer091012.bin ::/firmware.bin + Windows: delete the MSD device file firmware.bin and copy in the new one + rename the new file and put it under the same name 'firmware.bin' + Disconnect the USB correctly (so writes are flushed first) + Join and then disconnect "RESET" and then plug the USB back in and it's done + +Best to update to one of the latest 2 listed below if you don't already +have one of them in your MMQ + +The current latest different firmware are: + + Latest for support of normal or TLM bitstream: + http://btcfpga.com/files/firmware/modminer092612-TLM.bin + + Latest with only normal bitstream support (Temps/HW Fix): + http://btcfpga.com/files/firmware/modminer091012.bin + +The code is 
currently tested on the modminer091012.bin firmware. +This comment will be updated when others have been tested + +- + +On many linux distributions there is an app called modem-manager that +may cause problems when it is enabled, due to opening the MMQ device +and writing to it + +The problem will typically present itself by the flashing led on the +backplane going out (no longer flashing) and it takes a power cycle to +re-enable the MMQ firmware - which then can lead to the problem happening +again + +You can either disable/uninstall modem-manager if you don't need it or: +a (hack) solution to this is to blacklist the MMQ USB device in +/lib/udev/rules.d/77-mm-usb-device-blacklist.rules + +Adding 2 lines like this (just above APC) should help +# MMQ +ATTRS{idVendor}=="1fc9", ATTRS{idProduct}=="0003", ENV{ID_MM_DEVICE_IGNORE}="1" + +The change will be lost and need to be re-done, next time you update the +modem-manager software + +TODO: check that all MMQ's have the same product ID + + +BitForce (BFL) +-------------- + +--bfl-range Use nonce range on bitforce devices if supported + +This option is only for bitforce devices. Earlier devices such as the single +did not have any way of doing small amounts of work which meant that a lot of +work could be lost across block changes. Some of the "minirigs" have support +for doing this, so less work is lost across a longpoll. However, it comes at +a cost of 1% in overall hashrate so this feature is disabled by default. It +is only recommended you enable this if you are mining with a minirig on +p2pool. + +C source is included for a bitforce firmware flash utility on Linux only: + bitforce-firmware-flash.c +Using this, you can change the bitstream firmware on bitforce singles. +It is untested with other devices. Use at your own risk! 
+ +To compile: + make bitforce-firmware-flash +To flash your BFL, specify the BFL port and the flash file e.g.: + sudo ./bitforce-firmware-flash /dev/ttyUSB0 alphaminer_832.bfl +It takes a bit under 3 minutes to flash a BFL and shows a progress % counter +Once it completes, you may also need to wait about 15 seconds, +then power the BFL off and on again + +If you get an error at the end of the BFL flash process stating: + "Error reading response from ZBX" +it may have worked successfully anyway. +Test mining on it to be sure if it worked or not. + +You need to give cgminer about 10 minutes mining with the BFL to be sure of +the MH/s value reported with the changed firmware - and the MH/s reported +will be less than the firmware speed since you lose work on every block change. + + +Icarus (ICA, BLT, LLT, AMU, CMR) +-------------------------------- + +There are two hidden options in cgminer when Icarus support is compiled in: + +--icarus-options Set specific FPGA board configurations - one set of values for all or comma separated + baud:work_division:fpga_count + + baud The Serial/USB baud rate - 115200 or 57600 only - default 115200 + work_division The fraction of work divided up for each FPGA chip - 1, 2, 4 or 8 + e.g. 2 means each FPGA does half the nonce range - default 2 + fpga_count The actual number of FPGA working - this would normally be the same + as work_division - range is from 1 up to 'work_division' + It defaults to the value of work_division - or 2 if you don't specify + work_division + +If you define fewer comma seperated values than Icarus devices, the last values will be used +for all extra devices + +An example would be: --icarus-options 57600:2:1 +This would mean: use 57600 baud, the FPGA board divides the work in half however +only 1 FPGA actually runs on the board (e.g. 
like an early CM1 Icarus copy bitstream) + +--icarus-timing Set how the Icarus timing is calculated - one setting/value for all or comma separated + default[=N] Use the default Icarus hash time (2.6316ns) + short=[N] Calculate the hash time and stop adjusting it at ~315 difficulty 1 shares (~1hr) + long=[N] Re-calculate the hash time continuously + value[=N] Specify the hash time in nanoseconds (e.g. 2.6316) and abort time (e.g. 2.6316=80) + +If you define fewer comma seperated values than Icarus devices, the last values will be used +for all extra devices + +Icarus timing is required for devices that do not exactly match a default Icarus Rev3 in +processing speed +If you have an Icarus Rev3 you should not normally need to use --icarus-timing since the +default values will maximise the MH/s and display it correctly + +Icarus timing is used to determine the number of hashes that have been checked when it aborts +a nonce range (including on a LongPoll) +It is also used to determine the elapsed time when it should abort a nonce range to avoid +letting the Icarus go idle, but also to safely maximise that time + +'short' or 'long' mode should only be used on a computer that has enough CPU available to run +cgminer without any CPU delays (an active desktop or swapping computer would not be stable enough) +Any CPU delays while calculating the hash time will affect the result +'short' mode only requires the computer to be stable until it has completed ~315 difficulty 1 shares +'long' mode requires it to always be stable to ensure accuracy, however, over time it continually +corrects itself +The optional additional =N for 'short' or 'long' specifies the limit to set the timeout to in N * 100ms +thus if the timing code calculation is higher while running, it will instead use N * 100ms +This can be set to the appropriate value to ensure the device never goes idle even if the +calculation is negatively affected by system performance + +When in 'short' or 'long' mode, it will 
report the hash time value each time it is re-calculated +In 'short' or 'long' mode, the scan abort time starts at 5 seconds and uses the default 2.6316ns +scan hash time, for the first 5 nonce's or one minute (whichever is longer) + +In 'default' or 'value' mode the 'constants' are calculated once at the start, based on the default +value or the value specified +The optional additional =N specifies to set the default abort at N * 100ms, not the calculated +value, which is ~112 for 2.6316ns + +To determine the hash time value for a non Icarus Rev3 device or an Icarus Rev3 with a different +bitstream to the default one, use 'long' mode and give it at least a few hundred shares, or use +'short' mode and take note of the final hash time value (Hs) calculated +You can also use the RPC API 'stats' command to see the current hash time (Hs) at any time + +The Icarus code currently only works with an FPGA device that supports the same commands as +Icarus Rev3 requires and also is less than ~840MH/s and greater than 2MH/s +If an FPGA device does hash faster than ~840MH/s it should work correctly if you supply the +correct hash time nanoseconds value + +The Icarus code will automatically detect Icarus, Lancelot, AsicminerUSB and Cairnsmore1 +FPGA devices and set default settings to match those devices if you don't specify them + +The timing code itself will affect the Icarus performance since it increases the delay after +work is completed or aborted until it starts again +The increase is, however, extremely small and the actual increase is reported with the +RPC API 'stats' command (a very slow CPU will make it more noticeable) +Using the 'short' mode will remove this delay after 'short' mode completes +The delay doesn't affect the calculation of the correct hash time diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..47f9265 --- /dev/null +++ b/LICENSE @@ -0,0 +1,3 @@ +Cgminer is available under the terms of the GNU Public License version 3. 
+ +See COPYING for details. diff --git a/MCast.class b/MCast.class new file mode 100644 index 0000000..09dde32 Binary files /dev/null and b/MCast.class differ diff --git a/MCast.java b/MCast.java new file mode 100644 index 0000000..4ba237e --- /dev/null +++ b/MCast.java @@ -0,0 +1,333 @@ +/* + * + * Copyright (C) Andrew Smith 2013 + * + * Usage: java MCast [-v] code toaddr port replyport wait + * + * If any are missing or blank they use the defaults: + * + * -v means report how long the last reply took + * + * code = 'FTW' + * toaddr = '224.0.0.75' + * port = '4028' + * replyport = '4027' + * wait = '1000' + * + */ + +import java.net.*; +import java.io.*; +import java.util.*; + +class MCast implements Runnable +{ + static private final String MCAST_CODE = "FTW"; + static private final String MCAST_ADDR = "224.0.0.75"; + static private final int MCAST_PORT = 4028; + static private final int MCAST_REPORT = 4027; + static private final int MCAST_WAIT4 = 1000; + + static private String code = MCAST_CODE; + static private String addr = MCAST_ADDR; + static private int port = MCAST_PORT; + static private int report = MCAST_REPORT; + static private int wait4 = MCAST_WAIT4; + + private InetAddress mcast_addr = null; + + static private final Integer lock = new Integer(666); + + static private boolean ready = false; + + static private Thread listen = null; + + static public boolean verbose = false; + + static private Date start = null; + static private Date last = null; + static boolean got_last = false; + + static public void usAge() + { + System.err.println("usAge: java MCast [-v] [code [toaddr [port [replyport [wait]]]]]"); + System.err.println(" -v=report elapsed ms to last reply"); + System.err.println(" Anything below missing or blank will use it's default"); + System.err.println(" code=X in cgminer-X-Port default="+MCAST_CODE); + System.err.println(" toaddr=multicast address default="+MCAST_ADDR); + System.err.println(" port=multicast port default="+MCAST_PORT); + 
System.err.println(" replyport=local post to listen for replies default="+MCAST_REPORT); + System.err.println(" wait=how long to wait for replies default="+MCAST_WAIT4+"ms"); + System.exit(1); + } + + private int port(String _port, String name) + { + int tmp = 0; + + try + { + tmp = Integer.parseInt(_port); + } + catch (NumberFormatException nfe) + { + System.err.println("Invalid " + name + " - must be a number between 1 and 65535"); + usAge(); + System.exit(1); + } + + if (tmp < 1 || tmp > 65535) + { + System.err.println("Invalid " + name + " - must be a number between 1 and 65535"); + usAge(); + System.exit(1); + } + + return tmp; + } + + public void set_code(String _code) + { + if (_code.length() > 0) + code = _code; + } + + public void set_addr(String _addr) + { + if (_addr.length() > 0) + { + addr = _addr; + + try + { + mcast_addr = InetAddress.getByName(addr); + } + catch (Exception e) + { + System.err.println("ERR: Invalid multicast address"); + usAge(); + System.exit(1); + } + } + } + + public void set_port(String _port) + { + if (_port.length() > 0) + port = port(_port, "port"); + } + + public void set_report(String _report) + { + if (_report.length() > 0) + report = port(_report, "reply port"); + } + + public void set_wait(String _wait4) + { + if (_wait4.length() > 0) + { + try + { + wait4 = Integer.parseInt(_wait4); + } + catch (NumberFormatException nfe) + { + System.err.println("Invalid wait - must be a number between 0ms and 60000ms"); + usAge(); + System.exit(1); + } + + if (wait4 < 0 || wait4 > 60000) + { + System.err.println("Invalid wait - must be a number between 0ms and 60000ms"); + usAge(); + System.exit(1); + } + } + } + + public void run() // listen + { + byte[] message = new byte[1024]; + DatagramSocket socket = null; + DatagramPacket packet = null; + + try + { + socket = new DatagramSocket(report); + packet = new DatagramPacket(message, message.length); + + synchronized (lock) + { + ready = true; + } + + while (true) + { + 
socket.receive(packet); + + synchronized (lock) + { + last = new Date(); + } + + int off = packet.getOffset(); + int len = packet.getLength(); + + System.out.println("Got: '" + new String(message, off, len) + "' from" + packet.getSocketAddress()); + } + } + catch (Exception e) + { + socket.close(); + } + } + + public void sendMCast() + { + try + { + String message = new String("cgminer-" + code + "-" + report); + MulticastSocket socket = null; + DatagramPacket packet = null; + + socket = new MulticastSocket(); + packet = new DatagramPacket(message.getBytes(), message.length(), mcast_addr, port); + + System.out.println("About to send " + message + " to " + mcast_addr + ":" + port); + + start = new Date(); + + socket.send(packet); + + socket.close(); + } + catch (Exception e) + { + e.printStackTrace(); + } + } + + public void init() + { + MCast lis = new MCast(); + listen = new Thread(lis); + listen.start(); + + while (true) + { + synchronized (lock) + { + if (ready) + break; + } + + try + { + Thread.sleep(100); + } + catch (Exception sl1) + { + } + } + + try + { + Thread.sleep(500); + } + catch (Exception sl2) + { + } + + sendMCast(); + + try + { + Thread.sleep(wait4); + } + catch (Exception sl3) + { + } + + listen.interrupt(); + + if (verbose) + { + try + { + Thread.sleep(100); + } + catch (Exception sl4) + { + } + + synchronized (lock) + { + if (last == null) + System.out.println("No replies received"); + else + { + long diff = last.getTime() - start.getTime(); + System.out.println("Last reply took " + diff + "ms"); + } + } + } + + System.exit(0); + } + + public MCast() + { + } + + public static void main(String[] params) throws Exception + { + int p = 0; + + MCast mcast = new MCast(); + + mcast.set_addr(MCAST_ADDR); + + if (params.length > p) + { + if (params[p].equals("-?") + || params[p].equalsIgnoreCase("-h") + || params[p].equalsIgnoreCase("-help") + || params[p].equalsIgnoreCase("--help")) + MCast.usAge(); + else + { + if (params[p].equals("-v")) + { + 
mcast.verbose = true; + p++; + } + + if (params.length > p) + { + mcast.set_code(params[p++]); + + if (params.length > p) + { + mcast.set_addr(params[p++]); + + if (params.length > p) + { + mcast.set_port(params[p++]); + + if (params.length > p) + { + mcast.set_report(params[p++]); + if (params.length > p) + mcast.set_wait(params[p]); + } + } + } + } + } + } + + mcast.init(); + } +} diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..29de3ab --- /dev/null +++ b/Makefile.am @@ -0,0 +1,169 @@ + +ACLOCAL_AMFLAGS = -I m4 + +JANSSON_CPPFLAGS= -I$(top_builddir)/compat/jansson-2.6/src -I$(top_srcdir)/compat/jansson-2.6/src + +if WANT_USBUTILS +USBUTILS_CPPFLAGS = -I$(top_builddir)/compat/libusb-1.0/libusb -I$(top_srcdir)/compat/libusb-1.0/libusb +else +USBUTILS_CPPFLAGS = +endif + +EXTRA_DIST = example.conf linux-usb-cgminer \ + api-example.php miner.php \ + API.class API.java api-example.c windows-build.txt \ + bitstreams/README API-README FPGA-README \ + bitforce-firmware-flash.c hexdump.c ASIC-README \ + 01-cgminer.rules + +SUBDIRS = lib compat ccan + +cgminer_CPPFLAGS = $(PTHREAD_FLAGS) -fno-strict-aliasing $(JANSSON_CPPFLAGS) $(USBUTILS_CPPFLAGS) + +bin_PROGRAMS = cgminer + +cgminer_LDFLAGS = $(PTHREAD_FLAGS) +cgminer_LDADD = $(DLOPEN_FLAGS) @LIBCURL_LIBS@ @JANSSON_LIBS@ @PTHREAD_LIBS@ \ + @NCURSES_LIBS@ @PDCURSES_LIBS@ @WS2_LIBS@ \ + @LIBUSB_LIBS@ @MM_LIBS@ @RT_LIBS@ \ + @MATH_LIBS@ lib/libgnu.a ccan/libccan.a + +cgminer_CPPFLAGS += -I$(top_builddir)/lib -I$(top_srcdir)/lib + +if !HAVE_WINDOWS +cgminer_CPPFLAGS += @LIBCURL_CFLAGS@ +endif + +# common sources +cgminer_SOURCES := cgminer.c + +cgminer_SOURCES += elist.h miner.h compat.h bench_block.h \ + util.c util.h uthash.h logging.h \ + sha2.c sha2.h api.c + +cgminer_SOURCES += logging.c + +cgminer_SOURCES += klist.h klist.c + +cgminer_SOURCES += noncedup.c + +if NEED_FPGAUTILS +cgminer_SOURCES += fpgautils.c fpgautils.h +endif + +if WANT_USBUTILS +cgminer_SOURCES += usbutils.c usbutils.h 
+endif + +if WANT_LIBBITFURY +cgminer_SOURCES += libbitfury.c libbitfury.h mcp2210.c mcp2210.h +endif + +if WANT_CRC16 +cgminer_SOURCES += crc16.c crc.h +endif + +# Device drivers +if HAS_AVALON +cgminer_SOURCES += driver-avalon.c driver-avalon.h +endif + +if HAS_KNC +cgminer_SOURCES += driver-knc.c knc-asic.c knc-asic.h knc-transport.h knc-transport-spi.c +cgminer_LDADD += -lz +endif + +if HAS_BFLSC +cgminer_SOURCES += driver-bflsc.c driver-bflsc.h +endif + +if HAS_BITFORCE +cgminer_SOURCES += driver-bitforce.c +endif + +if HAS_HASHFAST +cgminer_SOURCES += driver-hashfast.c driver-hashfast.h hf_protocol.h hf_protocol_be.h +endif + +if HAS_HASHRATIO +cgminer_SOURCES += driver-hashratio.c driver-hashratio.h +endif + +if HAS_BITFURY +cgminer_SOURCES += driver-bitfury.c driver-bitfury.h +endif + +if HAS_BITMINE_A1 +cgminer_SOURCES += driver-SPI-bitmine-A1.c +cgminer_SOURCES += spi-context.c spi-context.h +cgminer_SOURCES += A1-common.h +cgminer_SOURCES += A1-board-selector.h +cgminer_SOURCES += A1-board-selector-CCD.c A1-board-selector-CCR.c +cgminer_SOURCES += A1-trimpot-mcp4x.h A1-trimpot-mcp4x.c +cgminer_SOURCES += i2c-context.c i2c-context.h +endif + +if HAS_DRILLBIT +cgminer_SOURCES += driver-drillbit.c driver-drillbit.h +endif + +if HAS_ICARUS +cgminer_SOURCES += driver-icarus.c +endif + +if HAS_KLONDIKE +cgminer_SOURCES += driver-klondike.c +endif + +if HAS_COINTERRA +cgminer_SOURCES += driver-cointerra.c driver-cointerra.h +endif + +if HAS_SP10 +cgminer_SOURCES += driver-spondoolies-sp10.c driver-spondoolies-sp10.h \ + driver-spondoolies-sp10-p.c driver-spondoolies-sp10-p.h +endif + + +if HAS_SP30 +cgminer_SOURCES += driver-spondoolies-sp30.c driver-spondoolies-sp30.h \ + driver-spondoolies-sp30-p.c driver-spondoolies-sp30-p.h +endif + +if HAS_BAB +cgminer_SOURCES += driver-bab.c +endif + +if HAS_AVALON2 +cgminer_SOURCES += driver-avalon2.c driver-avalon2.h +endif + +if HAS_AVALON4 +cgminer_SOURCES += driver-avalon4.c driver-avalon4.h +endif + +if HAS_MINION 
+cgminer_SOURCES += driver-minion.c +endif + +if HAS_BMSC +cgminer_SOURCES += driver-bmsc.c +endif + +if HAS_BITMAIN +cgminer_SOURCES += driver-bitmain.c driver-bitmain.h +endif + +if HAS_BITMAIN_C5 +cgminer_SOURCES += driver-btm-c5.c driver-btm-c5.h sha2_c5.c sha2_c5.h +endif + +if HAS_MODMINER +cgminer_SOURCES += driver-modminer.c +bitstreamsdir = $(bindir)/bitstreams +dist_bitstreams_DATA = $(top_srcdir)/bitstreams/README +endif + +if HAS_BLOCKERUPTER +cgminer_SOURCES += driver-blockerupter.c driver-blockerupter.h +endif diff --git a/NEWS b/NEWS new file mode 100644 index 0000000..86e9f53 --- /dev/null +++ b/NEWS @@ -0,0 +1,7017 @@ +Version 4.9.0 - 16th December 2014 + +- Minor fix +- Fix MM41 voltage setting +- Fix the default settings of new module +- Count non matching stratum as a hw error on ava4 +- Fix ava4 build incompatibilites and missing write config parameters +- Use strcasecmp for device matching in usbutils in case of subtle manufacturer +changes +- Add manufacturer and product definitions for ava4 +- Cosmetic ava4 change +- Cosmetic ava4 message fixes +- Add sanity check for NULL data being passed to usb_perform_transfer +- All write errors should be treated as fatal for ava4 devices +- Change initial fan start speed, mins and max for avalon4 to ensure fan starts +spinning but can go lower RPM +- Disable zero length packets on ava4 before trying to init +- Add a cgpu device option to disable zero length packets and enable it for +avalon4 +- Display ava4 stats consistent with other devices +- Add ava4 to udev rules file +- Fix build warnings on ava4 +- Fix ava4 build +- Add Avalon4 support +- Filter duplicate stratum shares from being submitted upstream +- Do rudimentary detection of duplicate shares per device + + +Version 4.8.0 - 25th November 2014 + +- Allow forcing of building driver combinations with --enable-forcecombo +- Put spaces between name and id in avalon2 and icarus +- Relax detection of a failing ava2 to more than 1 minute and perform 
the test +after polling for results +- Cap maximum diff on ava2 in order to still get shares +- Put space between device name and id to prevent device names with numbers in +them confusing the display +- USB write errors are always fatal so they should be treated as such on ava2 +- Issue a usb reset for ava2 that is not returning valid shares and then drop it +if it persists for over a minute +- Process share results without a result value +- Damp out hashrate displayed for antminer USBs +- Add voltage and speed where relevant to antminer USBs +- Don't estimate time on any antminer usb during a timeout +- Return icarus nonce ok only when the nonce size matches the device or more +- Don't discard old workids until we cycle back to them on antusb and look for +more nonces in the buffer +- Adjust ant usb timing for queued work +- Use a cyclical list for the ant queued work +- Mask and limit workid for antusb and dont clear buffer +- Check the nonce on the worked item, not the submitted work +- Skip over unfinished work that we can't free in ant usb +- Use a workid and array if possible for the small ant usb work queue +- Create an array for antworks for antminer usb devices +- On U3 calculate hashrate purely on shares, not timeouts +- Add switches for AU3 +- Adjust icarus wait timeout according to device +- Differentiate U3 from U1/2 as a separate driver with different parameters and +adjust timing accordingly +- Skip ANUs detected in rock detect +- Try U3 after trying other icarus options +- Add rudimentary ANU voltage setting support for U3 +- Fix ignoring unprefixed v6 address in api allow list +- Fix minor typos in Spondoolies SP10 and SP30 drivers +- Implement a basic rock_flush function to discard the base work we are rolling +work from. 
+- Task_no for rockminer from the nonce bin should simply be masked +- Change rbox default correction times to 5 in a revised frequency order +- Change default frequency on T1 to 330 +- Reinstate last received check and resend in rockminer, being more lenient at 2 +seconds to allow for dither errors at 1 +- Roll work for the rbox when possible + + +Version 4.7.1 - 4th November 2014 + +- Selectively yield on dropping a lock only on single CPU platforms +- Make it impossible to configure in more than one device that is meant to be +standalone. Add more information to configure help, along with comments for new +drivers. +- Add warning against system libusb in configure help +- stratum_rthread sleep only 3s when all the pool have disconnected +- Filter responses that don't have a result +- Implement support for pool ping and json integers of zero in getversion and +ping +- Fix segfault when writing config with hashratio built in +- Save pools in priority order at time of writing config +- Set the correct flag for close on exec for sockets +- Suspend stratum on removing a pool +- Set CLOEXEC on sockets on linux +- Drivers that take a diff should specify a max diff or it is assumed they don't +support one so set max_diff to 1 if unset +- Send hfa generic frame only if voltage was specified on the command line for +that device +- Set hashfast voltage settings only when really needed +- Hashfast voltage support +- Increase max diff on sp30 to 1024 +- Reset ipv6 flag to false in every api-allow loop +- undeclared identifier 'IPV6_ADD_MEMBERSHIP' fix for apple +- two back temps spondoolies2 +- two back temps spondoolies +- correct suggest_difficulty json rpc call +- Add more usb3 hub identifiers for windows +- Set driver max diff to large value if unset +- Wake gws on get queued +- Implement blacklisting of attempting to match known products from ones without +identifiers +- Fix hfa driver building without libcurl +- Enable building libusb without udev +- Fix off by one 
calculation error in sp30 leading zeroes +- Send correct diff work to sp30 for hashmeter to be correct +- Do the sleep in spondoolies_queue_full_sp30 after dropping the lock +- Minor tidy in sp30 driver +- Fix sp30 warnings + +Version 4.7.0 - 14th October 2014 + +- Implement generic inet_pton for windows +- Fix warnings +- Fix bulk of remaining style in blockerupter.c +- Tidy style in blockerupter.h +- Tidy bulk of style in blockerupter.c +- Fix missing minimum diff setting for blockerupter +- Fix unused variable warnings +- remove unnecessary sleep; fix potenital div by 0 errs; use min_diff in driver +definition +- Fix coding style +- Make the sp30 hashrate meter based on valid share generation +- Change default max queue back to 1 in line with speed of most current asic +controllers +- Change diff limits to values suitable for sp30 +- Add pool number to response from addpool to the API +- Make the restart and quit API commands valid json responses +- Fix number of nos +- Add option to set clock ('--bet-clk X' actual clock is (X+1)*10 ) +- compatible with X24 board +- Fix error when using v6 without mask in api-allow +- Support ipv6 multicast +- Set min_diff to 1 +- Allow arbitrary clamping of lower device diffs for slow controllers by driver +- Don't set default fan to max on hashratio +- The 2nd read never gets anything on ava2 so remove it entirely and just return +an error if we are out of sync +- Implement support for mining.suggest_difficulty +- Fix client ip address output +- Free addrinfo garbage +- Remove the brackets when using v6 pool address +- Add ipv6 support for api listen +- Avalon Nano: Add support Avalon Nano usb miner +- fix bug in setdiff +- limit minimum diff to 64 +- Add BlockErupter Driver +- Avalon2: display currect max temperature on statline +- Remove unused variable + + +Version 4.6.1 - 20th September 2014 + +- Throttle bflsc28 devices when they hit the overheat limit +- Add whitelisting of firmware used in final bflsc28 products +- 
API.java - remove lowercase of all data sent +- Avalon2: Add 3 bytes nonce2 support +- Avalon2: MM needs n2size length <= 4 +- Use fan min as fan speed when run with --avalon2-fixed-speed +- Clear the pool submit fail bool after adding shares to the stratum hashtable +to minimise window the share is not in the table +- api-example unlimited socket works +- Add custom strcasestr and use custom gnu type functions in bflsc +- Fix windows build of bflsc driver +- Fix possible deref in bflsc28 + + +Version 4.6.0 - 7th September 2014 + +- We should not be checking for pool_unworkable in cnx_needed as it is keeping +stratum connections open on unused pools +- Properly handle lack of input when adding pool via menu +- Allow workers without passwords +- minion - increase max chip number +- Avalon2: add more comments on Avalon2 options +- Avalon2: add polling delay option, long coinbase support, LED status on API, +change overheat from 88 to 98 +- minion - add a ' before non-zero core error counts +- minion - hidden optional per core nonce stats +- bflsc28 - clock is hex +- bflsc28 - allow setting clock and volt from the API ascset command +- bflsc28 - add chip count to stats +- bflsc28 stats +- Simplehacks to better serve bflsc28 +- Only use one hashtable for bflsc28 work queued +- Copy back the buffer after we've stripped the inprocess field on bflsc +- Parse results for BMA based on uid and remove work from the queue when found +- Strip out the inprocess details from bflsc results if it exists +- Create a hashtable of work by uid as it's accepted by BMA +- Add some rudimentary values for BMA sleep times +- Fix various errors in queueing work for bflsc28 and limit job queueing to 10 +to fit within a usb frame +- Create preliminary work queueing for bflsc28 using jobs of up to 20 at a time +by rolling work where possible +- Convert all bflsc transfers to the full 512 bytes +- Don't mistake bflsc28 for fpga +- Do initial detection of bflsc28 devices + + +Version 4.5.0 - 29th 
July 2014 + +- Fix windows build for hashratio and ava2 +- Demote bad checksum message in cointerra driver but allow message to still be +parsed since it won't allow existing firmwares to work otherwise +- Reorder and document the configure options +- Merge https://github.com/KnCMiner/cgminer into knc +- Change default voltage on ava2 to 0.666V because Satan +- Enable combined building of avalon2 and hashratio +- Fix stratum embedded fpgas to not duplicate work with other devices +- Implement smarter PID type fan control for ava2 allowing more generous +temperatures and far lower fan speeds for optimal power and noise usage. Adjust +default frequency to 450 as per recommendation. +- Fix various warnings in ava2 +- Go back to polling design since async will not work for ava2 and fix various +read design errors +- Fix error in 2nd read functions for av2 and hro +- Correct init and read sequence for ava2, and convert from a polling mechanism +to a separate read thread +- Initial commit of ava2 conversion to direct USB +- Display frequency and voltage with ava2 on the statline +- Store fan percentage and display temp and fan percent for ava2 +- Set avalon2 frequency and voltage to appropriate defaults if none are +specified on the command line +- Demote some ava2 messages that don't need to be errors and remove unused works +array +- Fix broken fan logic for ava2 +- Fix hexdump on 64bit +- rockminer frequency is between 200 and 400 MHz +- fix jansson include path in cgminer-api compile instructions +- Remove requirement for ifndefs for avalon2 from the generic driver work +function +- Fix hashratio device name +- Make submit_nonce2_nonce return whether the share was valid or not +- Reinstate missing necessary init sequence for hashratio +- Handle disconnected hashratio devices +- Add hashratio frequency command line +- Fix stratum updates not being passed to hashratio devices and clean up +- Move to updated avalon2 type driver model for hashratio device +- Initial 
import and conversion of hashratio driver to direct USB +- Increase the internal buffer for API response, as "stats" command response +can grow greater than 8K +- Detach test pool thread only if we have a blocking startup + + +Version 4.4.2 - 17th July 2014 + +- Remove the use of the pthread_tryjoin_np which is currently unimplemented on +many platforms +- Fix processarg parameters loaded from a config file not being saveable +- We only use the jansson in our source tree so no need for special case +handling of older versions +- Upgrade jansson to 2.6 +- Only clear sockbuf if it's been allocated +- Fix missing osm-led-mode support in write config +- Deal with nanosecond overflow in both directions on both addition and +subtration of timespecs +- Rename sp10 driver internally from spondoolies to sp10 +- minion - add a 2nd (optional - disabled) reset test +- production stats added, reset queue added +- minion - correct led ghs2 choice +- minion - correct ghs2 display +- minion - reset the led counter when doing a chip RSTN full reset +- minion - don't reset the led counter for an SPI reset +- minion - led per chip and use all time average +- minion - report spi error counts and settings in stats +- minion - undeclared fix +- minion - chip power cycle option +- minion - record 0xff error history and reduce screen output +- minion - reset on result or fifo 0xff +- minion - clarify the 0 value of spireset +- minion - make SPI reset more configurable +- minion - make the SPI reset ms sleep a parameter and API settable +- sp10 sensors +- sp30 +- minion - led+more api setting +- Avoid blocking all pool testing if one pool fails to ever init +- There is no point storing the hints addrinfo in struct pool +- minion - 'reset' SPI when getting errors +- initialise more pool values in benchmark +- minion - auto adjust freq +- merge upstream frequency changes +- icarus - timing history in its own function +- rbox - add lotsa stats, tidy up a bit more +- Fix an off-by-one. 
+- icarus - detect stat should be LOG_DEBUG +- icarus - tidy up rbox code, remove statics, and add rocketbox +- minion - do an early reset to clear the chip status +- minion - use descriptive names for the list types +- Avalon2: automatic adjust fan speed, using crc16 on job_id compare, turn on +the led by API, detect twice when start, remember the last stratum message +increase the hashrate, add cutoff option +- fix AntS1 breakages from AntS2 changes +- minion - disable dup nonce check +- minion - add an ioseq number for each ioctl to simplify work ordering +- minion - redo old work expiry based on txrx order +- minion - more work stats, minimise queued work, free flushed queued work +- minion - allow resetting a chip via the API +- minion - correct 'WQue Count' in stats +- minion - delay after reset, reset history also, add dups to api stats +- noncedup - give access to the internal stats +- minion - increase reset to 75% +- minion - dup checking, disable reread by default and extra ioctl debugging +- minion - always check the chip queue before queuing new work + + +Version 4.4.1 - 21st June 2014 + +- Move icarus driver to being seen as an asic +- Clear usb reads on each pass through icarus detect to hopefully prevent false +positives for detecting rboxes +- Clean up pool failure and failover code for stratum + + +Version 4.4.0 - 16th June 2014 + +- Tidy unused rockminer variables +- Tidy rockminer defines +- Make rockminer driver compatible with other icarus drivers being present +- Import basic rbox driver +- minion - add optional (on) GPIO chip selection +- Clear the pool idle flag in the pool test thread +- CoreFmatch in cointerra should be a uint16 +- Display error message if we receive one on share rejects +- Allow zero length strings to be passed to valid_hex +- delete unused roundl definition + + +Version 4.3.5 - 10th June 2014 + +- Cointerra driver updates. 
+- Sleep before retrying in the test pool thread after a pool has died +- Use valid_ascii testing for job_id since it need not be hex only +- Only show slow/down message till pool is flagged idle +- Do some random sanity checking for stratum message parsing +- Keep looking for when a pool comes to life at startup and touch the logwin so +the message is not invisible +- Fix no libcurl build +- Added Drillbit Thumb to udev rules. +- Avoid dereference on getting API stats on partially initialised HFA instances +- A1: add support for updated product variants, small fixes +- Add one more usbutils fix +- Convert uses of usbutils memcpy to cg_memcpy +- Add a sanity checking memcpy function which checks for overflows +- minion - count force use reread +- minion - add a disabled ioctl() test +- minion - add more checking of SPI results for corruption +- minion - optional (disabled) ioctl() debug +- Increase S1 overheat to 75 degrees C +- Add ruby api-example to API-README +- minion - allow core selection at runtime +- API - lcd all-in-one brief summary + + +Version 4.3.4 - 25th May 2014 + +- Add support for 2 nonces per block in spond driver +- Increase timeout on reset in cta driver to 5 seconds +- Increase max diff on spondoolies driver slightly to be well below spi comms +limitations +- Use the active contents lock and safe list iteration within the linux usbfs +code +- Add Ruby Api Example +- Automatic detect the small miners +- Update default modules from 3 to 4 +- Fix the temp max. we should use currect max temp +- add avalon2-cutoff options +- Enable the cutofftemp to Avalon2. 
ignore longer coinbase and longer merkles +stratum +- Fix the diff value used on MM firmware +- Mark pool as idle if stratum restart is failed +- Add hacky workaround for double list removal race in libusb +- Make the work given in benchmark mode deterministic on a per-device basis +- Rework the benchmarking code to use a deterministic set of work items with a +known number of diff share nonces at regular spaced intervals +- minion - restrict nonce read result size to ioctl() limit +- minion - must check temp when overheated +- minion - idle chips that hit >100C until back to 80C +- minion - report the chip/reg when aborting due to an invalid ioctl() size +- minion - all freq in Mhz but only convert when used +- minion - remove unused ioctl debug +- minion - command queue is now larger +- minion - check rolled in stale work cleanup +- Work stats should be based on device_diff not work_difficulty since non-shares +haven't been filtered out yet +- Prevent a segfault when writing a config file containing 'rotate' option +- minion - comment out HW debug message +- minion - roll work to reduce CPU +- minion - report init_freq in stats +- api - howoldsec is only used for USB +- minion - allow setting the frequency +- minion - disable iostats by default since it slows down mining +- minion - define frequency value table +- minion - report temp/cores/freq and handle temp formatting +- minion - item is undefined +- Rationalise diffs stored in the work struct and document them to avoid further +confusion +- Add basic API stats for nfu drivers to see how many submits each chip returns +- Add output direction for the EN0 pin on nfu driver +- Support power management optimisations in newer nf* firmware +- Support variable numbers of chips with NFU and BXM drivers +- Identify number of chips in nanofury devices and change name accordingly +- Rename nf1 driver to nfu in anticipation of support for more chips +- Make hashfast reset counter rise on old instances when inheriting the 
value on +new ones + + +Version 4.3.3 - 3rd May 2014 + +- Fix typo +- Work should be freed when aged, fixing a massive memory leak for bxf devices +- miner.php fix single rig summary/config field formatting +- miner.php fix single rig total formatting + + +Version 4.3.2 - 2nd May 2014 + +- Fix accounting bug with nrolltime drivers + + +Version 4.3.1 - 2nd May 2014 + +- upgrade some int to int64_t to avoid overflows in reporting +- Make reconnection messages more explanatory +- Stratum client.reconnect require matching URL +- Fix memory leak in submit_noffset_nonce +- Clean up any work that may not have been used in the work scheduler +- Avoid unnecessary deref now that it's done within discard_work +- Clean work pointers after one way usage functions +- Avoid unnecessary total_work_inc in generating local work +- Cosmetic fixes +- Fix idle bug, when redirected client can't auth +- Rename spond temp rate to asics total rate +- Build fixes +- Set the unique id only for usb devices with serial strings longer than 4 chars +long +- Use usb serial strings as unique id if devices have them +- Discretely identify the onestring miners as OSM +- Add bxf debugging option and osm led modes +- A1: modularize board selector / add initial CCR support +- A1: cleanup tca9535 logging +- A1: fix and extend PLL parameters +- A1: clean up compile warnings +- A1: use real level in hexdump +- Add identification for onestring miner variants +- Avalon2: Parser the power good signal +- driver-avalon2: this functions used on detect, which don't have thr setup yet + + +Version 4.3.0 - 18th April 2014 + +- Put sleep in spond hash instead of queue full function +- Remove unused function for when compiled without curses +- Fix typo +- Add temperature rate, front, rear and device temperature to spond API output +- Limit bxf sleep in bxf_scan to 100ms minimum for strings of many chips +- -Werror=format-security error on driver-bitmain.c +- Fix parameters passed with getblockhash +- Check the block 
hash with the proper command when looking for orphan chains +- syslog requires a facility ... in more than one place +- Shuffle windows headers included +- Adjust the bxf sleep time according to the number of chips detected +- Fix off by one error in bxf chip count when adjusting device size +- Recalloc correct pointer +- Make instructions associated with winusb error even more explicit +- Add midsing headers to cgminer source in Makefile +- Trivial style changes to mg proto parser +- Trivial style and warning clean ups on spondoolies driver +- Merge spondoolies driver patch +- Call any BXF device with 3-6 chips reported HXF +- Avoid derefrence when calling statline before on hfa device during init +sequence +- Calloc the info structures even on failed hfa reset to prevent later possible +dereference +- Load all hfa devices based on identification alone and defer init sequence +till mining thread init sequence to allow all devices to be recognised rapidly +but each device initialisation not delay others +- Do not do thread shutdown unless thread init succeeded +- Remove unnecessary check for thread_prepare function +- Recognise variations on BXF based on chip value returned in responses +- Provide helper function for recallocing memory +- syslog requires a facility + + +Version 4.2.3 - 3rd April 2014 + +- Decay the per device hashrates when only the watchdog is calling the hashmeter +- Fix parsing of config files failing on custom parsing +- Allow an arbitrary number of chips in the BXF driver, showing results from +each chip in the API and identify the hexfury, naming it HXF +- Disable toggling display by default and offer a --widescreen option to have +all the information on an extra wide display. +- Use OPT_WITH_CBARG for all custom parsing functions to allow their values to +be written generically when writing the config file from the menu. 
+- Provide a ccan variant OPT_WITH_CBARG that assigns the arguments passed as a +string and then performs the callback function on the string. +- Define strings to store special option parsing parameters leaving no +OPT_WITH_ARG missing args +- Correct the writing of special case options to the config file +- Provide support for writing anu freq from menu write option +- Update to driver-avalon2.c +- Generalise a lot more of the command line options simplifying the write config +function and making it write far more values unaided +- Use the general opt_set_charp functions for setting api parameters +- Json escape any strings written to the config file +- Store standard charp options when writing config files +- Add support for all the integer range options when writing the config file +from the menu +- Remove the --device and --remove-disabled options which don't work in a +meaningful way any more +- Make the bxf bits configurable on the command line +- Provide a --btc-sig option to optionally add a custom signature to the solo +mining coinbase +- Compact gbt solo extra data and store the length, allowing it to be variable, +leaving room for a signature +- miner.php - Kano summary Pool Acc/Rej should be only work submitted +- miner.php add best share and gen formatting for pool summary +- miner.php - remove BGEN/GEN eval() errors from the web log +- miner.php allow optional fields when gen is disabled +- miner.php dont format missing gen fields +- miner.php make Summary a custompage +- miner.php allow users and system lists of customsummarypages and add more +examples +- Fix getwork share submission +- Cosmetic fix to udev rules +- Put WU on the hashrate status to compact lines further +- miner.php show api/rig errors at the top of a customsummarypage + + +Version 4.2.2 - 29th March 2014 + +- Minor correctness fix for unnecessary free +- Clean up various curl build issues +- allow url based config files +- Frequency only needs 3 digits for cointerra statline +- Use 
the serial number as unique_id for cta display +- Make it possible to enable/disable the status window from switching via the +display menu +- We should not update the tv hashmeter time unless we're updating the hashrates +- Add cointerra devices to udev rules. +- Use hashfast unique id instead of number since the unique id is displayed +- Remove displayed space +- Left align the displayed unique id +- Use the hashfast opname as its unique identifier +- Display BF1 serial number as its unique identifier +- Display a unique identifier instead of a number if the device has one +- Use an alternating status display to return to a compact width of 80 +characters, allowing more information to be displayed. +- No need for looking for khash hashrates in summary any more +- Fix two potential minor mem leaks +- Fix memory leaks in setup and generate work for gbt solo. +- Fix off by one malloc size error +- Fix memory leak in update_gbt_solo +- Put sanity check on decay_time to prevent updates with no time +- Add 3 rolling average hashrates to API output for summary and devs. +- Use the extra status screen real estate better, displaying rolling 1/5/15min +average hashrates as well. +- Revamp the ageing crufty hashmeter code to have proper exponential decaying +values and store rolling 1/5/15min hashrates. +- Increment total_work under control lock. +- Trivial variable reuse +- Add support for other usb3 hubs on windows + + +Version 4.2.1 - 24th March 2014 + +- Fix various ava2 build issues generically +- Minimise the amount of heap memory allocations/frees when submitting gbt +shares. +- Make varint in gbt submission a stack object. +- Fix big endian problems with gbt submissions. +- Fix 32bit overflow on relative diff shown. +- ants1 - stop results read hard looping +- ants1 - slow down mining if overheat occurs +- miner.php allow gen before (bgen) and after (gen) grouping +- Change default solo mining to failing when no btc address is specified. 
+- Use upgrade cglock variants in get_gbt_curl +- Provide a cg_uilock to unlock the intermediate variant of cglocks. +- Use the one curl instance for all gbt solo operations, protecting its use with +a bool set under gbt lock. +- Only start block detection with gbt solo if setup succeeded +- One less block detection message +- Toss out the curl handle after each solo poll +- Don't reuse any curl handles for solo mining and break out of the lp thread if +the pool is removed. +- Make sure to only start the lognpoll thread once on gbt solo. +- Don't keep RPC connections open for solo mining since bitcoind doesn't like +having many persistent connections. +- GBT solo pools should be considered localgen pools. +- miner.php - speed up formatting and allow calc on gen fields +- Always show the address we're solo mining to to avoid confusion for when no +address is set. + + +Version 4.2.0 - 18th March 2014 + +- Fix missing htobe16 on windows and meaningless >u32 string warning. +- Software ntime roll for all hashfast devices. +- Silence harmless warning. +- Drop a failed restart icarus device to allow it to be rehotplugged if +possible. +- Work with more than one transaction. +- Kill gbt solo pools that don't respond to the gbt request 5 times +sequentially. +- Fix ser_number for no remaining val byte. +- Create a work item and stage it when updating the gbt solo template to allow +new block detection and restart code to work. +- Test block hash as well as block height when solo mining to ensure we haven't +been mining on an orphan branch. +- Fix transaction processing for gbt solo. +- Encode height using integer varint format. +- Make new block detection message not show in gbt solo from test_work_current +- Add block detection via getblockcount polling in gbt solo and update gbt +template every 60 seconds. +- Iterate over transactions twice to malloc only once when copying all the +transaction data. 
+- Update solo coinbase regularly and submit as gbt work +- Only show merkle hashes for solo mining in debug mode. +- Set correct flag for solo work. +- Generate gbt solo work emulating stratum work construction. +- Set the diff as a double sdiff from gbt solo data. +- Move swork.diff out of the stratum work section to be shared as sdiff. +- Generate a header bin from gbt solo as per the cached stratum one. +- Store strings similar to stratum's when decoding gbt solo +- Avoid allocing and freeing stratum strings that should be fixed length. +- Run parser through detect_stratum after stratum+tcp:// is force added +- Remove unnecessary header length calculation for stratum header binary and +only binary convert the correct length of the header. +- Share more fields between stratum and gbt +- Share coinbase_len variable b/w stratum and gbt and setup more gbt solo +parameters. +- Generate a valid coinbase and set nonce2offset for gbt solo +- Move scriptsig header bin conversion to setup gbt solo +- Create our own custom scriptsig base. +- Add helper functions for creating script signature templates and begin +building template. +- Do gbt solo decoding under gbt lock. +- Add more gbt variable decoding from gbt solo information. +- Store all the transaction data in binary form when using GBT +- When setting up solo mining, check validity of bitcoin address against +bitcoind +- Make pooled GBT mining use merkle bin optimisations slated for solo mining. +- Abstract out the merkle bin calculation for gbt solo +- Implement efficient merkle tree base from solo GBT information. +- miner.php custom formatting and row counter '#' +- Drillbit: Fix for underestimating hash rate from Bitfury devices +- Send per-core hashrates at regular ~5min intervals back to cta devices. +- Calculate the cta per core hashrate at 5 minute intervals. +- Check the bits of the correct core in cta bit count. +- Display the bit count along with the bitmap for each cta core in the API stats +output. 
+- Store and display the per core hashrate on cta relative to each work restart. +- Decrease the time we wait for unsetting a core on the cta bitmap to correspond +with the lower max diff of 32. +- Set max diff on cointerra devices to 32 which is still only 11 shares per +second but allows for earlier confirmation of per core hashrates. +- Keep track of when the last restart and work updates were triggered and +provide helper functions for knowing the time since then. +- hashfast make api stats field names unique +- Fix gcc longjmp warning in api.c +- Add a per-core hashrate to the cta API stats. +- miner.php support edevs and estats +- API - put edevstatus where it was supposed to be +- Icarus - allow timing mode to work with ANU and not slow it down +- drillbit - remove warnings +- drillbit - minor code tidy up +- Drillbit: Change language around 'void' to warning about limiter disabled +- Drillbit: Fix accidental over-counting of HW errors +- Drillbit: --drillbit-auto parameter for tweakable custom tuning of ASIC speeds +- Drillbit: Output warning if board reports void warranty +- Drillbit: Add Avalon & drillbit-autotune notes to ASIC-README +- Drillbit: Limit work sent out to 8 units in a single pass, was DoSing a full +double scroll +- Drillbit: Move drillbit_empty_buffer calls to only when errors occur, were +limiting performance on Windows +- Fix Windows bug with libusb_reset_device returning SUCCESS for disconnected +device +- Drillbit: Fix some warnings +- Drillbit: Add --drillbit-autotune option for device to dynamically alter clock +speed +- Drillbit: Fix typo in previous commit +- Drillbit: Remove default config in cgminer, rely on defaults in firmware +- Drillbit: Combine split USB transfer for sending new work, reduce overhead +- Drillbit: Add support for protocol V4, with device-agnostic board +configuration data +- Drillbit driver: Add support for Avalon-based Drillbit miners +- API - add edevs and estats - to only show enabled devices +- Check 
device data exists on a hfa instance before trying to reinit it. +- Print off what quadrant regulator failed if known in hfa driver. +- Reset all the stats on autovoltage complete in cta driver. +- Use correct diff instead of diffbits in cta driver. +- Whitelist all firmwares <= 0.5 on hfa for software rolling of ntime. +- Avoid a memory leak by reusing the ntime field when rolling stratum work. +- Clear the pipe bitmap on cta only when no share has occurred for 2 hours +instead of 1. +- Cta share_hashes should be added, and we can base it on device wdiff instead +of pool work difficulty for more accurate hashrates. +- Since the device runtime is now reset, the Raw hashrate entry in the cta API +output is no longer meaningful. +- Look for autovoltage returning to zero on cta driver and reset stats at that +point since the hashrate is unreliable till then. +- ants1 - cgminerise applog calls +- Default to stratum+tcp:// on any urls that don't have a prefix instead of +http. +- Trivial cta style changes. +- ants1 - fix/enable temperature checking and remove unneeded temp_old +- ants1 - move local cgpu variables to info structure +- ants1 use a klist to store work and copied work +- Simplify dramatically the cross-process cgminer locking through use of flock +instead of sysv semaphores. + + +Version 4.1.0 - 8th March 2014 + +- Correct fix for dev start time being adjusted for stat zeroing. +- Make per device stats work for average after a stat zeroing. +- Add an hfa-options command line that allows the clockspeed to be chosen per +device by name comma separated, with a function that can be expanded with more +options in the future. +- Off by one drv_rolllimit check against jobs +- Free the work that may be lost, leaking memory, in a failed hfa_send_frame +- Roll the ntime for work within the hfa driver for firmware we know doesn't do +it internally as an optimisation. 
+- Export the roll_work function to be usable by driver code and make it +compatible with rolling stratum work. +- Make opt_queue be respected as a maximum value for staged items. +- Disable mistakenly enabled lock tracking. +- api version update for HEX32 +- api.c - HEX32 type needs quotes +- Disable the MAX_CLOCK_DIFF check for newer hashfast firmwares since it's not +required. +- Store the hardware and firmware revision in the info struct for easy use in +the hfa driver. +- Only decrease the hfa clock rate if the device has been running for less than +an hour before dying. +- Change lack of op name response message in hfa driver +- Check for lost devices at every write/read in hfa_detect_common +- Make bxm bits configurable. +- Move avalon2 options to ~alphabetic position in help. +- Do a shutdown routine on bxm close. +- Provide support for 2 chips in libbitfury sendhashdata and enable the 2nd chip +on BXM devices. +- Remove unnecessary opayload and newbuf members of bitfury info struct. +- Add an spi add fasync command. +- Cope with older hfa firmware not even responding to op_name. +- Forcibly kill everything silently with an exit code of 1 should we fail to +cleanly shut down and use a completion timeout for the __kill_work in +app_restart. +- Make __kill_work itself also be a completion timeout. +- Generalise more of libbitfury for more reuse in both nf1 and bxm drivers. +- Remove redundant init components of bxm driver. +- Set default osc6 bits on bxm to 50 +- Enable the transaction translator emulator for bxm devices and use a dummy spi +tx the size of a normal payload. +- Store usb11 and tt flags as booleans in cgusbdev allowing them to be +discretely enabled as well as detected by the device data. +- Add bxm scan function and check spi txrx returns only as much as sent. +- Add init sequence to bxm detect one. +- Add a bxm specific txrx function for spi transfers. +- Add bxm close to bitfury shutdown switch. 
+- Add reset/purge/cshigh/low sequence to bxm init +- Add bitmode init to bxm open sequence. +- Add initial bxm opening sequence for detect one. +- Add identifiers for bxm bitfury devices. +- Clean up parse_method +- More gracefully break out of parse_notify on a corrupted hex string error, +checking the return value of all hex2bin conversions and being consistent with +using stack memory. Fix an unlocking error in cases of failure. +- AntS1 - add detection information to usbutils +- Enable Bitmain Ant S1 code and make it conform to cgminer requirements +- Make the cointerra displayed hashrate based on valid share generation. +- Convert and update values shown in the cointerra api output. +- Export the api_add_int16 function. +- Use a custom mystrstr function in cointerra driver. +- Add api_add_int16 to API functions. +- Add support for Bitmain Multi Chain and Single Chain and optimize the +efficiency +- Add support for bitmain devices +- Perfect function of BitMain Multi Chain +- Add support for Bitmain Multi Chain and Single Chain and optimize the +efficiency +- Add support for bitmain devices + + +Version 4.0.1 - 28th February 2014 + +- Refresh the log window on pool failure message at startup. +- Rework the pool fail to connect at startup to not get stuck indefinitely +repeatedly probing pools with new threads and to exit immediately when any key +is pressed. +- Use an early_quit function for shutting down when we have not successfully +initialised that does not try to clean up. +- Add more information to a hfa bad sequence tail event. +- Increase the work queue at the top end if we've hit the bottom as well. +- Set the work generation thread high priority, not the miner threads. +- Bringing each hfa device online takes a lot of work generation so only ever do +one at a time. +- Increase the opt_queue if we can hit the maximum amount asked for but are +still bottoming out. 
+- Keep the old hfa device data intact with a clean thread shutdown to allow it +to be re-hotplugged with the old information. +- Cope with the API calling hfa on partially initialised devices having no info. +- Show only as many digits as are required to display the number of devices. +- Cold plug only one hashfast device to get started, and then hotplug many to +minimise startup delays and possible communication delays causing failed first +starts. +- Send a shutdown and do a usb_nodev if hfa_reset fails. +- Null a device driver should thread prepare fail. +- Add a function for making all driver functions noops. +- Don't try to reinit a device that's disabled. +- Disable a device that fails to prepare. +- Check for lack of thread in watchdog thread for a failed startup. +- Make all device_data dereferences in the hfa driver safe by not accessing it +in statline before when it's non-existent. +- Add an option to disable dynamic core shedding on hashfast devices. +- Do not remove the info struct on a failure to hfa prepare. +- Detect an hfa device purely on the basis of getting a valid header response to +an OP_NAME query, leaving init to hfa_prepare which will allow multiple devices +to start without holding each other up at startup. +- Store the presence and validity of opname in the hfa info. +- api - buffer size off by 1 for joined commands +- minion - clean up statline +- Only break out of usb_detect_one when a new device is found. +- Use usb_detect_one in the hfa driver. +- Provide a usb_detect_one wrapper which only plugs one device at a time, +breaking out otherwise. +- Issue a usb_nodev on a bad work sequence tail in hfa +- Read in hfa stream until we get a HF_PREAMBLE +- Add shed count to hfa API stats output. +- Display the base clockrate for hfa devices with a different name to per die +clockrates to be able to easily distinguish them. 
+- Use op_name if possible first with hfa devices to detect old instances and be +able to choose the starting clockspeed before sending an init sequence, +reverting to setting op name and serial number as fallbacks. +- Make hfa resets properly inherit across a shutdown. +- Don't break out of hfa_old_device early if there's no serial number. +- Fix harmless warning. +- Allow the drop in MHz per hfa failure to be specified on the command line. +- Icarus - ignore HW errors in hash rate ... and fix detection of them +- Enable the hfa shed supported feature by default. +- Add to udev rules hfa devices for firmware writing. +- Remove ENV from hashfast udev rules. +- Add a --hfa-name command that allows one to specify the unique opname for a +hashfast device. +- Ava2 decode the voltage, get the temp_max +- Set the clock rate with a work restart instead of an init when changing to old +clocks for hfa +- Set opname on hfa devices without a serial number to a hex value based on time +to not overflow the field. +- Add op name to hfa API stats output if it exists. +- Set the actual op_name in hfa devices if cgminer is choosing it itself due to +it being invalid. +- Re-init an hfa device to its old data before setting up info structures as +their sizes may change. +- Remove the usb device whenever we do a running shutdown on hfa and do a +shutdown as the imitated reinit to allow it to hotplug again. +- Reset opt hfa dfu boot after it's used. +- Comment out windows only transfer on hfa startup. +- Clean up structures unused in case of all failures in hfa detect common +- Clear all structures should we fail to hfa reset on adjusting clock on a +hotplug. +- Set master and copy cgpu hash clock rate for hfa when dropping it on a +restart. +- Set the master hfa clock speed to lower when shutting down a copy. +- Do a clear readbuf on any hfa reset in case the device has not yet cleanly +shut down. 
+- Increase hfa fanspeed slightly more when it's rising in the optimal range than +falling. +- Always decrease hfa clock speed on a running shutdown and don't try sending an +init frame since it will be dropped regardless. +- Match hfa devices to old ones based on OP_NAME values before serial numbers if +possible. +- Read off the OP_NAME if it exists and is supported on hfa devices, setting it +to the device serial number or a timestamp if it is invalid. +- Updated hf protocol +- Check for an amount along with no error in hfa clear readbuf +- Hfa clear readbuf can return a nonsense amount when there's a return error so +ignore the amount. +- Running resets always cause a shutdown on hfa meaning the device will +disappear with modern firmware so always kill off the threads to allow +re-hotplugging. +- Reset the hfa hash clock rate to the old one if we find an old instance, only +setting the device id in hfa_prepare +- Keep the device_id on the original zombie thread for HFA in case of further +resets. +- Break out of hfa inherit if there is no device data. +- Inherit the hfa zombie instance after the device id has been allocated. +- The list_for_each_cgpu macro will dereference when there are no mining threads +yet. +- Make hfa hotplug inherit some parameters from a previous instance if the +serial number exists and is matching, avoiding dropping the clock on all +devices. +- Per device last getwork won't work if the device stops asking for work. +- Use the share_work_tdiff function in the driver watchdogs. +- Provide a helper function for determining time between valid share and getwork +per device. +- Store last_getwork time on a per-device basis. +- Limit the decrease of hfa clock rate on reset to the default clockrate. +- Base the hfa failure time on the current expected hashrate instead of a static +15 seconds. +- We shouldn't be trying to read from the hfa_send_shutdown function itself. +- Reset the icarus failing flag only when a valid nonce is found. 
+- Transferred value is corrupt on a NODEV error in usbutils. +- Set each miner thread last valid work just before starting its hash loop in +case there are delays at startup. +- Only memcopy *transferred data in usbutils if we have received only success or +a non-fatal error. +- Increase to 25 nonce ranges on icarus fail detect. +- Set icarus device fail time to be dependent on device speed to avoid falsely +detecting failure on slower AMU devices. +- Updated hf protocol header. +- Updated BE hf protocol header. +- Take into account shed cores on hfa devices when determining how many jobs to +send. +- Fix compilation error with two avalon types. +- Fix missing A1 files from distribution. + + +Version 4.0.0 - 21st February 2014 + +- Check for error from setfloatval +- Halfdelay cannot be larger than 255. +- Allow any arbitrary frequency to be specified for ANU devices and try to find +the nearest frequency when initialising it, reporting if the frequency is not +exactly as requested. +- Only show system libusb warning when appropriate during configure. +- Merge branch 'avalon2' of https://github.com/xiangfu/cgminer into +xiangfu-avalon2 +- Hfa cooling remains satisfactory down to a minimum fanspeed of 5% +- Give a nodev error if we have already set nodev in hfa clear readbuf to avoid +further usb comms attempts. +- Fix missing include +- Move bitmine options to alphabetic positioning. +- bab - missed a few 'DEAD's in last commit +- bab - use 'bad' instead of 'dead' as per the screen B: +- bab - roll work if possible to reduce CPU +- Update the per die hash clock data on a running reset on hfa devices. +- Set the per die clock on hfa to the known starting base clock instead of our +requested clock rate. +- Hfa device failure can be detected within 15 seconds so we should try +restarting it sooner to avoid tripping the device's own watchdog. +- Check return result of hfa clear readbuf to minimise error messages on device +failure. 
+- Put MHz into cta statline description. +- Send a work restart with every shutdown message to hfa devices to clear any +work that might be stale on the next restart. +- Store the hfa hash_clock rate and display it in the statline. +- Store the maximum board temperature for hfa devices and take that into +consideration when calculating the highest temperature as well as the dies. +- A1: CoinCraft-Desk driver variant +- Initial import of Bitmine.ch A1 SPI driver +- klondike ensure stats type matches +- avalon, bab, drillbit, klondike use more screen space rather than truncating +info +- Add hashfast fanspeed% to statline display. +- Move driver statline padding to cgminer.c, expanding width of maximum +displayable statistics and window width to add more info. +- Prune old stratum shares that we've seen no response for over 2 minutes to +avoid memory leaks for pools that don't respond about some shares. +- Add warning if system libusb is being added. +- Only run ./configure with autogen.sh if extra parameters are passed to it. +- Updated cointerra features. +- Add le16toh defines for platforms that may be missing it. +- Remove modminer bitstreams from distribution and replace with a README saying +what file needs to be added if modminer build is still desired. +- Use the simplelog function from usb_list() +- Add a simplelog function that does not log date and time. +- Use a unique usb_list function displaying only pertinent information when +listing usb devices from the menu. +- Abstract out the _in_use function to take different linked lists. +- Break out of linked list loop in remove_in_use in case we've gone over the +whole list. +- Check for hfa invalid hash clockrate after other error messages. +- Detect non-responsive icarus devices and attempt a usb reset before killing +them after 2 minutes of no hashes. +- Detect non-responsive bitfury devices and try a usb reset on them before +killing their instances off after 2 minutes of no activity. 
+- Allow hotplug interval to be changed from the USB menu. +- Prevent recursive loop in __is_in_use linked list walking. +- Add the ability to whitelist previously blacklisted usb devices from the menu. +- Use a bool in struct cgpu to know when a usb device has been blacklisted, +avoiding blacklisting it more than once. +- bab - ensure disabled chips are counted in the screen dead chip counter +- bab - only disable the chip once ... +- bab - short work list skip disabled chips +- api.c avoid incorrect gcc warning +- cgminer -h crash fix +- Add blacklisting as an option to the USB menu. +- Add a mechanism to blacklist a usb device from its cgpu. +- Add an option to the USB menu to list all known devices. +- Add an option to send a USB reset via the USB menu. +- Add a usb_reset by cgpu function to usbutils. +- Add warning for attempting to unplug a usb device that is already removed. +- Add USB Unplug option to USB management device management menu. +- Add enable and disable USB device functions to the menu. +- Add a [U]SB menu item, initially with just statistics per device, adding +device number to the device status window display. +- Reuse the cgpu temp entry for avalon and bitfury devices, changing avalon to a +damped value. +- Store the cointerra maximum temperature in the cgpu struct as an exponentially +changing value based on the maximum temperature. +- Reuse the cgpu->temp entry for max temperature in hfa driver. +- bab - disable chips that return only bad results +- Add driver for cointerra devices. +- Add Avalon2 (2U size machine) support +- miner.php - define a default rigport (that can be changed) and don't require a +port number in the rigs array +- miner.php allow links for rig buttons in tables and allow using the 4th IP +octet if no rig name - default disabled for both +- format fix and bad variable usage fix for --benchfile +- Allow running cgminer in benchmark mode with a work file --benchfile +- ANU frequency is in MHz, not hex. 
+- Remove bitfury devices from the usb list on shutdown in case they have stopped +responding but have not had a fatal usb error. + + +Version 3.12.3 - 8th February 2014 + +- Put the hashfast temperature into the cgpu structure so that it shows up in +the devs API call. +- We shouldn't block on no work situations directly from the getwork scheduler +itself. +- Revert "Make the pthread cond wait in the getwork scheduler a timed wait in +case we miss a wakeup." + + +Version 3.12.2 - 8th February 2014 + +- Adjust antminer U1 timing according to command line frequency set, fixing the +need for icarus timing on the command line. +- Read pipe errors that don't clear are worth attempting to reset the usb. +- Revert "Do away with usb resets entirely since we retry on both pipe and io +errors now and they're of dubious value." +- Make the pthread cond wait in the getwork scheduler a timed wait in case we +miss a wakeup. + + +Version 3.12.1 - 7th February 2014 + +- Document new features for antminer U1 and hfa devices. +- Add support for ANU overclocking. +- Increase hfa fanspeed by more if we're rising in temp above the target than if +the temp is staying the same. +- Add debug output when get_work() is blocked for an extended period and add +grace time to the device's last valid work to prevent false positives for device +failure. +- Issue a shutdown prior to a reset command for hfa devices and lock access to +reads awaiting the response if the device is already running. +- Do not register as successful a hfa init sequence that reports the clockrate +as zero. +- Show device info in noffset nonce share above target message. +- Widen lines in top menu to fit extra large share values. +- Only show one decimal place if pool diff is not an integer. +- Show serial number as a hex value in hfa verbose startup. +- Slowly remove work even if it's not being used to keep the getwork counter +incrementing even if work is not used and as a test that pools are still +working. 
+- Increase the maximum diff between hfa dies to 100Mhz. +- Show which hfa die is bringing down all the others when decreasing all the +clock speeds. +- Increase the decrease when temp has increased more and we want to decrease it +on hfa. +- Give device info with share above target message. +- Allow throttling of hfa dies more frequently and increasing of speeds less +frequently. +- Wait after sending a hfa shutdown to allow the device to properly shut down +before possibly sending it more commands. +- Minimise the die clock differences in hfa to no more than 50Mhz. +- Check for when errno is set on windows as well as the windows variant for +errors. +- Revert "Update to libusb-1.0.18" +- Disable fan/die clock control in hfa if the firmware does not support it, with +notification. +- Add ability to enter ANU frequency as a multiple of 25 from 150-500. +- Decrease hfa clock by 10 if a reset is attempted due to the device remaining +idle. +- ifdef out icarus options unused without icarus built in. +- Reorder command line options alphabetically. +- Add no matching work to hfa API output. +- Change various logging message levels in the hfa driver. +- Only adjust clocks if there is no restart in hfa to avoid 2 restarts back to +back. +- Ensure we iterate over all dies adjusting temperate for hfa by starting +iterating after the last die modified. +- Clamp initial hfa fanspeed to min/max if passed as parameters. +- Allow hfa fanspeed to be set via command line. +- Further relax the target temperatures on hfa driver, targetting 88 degrees. +- Try one more time to get the hfa header on init since it can take 2 seconds +for all 3 boards on a sierra. +- Update authors for removal of gpu/scrypt. +- Wait for 5 temperature updates in hfa before adjusting fanspeed. +- Have some leeway before starting to throttle hfa dies. +- Use increments of 10 when increasing hfa clock since it may not have 5 MHz +granularity internally. 
+- Only perform a hfa fan speed update if we have new temps to work with. +- Correctly measure the hfa max temp and smooth out the changes in its value. +- Choose better defaults for min/max/default fan settings for hfa driver. +- bab - reduce def speed, fix speed staying in ranges and report bank/chips in +ioctl() errors +- bab - add info about number of boards/chips to each Dead Chain +- These may not be longs (eg: OSX)... fo a safe cast to ensure. +- bab - add dead boards and dead chains to stats +- Add fanspeed to hfa api output and set initial fanspeed to 10% +- Add hfa fanspeed control to try and maintain a target temperature. +- API-README correct new text format documentation +- API allow multiple commands/replies in one request +- Add op commands necessary to control hfa fanspeeds. +- Add OP_FAN to hf protocol header. +- Always show the stratum share lag time in debug mode. +- Add stratum share response lag time to verbose output if it's greater than 1 +second. +- Add stratum share submission lag time to verbose information if it's over 1 +second. +- Check for more interrupted conditions in util.c and handle them gracefully. +- Send a ping to hfa devices if nothing is sent for over 5 seconds. +- Add OP_PING to hfa commands +- Display the hfa serial number as a hexadecimal value. +- Add the ability to display a hexadecimal 32 bit unsigned integer to the API. +- Limit all hfa restarts for temperature control to no closer than 15 seconds +apart. +- Allow the hfa temp target to be disabled by setting it to zero. +- Handle interruptions to various select calls in util.c +- Add sanity check for silly overflows in hfa die temperature readings. +- Add per-die throttling control for hfa driver based on each die's temperature, +issuing a suitable reset to maintain the temperature below a configurable target +temperature. +- Update hf protocol +- Do not memcpy in usbutils unless data was transferred. 
+- Send a full allotment of jobs to the hfa device after a restart instead of +reading the status. +- Export the flush_queue function for use by drivers. +- Remove wrong goto +- Remove the unqueued work reference when we discard work from get queued as +well. +- Wake the global work scheduler when we remove a work item from the unqueued +work pointer. +- Discard work that is stale in the get_queued() function, returning NULL +instead. +- Add a call to a driver specific zero stats function when zero stats is called +to allow each driver to reset its own stats as well if desired. + + +Version 3.12.0 - 29th January 2014 + +- Add support for AntminerU1 devices with the icarus driver. +- Add antminer U1 to comment in udev rules. +- Do away with usb resets entirely since we retry on both pipe and io errors now +and they're of dubious value. +- Retry on usb IO errors instead of faking success. +- Check that we've cleared the pipe error after a clear request, not the err +value which is unchanged. +- Update to libusb-1.0.18 +- Change hfa overheat limit to 90 degrees. +- Relax timeout in hf get header to 500ms to match the usb timeout. +- Minion - check/clear interrupts for all chips +- Set info work to null after it is freed in nf1 after a restart to prevent +double free later. +- The second_run bool in libbitfury should be per device. Microoptimise its and +job_switched usage, removing the unused results array for NF1 devices. +- Fix displayed diff when solo mining at >2^32 diff. +- bab - stop stale work accumulating +- bab - set the default SPI speed back to 96000 + + +Version 3.11.0 - 25th January 2014 + +- Add hashfast documentation to ASIC README +- Support the variable HFA naming throughout the driver notices. +- Set the global hfa hash clock rate to equal the lowest if we are lowering it +for a device reset since it may be re-hotplugged after failing reset. +- Decrease the hfa clock rate if it is overclocked and we have had to try +resetting it. 
+- Put a sanity check on the measured temperature in the hfa driver for obviously +wrong values. +- Avoid calling applog from within hfa statline before to avoid a deadlock. +- Add throttling control to hfa driver, configurable at command line, nominally +set to 85 degrees. +- Reset hfa device if no valid hashes are seen for 1 minute from the last work. +- Store when the last getwork was retrieved and display it in the API summary. +- bab - also report dead chip count screen +- Count share based hashes in the hfa driver with the device diff to get more +frequent updates. +- Only count 2/3 of the accumulated hashes on each pass through the hfa scan +work loop to smooth out displayed hashrate. +- bab add total history HW% to API stats +- Test valid nonces in the hashfast driver allowing us to check against the +target when trying to submit them. +- No point casting a double to a uint64 +- Convert the hfa hashmeter to one based on successful share return and display +the raw and calculated hash totals in the API. +- bab - remove libbitfury dependency since it requires USB +- Add description to hfa hash clock command. +- Add hfa board temperatures to API output. +- Wait for up to 0.5 seconds in the hashfast scanwork loop if no jobs are +required. +- Label HFA devices as B or S when their configuration matches babyjet or +sierra. +- Fix libbitfury being compiled in always by mistake. +- bab - spelling +- Add bab-options +- bab - tune the chip speed based on error rates +- bab record/report spie and miso errors +- Win32 falsely comes up as big endian pulling in the wrong hf protocol header. +- Remove unused components in hashfast driver. +- Check in all usb communication places for hashfast driver that the device +still exists. +- Do not send a usb reset on a usb read pipe error. +- Don't replace usb pipe errors with the pipe reset return code. +- Updated hf protocol header. +- The search for extra nonce is not worth performing in the hashfast driver. 
+- Add core address to hfa parse nonce debugging. +- Retry sending a frame once if it has failed in hfa_send_frame +- Add extra hfa usb init errors. +- Quiet now unused variable warning in hfa detect. +- Remove unused variable. +- Add board temperature to hfa debug +- Make submit_tested_work return a bool about whether it meets the work target +or not. +- Provide a helper function for determining dev runtime and use it in the +hashmeters used. +- Look for hfa usb init header for 2 seconds, then resend the init twice more +before failing. +- Really only set up the hfa crc table once. +- Generically increase the queue if we are mining on a pool without local work +generation each time we run out of work. +- Change new block detection message since longpoll is rarely relevant today. +- Change the default clockspeed bits on nanofury devices to 50 and add a command +line option to allow it to be changed. +- Use unused line at the top of the log window which often gets stuck +unchanging. +- Clear pool work on a stratum reconnect message. 
+- bab record/report spie and miso errors +- bab - cleanup old work for dead chips also +- bab add avg fail tests to API stats +- bab report bank/board/chip for dead and v.slow chips +- bab process all nonce replies per chip together +- bab reduce work delays +- bab record the number of E0s discarded +- bab - modified result parsing +- bab restore removed unused flag +- configure - correct minion name +- bab only scan valid nonce offsets +- bab record continuous (and max) bad nonces +- bab display Banks/Boards/Chips in the device window +- Modify thread naming to make them easier to identify +- bab reduce the work send delay +- bab remove results polling +- bab report SPI wait in seconds +- bab report missing chips at start and API +- bab ensure there's enough space for the nonce reply +- bab correct stats 'Send Max' +- bab allow long enough wait on ioctl() per board +- bab more I/O stats +- api.c 2014 +- api allow any size stats data +- bab add processed links which excludes expired links skipped +- bab report chips per bank, hw% and ghs per chip +- bab lock access to new_nonces to ensure correct reporting +- bab report V2 banks/boards during initialisation +- bab expire chip work +- bab use only k_lists and make work handling more refined +- klist - allow adding to tail +- bab remove old unused #define +- bab correct for master git +- correct klist reallocs +- klist lists for bab +- api.c correct DEVICECODE and ordering +- Maxchips should be 384 (16 chips/board 24 boards/controller) +- bab more detailed stats and delay less when waiting for a buffer +- api add data type AVG float 3 decimal +- bab - add V2 detect with bug fix in detect +- api.c set the actual version number to 3.0 +- API V3.0 unlimited socket reply size +- README update --usb +- Check for loss of device in usb read before any other code on the usbdev +- Change stratum strings under stratum_lock in reconnect and free old strings. +- Add mcp2210 compilation to want_libbitfury configs. 
+- Fix HF driver typo. + + +Version 3.10.0 - 9th January 2014 + +- Set the mcp2210 transfer setting only when it changes. +- Buffer sizes in nanofury device data are unnecessarily large. +- Only perform spi reset on init, not with each transaction. +- Remove spi_detect_bitfury at nanofury startup and fix incorrect refresh time. +- Use a simple serialised work model for nanofury +- Use bitfury_checkresults to avoid hashing results twice in nanofury. +- Export bitfury_checkresults in libbitfury +- Pass extra parameters for later use in libbitfury_sendHashData +- Avoid double handling bswap of the nonce value in nanofury +- Avoid unnecessary rehashing in nanofury nonce checking. +- Remove the unused portions of atrvec in the nanofury driver +- Age work in nf1_scan to avoid risk of losing a work item and leaking memory. +- bitfury_work_to_payload is double handling the data unnecessarily +- Default bitrate on nanofury should be 200kHz +- localvec should be only 80 bytes not 80 words +- Wrong init value for nanofury +- Remove unused rehash values from nanofury driver. +- Only update info work in nanofury driver when it's empty. +- Fill the appropriate type of usb transfer when we know if it's an interrupt +transfer instead of a bulk one. +- Use the internal knowledge of the usb epinfo to determine whether we should be +doing an interrupt instead of a bulk transfer, and do not send a ZLP if so, and +limit read transfer to expected size automatically. +- Avoid bin2hex memleak when we start getting nanofury nonces +- Set atrvec only once and use a local array for each device's work. +- Cancel any spi transfers on nf1 close +- Add bitfury detection loop to nanofury startup +- Move spi init code to libbitfury +- Remove inappropriate extra config reg in nanofury setup. +- Status 0x30 should never happen with spi transfers. +- Fix spi transfer data size transmission mistakes. 
+- Minor correctness change in spi_add_data +- spi_txrx should always send and receive the same size message +- Random libbitfury changes. +- Set value of gpio pins to low on closing nanofury. +- Fix more init sequence for nanofury. +- Add basic initialisation for nf1 devices +- Add basic nf1_scan function. +- Basic import of libbitfury functions from nanofury branch +- Import functions from nanofury fork for libbitfury +- Meter out spi sends to only 2 bytes at a time, offsetting according to how +much data returns. +- Use the usb read limit function for mcp2210 reads. +- Provide a way for usb reads to just read the size asked for with a limit bool. +- Get pin value after an nf1 spi reset. +- Make sure what we send in the buffer doesn't change during spi reset for +nanofury +- Remove all standalone gpio setting change functions in mcp2210 and just use +the one global setting function. +- Set gpio values in the one function with all values for nanofury. +- Provide a helper function for setting all mcp2210 gpio settings. +- Add a helper function for getting all mcp2210 gpio settings. +- Set all pin designations and directions in one call for nanofury and don't +bother storing their values in the info struct. +- Provide helper functions for setting all pins and dirs on mcp2210 +- Set all nanofury pin designations in one call +- Provide a helper function for setting all pin designations on mcp2210 +- Store the spi settings in a struct for nanofury devices. +- Check the received status in mcp2210 spi transfers and repeat a zero byte send +if it's in progress. +- Set the bytes per spi transfer prior to each mcp2210 transfer. +- Separate out the send and receive functions for mcp2210 and check response +value in return. +- Check that mcp2210 spi settings have taken and check the value of the pin +during nanofury setup. +- Don't set GPIO pin designations after initial setting in nanofury since the +direction and values will be changed. 
+- Provide an mcp2210 set gpio input helper function that sets a pin to gpio and
+input.
+- Move the set gpio output function to a generic mcp2210 version from nanofury
+which also sets the pin to gpio.
+- Implement a nanofury txrx with a larger buffer and cycling over data too large
+to send.
+- Implement magic spi reset sequence for nanofury.
+- Add more spi magic to the nanofury init sequence.
+- Add lots of magic spi initialisation to nanofury.
+- Export reused components of bitfury management into a libbitfury and use for
+bab and bitfury drivers.
+- More init sequence for nanofury and implement a close function that sets all
+pins to input.
+- Reword offset header handling in hfa_get_header
+- Sanity check in hfa_get_header
+- Add more checks in hashfast driver for lost devices.
+- Change spimode and send more data in nanofury setup.
+- Add basic setup comms to nanofury.
+- Implement an mcp2210 spi transfer function.
+- Set the initial spi settings for nanofury driver.
+- Provide a helper function for getting mcp2210 spi settings.
+- Implement an mcp2210 set spi transfer settings function.
+- Cancel any SPI transfers in progress in nanofury after initial setup.
+- Implement an mcp2210 spi cancel function.
+- Return only binary values for mcp2210 GPIO values.
+- Set GPIO LED and power to high in nanofury driver.
+- Implement initial part of nanofury init sequence for GPIO pin settings and add
+output debugging of set values.
+- Add helper functions for getting and setting mcp2210 gpio pin designations.
+- Don't return an error in usb read if we've managed to get the whole read
+length we've asked for.
+- Use correct endpoint order for nanofury devices and read with a short timeout
+on return loop from send_recv.
+- Add mcp2210 helper functions for getting and setting one GPIO pin val and
+direction.
+- Create a generic gpio pin struct and add helpers for mcp get pin val and dirs.
+- Check the receive msg of a send/receive cycle on mcp2210 matches the send +message. +- Add a set of usb commands to the usbutils defines for mcp2210 comms, and use +the same command name for send and receive. +- Create a generic mcp2210 send_rcv function. +- Include mcp header for bitfury and fix extra params in macro. +- Add basic SPI comms defines for mcp2210 and build rules for bitfury. +- Minion set some core defaults similar to final requirements +- minion compile warnings +- move driver-minion.c to main directory +- Minion with ioctl() stats, settings to attempt to emulate 21TH/s +- minion driver with results interrupt working +- tested working driver-minion.c without interrupts +- Working driver-minion.c v0.1 +- driver-minion.c compilable untested +- minion driver - incomplete +- Add minion driver into cgminer +- Add basic device detection and updated udev rules for nanofury devices. +- Remove GPU from share logging example. +- Don't keep resetting BXF clockspeed to default. +- If no pools are active on startup wait 60s before trying to reconnect since we +likely have the wrong credentials rather than all the pools being out. +- Discard bad crc packets for hashfast driver instead of trying to process them. +- Update documentation for modified avalon options syntax and document relevant +55nm details. +- Modify the auto tuning sequence to work with the 50MHz changes required to +work with 55nm Avalon. +- 55nm avalon requires the delays between writes reinstated for stability. +- Use an equation instead of a lookup table to set the frequency for 55nm avalon +allowing arbitrary values to be used. +- Make the result return rate low detection on avalon less trigger happy. +- Always send the bxf device a clockspeed after parsing the temperature in case +the device has changed the clockspeed itself without notification. +- Fix BXF being inappropriately dependent on drillbit. 
+
+
+Version 3.9.0 - 23rd December 2013
+
+- drillbit asic - enable in api.c
+- Fix trivial warnings in knc driver.
+- Reinstate work utility based hashmeter for knc.
+- drillbit format %z not valid on windows
+- drillbit more formatting changes
+- usbutils remove old code added back
+- Memset the spi tx buffer under lock in knc driver.
+- drillbit fix temp display to fit in standard space
+- Drillbit formatting
+- drillbit - use one drvlog and display dname before add_cgpu
+- Keep original naming for the bitfury driver
+- knc: Bugfix - good shares wrongly reported as HW errors. Root cause of the
+problem: several work items were assigned the same work_id in the active works
+queue of the knc driver. Thus when good nonce report arrived from the FPGA,
+wrong work item was picked up from the queue, and submit_nonce evaluated that
+as an error. Fix: Limit the work_id counter update rate. Update it only to the
+number of works actually consumed by the FPGA, not to the number of works
+sent.
+- Store per-chip submit information for bxf device and show them in the API.
+- Check for removed bxf devices before trying to update work or send messages.
+- api.c no decref if not json
+- Minimise risk of nonce2 overflow with small nonce2 lengths by always encoding
+the work little endian, and increasing the maximum size of nonce2 to 8 bytes.
+- Change default hashfast timeout to 500ms.
+- Ensure we can look up the work item in the hashfast driver or print out an
+error if we don't.
+- Drillbit source formatting - reindent and retabify
+- Add ASIC count, temperature status to drillbit API output (closes #1)
+- Many warning fixes
+- knc: Do not include variable "last minute" data into the "last hour" per-core
+stats
+- knc: Make per-core statistics available through API
+- Implement command line control of the bxf target temperature.
+- Add a simple PID-like controller to bi*fury devices to dynamically alter the
+clock setting to maintain a nominal target temperature set to 82 degrees.
+- Add data to BXF API output.
+- Add support for newer protocol bi*fury commands job, clock and hwerror,
+setting clock to default 54 value, turning parsing into a compact macro.
+- Look for the thermal overload flag in the gwq status message in the hashfast
+driver and send it a shutdown followed by an attempted reset.
+- Log message fixups
+- Fix for "Timing out unresponsive ASIC" for pools which send early reconnect
+requests, and then take a short time to send work (ie BTCGuild)
+- Shorten initial config line, win32/pdcurses doesn't like long lines during
+early logging
+- Pull back the very long timeouts set in fe478953cf50
+- Fix bug where work restart during results scan could lead to bad device state
+- Align device status lines same regardless of number of temp status or >10
+ASICs
+- Tag log lines from brand new devices as DRB-1 until they are initialised
+- Tag log lines as 'DRB0' rather than 'DRB 0', same as other places in cgminer
+- Print a summary of the device settings at level NOTICE during initialisation
+- Allow choosing device settings based on 'short' product names shown in status
+line
+- Allow per-device settings to use "DRBnn" as an identifier instead
+- Issue an ASIC restart during a work_restart, removes spurious timeout messages
+from ASICs and probably some rejected shares
+- Check all results against all work instead of just taking the first match
+(avoids some rejected submissions to the pool, ASIC can produce multiple
+candidate results.)
+- Fix memory leak caused by unnecessarily copied work
+- Fix bug with find_settings not returning default value
+- Set timeouts on write, set very long timeouts
+- Merge drillbit driver
+
+
+Version 3.8.5 - 10th December 2013
+
+- Increase the BFLSC overtemp to 75 for fanspeed to maximum.
+- Set bflsc cutoff temperature to 85 degrees and throttle 3 degrees below the +cutoff temperature. +- Only set LIBUSB_TRANSFER_ADD_ZERO_PACKET for libusb versions we know include +support for. +- Provide a helper function that can reset cgsems to zero. +- Add to cgminer_CPPFLAGS instead of redefining them. +- Attempt a libusb reset device on usb devices that have stopped responding. +- Replace deprecated use of INCLUDES with _CPPFLAGS. +- Remove more unused GPU code. +- Attempt USB device resets on usb read/write errors that will normally cause +the device to drop out. +- Quieten down jansson component of build. +- Cache the bool value for usb1.1 in _usb_write +- Initialise usb locks within usbutils.c instead of exporting them. +- Imitate a transaction translator for all usb1.1 device writes to compensate +for variable quality hubs and operating system support. +- Rationalise variables passed to usb_bulk_transfer. +- Unlink files opened as semaphores on releasing them. +- Remove user configuration flag from pll bypass enabling in hashfast driver. +- Provide an hfa-dfu-boot option for resetting hashfast devices for +reprogramming. +- Fixed one byte stack overflow in mcast recvfrom. +- Having changed C_MAX means we don't calloc enough for usb stats, off by one. +- Don't free the info struct on hashfast shutdown since it's still accessed +after a device is removed. + + +Version 3.8.4 - 1st December 2013 + +- Deprecate the usb usecps function and just split up transfers equal to the +maxpacketsize on usb1.1 devices. +- Retry sending after successfully clearing a pipe error. +- Drop logging of timeout overrun message to verbose level. +- Use a much longer callback timeout for USB writes on windows only as a last +resort since cancellations work so poorly. +- Use vcc2 in bflsc voltage displayed. +- Increment per core errors on false nonces in bflsc and add per core statistics +to api stats, removing debugging. +- Store a per-core nonce and hw error count for bflsc. 
+- Fix json parsing in api.c +- Add debugging to hfa driver for how many jobs are being sent. +- Shut down the hfa read thread if the device disappears. +- Add debug output saying what frame command is being sent in hfa driver. +- Revert "Disable USB stats which were not meant to be enabled by default and +add extra memory for a memory error when stats are enabled." +- Reset work restart flag in hfa driver since we may check for it again in +restart_wait. +- Add more op usb init errors for hfa driver. +- Perform basic displaying of hfa notices received. +- Add hfa op usb notice macros. +- Update hf protocol header. +- Use sync usb transfers in lowmem mode. +- Go back to allowing timeout errors on USB writes to be passed back to the +driver without removing the device in case the driver wishes to manage them. +- Initialise more values for the hfa data structures. +- A USB control error must be < 0 +- Simplify USB NODEV error checking to success only for writes and control +transfers, and success and timeout for reads. +- libusb error IO should be fatal as well if it gets through usb read and write. +- Allow IO errors in usb reads/writes to be ignored up to retry max times. +- Use correct padding for bxf temperature display. +- Initialise devices before attempting to connect to pools to allow their thread +prepare function to be called before having to connect to pools. +- Add hidden hfa options to set hash clock, group ntime roll and pll bypass, +fixing frame sent on reset to include extra data. +- Relax the timeouts for the slower usb devices on linux. +- Add big endian hf protocol header to Makefile +- Check for correct big endian macro in hf_protocol +- Use an absolute timeout in hfa_get_header to cope with buffered usb reads +returning instantly confusing the 200ms counter. +- Update hfa_detect_one to use the new detect function API. + + +Version 3.8.3 - 23rd November 2013 + +- Set the bitfury device start times from when we first get valid work. 
+- Fix stack corruption of zeroing too much in bf1 driver. +- Make usb_detect return the cgpu associated with it to check if it succeeds to +decide on whether to increment the device count or not. +- Set tv work start time for bxf driver. +- Age the bxf work items over 90 seconds, not the bf1 work items. +- Zero the read buffer in _usb_read to avoid stale data and only use stack +memory instead of using the bulkbuf since it is only used in _usb_read. +- Leave room for temperatures above 100 degrees and pad consistently for bxf +statline. +- Drop json stratum auth failed message log level to verbose. +- Change the processed value not the bufsiz in response to an end of message +marker. +- Don't lose data beyond the end of message in a usb read. +- Silence irrelevant warning. +- Only check strlen on end if end exists. +- Simplify the end of message detection in _usb_read and allow it to return +without doing another read if the message is already in the buffer. +- Increase work ageing time to 90 seconds for bxf driver to account for firmware +changes. +- Use the age_queued_work function in the bitfury driver. +- Provide a function to discard queued work based on age. +- The json_val in api.c is a borrowed reference, not a new one so don't decref +it. +- Decrement json references in api.c to not leak memory. +- line 2913 added urlencode +- With reliable writes to the avalon there is no need for the sleep delays +between writes. +- There is no need to limit usb write transfers to maxpacketsize and it's +harmful for large transfers on slow devices such as wrt routers. +- Disable USB stats which were not meant to be enabled by default and add extra +memory for a memory error when stats are enabled. +- Set limit and count to integers to not overflow during failed hotplug attempts +and then not trying again. +- Update api example compilation instructions. + + +Version 3.8.2 - 16th November 2013 + +- Add more verbose documentation to the readme files for windows users. 
+- Add more information on libusb failure to init telling users to check README +file. +- Add information on unloading cdc drivers on osx to README +- Prevent a deadlock with use of restart_threads by spawning a thread to send +the driver flush work messages. +- Set priority of various threads if possible. +- Add bxf data to api output. +- Do not hold the mining thread lock in restart_threads when calling the driver +flush work commands. +- Send extra work regularly to the bxf device and parse the needwork command by +sending the amount of work it requests. +- Allow messages to have arbitrary offsets in the bxf parser in case we have +lingering buffered data. +- Send the maxroll command to the bxf driver and store the value to see if we +need to update it. +- Add sending of flush command to bxf on flush_work +- Add flush and version commands to bxf start up, flush buffer and try to parse +version response string. +- Abstract out bxf recv message. +- Add extra bxf commands to usbutils +- Abstract out bxf send message to allow us to easily add extra commands. +- Don't run device restart code if the device is not enabled. +- Expand size of bitfury statline +- Various driver fixes for bitfury devices, including a flag from when first +valid work appears. +- Look up work results in bxf driver from correct variable. +- Correct incorrect error code in bxf driver for usb writes and add debugging. +- Add bxf details to usbutils. +- Implement a statline showing temperature for bxf +- Add api data for bxf device, sharing the hashrate function with bf1. +- Count no matching work as a hw error on bxf +- Add BXF to udev rules. +- Work id should be hexadecimal in bxf messages. +- Add unrecognised string debugging to bxf driver. +- Implement the main scanloop for bxf, trying to prevent it from ntime rolling +work if the work protocol does not allow it. +- Parse bxf work submits fully, submitting the results. +- Provide a function for setting the work ntime. 
+- Implement a skeleton parse bxf submit function.
+- Use the bxf read thread to set the device target and send its first work item.
+- Implement a bxf send work function and set update and restart functions to
+sending new work since that's the equivalent for that device.
+- Add temperature parsing to bxf driver
+- Create and destroy a basic bxf read thread.
+- Remove the buffer from bitfury info since it is only used on one pass in the
+bf1 device.
+- Add a rudimentary bxf detect one function.
+- Rename all bf1 specific functions in the bitfury driver, using a switch to
+choose correct function.
+- Rename bitfury_getinfo to bf1_getinfo since it's unique to bf1 devices.
+- Separate out the bf1 reset from bitfury reset.
+- Store the bitfury identity in the info struct.
+- BaB - updated tested OS comment
+- Uniquely identify the BF1 and BXF bitfury devices.
+- Remove the default libusb WinUsb pipe policies that don't suit us.
+- Only set the winusb pipe policy if it doesn't match our requirements instead
+of every transfer.
+- klondike - don't try to flush if not initialised
+- api.c trylock() add missing locklock
+- Use our new zero length packet support directly in windows.
+- Enable support for zero length packet on windows and auto clear pipe stalls.
+- util.c: Decreasing reference count on allocated JSON objects to prevent memory
+leak
+- api.c: Release apisock on error in api()
+- api.c: Release io_data->ptr when releasing io_data in io_free()
+- We can't connect to a GBT pool at all with fix protocol enabled.
+- Initialise the stgd lock mutex earlier to prevent dereferences when pool
+testing occurs before it.
+- Klondike support I2C USB layout also - as KLI
+- Return error codes in avalon_read() if they're not timeouts.
+- Break out of the avalon idle loop if we get a send error.
+- Set avalon ftdi latency to just less than the time it would take to fill the
+ftdi buffer at 115200 baud
+- Update example.conf
+- Only limit packetsize on usb out writes.
+- We must chop up every 64 bytes returned on an ftdi chip, not just the first 2
+bytes so revert to parsing the data internally in the avalon instead of using
+usbutils' simple ftdi parser.
+- Only retry 3 times in hfa_reset.
+- Only add_cgpu in hashfast driver once we have a real driver set up.
+- Clean up properly if hfa_detect_common fails in the hashfast driver.
+- --shares should be scaled to diff1 not absolute number of shares
+
+
+Version 3.8.1 - 11th November 2013
+
+- Revert "Send a zero length packet at the end of every usb transfer on windows
+in case libusb internally has batched them into one maxpacket sized."
+
+
+Version 3.8.0 - 10th November 2013
+
+- api update version to 2.0 and remove GPU from API-README
+- Remove now unused scrypt files.
+- api.c remove all GPU/gpu references and correct code as required
+- Rudimentary removal of GPU OpenCL and Scrypt features from api.c
+- Reorder configure alphabetically for devices to compile and fail if no support
+is selected to be compiled in.
+- BaB update/format some comments
+- BlackArrowBitfury early GPIO V1 driver
+- Fine tune the reading of results in bitfury driver to not lose any across work
+restarts or corrupt due to store results not parsed during restart.
+- Send a zero length packet at the end of every usb transfer on windows in case
+libusb internally has batched them into one maxpacket sized.
+- Framework for ntime rolling, keep looking for OP_USB_INIT replies when other
+packets received
+- Configure source for a new BaB driver
+- sha2 allow external access to some macros and the K array
+- Fixed a math issue when reporting fan speed on the status line.
+- Use the main hashlist to store work done in the bitfury driver and remove work
+from the list by time, thereby fixing the duplicates at startup. Count hardware
+errors for when no match occurs.
+- Add a get and queue helper work function.
+- Remove GPU mining code.
+- Use libusb's own zero length packet support unless we have to emulate it on +windows since only libusb knows for sure if it's needed. +- Unlock the avalon qlock while sending tasks to not hold the lock for an +extended period. +- Sleep in avalon send task on return to the function to allow other code to +work during the sleep period. +- Send zero length packets when terminating a usb write aligned to +maxpacketsize. +- Do the driver flush in avalon code lockless since it can lead to deadlocks. +- Reset the work_restart bool after the scanwork loop in case the driver flushes +work synchronously. +- Only check for the stratum clean message if we have had a valid message. +- Get rid of the stage thread since all work can be asynchronously added now via +hash_push anyway. +- Remove the now incorrect faq entry regarding scrypt difficulty. +- Check for fatal read errors and break out of the read loop in avalon. +- Send errors are basically fatal in avalon driver so break out of the send +tasks loop. +- Make the avalon driver return -1 for hash count when usb fails, allowing the +main loop code to send it the shutdown flag. +- Break out of the hash work loops when a failure is detected instead of +dropping into mt disable. +- Use usbutils' own ftdi parser for avalon and the ftdir's own latency for +managing timeouts since we can wait on reads with completely asynchronous +reads+writes. +- Use usbutils' own cps function for slowing rate of usb writes on avalon. +- Fix build for no libcurl +- Check length before submitting sync transfers + + +Version 3.7.2 - 5th November 2013 + +- Clean up completely on avalon shutdown. +- Use cgsem timed waits in avalon driver to not miss any queued wake ups to +account for async messages coming during a flush work. +- Statline before is too long on icarus that doesn't have monitoring. 
+- Different windows+usb combinations respond with varying levels of reliability +wrt timeouts so use a nominal extra 40ms before cancelling transfers that fail +to time out on their own. +- Do all hotplug_process under the write mining_thr_lock +- Fix for opt_worktime on big endian machines. +- Correct set_blockdiff for big endian machines. +- Make sure cgpu exists in the restart threads loop in cases of hotplug etc. +- Treat usb write timeout errors as unrecoverable. +- Transfer errors are filtered out in usbutils now so no need to look for them +in NODEV checks. +- Remove now unused entries from struct cg_usb_device +- Do not double up with checking for end of timeout measurements in usb +read/write. +- Do get_work in fill_queue without holding other locks. +- Initialise usb after all the locks and conditionals are initialised. +- Use only a trylock in flush queue to prevent deadlocks. +- Add a wr_trylock wrapper for pthread rw lock write trylock. +- Scale diff for scrypt when testing for block solves. +- Fix for non curses build. + + +Version 3.7.0 - 4th November 2013 + +- Use WRITEIOERR macro check for all usb writes. +- Always use a usb read buffer instead of having to explicitly enable it. +- Force unlocking of the console lock on restart to avoid corrupting the console +state when we finally quit. +- Never wait indefinitely for a pthread conditional in the hash_pop loop in case +the work scheduler misses the last wakeup. +- Make hash_pop signal the work scheduler each time it waits on the conditional +that it should look for more work. +- Discriminate between libusb transfer errors and regular libusb errors and make +sure to capture them all. +- Always read a full sized transfer for bulk reads. +- Deprecate preferred packet size functions in usbutils since they're unhelpful. +- Copy known transferred amount back to buffer for usb reads instead of +requested length. +- Treat timeout errors on usb writes as IO errors. 
+- Ignore iManufacturer from bitfury devices to support bluefury as well as
+redfury.
+- Add more debugging info for when usb details don't match.
+- Look for timeout overruns in usb read/write.
+- Use an int for usb_read/write to identify overruns.
+- Use the callback timeout as a safety mechanism only on windows.
+- Instead of using complicated sleeps to emulate characters per second on usb
+writes, submit only as many characters as can be transferred per usb poll of
+1ms, and use timeouts in bulk transfers, cancelling transfers only as a
+failsafe.
+- Remove discarded work from quota used.
+- Display works completed in summary and API data.
+- Store how many work items are worked on per pool.
+- Make each pool store its own reference for what the most current block is and
+fine tune management of block change in shared pool failover strategies using
+the information.
+- Rationalise use of current_hash to a single hex string the length of the
+previous block and display only the first non zero hex chars of the block in the
+status window.
+- Update uthash to latest.
+- show_hash doesn't know the size of the string so hard code the max size.
+- Remove as many initial zeroes as exist on share display, abstracting out a
+hash show function to use across different submission mechanisms.
+- Add missing endian swap functions for 64bits.
+- Sanity check for absurd target setting and divide by zero.
+- Abstract out conversion of a 256 bit endian number to a double, correcting
+errors and use it for determining any magnitude share diff.
+- Avoid the extra generation of a byte flipped hash2 in struct work and directly
+use the LE work hash.
+- Add a sanity check to avoid divide by zero crashes in set_target
+- Calculate diff from target accurately for all 256 bits.
+- Set a true 256bit binary target based on any diff value in set_target()
+- Provide a copy_work_noffset function for copying a work struct but changing
+its ntime.
+- Make calls to flush queue and flush work asynchronous wrt to the main work +loops. +- Share is also above target for submit noffset nonce. +- Use round for displaying current pool diff. +- Use round for stratum share diff display instead of floor. +- Use round instead of floor for displayed pool difficulty. +- Allow arbitrary diffs to be tested against nonces via a test_nonce_diff +function. +- Abstract out the rebuilding of hash2 in work. +- Share is above, not below target, when it doesn't meet it. +- Add the ability to add uint8 and uint16 entities to api data. +- Use a non blocking connect with a 1 second select timeout when initiating +stratum to allow us to iterate over all IPs returned by getaddrinfo in round +robin DNS pools. +- Minor style changes to output. +- Revert two different hash_sequence(_head)'s to one variable, use +HF_SEQUENCE_DISTANCE in both places +- Remove duplicate HF_SEQUENCE_DISTANCE() macro, and duplicate hash_sequence +from info structure +- Change SEQUENCE_DISTANCE() macro to HF_SEQUENCE_DISTANCE() +- Structure changes for OP_NONCE, add big endian header +- klondike - initialise stat_lock +- klondike - better to unlock locks than to lock them twice :) +- Add copyright notice to knc driver. +- Trivial style changes to knc driver. +- Improve performance of work generation by optimizing hex2bin and bin2hex +- klondike - change options to clock and temptarget only +- klondike - fix another uninit dev warning +- klondike - downgrade 'late update' but add an idle detect - and correct error +levels +- klondike - fix isc uninit warning +- Use a mutex to protect data in the knc structure, to prevent loading more work +during a flush, and unlock and return to main between calls to get_queued_work. +- Use the existing device_data for knc state data. +- Only count successful nonces as hashrate in the knc driver. +- Fix trivial warnings in knc driver. 
+- Add KNC to api
+- klondike - drop the device for hotplug if it's unresponsive
+- usbutils - usb_nodev() allow a driver to drop a device
+- klondike - single 'shutdown' and ensure it happens
+- klondike remove SCNu8 - unsupported on windows
+- Correctly calculate sleep_estimate in usbutils that may have been preventing
+usecps from working.
+- Use a sanity check on timeout on windows.
+- Better HW error count; disable permanently those cores which fail often
+- KnC driver: knc-spi-fpga ASIC driver
+- Fixup jansson & libusb include paths when using separate build directory
+- 'llround' is more suitable here than 'roundl'
+- Silence warning if MAX/MIN is already defined
+- Remove prebuilt ccan/opt dependencies
+- Reinstate block solve testing.
+- Dramatically simplify the calculation of blockdiff.
+- Simplify the set_target function, allowing it to work properly for fractional
+diffs.
+- Merge hashfast driver
+- Merge KnC driver
+
+
+Version 3.6.6 - 26th October 2013
+
+- Remove inappropriate extra locking in _usb_transfer_read
+
+
+Version 3.6.5 - 26th October 2013
+
+- klondike - fix uninitialised dev bug
+- Adjust the binary ntime data in submit_noffset_nonce even when there is no hex
+ntime string for eg. gbt.
+- Put an entry into the work struct telling drivers how much they can roll the
+ntime themselves.
+- Only set libusb cancellable status if the transfer succeeds.
+- Remove the applog on miner threads dying to prevent deadlocks on exit.
+- Do one extra guaranteed libusb event handling before testing if there are any
+pending async usb transfers.
+- Use a linked list for all usb transfers instead of just cancellable ones.
+- Provide a mechanism for informing drivers of updated work templates for
+stratum and gbt mining.
+- Add cancellable transfers correctly to the ct_list
+- Check for presence of thr in icarus get nonce for startup nonce testing to
+work.
+- Use cancellable usb transfers in the icarus driver to avoid having to loop and +poll when waiting for a response and to speed up work restart response time. +- Add a usb_read_ii_timeout_cancellable wrapper +- Add usb transfer cancellation on shutdown and documentation regarding where +cancellable transfers are suitable. +- Use cancellable transfers on bitfury device. +- Cancel cancellable usb transfers on work restart messages. +- Don't bother having a separate cancellable transfer struct for usb transfers, +simply include the list in the usb_transfer struct. +- Add wrappers for usb_read_cancellable and usb_read_timeout_cancellable +- Specifically set the cancellable state for it to not be uninitialised in the +usb transfer struct. +- Alter the usb cancellable list only under cgusb_fd_lock write lock. +- Pass the cancellable option to _usb_read options to decide on whether to add +usb transfers to the list of cancellable transfers. +- Create a linked list of potentially cancellable usb transfers. +- Don't attempt to disable curses or print a summary during an app restart to +prevent deadlocks. +- Keep the libusb event handle polling thread active until there are no async +usb transfers in progress. +- Keep a global counter of how many async usb transfers are in place. +- Perform libusb_submit_transfer under the write variant of cgusb_fd_lock +- klondike - error condition handling +- Avoid entering static libusb directory if --with-system-libusb is enabled. +- Minor opencl build corrections. +- Enable dynamic linking against system libusb --with-system-libusb +- Modify Makefile to only include opencl related code when configured in. +- Convert opencl to need to be explicitly enabled during build with +--enable-opencl +- Implement a cglock_destroy function. +- Implement a rwlock_destroy function. +- Implement a mutex_destroy function. +- Add usb command name to critical libusb error reporting. 
+- Use windows' own higher resolution time and handlers allowing us to have +higher precision absolute timeouts. +- Fix lldiv error in windows cgminer_t calculation. +- miner.php correct sort gen field names largest to smallest +- api ... the code related to device elapsed +- api add device elapsed since hotplug devices Elapsed is less than cgminer +Elapsed +- Drop usb buffering message to debug logging level. +- Do the ntime binary modification to the work struct when submitting an ntime +offset nonce within submit_noffset_nonce +- Code cleanup and improved documentation +- Improvements to support for BitBurner boards +- Convert libusb transfer errors to regular libusb error messages to allow for +accurate message reporting. + + +Version 3.6.4 - 18th October 2013 + +- Fixing the memory leak for remaining semaphores means we can go back to using +async transfers on other OSes with our own timeout management again. +- Use the forcelog function on shutdown to cope with indeterminate console lock +states due to killing of threads. +- Add a forcelog variant of applog which invalidates any console lock to force +output. +- Send pthread_cancel to failed completion_timeout that has timed out. +- Simplify queued hashtable by storing unqueued work separately in a single +pointer. +- bflsc use getinfo chip parallelization if it is present +- bflsc - fix brackets so [Chips] isn't always null +- Remove unused variables. +- Use cgcompletion timeouts for the unreliable shutdown functions on kill_work. +- Fix cgcompletion return code and free on successful completion. +- Provide a cg_completion_timeout helper function for unreliable functions that +takes arbitrary functions and parameters and reliably returns. +- Perform sync transfers on shutdown to allow final transfers to complete. +- Destroy cgsems used after transfers to not leave open files on osx. 
+- klondike rewrite work control +- allow __work_complete() access +- miner.h allow devices to tv_stamp work + + +Version 3.6.3 - 17th October 2013 + +- API add 'MHS %ds' to 'summary' +- Optional lock tracking and stats via the API +- Speed up polling repeat again in usb poll thread and handle async after the +message to disable polling is complete. +- Revert to using timeouts on !linux since libusb leaks memory without them. +- Revert to libusb instead of libusbx + + +Version 3.6.2 - 17th October 2013 + +- Remove unused components of jansson +- Remove unused parts of libusb +- Work around older libtoolize that fails without top ltmain.sh not being +present during autogen +- Fix open coded use of autoreconf in autogen +- Update jansson to only build parts we require and suited to our build +environment. +- Initial import of jansson-2.5 +- Prevent further USB transfers from occurring once the shutdown signal has been +sent to prevent transfers getting stuck and libusb failing to shut down. +- Make the USB polling thread poll every second to potentially aid longer +timeout transfers. +- Set device_diff on work in get_work to not be missed with drivers that use +get_work directly. +- Convert icarus driver to hash_driver_work model. +- bflsc - also allow ' 0' in DEVICES IN CHAIN +- bflsc - allow a 0 in DEVICES IN CHAIN +- Add needed EXTRA_DIST for libusbx. +- Update libusbx configure.ac changes. +- Revert libusb Makefile changes from going to libusbx. +- Fix trivial libusbx warnings. +- Convert libusb-1.0.16-rc10 to libusbx-1.0.17 + + +Version 3.6.1 - 14th October 2013 + +- Emulate the libusb_control_transfer sync setup in our async variant. 
+- usbutils - make all libusb_error_name messages the same + + +Version 3.6.0 - 14th October 2013 + +- increasing max miners for avalon driver +- using separate identifier for bitburner fury boards +- changes to bitburner driver for bitburner fury boards +- hexstr is too small in test_work_current +- Windows uses errno for WSAETIMEDOUT +- Convert the usb callback function to using cgsem_t timed waits to avoid race +conditions with conditionals/mutexes. +- Give correct return code in cgsem_mswait +- Check for correct timeout error in cgsem_mswait +- Fix util.h exports for cgsem_mswait +- Implement a generic cgsem_mswait similar to sem_timedwait +- Use the one LIBUSB_ERROR_TIMEOUT for cancelled transactions since this error +is explicitly tested for in various drivers. +- Do not use locking on usb callback function pthread signalling to prevent +deadlock with libusb's own event lock. +- Use a write lock when performing any USB control transfers to prevent +concurrent transfers. +- Free a libusb transfer after we have finished using it to avoid a dereference +in usb_control_transfer +- Do not perform bfi int patching for opencl1.2 or later. +- Although async transfers are meant to use heap memory, we never return before +the transfer function has completed so stack memory will suffice for control +transfers, fixing a memory leak in the process. +- klondike - correct/reverse min/max stats +- api incorrect message name +- klondike - use a link list queue rather than a circular buffer - and add +timing stats +- Use a timeout with usb handle events set to a nominal 200ms and wait for the +polling thread to shut down before deinitialising libusb. +- Use stack memory for hex used in stratum share submissions. +- Use stack memory in test_work_current, avoiding a malloc/free cycle each time. +- Provide a lower level __bin2hex function that does not allocate memory itself. +- Convert the bitfury driver to use the hash_driver_work version of hash_work. 
+- Add a hash_driver_work function to allow for drivers that wish to do their own +work queueing and management. +- Convert all usb control transfers to asynchronous communication with our own +timeout management as well. +- Klondike - increase circular read buffer size +- Klondike - extra zero value and range checking in temp conversion +- klondike - display MHz also +- Make pthread conditional timeouts handle all bulk usb transfer timeouts +performing libusb_cancel_transfer, disabling timeouts within libusb itself. +- Avoid calling get_statline_before on exit to avoid trying to use it on drivers +in an indeterminate state. +- Avoid calling get_statline on exit. +- Add a small amount to the usb timeout before cancelling to allow for a regular +usb polling interval to pass. +- Do not attempt to clear a usb halt before sending the cancel message since all +transfers should normally be cancelled before attempting to clear a halt +condition, and only change the return message to a timeout if it's consistent +with a cancellation. +- Retry up to USB_RETRY_MAX times to clear a halt condition before failing. +- Show the error number as well as the description in erroring bulk transfers. +- Drop logging level for failed to connect to stratum to verbose mode only since +we hit it regularly. +- We are always dependent on libusb handling events so use the blocking +libusb_handle_events in the polling thread and use a bool to know if we should +continue polling. +- Use fractional hashrate return values in bitfury_scanhash to minimise the +number of times we return 0 based on hashrate so far to further damp out +displayed hashrate. +- Check for presence of driver name in DRIVER_COUNT_FOUND to prevent strcmp on a +null pointer when a driver is not built in. +- CMR allow sending flash and clock commands +- Kill off threads that have failed using hash_sole_work instead of just +disabling them. 
+- Make the bf1 getinfo size a macro +- Failing to add_cgpu in bitfury should be a terminal failure. +- Check return values when attempting to open a BF1 device and set the msg size +as a macro. +- Display errors on failed usb read and write and consider sequential IO errors +a permanent failure. +- Use libusb's own error name function instead of hand coding the error names. +- Limit ms_tdiff to 1 hour as a sanity check. +- Enable the usb buffer in avalon driver. +- Check for async transfer variants of error messages. +- Remove unused variables. +- Try switching pools if for some reason we end up with only idle pools and have +ended up current_pool set to an idle one. +- Check a pool is stable for >5 mins before switching back to it. +- Minimise the time between dropping the read devlock and grabbing the write +devlock to avoid tons of logging spam in the interim. +- Check for libusb transfer stall error to be consistent with async IO errors +returned for a halt condition. +- Check for continuous IO errors on USB and consider the device inactive if more +than retry max. +- Make the devlock a cglock in usbutils and only grab the write lock for +fundamental changes allowing us to send and receive transfers concurrently +without lock contention. +- Prevent overflows in us_tdiff and ms_tdiff. +- Change second initialise message on bitfury verbose mode. +- Submitting an ntime offset nonce needs to be done on a copy of the work +instead of the original so abstract out shared components as much as possible, +minimising strdups in copy_work and make submit_work_async work take copied +work, cleaning up code in the process. +- Provide a way for drivers to submit work that it has internally rolled the +ntime value by returning the amount it has ntime rolled to be added. +- Typo in configure.ac +- Remove unmaintained broken ztex driver. 
+- Icarus - use a data structure for I/O rather than magic numbers
+- delete old tracked ccan/opt/*.o files
+- klondike correct cvtKlnToC() temperature calculation
+- klondike - correct 1st reply debug based on define
+- klondike - debug dump structured replies
+- klondike - avoid division by zero if maxcount is unexpectedly zero
+- klondike store and report errorcount and noise
+- klondike - fix chipstats api stats buffer overrun with 16 chips
+- klondike add new nonce count only once
+- klondike - report mh/s based on nonces found + put old estimate into API stats
+- klondike use a memcpy
+- klondike fix bracket tabs indenting
+- api.c missing Klondike from ASIC list
+- Klondike update code to current git
+- Add 2nd CMR to 01-cgminer.rules
+- Add Klondike to 01-cgminer.rules
+- Klondike to main directory
+- Klondike consistent code spacing
+- Klondike update driver code to current git
+- update firmware for 16 chips, add dist files
+- beta final 0.3.0 release
+- updated firmware, IOC method
+- prevent nonces when not state W
+- added driver config option support
+- fixes for 300 MHz, fix K1 parts list
+- update driver, docs
+- update firmware & utils
+- updated cgminer driver for 3.3.1
+- update firmware and driver, create new cgminer fork
+- update klondike driver
+- add cgminer driver file as-is
+- Add API output displaying USB cancellations.
+- Store statistics on how often we have to cancel async bulk transfers and add a
+debug message whenever we do.
+- Treat any unexpected timeouts waiting for async transfers as though there may
+be a usb halt condition and attempt to clear the halt before cancelling the
+transfer.
+- Remove zero packet flag on usb as it's unsupported outside linux and
+unnecessary.
+- Fake the libusb transfer timed out message if we force cancel it with our own
+async functions.
+- Use asynchronous transfers for all bulk transfers, allowing us to use our own
+timers and cancelling transfers that take too long.
+- Add libusb error warning message when significant error occurs.
+- Icarus CMR2 detect FPGA setup
+- Disable bitfury device thread on it disappearing.
+
+
+Version 3.5.0 - 29th September 2013
+
+- Add magic init sequence required on BF1 devices to get them mining on windows.
+- usbinfo.devlock is only ever write locked so convert it to a mutex
+- Icarus remove unneeded opt_debug tests due to applog being a macro
+- Icarus - CMR shouldn't wait the full timeout due to handle sharing
+- We should only yield once in cg_wunlock
+- Provide a function to downgrade a cglock from a write lock to an intermediate
+variant.
+- Deuglify use of _PARSE_COMMANDS macro expansions.
+- Deuglify use of usb parse commands macro in usbutils.
+- Use the driver add commands macros in api.c to avoid individually listing
+them.
+- Separate out asic fpga and opencl drivers in the driver parse commands macro
+for use individually as needed.
+- Use macro expansion in usb_find_devices to avoid explicitly listing them all.
+- Use macro expansion to iterate over all the drivers without explicitly writing
+them out in usbutils.c
+- Iterate over the bitfury offsets in order of decreasing likelihood.
+- Reattach the kernel driver on linux on usb_uninit.
+- Attach the kernel driver on failure to usb init on linux.
+- libusb kernel driver operations are only available on linux.
+- There is no need to get the external prototypes for drivers in cgminer.c any
+more.
+- Remove unnecessary gpu_threads initialisation.
+- Put avalon last in the sequence of adding drivers to prevent it trying to
+claim similar chip devices on startup.
+- Use macro expansion to iterate over all device drivers without needing to
+explicitly code in support in all places. Pass a hotplug bool to the detect()
+function to prevent opencl trying to hotplug GPUs.
+- Forward declare all device drivers in miner.h avoiding the need to export them
+everywhere else.
+- Add a noop function for driver detect when it's missing.
+- Reuse the DRIVER_ macros to avoid having yet another definition for DRV_
+- Use macro expansion to generate extern device_drv prototypes.
+- Create a macro list of drivers to enable easier addition of further drivers.
+- There is no point setting the BF1 preferred packet size to the maximum since
+it will do so automatically.
+- icarus ensure all cmr interfaces are initialised properly
+- usbutils - fix USBDEBUG warnings
+- Remove unnecessary steps in communicating with BF1 and just use USB interface
+1.
+- usbutils - usb_bulk_transfer fix the buf/data fix
+- usb_bulk_transfer - use the allocated buffer
+- Set preferred packet sizes per interface on BF1.
+- usbutils allow PrefPacketSize per endpoint
+- Remove magic control sequences on open/close on BF1 and just flush the read
+buffers.
+- Check return codes in getinfo and reset and fail as needed in BF1.
+- Check return code for bitfury_open and release resources properly on failed
+initialisation.
+- Abstract out flushing of interrupt reads in BF1 devices.
+- Perform interrupt read after close message on BF1 as per serial close.
+- Perform interrupt read flush as per serial open on BF1 devices.
+- Add information for 2nd USB interface on BF1 devices and choose interface 1
+for bulk transfers.
+- usbutils - bulk transfer copy test fix
+- usbutils - add USBDEBUG for usb_bulk_transfer
+- Add more read_ii variants to usbutils.
+- Name remainder of BFU usb commands used.
+- Use submit_tested_work in bitfury driver to avoid unnecessarily re-testing the
+work for validity.
+- Abstract out work submission once it's been tested, to be used by drivers that
+do their own internal validity testing.
+- Store the hash2 array in struct work for further reuse.
+- usbutils - which_intinfo not required
+- Use the test_nonce function within submit_nonce and store the uint32
+corresponding to hash2 37 for further use.
+- usbutils - interfaces must all be on one handle - ep implies the interface +- avalon stats use exact type +- Only set share diff if we've confirmed it's a share first. +- Update ASIC-README for bitfury devices. +- Use an array of offsets when checking nonces in bitfury_checkresults +- Limit the duration we wait for reads in BF1 based on time already elapsed to +account for other delays such as work restart messages or out of work. +- Minimise size of serial string we copy in BF1 stats to avoid overflow. +- Implement basic API stats for BF1 and increase array of results to check for +the rare straggling result. +- Space debug output for bf1 to separate from numerals. +- Abstract out the bitfury open close and reset functions and use them on +reinit. +- Rename BF1 devices BF1 +- Check for work restart, breaking out early after usb reads in BF1. +- Do not lose the first sets of results from BF1. +- There is no point checking for results from the next round of work on BF1. +- Last result returned by BF1 is an end of results marker so ignore it. +- restart_wait should return 0 if thr_restart is true. +- Remove unused code by bitfury driver since current driver uses serialised +scanhash. +- Meter out return of estimated hashes in BF1 to smooth out visible hashrate. +- Optimise inner scanhash loop for bf1. +- Add yet another backup work for triple buffering of work in bf1 to account for +extra late results returned and don't check nonce offsets which appear to never +return. +- Name the work request and result usb commands for BF1 +- Define a mandatory upper limit to waiting for reset and data on BF1 based on +full nonce duration. +- Decrease usb buffering to verbose logging. +- Add in first draft for a serialised work model sending/receiving data for BF1 +devices. +- Add complete close sequence to bf1 as it happens on serial. +- Provide a bitfury identify function for bf1. +- Reliably extract BF1 information at startup and reset the device. 
+- Add commands for getting BF1 bitfury info
+- Add magic BF1 bitfury open and close control sequences.
+- Add BF1 detection code to bitfury driver.
+- Create basic placeholders for bitfury driver code.
+- Add bf1 device information to usbutils to enable device detection.
+- Add basic defines for building for bitfury devices.
+- Add redfury device to udev rules.
+- avalon: display the FPGA controller version on API
+- pool_active uninitialised_var rolltime
+- Use macro expansion to only need to define usb enums and commands in one
+place.
+- usbutils saving incorrect overflow buffer
+- ignore libusb.la and *.lo on linux
+- icarus support CMR with no extensions
+- usbutils - interfaces don't work yet in libusb windows so disable for that only
+- Provide a --disable-libcurl config option to build support for stratum mining
+only.
+- Fix the api-example.c compile under Linux
+- usbutils - only release the device once - for the first intinfo
+- usbutils set_interface is no longer valid
+- usbutils interfaces must each have their own handle
+- usbutils kernel_detach should use the interface number
+- usbutils - allow the driver to change which_intinfo
+- Reset quotas on load balance for all pools at the same time to avoid running
+out during selection and unintentionally dropping to fallback.
+- Break out of select pool from a common point for appropriate debug messages
+and to avoid further tests.
+- usbutils correct/reverse CMR product numbers
+- usbutils specifically track handles and interfaces
+- change drivers to use usb_interface() - required for multi interface change
+- usbutils - allow a device to use multiple interfaces (and better var names)
+- Cast -1 to (char) to cope with different default char types on ARM.
+
+
+Version 3.4.3 - 13th September 2013
+
+- Put corefoundation and iokit separate in ldflags for darwin.
+- Add rules for libusb Makefile.am building on osx
+- Add flags for building libusb statically on osx.
+- Find the greatest common denominator in quotas and use the smallest number of +consecutive work items per pool in quota load balance mode to smooth hashrate +across pools with large quotas. Give excess quota to priority pool 0 instead of +pool 0. +- Avoid dynamically adding stack memory for nonce2 in the stratum send thread +and check the pool's nonce2_len will not cause an overflow. +- Add subdir-objects to automake options. +- Use inet_addr instead of inet_network to fix windows build. +- Remove unused pbase variable. +- Add support for socks4/4a proxies with stratum, and drop back to socks4 +support via the global --socks-proxy command to not break previous +configurations. +- Fix warning on mingw build. +- Only show long-poll message in pool summary if it's not using stratum. +- Increase the time for the waiting for work message to be given to be greater +than that required for a pool swap in the scheduler which is set to 5s. +- Change message in status when using a balanced pool strategy to notify if +there's a stratum pool as well. +- Use the --failover-only flag to have special meaning in combination with +load-balance mode to distribute any unused quota back to pool 0 to maintain +ratios amongst other pools. +- Display quota and allow it to be modified via the pool menu. +- Add API commands and modify output to support pool quota displaying and +changing. +- Change message in status when using a balanced pool strategy to notify if +there's a stratum pool as well. +- Add quota support to configuration files. +- Rotate pools on all failures to set a pool in select_pool. +- Use quotas for load-balance pool strategy. +- Provide a mechanism for setting a pool quota to be used by load-balance. +- Use the --socks-proxy option with stratum, changing it to defaulting to socks5 +and give appropriate message should it fail to connect. +- Cope with trailing slashes in stratum urls. +- Add more debugging messages when negotiating with proxies for stratum. 
+- Test specifically for socks5h in socks support for stratum.
+- Add support for socks5 proxy with stratum
+- Provide support for negotiating a stratum connection via http proxies.
+- Connect to the proxy URL and port if specified for stratum sockets instead of
+the pool directly.
+- Extract any proxy url and port to be used by sockaddr if possible using
+extract_sockaddr.
+- Make extract_sockaddr set variables passed to it rather than pool struct
+members.
+- miner.php sort the mcast rigs so they are always in the same relative order
+- miner.php allow sending the multicast message multiple times
+- miner.php mcast ignore duplicate replies
+
+
+Version 3.4.2 - 3rd September 2013
+
+- take_queued_work_bymidstate should use a write lock.
+- miner.php coding warning
+- miner.php disable 'gen' by default
+- miner.php allow formula generation of new fields
+- miner.php add doctype
+- miner.php remove incorrect echo
+- miner.php optional error if not enough mcast rigs are found
+
+
+Version 3.4.1 - 31st August 2013
+
+- API mcast add a description option with miner.php
+- Always use a maxpacketsize buffer in usb_bulk_transfer
+- bflsc ensure getinfo cannot overflow its storage buffer
+- Don't decref json values in stratum parsing due to memory corruption.
+- Use 64 bytes for all libusb control transfers.
+- Skip dissecting opt->names in parse_config if it doesn't exist.
+- Use an internal buffer in _usb_transfer_read in case the read is larger than
+the buffer passed to it.
+- ICA optional limit timing with short=N or long=N
+- Revert to old custom tolines function since strtok_r is not portable.
+- bflsc remove unused commented out code
+- logging - code mistake
+- logging - applogsiz() for large messages
+- Provide base structures for getaddrinfo.
+- Include string.h in bflsc driver.
+- Get rid of linear removal of spaces in bflsc text parsing and use strstr
+throughout instead.
+- Use reentrant strtok in tolines() function in bflsc to avoid racing on +contextless calls. +- Show how small a too small result in bflsc is. +- Duplicate the buffer in process_results in bflsc since strtok modifies it +making debugging output limited to one line. +- Only process nonces in bflsc if the breakdown function succeeds. +- Ignore zero count messages in bflsc instead of trying to parse them. +- Return ok in tolines when it doesn't match inprocess message for bflsc. +- Remove inprocess line instead of deleting all following responses in bflsc. +- Change ok testing logic in breakdown() in bflsc and return if not ok at any +stage. +- Check the return value of tolines in bflsc driver. +- Use strtok to parse lines in bflsc driver. +- Add libusb-1.0 m4 directory and gitignore file. +- Properly convert from ranlib to lt_init in configure.ac +- Make autoconf always build for libusb. +- More autoconf fixes. +- Unconditionally build jansson statically from the cgminer source tree. +- Only test for all usb devices once in configure.ac +- Fix various libusb warnings and possible bugs on linux build. +- Add make clean and maintainer-clean to autogen +- Remove examples from libusb Makefile and generated autoconf files. +- Fix libusb subdirectory builds. +- Remove cached files from libusb autoconf on running autogen.sh +- Remove unused HAVE_LISBUSB macro and use USE_USBUTILS everywhere. +- Use direct auto* files to avoid failure of autoreconf +- Remove unused and maintainer cleaned files +- Show RT_LIBS in ./configure output. +- First import of libusb-1.0 +- bflsc xlinkstr use snprintf +- Fix win32 build. +- Use take_queued_work_bymidstate in the bflsc driver to avoid the rare chance +repeated results come back from the same work item. +- Provide a funcion that looks up queued work by midstate and then removes it +from the device hash database. +- Fix no -rt library on darwin. +- Update included jansson to v2.4 +- Fix OSX build. 
+- Provide an osx fix for cgtimers and a fallback to timevals for all other +platforms !linux !win32 !osx. +- Move two more timer functions out of define macros to enable them to be used +by future osx code. +- cgtimer_sub is now the same since cgtimer_t should be the same on all +platforms. +- miner.php fix missing global +- Only count submitted nonces as diff1shares if they're valid. +- Substantially raise the maximum avalon frequency for water-cooled, over-volted +designs. +- Compile MCast.java with an old java +- API Multicast sample MCast.java+MCast.class +- BTB show C/MHz/mV for device +- api.c remove unused reply string +- api.c fix mcast debug message bug +- miner.php implement API Multicast handling to automatically find your local +net miners +- API mcast only reply to remote IP's that are allowed access +- Initial API Multicast response v0.1 to find cgminer APIs +- Use timespecs on windows as cgtimer_t to capitalise on the higher resolution +clock changes. +- Abstract out the conversion of system time to an lldiv_t in decimicroseconds. +- Use our own gettimeofday implementation on windows for it to be consistent +across ming builds and higher resolution. + + +Version 3.4.0 - 21st August 2013 + +- Use stack data for HW error% in avalon stats. +- Add avalon HW error% to stats and only show BTB variables if avalon is a BTB. +- Check for cnx_needed on each loop through wait_lp_current. +- Return positive for cnx_needed when no_work is true. +- Stratum is used more often so test for it first. +- Reorder support names alphabetically. +- Only display the no pool work message once if there are multiple waiters in +hash_pop +- Provide a message and set a bool when no work is available from any pools and +when it resumes again. +- We don't want to continue into the hash_pop function if the getq is frozen. 
+- Only report threads in and out in queued work devices across a get work since +the rest happens asynchronously and the get work is what the device might be +waiting on. +- Thread reportin and out can be static non inline. +- usbutils cps sleep_estimate is not an underestimate +- usbutils add cps stats estimates +- Provide cgtimer_sub helper functions. +- Provide cgtimer_to_ms helper functions. +- Rename cgsleep_prepare_r as cgtimer_time to get time in cgtimer_t format and +call cgsleep_prepare_r as a macro for cgtimer_time +- Use the reentrant cgsleep functions for usecps in usbutils. +- TimeBeginPeriod and TimeEndPeriod do not add significant overhead when run the +entire time for cgminer so avoid trying to maintain balanced numbers of them for +specific time calls to simplify code. +- Replace all references to the old n*sleep functions with the equivalent +cgsleep_*s replacements. +- timeGetTime uses huge resources on windows so revert to using timevals for its +implementation of cgtimer_t +- Quotient/remainder error in ms division. +- Only grab a queued work item if we successfully grab the lock to submit work +in bflsc_send_work +- BTB get version from Firmware +- Carve out the unused portions of sha2 implementation. +- Import Aaron D. Gifford's fast sha256 implementation. +- Increase the que_low watermarks on BFLSC for they are too low to keep the +device busy on scanwork loops. +- Provide cgtimer_to_timeval helper functions. +- Provide a timeval_to_cgtime helper function to reuse values. +- Check for thr->work_restart in restart_wait. +- We should be using que_low to decrease scan sleep time in bflsc. +- Prepare sleep time on bflsc if no dev needs work yet to avoid busy waiting. +- Simplify cgsleep code for windows by using a typedef for cgtimer_t that +resolves to clock resolution, using that internally. +- On windows use the higher accuracy timegettime function to really get 1ms +clock and timer accuracy. 
+- Use the cgsleep reentrant function to sleep for bflsc between read results to +account for time taken to perform reads. +- Use 100ms delay between checking for results on all bflsc devices as the +buffering of results mean checking more frequently just wastes CPU and causes +more lock contention for only marginally better latencies. +- Fix missed endtimeperiod in overrun timer on windows. +- Make cgsleep_us_r take an int64_t for us. +- Make the cgsleep functions build on windows. +- Use the cgsleep reentrant function in avalon_send_task. +- Use the reentrant cgsleep functions within the avalon_send_tasks function. +- Set high resolution timing on windows within the cgsleep functions. +- Use the reentrant cgsleep function to time sleeps on reading from avalon. +- Provide reentrant versions of cgsleep functions to allow start time to be set +separately from the beginning of the actual sleep, allowing scheduling delays to +be counted in the sleep. +- Make the nmsleep and nusleep functions use the new cgsleep functions +internally till functions are migrated to the new cgsleep API. +- Add a ms_to_timespec helper function, and create a cgsleep_ms function that +uses absolute timers with clock_nanosleep to avoid overruns. +- Add rt lib linkage to enable use of clock_nanosleep functions with older +glibc. +- Add necessary time header include to avalon driver. +- Do a sleep of the full duration it would take to do all the work using +clock_nanosleep in avalon_send_tasks to avoid sleep overruns before polling to +see if it's ready. +- Add a timeraddspec helper function. +- Provide a us_to_timespec helper function. +- Use the us_to_timeval helper function in the avalon driver. +- Provide a us_to_timeval helper function. +- Use timeval_to_spec helper in avalon driver. +- Add helper functions to convert timespec to timeval and vice versa. 
+- simplifying buffer full check +- forking bitburner write thread function +- making sure original Avalon is unaffected by BitBurner changes +- changes to queueing strategy for BitBurner boards +- Do not poll in avalon_get_results without sleeping if we have finished parsing +a full result. +- Add c to ambient temperature display for avalon driver. +- BTB allow up to 1400mV as per firmware limits +- avalon for timeout allow d='calculate it' and fix uninitialised +- Use cloned work when finding avalon results since another thread can discard +the work item while it's in use. +- Provide a variant of find_work_bymidstate that returns a clone of the found +work. + + +Version 3.3.4 - 14th August 2013 + +- API/miner.php add some % fields +- Nonce2 stratum submission is not working with nonce2 lengths >4, revert the +buggy __bin2hex function and use bin2hex. +- The write thread in avalon is only ever actually woken up by timeout so remove +the write semaphore and use a simple sleep poll. +- Fix warning. +- Interrupting reads on the avalon to start writes loses data so remove the +cgsem_post in the read code. +- Add room for the null byte at the end of the nonce2 string on stratum share +submission and zero the allocated ram. + + +Version 3.3.3 - 13th August 2013 + +- Only perform the bin2hex on nonce2 data if it's required for stratum +submission, thereby removing the last conversion of that type from stratum work +generation. +- Create a work data template when receiving stratum notification, allowing a +simple memcpy of the merkle root avoiding more hex2bin conversions on each work +generation. +- Export the workpadding char in miner.h +- Avoid a potential overflow should a pool specify a large nonce2 length with +stratum. +- Avoid one more hex2bin in gen stratum work. +- Rename work gbt_coinbase to coinbase to be in line with pool variable name. +- Perform merkle bin hex2bin on stratum notify to avoid doing it on each work +generation. 
+- Reuse just the one pool coinbase variable in stratum, avoiding more string +functions and storage in gen_stratum_work on each work generation. +- Rename pool gbt_coinbase variable to coinbase to combine it with the stratum +coinbase data. +- Use a nonce2 offset variable for both gbt and stratum to consolidate +requirements on work generation. +- Merge pull request #474 from kanoi/master +- util.c update quit call for new functions +- use correct define for OSX in util.c +- miner.h inline semaphores increase information on failure +- util.c expand quit to show file/func/line +- Merge remote-tracking branch 'conman/master' +- Cache as much of the gbt coinbase as possible to avoid doing unnecessary +hex2bin conversion on every work generation with gbt. +- We should be using a cg_wlock initially in generating stratum and gbt work +before downgrading the lock. +- Add the ability to downgrade a write variant of the cglocks. +- Fix --scrypt being required before scrypt intensities on command line or not +working at all via config files. +- Cache the hex2bin of pool nonce1 in stratum, avoiding hex2bin on each work +generation. +- Cache the binary generation of coinbase1 and 2 on stratum, avoiding a hex2bin +of coinbase1 and 2 on each work generation. +- cgsem - increase information on failure +- avalon init write_sem before use + + +- 3.3.2 - 9th August 2013 + +- Fix uninit variable warnings. +- usbutils - force check every combination +- Fix warning. +- Recreate curses windows on windows when a device is hotplugged to allow window +resizing without crashing. +- Update copyright notice. +- Limit intensity range according to whether scrypt is in use or not. +- Do not allow benchmark mode to be used with scrypt. +- Add a --bflsc-overheat command which allows you to set the throttling +temperature for BFLSC devices or disable it. +- Move bflsc defines to a header file. 
+- avalon allow frequency to be set via the API +- BTB voltage management via the API - and set default on startup +- Avalon BTB allow partial work to be transferred +- avalon_cts use correct buffer +- miner.php format Best Share +- remove unnecessary memcpy +- using more concise description +- using usb_ident +- forgot a return +- changes to Avalon driver for BitBurner boards +- Revert "Sleep after sending icarus work to emulate working at 115200 baud." +- api correct timeout stat display +- usb timeouts - min/max also +- log USB timeouts in API stats +- usbutils report failed timeouts +- usbutils ensure stats macros are using the macro arguments +- Check for negative wait time in socket_full. +- Fix extra argument passed to statline before. +- Adjust socket wait timeout in recv_line according to how long we've already +waited to avoid a 60 second wait dropping to 1 second due to a blocked socket. +- usbutils use a heap buffer for bulk read rather than stack +- usbutils only one bulk transfer call per stat +- set device_drv function noops when first add_cgpu +- usbutils - in init only change the config if needed +- bflsc nonce per work item stats +- bflsc increase flush count to handle parallel work +- force type checking on curses +- logging - size check sprintf +- usbutils - size check all sprintf +- cgminer - size check all sprintf +- size check get_datestamp/get_timestamp and remove unused cgpu->init +- make all statline overflow safe +- WU only needs +2 width +- Check for a timeout in avalon_scanhash and post to the write sem if we receive +one. +- Decay result count in avalon more slowly to not falsely detect idle periods as +low result return rates. +- Count the number of miners idled in avalon to account more accurately for when +its result return rate is too low. +- Fix potential dereference when starting avalon with all new work. +- Convert the decay_time function into one that truly creates an exponentially +decaying average over opt_log_interval. 
+- Only throttle avalon clockspeed in avalon_auto in non optimal temperature +settings if the fanspeed has reached maximum. +- Reinstate more aggressive <2% HW error target for avalon-auto +- Set avalon fan min and fan max to PWM values instead of percentage. +- Provide an --avalon-freq command line to give a valid range of frequencies for +avalon in auto mode. +- Set the avalon idle frequency to lowest if avalon auto is enabled and we have +an overheat condition. +- Decrease avalon frequency in auto mode if we are unable to maintain the +temperature in the optimal range. +- Don't count invalid nonces as hashrate for bflsc. +- Use a more conservative upper limit of 1% for hardware errors with avalon auto +frequency. +- Allow the avalon fanspeed range to be passed as parameter on the command line, +default to 20-100% +- Just display A: and R: for difficulty accepted and rejected to preserve screen +real estate and decrease decimal places for WU. +- correct device DR: and remove global U: +- Update all screen A/R to instead use DA/DR and device U to WU +- miner.php add ASC fields +- GPU fan rpm display 9999 when it overflows +- bflsc get volts stats needs its own GETVOLTS +- Support all avalon frequencies on the command line. +- Move to slightly more relaxed timeouts for avalon. +- MMQ turn on cps delays +- bflsc x-link header different to documentation +- Reset the other auto counters in avalon when idling a device. +- usbutils/icarus include more locking to usbdev access +- Icarus turn on cps delays by default +- usbutils cps correct time measurement + + +Version 3.3.1 - 25th June 2013 + +- Add an avalon-auto option which enables dynamic overclocking based on hardware +error rate for maximum effective hashrate. +- Add an --avalon-cutoff feature which puts the avalon idle should it reach this +temperature, defaulting to 60, re-enabling it when it gets to target +temperature. +- Change default avalon target temperature to 50 degrees. 
+- usbutils - incorrect test for * in bus:dev +- Redo +1 fix in bflsc. + + +Version 3.3.0 - 24th June 2013 + +- Add an --avalon-temp option to allow a user specified target temperature. +- Demote no matching work message to verbose logging only on avalon. +- Make the fan control on the avalon a simple PID controller with a target +temperature of 45. +- Demote bflsc hw error messages to verbose logging only. +- bflsc - handle xlink timeouts by having generic IO functions +- Demote the invalid nonce warning to log info. +- Ignore iManufacturer for BFLSC devices since the device name will still match +and some unbinned chips are missing it. +- sc_count shouldn't be +1 in bflsc. +- Use the info timeout for read_nl in getidentify bflsc. +- Add a usb_read_nl_timeout macro. +- bflsc try getinfo twice +- set MSG_ASCUSBNODEV always defined +- Hard code the preferred packet size for AMU, BLT and ICA. +- API V1.26 update ASIC support +- Icarus enable the read buffer for the detect nonce +- Support new overclocking speeds for avalon: 325, 350 and 375 +- undo icarus show errno, put it as debug in ubsutils +- icarus add errno to rerr and werr +- Sleep after sending icarus work to emulate working at 115200 baud. +- Use the nusleep function for sleeping after sending work in avalon. +- Show an integer only for diff if it is one. +- Set the avalon preferred packet size to 512. +- Reinstate the maxPacketSize determined by the end descriptor but allow the +driver to override it. +- Only update hashmeter if we have done hashes or haven't updated longer than +the log interval, fixing a us/ms error. +- Use only one cgsem in avalon signalling when the write thread should commit +work by reading the status bytes off during an avalon_read, minimising the +number of usb calls and resetting from only one place. +- Change avalon no valid work message to no matching work to match API +terminology. 
+- Use low latency usb transfers on the avalon, sleeping up to half a buffer's +worth only if no data is returning to increase hashrate, abolish lost work and +decrease CPU. +- Minimise the sleep times in avalon read to avoid result loss. +- Use a half nonce range before cycling through avalon's scanwork to ensure it +gets a chance to fill work if time is tight for the write thread to signal a +wakeup. +- Temporarily limit usb transfer sizes to 512 till we provide a way for each +driver to choose the upper limit. +- Increase watchdog sick time to longer than it takes for a pool to be detected +dead. +- Limit USB transfers to the max size reported by the descriptors. +- Increase the BFLSC timeout to allow the maximum number of results to be +returned for BAS in time. +- Decrease BAL and BAS latency to be just larger than one result read. +- disable curses device resize that crashes on windows +- BFLSC latest firmware has its own thermal cutoff set to 90, so use the same +value in case we have an old firmware that isn't throttling by itself. +- Drop watermark low limits for bflsc. +- Set the fanspeed on bflsc to max if we don't know the temperature. +- Use a low watermark for queueing mandatory work on bflsc instead of zero. +- Only mandatorily grab the bflsc mutex on submitting work when the queue is +empty. +- Adjust bflsc v2 watermarks. +- Only increase sleep time on bflsc if the queue isn't emptying at all over the +sleep duration. +- Fix warning. +- bflsc yet more API stats +- bflsc add some more API stats +- bflsc correct firmware matching +- bflsc correct comment +- Fixed Commands with No params +- bflsc driver support for v2 firmware +- Odd Issues +- Fixed Python Example +- Added Python Api Example +- Added Python Api Example +- Multiplier fail for microseconds vs milliseconds when updating hashmeter in +hash_queued_work. +- Only make threads report in/out across the actual driver code and update their +status on reporting out as well as in. 
+- usbutils initialise close key/sem +- usbutils cleanup linux semaphores on release +- Difficulty should be unconditionally byteswapped, not swapped to big endian. +- We should be setting cancelstate, not canceltype when disabling it for usb +locking. +- Pthread cancel state should be set to disable on usb DEVLOCK. +- Fanauto on bflsc is Z9X according to the source code, not 5 as per the draft +protocol document. + + +Version 3.2.2 - 16th June 2013 + +- Record and report USB pipe errors via API stats +- Suspend stratum connections when we know they've failed and don't try to recv +data from them once the socket no longer exists. +- Pipe error is quite common on usb3 so drop logging to verbose level only. +- ocl.c fix applog warnings on windows +- applog/quit fix GPU errors created +- usbutils - DEVLOCK other usbdev access +- applog usb device list can be > LOGBUFSIZ +- fix windows log warnings +- logging remove extra added +- remove varargs from logging/quit/in general as much as possible +- Don't yield when downgrading a cg ilock. +- Don't yield on grabbing the read lock variant of cglocks. +- Off by one error in device count for display. +- Don't display devices beyond the most_devices count in the curses status. +- Only display as many device rows as the maximum live existed at any time. +- usb lock out use cg locks +- usb lock out transfers during open/close +- Add error message to libusb pipe error +- Differentiate libusb control transfer pipe errors from transfer errors since +they're not fatal. +- Create a usb_bulk_transfer wrapper for libusb_bulk_transfer to cope with pipe +errors. +- Only show efficiency in pool information for pools that don't support local +work generation. +- Create a pool_localgen bool function for testing when a pool can generate work +locally. 
+- ignore file that is generated on Macs +- compile unix code on Mac OS X fixes not finding the config file in $HOME +- Use mining start time for device MH/U calculations +- Decrease the sleep duration before reading in avalon to not let the read +buffer overflow. +- Failure to read and write on pseudo semaphores on apple happens routinely on +shut down so should not be a quit error, just a warning. +- Unlock usb dev lock in the same place in usbutils. +- Sleep if the avalon buffer is empty and we've requested a read to allow the +write thread to take precedence. +- Yield after releasing a lock in case we are on a device with limited CPU +resources. +- Add the cgpu_info structure before avalon reset. +- Tidy up DEVLOCK/UNLOCK to have consistent use of the pstate variable without +needing brace level match. +- Icarus driver elaspsed timeout shouldn't be just USB I/O +- usbutils avoid leaving devlock locked when thread cancelled +- MMQ fix nodev failure caused by changes +- ubsutils lock all access to nodev and cgusb +- USB make device_path handled by usbutils +- tidy up free in device detect functions +- USB control creation and free of cgpu +- Add FAQ regarding Work Utility. +- Throttling the BFLSC at 80 seems to prevent generating garbled responses of +higher temps. +- Return after failed bin2hex conversion in bflsc. +- Demote failed hex2bin result to LOG_INFO and check return result in +driver-bflsc to avoid doing find_work_by_midstate. +- Set BFLSC fan speed coarsely to keep it under 60 or auto as per specs saying +it tries to stay below 60. +- Limit usbutils LATENCY_STD to 32ms to keep transfers under 512 bytes. +- Move macro definition to bflsc driver +- Use a longer timeout for retrieving bflsc details. +- Add a usb_read_ok_timeout wrapper to cope with slow init'ing devices. +- cgsem_post after creating the thread info +- Fix build. 
+- Use cgsem structures instead of the flaky pings in the work queue to start +mining threads and remove the unused thr_info_freeze function. + + +Version 3.2.1 - 7th June 2013 + +- Shorten the avalon statline to fit in the curses interface and show the lowest +speed fan cooling the asic devices. +- Set usbdev in usbutils after checking for nodev to avoid trying to access a +dereferenced value. +- AMU usbstatus correct name from enable UART +- Icarus AMU enable the UART +- Only libusb close if libusb release succeeds. +- Failed reads and writes on cgsem_post and cgsem_wait should be extremely rare. +- Implement cgminer specific cgsem semaphores to imitate unnamed semaphore +behaviour on osx which does not support them. +- Set cgusb->buffer to NULL when doing usb_buffer_disable. +- Temporarily fix apple not having semtimedop by ignoring the timeout value. +- BFLSC enable buffered USB reading +- Icarus use buffered USB reading +- bflsc & icarus use usb_ftdi_set_latency +- usb_ftdi_set_latency LOG_ERRs if called incorrectly +- add usb_ftdi_set_latency +- usbutils optional read buffering +- Set the avalon read transfer latency to avoid sleeping when no data is +returned after very short latency settings. +- correct bflsc BFLSC_BUFSIZ max calculation +- Fix build for !curses +- restore max code - since timeout is unsigned +- compile warning - remove unused max +- usb set FTDI latency higher to minimise status bytes +- Check for zero timeout on _usb_write. +- Check for zero timeout in usb read. +- Define a minimum polling time based on frequency of mandatory updates of ftdi +responses at 40ms. +- Sleep right up to the timeout instead of the first half if we find ourselves +polling in _usb_read +- Enforce half timeout sized sleeps in usb_read if we find the device is not +respecting libusb timeouts to avoid polling frequently. +- Add more ASIC documentation. 
+- Update README +- Remove start device limitation on log window size to allow it to get larger +with hotplugged devices. +- Switch logsize after hotplugging a device. +- Change switch_compact function name to switch_logsize to be used for other +changes. +- Only adjust cursor positions with curses locked. +- devs display - fix GPU duplicate bug +- Do not hotplug enable a device if devices have been specified and the hotplug +device falls outside this range. +- Change the --device parameter parsing and configuration to use ranges and +comma separated values. +- basic copyright statement in API.java +- devs display - show ZOMBIEs after all others +- Modify scrypt kernel message. +- Check for pool_disabled in wait_lp_current +- usbutils semun use proper def for linux which fixes OSX also +- Check for pool enabled in cnx_needed. +- Icarus add delays during intialisation +- Update documentation. +- Update copyrights of modified files. + + +Version 3.2.0 - 31st May 2013 + +- Add FAQ about windows USB keyboards and hotplug interactions. +- Fix mingw build warnings in icarus driver. +- Make usb_ftdi_cts use the _usb_transfer_read function. +- Update ASIC-README with avalon info regarding default behaviour. +- Break out of idling loop in avalon_idle if the buffer is full. +- Provide some defaults for avalon if none are specified and do not try to claim +the device if it fails to reset with them and no options are specified. +- usbutils automatically track IO errors +- usbutils allow a short wait for resources to be released +- correct semaphore timeout comment +- Set the fanspeed to the nominal chosen for GPUs. +- Inverted sem_init logic. +- Document avalon options in ASIC-README +- Do avalon driver detection last as it will try to claim any similar device and +they are not reliably detected. +- Clamp initial GPU fanspeed to within user specified range. +- Use a counting semaphore to signal the usb resource thread that it has work to +do. 
+- Avalon fan factor is already multiplied into the info values. +- Get rid of zeros which corrupt display. +- Logic fail on minimum fanspeed reporting. +- Provide a workaround for fan0 sensor not being used on avalon and pad fan RPM +with zeros. +- Add ambient temp and lowest fan RPM information to avalon statline. +- Display max temperature and fanspeed data for avalon. +- Set devices to disabled after they exit the hashing loops to prevent the +watchdog thread from trying to act on them. +- Add avalon driver to hotplug. +- Shut down the avalon mining thread if the device disappears. +- Check for no usb device in usb_ftdi_cts +- Check for valid usbdev in _usb_read in case the device has been unplugged. +- Scanhash functions perform driver shutdown so don't repeat it. +- Change the opencl shutdown sequence. +- Send the shutdown message to threads and do the thread shutdown functions +before more forcefully sending pthread_cancel to threads. +- Use the cgpu_info shutdown to determine when to stop the avalon read and write +threads. +- Use semaphores to signal a reset to pause the read thread while the write +thread does the actual reset, making all writes come from the same place. +- Remove now unneeded fgpautils.h include from avalon. +- usb_transfer_read should also not play with the endianness. +- Use the USB wrappers for avalon, telling usbutils that we want the raw data. +- Use separate ep for avalon tasks vs avalon reset and do not loop in write +indefinitely. +- Remove unneeded function and checks in avalon write code. +- CMR handle baud options +- work_restart is reset within the queued hash work loop. +- Fix avalon shutdown sequence. +- Execute driver shutdown sequence during kill_work. +- Use nusleep in avalon_get_results in place of nmsleep. +- Provide an nusleep equivalent function to nmsleep. 
+- usb/ica add more (incomplete) CMR settings +- Give a buffer of perceived results in avalon during idle periods to allow for +results once it becomes active again. +- libusb_control_transfer are meant to be endian specific, but host endianness +so no conversion is needed. +- Reuse old MTX Handle +- usbutils check all memory allocation +- usb separate thread for resource locking and modified windows locking code +- Icarus report data direction with comms errors +- Set the read and write threads for avalon to not cancel within libusb +functions and wait for the threads to pthread_join on shutdown. +- Offset needs to be incremented after avalon reads. +- Make the avalon_read function parse the ftdi responses appopriately. +- Use the avalon read timeout to completion if no data has been read. +- wait_avalon_ready should only be used before writes. +- Ask for the correct amount to read in avalon get results. +- Spawn the avalon read thread first with info->reset set to discard any data +till work is adequately queued. +- Use direct usb read commands to avoid ftdi data being automatically cut off in +avalon reads. +- Do a simple usb_read_once for the avalon result from a reset command. +- Make sure avalon is ready to receive more usb commands before sending them. +- Implement avalon_ready and avalon_wait_ready functions for when usb is ready +to receive commands. +- avalon_read should not loop but just return whatever it has succeeded in +reading. +- Set avalon_info to device data void struct. +- Specify avalon in avalon_reset. +- First pass rewriting serialdev into direct usb dev for avalon driver. +- Define a cts equivalent for direct usb and use it for avalon driver full. +- Compile usbutils into avalon driver. +- Check results come in at least at 2/3 the rate they should be on avalon and if +not, reset it. +- Give a warning but don't reset if the avalon buffer is full early. +- Discard any reads obtained from the avalon get results thread during a reset. 
+- Differentiate initial reset in avalon from subsequent ones. +- Perform a mandatory reset if the avalon buffer signals it's full before it has +queued its normal quota of work. +- Wait till buffer is cleared after sending idle tasks to avalon before +returning from avalon_idle. +- Lock qlock mutex during reset from read thread in avalon to prevent more work +being sent till the reset is over. +- Reset avalon if we continue to be unable to send all the work items. +- Add avalon reset response to debugging output. +- Do a wait_avalon_ready before sending a reset code. +- Iterate over spare bytes in the avalon result returned from a reset request +trying to find the beginning of the reset. +- Idle avalon after reset. +- Check for nothing but consecutive bad results on avalon and reset the FPGA if +it happens. +- Make submit_nonce return a bool for whether it's a valid share or not. +- Unset the work restart flag sooner in avalon_flush_work to avoid re-entering +the flush work function and just reset the queued counter instead of rotating +the array to avoid runs of no valid work. +- Implement an avalon_flush_work function for work restarts. +- Shut down avalon read and write threads and idle the miners on closing it. +- Tighter control over work submissions in avalon allows us to use a smaller +array. +- Rotate avalon array to reset the queued count before releasing the lock so +work will always be available on next pass. +- Move avalon read thread start till after conditional wait, store idle status +in avalon_info and use it to determine whether an error is appropriate or not. +- Wait till the avalon_send_tasks thread has filled the avalon with idle work +before starting the avalon_get_results thread. +- Use AVA_GETS_OK macro in avalon_read. +- Do all writes on avalon with a select() timeout to prevent indefinite blocking +and loop if less than desired is written. +- Check explicitly that ava_buffer_full equals the macro. 
+- Send initial reset as an avalon task to remove avalon_write function. +- avalon_clear_readbuf is no longer required. +- Check for 2 stray bytes on avalon reset. +- Create a separate thread for handling all work and idle submission to the +avalon which messages the scanhash function it has completed to update +statistics. +- usbutils ensure it compiles without stats +- usbutils include transfer mode in usbstats +- Give the avalon get results thread name the device number as well. +- Make sure we're not adjusting temps on every successful work retrieval on +avalon. +- Count missing work items from behind a successful work read in avalon as well. +- Change message for work not found in avalon parser. +- usbutils handle bulk_transfer partial writes +- Simplify debugging and only discard from avalon read buffer if at least one +full result has been discarded. +- Only display discarded bytes in avalon if they're not used as nonces. +- Only loop once through avalon_parse_results, but do so after timeouts as well. +- Only debug and move ram if spare bytes exist in avalon buffer. +- Remove off by one error. +- Inverted logic. +- Add more debugging to avalon reads. +- Convert unsigned size_ts to ints for parsing avalon messages. +- Cope with not finding nonces in avalon parsing gracefully by not overflowing +buffers. +- Adjust avalon temp values on one lot of valid nonces from the parser. +- Created a threaded message parser for avalon reads. +- Avalon_wait_write is not effective during resets so do it after going idle. +- Send only a single byte reset. +- Repeat going idle after avalon reset, and wait for write ready before sending +each reset request instead of some arbitrary sleep time. +- Timeouts on avalon_read and avalon_write should be 100ms. +- Don't close avalon after detecting it until we're cleaning up, instead using +reset for comms failures. +- Check for avalon_wait_write before sending reset command. +- Sleep in avalon_write_ready. 
+- Make avalon_wait_write a bool function and check its return value.
+- Show how many idle tasks are sent to avalon if it aborts on buffer full.
+- Reset avalon->device_fd after it is closed.
+- Create an avalon_wait_write function that is used before sending avalon idle
+command.
+- Avoid repeating avalon_idle in do_avalon_close and extra sleep.
+- Pass fd to avalon_idle.
+- Do avalon_reset after info structure is set up.
+- Rework avalon reset sequence to include idling of chips and waiting for them
+to go idle followed by 2nd reset and then checking result.
+- Do a non-blocking read of anything in the avalon buffer after opening the
+device.
+- Assign the avalon info data to the device_data in cgpu_info.
+- thread shutdown is different on windows
+- usbutils make all windows timeouts 999ms
+- usb add another Cairnsmore1 USB chip
+- icarus do the full detect test twice if required
+- CMR usb config guess
+- usb add transfer_read and commented out in icarus
+- usbutils allow unrounded control transfers
+- icarus ICA initialisation
+- icarus report err on read failure
+- icarus correct device_id and use device_data for icarus_info
+- miner.h remove unused device_file and add device_data
+- miner.h icarus no longer uses fd
+- icarus AMU config transfers
+- Create a logwin_update function which mandatorily updates the logwin and use
+it when input is expected to prevent display refresh delays.
+- usbutils force an unknown IDENT for zero
+- icarus set default options/timing based on device
+- Must unlock curses as well in logwin_update.
+- Create a logwin_update function which mandatorily updates the logwin and use
+it when input is expected to prevent display refresh delays.
+- icarus report usb write error information
+- Add name to icarus copyright notice.
+- Check for *pth dereference on pthread_join
+- usbutils name latency correctly
+- Check for restart before buffering more reads in Icarus.
+- Icarus should timeout if it's greater than the timeout duration even if it's
+receiving data.
+- We should check for amount buffered in icarus get_nonce against amount already
+received.
+- Make mining threads report out during work submission.
+- submit_work_async is no longer used directly by driver code.
+- Fix first read timeout on icarus get nonce.
+- Retry icarus_initialise if the first read attempt fails.
+- Properly pthread_join miner threads on shutdown.
+- Properly pthread_join miner threads on shutdown.
+- Use a persistent single separate thread for stratum share submission that uses
+workqueues since all stratum sends are serialised.
+- All stratum calls to recv_line are serialised from the one place so there is
+no need to use locking around recv().
+- Only allow the mining thread to be cancelled when it is not within driver
+code, making for cleaner shutdown and allowing us to pthread_join the miner
+threads on kill_work().
+- Only allow the mining thread to be cancelled when it is not within driver
+code, making for cleaner shutdown and allowing us to pthread_join the miner
+threads on kill_work().
+- Set pool->probed to true after an attempt to resolve the url via stratum code.
+- icarus test nodev everywhere
+- usbutils/icarus separate FTDI transfer values and more debug
+- add icarus to hotplug
+- usbutils add rest of icarus
+- simple serial-USB python test script
+- icarus->USB v0.1 incomplete - missing initialise()
+- README spelling
+- Update documentation for icarus switch to USB
+- Add USB rules for supported USB devices
+- switch icarus configuration to usb
+- usbutils new command for icarus
+- usb add a numeric sub-identity for each name
+- usbutils - make FTDI handling automatic
+- fix duplicate name
+- usbutils set Black Arrow Lancelot's as BAL and match the lot->llt name
+- usbutils identify Icarus devices
+- libusb_control_transfer 16 bit words are endian specific.
+- usb_applog separate amt display
+- Show pool difficulty more verbosely if it changes via stratum.
+- Attribute whatever stats we can get on untracked stratum shares based on
+current pool diff.
+- Provide a --lowmem option which does not cache shares on failed submission to
+prevent low memory hardware (eg Avalon) from crashing.
+- Update util.c
+
+
+Version 3.1.1 - May 11th, 2013
+
+- Use a discrete device target for scrypt that dynamically changes to ensure we
+still report a work utility even if no shares are submitted such as in solo
+mining.
+- Make set_work_target a function to set a specified char as target for use
+elsewhere.
+- Further consolidate the hash regeneration between sha and scrypt doing it only
+once and always checking the share diff for both before submission.
+- Regenerate the hash before checking the share diff in hashtest().
+- Minor typo.
+- Use a scantime of 30 seconds for scrypt if none is specified.
+- Support more shares to be returned for scrypt mining.
+- Update the write config to properly record device entries and remove disabled
+option.
+- Show a different warning and loglevel for failure to resolve a URL on first or
+subsequent testing of stratum pool URLs.
+- Fix the problem of setting up termio of ttyUSB0 for icarus. the CSIZE is the
+mask of CS2/4/8 From: navyxliu
+- Set all stratum sockets to nonblocking to avoid trying to use MSG_DONTWAIT on
+windows.
+- Fix warnings on win32 build.
+- Only use MSG_NOSIGNAL for !win32 since it doesn't exist on windows.
+- Use MSG_NOSIGNAL on stratum send()
+- Set TCP_NODELAY for !linux for raw sockets.
+- Use TCP_NODELAY with raw sockets if !opt_delaynet
+- Make raw sockets compile on windows
+- Recheck select succeeds on EWOULDBLOCK for stratum.
+- usbutils/mmq fixed size usb_read default to wait for all data
+- usbutils optional (disabled by default) dev debug
+- Add an ftdi usb read macro without newline
+- Avalon usb interface should be 0.
+- Add more debug for failure to USB init.
+- Recv() should all be non-blocking for raw sockets in stratum. +- Change verbosity and error for getaddrinfo warnings in setup stratum socket. +- Free servinfo after p is checked in setup stratum socket. +- Use raw sockets without curl for stratum communications. +- Sacrifice curl handle memory on stratum disconnects on all versions of libcurl +to avoid curl corruption. +- Don't use TCP_NODELAY if opt_delaynet is enabled with stratum. +- Fix warnings in avalon driver. +- Make FULLNONCE an ULL to fix a warning on 32 bit. +- ztx correct applog typing +- ocl correct applog typing +- util correct applog typing +- api correct applog typing +- cgminer correct applog typing +- scrypt correct applog typing +- bfl correct applog typing +- ica correct applog typing +- mmq correct applog typing +- adl fix trailing % +- usbutils correct applog typing +- applog - force type checking +- Simplify the many lines passed as API data in the avalon driver now that the +API does not need persistent storage for the name. +- Duplicate the name string always in api_add_data_full to not need persistent +storage for names passed to it. +- Add extra matching work count data in API for Avalon with 4 modules. + + +Version 3.1.0 - April 28th, 2013 + +- va_copy is meant to be matched by a va_end in log_generic. +- usbutils remove_in_use break +- usbutils remove_in_use missing prev +- usbutils missing add_in_use +- Clean up summary slightly better on exit. +- Make the scan sleep time after scanwork in bflsc dynamic to keep queues +between watermark levels. +- Remove unused temp counts in bflsc. +- Calculate a rolling 5 min average set of temperatures for bflsc. +- Damp the display of voltage for BFLSC devices. +- Damp the temperature display measurement for bflsc since it fluctuates so +wildly. +- bflsc add volt stats +- Handle failed tolines command in bflsc driver. +- Can use a read lock instead of a write lock in bflsc scanwork. 
+- Since we are filling a queue on the bflsc devices, there is no need to run +through scanwork frequently provided we use the restart_wait function to abort +early during a block change. +- Remove flushed work in bfl scanwork from the hash table. +- Set correct device in process_nonces in bflsc driver. +- bflsc add work reply INPROCESS: missing from the spec +- bflsc put in some error messages not yet written +- bflsc get completed hashes as late as possible +- Fix potential memory leak with unused work items in bflsc_queue_full +- Reverse bools in bflsc_queue_full +- Avoid recursive loop calling correct function instead. +- bflsc fix details identification +- Differentiate BFLSC device from regular bitforce and give warning if no +support is compiled in. +- util.c str_text make a fully text readable version of str +- BFLSC fix FPGA identity overlap +- Locking error in bflsc_send_work +- Use htobe32 function for converting nonce in bflsc. +- Replace deprecated bzero with memset in bflsc driver. +- Fix compilation of bflsc driver without opencl. +- Check for realloc failures in bflsc driver. +- Check for failure to calloc in bflsc driver. +- Trivial style change +- Use copy_time function in bflsc driver. +- Use cgtime in bflsc driver and update copyright notice. +- Use a separate function for bfl initialise that doesn't require locking. +- Fix BFLSC building. +- bflsc v0.1 + + +Version 3.0.1 - April 25th, 2013 + +- Bypass attempting to read and save binary files on OSX to avoid crashes on >1 +GPU. +- Receive failures in recv_line should unconditionally fail. +- Use sock_blocks in api.c +- Use sock_blocks function for stratum send and receive. +- Create an OS specific sock_blocks function. + + +Version 3.0.0 - April 22nd, 2013 + +- Further fix distdir for hexdump.c +- Fix build and distdir. +- Remove all CPU mining code. +- compile on win32 +- Update SCRYPT README with improved hashrates for 7970. 
+- Use copy_time helper throughout cgminer.c
+- Provide wrappers for commonly used timer routines with API stats.
+- Avoid one cgtime call in sole_hash_work.
+- Fulltest is true if value is <= target.
+- Use system host to endian functions for clarity in fulltest.
+- Provide endian_flipX functions to avoid special casing big endian in cgminer.c
+- Provide a flip128 helper to simplify big endian flipping.
+- Use flip helpers to simplify code for calculation of midstate.
+- Use flip32 function instead of open coding it in gen_stratum_work.
+- Move util.c exports to util.h
+- Fix warning on building avalon on win32
+- Use cgtime in driver-avalon.c
+- Use cgtime in driver-icarus.c
+- Use cgtime in driver-bitforce.c
+- Use cgtime in logging.c
+- Use cgtime in usbutils.c
+- Use cgtime in driver-opencl.c
+- Use cgtime wrapper in driver-modminer.c
+- Use cgtime in driver-ztex.c
+- Use cgtime in compat.h
+- Use cgtime instead of gettimeofday in fpgautils.c
+- Replace gettimeofday usage in cgminer.c with cgtime
+- Create a cgminer specific gettimeofday wrapper that is always called with tz
+set to NULL and increases the resolution on windows.
+- Add high resolution to nmsleep wrapper on windows.
+- Set default ocl work size for scrypt to 256.
+- define le32toh if needed
+- filter out the wrong result from adjust fan code
+- compile avalon driver on win32 and win64
+- Restart threads on the rare chance we found the block ourselves.
+- Add more FAQs about crossfire.
+- Set last device valid work on adding device.
+- Increment last device valid work count in submit_nonce to cover scrypt.
+- Set opt_scrypt drv max diff for correctness.
+- Make scrypt submission use the submit_nonce code, with nonces matching
+endianness.
+- Do testing for HW errors on submit nonce for both scrypt and sha.
+- Increment hardware error count from the one site.
+- Rename scrypt regenhash function for consistency.
+- Add new best share info to verbose logging.
+- Add notice for when network diff is changed. +- Convert error getting device IDs in ocl code to info log level only since +multiple platforms may be installed and the error is harmless there. +- Unnecessary extra array in ocl code. +- Further driver FAQs. +- Add MAC FAQ. +- Add more FAQ details. +- Check for work restart after disable in the hash queued work loop since it may +be a long time before we re-enable a device. +- Unconditionally test for many wrong results on avalon and reset to avoid +passing a corrupt avalon result to temperature code. +- build out of source dir +- Set device_diff for queued work or there will be no diff1 share count. +- Only reset an avalon device with no results when there are no results +consecutively. +- More FAQs. +- More FAQs. +- Cleanup when stratum curl fails to initialise. +- Avoid applog in recalloc_sock. +- Avoid applog under stratum_lock in recv_line. +- Avoid applog under stratum_lock in __stratum_send. +- Put spacing around locking in util.c for clarity. +- Avoid applog under cg_wlock. +- Put spacing around locking code for clarity. +- Avoid applog under pool_lock. +- Avoid more recursive locks. +- Avoid applog while ch_lock is held. +- Avoid recursive locks in fill_queue. +- Variable is already initialised in global scope. +- More GPU faqs. +- More README faqs. +- Yet more README faqs. +- Add more faqs to README. +- Wrap result wrong tests in avalon scanhash in unlikely() and only consider a +hash count of zero wrong if a restart wasn't issued. +- avalon: if result_wrong >= get_work_count jump out the read loop +- Fix warning on 32bit. +- Fix warning on 32bit. +- Avoid curl_easy_cleanup on old curl versions in setup_stratum_curl as well. +- fix the fan control on max temp2/3 +- for some reason network down. 
one simple cgminer command: "cgminer -o +127.0.0.1:8888 -O fa:ke --avalon-options 115200:32:10:50:256" can idle the +avalon for safe power and protect chip +- if hash_count == 0; reinit avalon, fix the 0MHS bug use the max value of temp1 +and temp2 for fan control +- Reinstate the matching_work_count per subdevice on avalon based on the work +subid. +- Avalon driver is missing the drv_id. +- Rationalise and simplify the share diff and block solve detection to a common +site. +- Rationalise and simplify the share diff and block solve detection to a common +site. +- Make the avalon array size a macro. +- Use replacement of work items in the avalon buffer as needed instead of +flushing them. +- Reinstate wrong work count to reset avalon regardless and display number of +wrong results. +- Revert "The result_wrong measurement for avalon is continually leading to +false positives so remove it." +- select() on serial usb in avalon does not work properly with zero timeout. +- The result_wrong measurement for avalon is continually leading to false +positives so remove it. +- Revert "Use only 2 queued work arrays in avalon." +- Use no timeout on further reads in avalon_gets +- Do sequential reads in avalon_get_reset to cope with partial reads. +- Show read discrepancy in avalon_get_reset. +- Reuse avalon_get_work_count variable. +- Check for AVA_GETS_RESTART when deciding if avalon has messed up. +- Make the detection of all wrong results on avalon much more conservative to +avoid false positives on work restarts. +- Show error codes on select and read fail in avalon. +- If we get a restart message in avalon_gets still check if there's a receive +message to parse first without a timeout before returning AVA_GETS_RESTART. +- Use only 2 queued work arrays in avalon. +- avalon_gets is always called from the one call site so inline it. +- The read_count is unused by the avalon get result code and no longer required +for avalon reset so simplify code removing it. 
+- Use a separate avalon_get_reset function for resetting avalon instead of using +avalon_get_result. +- The current hash count returned by avalon scanhash is just an obfuscated +utility counter so make it explicit. +- Check for a restart before a timeout in message parsing code in avalon. +- We should check for a restart message before checking for a timeout in avalon +scanhash. +- Store the subid for the work item in avalon. +- usbutils more stats for bflsc +- Fix record_temp_fan function in avalon driver. Patch by Xiangfu + +- Remove inappropriate memset of struct avalon result which was corrupting fan +values. +- Fix warning with no curses built in. +- Bump version to 2.11.4 +- Add API support for Avalon. +- Only do_avalon_close once on multiple errors. +- Reset the result_wrong count on block change in avalon scanhash to prevent +false positives for all nonces failed. +- Small timeouts on select() instead of instant timeout increase reliability of +socket reads and writes. +- Only get extra work in fill_queue if we don't have any unqueued work in the +list. +- Small timeouts on select() instead of instant timeout increase reliability of +socket reads and writes. +- Rotate the avalon work array and free work on AVA_SEND_BUFFER_EMPTY as well. +- Only get extra work in fill_queue if we don't have any unqueued work in the +list. +- Don't get any work if our queue is already full in avalon_fill. +- Differentiate socket closed from socket error in recv_line. +- Differentiate socket closed from socket error in recv_line. +- Free avalon->works in the event we call avalon_prepare on failure to +initialise. +- Fix warnings. +- Create an array of 4 lots of work for avalon and cycle through them. +- Remove unused per unit matching work count for avalon. +- Rename the confusing avalon_info pointer. +- Simplify avalon scanhash code using the new find_queued_work_bymidstate +function. Partially works only. +- Members of cgpu_info for avalon are not meant to be in the union. 
+- Use correct struct device_drv for avalon_drv.
+- cgminer.c -S help to only say Icarus
+- Check enough work is queued before queueing more in avalon_fill.
+- Actually put the work in the avalon queue.
+- Rename avalon_api to avalon_drv.
+- First draft of port of avalon driver to new cgminer queued infrastructure.
+- Add Makefile entry for driver-avalon.
+- Add configure support for avalon.
+
+
+Version 2.11.4 - April 5th, 2013
+
+- Remove bfl-sc option from configure for 2.11 branch.
+- Only update hashrate calculation with the log interval.
+- Update the total_tv_end only when we show the log to prevent failure to update
+logs.
+- Minor README updates.
+- Add example 7970 tuning for scrypt in readme.
+- Update driver recommendations.
+- Add extensive GPU FAQs for the flood of new Scrypt miners.
+- Remove help option for cpumining in build environment.
+- Remove scripts that make it too easy to compile CPU mining support.
+- Win32 and win64 build updates
+- Remove references to CPU mining from README.
+- Show share hash as little endian as needed.
+- usbutils extra message requirements
+- Make hashmeter frequency for hash_queued_work match sole_work.
+- Update links and recommended SDKs.
+- Update scrypt readme re drivers and sdk.
+- usbutils.c usb_cmdname() usb_cmds -> string name
+- BFL FPGA Windows timeout set to 999ms
+- AUTHORS - spam update time (one year since the last)
+- Update README for x970 memdiff values.
+- Update README to match changes to display.
+- Remove increasingly irrelevant discarded work from status lines.
+- Remove increasingly irrelevant queued and efficiency values from status and
+move WU to status line.
+- Allow cgminer to start if usb hotplug is enabled but no devices yet exist.
+- Do not scan other gpu platforms if one is specified.
+- Update README for sync objects on windows.
+- Update README about intensity.
+- Add information for setting gpu max alloc and sync parameters for windows with
+scrypt.
+- If the hashmeter is less than the log interval and being updated by the
+watchdog, don't update the hashrate.
+
+
+Version 2.11.3 - March 17, 2013
+
+- Update docs and reorder README to show executive summary near top.
+- Update the hashmeter at most 5 times per second.
+- Usbutils use its own internal read buffer
+- Calculate work utility for devices that support target diffs of greater than
+1, and update scrypt code to use it.
+- usbutils allow read termination match to be a string
+- Set default GPU threads to 1 for scrypt.
+- Connect backup stratum pools if the primary pool cannot deliver work.
+- Use a new algorithm for choosing a thread concurrency when none or no shader
+value is specified for scrypt.
+- Do not round up the bufsize to the maximum allocable with scrypt.
+- Remove the rounding-up of the scrypt padbuffer which was not effectual and
+counter-productive on devices with lots of ram, limiting thread concurrencies
+and intensities.
+- bufsize is an unsigned integer, make it so for debug.
+- Update the hashmeter once per second but only display the extra logs every
+opt_log_interval.
+- add a dummy ztex to usbutils so cgminer -n lists ztex also
+- nDevs required for -n with usb
+- USB device list - convert some common error numbers to messages
+- USB -n 'known' text only without ---usb-list-all
+- USB modify -n and --usb-dump to only show known devices or use new
+--usb-list-all option to see all
+- Make pool adding while running asynchronous, using the pool test thread
+functionality.
+- Only curl easy cleanup a stratum curl if it exists.
+- Sacrifice the ram of curl handles in stratum disconnects when we have built
+with old libcurl to avoid crashes.
+- cgminer -n to include a USB device list
+- usbutils allow call of usb_all() from other code
+- Convert gbt_lock to a cg_lock.
+- Add intermediate variants of cglocks that can be up or downgraded to read or
+write locks and use them for stratum work generation.
+- Move the stratum and GBT data to be protected under a new cg_lock data_lock. +- Convert the ch_lock to cg_lock. +- Convert the control_lock to a cg_lock. +- Remove unused qd_lock. +- Implement cg_lock write biased rwlocks. +- do usb_initialise() after the started message so we see it +- --usb-dump display brief dump if value = 0 +- USB add --usb options to limit USB device selection v0.1 + + +Version 2.11.2 - March 9, 2013 + +- Whitelist AMD APP SDK 2.8 for diablo kernel. +- Cope with the highest opencl platform not having usable devices. +- Fix memory leak with share submission on GPU work structures as discovered by +twobitcoins. +- usb_cleanup() without locking. +- Use curl_easy_cleanup to close any open stratum sockets. +- Show pool number in switch message +- Don't start testing any pools with the watchpool thread if any of the test +threads are still active. +- Set sockd to false should curl setup fail on stratum. +- Close any open sockets when reusing a curl handle and reopen the socket +whenever we're retrying stratum. +- Set pool died on failed testing to allow idle flag and time to be set. +- Remove unused pthread_t typedefs from struct pool. +- Perform pool_resus on all pools that are found alive with the test pool +threads. +- Use pool_unworkable in select_balanced as well. +- Differentiate pool_unusable from pool_unworkable. +- Keep a connection open on higher priority stratum pools to fail back to them. +- Rename threads according to what pool they're associated with as well. +- Set the wrong bool in pool_active +- Start the stratum thread only if we successfully init and authorise it, +otherwise unset the init flag. +- Make the initialisation of the stratum thread more robust allowing the +watchpool thread safe access to it after the stratum thread is started. 
+- API no longer ignore send() status +- API make the main socket non-static + + +Version 2.11.1 - March 7, 2013 + +- Shorten the time before keepalive probes are sent out and how frequently +they're sent with stratum curls. +- Only set stratum auth once to prevent multiple threads being started. +- Display select return value on select fail in stratum thread. +- Clear the socket of anything in the receive buffer if we're going to retry +connecting. +- Allow pools to be resuscitated on first startup by the watchpool thread. +- Check all pools simultaneously at startup switching to the first alive one to +speed up startup. +- Clear just the socket buffer when we don't care what is left in a stratum +socket. +- Clear the stratum socket whenever we are closing it since the buffer is going +to be reused. +- Do not continue work from a stratum pool where the connection has been +interrupted. +- Reset stratum_notify flag on suspend_stratum as well. +- Close any sockets opened if we fail to initiate stratum but have opened the +socket. +- Close any existing stratum socket if we are attempting to restart stratum so +the pool knows the connection has gone. +- Show mechanism of stratum interruption if select times out. +- Make stratum connection interrupted message higher priority to be visible at +normal logging levels. +- Implement client.show_message support for stratum. +- API add 'Network Difficulty' to 'coin' +- Setup BFLSC support +- API use control_lock when switching pools +- Make sure to retry only once with noresume support for stratum. +- Instead of keeping track of when the last work item was generated to keep +stratum connections open, keep them open if any shares have been submitted +awaiting a response. +- usbutils.c copy full size to 'Last Command' +- configure - set USE_USBUTILS when usbutils is required and use it in the code +- Clear last pool work on switching pools if the current pool supports local +work generation or we are in failover only mode. 
+- make rw locks: mining_thr_lock and devices_lock +- Release MMQ device only once (not 4 times) +- api.c fix MSG overlap +- Hotplug - allow setting interval via --hotplug or API +- curses - fix - put a dev_width inside #ifdef +- usb_cleanup() use correct locking mechanism +- Implement and use usb_cleanup() on shutdown or restart +- miner.php report 'Last Valid Work' as time before request +- API - return Last Valid Work +- api -> drv +- ZTX bug set missing drv_id + + +Version 2.11.0 - March 2, 2013 + +- Update kernel file names signifying changes. +- Update a pool's last work time when the work is popped as well as staged. +- API always report failed send() replies +- Update diff stale: total and pools when stratum throws away shares +- Keep stratum connections open for 2 minutes after the last work item was +staged to allow stray shares to be submitted on pool switching. +- Try to extract the sessionid associated with mining.notify on 3rd level array +and submit it along with the userid to support mining resume, failing gracefully +and restarting if the pool rejects it. +- Speed up watchdog interval and therefore display updates to 2 seconds. +- Update copyright dates. +- Cope with misread sessionid on stratum for now. +- Use constants from the array of __constants throughout the diablo kernel. +- Create a __constant array for use within diablo kernel. +- Fix --benchmark generating valid work for cgminer. +- Use the sessionid as passed on stratum connect to attempt to resume a +connection once and then clear it if it fails, to use a new connection. +- Move to storing the nonce1 in the work struct instead of the sessionid for the +now defunct first draft mining.resume protocol. +- Use global constant arrays for all other constants used in scrypt kernel. +- Use global __constants for sha functions in scrypt kernel. +- Use constants for endian swap macros. +- Revise scrypt kernel copyright notice. +- Separate out additions in scrypt kernel. 
+- Reuse some Vals[] variables that can be assigned to constants earlier in the +poclbm kernel, making for fewer ops. +- Put all constants used in poclbm kernel into __const memory array to speed up +concurrent reads on the wavefront. +- BFL stop 1st init command if no device +- Add a get_queued function for devices to use to retrieve work items from the +queued hashtable. +- Bugfix: Duplicate stratum sessionid when copying work, to avoid double-free +- Bugfix: Missing pool_no parameter to applog for no-stratum-sessionid debug +message +- Add the choice of hash loop to the device driver, defaulting to hash_sole_work +if none is specified. +- Add comments. +- Add a driver specific flush_work for queued devices that may have work items +already queued to abort working on them on the device and discard them. +- Flush queued work on a restart from the hash database and discard the work +structs. +- Create a central point for removal of work items completed by queued device +drivers. +- Create a fill_queue function that creates hashtables of as many work items as +is required by the device driver till it flags the queue full. +- Create the hash queued work variant for use with devices that are fast enough +to require a queue. +- Update copyright year. +- Fix tv_lastupdate being made into tv_end and update the hashmeter on cycle, +not opt_log_interval. +- Fix tv_lastupdate being made into tv_end and update the hashmeter on cycle, +not opt_log_interval. +- Only continue submitting shares with mining.resume support on stratum when the +session id matches. +- Provide support for mining.resume with stratum, currently re-authorising after +successful resumption pending finalising of the protocol process. +- Provide basic framework for restarting stratum depending on whether resume +support exists or not. +- Abstract out the setting up of the stratum curl socket. 
+- Free sessionid in clean_work and remove redundant setting of strings to NULL +since the whole work struct is zeroed. +- Only clear stratum shares mandatorily on stratum dropouts when the pool does +not support resume. +- Try resubmitting stratum shares every 5 seconds for up to 2 minutes if the +pool session id exists and matches on failure to submit. +- Do as much outside of mutex locking of sshare_lock as possible. +- Remove last reference to struct work used outside the sshare_lock in +submit_work_thread +- Unlock the sshare_lock in submit_work_thread when all references to work and +sshare are complete. +- Add timestamps to stratum_share structs as they're generated and copy the +stratum sessionid if it exists to stratum work generated. +- Store session id for stratum if the pool supports it for future mining.resume +support. +- API.java allow partial reads +- debug_cb buffer type warning +- MMQ rewrite the last of the old scanhash loop and drastically reduce CPU +- hash_sole_work can be static +- Make the numbuf larger to accept larger scrypt parameters. +- Keep the unique id of each work item across copy_work to prevent multiple work +items having the same id. +- Abstract out the main hashing loop to allow us to use a separate loop for +devices that are fast enough to require queued work. +- Provide a noop thread_enable function for drivers that don't support it. +- Provide a noop thread_shutdown function for drivers that don't support it. +- Provide a noop hw_error function for drivers that don't support it. +- Provide a noop prepare_work for drivers that don't support it. +- Provide a noop thread_init for drivers that don't support it. +- Provide a noop can_limit_work for devices that don't support it. +- Provide a noop thread_prepare function for drivers that don't use +thread_prepare. +- Use blank_get_statline_before for GPU devices that don't support adl +monitoring. +- Provide a noop get_stats function for drivers that don't support it. 
+- Provide a blank get_statline for drivers that don't support it.
+- Provide a blank get_statline_before function for drivers that don't have one.
+- Fill drivers missing reinit_device with a noop version.
+- add 'count' to customsummarypage 'calc'
+- hotplug use get_thread() where appropriate
+- convert sleep(const) to nmsleep()
+- remove empty #ifdef
+- call a separate get_devices() with locking, as required
+- usbutils - avoid free cgusb twice
+- usbutils hotplug v0.1
+- Report USB nodev as ZOMBIE on the screen
+- Change file modes.
+
+
+Version 2.10.5 - February 7, 2013
+
+- Fix logic fail on partial writes with stratum send that was leading to corrupt
+message submissions.
+- Do not consider every call to stratum_resumed a pool recovery unless it was
+actually idle.
+- Do not enable the pool disable on reject feature unless explicitly enabled
+with --disable-rejecting.
+- Stratum disconnect shares - count total against stale
+- Use sanity checking to prevent a possible overflow with invalid data being
+given by the pool for difficulty as reported by luke-Jr.
+- Check for calloc failure for completeness in gen_stratum_work.
+- Cache the coinbase length to speed up stratum work generation.
+- Cache the header length when generating stratum work to avoid calculating it
+on every work generation, and to only need one alloc+sprintf, speeding up work
+generation.
+- Use heap ram for coinbase in gen_stratum_work, zeroing it before use.
+- Provide a wrapper for aligning lengths of size_t to 4 byte boundaries.
+- Fix memory leak on stratum share submission.
+- Zero the best share string memory when zeroing stats.
+
+
+Version 2.10.4 - December 29, 2012
+
+- Change the pool stratum socket buffer to be dynamically allocated to
+accommodate any size coinbase and keep receiving data in recv line for up to 60s
+if no end of line has been received.
+- Differentiate socket full from sock full.
+- Allow stratum to startup without notify but check it is valid before creating
+stratum work.
+- Do not try to generate stratum work unless the notify command has succeeded.
+- Reset total diff1 shares when zeroing stats as well to show correct work
+utility.
+
+
+Version 2.10.3 - December 26, 2012
+
+- Do not give the share submission failure message on planned stratum
+disconnects.
+- Parse anything in the stratum socket if it's full without waiting. Empty the
+socket even if a connection is not needed in case there are share returns.
+- Provide a mechanism to zero all the statistics from the menu.
+- Display the current pool diff in the status line.
+- Display block diff in status line.
+- Generalise the code for solving a block to enable block solve detection with
+scrypt mining.
+- Generate the output hash for scrypt as well and use the one function to set
+share_diff.
+- Use the flip80 function in regeneratehash and the correct sized hash array.
+- Use one size for scratchbuf as a macro in scrypt.c
+- Stage work outside of the stgd lock to prevent attempted recursive locking in
+clone_available.
+- share_diff needs to be performed on a BE version of the output hash to work,
+leading to false best_share values as spotted by luke-Jr.
+- Remove the unused sha224 functions.
+- Use the flip functions in hashtest.
+- Simplify the setting of the nonce data field in work on submitting nonces.
+- Scrypt code does not enter the hashtest function.
+- Go back to cloning available work under staged lock.
+- Updated links to AMD APP SDK
+- Updated link to ADL SDK
+- scrypt_diff uses a uint64_t as well.
+- Correct target for stratum support with scrypt mining.
+- libztex: fixed a typo
+- libztex: check return value of libusb_claim_interface() and release the
+interface in case of early exit
+
+
+Version 2.10.2 - December 19, 2012
+
+- Stop all work from the current pool if it's a stratum pool once it is
+disconnected since it will be invalid upon reconnecting.
+- Discard all staged work from stratum pools as well as the shares upon
+disconnection since all the work becomes invalid.
+- Use correct cbreak after 15 second delay when no pool is found alive.
+- MMQ missing firmware -> ERR not DEBUG
+- Allow stratum to work with scrypt.
+- MMQ ensure delta clock can never exceed limits
+- MMQ lowercase new string constants
+- MMQ add api pgaset for clock
+- API V1.23 - new pgaset command, to be used soon
+- Protect the best_share/best_diff values under control lock.
+- MMQ style police
+- MMQ count work check timeout failures
+- MMQ allow partial work replies and count them
+- Check a stratum pool hasn't gone dead while being a backup pool and missed
+having its idle flag cleared.
+- MMQ overheat: remove clockdown (doesn't help) + ensure no lost shares
+- API-README grammar
+- API-README explain custom page extensions in miner.php
+- miner.php add a sample group pool report
+- miner.php allow where,group,having on custom pages
+
+
+Version 2.10.1 - December 14, 2012
+
+- Check for EWOULDBLOCK when supported in send and recv as well.
+- Use the raw send() command instead of curl_easy_send since curl raw socket
+usage introduces random bugs on windows.
+- Use raw recv() command in place of curl_easy_recv since the curl
+implementation introduces random bugs on windows builds when the recv fails.
+- miner.php when displaying a single rig, add prev/next rig buttons if they +exist, next to refresh +- miner.php allow custom page joins for STATS +- API show if pool has GBT (so people know not to use that pool) +- miner.php - include windows easyphp link +- driver-ztex: use the correct size for the swap array +- API stats - display pool byte transfer stats +- Pool store data transfer stats +- README ModMiner dependency +- Benchmark incorrect work size +- ChangeLog refer to NEWS +- MMQ handle over temp differently and hash longer +- driver-ztex: search the complete noncerange based on the actual speed +- README - update ModMiner details +- API-README update +- api use a dynamic io buffer, truncated before it reaches the current ~64k +limit + + +Version 2.10.0 - December 10, 2012 + +- Include prctl header for thread renaming to work. +- Set tv_idle time if a pool is not active when input from the menu. +- usb display message when device is in use/another cgminer +- libztex: avoid the use of libusb_error_name() +- minor unlikely zero pointer test +- BeaverCreek doesn't like BFI INT patching. +- Only stratum pools that are idle need to be kicked via cnx_needed. +- mmq - abbreviate the temperature numbers +- Do not do any setup if opt_api_listen is disabled in api.c. +- usbutils.c uninitialised usbstat for non-primary mmqs +- Only set the lagging flag for select_pool() on failed getwork if we're not in +opt_fail_only mode. +- libztex: in case the selectFpga() failed set the selected fpga to unknown +- Modified windows-build.txt to update git instructions. +- libztex: use a function for the twice called firmware reset code +- libztex: removed an unused struct member (ztex->valid) +- driver-ztex: support for broken fpga on a multifpga board +- Set the pool lagging flag on startup to avoid it being shown initially, and +only unset it once the maximum number of staged work items has been reached. +- Avoid recursive locking of the stgd lock. 
+- Return value of keep_sockalive is no longer used. +- Remove dependency on mstcpip.h for windows build by making curl version >= +7.25.0 mandatory on windows builds, and use curl functions for keepalive +whenever possible instead. +- Make main() the getwork scheduler once everything is set up, so that all app +exits use the kill_work and quit paths. +- ztex: more style and whitespace fixes +- libztex: silenced another warning +- Set successful connect to true on auth stratum to allow summary on exit from +single stratum pool. +- Only consider work stale for stratum of different job_id if it's not a share. +- Increment version preempting changed version signifying different codebase to +2.9 +- Hash_pop should signal further waiters on its own pthread conditional in case +there are multiple waiters. +- Check the job_id has not changed on stratum work when deciding if the work is +stale as might occur across disconnections. +- Perform pool_resus on getwork pool that generates work in getwork_thread. +- Set pool lagging message for getwork pool that falls to zero staged in getwork +thread. +- Stage extra work when the primary pool is a getwork pool without rolltime. +- Do not try to clean up twice if kill message is given. +- Only recalculate total_staged in getwork thread if required. +- Include the correct config header in libztex and include it before other +includes. +- Implement a completely new getwork scheduler. Stage all work from the one +thread, making it possible to serialise all requests minimising the number of +getworks requested or local work generated. Use a pthread conditional to wake up +the thread whenever work is removed to generate enough work to stay above the +watermark set by opt_queue. Remove all remnants of the old queueing mechanism, +deleting the now defunct queued count. +- libztex: fixed some warnings and removed some whitespaces +- libztex: silenced some warnings +- Remove all references to the now unused workio_cmd structure. 
+- Remove the old workio command queue thread, replacing it with a kill
+conditional to exit the program.
+- Remove getwork command from workio_cmd queues and do them directly from
+queue_request.
+- Begin tearing down the old workio command queues by removing submit commands
+from there and submit them asynchronously via their own threads.
+- Update windows build instructions.
+- Set pool probed to true on successful authorisation with stratum to avoid it
+being pinged later with pool_getswork.
+- driver-ztex: libztex_setFreq() must be called before ztex_releaseFpga()
+- driver-ztex: changed two pairs of malloc()/memset() to calloc()
+- libztex: Read bitstream file in 2kb blocks with simpler and faster code
+- Added the binary versions of ztex_ufm1_15d4.ihx and ztex_ufm1_15y1.ihx
+- Trivial space removal.
+- libztex: Add firmware download support for ZTEX 1.15d and 1.15x
+- libztex: Factor out local version of libusb_get_string_descriptor_ascii()
+- Shut up some boring old cpu warnings.
+- Style changes.
+- Allow pool active to be called on stratum or disabled pools in the watchpool
+thread if the pool has not been probed.
+- libztex: Make log messages say bitstream when referring to bitstreams
+- libztex: Don't return error when a bitstream was already configured
+- libztex: Read bitstream file in 64kb blocks with simpler and faster code
+- libztex: Verify that the mining firmware is not a dummy firmware
+- libztex: Match mining firmware ZTEX descriptor against the dummy firmware
+- Combine shared padding into one char.
+- libztex: Start download sequence only after reading in the new firmware
+- libztex: Download mining firmware to all devices with dummy firmware
+- lock (most of) the threaded statistics updates
+- README stats don't add up
+- usbutils.c remove compiler warning
+- Make need connection return true if a pool is idle.
+- API add Best Share to summary +- Check on creating new GBT work if the structures are up to date and update +them as required rather than regularly. +- Update windows build instructions. +- Enable backup stratum connections for getwork when the primary pool doesn't +have longpoll aka solo mining. +- Check for correct absence of opt_fail_only in cnx_needed. +- Remove unused variable. +- The specification for stratum has been elaborated to say that a changed diff +applies only to new work so do not retarget when submitting shares. +- Use a variable length string array in submit_upstream_work to cope with +massive GBT submissions. +- API lock access to some summary statistics (and copy them) +- Suspend stratum connections to backup pools when there is no requirement to +potentially grab work from them. +- Fix missing export for RenameThread. +- enumerate the mining threadnames +- MMQ avoid possible number overrun crashes +- mmq usb v0.4 + api usb stats +- setting the name of the threads for linux,freebsd,openbsd and osx code is +borrowed from bitcoins util.c, so it is already tested +- Don't show broken WU value with scrypt mining. +- Style police. +- Remove unused getwork times in getswork. +- Fix readme wordwrap. + + +Version 2.9.6 - December 2, 2012 + +- Make gen_stratum_work more robust by using a dynamically allocated array for +the header in case bogus data is sent by the pool to avoid overflowing a static +array. +- scrypt_diff now returns a uint64_t +- Support monitoring and reporting much higher diffs for scrypt mining, +truncating irrelevant zeroes from displayed hash. +- Pass ostate values around in scrypt to be able to extract full hashes if +needed later on. +- Since we will be using calloc_str to put a string into it, convert the +function to calloc_strcat which does it automatically. +- Revert "Handle crash exceptions by trying to restart cgminer unless the +--no-restart option is used." 
+- Count longpoll and GBT decodes as queued work since the count otherwise
+remains static.
+- Use the string helper functions to create gbt blocks of any length.
+- Provide helper functions calloc_str and realloc_strcat to create and extend
+arbitrary length arrays based on string length.
+
+
+Version 2.9.5 - November 25, 2012
+
+- fixes target calc for mips openwrt
+- openwrt needs roundl
+- Get rid of unused last_work in opencl thread data.
+- Do away with the flaky free_work api in the driver code which would often lose
+the work data in opencl and simply flush it before exiting the opencl scanhash.
+- Use base_work for comparison just for cleanness in __copy_work
+- Remove all static work structs, using the make and free functions.
+- Add pool no. to stale share detected message.
+- Add info about which pool share became stale while resubmitting.
+- Copy the work on opencl_free_work
+- Add an extra slot in the max backlog for ztex to minimise dupes.
+- Do not use or count the getworks submitted which are simply testing that pools
+are still up. This was increasing share leakage and making stats not reflect
+real work.
+- Track all dynamically allocated memory within the work struct by copying work
+structs in a common place, creating freshly allocated heap ram for all arrays
+within the copied struct. Clear all work structs from the same place to ensure
+memory does not leak from arrays within the struct. Convert the gbt coinbase and
+stratum strings within the work struct to heap ram. This will allow arbitrary
+lengths without an upper limit for the strings, preventing the overflows that
+happen with GBT.
+- libztex: Work around ZTEX USB firmware bug exposed by the FreeBSD libusb
+- opencl: Use new dev_error function for REASON_DEV_NOSTART
+
+
+Version 2.9.4 - November 18, 2012
+
+- Provide rudimentary support for the balancing failover strategies with stratum
+and GBT by switching pools silently on getwork requests.
+- Convert remaining modminer and bfl uses of usleep to nmsleep.
+- Convert libztex to nmsleep where possible.
+- Convert unreliable usleep calls to nmsleep calls in ztex driver.
+- Support workid for block submission on GBT pools that use it.
+- Provide rudimentary support for literal ipv6 addresses when parsing stratum
+URLs.
+- Work around libcurl cflags not working on hacked up mingw installations on
+windows.
+- Only increase gpu engine speed by a larger step if the temperature is below
+hysteresis instead of increasing it to max speed.
+- Convert pool not responding and pool alive message on backup pools to verbose
+level only since they mean a single failed getwork.
+- Update work block on the longpoll work item before calling restart threads to
+ensure all work but the longpoll work item gets discarded when we call
+discard_stale from restart_threads.
+- Do not attempt to remove the stratum share hash after unsuccessful submission
+since it may already be removed by clear_stratum_shares.
+- Check against a double for current pool diff.
+- Support for fractional diffs and the classic just-below-1 share all FFs diff
+target.
+
+
+Version 2.9.3 - November 11, 2012
+
+- Make header larger on gen stratum work to accommodate \0 at the end.
+
+
+Version 2.9.2 - November 11, 2012
+
+- Use stratum block change from backup pools as an alternative to longpoll for
+pools that don't support LP.
+- Check share target diff for best_share to be calculated when solo mining.
+- Round some more static string arrays to 4 byte boundaries.
+- There is no need for the static arrays to be larger than required, so long as
+they're 4 byte aligned to appease ARM.
+- Store the full stratum url information in rpc_url for correct configuration
+file saving.
+- Put in a hack to prevent dud work from sneaking into test_work_current being
+seen as a new block.
+- Reset the work->longpoll flag where it will affect stratum work items as well.
+- Check for both coinbase/append and submit/coinbase support before using GBT +protocol. +- First pass through testing for GBT should not set probed to true since we are +about to probe again. +- Hash1 is only used by the deprecated cpu mining code and never changes so +remove it from the work struct and bypass needing to process the value for all +other mining. +- Get a work item once per minute for all getwork and GBT pools to test they're +still alive and to maintain a current GBT template. +- Get a fresh block template with GBT pools on switching to them. + + +Version 2.9.1 - November 6, 2012 + +- Reset work flags to prevent GBT shares from being submitted as stratum ones +after switching. + + +Version 2.9.0 - November 6, 2012 + +- Add endian swap defines for where missing. +- Only retarget stratum shares to new pool diff if diff has dropped. +- Remove resetting of probed variable when detecting GBT. +- Count lost stratum share submits and increase message priority to warning. +- Only retrieve a new block template for GBT pools that are the current pool. +- Show which pool untracked share messages have come from. +- Add management for dead GBT pools. +- Count lost shares with stratum as submit stale lost. +- Discard record of stratum shares sent and report lost shares on disconnection +since they will never be reported back. +- Swab, don't just swap the bytes in the GBT target. +- Change status window message for GBT connected pools versus LP. +- Generate a gbt work item from longpoll when required to set new block and +message appropriately. +- Use existing pool submit_old bool from gbt data. +- Retrieve a new block template if more than 30 seconds has elapsed since the +last one to keep the data current and test the pool is still alive. +- Update GBT longpollid every time we request a new longpoll. +- Manage appropriate response codes for share submission with GBT. +- Allow the longpoll thread to start with GBT and only set the longpollid once. 
+- Correct last few components of GBT block generation courtesy of Luke-jr. +- Use correct length for offsetting extra nonce and remaining data. +- Flip all 80 bytes in the flip function which was wrongly named flip256 for its +purpose. +- Calculate midstate for gbt work and remove now unused variable. +- Use a standard function for flipping bytes. +- Insert the extra nonce and remaining data in the correct position in the +coinbase. +- Remove txn size debugging and enlarge gbt block string to prevent overflow. +- Remove varint display debugging. +- Build varint correctly for share submission and sleep 5 seconds before +retrying submit. +- Make gbt_coinbase large enough for submissions, swap bytes correctly to make a +header from GBT and encode the number of transactions in share submission. +- Store the fixed size entries as static variables in GBT in binary form, +byteswapping as is required. +- 32 bit hex encoded variables should be in LE with GBT. +- Target and prevblockhash need to be reversed from GBT variables. +- Construct block for submission when using GBT. +- Use same string for debug as for submission and make string larger to cope +with future GBT messages. +- Skip trying to decipher LP url if we have GBT support. +- Store all the transaction hashes in pool->txn_hashes instead of separating +txn0 and correct generation of merkle root, fixing memory overwrites. +- Hook into various places to generate GBT work where appropriate. +- Create extra work fields when generating GBT work. +- Generate header from correct hashing generation of the merkle root for GBT. +- Generate the merkle root for gbt work generation. +- Create a store of the transactions with GBT in the minimum size form required +to generate work items with a varied coinbase. +- Create a function that generates a GBT coinbase from the existing pool +variables. +- Extract and store the various variables GBT uses when decoding gbt work. +- Check for invalid json result in work_decode. 
+- Decode work in separate functions for getwork vs gbt.
+- Check for the coinbase/append mutable in GBT support to decide whether to use
+it or not.
+- Add a gbt mutex within the pool struct for protecting the gbt values.
+- Convert work decode function to prepare for decoding block templates.
+- Check for GBT support on first probing the pool and convert to using the GBT
+request as the rpc request for that pool.
+- Make the rpc request used with getwork a pool variable to allow it to be
+converted to/from gbt requests.
+- Changes to build prototypes to support building on FreeBSD 9.1-RC2 amd64
+- Free old stratum_work data before replacing it
+- There is no need for addrinfo any more.
+- server and client sockaddr_in are no longer used in struct pool.
+- Merge pull request #322 from luke-jr/bugfix_stratum_tmpwork
+- Set sshare id and swork_id within the sshare mutex to avoid multiple share
+submits with the same id.
+- Initialize temporary stratum work
+
+
+Version 2.8.7 - October 29, 2012
+
+- Fail on select() failing in stratum thread without needing to attempt
+recv_line.
+- Add share to stratum database before sending it again in case we get a
+response from the pool before it's added.
+
+
+Version 2.8.6 - October 29, 2012
+
+- Shorten the initiate stratum connect timeout to 30 seconds.
+- Shorten the stratum timeout on read to 90 seconds to detect unresponsive pool.
+- Display best share difficulty on exit.
+- Make stratum socket fail more robust on windows by disabling the send buffer.
+- Reuse the same curl handle forcing a new connection instead of risking
+dereferencing.
+- Add information about submission failure to stratum send.
+- Only add stratum share to database if we succeeded in submitting it, with a
+debug output saying it succeeded.
+- Use keepalive with stratum sockets to improve its ability to detect broken
+connections.
+- Show only the URL in the status bar to avoid long prefixes making for extra
+long lines.
+- Display compact status in menu and update README to reflect current menu
+entries.
+- Add a compact display mode that does not list per device statistics in the
+status window.
+- Add blank spaces after best share displayed.
+- Round a few static string arrays up to 4 byte boundaries for ARM.
+- Display best share diff for scrypt as well.
+- Show the best diff share as "best share" and add info to the README.
+- Display the best diff share submitted so far.
+- Redundant check.
+- The work struct pointer in struct pc_data in findnonce is never freed yet
+there is no need to allocate it separately so make struct work a static part of
+the struct pc_data.
+
+
+Version 2.8.5 - October 23, 2012
+
+- Handle crash exceptions by trying to restart cgminer unless the --no-restart
+option is used.
+- Switch queued count when choosing a different pool from a failed stratum pool
+in getwork thread.
+- Put a mandatory 5s wait between reattempting a getwork on failure to avoid
+hammering requests.
+- The ATI stream / AMD APP SDK environment variables appear to only interfere
+with win32 builds so bypass them.
+- Make sure to check pool stratum curl exists under lock before attempting any
+recv to not risk dereferencing upon attempting to reinitiate stratum.
+- Avoid redefining macros and align to 4 byte boundaries.
+- API - add Stratum information to pools
+- update FPGA-README for MMQ
+
+
+Version 2.8.4 - October 18, 2012
+
+- Time for dynamic is in microseconds, not ms.
+- x86_64 builds of mingw32 are not supported directly and should just configure
+as generic mingw32 builds since they're NOT 64 bit.
+- Cope with both ATI stream and AMD APP SDK roots being set when building.
+- Use 3 significant digits when suffix string is used and values are >1000.
+- MMQ new initialisation (that works) and clocking control
+- Get rid of unused warning for !scrypt.
+- Use select on stratum send to make sure the socket is writeable.
+- Cope with dval being zero in suffix_string and display a single decimal place +when significant digits is not specified but the value is greater than 1000. +- Pad out the suffix string function with zeroes on the right. +- Failure to calloc in bin2hex is a fatal failure always so just check for that +failure within the function and abort, simplifying the rest of the code. +- Provide locking around the change of the stratum curl structures to avoid +possible races. +- Bump opencl kernel version numbers. +- Remove atomic ops from opencl kernels given rarity of more than once nonce on +the same wavefront and the potential increased ramspeed requirements to use the +atomics. +- Clear the pool idle flag in stratum when it comes back to life. +- Display correct share hash and share difficulty with scrypt mining. +- Use explicit host to BE functions in scrypt code instead of hard coding +byteswap everywhere. +- Show work target diff for scrypt mining. +- Ease the checking on allocation of padbuffer8 in the hope it works partially +anyway on an apparently failed call. +- Watch for buffer overflows on receiving data into the socket buffer. +- Round target difficulties down to be in keeping with the rounding of detected +share difficulties. +- Dramatically simplify the dynamic intensity calculation by oversampling many +runs through the opencl kernel till we're likely well within the timer +resolution on windows. +- String alignment to 4 byte boundaries and optimisations for bin<->hex +conversions. +- In opencl_free_work, make sure to still flush results in dynamic mode. +- Align static arrays to 4 byte boundaries to appease ARM builds for stratum. + + +Version 2.8.3 - October 12, 2012 + +- Left align values that are suffix_string generated. +- Share_diff should not be converting the work data to hex. +- Off by one error. +- Prevent overflows of the port char array in extract_sockaddr. +- Disable stratum detection with scrypt. 
+- Use the suffix string function when displaying device hashrates. +- Be consistent with the get_statline function. +- Use the suffix string function for displaying hashrate with 4 significant +digits. +- Display the actual share diff next to the pool required diff, using a suffix +creation function to prevent values of >1000 being shown in their entirety. +- Fix 4 * 0 being 0 that would break dynamic intensity mode. +- Fix wrong byteswap macro being used on mingw32 which was breaking target +generation on stratum. + + +Version 2.8.2 - October 11, 2012 + +- Reinstate the history on dynamic intensity mode to damp fluctuations in +intensity but use an upper limit on how much the value can increase at any time +to cope with rare overflows. +- Create a fix-protocol option which prevents cgminer from switching to stratum +if it's detected. +- Simplify target generation code. +- Add support for client.get_version for stratum. +- Use a 64 bit unsigned integer on the diff target to generate the hex target. +- Update reconnect message to show whole address including port. +- Look for null values and parse correct separate array entries for url and port +with client reconnect commands for stratum. +- The command for stratum is client.reconnect, not mining.reconnect. +- Only copy the stratum url to the rpc url if an rpc url does not exist. +- Implement rudimentary mining.reconnect support for stratum. +- Ignore the value of stratum_active on calling initiate_stratum and assume +we're always trying to reinitiate it, and set the active flag to false in that +function. +- stratum auth can be unset if we fail to authorise on subsequent calls to +auth_stratum which undoes the requirement of setting it in one place so set it +in pool_active. + + +Version 2.8.1 - October 8, 2012 + +- Use the stratum url as the rpc url advertised if we switch to it. +- Count an invalid nonce count as a hardware error on opencl. +- Count each stratum work item as local work. 
+- Cope with one stratum pool being the only active pool when it dies by sleeping +for 5 seconds before retrying to get work from it instead of getting work +indefinitely. +- Detect stratum outage based on either select timing out or receiving an empty +buffer and properly re-establish connection by disabling the stratum_active +flag, coping with empty buffers in parse_stratum. + + +Version 2.8.0 - October 7, 2012 + +- Major upgrade - support for the stratum mining protocol. +- Fix various modminer warnings on mingw. +- Fix sign warning on windows build for bitforce. +- Cast socketfail to integer since SOCKET is an unsigned int on windows. +- Use strtod not strtol for bitforce temp backup. +- Cope with broken drivers returning nonsense values for bitforce temperatures. +- Minor warning fixes. +- Use the stratum thread to detect when a stratum pool has died based on no +message for 2 minutes. +- Only set the stratum auth flag once and once the stratum thread is started, +use that to set/unset the stratum active flag. +- Only hand off to stratum from getwork if we succeed in initiating the +protocol. +- Target should only be 32 bytes copied. +- Use a static array for work submission data instead of stack memory. +- Clear the buffer data before sprinting to it. +- Clear work stratum strings before setting them and add them to debug output. +- Drop stratum connect failed message to verbose level only since it's a regular +probing message. +- TCP Keepalive in curl is only in very recent versions and not required with +regular messages on stratum anyway. +- Move stratum sockets to curl infrastructure with locking around send+recv to +begin support for proxies and ssl. +- Make detect stratum fail if a proxy has been set up. +- Stratum does not currently have any proxy support so do not try to switch to +stratum if a proxy has been specified. +- Windows doesn't work with MSG_PEEK on recv so move to a continuously updating +buffer for incoming messages. 
+- Alloca is unreliable on windows so use static arrays in util.c stratum code. +- Begin support for mingw stratum build. +- Add space to reject reason. +- Parse the reject reason where possible from stratum share submission. +- Pass json error value to share result function to be able to parse reject +reason in stratum. +- Don't try to parse unneeded parameters in response to mining.subscribe. +- Remove the sshare hash entry if we failed to send it. +- Change notify message to info level to avoid spamming repeatedly when a pool +is down. +- Check the stratum pool difference has not changed compared to the work diff +when testing whether a share meets the target or not and retarget if necessary. +- Bit error in target calculation for stratum. +- Set work_block in gen_stratum_work for when work is reused to avoid thinking +it's all stale. +- Offset the current block detection to the prev block hash. +- We should be testing for id_val, not id in parse stratum response. +- Make target on stratum scale to any size by clearing sequential bits according +to diff. +- Correct target calculation in gen_stratum_work. +- If a share result has an error code but still has an id, it is likely a +reject, not an error. +- Initiate stratum the first time in pool_active only, allowing us to switch to +it on getting a failed getwork and detecting the presence of stratum on the url +at that time. +- Use 5 second timeout on sock full for now as a temporary workaround. +- If no stratum url is set by the end of the detect stratum routine, copy the +sockaddr url. +- Make all buffers slightly larger to prevent overflow. +- Make the stratum recv buffer larger than the recvsize. +- Userpass needs to be copied to user and pass earlier to allow stratum +authorisation to work with it. +- Store a sockaddr url of the stripped url used in determining sockaddr to not +confuse it with the stratum url and fix build warnings. +- Decrease the queued count with stratum work once it's staged as well. 
+- Allow the stratum retry to initiate and auth stratum in pool_alive to make +sure the stratum thread is started. +- Avoid duplicating pool->rpc_url and setting pool->stratum_url twice to itself. +- Detect if a getwork based pool has the X-Stratum header on startup, and if so, +switch to the stratum based pool. +- Comment update. +- Minor message change. +- Create a work item from a "clean" request from stratum allowing the new block +to be detected and the appropriate block change message to be given. +- Use statically allocated stratum strings in struct work to cope with the +inability to safely deallocate dynamically allocated ram. +- Use the current pool when deciding whether to reuse work from a stratum source +rather than the work's previous pool. +- Copy the stratum url to the rpc url to avoid none being set. +- Provide locking around stratum send operations to avoid races. +- Submit shares from stratum through the abstracted submit share function +detecting what message they belong to and showing the data from the associated +work, and then deleting it from the hash. +- Use a more robust mechanism to obtain a \n terminated string over a socket. +- Abstract out share submit as a function to be useable by stratum. +- Rename parse_stratum to parse_method as it is only for stratum messages that +contain methods. +- Display stratum as mechanism in status line when current pool is running it. +- Count each stratum notify as a getwork equivalent. +- Correct nonce submitted with share. +- Extranonce2 should be added before coinbase2. +- We should be hashing the binary coinbase, not the hex one. +- Fix endianness of nonce submitted for stratum. +- Check that stratum is already active in initiate_stratum to avoid +de-authorising ourselves by subscribing again. +- Begin implementing a hash database of submissions and attempt sending results. +- Copy parameters from stratum work required for share submission. 
+- Set lagging flag on first adding a pool to prevent pool slow warning at +startup. +- Fix work->target being a 32 byte binary in gen_stratum_work. +- Store and display stripped url in its own variable. +- Create machinery to divert work requests to stratum. +- Generate the work target in gen_stratum_work, setting default diff to 1 in +case it is not yet set. +- Generate work data, midstate and hash1 in gen_stratum_work. +- Generate header created from stratum structures in gen_stratum_work. +- Generate merkle root hash in gen_stratum_work. +- Generate the coinbase for generation of stratum based work. +- The number of transactions is variable so make merkle a variable length +dynamically allocated array and track how many there are for stratum. +- Rename nonce2 to n2size reflecting that it's a size variable and not the +actual nonce. +- Provide rudimentary support for stratum clean work command in the stratum +thread. +- Cope with pools being removed in the stratum thread. +- Use the pool sock value directly in the stratum thread in case it changes +after reconnecting. +- Create a stratum thread per pool that has stratum that monitors the socket and +serves received data. +- Check return value of stratum_parse. +- Complete authorisation in stratum. +- Implement stratum parsing of notify parameters and storing them in the pool +stratum work structure. +- Create helper functions for duplicating json strings to avoid keeping json +references in use. +- Append \n in the sock_send function instead of adding it when constructing +json in stratum. +- Don't keep any json references around with stratum structures. +- Create parse_stratum function that hands off stratum parameters to other +functions to manage pool stratum work struct variables. Implement mining +difficulty setting. +- Create helper functions for checking when a socket is ready to read on and +receive a single line at a time. Begin stratum authorisation process. 
+- Provide a helper function for reading a single \n terminated string from a
+socket.
+- Create a stratum work structure to store current work variables.
+- Test specifically for stratum being active in pool_active.
+- Detect stratum in common place when adding urls, and use a bool to tell us
+when it's active.
+- Fix warnings.
+- Extract and store various parameters on stratum init confirming successful
+mining notify.
+- Use existing socket macros and close the socket on failure in init stratum.
+- Initiate stratum and grab first json result.
+- Get detailed addressinfo from the parsed URL for future raw socket usage when
+possible. IPV4 only for now.
+- Prepare for getaddrinfo call.
+- Add data structures to pool struct for socket communications.
+- Put all socket definitions in util.h to allow reusing by added socket
+functions to be used in util.c.
+
+
+Version 2.7.7 - October 7, 2012
+
+- Fix unused warnings on mingw build.
+- Fix sign warning in ocl.c
+- fds need to be zeroed before set in modminer.
+- Put scrypt warning on separate line to avoid 0 being shown on windows as
+bufsize.
+- Display correct pool number when block is found.
+- Prevent corrupt values returned from the opencl code from trying to read
+beyond the end of the buffer by masking the value to a max of 15.
+- Icarus USB write failure is also a comms error
+- api.c DEBUG message has no parameter
+- Icarus catch more USB errors and close/reopen the port
+- API-README update cgminer version number
+- hashmeter fix stats kh/s on 32bit windows
+
+
+Version 2.7.6 - September 24, 2012
+
+- Reorder libztex header include order to fix missing struct definition.
+- Display share difficulty on log with a shortened hash display on submission.
+- API stats add some pool getwork difficulty stats
+- Ignore any pings pushed to the worker threads if the thread is still paused to
+prevent it being enabled and disabled repeatedly. 
+- README - FAQ - usermod group - shouldn't remove other groups
+- Test for sequential getwork failures on a pool that might actually be up but
+failing to deliver work as we may end up hammering it repeatedly by mistake.
+- reduce windows compile warnings
+- util.c - bug - proxy - no data end condition
+- As we average gpu time over 5 work intervals for dynamic GPU intensity, there
+is no need to maintain a rolling average and it avoids the potential long term
+corruption of a single overflow value.
+- Test for the now-automatically exported variable AMDAPPSDKROOT when looking
+for the presence of the OpenCL headers.
+- API don't change 'Diff1 Shares' - backward compatibility FTW
+- miner.php highlighting correctly handling difficulty
+- API - Add last share difficulty for devices and pool
+- Store and report Accepted,Rejected,Stale difficulty in the summary and API
+- WorkTime - display prevblock for scrypt
+- api.c remove compile warnings
+- Calculate work difficulty for each getwork and display with WorkTime debug
+- remove MMQ unused variable warning
+- FPGA - allow long or short device names in detect code + style police
+- WorkTime - multiple nonce per work and identify the work source
+- Optional WorkTime details with each Accepted/Rejected work item
+- Icarus - ignore hardware errors in timing mode
+- miner.php oops - mistype
+- miner.php by default don't display IP/Port numbers in error messages
+- api.c all STATUS messages automatically escaped
+- api.c add missing escape for comma in MSG_PGAUNW
+- API add display of and setting queue,scantime,expiry
+- HW: dont submit bad shares
+- save individual pool proxy settings to config
+- --default-config - allow command line to define the default configuration file
+for loading and saving
+- API-README update for pools proxy info
+- README URL proxy must use quote so show in the example
+- bug: remove proxy: from the front of the proxy used
+- CURL support for individual proxy per pool and all proxy types
+- 
README spelling/etc +- README - FPGA device FAQ +- HW: error counter auto for all devices - ztex code not fixed +- API pgaidentify - unsupported message should be a warning +- API/BFL identify a device - currently only BFL to flash the led +- BFL add throttle count to internal stats + API +- BFL: missing device id in log message +- miner.php correct to new Diff1 Work field names +- API add device diff1 work +- API-README update +- api.c Correct diff1 field name +- count device diff1 shares +- API-README more debug parameter information +- API allow full debug settings control + + +Version 2.7.5 - August 31, 2012 + +- Adjust opencl intensity when adjusting thread count to prevent it getting +pegged at a value below the minimum threads possible. +- miner.h max_hashes -> int64_t +- Keep the local block number in the blocks structs stored and sort them by +number to guarantee we delete the oldest when ageing the block struct entries. +- Use correct sdk version detection for SDK 2.7 +- Revert "Pick worksize 256 with Cypress if none is specified." +- Test for lagging once more in queue_request to enable work to leak to backup +pools. +- There is no need to try to switch pools in select_pool since the current pool +is actually not affected by the choice of pool to get work from. +- Only clear the pool lagging flag if we're staging work faster than we're using +it. +- needed flag is currently always false in queue_request. Remove it for now. +- thr is always NULL going into queue_request now. + + +Version 2.7.4 - August 23, 2012 + +- Perform select_pool even when not lagging to allow it to switch back if needed +to the primary. +- Simplify macros in output kernels avoiding apparent loops and local variables. +- Carry the needed bool over the work command queue. +- Move the decision to queue further work upstream before threads are spawned +based on fine grained per-pool stats and increment the queued count immediately. 
+- Track queued and staged per pool once again for future use. +- OpenCL 1.0 does not have native atomic_add and extremely slow support with +atom_add so detect opencl1.0 and use a non-atomic workaround. +- Pools: add RollTime info to API 'stats' and 'Stats' button in miner.php + + +Version 2.7.3 - August 22, 2012 + +- Minimise the number of getwork threads we generate. + + +Version 2.7.2 - August 22, 2012 + +- Pick worksize 256 with Cypress if none is specified. +- Give warning with sdk2.7 and phatk as well. +- Whitelist sdk2.7 for diablo kernel as well. +- Only keep the last 6 blocks in the uthash database to keep memory usage +constant. Storing more is unhelpful anyway. +- BFL Flash - always distribute source +- Increase kernel versions signifying changed APIs. +- BFL flash - include source in builds and more FPGA-README +- Check we haven't staged work while waiting for a curl entry before proceeding. +- Use atomic ops to never miss a nonce on opencl kernels, including nonce==0, +also allowing us to make the output buffer smaller. +- Remove compile errors/warnings and document compile/usage in FPGA-README +- bitforce-firmware-flash.c by Luke-jr +- Ignore the submit_fail flag when deciding whether to recruit more curls or not +since we have upper bounds on how many curls can be recruited, this test is +redundant and can lead to problems. +- API-README update cgminer version number +- API-README fix groups P: example mistake +- API-README add COIN and other edits +- gpu->hit should be reset on new work as well. +- Do not add time to dynamic opencl calculations over a getwork. +- miner.php allow 'coin' is custom pages + + +Version 2.7.1 - August 21, 2012 + +- Update windows build instructions courtesy of sharky. +- Increase max curls to number of mining threads + queue * 2, accounting for up +and downstream comms. +- Queue enough requests to get started. 
+- There is no point trying to clone_work in get_work() any more since we clone +on every get_work_thread where possible. +- There is no point subtracting 1 from maxq in get_work_thread. +- Only set lagging flag once there are no staged work items. +- select_pool does not switch back to the primary once lagging is disabled. +- miner.php allow page title to be defined in myminer.php +- Free work before retrying in get_work_thread. +- Increment total work counter under mutex lock. +- Increment the queued count after the curl is popped in case there's a delay +waiting on curls and we think we've queued work when in fact we're waiting +- API new command 'coin' with mining information +- Do the dynamic timing in opencl code over a single pass through scanhash to +make sure we're only getting opencl times contributing to the measured inte +- Increase curl reaping time to 5 minutes since comms between curl requests can +be 2 mins apart with lots of rolltime. +- No need for extra variable in hash_push. +- Remove short options -r and -R to allow them to be reused and remove readme +entries for deprecated options. +- Avoid attempting to recursively lock the console mutex by disabling warnings +in gpu_fanpercent when fanspeed monitoring fails on windows. Debugged by l +- Deprecate the opt_fail_pause parameter, leaving a null placeholder for +existing configurations. +- Don't pause after failed getwork, set lagging flag and reassess. +- Add message to share if it's a resubmit. +- We should not be pausing in trying to resubmit shares. +- Get rid of the extending fail pause on failed connects since we discard work +after a period. +- get_work always returns true so turn it into a void function. +- get_work never returns false so get rid of fail pause loop. +- Get rid of pause and retry from get_upstream_work so we only do it from one +place. +- Deprecate the opt_retries feature as no one wants cgminer to automatically +abort. 
Leave a null placeholder for configurations that still have it. +- Reinstate fix ADL gpu-map not working when there are more ADL devices than +openCL patch by Nite69. Add virtual adl mapping for when none is specified o +- miner.php show summary Diff1 Shares total +- miner.php fix Work Utility totals +- miner.php format new Work Utility and Diff1 Shares +- API V1.17 show Work Utility and Diff1 Shares + + + +Version 2.7.0 - August 18, 2012 + +- Introduce a new statistic, Work Utility, which is the number of difficulty 1 +shares solved per minute. This is useful for measuring a relative rate of work +that is independent of reject rate and target difficulty. +- Implement a new pool strategy, BALANCE, which monitors work performed per pool +as a rolling average every 10 minutes to try and distribute work evenly over all +the pools. Do this by monitoring diff1 solutions to allow different difficulty +target pools to be treated equally, along with solo mining. Update the +documentation to describe this strategy and more accurately describe the +load-balance one. +- Getwork fail was not being detected. Remove a vast amount of unused variables +and functions used in the old queue request mechanism and redefine the getfail +testing. +- Don't try to start devices that don't support scrypt when scrypt mining. +- 0 is a valid return value for read so only break out if read returns -1. +- Consider us lagging only once our queue is almost full and no staged work. +- Simplify the enough work algorithm dramatically. +- Only queue from backup pools once we have nothing staged. +- Don't keep queueing work indefinitely if we're in opt failover mode. +- Make sure we don't opt out of queueing more work if all the queued work is +from one pool. +- Set lagging flag if we're on the last of our staged items. +- Reinstate clone on grabbing work. +- Grab clones from hashlist wherever possible first. +- Cull all the early queue requests since we request every time work is popped +now. 
+- Keep track of staged rollable work item counts to speed up clone_available. +- Make expiry on should_roll to 2/3 time instead of share duration since some +hardware will have very fast share times. +- Do the cheaper comparison first. +- Check that we'll get 1 shares' worth of work time by rolling before saying we +should roll the work. +- Simplify all those total_secs usages by initialising it to 1 second. +- Overlap queued decrementing with staged incrementing. +- Artificially set the pool lagging flag on pool switch in failover only mode as +well. +- Artificially set the pool lagging flag on work restart to avoid messages about +slow pools after every longpoll. +- Factor in opt_queue value into enough work queued or staged. +- Roll work whenever we can on getwork. +- Queue requests for getwork regardless and test whether we should send for a +getwork from the getwork thread itself. +- Get rid of age_work(). +- 0 is a valid return value for read so only break out if read returns -1. +- Offset libusb reads/writes by length written as well in ztex. +- Cope with timeouts and partial reads in ztex code. +- fpga serial I/O extra debug (disabled by default) + + +Version 2.6.5 - August 15, 2012 + +- Don't try to get bitforce temperature if we're polling for a result to +minimise the chance of interleaved responses. +- Set memory clock based on memdiff if present from with engine changes, +allowing it to parallel manual changes from the menu as well. +- Increase the timeout on bitforce as per Paul Sheppard's suggestion to account +for throttling + work time + excess. +- Fix ADL gpu-map not working when there are more ADL devices than openCL. +Initial patch supplied by Nite69. Modified to suit. +- Windows' timer resolution is limited to 15ms accuracy. This was breaking +dynamic intensity since it tries to measure below this. Since we are repeatedly +sampling similar timeframes, we can average the gpu_us result over 5 different +values to get very fine precision. 
+- Fix harmless unused warnings in scrypt.h. +- api.c typo +- API allow display/change failover-only setting +- Check we are not lagging as well as there is enough work in getwork. +- Minimise locking and unlocking when getting counts by reusing shared mutex +lock functions. +- Avoid getting more work if by the time the getwork thread is spawned we find +ourselves with enough work. +- The bitforce buffer is cleared and hw error count incremented on return from a +failed send_work already so no need to do it within the send_work function. +- miner.php allow a custom page section to select all fields with '*' - e.g. to +create a STATS section on a custom page +- Escape " and \ when writing json config file +- miner.php optional single rig totals (on by default) + + +Version 2.6.4 - August 7, 2012 + +- Convert the serial autodetect functions to use int instead of char to +enumerate devices. +- Make the serial open timeout for BFL generically 1 second on windows. +- Deuglify windows autodetect code for BFL. +- There is no point zeroing temperature in BFL if we fail to get a response, and +we should register it as a HW error, suggesting throttling. +- Update SCRYPT README with information about HW errors. +- Use the scrypt CPU code to confirm results from OCL code, and mark failures as +HW errors, making it easier to tune scrypt parameters. +- We may as well leave one curl still available per pool instead of reaping the +last one. +- Need to recheck the pool->curls count on regaining the pool lock after the +pthread conditional wait returns. +- Display reaped debug message outside mutex lock to avoid recursive locking. +- Add specific information when ADL detects error -10 saying the device is not +enabled. 
+- api.c update API start message and include port number +- miner.php ignore arg when readonly +- miner.php allow pool inputs: delete, addpool, poolpriority + + +Version 2.6.3 - August 5, 2012 + +- Count likely throttling episodes on bitforce devices as hardware errors. +- Style cleanups. +- Use FTD2XX.DLL on Windows to autodetect BitFORCE SHA256 devices. +- Make pool_disabled the first in the enums == 0, fixing the pool enabled count +which compares if value is not enabled before enabling it. +- Correct writing of scrypt parameters to config file based on command line +parameters only. +- Use different variables for command line specified lookup gap and thread +concurrency to differentiate user defined versus auto chosen values. +- Queue a request on pool switch in case we have no work from the new pool yet. +- Display failover only mode in pool menu and allow it to be toggled live. +- Reinstate check for system queueing lag when the current pool's queue is maxed +out, there is no staged work, and the work is needed now. +- There is no need for pool active testing to be mandatory any more with queue +request changes. +- Fix harmless warnings. +- Check the current staged and global queued as well before queueing requests. +Discard stales before ageing work in the watchdog thread. Queue requests after +discarding and ageing work in watchdog thread. Display accurate global queued in +curses output. Reuse variable in age_work(). +- The queueing mechanism has become a complex state machine that is no longer +predictable. Rewrite it from scratch watching only current queues in flight and +staged work available on a pool by pool basis. +- API remove unused warning in non-GPU compile +- api.c in linux allow to open a closed socket in TIME_WAIT +- Queue an extra request whenever staged work drops below mining thread count in +hash_pop. 
+- Update debian package configs to v2.6.2 + + +Version 2.6.2 - August 3, 2012 + +- Scrypt mining does not support block testing yet so don't try to print it. +- Clear the bitforce buffer whenever we get an unexpected result as it has +likely throttled and we are getting cached responses out of order, and use the +temperature monitoring as a kind of watchdog to flush unexpected results. +- It is not critical getting the temperature response in bitforce so don't +mandatorily wait on the mutex lock. +- Check there is a cutoff temp actually set in bitforce before using it as a cut +off value otherwise it may think it's set to zero degrees. +- We dropped the temporary stopping of curl recruiting on submit_fail by +mistake, reinstate it. +- Make threads report in either side of the scanhash function in case we miss +reporting in when restarting work. +- Don't make mandatory work and its clones last forever. +- Make test work for pool_active mandatory work items to smooth out staged work +counts when in failover-only mode. +- Add debugging output when work is found stale as to why. +- Print the 3 parameters that are passed to applog for a debug line in +bitforce.c +- Clear bitforce buffer on init as previously. +- Add some headroom to the number of curls available per pool to allow for +longpoll and sendwork curls. +- Revert "Revert "Change BFL driver thread initialising to a constant 100ms +delay between devices instead of a random arrangement."" +- Revert "Remove bitforce_thread_init" +- Show the correct base units on GPU summary. +- Differentiate between the send return value being a bool and the get return +value when managing them in bitforce scanhash. +- 23a8c60 Revert "bitforce: Skip out of sending work if work restart requested" + + +Version 2.6.1 - July 30, 2012 + +- Display scrypt as being built in as well. +- Fix build warning about KL_SCRYPT when built without scrypt support. +- Remove the low hash count determinant of hardware being sick. 
A low hash rate
+can be for poor network connectivity or scrypt mining, neither of which are due
+to a sick device.
+- api.c poolpriority changes
+
+
+Version 2.6.0 - July 29, 2012
+
+- Display kilohash when suitable, but store the global mhash value still truly
+in megahashes to not break the API output.
+- Don't try and print curses output for devices that won't fit on the screen.
+- Add scrypt documentation in the form of a separate readme.
+- Fix build error without scrypt enabled.
+- Limit total number of curls recruited per pool to the number of mining threads
+to prevent blasting the network when we only have one pool to talk to.
+- bitforce: Skip out of sending work if work restart requested
+- Keep a counter of enabled pools and use that instead of iterating over the
+pool list. Use that value to ensure we don't set the last remaining active pool
+to the rejecting state.
+- fpgautils: add support for 57.6 kBd serial
+- miner.php add a socket RCV timeout for if cgminer is hung and the API thread
+is still running
+- Limit thread concurrency for scrypt to 5xshaders if shaders is specified.
+- Simplify repeated use of gpus[gpu]. in ocl.c
+- Find the nearest power of 2 maximum alloc size for the scrypt buffer that can
+successfully be allocated and is large enough to accommodate the thread
+concurrency chosen, thus mapping it to an intensity.
+- Don't make opt_scrypt mandatory blocking with opencl code.
+- Update kernel versions reflecting changes in the API.
+- Make the thread concurrency and lookup gap options hidden on the command line
+and autotune parameters with a newly parsed --shaders option.
+- Fix target testing with scrypt kernel as it would have been missing shares
+below target. 
+- Bugfix: Use a mutex to control non-curses output +- Simplify code to a single vprintf path for curses-less printing +- Move opt_quiet check to my_log_curses, so it works for curses-less builds +- Use log_generic for vapplog to cut down on code duplication +- Add space to log output now that there is more screen real estate available. +- BFL force all code to timeout to avoid hanging +- Bugfix: Copy argv[0] given to dirname() +- Always create the largest possible padbuffer for scrypt kernels even if not +needed for thread_concurrency, giving us some headroom for intensity levels. +- Use the detected maximum allocable memory on a GPU to determine the optimal +scrypt settings when lookup_gap and thread_concurrency parameters are not given. +- Check the maximum allocable memory size per opencl device. +- Add debugging output if buffer allocation fails for scrypt and round up +bufsize to a multiple of 256. +- Nonce testing for btc got screwed up, leading to no accepted shares. Fix it. +- Display size of scrypt buffer used in debug. +- Allow intensities up to 20 if scrypt is compiled in. +- Add name to scrypt kernel copyright. +- Allow lookup gap and thread concurrency to be passed per device and store +details in kernel binary filename. +- Ignore negative intensities for scrypt. +- Change the scale of intensity for scrypt kernel and fix a build warning. +- Correct target value passed to scrypt kernel. +- Use 256 output slots for kernels to allow 1 for each worksize. +- Test the target in the actual scrypt kernel itself saving further +calculations. +- Reinstate GPU only opencl device detection. +- Decrease lookup gap to 1. Does not seem to help in any way being 2. +- Fix build. +- Make pad0 and pad1 local variable in scrypt kernel. +- Constify input variable in scrypt kernel. +- Send correct values to scrypt kernel to get it finally working. +- Create command queue before compiling program in opencl. 
+- Detach pthread from within the api thread in case it is terminated due to not +being instantiated before pthread_cancel is called from main, leading to a +segfault. +- Debug output per thread hashrate is out by a factor of 1000. +- Initialise mdplatform. +- Find the gpu platform with the most devices and use that if no platform option +is passed. +- Allow more platforms to be probed if first does not return GPUs. +- Fix external scrypt algo missing. +- Limit scrypt to 1 vector. +- Handle KL_SCRYPT in config write. +- Get rid of stuff. +- Don't enqueuewrite buffer at all for pad8 and pass work details around for +scrypt in dev_blk. +- Set the correct data for cldata and prepare for pad8 fixes. +- Bugfix: Fix build without curses but with OpenCL +- Find the gpu platform with the most devices and use that if no platform option +is passed. +- Allow more platforms to be probed if first does not return GPUs. +- Get rid of spaces in arrays in scrypt kernel. +- Start with smaller amount of hashes in cpu mining to enable scrypt to return +today sometime. +- Show Khash hashrates when scrypt is in use. +- Free the scratchbuf memory allocated in scrypt and don't check if CPUs are +sick since they can't be. Prepare for khash hash rates in display. +- Add cpumining capability for scrypt. +- Set scrypt settings and buffer size in ocl.c code to be future modifiable. +- Cope with when we cannot set intensity low enough to meet dynamic interval by +inducing a forced sleep. +- Make dynamic and scrypt opencl calls blocking. +- Calculate midstate in separate function and remove likely/unlikely macros +since they're dependent on pools, not code design. +- bitforce: Use "full work" vs "nonce range" for kernel name +- Display in debug mode when we're making the midstate locally. +- Fix nonce submission code for scrypt. +- Make sure goffset is set for scrypt and drop padbuffer8 to something +manageable for now. +- Set up buffer8 for scrypt. +- Build fix for opt scrypt. 
+- Don't check postcalc nonce with sha256 in scrypt.
+- Don't test nonce with sha and various fixes for scrypt.
+- Make scrypt buffers and midstate compatible with cgminer.
+- Use cgminer specific output array entries in scrypt kernel.
+- Provide initial support for the scrypt kernel to compile with and mine scrypt
+with the --scrypt option.
+- Enable completely compiling scrypt out.
+- Begin import of scrypt opencl kernel from reaper.
+- bitforce_get_result returns -1 on error now.
+- Check return value of read in BFgets
+- Bugfix: Make our Windows nanosleep/sleep replacements standards-compliant
+(which fixes nmsleep) and include compat.h for bitforce (for sleep)
+- rpc: Use a single switch statement for both stringifications of cgpu->status
+- Fix whitespace mangling.
+- miner.php fix rig # when miners fail
+- Only try to shut down work cleanly if we've successfully connected and started
+mining.
+- Use switch statement for cgpu->status and fix spelling.
+- Abbrv. correction
+- Bugfix: Don't declare devices SICK if they're just busy initialising
+- Bugfix: Calculate nsec in nmsleep correctly
+- Bugfix: Adapt OpenCL scanhash errors to driver API change (errors are now -1,
+not 0)
+- Remove superfluous ave_wait
+- Put kname change for broken nonce-range back in
+- Add average wait time to api stats
+- Change BFL driver thread initialising to a constant 100ms delay between
+devices instead of a random arrangement.
+- Spelling typo.
+- Time opencl work from start of queueing a kernel till it's flushed when
+calculating dynamic intensity.
+- Modify the scanhash API to use an int64_t and return -1 on error, allowing zero
+to be a valid return value.
+- Check for work restart after the hashmeter is invoked for we lose the hashes
+otherwise contributed in the count.
+- Remove disabled: label from mining thread function, using a separate
+mt_disable function.
+- Style changes.
+- Missed one nonce-range disabling. 
+- Add average return time to api stats +- miner.php allow rig names in number buttons +- Remove bitforce_thread_init The delay thing does nothing useful... when long +poll comes around, all threads restart at the same time anyway. +- Change timeouts to time-vals for accuracy. +- fix API support for big endian machines +- Cope with signals interrupting the nanosleep of nmsleep. +- Use standard cfsetispeed/cfsetospeed to set baud rate on *nix +- miner.php split() flagged deprecated in PHP 5.3.0 +- More BFL tweaks. Add delay between closing and reopening port. Remove buffer +clear in re-init Add kernel type (mini-rig or single) +- Make long timeout 10seconds on bitforce for when usleep or nanosleep just +can't be accurate... + + +Version 2.5.0 - July 6, 2012 + +- Fix --benchmark not working since the dynamic addition of pools and pool +stats. +- Make disabling BFL nonce range support a warning since it has to be explicitly +enabled on the command line now. +- miner.php allow renaming table headers +- Make bitforce nonce range support a command line option --bfl-range since +enabling it decrease hashrate by 1%. +- Add sanity checking to make sure we don't make sleep_ms less than 0 in +bitforce. +- The fastest minirig devices need a significantly smaller starting sleep time. +- Use a much shorter initial sleep time to account for faster devices and nonce +range working, and increase it if nonce range fails to work. +- Use nmsleep instead of usleep in bitforce. +- Provide a ms based sleep function that uses nanosleep to avoid the inaccuracy +of usleep on SMP systems. +- delay_time_ms is always set so need not be initialised in bitforce. +- Increase bitforce timeout to 10 seconds. +- Add more hysteresis and poll ~5 times to allow for timer delays in bitforce +devices. +- miner.php allow alternating line colours (off by default) +- Display the actual duration of wait when it is greater than the cutoff. 
+- Set nonce to maximum once we determine nonce range support is broken. +- Initial wait time is always known so no need to zero it beforehand in +bitforce. +- No point counting wait time until the work is actually sent to bitforce +devices. +- Use string comparison functions instead of explicit comparisons. +- Account for wait_ms time when nonce_range is in use on BFL. +- Split nonces up into 1/5 chunks when nonce range is supported. +- limit clear buffer iterations. +- Add fd check to clear buffer. +- miner.php remove incorrect 'DATE' error message +- miner.php allow summary header in custom pages +- Disable nonce range support in BFL when broken support is detected. +- Restart_wait is only called with a ms value so incorporate that into the +function. +- Only try to adjust dev width when curses is built in. +- miner.php define custom sum fields as a simple array +- Fix off-by-one error in nonce increment in bfl. +- Use BE when setting nonce in bitforce nonce range work. +- Enable nonce range in the normal init sequence for bfl. +- Queue extra work at 2/3 differently depending on whether we're using nonce +range or not. +- Initially enable support for nonce range support on bfl, splitting nonces up +into 3/4 size and only disable it if it fails on work submit. +- Attempt to detect nonce range support in BFL by sending work requiring its +support. +- Limit retrying on busy for up to BITFORCE_TIMEOUT_MS +- Attempt to initialise while bitforce device returns BUSY. +- Extend length of string that can be passed to BFL devices. +- Fix signedness warning. +- Adjust device width column to be consistent. +- Use cgpu-> not gpus[] in watchdog thread. +- Add api stats (sleep time) +- Timing tweaks Added long and short timeouts, short for detecting throttling, +long to give up totally. Reset sleep time when device re-initialised Still check +results after timeout Back up a larger time if result on first poll. 
+- Add API Notify counter 'Comms Error' +- Style police on api.c +- Do all logging outside of the bitforce mutex locking to avoid deadlocks. +- Remove applog call from bfwrite to prevent grabbing nested mutexes. +- Bitforce style changes. +- Minor style changes. +- Remove needless roundl define. +- Made JSON error message verbose. +- Fine-tune timing adjustment. Also remove old work_restart timing. +- Check for gpu return times of >= 0, not just 0, to fix intensity dropping to +-10. +- Restart is zeroed in the mining thread so no need to do it inside the bitforce +code. +- More improvements to comms. BFL return nothing when throttling, so should not +be considered an error. Instead repeat with a longer delay. +- Polling every 10ms there's not much point checking the pthread_cond_timedwait +as it just adds overhead. Simply check the value of work_restart in the bfl main +polling loop. +- Use a pthread conditional that is broadcast whenever work restarts are +required. Create a generic wait function waiting a specified time on that +conditional that returns if the condition is met or a specified time passed to +it has elapsed. Use this to do smarter polling in bitforce to abort work, queue +more work, and check for results to minimise time spent working needlessly. +- Add busy time to wait time. +- api.c put version up to 1.14 +- Add tiny delay after writing to BFL Change BFL errors to something more human +readable Send work busy re-tries after 10ms delay + + +Version 2.4.4 - July 1, 2012 + +- Fix builds on non gnu platforms. +- api.c ensure old mode is always available when not using --api-groups + quit() +on param errors +- Implement rudimentary X-Mining-Hashrate support. +- Detect large swings in temperature when below the target temperature range and +change fan by amounts dependant on the value of tdiff. +- Adjust the fanspeed by the magnitude of the temperature difference when in the +optimal range. 
+- Revert "Restarting cgminer from within after ADL has been corrupted only leads +to a crash. Display a warning only and disable fanspeed monitoring." +- api.c fix json already closed +- implement and document API option --api-groups +- Put upper bounds to under 2 hours that work can be rolled into the future for +bitcoind will deem it invalid beyond that. +- define API option --api-groups +- api.c allow unwell devices to be enabled so they can be cured +- miner.php - fix/enable autorefresh for custom pages +- miner.php allow custom summary pages - new 'Mobile' summary +- Work around pools that advertise very low expire= time inappropriately as this +leads to many false positives for stale shares detected. +- Only show ztex board count if any exist. +- There is no need for work to be a union in struct workio_cmd +- fpgautils.c include a debug message for all unknown open errors +- Don't keep rolling work right up to the expire= cut off. Use 2/3 of the time +between the scantime and the expiry as cutoff for reusing work. +- Log a specific error when serial opens fail due to lack of user permissions +- Increase GPU timing resolution to microsecond and add sanity check to ensure +times are positive. +- Opencl code may start executing before the clfinish order is given to it so +get the start timing used for dynamic intensity from before the kernel is +queued. +- fpgautils.c - set BAUD rate according to termio spec +- fpgautils.c - linux ordering back to the correct way +- miner.php remove unneeded '.'s +- miner.php add auto refresh options +- miner.php add 'restart' next to 'quit' +- miner.php make fontname/size configurable with myminer.php +- Make the pools array a dynamically allocated array to allow unlimited pools to +be added. +- Make the devices array a dynamically allocated array of pointers to allow +unlimited devices. +- Dynamic intensity for GPUs should be calculated on a per device basis. Clean +up the code to only calculate it if required as well. 
+- Use a queueing bool set under control_lock to prevent multiple calls to +queue_request racing. +- Use the work clone flag to determine if we should subtract it from the total +queued variable and provide a subtract queued function to prevent looping over +locked code. +- Don't decrement staged extras count from longpoll work. +- Count longpoll's contribution to the queue. +- Increase queued count before pushing message. +- Test we have enough work queued for pools with and without rolltime +capability. +- As work is sorted by age, we can discard the oldest work at regular intervals +to keep only 1 of the newest work items per mining thread. +- Roll work again after duplicating it to prevent duplicates on return to the +clone function. +- Abstract out work cloning and clone $mining_threads copies whenever a rollable +work item is found and return a clone instead. +- api.c display Pool Av in json +- Take into account average getwork delay as a marker of pool communications +when considering work stale. +- Work out a rolling average getwork delay stored in pool_stats. +- Getwork delay in stats should include retries for each getwork call. +- Walk through the thread list instead of searching for them when disabling +threads for dynamic mode. +- Extend nrolltime to support the expiry= parameter. Do this by turning the +rolltime bool into an integer set to the expiry time. If the pool supports +rolltime but not expiry= then set the expiry time to the standard scantime. +- When disabling fanspeed monitoring on adl failure, remove any twin GPU +association. This could have been leading to hangs on machines with dual GPU +cards when ADL failed. +- modminer: Don't delay 2nd+ FPGAs during work restart +- Disable OpenCL code when not available. +- Fix openwrt crashing on regeneratehash() by making check_solve a noop. +- FPGA - allow device detect override without an open failure +- Fix sign warning. 
+ + +Version 2.4.3 - June 14, 2012 + +- can_roll and should_roll should have no bearing on the cycle period within the +miner_thread so remove it. +- Check for strategy being changed to load balance when enabling LPs. +- Check that all threads on the device that called get_work are waiting on +getwork before considering the pool lagging. +- Iterate over each thread belonging to each device in the hashmeter instead of +searching for them now that they're a list. +- When using rotate pool strategy, ensure we only select from alive enabled +pools. +- Start longpoll from every pool when load balance strategy is in use. +- Add mandatory and block fields to the work struct. Flag any shares that are +detected as blocks as mandatory to submit, along with longpoll work from a +previously rejecting pool. +- Consider the fan optimal if fanspeed is dropping but within the optimal speed +window. +- Fix typo in some API messages (succeess/success) +- api.c MMQ stat bugs +- Bugfix: Fix warnings when built without libudev support +- Bugfix: slay a variety of warnings +- Bugfix: modminer: Fix unsigned/signed comparison and similar warnings +- API add ModMinerQuad support +- Bugfix: Honour forceauto parameter in serial_detect functions +- modminer: Temperature sensor improvements +- modminer: Make log messages more consistent in format +- Only adjust GPU speed up if the fanspeed is within the normal fanrange and +hasn't been turned to maximum speed under overheat conditions. +- ModMiner use valid .name +- New driver: BTCFPGA ModMiner +- Abstract generally useful FPGA code into fpgautils.c +- API add stats for pool getworks +- miner.php option to hide specific fields from the display +- miner.php add version numbers to the summary page +- Update debian configs to v2.4.2 +- Add API and FPGA READMEs into Makefile to be included in source distribution. 
+- Icarus - fix unit64_t printf warnings + + +Version 2.4.2 - June 2, 2012 + +- API.class compiled with Java SE 6.0_03 - works with Win7x64 +- miner.php highlight devs too slow finding shares (possibly failing) +- API update version to V1.11 and document changes +- API save default config file if none specified +- api.c save success incorrectly returns error +- api.c replace BUFSIZ (linux/windows have different values) +- Move RPC API content out of README to API-README +- Open a longpoll connection if a pool is in the REJECTING state as it's the +only way to re-enable it automatically. +- Use only one longpoll as much as possible by using a pthread conditional +broadcast that each longpoll thread waits on and checks if it's the current pool +before +- If shares are known stale, don't use them to decide to disable a pool for +sequential rejects. +- Restarting cgminer from within after ADL has been corrupted only leads to a +crash. Display a warning only and disable fanspeed monitoring. +- Icarus: fix abort calculation/allow user specified abort +- Icarus: make --icarus-timing hidden and document it in FPGA-README +- Icarus: high accuracy timing and other bitstream speed support +- add-MIPSEB-to-icarus-for-BIG_ENDIAN +- work_decode only needs swab32 on midstate under BIG ENDIAN +- add compile command to api-example.c +- save config bugfix: writing an extra ',' when no gpus +- Add dpkg-source commits + + +Version 2.4.1 - May 6, 2012 + +- In the unlikely event of finding a block, display the block solved count with +the pool it came from for auditing. +- Display the device summary on exit even if a device has been disabled. +- Use correct pool enabled enums in api.c. +- Import Debian packaging configs +- Ensure we test for a pool recovering from idle so long as it's not set to +disabled. +- Fix pool number display. +- Give cgminer -T message only if curses is in use. +- Reinit_adl is no longer used. 
+- API 'stats' allow devices to add their own stats also for testing/debug +- API add getwork stats to cgminer - accessible from API 'stats' +- Don't initialise variables to zero when in global scope since they're already +initialised. +- Get rid of uninitialised variable warning when it's false. +- Move a pool to POOL_REJECTING to be disabled only after 3 minutes of +continuous rejected shares. +- Some tweaks to reporting and logging. +- Change FPGA detection order since BFL hangs on an ICA +- API support new pool status +- Add a temporarily disabled state for enabled pools called POOL_REJECTING and +use the work from each longpoll to help determine when a rejecting pool has +started working again. Switch pools based on the multipool strategy once a pool +is re-enabled. +- Removing extra debug +- Fix the benchmark feature by bypassing the new networking code. +- Reset sequential reject counter after a pool is disabled for when it is +re-enabled. +- Icarus - correct MH/s and U: with work restart set at 8 seconds +- ztex updateFreq was always reporting on fpga 0 +- Trying harder to get 1.15y working +- Specifying threads on multi fpga boards extra cgpu +- Missing the add cgpu per extra fpga on 1.15y boards +- API add last share time to each pool +- Don't try to reap curls if benchmarking is enabled. + + +Version 2.4.0 - May 3, 2012 + +- Only show longpoll warning once when it has failed. +- Convert hashes to an unsigned long long as well. +- Detect pools that have issues represented by endless rejected shares and +disable them, with a parameter to optionally disable this feature. +- Bugfix: Use a 64-bit type for hashes_done (miner_thread) since it can overflow +32-bit on some FPGAs +- Implement an older header fix for a label existing before the pthread_cleanup +macro. 
+- Limit the number of curls we recruit on communication failures and with +delaynet enabled to 5 by maintaining a per-pool curl count, and using a pthread +conditional that wakes up when one is returned to the ring buffer. +- Generalise add_pool() functions since they're repeated in add_pool_details. +- Bugfix: Return failure, rather than quit, if BFwrite fails +- Disable failing devices such that the user can attempt to re-enable them +- Bugfix: thread_shutdown shouldn't try to free the device, since it's needed +afterward +- API bool's and 1TBS fixes +- Icarus - minimise code delays and name timer variables +- api.c V1.9 add 'restart' + redesign 'quit' so thread exits cleanly +- api.c bug - remove extra ']'s in notify command +- Increase pool watch interval to 30 seconds. +- Reap curls that are unused for over a minute. This allows connections to be +closed, thereby allowing the number of curl handles to always be the minimum +necessary to not delay networking. +- Use the ringbuffer of curls from the same pool for submit as well as getwork +threads. Since the curl handles were already connected to the same pool and are +immediately available, share submission will not be delayed by getworks. +- Implement a scaleable networking framework designed to cope with any sized +network requirements, yet minimise the number of connections being reopened. Do +this by creating a ring buffer linked list of curl handles to be used by getwork, +recruiting extra handles when none is immediately available. +- There is no need for the submit and getwork curls to be tied to the pool +struct. +- Do not recruit extra connection threads if there have been connection errors +to the pool in question. +- We should not retry submitting shares indefinitely or we may end up with a +huge backlog during network outages, so discard stale shares if we failed to +submit them and they've become stale in the interim. + + +Version 2.3.6 - April 29, 2012 + +- Shorten stale share messages slightly. 
+- Protect the freeing of current_hash under mutex_lock to prevent racing on it +when set_curblock is hit concurrently. +- Change default behaviour to submitting stale, removing the --submit-stale +option and adding a --no-submit-stale option. +- Make sure to start the getwork and submit threads when a pool is added on the +fly. This fixes a crash when a pool is added to running cgminer and then +switched to. +- Faster hardware can easily outstrip the speed we can get work and submit +shares when using only one connection per pool. +- Test the queued list to see if any get/submits are already queued and if they +are, start recruiting extra connections by generating new threads. +- This allows us to reuse network connections at low loads but recruit new open +connections as they're needed, so that cgminer can scale to hardware of any +size. + + +Version 2.3.5 - April 28, 2012 + +- Restarting cgminer leads to a socket that can't be bound for 60 seconds, so +increase the interval that API binding waits to 30 seconds to minimise the +number of times it will retry, spamming the logs. +- Give a longpoll message for any longpoll that detects a block change, primary +or backup, and also display which pool it was. +- Decrease utility display to one decimal place. +- Small cosmetic output alignment. +- Add pool number to stale share message. +- Add space to log output now that there is more screen real estate available. +- Indentation clean up. +- Merge branch 'master' of github.com:ckolivas/cgminer +- Remove thread id display from rejected shares as well. +- Merge pull request #185 from Diapolo/diakgcn +- add goffset support for diakgcn with -v 1 and update kernel version +- Set have_longpoll to true when there is at least one pool with longpoll. +- Don't display the thread ID since it adds no useful information over the +device number. +- Don't display the first 8 bytes of a share since they will always be zero at +>= 1 difficulty. 
+- work->longpoll is reset across test_work_current so we need to recheck what +pool it belongs to. +- Use longpolls from backup pools with failover-only enabled just to check for +block changes, but don't use them as work. +- Start longpoll only after we have tried to extract the longpoll URL. +- Check for submitold flag on resubmit of shares, and give different message for +stale shares on retry. +- Check for submitold before submitstale. +- Don't force fresh curl connections on anything but longpoll threads. +- Create one longpoll thread per pool, using backup pools for those pools that +don't have longpoll. +- Use the work created from the longpoll return only if we don't have +failover-enabled, and only flag the work as a longpoll if it is the current +pool. +- This will work around the problem of trying to restart the single longpoll +thread on pool changes that was leading to race conditions. +- It will also have less work restarts from the multiple longpolls received from +different pools. +- Remove the ability to disable longpoll. It is not a useful feature and will +conflict with planned changes to longpoll code. +- Remove the invalid entries from the example configuration file. +- Add support for latest ATI SDK on windows. +- Export missing function from libztex. +- miner.php change socktimeoutsec = 10 (it only waits once) +- Bugfix: Make initial_args a const char** to satisfy exec argument type warning +(on Windows only) +- miner.php add a timeout so you don't sit and wait ... forever +- Create discrete persistent submit and get work threads per pool, thus allowing +all submitworks belonging to the same pool to reuse the same curl handle, and +all getworks to reuse their own handle. +- Use separate handles for submission to not make getwork potentially delay +share submission which is time critical. +- This will allow much more reusing of persistent connections instead of opening +new ones which can flood routers. 
+- This mandated a rework of the extra longpoll support (for when pools are +switched) and this is managed by restarting longpoll cleanly and waiting for a +thread join. +- miner.php only show the current date header once +- miner.php also add current time like single rig page +- miner.php display rig 'when' table at top of the multi-rig summary page +- README - add some Ztex details +- api.c include zTex in the FPGA support list +- api.c ensure 'devs' shows PGA's when only PGA code is compiled +- cgminer.c sharelog code consistency and compile warning fix +- README correct API version number +- README spelling error +- api.c combine all pairs of sprintfs() +- api.c uncomment and use BLANK (and COMMA) +- Code style cleanup +- Annotating frequency changes with the changed from value +- README clarification of 'notify' command +- README update for API RPC 'devdetails' +- api.c 'devdetails' list static details of devices +- Using less heap space as my TP-Link seems to not handle this much + + +Version 2.3.4 - April 25, 2012 + +- Extensively document the cause of GPU device issues and the use of --gpu-map. +- Support for share logging +- Detect poorly performing combination of SDK and phatk kernel and add verbose +warning at startup. +- Icarus update to new add_cgpu() +- Icarus driver working with Linux and Windows +- api.c fix unused variable compile warning +- Display all OpenCL devices when -n is called as well to allow debugging of +differential mapping of OpenCL to ADL. +- Add a --gpu-map option which will allow arbitrarily mapping ADL devices to +OpenCL devices for instances where association by enumeration alone fails. +- Increase upper limit on number of extra items to queue as some FPGA code can't +yet reliably keep many devices busy. +- Display configuration file information when -c option is passed and only when +file exists on loading default config file. +- Display configuration file loaded, if any, and debug output if configuration +file parsing failed. 
+- Add missing ztex header to Makefile for distribution. +- Document long-form COM port device names on Windows, required to specify +serial ports above 9 +- Include ztex bitstreams firmware in distribution and install if configured in. +- Style police on driver-ztex.c +- work_restart should only be changed by cgminer.c now +- Shut down the api cleanly when the api thread is cancelled. This should allow +the api socket to be closed successfully to next be reopened with app_restart. +- Make a union for cgpu device handles, and rename "device" to "device_ztex" +since it's Ztex-specific +- Initialise name variable. +- Remove unnecessary check for variable that always has memory allocated. +- Bugfix: Missing "break" no-op in default case +- Make the status window and log window as large as can fit on startup, +rechecking to see if it can be enlarged after the fact. This allows any number +of devices to be displayed provided the window is made long enough without +corrupting the output. +- Style police on libztex.c. +- API add removepool like the screen interface +- api.c escape required characters in return strings + pools returns the +username +- Set lp_path to NULL after free for consistency. +- Removing dmalloc import left behind by mistake +- Fixing leak in resp_hdr_cb +- miner.php warning highlight GPU stats if they are zero (e.g. ADL not enabled) +- miner.php highlight any device that isn't 'Enabled' +- miner.php highlight any Status that isn't 'Alive' +- miner.php optionally support multiple rigs +- Initial Ztex support 1.15x board. + + +Version 2.3.3 - April 15, 2012 + +- Don't even display that cpumining is disabled on ./configure to discourage +people from enabling it. +- Do a complete cgminer restart if the ATI Display Library fails, as it does on +windows after running for some time, when fanspeed reporting fails. +- Cache the initial arguments passed to cgminer and implement an attempted +restart option from the settings menu. 
+- Disable per-device status lines when there are more than 8 devices since +screen output will be corrupted, enumerating them to the log output instead at +startup. +- Reuse Vals[] array more than W[] till they're re-initialised on the second +sha256 cycle in poclbm kernel. +- Minor variable alignment in poclbm kernel. +- Make sure to disable devices with any status not being DEV_ENABLED to ensure +that thermal cutoff code works as it was setting the status to DEV_RECOVER. +- Re-initialising ADL simply made the driver fail since it is corruption over +time within the windows driver that's responsible. Revert "Attempt to +re-initialise ADL should a device that previously reported fanspeed stops +reporting it." +- Microoptimise poclbm kernel by ordering Val variables according to usage +frequency. + + +Version 2.3.2 - March 31, 2012 + +- Damping small changes in hashrate so dramatically has the tendency to always +make the hashrate underread so go back to gentle damping instead. +- Revert the crossover of variables from Vals to W in poclbm kernel now that +Vals are the first declared variables so they're used more frequently. +- Vals variables appearing first in the array in poclbm is faster. +- Change the preferred vector width to 1 for Tahiti only, not all poclbm +kernels. +- Use a time constant 0.63 for when large changes in hashrate are detected to +damp change in case the large change is an aliasing artefact instead of a real +change +- Only increment stale counter if the detected stales are discarded. +- Attempt to re-initialise ADL should a device that previously reported fanspeed +stops reporting it. +- Move the ADL setup and clearing to separate functions and provide a reinit_adl +function to be used when adl fails while running. +- Use slightly more damping on the decay time function in the never-ending quest +to smooth off the hashmeter. +- Set the starting fanspeed to a safe and fairly neutral 50% when autofan is +enabled. 
+- Provide locking around updates of cgpu hashrates as well to prevent multiple +threads accessing data fields on the same device. +- Display the beginning of the new block in verbose mode in the logs. +- Reinstate old diablo kernel variable ordering from 120222, adding only goffset +and vector size hint. The massive variable ordering change only helped one SDK +on +- Change the version number on the correct kernels. +- api.c devicecode/osinfo incorrectly swapped for json +- Add extensive instructions on how to make a native windows build. +- Update version numbers of poclbm and diablo kernels as their APIs have also +changed. +- Use global offset parameter to diablo and poclbm kernel ONLY for 1 vector +kernels. +- Use poclbm preferentially on Tahiti now regardless of SDK. +- Remove unused constant passed to poclbm. +- Clean up use of macros in poclbm and use bitselect everywhere possible. +- Add vector type hint to diablo kernel. +- Add worksize and vector attribute hints to the poclbm kernel. +- Spaces for non-aligned variables in poclbm. +- More tidying of poclbm. +- Swap Vals and W variables where they can overlap in poclbm. +- More tidying of poclbm. +- Tidy up first half of poclbm. +- Clean up use of any() by diablo and poclbm kernels. +- Minor variable symmetry changes in poclbm. +- Put additions on separate lines for consistency in poclbm. +- Consolidate last use of W11 into Vals4 in poclbm. 
+- Change email due to SPAM +- api.c miner.php add a '*' to the front of all notify counters - simplifies +future support of new counters +- miner.php add display 'notify' command +- Small change to help arch's without processor affinity +- Fix bitforce compile error +- api.c notify should report disabled devices also - of course +- API returns the simple device history with the 'notify' command +- code changes for supporting a simple device history +- api.c Report an OS string in config to help with device issues +- api.c fix Log Interval - integer in JSON +- api.c config 'Device Code' to show list of compiled devices + README +- api.c increase buffer size close to current code allowable limit +- removed 8-component vector support from kernel, as this is not supported in +CGMINER anyway +- forgot to update kernel modification date, fixed ;) +- reordered an addition in the kernel, which results in less instructions used +in the GPU ISA code for GCN +- miner.php: option for readonly or check privileged access +- Ignore redundant-with-build options --disable-gpu, --no-adl, and --no-restart +- miner.php: ereg_replace is DEPRECATED so use preg_replace instead +- Make curses TUI support optional at compile-time. 
+- Bugfix: AC_ARG_WITH provides withval instead of enableval +- miner.php split devs output for different devices +- api.c: correct error messages +- icarus.c modify (regular) timeout warning to only be debug +- icarus.c set the windows TODO timeout +- Allow specifying a specific driver for --scan-serial +- optimized nonce-check and output code for -v 2 and -v 4 +- Bugfix: Check for libudev header (not just library) in configure, and document +optional dependency +- Add API support for Icarus and Bitforce +- Next API version is 1.4 (1.3 is current) +- README/api.c add "When" the request was processed to STATUS +- Bugfix: ZLX to read BitFORCE temp, not ZKX -.- +- Use libudev to autodetect BitFORCE GPUs, if available +- Use the return value of fan_autotune to set fan_optimal instead of passing it +as a pointer. +- Pass the lasttemp from the device we're using to adjust fanspeed in twin +devices. +- fix the name to 3 chars, fix the multi-icarus support +- Bugfix: "-S auto" is the default if no -S is specified, and there is no such +delay in using it +- README add information missing from --scan-serial +- Update README RPC API Version comment +- Bugfix: Allow enabling CPU even without OpenCL support +- Change failed-to-mine number of requested shares message to avoid segfault on +recursive calling of quit(). +- Get rid of extra char which is just truncated in poclbm kernel. +- only small code formatting changes +- removed vec_step() as this could lead to errors on older SDKs +- unified code for generating nonce in kernel and moved addition of base to the +end -> faster + +Version 2.3.1 - February 24, 2012 + +- Revert input and output code on diakgcn and phatk kernels to old style which +worked better for older hardware and SDKs. +- Add a vector*worksize parameter passed to those kernels to avoid one op. +- Increase the speed of hashrate adaptation. +- Only send out extra longpoll requests if we want longpolls. 
+- API implement addpool command +- API return the untouched Total MH also (API now version 1.3) +- Add enable/disablepool to miner.php example and reduce font size 1pt + + +Version 2.3.0 - February 23, 2012 + +- Consider extra longpoll work items as staged_extra so as to make sure we queue +more work if queueing regular work items as longpolls. +- Use diablo kernel on all future SDKs for Tahiti and set preferred vector width +to 1 on poclbm kernel only. +- Explicitly type the constants in diakgcn kernel as uint, to be in line with +poclbm kernel. +- Reset all hash counters at the same time as resetting start times to get +accurate hashrates on exiting which is mandatory for benchmarking. +- Report thread out before it starts to avoid being flagged as sick when waiting +for the first work item. +- Don't disable and re-enable devices as they may recover and in the meantime +have their status set to OFF. +- API new commands enablepool and disablepool (version already incremented) +- Tolerate new-format temperature readings for bitforce +- Modify cgminer.c pool control to allow API to call it +- Bugfix: Fix BitFORCE driver memory leak in debug logging +- Extra byte was being unused in poclbm leading to failure on some platforms. +- Explicitly type the constants in poclbm kernel as uint. +- Don't save 'include' when saving the configuration +- Allow configuration file to include another recursively +- Use the SDK and hardware information to choose good performing default +kernels. +- Move phatk kernel to offset vector based nonce bases as well. +- Add a --benchmark feature which works on a fake item indefinitely to compare +device performance without any server or networking influence. +- Allow writing of multiple worksizes to the configuration file. +- Allow writing of multiple vector sizes to the configuration file. +- Allow writing of multiple kernels to the configuration file. +- Allow multiple different kernels to be chosen per device. 
+- Allow the worksize to be set per-device. +- Allow different vectors to be set per device. +- If we're well below the target temperature, increase gpu engine speed back to +maximum in case we have gotten lost between profiles during an idle period. +- We should be setting the value of fan_optimal, not its address. +- As all kernels will be new versions it's an opportunity to change the .bin +format and make it simpler. Specifying bitalign is redundant and long can be l. +- Use any() in kernel output code. +- Put the nonce for each vector offset in advance, avoiding one extra addition +in the kernel. +- Reset times after all mining threads are started to make estimating hashrates +easier at startup. +- Bugfix: allow no-exec (NX) stack +- Fix minor warning. +- fix the bitforce.c code style follow 1TBS +- fix icarus.c compile warning +- small changes to speedup no vec for AMD 898.1 OCL runtime +- Update licensing to GPL V3. +- Reset the longpoll flag after it's been used once to prevent it restarting +work again. +- Begin import of DiabloMiner kernel. +- Modify API debug messages to say API instead of DBG +- When API shuts down cgminer don't kill itself +- Don't make rolled work from the longpoll be seen as other longpoll work items. +- API add 'privileged' command so can verify access level +- Set the lp_sent variable under lock since there will almost always be a race +on setting this variable, potentially leading to multiple LPs being sent out. +- API restrict access to all non display commands by default +- Update API version to 1.2 for new 'Log Interval' +- API add --log Interval to 'config' reply +- --api-allow special case 0/0 means all + + +Version 2.2.7 - February 20, 2012 + +- Send out extra longpolls when we have switched pools and the longpoll thread +is still bound to the old one. This is particularly useful with p2pool where +longpolls do not correlate with main bitcoin block change and would have led to +high reject rates on failover. 
+- Store whether a work item is the result of a longpoll or not in struct work +and use it to help determine block changes directly from the work longpoll bool. +- Keep track of when a longpoll has been sent for a pool and if the current pool +is requesting work but has not sent a longpoll request, convert one of the work +items to a longpoll. +- Store the longpoll url in the pool struct and update it from the pool_active +test in case it changes. This is to allow further changes to longpoll management +on switching pools. +- Re-check for a longpoll supporting pool every 30 seconds if none is found +initially. +- Report threads as busy waiting on getwork on startup to avoid them being +flagged sick on startup during slow networking. +- Allow devices that are disabled due to overheating to be flagged as recovering +instead of disabling them and re-enable them if they're below ideal temperatures +- Tahiti prefers worksize 64 with poclbm. +- No need to expressly retain the opencl program now that the zero binary issue +is fixed. This actually fixes cgminer to work with the latest SDK included with +the ATI catalyst driver 12.2. +- Show error code on any opencl failure status. +- Add detection for version 898.1 SDK as well but only give SDK 2.6 warning once +on startup instead of with each device initialisation. +- Always use a fresh connection for longpoll as prolonged persistent connections +can fail for many reasons. +- Keep track of intended engine clock speed and only adjust up if it's higher +than the last intended speed. This avoids setting the clock speed to one +relative to a lower profile one by mistake. +- Use gpu-memdiff on startup if an engine clockspeed is set and a memdiff value +is set. +- Revert "Adjust engine speed up according to performance level engine setting, +not the current engine speed." - ineffectual. +- Freeze the queues on all threads that are sent the pause message to prevent +them trying to start up again with saved pings in their queues. 
+- Updates to diakgcn kernel.
+- Consolidate all screen updates to the watchdog thread and touch both windows
+before refresh.
+- Curses will be disabled in clean_up so don't do it early in kill_work, and
+disable_adl so that GPU settings may be restored to normal in case shutting down
+curses leads to instability on windows.
+- Stop the mining threads before trying to kill them.
+- Plain refresh() does not give reliable screen updates so get rid of all uses
+of it.
+- First release with working diakgcn kernel.
+
+Version 2.2.6 - February 16, 2012
+
+- Provide warning on each startup about sdk 2.6
+- Fix unused warnings on win32.
+- bitforce: Simplify BFopen WIN32 ifdef/else
+- Fix initialization warning with jansson 1.3
+- bitforce: Cleanup extraneous TODO that isn't needed
+- Move tcsetattr (and new tcflush) into *nix BFopen to simplify things a bit
+- Add message explaining 2nd thread disabling for dynamic mode and how to tune
+it.
+- Move logwindow down once number of devices is known.
+- Automatically choose phatk kernel for bitalign non-gcn ATI cards, and then
+only select poclbm if SDK2.6 is detected.
+- Allow the refresh interval to be adjusted in dynamic intensity with a
+--gpu-dyninterval parameter.
+- Make curses display visible right from the beginning and fix the window sizes
+so the initial messages don't get lost once the status window is drawn.
+- The amount of work scanned can fluctuate when intensity changes and since we
+do this one cycle behind, we increment the work more than enough to prevent
+repeating it.
+- bitforce: Set a 30 second timeout for serial port on Windows, since the
+default is undefined
+- Use PreVal4addT1 instead of PreVal4 in poclbm kernel.
+- Import PreVal4 and PreVal0 into poclbm kernel.
+- Import more prepared constants into poclbm kernel.
+- Keep variables in one array but use Vals[] name for consistency with other
+kernel designs.
+- Replace constants that are mandatorily added in poclbm kernel with one value.
+- Remove addition of final constant before testing for result in poclbm kernel. +- Hand optimise variable addition order. +- Hand optimise first variable declaration order in poclbm kernel. +- Radical reordering machine based first pass to change variables as late as +possible, bringing their usage close together. +- fix strcpy NULL pointer if env HOME unset. +- bitforce: Disable automatic scanning when at least one device is specified +manually +- Unroll all poclbm additions to enable further optimisations. + + +Version 2.2.5 - February 13, 2012 + +- Make output buffer write only as per Diapolo's suggestion. +- Constify nonce in poclbm. +- Use local and group id on poclbm kernel as well. +- Microoptimise phatk kernel on return code. +- Adjust engine speed up according to performance level engine setting, not the +current engine speed. +- Try to load a binary if we've defaulted to the poclbm kernel on SDK2.6 +- Use the poclbm kernel on SDK2.6 with bitalign devices only if there is no +binary available. +- Further generic microoptimisations to poclbm kernel. +- The longstanding generation of a zero sized binary appears to be due to the +OpenCL library putting the binary in a RANDOM SLOT amongst 4 possible binary +locations. Iterate over each of them after building from source till the real +binary is found and use that. +- Fix harmless warnings with -Wsign-compare to allow cgminer to build with -W. +- Fix missing field initialisers warnings. +- Put win32 equivalents of nanosleep and sleep into compat.h fixing sleep() for +adl.c. +- Restore compatibility with Jansson 1.3 and 2.0 (api.c required 2.1) +- Modularized logging, support for priority based logging +- Move CPU chipset specific optimization into device-cpu + + +Version 2.2.4 - February 11, 2012 + +- Fix double definition of A0 B0 to zeroA zeroB. +- Retain cl program after successfully loading a binary image. May decrease +failures to build kernels at startup. 
+- Variable unused after this so remove setting it.
+- BFI INT patching is not necessarily true on binary loading of files and not
+true on ATI SDK2.6+. Report bitalign instead.
+- Various string fixes for reject reason.
+- Generalize --temp-cutoff and implement support for reading temperature from
+BitFORCE FPGAs
+- Change message from recovered to alive since it is used on startup as well as
+when a pool has recovered.
+- Start mining as soon as any pool is found active and rely on the watchpool
+thread to bring up other pools.
+- Delayed responses from testing pools that are down can hold up the watchdog
+thread from getting to its device testing code, leading to false detection of
+the GPU not checking in, and can substantially delay auto gpu/auto fan
+management leading to overheating. Move pool watching to its own thread.
+- Bugfix: BitFORCE index needs to be static to count correctly
+- Space out retrieval of extra work according to the number of mining threads.
+- Make shutdown more robust. Enable the input thread only after the other
+threads exist. Don't kill off the workio thread and use it to exit main() only
+if there is an unexpected problem. Use kill_work() for all anticipated shutdowns
+where possible. Remove unused thread entry.
+- Change poclbm version number.
+- One array is faster than 2 separate arrays so change to that in poclbm kernel.
+- Microoptimisations to poclbm kernel which increase throughput slightly.
+- Import diablominer kernel. Currently disabled as not working.
+- Import diapolo kernel. Currently disabled as not working.
+- Conflicting entries of cl_kernel may have been causing problems, and
+automatically chosen kernel type was not being passed on. Rename the enum to
+cl_kernels and store the chosen kernel in each clState.
+- Set cl_amd_media_ops with the BITALIGN flag and allow non-bitselect devices to
+build.
+- Allow much longer filenames for kernels to load properly.
+- Allow different kernels to be used by different devices and fix the logic fail
+of overcorrecting on last commit with !strstr.
+- Fix kernel selection process and build error.
+- queue_phatk_kernel now uses CL_SET_VARG() for base-nonce(s), too
+- added OpenCL >= 1.1 detection code, in preparation of OpenCL 1.1 global offset
+parameter support
+- Use K array explicitly to make it clear what is being added.
+- Work items have a tendency to expire at exactly the same time and we don't
+queue extra items when there are plenty in the queue, regardless of age. Allow
+extra work items to be queued if adequate time has passed since we last
+requested work even if over the limit.
+- Discard work when failover-only is enabled and the work has come from a
+different pool.
+- Missing include to build on newer mingw32.
+- Move from the thread safe localtime_r to regular localtime which is the only
+one supported on newer pthread libraries on mingw32 to make it compile with the
+newer mingw. Thread safety is of no importance where localtime is used in this
+code.
+- Define in_addr_t in windows if required
+- sys/wait.h not required in windows
+- Allow API to restrict access by IP address
+- Add pool switching to example miner.php
+- Display X-Reject-Reason, when provided
+- Remove the test for whether the device is on the highest profile level before
+raising the GPU speed as it is ineffectual and may prevent raising the GPU
+speed.
+- Remove unnecessary check for opt_debug on every invocation of applog at
+LOG_DEBUG level and place the check in applog().
+
+
+Version 2.2.3 - February 6, 2012
+
+- Revert "Rewrite the convoluted get_work() function to be much simpler and roll
+work as much as possible with each new work item." This seems to cause a race on
+work in free_work(). Presumably other threads are still accessing the structure.
+ + +Version 2.2.2 - February 6, 2012 + +- Provide support for the submitold extension on a per-pool basis based on the +value being detected in a longpoll. +- Don't send a ping to a dynamic device if it's not enabled as that will just +enable it for one pass and then disable it again. +- Rewrite the convoluted get_work() function to be much simpler and roll work as +much as possible with each new work item. +- Roll as much work as possible from the work returned from a longpoll. +- Rolling work on each loop through the mining thread serves no purpose. +- Allow to stage more than necessary work items if we're just rolling work. +- Replace divide_work with reuse_work function used twice. +- Give rolled work a new ID to make sure there is no confusion in the hashtable +lookups. +- Remove now-defunct hash_div variables. +- Remove unused get_dondata function. +- Silence ADL warnings. +- Silence unused parameter warnings. +- Stagger the restart of every next thread per device to keep devices busy ahead +of accessory threads per device. +- Deprecate the --donation feature. Needlessly complex, questionable usefulness, +depends on author's server and a central pool of some kind, and was not heavily +adopted. +- It's devices that report back now, not threads, update message. +- Continue auto-management of fan and engine speeds even if a device is disabled +for safety reasons. +- No need to check we're highest performance level when throttling GPU engine +speed. +- Abstract out tests for whether work has come from a block that has been seen +before and whether a string is from a previously seen block. +- Probe but don't set the timeout to 15 seconds as some networks take a long +time to timeout. +- Remove most compiler warnings from api.c +- Add last share's pool info in cgpu_info +- Allow the OpenCL platform ID to be chosen with --gpu-platform. +- Iterate over all platforms displaying their information and number of devices +when --ndevs is called. 
+- Deprecate main.c +- Some networks can take a long time to resolve so go back to 60 second timeouts +instead of 15. +- Only enable curses on failure if curses is desired. +- Fix warnings in bitforce.c +- Bugfix: Need to open BitForce tty for read-write +- Fix various build issues. +- Modularize code: main.c -> device-cpu + device-gpu +- Fix phatk kernel not working on non-bitalign capable devices (Nvidia, older +ATI). +- Update poclbm kernel for better performance on GCN and new SDKs with bitalign +support when not BFI INT patching. Update phatk kernel to work properly for non +BFI INT patched kernels, providing support for phatk to run on GCN and non-ATI +cards. +- Return last accepted share pool/time for devices +- Display accepted share pool/time for CPUs +- Bug intensity always shows GPU 0 +- Update example web miner.php to use new API commands + + +Version 2.2.1 - January 30, 2012 + +NOTE - The GPU Device reordering in 2.2.0 by default was considered a bad idea +so the original GPU ordering is used by default again unless reordering is +explicitly requested. + +- Fix bitforce failing to build into cgminer. +- Add missing options to write config function. +- Add a --gpu-reorder option to only reorder devices according to PCI Bus ID +when requested. +- Fix for midstate support being broken on pools that supported no-midstate +work by ensuring numbers are 32 bits in sha2.c +- Set virtual GPUs to work when ADL is disabled or all mining will occur on GPU +0. +- Add information about paused threads in the menu status. +- Disable all but the first thread on GPUs in dynamic mode for better +interactivity. +- Set the latest network access time on share submission for --net-delay even if +we're not delaying that submission for further network access. +- Clear adl on exiting after probing values since it may attempt to overclock. +- As share submission is usually staggered, and delays can be costly, submit +shares without delay even when --net-delay is enabled. 
+- Display GPU number and device name when ADL is successfully enabled on it.
+- Display GPU ordering remapping in verbose mode.
+- Don't fail in the case the number of ADL and OpenCL devices do not match, and
+do not attempt to reorder devices unless they match. Instead give a warning
+about the mismatch.
+- Display error codes should ADL not return ADL_OK in the more critical function
+calls.
+- Fix unused warning.
+- Fix compile warnings in api.c
+- Add extensive ADL based device info in debug mode.
+- Make --ndevs display verbose opencl information as well to make debugging
+version information easier.
+- Display information about the opencl platform with verbose enabled.
+- Explicitly check for nvidia in opencl platform strings as well.
+
+
+Version 2.2.0 - January 29, 2012
+
+NOTE: GPU Device order will change with this release with ATI GPUs as cgminer
+now can enumerate them according to their Bus ID which means the values should
+now correlate with their physical position on the motherboard.
+
+- Default to poclbm kernel on Tahiti (7970) since phatk does not work, even
+though performance is sub-standard so that at least it will mine successfully by
+default.
+- Retain cl program after every possible place we might build the program.
+- Update ADL SDK URL.
+- Fix potential overflow.
+- Map GPU devices to virtual devices in their true physical order based on
+BusNumber.
+- Change the warning that comes with failure to init cl on a device to be more
+generic and accurate.
+- Advertise longpoll support in X-Mining-Extensions
+- Detect dual GPU cards by iterating through all GPUs, finding ones without
+fanspeed and matching twins with fanspeed one bus ID apart.
+- Do not attempt to build the program that becomes the kernel twice. This could
+have been leading to failures on initialising cl.
+- Some opencl compilers have issues with no spaces after -D in the compiler
+options.
+- Allow intensity up to 14.
+- Use calloced stack memory for CompilerOptions to ensure sprintf writes to the
+beginning of the char.
+- Whitelist 79x0 cards to prefer no vectors as they perform better without.
+- Adjust fan speed gently while in the optimal range when temperature is
+drifting to minimise overshoot in either direction.
+- Detect dual GPU cards via the indirect information of - 1st card has a fan
+controller. 2nd card does not have a fan controller, cards share the same device
+name
+- Instead of using the BFI_INT patching hack on any device reporting
+cl_amd_media_ops, create a whitelist of devices that need it. This should enable
+GCN architectures.
+- Fixed API compiling issue on OS X
+- Add more explanation of JSON format and the 'save' command
+- Return an error if using ADL API commands when it's not available
+- Read off lpThermalControllerInfo from each ADL device.
+- Add ADL_Overdrive5_ThermalDevices_Enum interface.
+- Add API commands: config, switchpool, gpu settings, save
+- Implement socks4 proxy support.
+- Fix send() for JSON strings
+- Introduce a --net-delay option which guarantees at least 250ms between any
+networking requests to not overload slow routers.
+- Generalise locking init code.
+- Allow invalid values to be in the configuration file, just skipping over them
+provided the rest of the file is valid JSON. This will allow older configuration
+files to keep working.
+- Allow CPU mining to be explicitly enabled only if other mining support is
+built in.
+- BitForce FPGA support
+- Configure out building and support of all CPU mining code unless
+--enable-cpumining is enabled.
+- Allow parsed values to be zero which will allow 0 values in the config file to
+work.
+- Advertise that we can make our own midstate, so the pool can skip generating
+it for us
+- Refactor the CPU scanhash_* functions to use a common API. Fixes bugs.
+- Don't consider a pool lagging if a request has only just been filed. This
+should decrease the false positives for "pool not providing work fast enough".
+- Invalidating work after longpoll made hash_pop return no work giving a false
+positive for dead pool. Rework hash_pop to retry while it finds no staged work
+until some becomes available.
+- Remove TCP_NODELAY from curl options as many small packets may be contributing
+to network overload, when --net-delay is enabled.
+- Refactor miner_thread to be common code for any kind of device
+- Simplify submit_nonce loop and avoid potentially missing FOUND - 1 entry.
+Reported by Luke-Jr.
+- Micro-optimisation in sha256_sse2 code courtesy of Guido Ascioti
+guido.ascioti@gmail.com
+- Refactor to abstract device-specific code
+
+
+Version 2.1.2 - January 6, 2012
+
+- If api-description is specified, save it when writing the config file
+- Adjust utility width to be constant maximum as well.
+- Add percent signs to reject ratio outputs
+- Should the donation pool fail, don't make the failover pool behave as though
+the primary pool is lagging.
+- Use an alternative pool should the donation getwork fail.
+
+
+Version 2.1.1 - January 1, 2012
+
+- Include API examples in distribution tarball.
+- Don't attempt to pthread_join when cancelling threads as they're already
+detached and doing so can lead to a segfault.
+- Give more generic message if slow pool at startup is the donation pool.
+- Continue to attempt restarting GPU threads if they're flagged dead at 1 min.
+intervals.
+- Don't attempt to restart sick flagged GPUs while they're still registering
+activity.
+- Make curl use fresh connections whenever there is any communication issue
+in case there are dead persistent connections preventing further comms from
+working.
+- Display pool in summary if only 1 pool.
+- Adjust column width of A/R/HW to be the maximum of any device and align them.
+
+
+Version 2.1.0 - December 27, 2011
+
+- Major infrastructure upgrade with RPC interface for controlling via sockets
+encoded with/without JSON courtesy of Andrew Smith. Added documentation for
+use of the API and sample code to use with it.
+- Updated linux-usb-cgminer document. +- Rewrite of longpoll mechanism to choose the current pool wherever possible to +use for the longpoll, or any pool that supports longpoll if the current one +does not. +- Display information about longpoll when the chosen server has changed. +- Fix the bug where longpoll generated work may have been sent back to the +wrong pool, causing rejects. +- Fix a few race conditions on closing cgminer which caused some of the crashes +on exit. +- Only adjust gpu engine speed in autotune mode if the gpu is currently at the +performance level of that being adjusted. +- Various fixes for parsing/writing of configuration files. +- Do not add blank lines for threads of unused CPUs. +- Show which pool is unresponsive on startup. +- Only show GPU management menu item if GPUs are in use. +- Align most device columns in the curses display. + + +Version 2.0.8 - November 11, 2011 + +- Make longpoll do a mandatory flushing of all work even if the block hasn't +changed, thus supporting longpoll initiated work change of any sort and merged +mining. +- Byteswap computed hash in hashtest so it can be correctly checked. This fixes +the very rare possibility that a block solve on solo mining was missed. +- Add x86_64 w64 mingw32 target +- Allow a fixed speed difference between memory and GPU clock speed with +--gpu-memdiff that will change memory speed when GPU speed is changed in +autotune mode. +- Don't load the default config if a config file is specified on the command +line. +- Don't build VIA on apple since -a auto bombs instead of gracefully ignoring +VIA failing. +- Build fix for dlopen/dlclose errors in glibc. + + +Version 2.0.7 - October 17, 2011 + +- Support work without midstate or hash1, which are deprecated in bitcoind 0.5+ +- Go to kernel build should we fail to clCreateProgramWithBinary instead of +failing on that device. This should fix the windows problems with devices not +initialising. 
+- Support new configuration file format courtesy of Chris Savery which can write +the config file from the menu and will load it on startup. +- Write unix configuration to .cgminer/cgminer.conf by default and prompt to +overwrite if given a filename from the menu that exists. + + +Version 2.0.6 - October 9, 2011 + +- Must initialise the donorpool mutex or it fails on windows. +- Don't make donation work interfere with block change detection allowing +donation to work regardless of the block chain we're mining on. +- Expire shares as stale with a separate timeout from the scantime, defaulting +to 120 seconds. +- Retry pools after a delay of 15 seconds if none can be contacted on startup +unless a key is pressed. +- Don't try to build adl features without having adl. +- Properly check shares against target difficulty - This will no longer show +shares when solo mining at all unless they're considered to be a block solve. +- Add altivec 4 way (cpu mining) support courtesy of Gilles Risch. +- Try to use SSL if the server supports it. +- Display the total solved blocks on exit (LOL if you're lucky). +- Use ADL activity report to tell us if a sick GPU is still busy suggesting it +is hard hung and do not attempt to restart it. + + +Version 2.0.5 - September 27, 2011 + +- Intensity can now be set to dynamic or static values per-device. +- New donation feature --donation sends a proportion of shares to author's +account of choice, but is disabled by default! +- The hash being displayed and block detection has been fixed. +- Devices not being mined on will not attempt to be ADL managed. +- Intensity is now displayed per GPU device. +- Make longpoll attempt to restart as often as opt_retries specifies. +- We weren't rolling work as often as we could. +- Correct some memory management issues. +- Build fixes. +- Don't mess with GPUs if we don't have them. 
+ + +Version 2.0.4 - September 23, 2011 + +- Confused Longpoll messages should be finally fixed with cgminer knowing for +sure who found the new block and possibly avoiding a rare crash. +- Display now shows the actual hash and will say BLOCK! if a block is deemed +solved. +- Extra spaces, which would double space lines on small terminals, have been +removed. +- Fan speed change is now damped if it is already heading in the correct +direction to minimise overshoot. +- Building without opencl libraries is fixed. +- GPUs are autoselected if there is only one when in the GPU management menu. +- GPU menu is refreshed instead of returning to status after a GPU change. + + +Version 2.0.3 - September 17, 2011 + +- Various modes of failure to set fanspeeds and adl values have been addressed +and auto-fan should work now on most hardware, and possibly other values +which previously would not have worked. +- Fixed a crash that can occur on switching pools due to longpoll thread races. +- Use ATISTREAMSDKROOT if available at build time. +- Fanspeed management is returned to the driver default on exit instead of +whatever it was when cgminer was started. +- Logging of events deemed WARNING or ERR now will display even during +periods where menu input is being awaited on. + + +Version 2.0.2 - September 11, 2011 + +- Exit cleanly if we abort before various threads are set up or if they no +longer exist. +- Fix a rare crash in HASH_DEL due to using different mutexes to protect the +data. +- Flag devices that have never started and don't allow enabling of devices +without restarting them. +- Only force the adapter speed to high if we've flagged this device as being +managed. +- Flag any devices with autofan or autogpu as being managed. +- Use a re-entrant value to store what fanspeed we're trying to set in case the +card doesn't support small changes. Force it to a multiple of 10% if it +fails on trying to speed up the fan. 
+- Do not bother resetting values to old ones if changes to GPU parameters report
+failure, instead returning a failure code only if the return value from get()
+differs.
+- Remove redundant check.
+- Only display supported values from fanspeed on change settings.
+- Missing bracket from output.
+- Display fan percentage on devices that only support reporting percent and not
+RPM.
+- Properly substitute DLOPEN flags to build with ADL support when -ldl is needed
+and not when opencl is not found.
+
+
+Version 2.0.1 - September 9, 2011
+
+- Fix building on 32bit glibc with dlopen with -lpthread and -ldl
+- ByteReverse is not used and the bswap opcode breaks big endian builds. Remove
+it.
+- Ignore whether the display is active or not since only display enabled devices
+work this way, and we skip over repeat entries anyway.
+- Only reset values on exiting if we've ever modified them.
+- Flag adl as active if any card is successfully activated.
+- Add a thermal cutoff option as well and set it to 95 degrees by default.
+- Change the fan speed by only 5% if it's over the target temperature but less
+than the hysteresis value to minimise overshoot down in temperature.
+- Add a --no-adl option to disable ADL monitoring and GPU settings.
+- Only show longpoll received delayed message at verbose level.
+- Allow temperatures greater than 100 degrees.
+- We should be passing a float for the remainder of the vddc values.
+- Implement accepting a range of engine speeds as well to allow a lower limit to
+be specified on the command line.
+- Allow per-device fan ranges to be set and use them in auto-fan mode.
+- Display which GPU has overheated in warning message.
+- Allow temperature targets to be set on a per-card basis on the command line.
+- Display fan range in autofan status.
+- Setting the hysteresis is unlikely to be useful on the fly and doesn't belong
+in the per-gpu submenu.
+- With many cards, the GPU summaries can be quite long so use a terse output +line when showing them all. +- Use a terser device status line to show fan RPM as well when available. +- Define max gpudevices in one macro. +- Allow adapterid 0 cards to enumerate as a device as they will be non-AMD +cards, and enable ADL on any AMD card. +- Do away with the increasingly confusing and irrelevant total queued and +efficiency measures per device. +- Only display values in the log if they're supported and standardise device log +line printing. + + +Version 2.0.0 - September 6, 2011 + +Major feature upgrade - GPU monitoring, (over)clocking and fan control for ATI +GPUs. + +New command line switches: +--auto-fan- Automatically adjust all GPU fan speeds to maintain a target +temperature +--auto-gpu- Automatically adjust all GPU engine clock speeds to maintain +a target temperature +--gpu-engine Set the GPU engine (over)clock in Mhz - one value for all or +separate by commas for per card. +--gpu-fan Set the GPU fan percentage - one value for all or separate +by commas for per card. +--gpu-memclock Set the GPU memory (over)clock in Mhz - one value for all +or separate by commas for per card. +--gpu-powertune Set the GPU powertune percentage - one value for all or +separate by commas for per card. +--gpu-vddc Set the GPU voltage in Volts - one value for all or separate +by commas for per card. +--temp-hysteresis Set how much the temperature can fluctuate outside +limits when automanaging speeds (default: 3) +--temp-overheat Set the overheat temperature when automatically managing +fan and GPU speeds (default: 85) +--temp-target Set the target temperature when automatically managing fan +and GPU speeds (default: 75) + +- Implement ATI ADL support for GPU parameter monitoring now and setting later +(temp, fan, clocks etc.). +- Check for the presence of the ADL header files in ADL_SDK. +- Import adl_functions.h from amd overdrive ctrl. 
+- Implement a setup function that tries to detect GPUs that support the ADL and +link in the parameters into the gpus struct. +- Put a summary of monitoring information from the GPU menu. +- Implement changing memory speed and voltage on the fly. +- Implement fan speed setting. +- Minor corrections to set fan speed by percentage. +- Make sure to read off the value in RPM only. +- Implement auto fanspeed adjustment to maintain a target temperature and +fanspeed below 85%, with an overheat check that will speed the fan up to 100%. +- Add an --auto-fan command line option to allow all GPUs to have autofan +enabled from startup. +- Add a gpu autotune option which adjusts GPU speed to maintain a target +temperature within the bounds of the default GPU speed and any overclocking set. +- Avoid a dereference if the longpoll thread doesn't exist. +- Clean up by setting performance profiles and fan settings to startup levels on +exit. +- Add a small amount of hysteresis before lowering clock speed. +- Allow target, overheat and hysteresis temperatures to be set from command +line. +- Combine all stats collating into one function to avoid repeating function +calls on each variable. +- Add gpu statistics to debugging output via the watchdog thread. +- Implement menus to change temperature limits. +- Implement setting the GPU engine clock speed of all devices or each device as +a comma separated value. +- Implement setting the GPU memory clock speed of all devices or each device as +a comma separated value. +- Implement setting the GPU voltage of all devices or each device as a comma +separated value. +- Implement setting the GPU fan speed of all devices or each device as a comma +separated value. +- Add support for monitoring powertune setting. +- Implement changing of powertune value from the GPU change settings menu. +- Get the value of powertune in get_stats. +- Implement setting the GPU powertune value of all devices or each device as a +comma separated value. 
+- Remove the safety checks in speed setting since confirmation is done first in +the menu, then show the new current values after a short pause. +- Force the speed to high on startup and restore it to whatever the setting was +on exit. +- Add temperature to standard output where possible and use more compact output. +- Move and print at the same time in curses to avoid random trampling display +errors. +- Update the status window only from the watchdog thread, do not rewrite the top +status messages and only refresh once all the status window is complete, +clearing the window each time to avoid corruption. +- Set a safe starting fan speed if we're automanaging the speeds. +- Provide locking around all adl calls to prevent races. +- Lower profile settings cannot be higher than higher profile ones so link any +drops in settings. +- Add new needed text files to distribution. +- Queue requests ignoring the number of staged clones since they get discarded +very easily leading to false positives for pool not providing work fast enough. +- Include libgen.h in opt.c to fix win32 compilation warnings. +- Fix compilation warning on win32. +- Add the directory name from the arguments cgminer was called from as well to +allow it running from a relative pathname. +- Add a --disable-adl option to configure and only enable it if opencl support +exists. +- Retry before returning a failure to get upstream work as a failure to avoid +false positives for pool dead. +- Retry also if the decoding of work fails. +- Use the presence of X-Roll-Ntime in the header as a bool for exists unless N +is found in the response. + + +Version 1.6.2 - September 2, 2011 + +- Add --failover-only option to not leak work to backup pools when the primary +pool is lagging. +- Change recommendation to intensity 9 for dedicated miners. 
+- Fix the bouncing short term value by allowing it to change dynamically when +the latest value is very different from the rolling value, but damp the change +when it gets close. +- Use the curses_lock to protect the curses_active variable and test it under +lock. +- Go back to requesting work 2/3 of the way through the current scantime with +CPU mining as reports of mining threads running out of work have occurred with +only 5 seconds to retrieve work. +- Add start and stop time scheduling for regular time of day running or once off +start/stop options. +- Print summary on quit modes. +- Put some sanity checks on the times that can be input. +- Give a verbose message when no active pools are found and pause before +exiting. +- Add verbose message when a GPU fails to initialise, and disable the correct +GPU. +- Cryptopp asm32 was not correctly updated to the incremental nonce code so the +hash counter was bogus. +- Get rid of poorly executed curl check. +- If curl does not have sockopts, do not try to compile the +json_rpc_call_sockopt_cb function, making it possible to build against older +curl libraries. +- Most people expect /usr/local when an unspecified prefix is used so change to +that. +- Rename localgen occasions to getwork fail occasions since localgen is +unrelated now. + + +Version 1.6.1 - August 29, 2011 + +- Copy cgminer path, not cat it. +- Switching between redrawing windows does not fix the crash with old +libncurses, so redraw both windows, but only when the window size hasn't +changed. +- Reinstate minimum 1 extra in queue to make it extremely unlikely to ever have +0 staged work items and any idle time. +- Return -1 if no input is detected from the menu to prevent it being +interpreted as a 0. +- Make pthread, libcurl and libcurses library checks mandatory or fail. +- Add a --disable-opencl configure option to make it possible to override +detection of opencl and build without GPU mining support. 
+- Confusion over the variable name for number of devices was passing a bogus +value which likely was causing the zero sized binary issue. +- cgminer no longer supports default url user and pass so remove them. +- Don't show value of intensity since it's dynamic by default. +- Add options to explicitly enable CPU mining or disable GPU mining. +- Convert the opt queue into a minimum number of work items to have queued +instead of an extra number to decrease risk of getting idle devices without +increasing risk of higher rejects. +- Statify tv_sort. +- Check for SSE2 before trying to build 32 bit SSE2 assembly version. Prevents +build failure when yasm is installed but -msse2 is not specified. +- Add some defines to configure.ac to enable exporting of values and packaging, +and clean up output. +- Give convenient summary at end of ./configure. +- Display version information and add --version command line option, and make +sure we flush stdout. +- Enable curses after the mining threads are set up so that failure messages +won't be lost in the curses interface. +- Disable curses after inputting a pool if we requested no curses interface. +- Add an option to break out after successfully mining a number of accepted +shares. +- Exit with a failed return code if we did not reach opt_shares. +- The cpu mining work data can get modified before we copy it if we submit it +async, and the sync submission is not truly sync anyway, so just submit it sync. + + +Version 1.6.0 - August 26, 2011 + +- Make restarting of GPUs optional for systems that hang on any attempt to +restart them. Fix DEAD status by comparing it to last live time rather than +last attempted restart time since that happens every minute. +- Move staged threads to hashes so we can sort them by time. +- Create a hash list of all the blocks created and search them to detect when a +new block has definitely appeared, using that information to detect stale work +and discard it. 
+- Update configure.ac for newer autoconf tools.
+- Use the new hashes directly for counts instead of the fragile counters
+currently in use.
+- Update to latest sse2 code from cpuminer-ng.
+- Allow LP to reset block detect and block detect lp flags to know who really
+came first.
+- Get start times just before mining begins to not have very slow rise in
+average.
+- Add message about needing one server.
+- We can queue all the necessary work without hitting frequent stales now with
+the time and string stale protection active all the time. This prevents a
+pool being falsely labelled as not providing work fast enough.
+- Include uthash.h in distro.
+- Implement SSE2 32 bit assembly algorithm as well.
+- Fail gracefully if unable to open the opencl files.
+- Make cgminer look in the install directory for the .cl files making make
+install work correctly.
+- Allow a custom kernel path to be entered on the command line.
+- Bump threshold for lag up to maximum queued but no staged work.
+- Remove fragile source patching for bitalign, vectors et al. and simply pass it
+with the compiler options.
+- Actually check the value returned for the x-roll-ntime extension to make sure
+it isn't saying N.
+- Prevent segfault on exit for when accessory threads don't exist.
+- Disable curl debugging with opt protocol since it spews to stderr.
+
+
+Version 1.5.8 - August 23, 2011
+
+- Minimise how much more work can be given in cpu mining threads each interval.
+- Make the fail-pause progressively longer each time it fails until the network
+recovers.
+- Only display the lagging message if we've requested the work earlier.
+- Clean up the pool switching to not be dependent on whether the work can roll
+or not by setting a lagging flag and then the idle flag.
+- Only use one thread to determine if a GPU is sick or well, and make sure to
+reset the sick restart attempt time.
+- The worksize was unintentionally changed back to 4k by mistake, this caused a
+slowdown.
+
+
+Version 1.5.7 - August 22, 2011
+
+- Fix a crash with --algo auto
+- Test at appropriate target difficulty now.
+- Add per-device statistics log output with --per-device-stats
+- Fix breakage that occurs when 1 or 4 vectors are chosen on new phatk.
+- Make rolltime report debug level only now since we check it every work
+item.
+- Add the ability to enable/disable per-device stats on the fly and match
+logging on/off.
+- Explicitly tell the compiler to retain the program to minimise the chance of
+the zero sized binary errors.
+- Add one more instruction to avoid one branch point in the common path in the
+cl return code. Although this adds more ALUs overall and more branch points, the
+common path code has the same number of ALUs and one less jmp, jmps being more
+expensive.
+- Explicitly link in ws2_32 on the windows build and update README file on how
+to compile successfully on windows.
+- Release cl resources should the gpu mining thread abort.
+- Attempt to restart a GPU once every minute while it's sick.
+- Don't kill off the reinit thread if it fails to init a GPU but returns safely.
+- Only declare a GPU dead if there's been no sign of activity from the reinit
+thread for 10 mins.
+- Never automatically disable any pools but just specify them as idle if they're
+unresponsive at startup.
+- Use any longpoll available, and don't disable it if switching to a server that
+doesn't have it. This allows you to mine solo, yet use the longpoll from a pool
+even if the pool is the backup server.
+- Display which longpoll failed and don't free the ram for lp_url since it
+belongs to the pool hdr path.
+- Make the tcp setsockopts unique to linux in the hope it allows freebsd et al.
+to compile.
+
+
+Version 1.5.6 - August 17, 2011
+
+- New phatk and poclbm kernels. Updated phatk to be in sync with latest 2.2
+courtesy of phateus. Custom modified to work best with cgminer.
+- Updated output buffer code to use a smaller buffer with the kernels.
+- Clean up the longpoll management to ensure the right paths go to the right +pool and display whether we're connected to LP or not in the status line. + + +Version 1.5.5 - August 16, 2011 + +- Rework entirely the GPU restart code. Strike a balance between code that +re-initialises the GPU entirely so that soft hangs in the code are properly +managed, but if a GPU is completely hung, the thread restart code fails +gracefully, so that it does not take out any other code or devices. This will +allow cgminer to keep restarting GPUs that can be restarted, but continue +mining even if one or more GPUs hangs which would normally require a reboot. +- Add --submit-stale option which submits all shares, regardless of whether they +would normally be considered stale. +- Keep options in alphabetical order. +- Probe for slightly longer for when network conditions are lagging. +- Only display the CPU algo when we're CPU mining. +- As we have keepalives now, blaming network flakiness on timeouts appears to +have been wrong. Set a timeout for longpoll to 1 hour, and most other +network connectivity to 1 minute. +- Simplify output code and remove HW errors from CPU stats. +- Simplify code and tidy output. +- Only show cpu algo in summary if cpu mining. +- Log summary at the end as per any other output. +- Flush output. +- Add a linux-usb-cgminer guide courtesy of Kano. + + +Version 1.5.4 - August 14, 2011 + +- Add new option: --monitor Option lets user specify a command that +will get forked by cgminer on startup. cgminer's stderr output subsequently gets +piped directly to this command. +- Allocate work from one function to be able to initialise variables added +later. +- Add missing fflush(stdout) for --ndevs and conclusion summary. +- Preinitialise the devices only once on startup. +- Move the non cl_ variables into the cgpu info struct to allow creating a new +cl state on reinit, preserving known GPU variables. 
+- Create a new context from scratch in initCQ in case something was corrupted to
+maximise our chance of successfully creating a new worker thread. Hopefully this
+makes thread restart on GPU failure more reliable, without hanging everything
+in the case of a completely wedged GPU.
+- Display last initialised time in gpu management info, to know if a GPU has
+been re-initialised.
+- When pinging a sick GPU, flush finish and then ping it in a separate thread in
+the hope it recovers without needing a restart, but without blocking code
+elsewhere.
+- Only consider a pool lagging if we actually need the work and we have none
+staged despite queue requests stacking up. This decreases significantly the
+amount of work that leaks to the backup pools.
+- The can_roll function fails inappropriately in stale_work.
+- Only put the message that a pool is down if not pinging it every minute. This
+prevents cgminer from saying pool down at 1 minute intervals unless in debug
+mode.
+- Free all work in one place allowing us to perform actions on it in the future.
+- Remove the extra shift in the output code which was of dubious benefit. In
+fact in cgminer's implementation, removing this caused a miniscule speedup.
+- Test each work item to see if it can be rolled instead of per-pool and roll
+whenever possible, adhering to the 60 second timeout. This makes the period
+after a longpoll have smaller dips in throughput, as well as requiring less
+getworks overall thus increasing efficiency.
+- Stick to rolling only work from the current pool unless we're in load balance
+mode or lagging to avoid aggressive rolling imitating load balancing.
+- If a work item has had any mining done on it, don't consider it discarded
+work.
+
+
+Version 1.5.3 - July 30, 2011
+
+- Significant work went into attempting to make the thread restart code robust
+to identify sick threads, tag them SICK after 1 minute, then DEAD after 5
+minutes of inactivity and try to restart them.
Instead of re-initialising the +GPU completely, only a new cl context is created to avoid hanging the rest of +the GPUs should the dead GPU be hung irrevocably. +- Use correct application name in syslog. +- Get rid of extra line feeds. +- Use pkg-config to check for libcurl version +- Implement per-thread getwork count with proper accounting to not over-account +queued items when local work replaces it. +- Create a command queue from the program created from source which allows us +to flush the command queue in the hope it will not generate a zero sized binary +any more. +- Be more willing to get work from the backup pools if the work is simply being +queued faster than it is being retrieved. + + +Version 1.5.2 - July 28, 2011 + +- Restarting a hung GPU can hang the rest of the GPUs so just declare it dead +and provide the information in the status. +- The work length in the miner thread gets smaller but doesn't get bigger if +it's under 1 second. This could end up leading to CPU under-utilisation and +lower and lower hash rates. Fix it by increasing work length if it drops +under 1 second. +- Make the "quiet" mode still update the status and display errors, and add a +new --real-quiet option which disables all output and can be set once while +running. +- Update utility and efficiency figures when displaying them. +- Some Intel HD graphics support the opencl commands but return errors since +they don't support opencl. Don't fail with them, just provide a warning and +disable GPU mining. +- Add http:// if it's not explicitly set for URL entries. +- Log to the output file at any time with warnings and errors, instead of just +when verbose mode is on. +- Display the correct current hash as per blockexplorer, truncated to 16 +characters, with just the time. + + +Version 1.5.1 - July 27, 2011 + +- Two redraws in a row cause a crash in old libncurses so just do one redraw +using the main window. +- Don't adjust hash_div only up for GPUs. 
Disable hash_div adjustment for GPUs. +- Only free the thread structures if the thread still exists. +- Update both windows separately, but not at the same time to prevent the double +refresh crash that old libncurses has. Do the window resize check only when +about to redraw the log window to minimise ncurses cpu usage. +- Abstract out the decay time function and use it to make hash_div a rolling +average so it doesn't change too abruptly and divide work in chunks large enough +to guarantee they won't overlap. +- Sanity check to prove locking. +- Don't take more than one lock at a time. +- Make threads report out when they're queueing a request and report if they've +failed. +- Make cpu mining work submission asynchronous as well. +- Properly detect stale work based on time from staging and discard instead of +handing on, but be more lax about how long work can be divided for up to the +scantime. +- Do away with queueing work separately at the start and let each thread grab +its own work as soon as it's ready. +- Don't put an extra work item in the queue as each new device thread will do so +itself. +- Make sure to decrease queued count if we discard the work. +- Attribute split work as local work generation. +- If work has been cloned it is already at the head of the list and when being +reinserted into the queue it should be placed back at the head of the list. +- Dividing work is like the work is never removed at all so treat it as such. +However the queued bool needs to be reset to ensure we *can* request more work +even if we didn't initially. +- Make the display options clearer. +- Add debugging output to tq_push calls. +- Add debugging output to all tq_pop calls. + + +Version 1.5.0 - July 26, 2011 + +- Increase efficiency of slow mining threads such as CPU miners dramatically. Do +this by detecting which threads cannot complete searching a work item within the +scantime and then divide up a work item into multiple smaller work items. 
+Detect the age of the work items and if they've been cloned before to prevent
+doing the same work over. If the work is too old to be divided, then see if it
+can be time rolled and do that to generate work. This dramatically decreases the
+number of queued work items from a pool leading to higher overall efficiency
+(but the same hashrate and share submission rate).
+- Don't request work too early for CPUs as CPUs will scan for the full
+opt_scantime anyway.
+- Simplify gpu management enable/disable/restart code.
+- Implement much more accurate rolling statistics per thread and per gpu and
+improve accuracy of rolling displayed values.
+- Make the rolling log-second average more accurate.
+- Add a menu to manage GPUs on the fly allowing you to enable/disable GPUs or
+try restarting them.
+- Keep track of which GPUs are alive versus enabled.
+- Start threads for devices that are even disabled, but don't allow them to
+start working.
+- The last pool is when we are low in total_pools, not active_pools.
+- Make the thread restart do a pthread_join after disabling the device, only
+re-enabling it if we succeed in restarting the thread. Do this from a separate
+thread so as to not block any other code. This will allow cgminer to continue
+even if one GPU hangs.
+- Try to do every curses manipulation under the curses lock.
+- Only use the sockoptfunction if the version of curl is recent enough.
+
+
+Version 1.4.1 - July 24, 2011
+
+- Do away with GET for dealing with longpoll forever. POST is the one that works
+everywhere, not the other way around.
+- Detect when the primary pool is lagging and start queueing requests on backup
+pools if possible before needing to roll work.
+- Load balancing puts more into the current pool if there are disabled pools.
+Fix.
+- Disable a GPU device should the thread fail to init.
+- Out of order command queue may fail on osx. Try without if it fails.
+- Fix possible dereference on blank inputs during input_pool.
+- Defines missing would segfault on --help when no sse mining is built in.
+- Revert "Free up resources/stale compilers." - didn't help.
+- Only try to print the status of active devices or it would crash.
+- Some hardware might benefit from the less OPS so there's no harm in leaving
+kernel changes that do that apart from readability of the code.
+
+Version 1.4.0 - July 23, 2011
+
+- Feature upgrade: Add keyboard input during runtime to allow modification of
+and viewing of numerous settings such as adding/removing pools, changing
+multipool management strategy, switching pools, changing intensity, verbosity,
+etc. with a simple keypress menu system.
+- Free up resources/stale compilers.
+- Kernels are safely flushed in a way that allows out of order execution to
+work.
+- Sometimes the cl compiler generates zero sized binaries and only a reboot
+seems to fix it.
+- Don't try to stop/cancel threads that don't exist.
+- Only set option to show devices and exit if built with opencl support.
+- Enable curses earlier and exit with message in main for messages to not be
+lost in curses windows.
+- Make it possible to enter server credentials with curses input if none are
+specified on the command line.
+- Abstract out a curses input function and separate input pool function to allow
+for live adding of pools later.
+- Remove the nil arguments check to allow starting without parameters.
+- Disable/enable echo & cbreak modes.
+- Add a thread that takes keyboard input and allow for quit, silent, debug,
+verbose, normal, rpc protocol debugging and clear screen options.
+- Add pool option to input and display current pool status, pending code to
+allow live changes.
+- Add a bool for explicit enabling/disabling of pools.
+- Make input pool capable of bringing up pools while running.
+- Do one last check of the work before submitting it.
+- Implement the ability to live add, enable, disable, and switch to pools.
+- Only internally test for block changes when the work matches the current pool
+to prevent interleaved block change timing on multipools.
+- Display current pool management strategy to enable changing it on the fly.
+- The longpoll blanking of the current_block data may not be happening before
+the work is converted and appears to be a detected block change. Blank the
+current block beforehand.
+- Make --no-longpoll work again.
+- Abstract out active pools count.
+- Allow the pool strategy to be modified on the fly.
+- Display pool information on the fly as well.
+- Add a menu and separate out display options.
+- Clean up the messy way the staging thread communicates with the longpoll
+thread to determine who found the block first.
+- Make the input windows update immediately instead of needing a refresh.
+- Allow log interval to be set in the menu.
+- Allow scan settings to be modified at runtime.
+- Abstract out the longpoll start and explicitly restart it on pool change.
+- Make it possible to enable/disable longpoll.
+- Set priority correctly on multipools. Display priority and alive/dead
+information in display_pools.
+- Implement pool removal.
+- Limit rolltime work generation to 10 iterations only.
+- Decrease testing log to info level.
+- Extra refresh not required.
+- With huge variation in GPU performance, allow intensity to go from -10 to
+10.
+- Tell getwork how much of a work item we're likely to complete for future
+splitting up of work.
+- Remove the mandatory work requirement at startup by testing for invalid work
+being passed which allows for work to be queued immediately. This also
+removes the requirement.
+- Make sure intensity is carried over to thread count and is at least the
+minimum necessary to work.
+- Unlocking error on retry. Locking unnecessary anyway so remove it.
+- Clear log window from consistent place. No need for locking since logging is
+disabled during input.
+- Cannot print the status of threads that don't exist so just queue enough work +for the number of mining threads to prevent crash with -Q N. +- Update phatk kernel to one with new parameters for slightly less overhead +again. Make the queue kernel parameters call a function pointer to select +phatk or poclbm. +- Make it possible to select the choice of kernel on the command line. +- Simplify the output part of the kernel. There's no demonstrable advantage from +more complexity. +- Merge pull request #18 from ycros/cgminer +- No need to make leaveok changes win32 only. +- Build support in for all SSE if possible and only set the default according to +machine capabilities. +- Win32 threading and longpoll keepalive fixes. +- Win32: Fix for mangled output on the terminal on exit. + + +Version 1.3.1 - July 20, 2011 + +- Feature upgrade; Multiple strategies for failover. Choose from default which +now falls back to a priority order from 1st to last, round robin which only +changes pools when one is idle, rotate which changes pools at user-defined +intervals, and load-balance which spreads the work evenly amongst all pools. +- Implement pool rotation strategy. +- Implement load balancing algorithm by rotating requests to each pool. +- Timeout on failed discarding of staged requests. +- Implement proper flagging of idle pools, test them with the watchdog thread, +and failover correctly. +- Move pool active test to own function. +- Allow multiple strategies to be set for multipool management. +- Track pool number. +- Don't waste the work items queued on testing the pools at startup. +- Reinstate the mining thread watchdog restart. +- Add a getpoll bool into the thread information and don't restart threads stuck +waiting on work. +- Rename the idlenet bool for the pool for later use. +- Allow the user/pass userpass urls to be input in any order. 
+- When json rpc errors occur they occur in spits and starts, so trying to limit
+them with the comms error bool doesn't stop a flood of them appearing.
+- Reset the queued count to allow more work to be queued for the new pool on
+pool switch.
+
+Version 1.3.0 - July 19, 2011
+
+- Massive infrastructure update to support pool failover.
+- Accept multiple parameters for url, user and pass and set up structures of
+pool data accordingly.
+- Probe each pool for what it supports.
+- Implement per pool feature support according to rolltime support as
+advertised by server.
+- Do switching automatically based on a 300 second timeout of locally generated
+work or 60 seconds of no response from a server that doesn't support rolltime.
+- Implement longpoll server switching.
+- Keep per-pool data and display accordingly.
+- Make sure cgminer knows how long the pool has actually been out for before
+deeming it a prolonged outage.
+- Fix bug with ever increasing staged work in 1.2.8 that eventually caused
+infinite rejects.
+- Make warning about empty http requests not show by default since many
+servers do this regularly.
+
+
+Version 1.2.8 - July 18, 2011
+
+- More OSX build fixes.
+- Add an sse4 algorithm to CPU mining.
+- Fix CPU mining with other algorithms not working.
+- Rename the poclbm file to ensure a new binary is built since it has changed.
+- We now are guaranteed to have one fresh work item after a block change and we
+should only discard staged requests.
+- Don't waste the work we retrieve from a longpoll.
+- Provide a control lock around global bools to avoid racing on them.
+- Iterating over 1026 nonces when confirming data from the GPU is old code
+and unnecessary and can lead to repeats/stales.
+- The poclbm kernel needs to be updated to work with the change to 4k sized
+output buffers.
+- longpoll seems to work either way with post or get but some servers prefer
+get so change to httpget.
+ + +Version 1.2.7 - July 16, 2011 + +- Show last 8 characters of share submitted in log. +- Display URL connected to and user logged in as in status. +- Display current block and when it was started in the status line. +- Only pthread_join the mining threads if they exist as determined by +pthread_cancel and don't fail on pthread_cancel. +- Create a unique work queue for all getworks instead of binding it to thread 0 +to avoid any conflict over thread 0's queue. +- Clean up the code to make it clear it's watchdog thread being messaged to +restart the threads. +- Check the current block description hasn't been blanked pending the real +new current block data. +- Re-enable signal handlers once the signal has been received to make it +possible to kill cgminer if it fails to shut down. +- Disable restarting of CPU mining threads pending further investigation. +- Update longpoll messages. +- Add new block data to status line. +- Fix opencl tests for osx. +- Only do local generation of work if the work item is not stale itself. +- Check for stale work within the mining threads and grab new work if +positive. +- Test for idle network conditions and prevent threads from being restarted +by the watchdog thread under those circumstances. +- Make sure that local work generation does not continue indefinitely by +stopping it after 10 minutes. +- Tweak the kernel to have a shorter path using a 4k buffer and a mask on the +nonce value instead of a compare and loop for a shorter code path. +- Allow queue of zero and make that default again now that we can track how +work is being queued versus staged. This can decrease reject rates. +- Queue precisely the number of mining threads as longpoll_staged after a +new block to not generate local work. 
+ + +Version 1.2.6 - July 15, 2011 + +- Put a current system status line beneath the total work status line +- Fix a counting error that would prevent cgminer from correctly detecting +situations where getwork was failing - this would cause stalls sometimes +unrecoverably. +- Limit the maximum number of requests that can be put into the queue which +otherwise could get arbitrarily long during a network outage. +- Only count getworks that are real queue requests. + + +Version 1.2.5 - July 15, 2011 + +- Conflicting -n options corrected +- Setting an intensity with -I disables dynamic intensity setting +- Removed option to manually disable dynamic intensity +- Improve display output +- Implement signal handler and attempt to clean up properly on exit +- Only restart threads that are not stuck waiting on mandatory getworks +- Compatibility changes courtesy of Ycros to build on mingw32 and osx +- Explicitly grab first work item to prevent false positive hardware errors +due to working on uninitialised work structs +- Add option for non curses --text-only output +- Ensure we connect at least once successfully before continuing to retry to +connect in case url/login parameters were wrong +- Print an executive summary when cgminer is terminated +- Make sure to refresh the status window + +Versions -> 1.2.4 + +- Con Kolivas - July 2011. New maintainership of code under cgminer name. +- Massive rewrite to incorporate GPU mining. +- Incorporate original oclminer c code. +- Rewrite gpu mining code to efficient work loops. +- Implement per-card detection and settings. +- Implement vector code. +- Implement bfi int patching. +- Import poclbm and phatk ocl kernels and use according to hardware type. +- Implement customised optimised versions of opencl kernels. +- Implement binary kernel generation and loading. +- Implement preemptive asynchronous threaded work gathering and pushing. +- Implement variable length extra work queues. 
+- Optimise workloads to be efficient miners instead of getting lots of extra
+ work.
+- Implement total hash throughput counters, per-card accepted, rejected and
+ hw error count.
+- Staging and watchdog threads to prevent fallover.
+- Stale and reject share guarding.
+- Autodetection of new blocks without longpoll.
+- Dynamic setting of intensity to maintain desktop interactivity.
+- Curses interface with generous statistics and information.
+- Local generation of work (xroll ntime) when detecting poor network
+connectivity.
+
+Version 1.0.2
+
+- Linux x86_64 optimisations - Con Kolivas
+- Optimise for x86_64 by default by using sse2_64 algo
+- Detects CPUs and sets number of threads accordingly
+- Uses CPU affinity for each thread where appropriate
+- Sets scheduling policy to lowest possible
+- Minor performance tweaks
+
+Version 1.0.1 - May 14, 2011
+
+- OSX support
+
+Version 1.0 - May 9, 2011
+
+- jansson 2.0 compatibility
+- correct off-by-one in date (month) display output
+- fix platform detection
+- improve yasm configure bits
+- support full URL, in X-Long-Polling header
+
+Version 0.8.1 - March 22, 2011
+
+- Make --user, --pass actually work
+
+- Add User-Agent HTTP header to requests, so that server operators may
+ more easily identify the miner client.
+
+- Fix minor bug in example JSON config file
+
+Version 0.8 - March 21, 2011
+
+- Support long polling: http://deepbit.net/longpolling.php
+
+- Adjust max workload based on scantime (default 5 seconds,
+ or 60 seconds for longpoll)
+
+- Standardize program output, and support syslog on Unix platforms
+
+- Support --user/--pass options (and "user" and "pass" in config file),
+ as an alternative to the current --userpass
+
+Version 0.7.2 - March 14, 2011
+
+- Add port of ufasoft's sse2 assembly implementation (Linux only)
+ This is a substantial speed improvement on Intel CPUs.
+
+- Move all JSON-RPC I/O to separate thread.
This reduces the
+ number of HTTP connections from one-per-thread to one, reducing resource
+ usage on upstream bitcoind / pool server.
+
+Version 0.7.1 - March 2, 2011
+
+- Add support for JSON-format configuration file. See example
+ file example-cfg.json. Any long argument on the command line
+ may be stored in the config file.
+- Timestamp each solution found
+- Improve sha256_4way performance. NOTE: This optimization makes
+ the 'hash' debug-print output for sha256_4way incorrect.
+- Use __builtin_expect() intrinsic as compiler micro-optimization
+- Build on Intel compiler
+- HTTP library now follows HTTP redirects
+
+Version 0.7 - February 12, 2011
+
+- Re-use CURL object, thereby reusing DNS cache and HTTP connections
+- Use bswap_32, if compiler intrinsic is not available
+- Disable full target validation (as opposed to simply H==0) for now
+
+Version 0.6.1 - February 4, 2011
+
+- Fully validate "hash < target", rather than simply stopping our scan
+ if the high 32 bits are 00000000.
+- Add --retry-pause, to set length of pause time between failure retries
+- Display proof-of-work hash and target, if -D (debug mode) enabled
+- Fix max-nonce auto-adjustment to actually work. This means if your
+ scan takes longer than 5 seconds (--scantime), the miner will slowly
+ reduce the number of hashes you work on, before fetching a new work unit.
+
+Version 0.6 - January 29, 2011
+
+- Fetch new work unit, if scanhash takes longer than 5 seconds (--scantime)
+- BeeCee1's sha256 4way optimizations
+- lfm's byte swap optimization (improves via, cryptopp)
+- Fix non-working short options -q, -r
+
+Version 0.5 - December 28, 2010
+
+- Exit program, when all threads have exited
+- Improve JSON-RPC failure diagnostics and resilience
+- Add --quiet option, to disable hashmeter output.
+ +Version 0.3.3 - December 27, 2010 + +- Critical fix for sha256_cryptopp 'cryptopp_asm' algo + +Version 0.3.2 - December 23, 2010 + +- Critical fix for sha256_via + +Version 0.3.1 - December 19, 2010 + +- Critical fix for sha256_via +- Retry JSON-RPC failures (see --retry, under "minerd --help" output) + +Version 0.3 - December 18, 2010 + +- Add crypto++ 32bit assembly implementation +- show version upon 'minerd --help' +- work around gcc 4.5.x bug that killed 4way performance + +Version 0.2.2 - December 6, 2010 + +- VIA padlock implementation works now +- Minor build and runtime fixes + +Version 0.2.1 - November 29, 2010 + +- avoid buffer overflow when submitting solutions +- add Crypto++ sha256 implementation (C only, ASM elided for now) +- minor internal optimizations and cleanups + +Version 0.2 - November 27, 2010 + +- Add script for building a Windows installer +- improve hash performance (hashmeter) statistics +- add tcatm 4way sha256 implementation +- Add experimental VIA Padlock sha256 implementation + +Version 0.1.2 - November 26, 2010 + +- many small cleanups and micro-optimizations +- build win32 exe using mingw +- RPC URL, username/password become command line arguments +- remove unused OpenSSL dependency + +Version 0.1.1 - November 24, 2010 + +- Do not build sha256_generic module separately from cpuminer. + +Version 0.1 - November 24, 2010 + +- Initial release. + diff --git a/README b/README new file mode 100644 index 0000000..91a2170 --- /dev/null +++ b/README @@ -0,0 +1,1058 @@ +This is a multi-threaded multi-pool FPGA and ASIC miner for bitcoin. + +This code is provided entirely free of charge by the programmer in his spare +time so donations would be greatly appreciated. Please consider donating to the +address below. + +Con Kolivas +15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ + +NOTE: This code is licensed under the GPLv3. This means that the source to any +modifications you make to this code MUST be provided by law if you distribute +modified binaries. 
See COPYING for details. + + +DOWNLOADS: + +http://ck.kolivas.org/apps/cgminer + +GIT TREE: + +https://github.com/ckolivas/cgminer + +Support thread: + +http://bitcointalk.org/index.php?topic=28402.0 + +IRC Channel: + +irc://irc.freenode.net/cgminer + +SEE ALSO API-README, ASIC-README and FPGA-README FOR MORE INFORMATION ON EACH. + +--- + +EXECUTIVE SUMMARY ON USAGE: + +Single pool: + +cgminer -o http://pool:port -u username -p password + +Multiple pools: + +cgminer -o http://pool1:port -u pool1username -p pool1password -o http://pool2:port -u pool2username -p pool2password + +Single pool with a standard http proxy: + +cgminer -o "http:proxy:port|http://pool:port" -u username -p password + +Single pool with a socks5 proxy: + +cgminer -o "socks5:proxy:port|http://pool:port" -u username -p password + +Single pool with stratum protocol support: + +cgminer -o stratum+tcp://pool:port -u username -p password + +Solo mining to local bitcoind: + +cgminer -o http://localhost:8332 -u username -p password --btc-address 15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ + +The list of proxy types are: + http: standard http 1.1 proxy + http0: http 1.0 proxy + socks4: socks4 proxy + socks5: socks5 proxy + socks4a: socks4a proxy + socks5h: socks5 proxy using a hostname + +If you compile cgminer with a version of CURL before 7.19.4 then some of the above will +not be available. All are available since CURL version 7.19.4 + +If you specify the --socks-proxy option to cgminer, it will only be applied to all pools +that don't specify their own proxy setting like above + + +After saving configuration from the menu, you do not need to give cgminer any +arguments and it will load your configuration. + +Any configuration file may also contain a single + "include" : "filename" +to recursively include another configuration file. +Writing the configuration will save all settings from all files in the output. 
+ + +--- +BUILDING CGMINER FOR YOURSELF + +DEPENDENCIES: +Mandatory: + pkg-config http://www.freedesktop.org/wiki/Software/pkg-config + libtool http://www.gnu.org/software/libtool/ +Optional: + curl dev library http://curl.haxx.se/libcurl/ + (libcurl4-openssl-dev - Must tell configure --disable-libcurl otherwise + it will attempt to compile it in) + + curses dev library + (libncurses5-dev or libpdcurses on WIN32 for text user interface) + + libudev dev library (libudev-dev) + (This is only required for USB device support and is linux only) + +If building from git: + autoconf + automake + +If building on Red Hat: + sudo yum install autoconf automake autoreconf libtool openssl-compat-bitcoin-devel.x86_64 \ + curl libcurl libcurl-devel openssh + +If building on Ubuntu: + sudo apt-get install build-essential autoconf automake libtool pkg-config \ + libcurl3-dev libudev-dev + +CGMiner specific configuration options: + --enable-bmsc Compile support for BitMain Single Chain(default disabled) + --enable-bitmain Compile support for BitMain Multi Chain(default disabled) + --enable-avalon Compile support for Avalon (default disabled) + --enable-avalon2 Compile support for Avalon2 (default disabled) + --enable-avalon4 Compile support for Avalon4 (default disabled) + --enable-bab Compile support for BlackArrow Bitfury (default + disabled) + --enable-bflsc Compile support for BFL ASICs (default disabled) + --enable-bitforce Compile support for BitForce FPGAs (default + disabled) + --enable-bitfury Compile support for BitFury ASICs (default disabled) + --enable-bitmine_A1 Compile support for Bitmine.ch A1 ASICs (default + disabled) + --enable-blockerupter Compile support for ASICMINER BlockErupter Tube/Prisma + (default disabled) + --enable-cointerra Compile support for Cointerra ASICs (default disabled) + --enable-drillbit Compile support for Drillbit BitFury ASICs (default + disabled) + --enable-hashfast Compile support for Hashfast (default disabled) + --enable-icarus Compile 
support for Icarus (default disabled) + --enable-klondike Compile support for Klondike (default disabled) + --enable-knc Compile support for KnC miners (default disabled) + --enable-minion Compile support for Minion BlackArrow ASIC (default + disabled) + --enable-modminer Compile support for ModMiner FPGAs(default disabled) + --enable-sp10 Compile support for Spondoolies SP10 (default + disabled) + --enable-sp30 Compile support for Spondoolies SP30 (default + disabled) + --disable-libcurl Disable building with libcurl for getwork and GBT + support + --without-curses Compile support for curses TUI (default enabled) + --with-system-libusb Compile against dynamic system libusb (default use + included static libusb) + +Basic *nix build instructions: + To actually build: + + ./autogen.sh # only needed if building from git repo + CFLAGS="-O2 -Wall -march=native" ./configure + make + + No installation is necessary. You may run cgminer from the build + directory directly, but you may do make install if you wish to install + cgminer to a system location or location you specified. + +Building for windows: + +It is actually easiest to build a windows binary using cross compilation tools +provided by "mxe" available at http://mxe.cc/ (use the 32 bit one!) +Once you have followed the instructions for building mxe: + export PATH=(path/to/mxe)/usr/bin/:$PATH + CFLAGS="-O2 -Wall -W -march=i686" ./configure --host=i686-pc-mingw32 + make + +Native WIN32 build instructions: see windows-build.txt but these instructions +are now hopelessly out of date. 
+ +--- + +Usage instructions: Run "cgminer --help" to see options: + +Usage: cgminer [-DdElmpPQqUsTouOchnV] + +Options for both config file and command line: +--anu-freq Set AntminerU1/2 frequency in MHz, range 125-500 (default: 250.0) +--api-allow Allow API access only to the given list of [G:]IP[/Prefix] addresses[/subnets] +--api-description Description placed in the API status header, default: cgminer version +--api-groups API one letter groups G:cmd:cmd[,P:cmd:*...] defining the cmds a groups can use +--api-listen Enable API, default: disabled +--api-mcast Enable API Multicast listener, default: disabled +--api-mcast-addr API Multicast listen address +--api-mcast-code Code expected in the API Multicast message, don't use '-' +--api-mcast-des Description appended to the API Multicast reply, default: '' +--api-mcast-port API Multicast listen port (default: 4028) +--api-network Allow API (if enabled) to listen on/for any address, default: only 127.0.0.1 +--api-port Port number of miner API (default: 4028) +--au3-freq Set AntminerU3 frequency in MHz, range 100-250 (default: 225.0) +--au3-volt Set AntminerU3 voltage in mv, range 725-850, 0 to not set (default: 750) +--avalon-auto Adjust avalon overclock frequency dynamically for best hashrate +--avalon-cutoff Set avalon overheat cut off temperature (default: 60) +--avalon-fan Set fanspeed percentage for avalon, single value or range (default: 20-100) +--avalon-freq Set frequency range for avalon-auto, single value or range +--avalon-options Set avalon options baud:miners:asic:timeout:freq:tech +--avalon-temp Set avalon target temperature (default: 50) +--avalon2-freq Set frequency range for Avalon2, single value or range +--avalon2-voltage Set Avalon2 core voltage, in millivolts +--avalon2-fan Set Avalon2 target fan speed +--avalon2-cutoff Set Avalon2 overheat cut off temperature (default: 88) +--avalon2-fixed-speed Set Avalon2 fan to fixed speed +--avalon4-automatic-voltage Automatic adjust voltage base on module 
DH +--avalon4-voltage Set Avalon4 core voltage, in millivolts, step: 125 +--avalon4-freq Set frequency for Avalon4, 1 to 3 values, example: 445:385:370 +--avalon4-fan Set Avalon4 target fan speed range +--avalon4-temp Set Avalon4 target temperature (default: 42) +--avalon4-cutoff Set Avalon4 overheat cut off temperature (default: 65) +--avalon4-polling-delay Set Avalon4 polling delay value (ms) (default: 20) +--avalon4-ntime-offset Set Avalon4 MM ntime rolling max offset (default: 4) +--avalon4-aucspeed Set Avalon4 AUC IIC bus speed (default: 400000) +--avalon4-aucxdelay Set Avalon4 AUC IIC xfer read delay, 4800 ~= 1ms (default: 9600) +--bab-options Set BaB options max:def:min:up:down:hz:delay:trf +--balance Change multipool strategy from failover to even share balance +--benchfile Run cgminer in benchmark mode using a work file - produces no shares +--benchfile-display Display each benchfile nonce found +--benchmark Run cgminer in benchmark mode - produces no shares +--bet-clk Set clockspeed of ASICMINER Tube/Prisma to (arg+1)*10MHz (default: 23) +--bfl-range Use nonce range on bitforce devices if supported +--bflsc-overheat Set overheat temperature where BFLSC devices throttle, 0 to disable (default: 85) +--bitburner-fury-voltage Set BitBurner Fury core voltage, in millivolts +--bitburner-fury-options Override avalon-options for BitBurner Fury boards baud:miners:asic:timeout:freq +--bitburner-voltage Set BitBurner (Avalon) core voltage, in millivolts +--bitmain-auto Adjust bitmain overclock frequency dynamically for best hashrate +--bitmain-cutoff Set bitmain overheat cut off temperature +--bitmain-fan Set fanspeed percentage for bitmain, single value or range (default: 20-100) +--bitmain-freq Set frequency range for bitmain-auto, single value or range +--bitmain-hwerror Set bitmain device detect hardware error +--bitmain-options Set bitmain options baud:miners:asic:timeout:freq +--bitmain-temp Set bitmain target temperature +--bxf-bits Set max BXF/HXF bits for 
overclocking (default: 54) +--bxf-temp-target Set target temperature for BXF/HXF devices (default: 82) +--bxm-bits Set BXM bits for overclocking (default: 54) +--btc-address Set bitcoin target address when solo mining to bitcoind +--btc-sig Set signature to add to coinbase when solo mining (optional) +--compact Use compact display without per device statistics +--debug|-D Enable debug output +--disable-rejecting Automatically disable pools that continually reject shares +--drillbit-options Set drillbit options :clock[:clock_divider][:voltage] +--expiry|-E Upper bound on how many seconds after getting work we consider a share from it stale (default: 120) +--failover-only Don't leak work to backup pools when primary pool is lagging +--fix-protocol Do not redirect to a different getwork protocol (eg. stratum) +--hfa-hash-clock Set hashfast clock speed (default: 550) +--hfa-fail-drop Set how many MHz to drop clockspeed each failure on an overlocked hashfast device (default: 10) +--hfa-fan Set fanspeed percentage for hashfast, single value or range (default: 10-85) +--hfa-name Set a unique name for a single hashfast device specified with --usb or the first device found +--hfa-noshed Disable hashfast dynamic core disabling feature +--hfa-options Set hashfast options name:clock (comma separated) +--hfa-temp-overheat Set the hashfast overheat throttling temperature (default: 95) +--hfa-temp-target Set the hashfast target temperature (0 to disable) (default: 88) +--hro-freq Set the hashratio clock frequency (default: 280) +--hotplug Seconds between hotplug checks (0 means never check) +--klondike-options Set klondike options clock:temptarget +--load-balance Change multipool strategy from failover to quota based balance +--log|-l Interval in seconds between log output (default: 5) +--lowmem Minimise caching of shares for low memory applications +--minion-chipreport Seconds to report chip 5min hashrate, range 0-100 (default: 0=disabled) +--minion-freq Set minion chip 
frequencies in MHz, single value or comma list, range 100-1400 (default: 1200) +--minion-freqchange Millisecond total time to do frequency changes (default: 1000) +--minion-freqpercent Percentage to use when starting up a chip (default: 70%) +--minion-idlecount Report when IdleCount is >0 or changes +--minion-ledcount Turn off led when more than this many chips below the ledlimit (default: 0) +--minion-ledlimit Turn off led when chips GHs are below this (default: 90) +--minion-noautofreq Disable automatic frequency adjustment +--minion-overheat Enable directly halting any chip when the status exceeds 100C +--minion-spidelay Add a delay in microseconds after each SPI I/O +--minion-spireset SPI regular reset: iNNN for I/O count or sNNN for seconds - 0 means none +--minion-spisleep Sleep time in milliseconds when doing an SPI reset +--minion-temp Set minion chip temperature threshold, single value or comma list, range 120-160 (default: 135C) +--monitor|-m Use custom pipe cmd for output messages +--nfu-bits Set nanofury bits for overclocking, range 32-63 (default: 50) +--net-delay Impose small delays in networking to not overload slow routers +--no-submit-stale Don't submit shares if they are detected as stale +--osm-led-mode Set LED mode for OneStringMiner devices (default: 4) +--pass|-p Password for bitcoin JSON-RPC server +--per-device-stats Force verbose mode and output per-device statistics +--protocol-dump|-P Verbose dump of protocol-level activities +--queue|-Q Minimum number of work items to have queued (0+) (default: 1) +--quiet|-q Disable logging output, display status and errors +--quota|-U quota;URL combination for server with load-balance strategy quotas +--real-quiet Disable all output +--rock-freq Set RockMiner frequency in MHz, range 200-400 (default: 270) +--rotate Change multipool strategy from failover to regularly rotate at N minutes (default: 0) +--round-robin Change multipool strategy from failover to round robin on failure +--scan-time|-s Upper 
bound on time spent scanning current work, in seconds (default: -1) +--sched-start Set a time of day in HH:MM to start mining (a once off without a stop time) +--sched-stop Set a time of day in HH:MM to stop mining (will quit without a start time) +--sharelog Append share log to file +--shares Quit after mining N shares (default: unlimited) +--socks-proxy Set socks4 proxy (host:port) +--suggest-diff Suggest miner difficulty for pool to user (default: none) +--syslog Use system log for output messages (default: standard error) +--temp-cutoff Temperature where a device will be automatically disabled, one value or comma separated list (default: 95) +--text-only|-T Disable ncurses formatted screen output +--url|-o URL for bitcoin JSON-RPC server +--usb USB device selection +--user|-u Username for bitcoin JSON-RPC server +--userpass|-O Username:Password pair for bitcoin JSON-RPC server +--verbose Log verbose output to stderr as well as status output +--widescreen Use extra wide display without toggling +--worktime Display extra work time debug information +Options for command line only: +--config|-c Load a JSON-format configuration file +See example.conf for an example configuration. +--default-config Specify the filename of the default config file +Loaded at start and used when saving without a name. +--help|-h Print this message +--ndevs|-n Display all USB devices and exit +--version|-V Display version and exit + + +Silent USB device (ASIC and FPGA) options: + +--icarus-options Set specific FPGA board configurations - one set of values for all or comma separated +--icarus-timing Set how the Icarus timing is calculated - one setting/value for all or comma separated +--usb-dump (See FPGA-README) + +See FGPA-README or ASIC-README for more information regarding these. 
+ + +ASIC only options: + +--anu-freq Set AntminerU1/2 frequency in MHz, range 125-500 (default: 250.0) +--au3-freq Set AntminerU3 frequency in MHz, range 100-250 (default: 225.0) +--au3-volt Set AntminerU3 voltage in mv, range 725-850, 0 to not set (default: 750) +--avalon-auto Adjust avalon overclock frequency dynamically for best hashrate +--avalon-cutoff Set avalon overheat cut off temperature (default: 60) +--avalon-fan Set fanspeed percentage for avalon, single value or range (default: 20-100) +--avalon-freq Set frequency range for avalon-auto, single value or range +--avalon-options Set avalon options baud:miners:asic:timeout:freq:tech +--avalon-temp Set avalon target temperature (default: 50) +--avalon2-freq Set frequency range for Avalon2, single value or range +--avalon2-voltage Set Avalon2 core voltage, in millivolts +--avalon2-fan Set Avalon2 target fan speed +--avalon2-cutoff Set Avalon2 overheat cut off temperature (default: 88) +--avalon2-fixed-speed Set Avalon2 fan to fixed speed +--avalon4-automatic-voltage Automatic adjust voltage base on module DH +--avalon4-voltage Set Avalon4 core voltage, in millivolts, step: 125 +--avalon4-freq Set frequency for Avalon4, 1 to 3 values, example: 445:385:370 +--avalon4-fan Set Avalon4 target fan speed range +--avalon4-temp Set Avalon4 target temperature (default: 42) +--avalon4-cutoff Set Avalon4 overheat cut off temperature (default: 65) +--avalon4-polling-delay Set Avalon4 polling delay value (ms) (default: 20) +--avalon4-ntime-offset Set Avalon4 MM ntime rolling max offset (default: 4) +--avalon4-aucspeed Set Avalon4 AUC IIC bus speed (default: 400000) +--avalon4-aucxdelay Set Avalon4 AUC IIC xfer read delay, 4800 ~= 1ms (default: 9600) +--bab-options Set BaB options max:def:min:up:down:hz:delay:trf +--bflsc-overheat Set overheat temperature where BFLSC devices throttle, 0 to disable (default: 90) +--bitburner-fury-options Override avalon-options for BitBurner Fury boards baud:miners:asic:timeout:freq 
+--bitburner-fury-voltage Set BitBurner Fury core voltage, in millivolts +--bitburner-voltage Set BitBurner (Avalon) core voltage, in millivolts +--bitmine-a1-options ::: +--bxf-temp-target Set target temperature for BXF devices (default: 82) +--bxm-bits Set BXM bits for overclocking (default: 50) +--hfa-hash-clock Set hashfast clock speed (default: 550) +--hfa-fail-drop Set how many MHz to drop clockspeed each failure on an overlocked hashfast device (default: 10) +--hfa-fan Set fanspeed percentage for hashfast, single value or range (default: 10-85) +--hfa-name Set a unique name for a single hashfast device specified with --usb or the first device found +--hfa-noshed Disable hashfast dynamic core disabling feature +--hfa-temp-overheat Set the hashfast overheat throttling temperature (default: 95) +--hfa-temp-target Set the hashfast target temperature (0 to disable) (default: 88) +--hro-freq Set the hashratio clock frequency (default: 280) +--klondike-options Set klondike options clock:temptarget +--rock-freq Set RockMiner frequency in MHz, range 125-500 (default: 270) + +See ASIC-README for more information regarding these. + + +FPGA only options: + +--bfl-range Use nonce range on bitforce devices if supported + +See FGPA-README for more information regarding this. + + +Cgminer should automatically find all of your Avalon ASIC, BFL ASIC, BitForce +FPGAs, Icarus bitstream FPGAs, Klondike ASIC, ASICMINER usb block erupters, +KnC ASICs, BaB ASICs, Hashfast ASICs, ModMiner FPGAs, BPMC/BGMC BF1 USB ASICs, +Bi*fury USB ASICs, Onestring miner USB ASICs, Hexfury USB ASICs, Nanofury USB +ASICs, Antminer U1/U2/U2+ U3 USB ASICs, Cointerra devices, BFx2 USB ASICs, +Rockminer R-Box/RK-Box/T1 USB ASICs, Avalon2/3/4 USB ASICs and Hashratio USB +ASICs. + +--- + +SETTING UP USB DEVICES + +WINDOWS: + +On windows, the direct USB support requires the installation of a WinUSB +driver (NOT the ftdi_sio driver), and attach it to the chosen USB device. 
+When configuring your device, plug it in and wait for windows to attempt to +install a driver on its own. It may think it has succeeded or failed but wait +for it to finish regardless. This is NOT the driver you want installed. At this +point you need to associate your device with the WinUSB driver. The easiest +way to do this is to use the zadig utility which you must right click on and +run as administrator. Then once you plug in your device you can choose the +"list all devices" from the "option" menu and you should be able to see the +device as something like: "BitFORCE SHA256 SC". Choose the install or replace +driver option and select WinUSB. You can either google for zadig or download +it from the cgminer directory in the DOWNLOADS link above. + +When you first switch a device over to WinUSB with zadig and it shows that +correctly on the left of the zadig window, but it still gives permission +errors, you may need to unplug the USB miner and then plug it back in. Some +users may need to reboot at this point. + + +LINUX: + +The short version: + + sudo cp 01-cgminer.rules /etc/udev/rules.d/ + + +The long version: + +On linux, the direct USB support requires no drivers at all. However due to +permissions issues, you may not be able to mine directly on the devices as a +regular user without giving the user access to the device or by mining as +root (administrator). 
In order to give your regular user access, you can make +him a member of the plugdev group with the following commands: + + sudo usermod -G plugdev -a `whoami` + +If your distribution does not have the plugdev group you can create it with: + + sudo groupadd plugdev + +In order for the USB devices to instantly be owned by the plugdev group and +accessible by anyone from the plugdev group you can copy the file +"01-cgminer.rules" from the cgminer archive into the /etc/udev/rules.d +directory with the following command: + + sudo cp 01-cgminer.rules /etc/udev/rules.d/ + +After this you can either manually restart udev and re-login, or more easily +just reboot. + + +OSX: + +On OSX, like Linux, no drivers need to be installed. However some devices +like the bitfury USB sticks automatically load a driver thinking they're a +modem and the driver needs to be unloaded for cgminer to work: + +sudo kextunload -b com.apple.driver.AppleUSBCDC +sudo kextunload -b com.apple.driver.AppleUSBCDCACMData + +There may be a limit to the number of USB devices that you are allowed to start. 
+The following set of commands, followed by a reboot will increase that: + + sudo su + touch /etc/sysctl.conf + echo kern.sysv.semume=100 >> /etc/sysctl.conf + chown root:wheel /etc/sysctl.conf + chmod 0644 /etc/sysctl.conf + +Some devices need superuser access to mine on them so cgminer may need to +be started with sudo +i.e.: +sudo cgminer + + +--- + +Advanced USB options: + +The --usb option can restrict how many USB devices are found: + + --usb 1:2,1:3,1:4,1:* +or + --usb BAS:1,BFL:1,MMQ:0,ICA:0,KLN:0 +or + --usb :10 + +You can only use one of the above 3 + +The first version + --usb 1:2,1:3,1:4,1:* +allows you to select which devices to mine on with a list of USB + bus_number:device_address +All other USB devices will be ignored +Hotplug will also only look at the devices matching the list specified and +find nothing new if they are all in use +You can specify just the USB bus_number to find all devices like 1:* +which means any devices on USB bus_number 1 +This is useful if you unplug a device then plug it back in the same port, +it usually reappears with the same bus_number but a different device_address + +You can see the list of all USB devices on linux with 'sudo lsusb' +Cgminer will list the recognised USB devices + +with the '-n' option or the +'--usb-dump 0' option +The '--usb-dump N' option with a value of N greater than 0 will dump a lot +of details about each recognised USB device +If you wish to see all USB devices, include the --usb-list-all option + +The second version + --usb BAS:1,BFL:1,MMQ:0,ICA:0,KLN:0 +allows you to specify how many devices to choose based on each device +driver cgminer has - the current USB drivers are: +AVA, BAS, BFL, BF1, DRB, HFA, ICA, KLN and MMQ. + +N.B. you can only specify which device driver to limit, not the type of +each device, e.g. 
with BAS:n you can limit how many BFL ASIC devices will +be checked, but you cannot limit the number of each type of BFL ASIC + +Also note that the MMQ count is the number of MMQ backplanes you have +not the number of MMQ FPGAs + +The third version + --usb :10 +means only use a maximum of 10 devices of any supported USB devices +Once cgminer has 10 devices it will not configure any more and hotplug will +not scan for any more +If one of the 10 devices stops working, hotplug - if enabled, as is default +- will scan normally again until it has 10 devices + + --usb :0 will disable all USB I/O other than to initialise libusb + +--- + +WHILE RUNNING: + +The following options are available while running with a single keypress: + + [U]SB management [P]ool management [S]ettings [D]isplay options [Q]uit + + +U gives you: + +[S]ummary of device information +[E]nable device +[D]isable device +[U]nplug to allow hotplug restart +[R]eset device USB +[L]ist all known devices +[B]lacklist current device from current instance of cgminer +[W]hitelist previously blacklisted device +[H]otplug interval (0 to disable) + + +P gives you: + +Current pool management strategy: Failover +[F]ailover only disabled +[A]dd pool [R]emove pool [D]isable pool [E]nable pool +[C]hange management strategy [S]witch pool [I]nformation + + +S gives you: + +[Q]ueue: 1 +[S]cantime: 60 +[E]xpiry: 120 +[W]rite config file +[C]gminer restart + + +D gives you: + +[N]ormal [C]lear [S]ilent mode (disable all output) +[D]ebug:off +[P]er-device:off +[Q]uiet:off +[V]erbose:off +[R]PC debug:off +[W]orkTime details:off +co[M]pact: off +[T]oggle status switching:enabled +[Z]ero statistics +[L]og interval:5 + + +Q quits the application. 
+ + +The running log shows output like this: + + [2013-11-09 11:04:41] Accepted 01b3bde7 Diff 150/128 AVA 1 pool 0 + [2013-11-09 11:04:49] Accepted 015df995 Diff 187/128 AVA 1 pool 0 + [2013-11-09 11:04:50] Accepted 01163b68 Diff 236/128 AVA 1 pool 0 + [2013-11-09 11:04:53] Accepted 9f745840 Diff 411/128 BAS 1 pool 0 + +The 8 byte hex value are the 1st nonzero bytes of the share being submitted to +the pool. The 2 diff values are the actual difficulty target that share reached +followed by the difficulty target the pool is currently asking for. + +--- +Also many issues and FAQs are covered in the forum thread +dedicated to this program, + http://forum.bitcoin.org/index.php?topic=28402.0 + +DISPLAY: + +The display is roughly split into two portions, the top status window and the +bottom scrolling log window. + + +STATUS WINDOW +The status window is split into overall status and per device status. + +Overall status: + +The output line shows the following: + (5s):2.469T (1m):2.677T (5m):2.040T (15m):1.014T (avg):2.733Th/s + +These are exponentially decaying average hashrates over 5s/1m/5m/15m and an +average since the start. + +Followed by: + A:290391 R:5101 HW:145 WU:37610.4/m + +Each column is as follows: +A: The total difficulty of Accepted shares +R: The total difficulty of Rejected shares +HW: The number of HardWare errors +WU: The Work Utility defined as the number of diff1 shares work / minute + (accepted or rejected). + +alternating with: + ST: 22 SS: 0 NB: 2 LW: 356090 GF: 0 RF: 0 + +ST is STaged work items (ready to use). 
+SS is Stale Shares discarded (detected and not submitted so don't count as rejects) +NB is New Blocks detected on the network +LW is Locally generated Work items +GF is Getwork Fail Occasions (server slow to provide work) +RF is Remote Fail occasions (server slow to accept work) + +Followed by: + Connected to pool.com diff 3.45K with stratum as user me + +The diff shown is the current vardiff requested by the pool currently being +mined at. + +Followed by: +Block: ca0d237f... Diff:5.01G Started: [00:14:27] Best share: 1.18M + +This shows a short stretch about the current block, when the new block started, +and the all time best difficulty share you've found since starting cgminer +this time. + +Per device status: + + 6: HFS Random : 645MHz 85C 13% 0.79V | 2.152T / 1.351Th/s + +Each column is as follows: +Temperature (if supported) +Fanspeed (if supported) +Voltage (if supported) + +A 5 second exponentially decaying average hash rate +An all time average hash rate + +alternating with + + 6: HFS Random : 645MHz 86C 13% 0.80V | A:290348 R:1067 HW:88 WU:18901.8/m + +The total difficulty of accepted shares +The total difficulty of rejected shares +The number of hardware errors +The work utility defined as the number of diff1 shares work / minute + + +LOG WINDOW + +All running information is shown here, usually share submission results and +block update notifications, along with device messages and warnings. + + [2014-03-29 00:24:09] Accepted 1397768d Diff 3.35K/2727 HFS 0 pool 0 + [2014-03-29 00:24:13] Stratum from pool 0 detected new block + + +--- +MULTIPOOL + +FAILOVER STRATEGIES WITH MULTIPOOL: +A number of different strategies for dealing with multipool setups are +available. Each has their advantages and disadvantages so multiple strategies +are available by user choice, as per the following list: + +FAILOVER: +The default strategy is failover. 
This means that if you input a number of +pools, it will try to use them as a priority list, moving away from the 1st +to the 2nd, 2nd to 3rd and so on. If any of the earlier pools recover, it will +move back to the higher priority ones. + +ROUND ROBIN: +This strategy only moves from one pool to the next when the current one falls +idle and makes no attempt to move otherwise. + +ROTATE: +This strategy moves at user-defined intervals from one active pool to the next, +skipping pools that are idle. + +LOAD BALANCE: +This strategy sends work to all the pools on a quota basis. By default, all +pools are allocated equal quotas unless specified with --quota. This +apportioning of work is based on work handed out, not shares returned so is +independent of difficulty targets or rejected shares. While a pool is disabled +or dead, its quota is dropped until it is re-enabled. Quotas are forward +looking, so if the quota is changed on the fly, it only affects future work. +If all pools are set to zero quota or all pools with quota are dead, it will +fall back to a failover mode. See quota below for more information. + +The failover-only flag has special meaning in combination with load-balance +mode and it will distribute quota back to priority pool 0 from any pools that +are unable to provide work for any reason so as to maintain quota ratios +between the rest of the pools. + +BALANCE: +This strategy monitors the amount of difficulty 1 shares solved for each pool +and uses it to try to end up doing the same amount of work for all pools. + + +--- +QUOTAS + +The load-balance multipool strategy works off a quota based scheduler. The +quotas handed out by default are equal, but the user is allowed to specify any +arbitrary ratio of quotas. For example, if all the quota values add up to 100, +each quota value will be a percentage, but if 2 pools are specified and pool0 +is given a quota of 1 and pool1 is given a quota of 9, pool0 will get 10% of +the work and pool1 will get 90%. 
Quotas can be changed on the fly by the API, +and do not act retrospectively. Setting a quota to zero will effectively +disable that pool unless all other pools are disabled or dead. In that +scenario, load-balance falls back to regular failover priority-based strategy. +While a pool is dead, it loses its quota and no attempt is made to catch up +when it comes back to life. + +To specify quotas on the command line, pools should be specified with a +semicolon separated --quota(or -U) entry instead of --url. Pools specified with +--url are given a nominal quota value of 1 and entries can be mixed. + +For example: +--url poola:porta -u usernamea -p passa --quota "2;poolb:portb" -u usernameb -p passb +Will give poola 1/3 of the work and poolb 2/3 of the work. + +Writing configuration files with quotas is likewise supported. To use the above +quotas in a configuration file they would be specified thus: + +"pools" : [ + { + "url" : "poola:porta", + "user" : "usernamea", + "pass" : "passa" + }, + { + "quota" : "2;poolb:portb", + "user" : "usernameb", + "pass" : "passb" + } +] + + +--- +SOLO MINING + +Solo mining can be done efficiently as a single pool entry or a backup to +any other pooled mining and it is recommended everyone have solo mining set up +as their final backup in case all their other pools are DDoSed/down for the +security of the network. To enable solo mining, one must be running a local +bitcoind/bitcoin-qt or have one they have rpc access to. To do this, edit your +bitcoind configuration file (bitcoin.conf) with the following extra lines, +using your choice of username and password: + +rpcuser=username +rpcpassword=password + +Restart bitcoind, then start cgminer, pointing to the bitcoind and choose a +btc address with the following options, altering to suit their setup: + +cgminer -o http://localhost:8332 -u username -p password --btc-address 15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ + +Note the http:// is mandatory for solo mining. 
+
+---
+LOGGING
+
+cgminer will log to stderr if it detects stderr is being redirected to a file.
+To enable logging simply add 2>logfile.txt to your command line and logfile.txt
+will contain the logged output at the log level you specify (normal, verbose,
+debug etc.)
+
+In other words if you would normally use:
+./cgminer -o xxx -u yyy -p zzz
+if you use
+./cgminer -o xxx -u yyy -p zzz 2>logfile.txt
+it will log to a file called logfile.txt and otherwise work the same.
+
+There is also the -m option on linux which will spawn a command of your choice
+and pipe the output directly to that command.
+
+The WorkTime details 'debug' option adds details on the end of each line
+displayed for Accepted or Rejected work done. An example would be:
+
+ <-00000059.ed4834a3 M:X D:1.0 G:17:02:38:0.405 C:1.855 (2.995) W:3.440 (0.000) S:0.461 R:17:02:47
+
+The first 2 hex codes are the previous block hash, the rest are reported in
+seconds unless stated otherwise:
+The previous hash is followed by the getwork mode used M:X where X is one of
+P:Pool, T:Test Pool, L:LP or B:Benchmark,
+then D:d.ddd is the difficulty required to get a share from the work,
+then G:hh:mm:ss:n.nnn, which is when the getwork or LP was sent to the pool and
+the n.nnn is how long it took to reply,
+followed by 'O' on its own if it is an original getwork, or 'C:n.nnn' if it was
+a clone with n.nnn stating how long after the work was received that it was cloned,
+(m.mmm) is how long from when the original work was received until work started,
+W:n.nnn is how long the work took to process until it was ready to submit,
+(m.mmm) is how long from ready to submit to actually doing the submit, this is
+usually 0.000 unless there was a problem with submitting the work,
+S:n.nnn is how long it took to submit the completed work and await the reply,
+R:hh:mm:ss is the actual time the work submit reply was received
+
+If you start cgminer with the --sharelog option, you can get detailed
+information for each share 
found. The argument to the option may be "-" for
+standard output (not advisable with the ncurses UI), any valid positive number
+for that file descriptor, or a filename.
+
+To log share data to a file named "share.log", you can use either:
+./cgminer --sharelog 50 -o xxx -u yyy -p zzz 50>share.log
+./cgminer --sharelog share.log -o xxx -u yyy -p zzz
+
+For every share found, data will be logged in a CSV (Comma Separated Value)
+format:
+ timestamp,disposition,target,pool,dev,thr,sharehash,sharedata
+For example (this is wrapped, but it's all on one line for real):
+ 1335313090,reject,
+ ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000,
+ http://localhost:8337,ASC0,0,
+ 6f983c918f3299b58febf95ec4d0c7094ed634bc13754553ec34fc3800000000,
+ 00000001a0980aff4ce4a96d53f4b89a2d5f0e765c978640fe24372a000001c5
+ 000000004a4366808f81d44f26df3d69d7dc4b3473385930462d9ab707b50498
+ f681634a4f1f63d01a0cd43fb338000000000080000000000000000000000000
+ 0000000000000000000000000000000000000000000000000000000080020000
+
+---
+
+BENCHMARK
+
+The --benchmark option hashes a single fixed work item over and over and does
+not submit shares to any pools.
+
+The --benchfile option hashes the work given in the file supplied.
+The format of the work file is:
+version,merkleroot,prevhash,diffbits,noncetime
+Any empty line or any line starting with '#' or '/' is ignored.
+When it reaches the end of the file it continues back at the top.
+
+The format of the data items matches the byte ordering and format of the
+bitcoind getblock RPC output. 
+ +An example file containing bitcoin block #1 would be: + +# Block 1 +1,0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098,00000000001 +9d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f,1d00ffff,1231469665 + +However, the work data should be one line without the linebreak in the middle + +If you use --benchfile , then --benchfile-display will output a log line, +for each nonce found, showing the nonce value in decimal and hex and the work +used to find it in hex. + +--- + +RPC API + +For RPC API details see the API-README file + +--- + +FAQ + +Q: Help, I've started cgminer and everything reads zero!? +A: Welcome to bitcoin mining. Your computer by itself cannot mine bitcoin no +matter how powerful it is. You have to purchase dedicated mining hardware +called ASICs to plug into your computer. See Q regarding ASICs below. + +Q: I have multiple USB stick devices but I can't get them all to work at once? +A: Very few USB hubs deliver the promised power required to run as many devices +as they fit if all of them draw power from USB. + +Q: I've plugged my devices into my USB hub but nothing shows up? +A: RPis and Windows have incomplete or non-standard USB3 support so they may +never work. It may be possible to get a USB3 hub to work by plugging it into +a USB2 hub. When choosing a hub, USB2 hubs are preferable whenever possible +due to better support all round. + +Q: Can I mine on servers from different networks (eg xxxcoin and bitcoin) at +the same time? +A: No, cgminer keeps a database of the block it's working on to ensure it does +not work on stale blocks, and having different blocks from two networks would +make it invalidate the work from each other. + +Q: Can I configure cgminer to mine with different login credentials or pools +for each separate device? +A: No. + +Q: Can I put multiple pools in the config file? +A: Yes, check the example.conf file. 
Alternatively, set up everything either on
+the command line or via the menu after startup and choose settings->write
+config file and the file will be loaded on each startup.
+
+Q: The build fails with gcc is unable to build a binary.
+A: Remove the "-march=native" component of your CFLAGS as your version of gcc
+does not support it. Also -O2 is capital o 2, not zero 2.
+
+Q: Can you implement feature X?
+A: I can, but time is limited, and people who donate are more likely to get
+their feature requests implemented.
+
+Q: Work keeps going to my backup pool even though my primary pool hasn't
+failed?
+A: Cgminer checks for conditions where the primary pool is lagging and will
+pass some work to the backup servers under those conditions. The reason for
+doing this is to try its absolute best to keep the devices working on something
+useful and not risk idle periods. You can disable this behaviour with the
+option --failover-only.
+
+Q: Is this a virus?
+A: Cgminer is being packaged with other trojan scripts and some antivirus
+software is falsely accusing cgminer.exe as being the actual virus, rather
+than whatever it is being packaged with. If you installed cgminer yourself,
+then you do not have a virus on your computer. Complain to your antivirus
+software company. They seem to be flagging even source code now from cgminer
+as viruses, even though text source files can't do anything by themselves.
+
+Q: Can you modify the display to include more of one thing in the output and
+less of another, or can you change the quiet mode or can you add yet another
+output mode?
+A: Everyone will always have their own view of what's important to monitor.
+The defaults are very sane and I have very little interest in changing this
+any further. There is far more detail in the API output than can be reasonably
+displayed on the small console window, and using an external interface such
+as miner.php is much more useful for setups with many devices. 
+
+Q: What are the best parameters to pass for X pool/hardware/device.
+A: Virtually always, the DEFAULT parameters give the best results. Most user
+defined settings lead to worse performance.
+
+Q: What happened to CPU and GPU mining?
+A: Their efficiency makes them irrelevant in the bitcoin mining world today
+and the author has no interest in supporting alternative coins that are better
+mined by these devices.
+
+Q: GUI version?
+A: No. The RPC interface makes it possible for someone else to write one
+though.
+
+Q: I'm having an issue. What debugging information should I provide?
+A: Start cgminer with your regular commands and add -D -T --verbose and provide
+the full startup output and a summary of your hardware and operating system.
+
+Q: Why don't you provide win64 builds?
+A: Win32 builds work everywhere and there is precisely zero advantage to a
+64 bit build on windows.
+
+Q: Is it faster to mine on windows or linux?
+A: It makes no difference in terms of performance. It comes down to choice of
+operating system for their various features and your comfort level. However
+linux is the primary development platform and is virtually guaranteed to be
+more stable.
+
+Q: My network gets slower and slower and then dies for a minute?
+A: Try the --net-delay option if you are on a getwork or GBT server. This does
+nothing with stratum mining.
+
+Q: How do I tune for p2pool?
+A: It is also recommended to use --failover-only since the work is effectively
+like a different block chain, and not enabling --no-submit-stale. If mining with
+a BFL (fpga) minirig, it is worth adding the --bfl-range option.
+
+Q: I run PHP on windows to access the API with the example miner.php. Why does
+it fail when php is installed properly but I only get errors about Sockets not
+working in the logs?
+A: http://us.php.net/manual/en/sockets.installation.php
+
+Q: What is a PGA?
+A: Cgminer supports 3 FPGAs: BitForce, Icarus and ModMiner. 
+They are Field-Programmable Gate Arrays that have been programmed to do Bitcoin
+mining. Since the acronym needs to be only 3 characters, the "Field-" part has
+been skipped.
+
+Q: What is an ASIC?
+A: They are Application Specific Integrated Circuit devices and provide the
+highest performance per unit power due to being dedicated to only one purpose.
+They are the only meaningful way to mine bitcoin today.
+
+Q: What is stratum and how do I use it?
+A: Stratum is a protocol designed for pooled mining in such a way as to
+minimise the amount of network communications, yet scale to hardware of any
+speed. With versions of cgminer 2.8.0+, if a pool has stratum support, cgminer
+will automatically detect it and switch to the support as advertised if it can.
+If you input the stratum port directly into your configuration, or use the
+special prefix "stratum+tcp://" instead of "http://", cgminer will ONLY try to
+use stratum protocol mining. The advantages of stratum to the miner are no
+delays in getting more work for the miner, less rejects across block changes,
+and far less network communications for the same amount of mining hashrate. If
+you do NOT wish cgminer to automatically switch to stratum protocol even if it
+is detected, add the --fix-protocol option.
+
+Q: Why don't the statistics add up: Accepted, Rejected, Stale, Hardware Errors,
+Diff1 Work, etc. when mining greater than 1 difficulty shares?
+A: As an example, if you look at 'Difficulty Accepted' in the RPC API, the number
+of difficulty shares accepted does not usually exactly equal the amount of work
+done to find them. If you are mining at 8 difficulty, then you would expect on
+average to find one 8 difficulty share, per 8 single difficulty shares found.
+However, the number is actually random and converges over time, it is an average,
+not an exact value, thus you may find more or less than the expected average. 
+ +Q: My keyboard input momentarily pauses or repeats keys every so often on +windows while mining? +A: The USB implementation on windows can be very flaky on some hardware and +every time cgminer looks for new hardware to hotplug it it can cause these +sorts of problems. You can disable hotplug with: +--hotplug 0 + +Q: What should my Work Utility (WU) be? +A: Work utility is the product of hashrate * luck and only stabilises over a +very long period of time. Assuming all your work is valid work, bitcoin mining +should produce a work utility of approximately 1 per 71.6MH. This means at +5GH you should have a WU of 5000 / 71.6 or ~ 69. You cannot make your machine +do "better WU" than this - it is luck related. However you can make it much +worse if your machine produces a lot of hardware errors producing invalid work. + +Q: What should I build in for a generic distribution binary? +A: There are a number of drivers that expect to be used on dedicated standalone +hardware. That said, the drivers that are designed to work generically with +USB on any hardware are the following: + +--enable-avalon +--enable-avalon2 +--enable-avalon4 +--enable-bflsc +--enable-bitfury +--enable-blockerupter +--enable-cointerra +--enable-drillbit +--enable-hashfast +--enable-hashratio +--enable-icarus +--enable-klondike + +--- + +This code is provided entirely free of charge by the programmer in his spare +time so donations would be greatly appreciated. Please consider donating to the +address below. + +Con Kolivas +15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ diff --git a/api-example.c b/api-example.c new file mode 100644 index 0000000..f363b81 --- /dev/null +++ b/api-example.c @@ -0,0 +1,334 @@ +/* + * Copyright 2011 Kano + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. 
See COPYING for more details. + */ + +/* Compile: + * gcc api-example.c -Icompat/jansson-2.6/src -Icompat/libusb-1.0/libusb -o bmminer-api + */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "compat.h" +#include "miner.h" + +#if defined(unix) + #include + #include + #include + #include + #include + + #define SOCKETFAIL(a) ((a) < 0) + #define INVSOCK -1 + #define CLOSESOCKET close + + #define SOCKETINIT {} + + #define SOCKERRMSG strerror(errno) +#endif + +#ifdef WIN32 + #include + + #define SOCKETTYPE SOCKET + #define SOCKETFAIL(a) ((a) == SOCKET_ERROR) + #define INVSOCK INVALID_SOCKET + #define CLOSESOCKET closesocket + + static char WSAbuf[1024]; + + struct WSAERRORS { + int id; + char *code; + } WSAErrors[] = { + { 0, "No error" }, + { WSAEINTR, "Interrupted system call" }, + { WSAEBADF, "Bad file number" }, + { WSAEACCES, "Permission denied" }, + { WSAEFAULT, "Bad address" }, + { WSAEINVAL, "Invalid argument" }, + { WSAEMFILE, "Too many open sockets" }, + { WSAEWOULDBLOCK, "Operation would block" }, + { WSAEINPROGRESS, "Operation now in progress" }, + { WSAEALREADY, "Operation already in progress" }, + { WSAENOTSOCK, "Socket operation on non-socket" }, + { WSAEDESTADDRREQ, "Destination address required" }, + { WSAEMSGSIZE, "Message too long" }, + { WSAEPROTOTYPE, "Protocol wrong type for socket" }, + { WSAENOPROTOOPT, "Bad protocol option" }, + { WSAEPROTONOSUPPORT, "Protocol not supported" }, + { WSAESOCKTNOSUPPORT, "Socket type not supported" }, + { WSAEOPNOTSUPP, "Operation not supported on socket" }, + { WSAEPFNOSUPPORT, "Protocol family not supported" }, + { WSAEAFNOSUPPORT, "Address family not supported" }, + { WSAEADDRINUSE, "Address already in use" }, + { WSAEADDRNOTAVAIL, "Can't assign requested address" }, + { WSAENETDOWN, "Network is down" }, + { WSAENETUNREACH, "Network is unreachable" }, + { WSAENETRESET, "Net connection reset" }, + { WSAECONNABORTED, "Software caused connection 
abort" }, + { WSAECONNRESET, "Connection reset by peer" }, + { WSAENOBUFS, "No buffer space available" }, + { WSAEISCONN, "Socket is already connected" }, + { WSAENOTCONN, "Socket is not connected" }, + { WSAESHUTDOWN, "Can't send after socket shutdown" }, + { WSAETOOMANYREFS, "Too many references, can't splice" }, + { WSAETIMEDOUT, "Connection timed out" }, + { WSAECONNREFUSED, "Connection refused" }, + { WSAELOOP, "Too many levels of symbolic links" }, + { WSAENAMETOOLONG, "File name too long" }, + { WSAEHOSTDOWN, "Host is down" }, + { WSAEHOSTUNREACH, "No route to host" }, + { WSAENOTEMPTY, "Directory not empty" }, + { WSAEPROCLIM, "Too many processes" }, + { WSAEUSERS, "Too many users" }, + { WSAEDQUOT, "Disc quota exceeded" }, + { WSAESTALE, "Stale NFS file handle" }, + { WSAEREMOTE, "Too many levels of remote in path" }, + { WSASYSNOTREADY, "Network system is unavailable" }, + { WSAVERNOTSUPPORTED, "Winsock version out of range" }, + { WSANOTINITIALISED, "WSAStartup not yet called" }, + { WSAEDISCON, "Graceful shutdown in progress" }, + { WSAHOST_NOT_FOUND, "Host not found" }, + { WSANO_DATA, "No host data of that type was found" }, + { -1, "Unknown error code" } + }; + + static char *WSAErrorMsg() + { + char *msg; + int i; + int id = WSAGetLastError(); + + /* Assume none of them are actually -1 */ + for (i = 0; WSAErrors[i].id != -1; i++) + if (WSAErrors[i].id == id) + break; + + sprintf(WSAbuf, "Socket Error: (%d) %s", id, WSAErrors[i].code); + + return &(WSAbuf[0]); + } + + #define SOCKERRMSG WSAErrorMsg() + + static WSADATA WSA_Data; + + #define SOCKETINIT int wsa; \ + if (wsa = WSAStartup(0x0202, &WSA_Data)) { \ + printf("Socket startup failed: %d\n", wsa); \ + return 1; \ + } + + #ifndef SHUT_RDWR + #define SHUT_RDWR SD_BOTH + #endif +#endif + +static const char SEPARATOR = '|'; +static const char COMMA = ','; +static const char EQ = '='; +static int ONLY; + +void display(char *buf) +{ + char *nextobj, *item, *nextitem, *eq; + int itemcount; + + while 
(buf != NULL) { + nextobj = strchr(buf, SEPARATOR); + if (nextobj != NULL) + *(nextobj++) = '\0'; + + if (*buf) { + item = buf; + itemcount = 0; + while (item != NULL) { + nextitem = strchr(item, COMMA); + if (nextitem != NULL) + *(nextitem++) = '\0'; + + if (*item) { + eq = strchr(item, EQ); + if (eq != NULL) + *(eq++) = '\0'; + + if (itemcount == 0) + printf("[%s%s] =>\n(\n", item, (eq != NULL && isdigit(*eq)) ? eq : ""); + + if (eq != NULL) + printf(" [%s] => %s\n", item, eq); + else + printf(" [%d] => %s\n", itemcount, item); + } + + item = nextitem; + itemcount++; + } + if (itemcount > 0) + puts(")"); + } + + buf = nextobj; + } +} + +#define SOCKSIZ 65535 + +int callapi(char *command, char *host, short int port) +{ + struct hostent *ip; + struct sockaddr_in serv; + SOCKETTYPE sock; + int ret = 0; + int n; + char *buf = NULL; + size_t len, p; + + SOCKETINIT; + + ip = gethostbyname(host); + if (!ip) { + printf("Couldn't get hostname: '%s'\n", host); + return 1; + } + + sock = socket(AF_INET, SOCK_STREAM, 0); + if (sock == INVSOCK) { + printf("Socket initialisation failed: %s\n", SOCKERRMSG); + return 1; + } + + memset(&serv, 0, sizeof(serv)); + serv.sin_family = AF_INET; + serv.sin_addr = *((struct in_addr *)ip->h_addr); + serv.sin_port = htons(port); + + if (SOCKETFAIL(connect(sock, (struct sockaddr *)&serv, sizeof(struct sockaddr)))) { + printf("Socket connect failed: %s\n", SOCKERRMSG); + return 1; + } + + n = send(sock, command, strlen(command), 0); + if (SOCKETFAIL(n)) { + printf("Send failed: %s\n", SOCKERRMSG); + ret = 1; + } + else { + len = SOCKSIZ; + buf = malloc(len+1); + if (!buf) { + printf("Err: OOM (%d)\n", (int)(len+1)); + return 1; + } + p = 0; + while (42) { + if ((len - p) < 1) { + len += SOCKSIZ; + buf = realloc(buf, len+1); + if (!buf) { + printf("Err: OOM (%d)\n", (int)(len+1)); + return 1; + } + } + + n = recv(sock, &buf[p], len - p , 0); + + if (SOCKETFAIL(n)) { + printf("Recv failed: %s\n", SOCKERRMSG); + ret = 1; + break; + } + + if (n 
== 0) + break; + + p += n; + } + buf[p] = '\0'; + + if (ONLY) + printf("%s\n", buf); + else { + printf("Reply was '%s'\n", buf); + display(buf); + } + } + + CLOSESOCKET(sock); + + return ret; +} + +static char *trim(char *str) +{ + char *ptr; + + while (isspace(*str)) + str++; + + ptr = strchr(str, '\0'); + while (ptr-- > str) { + if (isspace(*ptr)) + *ptr = '\0'; + } + + return str; +} + +int main(int argc, char *argv[]) +{ + char *command = "summary"; + char *host = "127.0.0.1"; + short int port = 4028; + char *ptr; + int i = 1; + + if (argc > 1) + if (strcmp(argv[1], "-?") == 0 + || strcmp(argv[1], "-h") == 0 + || strcmp(argv[1], "--help") == 0) { + fprintf(stderr, "usAge: %s [command [ip/host [port]]]\n", argv[0]); + return 1; + } + + if (argc > 1) + if (strcmp(argv[1], "-o") == 0) { + ONLY = 1; + i = 2; + } + + if (argc > i) { + ptr = trim(argv[i++]); + if (strlen(ptr) > 0) + command = ptr; + } + + if (argc > i) { + ptr = trim(argv[i++]); + if (strlen(ptr) > 0) + host = ptr; + } + + if (argc > i) { + ptr = trim(argv[i]); + if (strlen(ptr) > 0) + port = atoi(ptr); + } + + return callapi(command, host, port); +} diff --git a/api-example.php b/api-example.php new file mode 100644 index 0000000..6ce2a21 --- /dev/null +++ b/api-example.php @@ -0,0 +1,116 @@ + 0) + { + $items = explode(',', $obj); + $item = $items[0]; + $id = explode('=', $items[0], 2); + if (count($id) == 1 or !ctype_digit($id[1])) + $name = $id[0]; + else + $name = $id[0].$id[1]; + + if (strlen($name) == 0) + $name = 'null'; + + if (isset($data[$name])) + { + $num = 1; + while (isset($data[$name.$num])) + $num++; + $name .= $num; + } + + $counter = 0; + foreach ($items as $item) + { + $id = explode('=', $item, 2); + if (count($id) == 2) + $data[$name][$id[0]] = $id[1]; + else + $data[$name][$counter] = $id[0]; + + $counter++; + } + } + } + + return $data; + } + + return null; +} +# +if (isset($argv) and count($argv) > 1) + $r = request($argv[1]); +else + $r = request('summary'); +# +echo 
print_r($r, true)."\n"; +# +?> diff --git a/api-example.py b/api-example.py new file mode 100644 index 0000000..5b8cbb2 --- /dev/null +++ b/api-example.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python2.7 + +# Copyright 2013 Setkeh Mkfr +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation; either version 3 of the License, or (at your option) any later +# version. See COPYING for more details. + +#Short Python Example for connecting to The Cgminer API +#Written By: setkeh +#Thanks to Jezzz for all his Support. +#NOTE: When adding a param with a pipe | in bash or ZSH you must wrap the arg in quotes +#E.G "pga|0" + +import socket +import json +import sys + +def linesplit(socket): + buffer = socket.recv(4096) + done = False + while not done: + more = socket.recv(4096) + if not more: + done = True + else: + buffer = buffer+more + if buffer: + return buffer + +api_command = sys.argv[1].split('|') + +if len(sys.argv) < 3: + api_ip = '127.0.0.1' + api_port = 4028 +else: + api_ip = sys.argv[2] + api_port = sys.argv[3] + +s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) +s.connect((api_ip,int(api_port))) +if len(api_command) == 2: + s.send(json.dumps({"command":api_command[0],"parameter":api_command[1]})) +else: + s.send(json.dumps({"command":api_command[0]})) + +response = linesplit(s) +response = response.replace('\x00','') +response = json.loads(response) +print response +s.close() diff --git a/api-example.rb b/api-example.rb new file mode 100644 index 0000000..28c4402 --- /dev/null +++ b/api-example.rb @@ -0,0 +1,38 @@ +#!/usr/bin/env ruby + +# Copyright 2014 James Hilliard +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation; either version 3 of the License, or (at your option) any later +# version. 
See COPYING for more details. + +require 'socket' +require 'json' + +api_command = ARGV[0].split(":") + +if ARGV.length == 3 + api_ip = ARGV[1] + api_port = ARGV[2] +elsif ARGV.length == 2 + api_ip = ARGV[1] + api_port = 4028 +else + api_ip = "127.0.0.1" + api_port = 4028 +end + +s = TCPSocket.open(api_ip, api_port) + +if api_command.count == 2 + s.write({ :command => api_command[0], :parameter => api_command[1]}.to_json) +else + s.write({ :command => api_command[0]}.to_json) +end + +response = s.read.strip +response = JSON.parse(response) + +puts response +s.close diff --git a/api.c b/api.c new file mode 100644 index 0000000..c046ba6 --- /dev/null +++ b/api.c @@ -0,0 +1,5256 @@ +/* + * Copyright 2011-2014 Andrew Smith + * Copyright 2011-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ +#define _MEMORY_DEBUG_MASTER 1 + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "compat.h" +#include "miner.h" +#include "util.h" +#include "klist.h" + +#if defined(USE_BFLSC) || defined(USE_AVALON) || defined(USE_AVALON2) || defined(USE_AVALON4) || \ + defined(USE_HASHFAST) || defined(USE_BITFURY) || defined(USE_BLOCKERUPTER) || defined(USE_KLONDIKE) || \ + defined(USE_KNC) || defined(USE_BAB) || defined(USE_DRILLBIT) || \ + defined(USE_MINION) || defined(USE_COINTERRA) || defined(USE_BITMINE_A1) || \ + defined(USE_BMSC) || defined(USE_BITMAIN) || defined(USE_SP10) || defined(USE_SP30) || \ + defined(USE_ICARUS) || defined(USE_HASHRATIO) +#define HAVE_AN_ASIC 1 +#endif + +#if defined(USE_BITFORCE) || defined(USE_MODMINER) +#define HAVE_AN_FPGA 1 +#endif + +// BUFSIZ varies on Windows and Linux +#define TMPBUFSIZ 8192 + +// Number of requests to queue - normally would be small +// However lots of PGA's may mean more +#define QUEUE 100 + +#if defined WIN32 +static char WSAbuf[1024]; + +struct WSAERRORS { + int id; + char *code; +} WSAErrors[] = { + { 0, "No error" }, + { WSAEINTR, "Interrupted system call" }, + { WSAEBADF, "Bad file number" }, + { WSAEACCES, "Permission denied" }, + { WSAEFAULT, "Bad address" }, + { WSAEINVAL, "Invalid argument" }, + { WSAEMFILE, "Too many open sockets" }, + { WSAEWOULDBLOCK, "Operation would block" }, + { WSAEINPROGRESS, "Operation now in progress" }, + { WSAEALREADY, "Operation already in progress" }, + { WSAENOTSOCK, "Socket operation on non-socket" }, + { WSAEDESTADDRREQ, "Destination address required" }, + { WSAEMSGSIZE, "Message too long" }, + { WSAEPROTOTYPE, "Protocol wrong type for socket" }, + { WSAENOPROTOOPT, "Bad protocol option" }, + { WSAEPROTONOSUPPORT, "Protocol not supported" }, + { WSAESOCKTNOSUPPORT, "Socket type not supported" }, + { WSAEOPNOTSUPP, "Operation not supported on socket" }, + { WSAEPFNOSUPPORT, "Protocol family not 
supported" }, + { WSAEAFNOSUPPORT, "Address family not supported" }, + { WSAEADDRINUSE, "Address already in use" }, + { WSAEADDRNOTAVAIL, "Can't assign requested address" }, + { WSAENETDOWN, "Network is down" }, + { WSAENETUNREACH, "Network is unreachable" }, + { WSAENETRESET, "Net connection reset" }, + { WSAECONNABORTED, "Software caused connection abort" }, + { WSAECONNRESET, "Connection reset by peer" }, + { WSAENOBUFS, "No buffer space available" }, + { WSAEISCONN, "Socket is already connected" }, + { WSAENOTCONN, "Socket is not connected" }, + { WSAESHUTDOWN, "Can't send after socket shutdown" }, + { WSAETOOMANYREFS, "Too many references, can't splice" }, + { WSAETIMEDOUT, "Connection timed out" }, + { WSAECONNREFUSED, "Connection refused" }, + { WSAELOOP, "Too many levels of symbolic links" }, + { WSAENAMETOOLONG, "File name too long" }, + { WSAEHOSTDOWN, "Host is down" }, + { WSAEHOSTUNREACH, "No route to host" }, + { WSAENOTEMPTY, "Directory not empty" }, + { WSAEPROCLIM, "Too many processes" }, + { WSAEUSERS, "Too many users" }, + { WSAEDQUOT, "Disc quota exceeded" }, + { WSAESTALE, "Stale NFS file handle" }, + { WSAEREMOTE, "Too many levels of remote in path" }, + { WSASYSNOTREADY, "Network system is unavailable" }, + { WSAVERNOTSUPPORTED, "Winsock version out of range" }, + { WSANOTINITIALISED, "WSAStartup not yet called" }, + { WSAEDISCON, "Graceful shutdown in progress" }, + { WSAHOST_NOT_FOUND, "Host not found" }, + { WSANO_DATA, "No host data of that type was found" }, + { -1, "Unknown error code" } +}; + +char *WSAErrorMsg(void) { + int i; + int id = WSAGetLastError(); + + /* Assume none of them are actually -1 */ + for (i = 0; WSAErrors[i].id != -1; i++) + if (WSAErrors[i].id == id) + break; + + sprintf(WSAbuf, "Socket Error: (%d) %s", id, WSAErrors[i].code); + + return &(WSAbuf[0]); +} +#endif + +#if defined(__APPLE__) +#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP +#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP +#endif + +static const char 
*UNAVAILABLE = " - API will not be available"; +static const char *MUNAVAILABLE = " - API multicast listener will not be available"; + +static const char *BLANK = ""; +static const char *COMMA = ","; +#define COMSTR "," +static const char SEPARATOR = '|'; +#define SEPSTR "|" +#define CMDJOIN '+' +#define JOIN_CMD "CMD=" +#define BETWEEN_JOIN SEPSTR + +static const char *APIVERSION = "3.1"; +static const char *DEAD = "Dead"; +static const char *SICK = "Sick"; +static const char *NOSTART = "NoStart"; +static const char *INIT = "Initialising"; +static const char *DISABLED = "Disabled"; +static const char *ALIVE = "Alive"; +static const char *REJECTING = "Rejecting"; +static const char *UNKNOWN = "Unknown"; + +static __maybe_unused const char *NONE = "None"; + +static const char *YES = "Y"; +static const char *NO = "N"; +static const char *NULLSTR = "(null)"; + +static const char *TRUESTR = "true"; +static const char *FALSESTR = "false"; + +static const char *SHA256STR = "sha256"; + +static const char *DEVICECODE = "" +#ifdef USE_BMSC + "BTM " +#endif +#ifdef USE_BITMAIN + "BTM " +#endif +#ifdef USE_AVALON + "AVA " +#endif +#ifdef USE_BAB + "BaB " +#endif +#ifdef USE_BFLSC + "BAS " +#endif +#ifdef USE_BITFORCE + "BFL " +#endif +#ifdef USE_BITFURY + "BFU " +#endif +#ifdef USE_BLOCKERUPTER + "BET " +#endif +#ifdef USE_DRILLBIT + "DRB " +#endif +#ifdef USE_HASHFAST + "HFA " +#endif +#ifdef USE_HASHRATIO + "HRO " +#endif +#ifdef USE_BITMINE_A1 + "BA1 " +#endif +#ifdef USE_ICARUS + "ICA " +#endif +#ifdef USE_KNC + "KnC " +#endif +#ifdef USE_MINION + "MBA " +#endif +#ifdef USE_MODMINER + "MMQ " +#endif +#ifdef USE_COINTERRA + "CTA " +#endif +#ifdef USE_SP10 + "SPN " +#endif +#ifdef USE_SP30 + "S30 " +#endif + + + ""; + +static const char *OSINFO = +#if defined(__linux) + "Linux"; +#else +#if defined(__APPLE__) + "Apple"; +#else +#if defined (WIN32) + "Windows"; +#else +#if defined(unix) + "Unix"; +#else + "Unknown"; +#endif +#endif +#endif +#endif + +#define _DEVS "DEVS" 
+#define _POOLS "POOLS" +#define _SUMMARY "SUMMARY" +#define _STATUS "STATUS" +#define _VERSION "VERSION" +#define _MINECONFIG "CONFIG" + +#ifdef HAVE_AN_FPGA +#define _PGA "PGA" +#endif + +#ifdef HAVE_AN_ASIC +#define _ASC "ASC" +#endif + +#define _PGAS "PGAS" +#define _ASCS "ASCS" +#define _NOTIFY "NOTIFY" +#define _DEVDETAILS "DEVDETAILS" +#define _BYE "BYE" +#define _RESTART "RESTART" +#define _MINESTATS "STATS" +#define _CHECK "CHECK" +#define _MINECOIN "COIN" +#define _DEBUGSET "DEBUG" +#define _SETCONFIG "SETCONFIG" +#define _USBSTATS "USBSTATS" +#define _LCD "LCD" + +static const char ISJSON = '{'; +#define JSON0 "{" +#define JSON1 "\"" +#define JSON2 "\":[" +#define JSON3 "]" +#define JSON4 ",\"id\":1" +// If anyone cares, id=0 for truncated output +#define JSON4_TRUNCATED ",\"id\":0" +#define JSON5 "}" +#define JSON6 "\":" + +#define JSON_START JSON0 +#define JSON_DEVS JSON1 _DEVS JSON2 +#define JSON_POOLS JSON1 _POOLS JSON2 +#define JSON_SUMMARY JSON1 _SUMMARY JSON2 +#define JSON_STATUS JSON1 _STATUS JSON2 +#define JSON_VERSION JSON1 _VERSION JSON2 +#define JSON_MINECONFIG JSON1 _MINECONFIG JSON2 +#define JSON_ACTION JSON0 JSON1 _STATUS JSON6 + +#ifdef HAVE_AN_FPGA +#define JSON_PGA JSON1 _PGA JSON2 +#endif + +#ifdef HAVE_AN_ASIC +#define JSON_ASC JSON1 _ASC JSON2 +#endif + +#define JSON_PGAS JSON1 _PGAS JSON2 +#define JSON_ASCS JSON1 _ASCS JSON2 +#define JSON_NOTIFY JSON1 _NOTIFY JSON2 +#define JSON_DEVDETAILS JSON1 _DEVDETAILS JSON2 +#define JSON_BYE JSON1 _BYE JSON1 +#define JSON_RESTART JSON1 _RESTART JSON1 +#define JSON_CLOSE JSON3 +#define JSON_MINESTATS JSON1 _MINESTATS JSON2 +#define JSON_CHECK JSON1 _CHECK JSON2 +#define JSON_MINECOIN JSON1 _MINECOIN JSON2 +#define JSON_DEBUGSET JSON1 _DEBUGSET JSON2 +#define JSON_SETCONFIG JSON1 _SETCONFIG JSON2 +#define JSON_USBSTATS JSON1 _USBSTATS JSON2 +#define JSON_LCD JSON1 _LCD JSON2 +#define JSON_END JSON4 JSON5 +#define JSON_END_TRUNCATED JSON4_TRUNCATED JSON5 +#define JSON_BETWEEN_JOIN "," + +static 
const char *JSON_COMMAND = "command"; +static const char *JSON_PARAMETER = "parameter"; + +#define MSG_POOL 7 +#define MSG_NOPOOL 8 +#define MSG_DEVS 9 +#define MSG_NODEVS 10 +#define MSG_SUMM 11 +#define MSG_INVCMD 14 +#define MSG_MISID 15 + +#define MSG_VERSION 22 +#define MSG_INVJSON 23 +#define MSG_MISCMD 24 +#define MSG_MISPID 25 +#define MSG_INVPID 26 +#define MSG_SWITCHP 27 +#define MSG_MISVAL 28 +#define MSG_NOADL 29 +#define MSG_INVINT 31 +#define MSG_MINECONFIG 33 +#define MSG_MISFN 42 +#define MSG_BADFN 43 +#define MSG_SAVED 44 +#define MSG_ACCDENY 45 +#define MSG_ACCOK 46 +#define MSG_ENAPOOL 47 +#define MSG_DISPOOL 48 +#define MSG_ALRENAP 49 +#define MSG_ALRDISP 50 +#define MSG_DISLASTP 51 +#define MSG_MISPDP 52 +#define MSG_INVPDP 53 +#define MSG_TOOMANYP 54 +#define MSG_ADDPOOL 55 + +#ifdef HAVE_AN_FPGA +#define MSG_PGANON 56 +#define MSG_PGADEV 57 +#define MSG_INVPGA 58 +#endif + +#define MSG_NUMPGA 59 +#define MSG_NOTIFY 60 + +#ifdef HAVE_AN_FPGA +#define MSG_PGALRENA 61 +#define MSG_PGALRDIS 62 +#define MSG_PGAENA 63 +#define MSG_PGADIS 64 +#define MSG_PGAUNW 65 +#endif + +#define MSG_REMLASTP 66 +#define MSG_ACTPOOL 67 +#define MSG_REMPOOL 68 +#define MSG_DEVDETAILS 69 +#define MSG_MINESTATS 70 +#define MSG_MISCHK 71 +#define MSG_CHECK 72 +#define MSG_POOLPRIO 73 +#define MSG_DUPPID 74 +#define MSG_MISBOOL 75 +#define MSG_INVBOOL 76 +#define MSG_FOO 77 +#define MSG_MINECOIN 78 +#define MSG_DEBUGSET 79 +#define MSG_PGAIDENT 80 +#define MSG_PGANOID 81 +#define MSG_SETCONFIG 82 +#define MSG_UNKCON 83 +#define MSG_INVNUM 84 +#define MSG_CONPAR 85 +#define MSG_CONVAL 86 +#define MSG_USBSTA 87 +#define MSG_NOUSTA 88 + +#ifdef HAVE_AN_FPGA +#define MSG_MISPGAOPT 89 +#define MSG_PGANOSET 90 +#define MSG_PGAHELP 91 +#define MSG_PGASETOK 92 +#define MSG_PGASETERR 93 +#endif + +#define MSG_ZERMIS 94 +#define MSG_ZERINV 95 +#define MSG_ZERSUM 96 +#define MSG_ZERNOSUM 97 +#define MSG_PGAUSBNODEV 98 +#define MSG_INVHPLG 99 +#define MSG_HOTPLUG 100 +#define 
MSG_DISHPLG 101 +#define MSG_NOHPLG 102 +#define MSG_MISHPLG 103 + +#define MSG_NUMASC 104 +#ifdef HAVE_AN_ASIC +#define MSG_ASCNON 105 +#define MSG_ASCDEV 106 +#define MSG_INVASC 107 +#define MSG_ASCLRENA 108 +#define MSG_ASCLRDIS 109 +#define MSG_ASCENA 110 +#define MSG_ASCDIS 111 +#define MSG_ASCUNW 112 +#define MSG_ASCIDENT 113 +#define MSG_ASCNOID 114 +#endif +#define MSG_ASCUSBNODEV 115 + +#ifdef HAVE_AN_ASIC +#define MSG_MISASCOPT 116 +#define MSG_ASCNOSET 117 +#define MSG_ASCHELP 118 +#define MSG_ASCSETOK 119 +#define MSG_ASCSETERR 120 +#endif + +#define MSG_INVNEG 121 +#define MSG_SETQUOTA 122 +#define MSG_LOCKOK 123 +#define MSG_LOCKDIS 124 +#define MSG_LCD 125 + +enum code_severity { + SEVERITY_ERR, + SEVERITY_WARN, + SEVERITY_INFO, + SEVERITY_SUCC, + SEVERITY_FAIL +}; + +enum code_parameters { + PARAM_PGA, + PARAM_ASC, + PARAM_PID, + PARAM_PGAMAX, + PARAM_ASCMAX, + PARAM_PMAX, + PARAM_POOLMAX, + +// Single generic case: have the code resolve it - see below + PARAM_DMAX, + + PARAM_CMD, + PARAM_POOL, + PARAM_STR, + PARAM_BOTH, + PARAM_BOOL, + PARAM_SET, + PARAM_INT, + PARAM_NONE +}; + +struct CODES { + const enum code_severity severity; + const int code; + const enum code_parameters params; + const char *description; +} codes[] = { + { SEVERITY_SUCC, MSG_POOL, PARAM_PMAX, "%d Pool(s)" }, + { SEVERITY_ERR, MSG_NOPOOL, PARAM_NONE, "No pools" }, + + { SEVERITY_SUCC, MSG_DEVS, PARAM_DMAX, +#ifdef HAVE_AN_ASIC + "%d ASC(s)" +#endif +#if defined(HAVE_AN_ASIC) && defined(HAVE_AN_FPGA) + " - " +#endif +#ifdef HAVE_AN_FPGA + "%d PGA(s)" +#endif + }, + + { SEVERITY_ERR, MSG_NODEVS, PARAM_NONE, "No " +#ifdef HAVE_AN_ASIC + "ASCs" +#endif +#if defined(HAVE_AN_ASIC) && defined(HAVE_AN_FPGA) + "/" +#endif +#ifdef HAVE_AN_FPGA + "PGAs" +#endif + }, + + { SEVERITY_SUCC, MSG_SUMM, PARAM_NONE, "Summary" }, + { SEVERITY_ERR, MSG_INVCMD, PARAM_NONE, "Invalid command" }, + { SEVERITY_ERR, MSG_MISID, PARAM_NONE, "Missing device id parameter" }, +#ifdef HAVE_AN_FPGA + { 
SEVERITY_ERR, MSG_PGANON, PARAM_NONE, "No PGAs" }, + { SEVERITY_SUCC, MSG_PGADEV, PARAM_PGA, "PGA%d" }, + { SEVERITY_ERR, MSG_INVPGA, PARAM_PGAMAX, "Invalid PGA id %d - range is 0 - %d" }, + { SEVERITY_INFO, MSG_PGALRENA,PARAM_PGA, "PGA %d already enabled" }, + { SEVERITY_INFO, MSG_PGALRDIS,PARAM_PGA, "PGA %d already disabled" }, + { SEVERITY_INFO, MSG_PGAENA, PARAM_PGA, "PGA %d sent enable message" }, + { SEVERITY_INFO, MSG_PGADIS, PARAM_PGA, "PGA %d set disable flag" }, + { SEVERITY_ERR, MSG_PGAUNW, PARAM_PGA, "PGA %d is not flagged WELL, cannot enable" }, +#endif + { SEVERITY_SUCC, MSG_NUMPGA, PARAM_NONE, "PGA count" }, + { SEVERITY_SUCC, MSG_NUMASC, PARAM_NONE, "ASC count" }, + { SEVERITY_SUCC, MSG_VERSION, PARAM_NONE, "BMMiner versions" }, + { SEVERITY_ERR, MSG_INVJSON, PARAM_NONE, "Invalid JSON" }, + { SEVERITY_ERR, MSG_MISCMD, PARAM_CMD, "Missing JSON '%s'" }, + { SEVERITY_ERR, MSG_MISPID, PARAM_NONE, "Missing pool id parameter" }, + { SEVERITY_ERR, MSG_INVPID, PARAM_POOLMAX, "Invalid pool id %d - range is 0 - %d" }, + { SEVERITY_SUCC, MSG_SWITCHP, PARAM_POOL, "Switching to pool %d:'%s'" }, + { SEVERITY_SUCC, MSG_MINECONFIG,PARAM_NONE, "BMMiner config" }, + { SEVERITY_ERR, MSG_MISFN, PARAM_NONE, "Missing save filename parameter" }, + { SEVERITY_ERR, MSG_BADFN, PARAM_STR, "Can't open or create save file '%s'" }, + { SEVERITY_SUCC, MSG_SAVED, PARAM_STR, "Configuration saved to file '%s'" }, + { SEVERITY_ERR, MSG_ACCDENY, PARAM_STR, "Access denied to '%s' command" }, + { SEVERITY_SUCC, MSG_ACCOK, PARAM_NONE, "Privileged access OK" }, + { SEVERITY_SUCC, MSG_ENAPOOL, PARAM_POOL, "Enabling pool %d:'%s'" }, + { SEVERITY_SUCC, MSG_POOLPRIO,PARAM_NONE, "Changed pool priorities" }, + { SEVERITY_ERR, MSG_DUPPID, PARAM_PID, "Duplicate pool specified %d" }, + { SEVERITY_SUCC, MSG_DISPOOL, PARAM_POOL, "Disabling pool %d:'%s'" }, + { SEVERITY_INFO, MSG_ALRENAP, PARAM_POOL, "Pool %d:'%s' already enabled" }, + { SEVERITY_INFO, MSG_ALRDISP, PARAM_POOL, "Pool %d:'%s' already 
disabled" }, + { SEVERITY_ERR, MSG_DISLASTP,PARAM_POOL, "Cannot disable last active pool %d:'%s'" }, + { SEVERITY_ERR, MSG_MISPDP, PARAM_NONE, "Missing addpool details" }, + { SEVERITY_ERR, MSG_INVPDP, PARAM_STR, "Invalid addpool details '%s'" }, + { SEVERITY_ERR, MSG_TOOMANYP,PARAM_NONE, "Reached maximum number of pools (%d)" }, + { SEVERITY_SUCC, MSG_ADDPOOL, PARAM_POOL, "Added pool %d: '%s'" }, + { SEVERITY_ERR, MSG_REMLASTP,PARAM_POOL, "Cannot remove last pool %d:'%s'" }, + { SEVERITY_ERR, MSG_ACTPOOL, PARAM_POOL, "Cannot remove active pool %d:'%s'" }, + { SEVERITY_SUCC, MSG_REMPOOL, PARAM_BOTH, "Removed pool %d:'%s'" }, + { SEVERITY_SUCC, MSG_NOTIFY, PARAM_NONE, "Notify" }, + { SEVERITY_SUCC, MSG_DEVDETAILS,PARAM_NONE, "Device Details" }, + { SEVERITY_SUCC, MSG_MINESTATS,PARAM_NONE, "BMMiner stats" }, + { SEVERITY_ERR, MSG_MISCHK, PARAM_NONE, "Missing check cmd" }, + { SEVERITY_SUCC, MSG_CHECK, PARAM_NONE, "Check command" }, + { SEVERITY_ERR, MSG_MISBOOL, PARAM_NONE, "Missing parameter: true/false" }, + { SEVERITY_ERR, MSG_INVBOOL, PARAM_NONE, "Invalid parameter should be true or false" }, + { SEVERITY_SUCC, MSG_FOO, PARAM_BOOL, "Failover-Only set to %s" }, + { SEVERITY_SUCC, MSG_MINECOIN,PARAM_NONE, "BMMiner coin" }, + { SEVERITY_SUCC, MSG_DEBUGSET,PARAM_NONE, "Debug settings" }, +#ifdef HAVE_AN_FPGA + { SEVERITY_SUCC, MSG_PGAIDENT,PARAM_PGA, "Identify command sent to PGA%d" }, + { SEVERITY_WARN, MSG_PGANOID, PARAM_PGA, "PGA%d does not support identify" }, +#endif + { SEVERITY_SUCC, MSG_SETCONFIG,PARAM_SET, "Set config '%s' to %d" }, + { SEVERITY_ERR, MSG_UNKCON, PARAM_STR, "Unknown config '%s'" }, + { SEVERITY_ERR, MSG_INVNUM, PARAM_BOTH, "Invalid number (%d) for '%s' range is 0-9999" }, + { SEVERITY_ERR, MSG_INVNEG, PARAM_BOTH, "Invalid negative number (%d) for '%s'" }, + { SEVERITY_SUCC, MSG_SETQUOTA,PARAM_SET, "Set pool '%s' to quota %d'" }, + { SEVERITY_ERR, MSG_CONPAR, PARAM_NONE, "Missing config parameters 'name,N'" }, + { SEVERITY_ERR, MSG_CONVAL, 
PARAM_STR, "Missing config value N for '%s,N'" }, + { SEVERITY_SUCC, MSG_USBSTA, PARAM_NONE, "USB Statistics" }, + { SEVERITY_INFO, MSG_NOUSTA, PARAM_NONE, "No USB Statistics" }, +#ifdef HAVE_AN_FPGA + { SEVERITY_ERR, MSG_MISPGAOPT, PARAM_NONE, "Missing option after PGA number" }, + { SEVERITY_WARN, MSG_PGANOSET, PARAM_PGA, "PGA %d does not support pgaset" }, + { SEVERITY_INFO, MSG_PGAHELP, PARAM_BOTH, "PGA %d set help: %s" }, + { SEVERITY_SUCC, MSG_PGASETOK, PARAM_BOTH, "PGA %d set OK" }, + { SEVERITY_ERR, MSG_PGASETERR, PARAM_BOTH, "PGA %d set failed: %s" }, +#endif + { SEVERITY_ERR, MSG_ZERMIS, PARAM_NONE, "Missing zero parameters" }, + { SEVERITY_ERR, MSG_ZERINV, PARAM_STR, "Invalid zero parameter '%s'" }, + { SEVERITY_SUCC, MSG_ZERSUM, PARAM_STR, "Zeroed %s stats with summary" }, + { SEVERITY_SUCC, MSG_ZERNOSUM, PARAM_STR, "Zeroed %s stats without summary" }, +#ifdef USE_USBUTILS + { SEVERITY_ERR, MSG_PGAUSBNODEV, PARAM_PGA, "PGA%d has no device" }, + { SEVERITY_ERR, MSG_ASCUSBNODEV, PARAM_PGA, "ASC%d has no device" }, +#endif + { SEVERITY_ERR, MSG_INVHPLG, PARAM_STR, "Invalid value for hotplug (%s) must be 0..9999" }, + { SEVERITY_SUCC, MSG_HOTPLUG, PARAM_INT, "Hotplug check set to %ds" }, + { SEVERITY_SUCC, MSG_DISHPLG, PARAM_NONE, "Hotplug disabled" }, + { SEVERITY_WARN, MSG_NOHPLG, PARAM_NONE, "Hotplug is not available" }, + { SEVERITY_ERR, MSG_MISHPLG, PARAM_NONE, "Missing hotplug parameter" }, +#ifdef HAVE_AN_ASIC + { SEVERITY_ERR, MSG_ASCNON, PARAM_NONE, "No ASCs" }, + { SEVERITY_SUCC, MSG_ASCDEV, PARAM_ASC, "ASC%d" }, + { SEVERITY_ERR, MSG_INVASC, PARAM_ASCMAX, "Invalid ASC id %d - range is 0 - %d" }, + { SEVERITY_INFO, MSG_ASCLRENA,PARAM_ASC, "ASC %d already enabled" }, + { SEVERITY_INFO, MSG_ASCLRDIS,PARAM_ASC, "ASC %d already disabled" }, + { SEVERITY_INFO, MSG_ASCENA, PARAM_ASC, "ASC %d sent enable message" }, + { SEVERITY_INFO, MSG_ASCDIS, PARAM_ASC, "ASC %d set disable flag" }, + { SEVERITY_ERR, MSG_ASCUNW, PARAM_ASC, "ASC %d is not flagged WELL, 
cannot enable" }, + { SEVERITY_SUCC, MSG_ASCIDENT,PARAM_ASC, "Identify command sent to ASC%d" }, + { SEVERITY_WARN, MSG_ASCNOID, PARAM_ASC, "ASC%d does not support identify" }, + { SEVERITY_ERR, MSG_MISASCOPT, PARAM_NONE, "Missing option after ASC number" }, + { SEVERITY_WARN, MSG_ASCNOSET, PARAM_ASC, "ASC %d does not support ascset" }, + { SEVERITY_INFO, MSG_ASCHELP, PARAM_BOTH, "ASC %d set help: %s" }, + { SEVERITY_SUCC, MSG_ASCSETOK, PARAM_BOTH, "ASC %d set OK" }, + { SEVERITY_ERR, MSG_ASCSETERR, PARAM_BOTH, "ASC %d set failed: %s" }, +#endif + { SEVERITY_SUCC, MSG_LCD, PARAM_NONE, "LCD" }, + { SEVERITY_SUCC, MSG_LOCKOK, PARAM_NONE, "Lock stats created" }, + { SEVERITY_WARN, MSG_LOCKDIS, PARAM_NONE, "Lock stats not enabled" }, + { SEVERITY_FAIL, 0, 0, NULL } +}; + +static const char *localaddr = "127.0.0.1"; + +static int my_thr_id = 0; +static bool bye; + +// Used to control quit restart access to shutdown variables +static pthread_mutex_t quit_restart_lock; + +static bool do_a_quit; +static bool do_a_restart; + +static time_t when = 0; // when the request occurred + +struct IPACCESS { + struct in6_addr ip; + struct in6_addr mask; + char group; +}; + +#define GROUP(g) (toupper(g)) +#define PRIVGROUP GROUP('W') +#define NOPRIVGROUP GROUP('R') +#define ISPRIVGROUP(g) (GROUP(g) == PRIVGROUP) +#define GROUPOFFSET(g) (GROUP(g) - GROUP('A')) +#define VALIDGROUP(g) (GROUP(g) >= GROUP('A') && GROUP(g) <= GROUP('Z')) +#define COMMANDS(g) (apigroups[GROUPOFFSET(g)].commands) +#define DEFINEDGROUP(g) (ISPRIVGROUP(g) || COMMANDS(g) != NULL) + +struct APIGROUPS { + // This becomes a string like: "|cmd1|cmd2|cmd3|" so it's quick to search + char *commands; +} apigroups['Z' - 'A' + 1]; // only A=0 to Z=25 (R: noprivs, W: allprivs) + +static struct IPACCESS *ipaccess = NULL; +static int ips = 0; + +struct io_data { + size_t siz; + char *ptr; + char *cur; + bool sock; + bool close; +}; + +struct io_list { + struct io_data *io_data; + struct io_list *prev; + struct io_list 
*next; +}; + +static struct io_list *io_head = NULL; + +#define SOCKBUFALLOCSIZ 65536 + +#define io_new(init) _io_new(init, false) +#define sock_io_new() _io_new(SOCKBUFALLOCSIZ, true) + +#define ALLOC_SBITEMS 2 +#define LIMIT_SBITEMS 0 + +typedef struct sbitem { + char *buf; + size_t siz; + size_t tot; +} SBITEM; + +// Size to grow tot if exceeded +#define SBEXTEND 4096 + +#define DATASB(_item) ((SBITEM *)(_item->data)) + +static K_LIST *strbufs; + +static void io_reinit(struct io_data *io_data) +{ + io_data->cur = io_data->ptr; + *(io_data->ptr) = '\0'; + io_data->close = false; +} + +static struct io_data *_io_new(size_t initial, bool socket_buf) +{ + struct io_data *io_data; + struct io_list *io_list; + + io_data = malloc(sizeof(*io_data)); + io_data->ptr = malloc(initial); + io_data->siz = initial; + io_data->sock = socket_buf; + io_reinit(io_data); + + io_list = malloc(sizeof(*io_list)); + + io_list->io_data = io_data; + + if (io_head) { + io_list->next = io_head; + io_list->prev = io_head->prev; + io_list->next->prev = io_list; + io_list->prev->next = io_list; + } else { + io_list->prev = io_list; + io_list->next = io_list; + io_head = io_list; + } + + return io_data; +} + +static bool io_add(struct io_data *io_data, char *buf) +{ + size_t len, dif, tot; + + len = strlen(buf); + dif = io_data->cur - io_data->ptr; + // send will always have enough space to add the JSON + tot = len + 1 + dif + sizeof(JSON_CLOSE) + sizeof(JSON_END); + + if (tot > io_data->siz) { + size_t new = io_data->siz + (2 * SOCKBUFALLOCSIZ); + + if (new < tot) + new = (2 + (size_t)((float)tot / (float)SOCKBUFALLOCSIZ)) * SOCKBUFALLOCSIZ; + + io_data->ptr = realloc(io_data->ptr, new); + io_data->cur = io_data->ptr + dif; + io_data->siz = new; + } + + memcpy(io_data->cur, buf, len + 1); + io_data->cur += len; + + return true; +} + +static bool io_put(struct io_data *io_data, char *buf) +{ + io_reinit(io_data); + return io_add(io_data, buf); +} + +static void io_close(struct io_data 
*io_data) +{ + io_data->close = true; +} + +static void io_free() +{ + struct io_list *io_list, *io_next; + + if (io_head) { + io_list = io_head; + do { + io_next = io_list->next; + + free(io_list->io_data->ptr); + free(io_list->io_data); + free(io_list); + + io_list = io_next; + } while (io_list != io_head); + + io_head = NULL; + } +} + +// This is only called when expected to be needed (rarely) +// i.e. strings outside of the codes control (input from the user) +static char *escape_string(char *str, bool isjson) +{ + char *buf, *ptr; + int count; + + count = 0; + for (ptr = str; *ptr; ptr++) { + switch (*ptr) { + case ',': + case '|': + case '=': + if (!isjson) + count++; + break; + case '"': + if (isjson) + count++; + break; + case '\\': + count++; + break; + } + } + + if (count == 0) + return str; + + buf = malloc(strlen(str) + count + 1); + if (unlikely(!buf)) { + quithere(1, "Failed to malloc escape buf %d", + (int)(strlen(str) + count + 1)); + } + + ptr = buf; + while (*str) + switch (*str) { + case ',': + case '|': + case '=': + if (!isjson) + *(ptr++) = '\\'; + *(ptr++) = *(str++); + break; + case '"': + if (isjson) + *(ptr++) = '\\'; + *(ptr++) = *(str++); + break; + case '\\': + *(ptr++) = '\\'; + *(ptr++) = *(str++); + break; + default: + *(ptr++) = *(str++); + break; + } + + *ptr = '\0'; + + return buf; +} + +static struct api_data *api_add_extra(struct api_data *root, struct api_data *extra) +{ + struct api_data *tmp; + + if (root) { + if (extra) { + // extra tail + tmp = extra->prev; + + // extra prev = root tail + extra->prev = root->prev; + + // root tail next = extra + root->prev->next = extra; + + // extra tail next = root + tmp->next = root; + + // root prev = extra tail + root->prev = tmp; + } + } else + root = extra; + + return root; +} + +static struct api_data *api_add_data_full(struct api_data *root, char *name, enum api_data_type type, void *data, bool copy_data) +{ + struct api_data *api_data; + + api_data = (struct api_data 
*)malloc(sizeof(struct api_data)); + + api_data->name = strdup(name); + api_data->type = type; + + if (root == NULL) { + root = api_data; + root->prev = root; + root->next = root; + } else { + api_data->prev = root->prev; + root->prev = api_data; + api_data->next = root; + api_data->prev->next = api_data; + } + + api_data->data_was_malloc = copy_data; + + // Avoid crashing on bad data + if (data == NULL) { + api_data->type = type = API_CONST; + data = (void *)NULLSTR; + api_data->data_was_malloc = copy_data = false; + } + + if (!copy_data) + api_data->data = data; + else + switch(type) { + case API_ESCAPE: + case API_STRING: + case API_CONST: + api_data->data = (void *)malloc(strlen((char *)data) + 1); + strcpy((char*)(api_data->data), (char *)data); + break; + case API_UINT8: + /* Most OSs won't really alloc less than 4 */ + api_data->data = malloc(4); + *(uint8_t *)api_data->data = *(uint8_t *)data; + break; + case API_INT16: + /* Most OSs won't really alloc less than 4 */ + api_data->data = malloc(4); + *(int16_t *)api_data->data = *(int16_t *)data; + break; + case API_UINT16: + /* Most OSs won't really alloc less than 4 */ + api_data->data = malloc(4); + *(uint16_t *)api_data->data = *(uint16_t *)data; + break; + case API_INT: + api_data->data = (void *)malloc(sizeof(int)); + *((int *)(api_data->data)) = *((int *)data); + break; + case API_UINT: + api_data->data = (void *)malloc(sizeof(unsigned int)); + *((unsigned int *)(api_data->data)) = *((unsigned int *)data); + break; + case API_UINT32: + api_data->data = (void *)malloc(sizeof(uint32_t)); + *((uint32_t *)(api_data->data)) = *((uint32_t *)data); + break; + case API_HEX32: + api_data->data = (void *)malloc(sizeof(uint32_t)); + *((uint32_t *)(api_data->data)) = *((uint32_t *)data); + break; + case API_UINT64: + api_data->data = (void *)malloc(sizeof(uint64_t)); + *((uint64_t *)(api_data->data)) = *((uint64_t *)data); + break; + case API_INT64: + api_data->data = (void *)malloc(sizeof(int64_t)); + *((int64_t 
*)(api_data->data)) = *((int64_t *)data); + break; + case API_DOUBLE: + case API_ELAPSED: + case API_MHS: + case API_MHTOTAL: + case API_UTILITY: + case API_FREQ: + case API_HS: + case API_DIFF: + case API_PERCENT: + api_data->data = (void *)malloc(sizeof(double)); + *((double *)(api_data->data)) = *((double *)data); + break; + case API_BOOL: + api_data->data = (void *)malloc(sizeof(bool)); + *((bool *)(api_data->data)) = *((bool *)data); + break; + case API_TIMEVAL: + api_data->data = (void *)malloc(sizeof(struct timeval)); + memcpy(api_data->data, data, sizeof(struct timeval)); + break; + case API_TIME: + api_data->data = (void *)malloc(sizeof(time_t)); + *(time_t *)(api_data->data) = *((time_t *)data); + break; + case API_VOLTS: + case API_TEMP: + case API_AVG: + api_data->data = (void *)malloc(sizeof(float)); + *((float *)(api_data->data)) = *((float *)data); + break; + default: + applog(LOG_ERR, "API: unknown1 data type %d ignored", type); + api_data->type = API_STRING; + api_data->data_was_malloc = false; + api_data->data = (void *)UNKNOWN; + break; + } + + return root; +} + +struct api_data *api_add_escape(struct api_data *root, char *name, char *data, bool copy_data) +{ + return api_add_data_full(root, name, API_ESCAPE, (void *)data, copy_data); +} + +struct api_data *api_add_string(struct api_data *root, char *name, char *data, bool copy_data) +{ + return api_add_data_full(root, name, API_STRING, (void *)data, copy_data); +} + +struct api_data *api_add_const(struct api_data *root, char *name, const char *data, bool copy_data) +{ + return api_add_data_full(root, name, API_CONST, (void *)data, copy_data); +} + +struct api_data *api_add_uint8(struct api_data *root, char *name, uint8_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_UINT8, (void *)data, copy_data); +} + +struct api_data *api_add_int16(struct api_data *root, char *name, uint16_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_INT16, (void *)data, 
copy_data); +} + +struct api_data *api_add_uint16(struct api_data *root, char *name, uint16_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_UINT16, (void *)data, copy_data); +} + +struct api_data *api_add_int(struct api_data *root, char *name, int *data, bool copy_data) +{ + return api_add_data_full(root, name, API_INT, (void *)data, copy_data); +} + +struct api_data *api_add_uint(struct api_data *root, char *name, unsigned int *data, bool copy_data) +{ + return api_add_data_full(root, name, API_UINT, (void *)data, copy_data); +} + +struct api_data *api_add_uint32(struct api_data *root, char *name, uint32_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_UINT32, (void *)data, copy_data); +} + +struct api_data *api_add_hex32(struct api_data *root, char *name, uint32_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_HEX32, (void *)data, copy_data); +} + +struct api_data *api_add_uint64(struct api_data *root, char *name, uint64_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_UINT64, (void *)data, copy_data); +} + +struct api_data *api_add_int64(struct api_data *root, char *name, int64_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_INT64, (void *)data, copy_data); +} + +struct api_data *api_add_double(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_DOUBLE, (void *)data, copy_data); +} + +struct api_data *api_add_elapsed(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_ELAPSED, (void *)data, copy_data); +} + +struct api_data *api_add_bool(struct api_data *root, char *name, bool *data, bool copy_data) +{ + return api_add_data_full(root, name, API_BOOL, (void *)data, copy_data); +} + +struct api_data *api_add_timeval(struct api_data *root, char *name, struct timeval *data, bool copy_data) +{ + return api_add_data_full(root, name, 
API_TIMEVAL, (void *)data, copy_data); +} + +struct api_data *api_add_time(struct api_data *root, char *name, time_t *data, bool copy_data) +{ + return api_add_data_full(root, name, API_TIME, (void *)data, copy_data); +} + +struct api_data *api_add_mhs(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_MHS, (void *)data, copy_data); +} + +struct api_data *api_add_mhtotal(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_MHTOTAL, (void *)data, copy_data); +} + +struct api_data *api_add_temp(struct api_data *root, char *name, float *data, bool copy_data) +{ + return api_add_data_full(root, name, API_TEMP, (void *)data, copy_data); +} + +struct api_data *api_add_utility(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_UTILITY, (void *)data, copy_data); +} + +struct api_data *api_add_freq(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_FREQ, (void *)data, copy_data); +} + +struct api_data *api_add_volts(struct api_data *root, char *name, float *data, bool copy_data) +{ + return api_add_data_full(root, name, API_VOLTS, (void *)data, copy_data); +} + +struct api_data *api_add_hs(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_HS, (void *)data, copy_data); +} + +struct api_data *api_add_diff(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_DIFF, (void *)data, copy_data); +} + +struct api_data *api_add_percent(struct api_data *root, char *name, double *data, bool copy_data) +{ + return api_add_data_full(root, name, API_PERCENT, (void *)data, copy_data); +} + +struct api_data *api_add_avg(struct api_data *root, char *name, float *data, bool copy_data) +{ + return api_add_data_full(root, name, API_AVG, (void 
*)data, copy_data); +} + +static void add_item_buf(K_ITEM *item, const char *str) +{ + size_t old_siz, new_siz, siz, ext; + char *buf; + + buf = DATASB(item)->buf; + siz = (size_t)strlen(str); + + old_siz = DATASB(item)->siz; + new_siz = old_siz + siz + 1; // include '\0' + if (DATASB(item)->tot < new_siz) { + ext = (siz + 1) + SBEXTEND - ((siz + 1) % SBEXTEND); + DATASB(item)->buf = buf = realloc(DATASB(item)->buf, DATASB(item)->tot + ext); + if (!buf) { + quithere(1, "OOM buf siz=%d tot=%d ext=%d", + (int)siz, (int)(DATASB(item)->tot), (int)ext); + } + DATASB(item)->tot += ext; + } + memcpy(buf + old_siz, str, siz + 1); + DATASB(item)->siz += siz; +} + +static struct api_data *print_data(struct io_data *io_data, struct api_data *root, bool isjson, bool precom) +{ + // N.B. strings don't use this buffer so 64 is enough (for now) + char buf[64]; + struct api_data *tmp; + bool done, first = true; + char *original, *escape; + K_ITEM *item; + + K_WLOCK(strbufs); + item = k_unlink_head(strbufs); + K_WUNLOCK(strbufs); + + DATASB(item)->siz = 0; + + if (precom) + add_item_buf(item, COMMA); + + if (isjson) + add_item_buf(item, JSON0); + + while (root) { + if (!first) + add_item_buf(item, COMMA); + else + first = false; + + if (isjson) + add_item_buf(item, JSON1); + + add_item_buf(item, root->name); + + if (isjson) + add_item_buf(item, JSON1); + + if (isjson) + add_item_buf(item, ":"); + else + add_item_buf(item, "="); + + first = false; + + done = false; + switch(root->type) { + case API_STRING: + case API_CONST: + if (isjson) + add_item_buf(item, JSON1); + add_item_buf(item, (char *)(root->data)); + if (isjson) + add_item_buf(item, JSON1); + done = true; + break; + case API_ESCAPE: + original = (char *)(root->data); + escape = escape_string((char *)(root->data), isjson); + if (isjson) + add_item_buf(item, JSON1); + add_item_buf(item, escape); + if (isjson) + add_item_buf(item, JSON1); + if (escape != original) + free(escape); + done = true; + break; + case API_UINT8: + 
snprintf(buf, sizeof(buf), "%u", *(uint8_t *)root->data); + break; + case API_INT16: + snprintf(buf, sizeof(buf), "%d", *(int16_t *)root->data); + break; + case API_UINT16: + snprintf(buf, sizeof(buf), "%u", *(uint16_t *)root->data); + break; + case API_INT: + snprintf(buf, sizeof(buf), "%d", *((int *)(root->data))); + break; + case API_UINT: + snprintf(buf, sizeof(buf), "%u", *((unsigned int *)(root->data))); + break; + case API_UINT32: + snprintf(buf, sizeof(buf), "%"PRIu32, *((uint32_t *)(root->data))); + break; + case API_HEX32: + if (isjson) + add_item_buf(item, JSON1); + snprintf(buf, sizeof(buf), "0x%08x", *((uint32_t *)(root->data))); + add_item_buf(item, buf); + if (isjson) + add_item_buf(item, JSON1); + done = true; + break; + case API_UINT64: + snprintf(buf, sizeof(buf), "%"PRIu64, *((uint64_t *)(root->data))); + break; + case API_INT64: + snprintf(buf, sizeof(buf), "%"PRId64, *((int64_t *)(root->data))); + break; + case API_TIME: + snprintf(buf, sizeof(buf), "%lu", *((unsigned long *)(root->data))); + break; + case API_DOUBLE: + snprintf(buf, sizeof(buf), "%f", *((double *)(root->data))); + break; + case API_ELAPSED: + snprintf(buf, sizeof(buf), "%.0f", *((double *)(root->data))); + break; + case API_UTILITY: + case API_FREQ: + case API_MHS: + snprintf(buf, sizeof(buf), "%.2f", *((double *)(root->data))); + break; + case API_VOLTS: + case API_AVG: + snprintf(buf, sizeof(buf), "%.3f", *((float *)(root->data))); + break; + case API_MHTOTAL: + snprintf(buf, sizeof(buf), "%.4f", *((double *)(root->data))); + break; + case API_HS: + snprintf(buf, sizeof(buf), "%.15f", *((double *)(root->data))); + break; + case API_DIFF: + snprintf(buf, sizeof(buf), "%.8f", *((double *)(root->data))); + break; + case API_BOOL: + snprintf(buf, sizeof(buf), "%s", *((bool *)(root->data)) ? 
TRUESTR : FALSESTR); + break; + case API_TIMEVAL: + snprintf(buf, sizeof(buf), "%ld.%06ld", + (long)((struct timeval *)(root->data))->tv_sec, + (long)((struct timeval *)(root->data))->tv_usec); + break; + case API_TEMP: + snprintf(buf, sizeof(buf), "%.2f", *((float *)(root->data))); + break; + case API_PERCENT: + snprintf(buf, sizeof(buf), "%.4f", *((double *)(root->data)) * 100.0); + break; + default: + applog(LOG_ERR, "API: unknown2 data type %d ignored", root->type); + if (isjson) + add_item_buf(item, JSON1); + add_item_buf(item, UNKNOWN); + if (isjson) + add_item_buf(item, JSON1); + done = true; + break; + } + + if (!done) + add_item_buf(item, buf); + + free(root->name); + if (root->data_was_malloc) + free(root->data); + + if (root->next == root) { + free(root); + root = NULL; + } else { + tmp = root; + root = tmp->next; + root->prev = tmp->prev; + root->prev->next = root; + free(tmp); + } + } + + if (isjson) + add_item_buf(item, JSON5); + else + add_item_buf(item, SEPSTR); + + io_add(io_data, DATASB(item)->buf); + + K_WLOCK(strbufs); + k_add_head(strbufs, item); + K_WUNLOCK(strbufs); + + return root; +} + +#define DRIVER_COUNT_DRV(X) if (devices[i]->drv->drv_id == DRIVER_##X) \ + count++; + +#ifdef HAVE_AN_ASIC +static int numascs(void) +{ + int count = 0; + int i; + + rd_lock(&devices_lock); + for (i = 0; i < total_devices; i++) { + ASIC_PARSE_COMMANDS(DRIVER_COUNT_DRV) + } + rd_unlock(&devices_lock); + return count; +} + +static int ascdevice(int ascid) +{ + int count = 0; + int i; + + rd_lock(&devices_lock); + for (i = 0; i < total_devices; i++) { + ASIC_PARSE_COMMANDS(DRIVER_COUNT_DRV) + if (count == (ascid + 1)) + goto foundit; + } + + rd_unlock(&devices_lock); + return -1; + +foundit: + + rd_unlock(&devices_lock); + return i; +} +#endif + +#ifdef HAVE_AN_FPGA +static int numpgas(void) +{ + int count = 0; + int i; + + rd_lock(&devices_lock); + for (i = 0; i < total_devices; i++) { + FPGA_PARSE_COMMANDS(DRIVER_COUNT_DRV) + } + rd_unlock(&devices_lock); + 
	// Tail of the preceding per-driver counting loop (function opens before this chunk).
	return count;
}

/*
 * Map an API PGA id (0 .. numpgas()-1) onto its index in the global
 * devices array.  Returns -1 if pgaid is out of range.
 * Takes devices_lock as a reader while scanning.
 */
static int pgadevice(int pgaid)
{
	int count = 0;
	int i;

	rd_lock(&devices_lock);
	for (i = 0; i < total_devices; i++) {
		// Macro presumably bumps 'count' for each FPGA-driver device — confirm against macro definition
		FPGA_PARSE_COMMANDS(DRIVER_COUNT_DRV)
		if (count == (pgaid + 1))
			goto foundit;
	}

	rd_unlock(&devices_lock);
	return -1;

foundit:

	rd_unlock(&devices_lock);
	return i;
}
#endif

// All replies (except BYE and RESTART) start with a message
// thus for JSON, message() inserts JSON_START at the front
// and send_result() adds JSON_END at the end
static void message(struct io_data *io_data, int messageid, int paramid, char *param2, bool isjson)
{
	struct api_data *root = NULL;
	char buf[TMPBUFSIZ];
	char severity[2];	// single severity letter + NUL
#ifdef HAVE_AN_ASIC
	int asc;
#endif
#ifdef HAVE_AN_FPGA
	int pga;
#endif
	int i;

	if (isjson)
		io_add(io_data, JSON_START JSON_STATUS);

	// Scan the message table; SEVERITY_FAIL terminates the table.
	for (i = 0; codes[i].severity != SEVERITY_FAIL; i++) {
		if (codes[i].code == messageid) {
			switch (codes[i].severity) {
			case SEVERITY_WARN:
				severity[0] = 'W';
				break;
			case SEVERITY_INFO:
				severity[0] = 'I';
				break;
			case SEVERITY_SUCC:
				severity[0] = 'S';
				break;
			case SEVERITY_ERR:
			default:
				severity[0] = 'E';
				break;
			}
			severity[1] = '\0';

			// Each PARAM_* kind tells us which arguments the
			// table's printf-style description expects.
			switch(codes[i].params) {
			case PARAM_PGA:
			case PARAM_ASC:
			case PARAM_PID:
			case PARAM_INT:
				sprintf(buf, codes[i].description, paramid);
				break;
			case PARAM_POOL:
				sprintf(buf, codes[i].description, paramid, pools[paramid]->rpc_url);
				break;
#ifdef HAVE_AN_FPGA
			case PARAM_PGAMAX:
				pga = numpgas();
				sprintf(buf, codes[i].description, paramid, pga - 1);
				break;
#endif
#ifdef HAVE_AN_ASIC
			case PARAM_ASCMAX:
				asc = numascs();
				sprintf(buf, codes[i].description, paramid, asc - 1);
				break;
#endif
			case PARAM_PMAX:
				sprintf(buf, codes[i].description, total_pools);
				break;
			case PARAM_POOLMAX:
				sprintf(buf, codes[i].description, paramid, total_pools - 1);
				break;
			case PARAM_DMAX:
				// Argument list varies with which device classes are compiled in.
#ifdef HAVE_AN_ASIC
				asc = numascs();
#endif
#ifdef HAVE_AN_FPGA
				pga = numpgas();
#endif

				sprintf(buf, codes[i].description
#ifdef HAVE_AN_ASIC
					, asc
#endif
#ifdef HAVE_AN_FPGA
					, pga
#endif
					);
				break;
			case PARAM_CMD:
				sprintf(buf, codes[i].description, JSON_COMMAND);
				break;
			case PARAM_STR:
				sprintf(buf, codes[i].description, param2);
				break;
			case PARAM_BOTH:
				sprintf(buf, codes[i].description, paramid, param2);
				break;
			case PARAM_BOOL:
				sprintf(buf, codes[i].description, paramid ? TRUESTR : FALSESTR);
				break;
			case PARAM_SET:
				sprintf(buf, codes[i].description, param2, paramid);
				break;
			case PARAM_NONE:
			default:
				strcpy(buf, codes[i].description);
			}

			root = api_add_string(root, _STATUS, severity, false);
			root = api_add_time(root, "When", &when, false);
			root = api_add_int(root, "Code", &messageid, false);
			root = api_add_escape(root, "Msg", buf, false);
			root = api_add_escape(root, "Description", opt_api_description, false);

			root = print_data(io_data, root, isjson, false);
			if (isjson)
				io_add(io_data, JSON_CLOSE);
			return;
		}
	}

	// Unknown message id: emit a fatal-status reply with code -1.
	root = api_add_string(root, _STATUS, "F", false);
	root = api_add_time(root, "When", &when, false);
	int id = -1;
	root = api_add_int(root, "Code", &id, false);
	sprintf(buf, "%d", messageid);
	root = api_add_escape(root, "Msg", buf, false);
	root = api_add_escape(root, "Description", opt_api_description, false);

	root = print_data(io_data, root, isjson, false);
	if (isjson)
		io_add(io_data, JSON_CLOSE);
}

#if LOCK_TRACKING
// Debug-only lock accounting: records every get/got/try/did/unlock per lock.

#define LOCK_FMT_FFL " - called from %s %s():%d"

#define LOCKMSG(fmt, ...) fprintf(stderr, "APILOCK: " fmt "\n", ##__VA_ARGS__)
#define LOCKMSGMORE(fmt, ...) fprintf(stderr, " " fmt "\n", ##__VA_ARGS__)
#define LOCKMSGFFL(fmt, ...) fprintf(stderr, "APILOCK: " fmt LOCK_FMT_FFL "\n", ##__VA_ARGS__, file, func, linenum)
#define LOCKMSGFLUSH() fflush(stderr)

// One recorded lock event: who asked, from where, and when.
typedef struct lockstat {
	uint64_t lock_id;
	const char *file;
	const char *func;
	int linenum;
	struct timeval tv;
} LOCKSTAT;

// Doubly-linked list node holding one LOCKSTAT.
typedef struct lockline {
	struct lockline *prev;
	struct lockstat *stat;
	struct lockline *next;
} LOCKLINE;

// Aggregate counters and pending-event lists for one tracked lock.
typedef struct lockinfo {
	void *lock;
	enum cglock_typ typ;
	const char *file;
	const char *func;
	int linenum;
	uint64_t gets;
	uint64_t gots;
	uint64_t tries;
	uint64_t dids;
	uint64_t didnts; // should be tries - dids
	uint64_t unlocks;
	LOCKSTAT lastgot;
	LOCKLINE *lockgets;	// gets not yet marked got
	LOCKLINE *locktries;	// tries not yet marked did/didnt
} LOCKINFO;

// Singly-linked list of all tracked locks.
typedef struct locklist {
	LOCKINFO *info;
	struct locklist *next;
} LOCKLIST;

static uint64_t lock_id = 1;

static LOCKLIST *lockhead;

// Print a timestamp header line for the lock-tracking log.
static void lockmsgnow()
{
	struct timeval now;
	struct tm *tm;
	time_t dt;

	cgtime(&now);

	dt = now.tv_sec;
	tm = localtime(&dt);

	LOCKMSG("%d-%02d-%02d %02d:%02d:%02d",
		tm->tm_year + 1900,
		tm->tm_mon + 1,
		tm->tm_mday,
		tm->tm_hour,
		tm->tm_min,
		tm->tm_sec);
}

// Allocate and push a new tracking record for 'lock' onto lockhead.
// Aborts the process on OOM (quithere).
static LOCKLIST *newlock(void *lock, enum cglock_typ typ, const char *file, const char *func, const int linenum)
{
	LOCKLIST *list;

	list = calloc(1, sizeof(*list));
	if (!list)
		quithere(1, "OOM list");
	list->info = calloc(1, sizeof(*(list->info)));
	if (!list->info)
		quithere(1, "OOM info");
	list->next = lockhead;
	lockhead = list;

	list->info->lock = lock;
	list->info->typ = typ;
	list->info->file = file;
	list->info->func = func;
	list->info->linenum = linenum;

	return list;
}

// Find the tracking record for 'lock', creating it on first sight.
// Caller must hold lockstat_lock.
static LOCKINFO *findlock(void *lock, enum cglock_typ typ, const char *file, const char *func, const int linenum)
{
	LOCKLIST *look;

	look = lockhead;
	while (look) {
		if (look->info->lock == lock)
			break;
		look = look->next;
	}

	if (!look)
		look = newlock(lock, typ, file, func, linenum);

	return look->info;
}
+static void addgettry(LOCKINFO *info, uint64_t id, const char *file, const char *func, const int linenum, bool get) +{ + LOCKSTAT *stat; + LOCKLINE *line; + + stat = calloc(1, sizeof(*stat)); + if (!stat) + quithere(1, "OOM stat"); + line = calloc(1, sizeof(*line)); + if (!line) + quithere(1, "OOM line"); + + if (get) + info->gets++; + else + info->tries++; + + stat->lock_id = id; + stat->file = file; + stat->func = func; + stat->linenum = linenum; + cgtime(&stat->tv); + + line->stat = stat; + + if (get) { + line->next = info->lockgets; + if (info->lockgets) + info->lockgets->prev = line; + info->lockgets = line; + } else { + line->next = info->locktries; + if (info->locktries) + info->locktries->prev = line; + info->locktries = line; + } +} + +static void markgotdid(LOCKINFO *info, uint64_t id, const char *file, const char *func, const int linenum, bool got, int ret) +{ + LOCKLINE *line; + + if (got) + info->gots++; + else { + if (ret == 0) + info->dids++; + else + info->didnts++; + } + + if (got || ret == 0) { + info->lastgot.lock_id = id; + info->lastgot.file = file; + info->lastgot.func = func; + info->lastgot.linenum = linenum; + cgtime(&info->lastgot.tv); + } + + if (got) + line = info->lockgets; + else + line = info->locktries; + while (line) { + if (line->stat->lock_id == id) + break; + line = line->next; + } + + if (!line) { + lockmsgnow(); + LOCKMSGFFL("ERROR attempt to mark a lock as '%s' that wasn't '%s' id=%"PRIu64, + got ? "got" : "did/didnt", got ? "get" : "try", id); + } + + // Unlink it + if (line->prev) + line->prev->next = line->next; + if (line->next) + line->next->prev = line->prev; + + if (got) { + if (info->lockgets == line) + info->lockgets = line->next; + } else { + if (info->locktries == line) + info->locktries = line->next; + } + + free(line->stat); + free(line); +} + +// Yes this uses locks also ... ;/ +static void locklock() +{ + if (unlikely(pthread_mutex_lock(&lockstat_lock))) + quithere(1, "WTF MUTEX ERROR ON LOCK! 
errno=%d", errno); +} + +static void lockunlock() +{ + if (unlikely(pthread_mutex_unlock(&lockstat_lock))) + quithere(1, "WTF MUTEX ERROR ON UNLOCK! errno=%d", errno); +} + +uint64_t api_getlock(void *lock, const char *file, const char *func, const int linenum) +{ + LOCKINFO *info; + uint64_t id; + + locklock(); + + info = findlock(lock, CGLOCK_UNKNOWN, file, func, linenum); + id = lock_id++; + addgettry(info, id, file, func, linenum, true); + + lockunlock(); + + return id; +} + +void api_gotlock(uint64_t id, void *lock, const char *file, const char *func, const int linenum) +{ + LOCKINFO *info; + + locklock(); + + info = findlock(lock, CGLOCK_UNKNOWN, file, func, linenum); + markgotdid(info, id, file, func, linenum, true, 0); + + lockunlock(); +} + +uint64_t api_trylock(void *lock, const char *file, const char *func, const int linenum) +{ + LOCKINFO *info; + uint64_t id; + + locklock(); + + info = findlock(lock, CGLOCK_UNKNOWN, file, func, linenum); + id = lock_id++; + addgettry(info, id, file, func, linenum, false); + + lockunlock(); + + return id; +} + +void api_didlock(uint64_t id, int ret, void *lock, const char *file, const char *func, const int linenum) +{ + LOCKINFO *info; + + locklock(); + + info = findlock(lock, CGLOCK_UNKNOWN, file, func, linenum); + markgotdid(info, id, file, func, linenum, false, ret); + + lockunlock(); +} + +void api_gunlock(void *lock, const char *file, const char *func, const int linenum) +{ + LOCKINFO *info; + + locklock(); + + info = findlock(lock, CGLOCK_UNKNOWN, file, func, linenum); + info->unlocks++; + + lockunlock(); +} + +void api_initlock(void *lock, enum cglock_typ typ, const char *file, const char *func, const int linenum) +{ + locklock(); + + findlock(lock, typ, file, func, linenum); + + lockunlock(); +} + +void dsp_det(char *msg, LOCKSTAT *stat) +{ + struct tm *tm; + time_t dt; + + dt = stat->tv.tv_sec; + tm = localtime(&dt); + + LOCKMSGMORE("%s id=%"PRIu64" by %s %s():%d at %d-%02d-%02d %02d:%02d:%02d", + msg, + 
stat->lock_id, + stat->file, + stat->func, + stat->linenum, + tm->tm_year + 1900, + tm->tm_mon + 1, + tm->tm_mday, + tm->tm_hour, + tm->tm_min, + tm->tm_sec); +} + +void dsp_lock(LOCKINFO *info) +{ + LOCKLINE *line; + char *status; + + LOCKMSG("Lock %p created by %s %s():%d", + info->lock, + info->file, + info->func, + info->linenum); + LOCKMSGMORE("gets:%"PRIu64" gots:%"PRIu64" tries:%"PRIu64 + " dids:%"PRIu64" didnts:%"PRIu64" unlocks:%"PRIu64, + info->gets, + info->gots, + info->tries, + info->dids, + info->didnts, + info->unlocks); + + if (info->gots > 0 || info->dids > 0) { + if (info->unlocks < info->gots + info->dids) + status = "Last got/did still HELD"; + else + status = "Last got/did (idle)"; + + dsp_det(status, &(info->lastgot)); + } else + LOCKMSGMORE("... unused ..."); + + if (info->lockgets) { + LOCKMSGMORE("BLOCKED gets (%"PRIu64")", info->gets - info->gots); + line = info->lockgets; + while (line) { + dsp_det("", line->stat); + line = line->next; + } + } else + LOCKMSGMORE("no blocked gets"); + + if (info->locktries) { + LOCKMSGMORE("BLOCKED tries (%"PRIu64")", info->tries - info->dids - info->didnts); + line = info->lockgets; + while (line) { + dsp_det("", line->stat); + line = line->next; + } + } else + LOCKMSGMORE("no blocked tries"); +} + +void show_locks() +{ + LOCKLIST *list; + + locklock(); + + lockmsgnow(); + + list = lockhead; + if (!list) + LOCKMSG("no locks?!?\n"); + else { + while (list) { + dsp_lock(list->info); + list = list->next; + } + } + + LOCKMSGFLUSH(); + + lockunlock(); +} +#endif + +static void lockstats(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ +#if LOCK_TRACKING + show_locks(); + message(io_data, MSG_LOCKOK, 0, NULL, isjson); +#else + message(io_data, MSG_LOCKDIS, 0, NULL, isjson); +#endif +} + +static void apiversion(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + 
struct api_data *root = NULL; + bool io_open; + + message(io_data, MSG_VERSION, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? COMSTR JSON_VERSION : _VERSION COMSTR); + + root = api_add_string(root, "BMMiner", VERSION, false); + root = api_add_const(root, "API", APIVERSION, false); + root = api_add_string(root, "Miner", g_miner_version, false); + root = api_add_string(root, "CompileTime", g_miner_compiletime, false); + root = api_add_string(root, "Type", g_miner_type, false); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static void minerconfig(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open; + int asccount = 0; + int pgacount = 0; + +#ifdef HAVE_AN_ASIC + asccount = numascs(); +#endif + +#ifdef HAVE_AN_FPGA + pgacount = numpgas(); +#endif + + message(io_data, MSG_MINECONFIG, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? 
COMSTR JSON_MINECONFIG : _MINECONFIG COMSTR); + + root = api_add_int(root, "ASC Count", &asccount, false); + root = api_add_int(root, "PGA Count", &pgacount, false); + root = api_add_int(root, "Pool Count", &total_pools, false); + root = api_add_const(root, "Strategy", strategies[pool_strategy].s, false); + root = api_add_int(root, "Log Interval", &opt_log_interval, false); + root = api_add_const(root, "Device Code", DEVICECODE, false); + root = api_add_const(root, "OS", OSINFO, false); + root = api_add_bool(root, "Failover-Only", &opt_fail_only, false); + root = api_add_int(root, "ScanTime", &opt_scantime, false); + root = api_add_int(root, "Queue", &opt_queue, false); + root = api_add_int(root, "Expiry", &opt_expiry, false); +#ifdef USE_USBUTILS + if (hotplug_time == 0) + root = api_add_const(root, "Hotplug", DISABLED, false); + else + root = api_add_int(root, "Hotplug", &hotplug_time, false); +#else + root = api_add_const(root, "Hotplug", NONE, false); +#endif + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static const char *status2str(enum alive status) +{ + switch (status) { + case LIFE_WELL: + return ALIVE; + case LIFE_SICK: + return SICK; + case LIFE_DEAD: + return DEAD; + case LIFE_NOSTART: + return NOSTART; + case LIFE_INIT: + return INIT; + default: + return UNKNOWN; + } +} + +#ifdef HAVE_AN_ASIC +static void ascstatus(struct io_data *io_data, int asc, bool isjson, bool precom) +{ + struct api_data *root = NULL; + char *enabled; + char *status; + int numasc = numascs(); + + if (numasc > 0 && asc >= 0 && asc < numasc) { + int dev = ascdevice(asc); + if (dev < 0) // Should never happen + return; + + struct cgpu_info *cgpu = get_devices(dev); + float temp = cgpu->temp; + double dev_runtime; + + dev_runtime = cgpu_runtime(cgpu); + + cgpu->utility = cgpu->accepted / dev_runtime * 60; + + if (cgpu->deven != DEV_DISABLED) + enabled = (char *)YES; + else + enabled = (char *)NO; + + status = (char 
*)status2str(cgpu->status); + + root = api_add_int(root, "ASC", &asc, false); + root = api_add_string(root, "Name", cgpu->drv->name, false); + root = api_add_int(root, "ID", &(cgpu->device_id), false); + root = api_add_string(root, "Enabled", enabled, false); + root = api_add_string(root, "Status", status, false); + root = api_add_temp(root, "Temperature", &temp, false); + double mhs = cgpu->total_mhashes / dev_runtime; + root = api_add_mhs(root, "MHS av", &mhs, false); + char mhsname[27]; + sprintf(mhsname, "MHS %ds", opt_log_interval); + root = api_add_mhs(root, mhsname, &(cgpu->rolling), false); + root = api_add_int(root, "Accepted", &(cgpu->accepted), false); + root = api_add_int(root, "Rejected", &(cgpu->rejected), false); + root = api_add_int(root, "Hardware Errors", &(cgpu->hw_errors), false); + root = api_add_utility(root, "Utility", &(cgpu->utility), false); + int last_share_pool = cgpu->last_share_pool_time > 0 ? + cgpu->last_share_pool : -1; + root = api_add_int(root, "Last Share Pool", &last_share_pool, false); + root = api_add_time(root, "Last Share Time", &(cgpu->last_share_pool_time), false); + root = api_add_mhtotal(root, "Total MH", &(cgpu->total_mhashes), false); + root = api_add_int64(root, "Diff1 Work", &(cgpu->diff1), false); + root = api_add_diff(root, "Difficulty Accepted", &(cgpu->diff_accepted), false); + root = api_add_diff(root, "Difficulty Rejected", &(cgpu->diff_rejected), false); + root = api_add_diff(root, "Last Share Difficulty", &(cgpu->last_share_diff), false); +#ifdef USE_USBUTILS + root = api_add_bool(root, "No Device", &(cgpu->usbinfo.nodev), false); +#endif + root = api_add_time(root, "Last Valid Work", &(cgpu->last_device_valid_work), false); + double hwp = (cgpu->hw_errors + cgpu->diff1) ? + (double)(cgpu->hw_errors) / (double)(cgpu->hw_errors + cgpu->diff1) : 0; + root = api_add_percent(root, "Device Hardware%", &hwp, false); + double rejp = cgpu->diff1 ? 
+ (double)(cgpu->diff_rejected) / (double)(cgpu->diff1) : 0; + root = api_add_percent(root, "Device Rejected%", &rejp, false); + root = api_add_elapsed(root, "Device Elapsed", &(dev_runtime), false); + + root = print_data(io_data, root, isjson, precom); + } +} +#endif + +#ifdef HAVE_AN_FPGA +static void pgastatus(struct io_data *io_data, int pga, bool isjson, bool precom) +{ + struct api_data *root = NULL; + char *enabled; + char *status; + int numpga = numpgas(); + + if (numpga > 0 && pga >= 0 && pga < numpga) { + int dev = pgadevice(pga); + if (dev < 0) // Should never happen + return; + + struct cgpu_info *cgpu = get_devices(dev); + double frequency = 0; + float temp = cgpu->temp; + struct timeval now; + double dev_runtime; + + if (cgpu->dev_start_tv.tv_sec == 0) + dev_runtime = total_secs; + else { + cgtime(&now); + dev_runtime = tdiff(&now, &(cgpu->dev_start_tv)); + } + + if (dev_runtime < 1.0) + dev_runtime = 1.0; + +#ifdef USE_MODMINER + if (cgpu->drv->drv_id == DRIVER_modminer) + frequency = cgpu->clock; +#endif + + cgpu->utility = cgpu->accepted / dev_runtime * 60; + + if (cgpu->deven != DEV_DISABLED) + enabled = (char *)YES; + else + enabled = (char *)NO; + + status = (char *)status2str(cgpu->status); + + root = api_add_int(root, "PGA", &pga, false); + root = api_add_string(root, "Name", cgpu->drv->name, false); + root = api_add_int(root, "ID", &(cgpu->device_id), false); + root = api_add_string(root, "Enabled", enabled, false); + root = api_add_string(root, "Status", status, false); + root = api_add_temp(root, "Temperature", &temp, false); + double mhs = cgpu->total_mhashes / dev_runtime; + root = api_add_mhs(root, "MHS av", &mhs, false); + char mhsname[27]; + sprintf(mhsname, "MHS %ds", opt_log_interval); + root = api_add_mhs(root, mhsname, &(cgpu->rolling), false); + root = api_add_int(root, "Accepted", &(cgpu->accepted), false); + root = api_add_int(root, "Rejected", &(cgpu->rejected), false); + root = api_add_int(root, "Hardware Errors", 
&(cgpu->hw_errors), false); + root = api_add_utility(root, "Utility", &(cgpu->utility), false); + int last_share_pool = cgpu->last_share_pool_time > 0 ? + cgpu->last_share_pool : -1; + root = api_add_int(root, "Last Share Pool", &last_share_pool, false); + root = api_add_time(root, "Last Share Time", &(cgpu->last_share_pool_time), false); + root = api_add_mhtotal(root, "Total MH", &(cgpu->total_mhashes), false); + root = api_add_freq(root, "Frequency", &frequency, false); + root = api_add_int64(root, "Diff1 Work", &(cgpu->diff1), false); + root = api_add_diff(root, "Difficulty Accepted", &(cgpu->diff_accepted), false); + root = api_add_diff(root, "Difficulty Rejected", &(cgpu->diff_rejected), false); + root = api_add_diff(root, "Last Share Difficulty", &(cgpu->last_share_diff), false); +#ifdef USE_USBUTILS + root = api_add_bool(root, "No Device", &(cgpu->usbinfo.nodev), false); +#endif + root = api_add_time(root, "Last Valid Work", &(cgpu->last_device_valid_work), false); + double hwp = (cgpu->hw_errors + cgpu->diff1) ? + (double)(cgpu->hw_errors) / (double)(cgpu->hw_errors + cgpu->diff1) : 0; + root = api_add_percent(root, "Device Hardware%", &hwp, false); + double rejp = cgpu->diff1 ? 
+ (double)(cgpu->diff_rejected) / (double)(cgpu->diff1) : 0; + root = api_add_percent(root, "Device Rejected%", &rejp, false); + root = api_add_elapsed(root, "Device Elapsed", &(dev_runtime), false); + + root = print_data(io_data, root, isjson, precom); + } +} +#endif + +static void devstatus(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + bool io_open = false; + int devcount = 0; + int numasc = 0; + int numpga = 0; + int i; + +#ifdef HAVE_AN_ASIC + numasc = numascs(); +#endif + +#ifdef HAVE_AN_FPGA + numpga = numpgas(); +#endif + + if (numpga == 0 && numasc == 0) { + message(io_data, MSG_NODEVS, 0, NULL, isjson); + return; + } + + + message(io_data, MSG_DEVS, 0, NULL, isjson); + if (isjson) + io_open = io_add(io_data, COMSTR JSON_DEVS); + +#ifdef HAVE_AN_ASIC + if (numasc > 0) { + for (i = 0; i < numasc; i++) { + ascstatus(io_data, i, isjson, isjson && devcount > 0); + + devcount++; + } + } +#endif + +#ifdef HAVE_AN_FPGA + if (numpga > 0) { + for (i = 0; i < numpga; i++) { + pgastatus(io_data, i, isjson, isjson && devcount > 0); + + devcount++; + } + } +#endif + + if (isjson && io_open) + io_close(io_data); +} + +static void edevstatus(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + bool io_open = false; + int devcount = 0; + int numasc = 0; + int numpga = 0; + int i; +#ifdef USE_USBUTILS + time_t howoldsec = 0; +#endif + +#ifdef HAVE_AN_ASIC + numasc = numascs(); +#endif + +#ifdef HAVE_AN_FPGA + numpga = numpgas(); +#endif + + if (numpga == 0 && numasc == 0) { + message(io_data, MSG_NODEVS, 0, NULL, isjson); + return; + } + +#ifdef USE_USBUTILS + if (param && *param) + howoldsec = (time_t)atoi(param); +#endif + + message(io_data, MSG_DEVS, 0, NULL, isjson); + if (isjson) + io_open = io_add(io_data, COMSTR JSON_DEVS); + +#ifdef HAVE_AN_ASIC + if (numasc > 0) { + for (i = 0; i < numasc; i++) { +#ifdef 
USE_USBUTILS + int dev = ascdevice(i); + if (dev < 0) // Should never happen + continue; + + struct cgpu_info *cgpu = get_devices(dev); + if (!cgpu) + continue; + if (cgpu->blacklisted) + continue; + if (cgpu->usbinfo.nodev) { + if (howoldsec <= 0) + continue; + if ((when - cgpu->usbinfo.last_nodev.tv_sec) >= howoldsec) + continue; + } +#endif + + ascstatus(io_data, i, isjson, isjson && devcount > 0); + + devcount++; + } + } +#endif + +#ifdef HAVE_AN_FPGA + if (numpga > 0) { + for (i = 0; i < numpga; i++) { +#ifdef USE_USBUTILS + int dev = pgadevice(i); + if (dev < 0) // Should never happen + continue; + + struct cgpu_info *cgpu = get_devices(dev); + if (!cgpu) + continue; + if (cgpu->blacklisted) + continue; + if (cgpu->usbinfo.nodev) { + if (howoldsec <= 0) + continue; + if ((when - cgpu->usbinfo.last_nodev.tv_sec) >= howoldsec) + continue; + } +#endif + + pgastatus(io_data, i, isjson, isjson && devcount > 0); + + devcount++; + } + } +#endif + + if (isjson && io_open) + io_close(io_data); +} + +#ifdef HAVE_AN_FPGA +static void pgadev(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + bool io_open = false; + int numpga = numpgas(); + int id; + + if (numpga == 0) { + message(io_data, MSG_PGANON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numpga) { + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + message(io_data, MSG_PGADEV, id, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR JSON_PGA); + + pgastatus(io_data, id, isjson, false); + + if (isjson && io_open) + io_close(io_data); +} + +static void pgaenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + int numpga = numpgas(); + struct thr_info *thr; + int pga; + int id; + int i; + + if (numpga == 
0) { + message(io_data, MSG_PGANON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numpga) { + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + int dev = pgadevice(id); + if (dev < 0) { // Should never happen + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + + applog(LOG_DEBUG, "API: request to pgaenable pgaid %d device %d %s%u", + id, dev, cgpu->drv->name, cgpu->device_id); + + if (cgpu->deven != DEV_DISABLED) { + message(io_data, MSG_PGALRENA, id, NULL, isjson); + return; + } + +#if 0 /* A DISABLED device wont change status FIXME: should disabling make it WELL? */ + if (cgpu->status != LIFE_WELL) { + message(io_data, MSG_PGAUNW, id, NULL, isjson); + return; + } +#endif + +#ifdef USE_USBUTILS + if (cgpu->usbinfo.nodev) { + message(io_data, MSG_PGAUSBNODEV, id, NULL, isjson); + return; + } +#endif + + for (i = 0; i < mining_threads; i++) { + thr = get_thread(i); + pga = thr->cgpu->cgminer_id; + if (pga == dev) { + cgpu->deven = DEV_ENABLED; + applog(LOG_DEBUG, "API: Pushing sem post to thread %d", thr->id); + cgsem_post(&thr->sem); + } + } + + message(io_data, MSG_PGAENA, id, NULL, isjson); +} + +static void pgadisable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + int numpga = numpgas(); + int id; + + if (numpga == 0) { + message(io_data, MSG_PGANON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numpga) { + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + int dev = pgadevice(id); + if (dev < 0) { // Should never happen + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + + applog(LOG_DEBUG, 
"API: request to pgadisable pgaid %d device %d %s%u", + id, dev, cgpu->drv->name, cgpu->device_id); + + if (cgpu->deven == DEV_DISABLED) { + message(io_data, MSG_PGALRDIS, id, NULL, isjson); + return; + } + + cgpu->deven = DEV_DISABLED; + + message(io_data, MSG_PGADIS, id, NULL, isjson); +} + +static void pgaidentify(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + struct device_drv *drv; + int numpga = numpgas(); + int id; + + if (numpga == 0) { + message(io_data, MSG_PGANON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numpga) { + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + int dev = pgadevice(id); + if (dev < 0) { // Should never happen + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + drv = cgpu->drv; + + if (!drv->identify_device) + message(io_data, MSG_PGANOID, id, NULL, isjson); + else { + drv->identify_device(cgpu); + message(io_data, MSG_PGAIDENT, id, NULL, isjson); + } +} +#endif + +static void poolstatus(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open = false; + char *status, *lp; + int i; + int hour = 0; + int minute = 0; + int second = 0; + + char lasttime[256] = {0}; + long timediff = 0; + + if (total_pools == 0) { + message(io_data, MSG_NOPOOL, 0, NULL, isjson); + return; + } + + message(io_data, MSG_POOL, 0, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR JSON_POOLS); + + for (i = 0; i < total_pools; i++) { + struct pool *pool = pools[i]; + + if (pool->removed) + continue; + + switch (pool->enabled) { + case POOL_DISABLED: + status = (char *)DISABLED; + break; + case POOL_REJECTING: + status = (char *)REJECTING; + break; + 
case POOL_ENABLED: + if (pool->idle) + status = (char *)DEAD; + else + status = (char *)ALIVE; + break; + default: + status = (char *)UNKNOWN; + break; + } + + if (pool->hdr_path) + lp = (char *)YES; + else + lp = (char *)NO; + + if(pool->last_share_time <= 0) { + strcpy(lasttime, "0"); + } else { + timediff = time(NULL) - pool->last_share_time; + if(timediff < 0) + timediff = 0; + + hour = timediff / 3600; + minute = (timediff % 3600) / 60; + second = (timediff % 3600) % 60; + sprintf(lasttime, "%d:%02d:%02d", hour, minute, second); + } + + root = api_add_int(root, "POOL", &i, false); + root = api_add_escape(root, "URL", pool->rpc_url, false); + root = api_add_string(root, "Status", status, false); + root = api_add_int(root, "Priority", &(pool->prio), false); + root = api_add_int(root, "Quota", &pool->quota, false); + root = api_add_string(root, "Long Poll", lp, false); + root = api_add_uint(root, "Getworks", &(pool->getwork_requested), false); + root = api_add_int64(root, "Accepted", &(pool->accepted), false); + root = api_add_int64(root, "Rejected", &(pool->rejected), false); + //root = api_add_int(root, "Works", &pool->works, false); + root = api_add_uint(root, "Discarded", &(pool->discarded_work), false); + root = api_add_uint(root, "Stale", &(pool->stale_shares), false); + root = api_add_uint(root, "Get Failures", &(pool->getfail_occasions), false); + root = api_add_uint(root, "Remote Failures", &(pool->remotefail_occasions), false); + root = api_add_escape(root, "User", pool->rpc_user, false); + //root = api_add_time(root, "Last Share Time", &(pool->last_share_time), false); + root = api_add_string(root, "Last Share Time", lasttime, false); + root = api_add_string(root, "Diff", pool->diff, false); + root = api_add_int64(root, "Diff1 Shares", &(pool->diff1), false); + if (pool->rpc_proxy) { + root = api_add_const(root, "Proxy Type", proxytype(pool->rpc_proxytype), false); + root = api_add_escape(root, "Proxy", pool->rpc_proxy, false); + } else { + root = 
api_add_const(root, "Proxy Type", BLANK, false); + root = api_add_const(root, "Proxy", BLANK, false); + } + root = api_add_diff(root, "Difficulty Accepted", &(pool->diff_accepted), false); + root = api_add_diff(root, "Difficulty Rejected", &(pool->diff_rejected), false); + root = api_add_diff(root, "Difficulty Stale", &(pool->diff_stale), false); + root = api_add_diff(root, "Last Share Difficulty", &(pool->last_share_diff), false); + root = api_add_bool(root, "Has Stratum", &(pool->has_stratum), false); + root = api_add_bool(root, "Stratum Active", &(pool->stratum_active), false); + if (pool->stratum_active) + root = api_add_escape(root, "Stratum URL", pool->stratum_url, false); + else + root = api_add_const(root, "Stratum URL", BLANK, false); + root = api_add_bool(root, "Has GBT", &(pool->has_gbt), false); + root = api_add_uint64(root, "Best Share", &(pool->best_diff), true); + double rejp = (pool->diff_accepted + pool->diff_rejected + pool->diff_stale) ? + (double)(pool->diff_rejected) / (double)(pool->diff_accepted + pool->diff_rejected + pool->diff_stale) : 0; + root = api_add_percent(root, "Pool Rejected%", &rejp, false); + double stalep = (pool->diff_accepted + pool->diff_rejected + pool->diff_stale) ? 
+ (double)(pool->diff_stale) / (double)(pool->diff_accepted + pool->diff_rejected + pool->diff_stale) : 0; + root = api_add_percent(root, "Pool Stale%", &stalep, false); + + root = print_data(io_data, root, isjson, isjson && (i > 0)); + } + + if (isjson && io_open) + io_close(io_data); +} + +static void lcddisplay(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open = false; + char *status, *lp; + double ghs; + + char szindex[32] = {0}; + char szfan[32] = {0}; + char sztemp[32] = {0}; + char szpool[32] = {0}; + char szuser[32] = {0}; + + struct pool *pool = current_pool(); + + message(io_data, MSG_POOL, 0, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR JSON_POOLS); + + ghs = total_mhashes_done / 1000 / total_secs; + + strcpy(szindex, "0"); + root = api_add_string(root, "LCD", szindex, false); + + root = api_add_mhs(root, "GHS5s", &(g_displayed_rolling), false); + root = api_add_mhs(root, "GHSavg", &(ghs), false); + + sprintf(szfan, "%d", g_max_fan); + root = api_add_string(root, "fan", szfan, false); + sprintf(sztemp, "%d", g_max_temp); + root = api_add_string(root, "temp", sztemp, false); + + if(pool == NULL) { + strcpy(szpool, "no"); + strcpy(szuser, "no"); + root = api_add_string(root, "pool", szpool, false); + root = api_add_string(root, "user", szuser, false); + } else { + root = api_add_string(root, "pool", pool->rpc_url, false); + root = api_add_string(root, "user", pool->rpc_user, false); + } + + root = print_data(io_data, root, isjson, isjson); + + if (isjson && io_open) + io_close(io_data); +} + +static void summary(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open; + double utility, ghs, work_utility; + + message(io_data, MSG_SUMM, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? 
COMSTR JSON_SUMMARY : _SUMMARY COMSTR); + + // stop hashmeter() changing some while copying + mutex_lock(&hash_lock); +#ifdef USE_BITMAIN_C5 + total_diff1 = total_diff_accepted + total_diff_rejected + total_diff_stale; +#endif + + utility = total_accepted / ( total_secs ? total_secs : 1 ) * 60; + ghs = total_mhashes_done / 1000 / total_secs; + work_utility = total_diff1 / ( total_secs ? total_secs : 1 ) * 60; + + root = api_add_elapsed(root, "Elapsed", &(total_secs), true); +#ifndef USE_BITMAIN_C5 + root = api_add_mhs(root, "GHS 5s", &(g_displayed_rolling), false); +#else + root = api_add_string(root, "GHS 5s", displayed_hash_rate, false); +#endif + root = api_add_mhs(root, "GHS av", &(ghs), false); + root = api_add_uint(root, "Found Blocks", &(found_blocks), true); + root = api_add_int64(root, "Getworks", &(total_getworks), true); + root = api_add_int64(root, "Accepted", &(total_accepted), true); + root = api_add_int64(root, "Rejected", &(total_rejected), true); + root = api_add_int(root, "Hardware Errors", &(hw_errors), true); + root = api_add_utility(root, "Utility", &(utility), false); + root = api_add_int64(root, "Discarded", &(total_discarded), true); + root = api_add_int64(root, "Stale", &(total_stale), true); + root = api_add_uint(root, "Get Failures", &(total_go), true); + root = api_add_uint(root, "Local Work", &(local_work), true); + root = api_add_uint(root, "Remote Failures", &(total_ro), true); + root = api_add_uint(root, "Network Blocks", &(new_blocks), true); + root = api_add_mhtotal(root, "Total MH", &(total_mhashes_done), true); + root = api_add_utility(root, "Work Utility", &(work_utility), false); + root = api_add_diff(root, "Difficulty Accepted", &(total_diff_accepted), true); + root = api_add_diff(root, "Difficulty Rejected", &(total_diff_rejected), true); + root = api_add_diff(root, "Difficulty Stale", &(total_diff_stale), true); + root = api_add_uint64(root, "Best Share", &(best_diff), true); + double hwp = (hw_errors + total_diff1) ? 
+ (double)(hw_errors) / (double)(hw_errors + total_diff1) : 0; + root = api_add_percent(root, "Device Hardware%", &hwp, false); + double rejp = total_diff1 ? + (double)(total_diff_rejected) / (double)(total_diff1) : 0; + root = api_add_percent(root, "Device Rejected%", &rejp, false); + double prejp = (total_diff_accepted + total_diff_rejected + total_diff_stale) ? + (double)(total_diff_rejected) / (double)(total_diff_accepted + total_diff_rejected + total_diff_stale) : 0; + root = api_add_percent(root, "Pool Rejected%", &prejp, false); + double stalep = (total_diff_accepted + total_diff_rejected + total_diff_stale) ? + (double)(total_diff_stale) / (double)(total_diff_accepted + total_diff_rejected + total_diff_stale) : 0; + root = api_add_percent(root, "Pool Stale%", &stalep, false); + root = api_add_time(root, "Last getwork", &last_getwork, false); + + mutex_unlock(&hash_lock); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static void pgacount(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open; + int count = 0; + +#ifdef HAVE_AN_FPGA + count = numpgas(); +#endif + + message(io_data, MSG_NUMPGA, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? 
COMSTR JSON_PGAS : _PGAS COMSTR); + + root = api_add_int(root, "Count", &count, false); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static void switchpool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct pool *pool; + int id; + + if (total_pools == 0) { + message(io_data, MSG_NOPOOL, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISPID, 0, NULL, isjson); + return; + } + + id = atoi(param); + cg_rlock(&control_lock); + if (id < 0 || id >= total_pools) { + cg_runlock(&control_lock); + message(io_data, MSG_INVPID, id, NULL, isjson); + return; + } + + pool = pools[id]; + pool->enabled = POOL_ENABLED; + cg_runlock(&control_lock); + switch_pools(pool); + + message(io_data, MSG_SWITCHP, id, NULL, isjson); +} + +static void copyadvanceafter(char ch, char **param, char **buf) +{ +#define src_p (*param) +#define dst_b (*buf) + + while (*src_p && *src_p != ch) { + if (*src_p == '\\' && *(src_p+1) != '\0') + src_p++; + + *(dst_b++) = *(src_p++); + } + if (*src_p) + src_p++; + + *(dst_b++) = '\0'; +} + +static bool pooldetails(char *param, char **url, char **user, char **pass) +{ + char *ptr, *buf; + + ptr = buf = malloc(strlen(param)+1); + if (unlikely(!buf)) + quit(1, "Failed to malloc pooldetails buf"); + + *url = buf; + + // copy url + copyadvanceafter(',', ¶m, &buf); + + if (!(*param)) // missing user + goto exitsama; + + *user = buf; + + // copy user + copyadvanceafter(',', ¶m, &buf); + + if (!*param) // missing pass + goto exitsama; + + *pass = buf; + + // copy pass + copyadvanceafter(',', ¶m, &buf); + + return true; + +exitsama: + free(ptr); + return false; +} + +static void addpool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + char *url, *user, *pass; + struct pool *pool; + char *ptr; + + if (param == NULL || *param == '\0') 
{ + message(io_data, MSG_MISPDP, 0, NULL, isjson); + return; + } + + if (!pooldetails(param, &url, &user, &pass)) { + ptr = escape_string(param, isjson); + message(io_data, MSG_INVPDP, 0, ptr, isjson); + if (ptr != param) + free(ptr); + ptr = NULL; + return; + } + + pool = add_pool(); + detect_stratum(pool, url); + add_pool_details(pool, true, url, user, pass); + + ptr = escape_string(url, isjson); + message(io_data, MSG_ADDPOOL, pool->pool_no, ptr, isjson); + if (ptr != url) + free(ptr); + ptr = NULL; +} + +static void enablepool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct pool *pool; + int id; + + if (total_pools == 0) { + message(io_data, MSG_NOPOOL, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISPID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= total_pools) { + message(io_data, MSG_INVPID, id, NULL, isjson); + return; + } + + pool = pools[id]; + if (pool->enabled == POOL_ENABLED) { + message(io_data, MSG_ALRENAP, id, NULL, isjson); + return; + } + + pool->enabled = POOL_ENABLED; + if (pool->prio < current_pool()->prio) + switch_pools(pool); + + message(io_data, MSG_ENAPOOL, id, NULL, isjson); +} + +static void poolpriority(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + char *ptr, *next; + int i, pr, prio = 0; + + // TODO: all cgminer code needs a mutex added everywhere for change + // access to total_pools and also parts of the pools[] array, + // just copying total_pools here wont solve that + + if (total_pools == 0) { + message(io_data, MSG_NOPOOL, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISPID, 0, NULL, isjson); + return; + } + + bool pools_changed[total_pools]; + int new_prio[total_pools]; + for (i = 0; i < total_pools; ++i) + pools_changed[i] = false; + + next = param; + while 
(next && *next) { + ptr = next; + next = strchr(ptr, ','); + if (next) + *(next++) = '\0'; + + i = atoi(ptr); + if (i < 0 || i >= total_pools) { + message(io_data, MSG_INVPID, i, NULL, isjson); + return; + } + + if (pools_changed[i]) { + message(io_data, MSG_DUPPID, i, NULL, isjson); + return; + } + + pools_changed[i] = true; + new_prio[i] = prio++; + } + + // Only change them if no errors + for (i = 0; i < total_pools; i++) { + if (pools_changed[i]) + pools[i]->prio = new_prio[i]; + } + + // In priority order, cycle through the unchanged pools and append them + for (pr = 0; pr < total_pools; pr++) + for (i = 0; i < total_pools; i++) { + if (!pools_changed[i] && pools[i]->prio == pr) { + pools[i]->prio = prio++; + pools_changed[i] = true; + break; + } + } + + if (current_pool()->prio) + switch_pools(NULL); + + message(io_data, MSG_POOLPRIO, 0, NULL, isjson); +} + +static void poolquota(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct pool *pool; + int quota, id; + char *comma; + + if (total_pools == 0) { + message(io_data, MSG_NOPOOL, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISPID, 0, NULL, isjson); + return; + } + + comma = strchr(param, ','); + if (!comma) { + message(io_data, MSG_CONVAL, 0, param, isjson); + return; + } + + *(comma++) = '\0'; + + id = atoi(param); + if (id < 0 || id >= total_pools) { + message(io_data, MSG_INVPID, id, NULL, isjson); + return; + } + pool = pools[id]; + + quota = atoi(comma); + if (quota < 0) { + message(io_data, MSG_INVNEG, quota, pool->rpc_url, isjson); + return; + } + + pool->quota = quota; + adjust_quota_gcd(); + message(io_data, MSG_SETQUOTA, quota, pool->rpc_url, isjson); +} + +static void disablepool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct pool *pool; + int id; + + if (total_pools == 0) { + message(io_data, MSG_NOPOOL, 
0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISPID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= total_pools) { + message(io_data, MSG_INVPID, id, NULL, isjson); + return; + } + + pool = pools[id]; + if (pool->enabled == POOL_DISABLED) { + message(io_data, MSG_ALRDISP, id, NULL, isjson); + return; + } + + if (enabled_pools <= 1) { + message(io_data, MSG_DISLASTP, id, NULL, isjson); + return; + } + + pool->enabled = POOL_DISABLED; + if (pool == current_pool()) + switch_pools(NULL); + + message(io_data, MSG_DISPOOL, id, NULL, isjson); +} + +static void removepool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct pool *pool; + char *rpc_url; + bool dofree = false; + int id; + + if (total_pools == 0) { + message(io_data, MSG_NOPOOL, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISPID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= total_pools) { + message(io_data, MSG_INVPID, id, NULL, isjson); + return; + } + + if (total_pools <= 1) { + message(io_data, MSG_REMLASTP, id, NULL, isjson); + return; + } + + pool = pools[id]; + if (pool == current_pool()) + switch_pools(NULL); + + if (pool == current_pool()) { + message(io_data, MSG_ACTPOOL, id, NULL, isjson); + return; + } + + pool->enabled = POOL_DISABLED; + rpc_url = escape_string(pool->rpc_url, isjson); + if (rpc_url != pool->rpc_url) + dofree = true; + + remove_pool(pool); + + message(io_data, MSG_REMPOOL, id, rpc_url, isjson); + + if (dofree) + free(rpc_url); + rpc_url = NULL; +} + +void doquit(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + if (isjson) + io_put(io_data, JSON_ACTION JSON_BYE); + else + io_put(io_data, _BYE); + + bye = true; + do_a_quit = true; +} + +void dorestart(struct io_data *io_data, 
__maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + if (isjson) + io_put(io_data, JSON_ACTION JSON_RESTART); + else + io_put(io_data, _RESTART); + + bye = true; + do_a_restart = true; +} + +void privileged(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + message(io_data, MSG_ACCOK, 0, NULL, isjson); +} + +void notifystatus(struct io_data *io_data, int device, struct cgpu_info *cgpu, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + char *reason; + + if (cgpu->device_last_not_well == 0) + reason = REASON_NONE; + else + switch(cgpu->device_not_well_reason) { + case REASON_THREAD_FAIL_INIT: + reason = REASON_THREAD_FAIL_INIT_STR; + break; + case REASON_THREAD_ZERO_HASH: + reason = REASON_THREAD_ZERO_HASH_STR; + break; + case REASON_THREAD_FAIL_QUEUE: + reason = REASON_THREAD_FAIL_QUEUE_STR; + break; + case REASON_DEV_SICK_IDLE_60: + reason = REASON_DEV_SICK_IDLE_60_STR; + break; + case REASON_DEV_DEAD_IDLE_600: + reason = REASON_DEV_DEAD_IDLE_600_STR; + break; + case REASON_DEV_NOSTART: + reason = REASON_DEV_NOSTART_STR; + break; + case REASON_DEV_OVER_HEAT: + reason = REASON_DEV_OVER_HEAT_STR; + break; + case REASON_DEV_THERMAL_CUTOFF: + reason = REASON_DEV_THERMAL_CUTOFF_STR; + break; + case REASON_DEV_COMMS_ERROR: + reason = REASON_DEV_COMMS_ERROR_STR; + break; + default: + reason = REASON_UNKNOWN_STR; + break; + } + + // ALL counters (and only counters) must start the name with a '*' + // Simplifies future external support for identifying new counters + root = api_add_int(root, "NOTIFY", &device, false); + root = api_add_string(root, "Name", cgpu->drv->name, false); + root = api_add_int(root, "ID", &(cgpu->device_id), false); + root = api_add_time(root, "Last Well", &(cgpu->device_last_well), false); + root = api_add_time(root, "Last Not Well", &(cgpu->device_last_not_well), false); + root = api_add_string(root, 
"Reason Not Well", reason, false); + root = api_add_int(root, "*Thread Fail Init", &(cgpu->thread_fail_init_count), false); + root = api_add_int(root, "*Thread Zero Hash", &(cgpu->thread_zero_hash_count), false); + root = api_add_int(root, "*Thread Fail Queue", &(cgpu->thread_fail_queue_count), false); + root = api_add_int(root, "*Dev Sick Idle 60s", &(cgpu->dev_sick_idle_60_count), false); + root = api_add_int(root, "*Dev Dead Idle 600s", &(cgpu->dev_dead_idle_600_count), false); + root = api_add_int(root, "*Dev Nostart", &(cgpu->dev_nostart_count), false); + root = api_add_int(root, "*Dev Over Heat", &(cgpu->dev_over_heat_count), false); + root = api_add_int(root, "*Dev Thermal Cutoff", &(cgpu->dev_thermal_cutoff_count), false); + root = api_add_int(root, "*Dev Comms Error", &(cgpu->dev_comms_error_count), false); + root = api_add_int(root, "*Dev Throttle", &(cgpu->dev_throttle_count), false); + + root = print_data(io_data, root, isjson, isjson && (device > 0)); +} + +static void notify(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, char group) +{ + struct cgpu_info *cgpu; + bool io_open = false; + int i; + + if (total_devices == 0) { + message(io_data, MSG_NODEVS, 0, NULL, isjson); + return; + } + + message(io_data, MSG_NOTIFY, 0, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR JSON_NOTIFY); + + for (i = 0; i < total_devices; i++) { + cgpu = get_devices(i); + notifystatus(io_data, i, cgpu, isjson, group); + } + + if (isjson && io_open) + io_close(io_data); +} + +static void devdetails(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open = false; + struct cgpu_info *cgpu; + int i; + + if (total_devices == 0) { + message(io_data, MSG_NODEVS, 0, NULL, isjson); + return; + } + + message(io_data, MSG_DEVDETAILS, 0, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR 
JSON_DEVDETAILS); + + for (i = 0; i < total_devices; i++) { + cgpu = get_devices(i); + + root = api_add_int(root, "DEVDETAILS", &i, false); + root = api_add_string(root, "Name", cgpu->drv->name, false); + root = api_add_int(root, "ID", &(cgpu->device_id), false); + root = api_add_string(root, "Driver", cgpu->drv->dname, false); + root = api_add_const(root, "Kernel", cgpu->kname ? : BLANK, false); + root = api_add_const(root, "Model", cgpu->name ? : BLANK, false); + root = api_add_const(root, "Device Path", cgpu->device_path ? : BLANK, false); + + root = print_data(io_data, root, isjson, isjson && (i > 0)); + } + + if (isjson && io_open) + io_close(io_data); +} + +void dosave(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + char filename[PATH_MAX]; + FILE *fcfg; + char *ptr; + + if (param == NULL || *param == '\0') { + default_save_file(filename); + param = filename; + } + + fcfg = fopen(param, "w"); + if (!fcfg) { + ptr = escape_string(param, isjson); + message(io_data, MSG_BADFN, 0, ptr, isjson); + if (ptr != param) + free(ptr); + ptr = NULL; + return; + } + + write_config(fcfg); + fclose(fcfg); + + ptr = escape_string(param, isjson); + message(io_data, MSG_SAVED, 0, ptr, isjson); + if (ptr != param) + free(ptr); + ptr = NULL; +} + +static int itemstats(struct io_data *io_data, int i, char *id, struct cgminer_stats *stats, struct cgminer_pool_stats *pool_stats, struct api_data *extra, struct cgpu_info *cgpu, bool isjson) +{ + struct api_data *root = NULL; + double ghs; + + ghs = total_mhashes_done / 1000 / total_secs; + + root = api_add_int(root, "STATS", &i, false); + root = api_add_string(root, "ID", id, false); + root = api_add_elapsed(root, "Elapsed", &(total_secs), false); + root = api_add_uint32(root, "Calls", &(stats->getwork_calls), false); + root = api_add_timeval(root, "Wait", &(stats->getwork_wait), false); + root = api_add_timeval(root, "Max", &(stats->getwork_wait_max), false); + root = 
api_add_timeval(root, "Min", &(stats->getwork_wait_min), false); + root = api_add_mhs(root, "GHS 5s", &(g_displayed_rolling), false); + root = api_add_mhs(root, "GHS av", &(ghs), false); + + /* + if (pool_stats) { + root = api_add_uint32(root, "Pool Calls", &(pool_stats->getwork_calls), false); + root = api_add_uint32(root, "Pool Attempts", &(pool_stats->getwork_attempts), false); + root = api_add_timeval(root, "Pool Wait", &(pool_stats->getwork_wait), false); + root = api_add_timeval(root, "Pool Max", &(pool_stats->getwork_wait_max), false); + root = api_add_timeval(root, "Pool Min", &(pool_stats->getwork_wait_min), false); + root = api_add_double(root, "Pool Av", &(pool_stats->getwork_wait_rolling), false); + root = api_add_bool(root, "Work Had Roll Time", &(pool_stats->hadrolltime), false); + root = api_add_bool(root, "Work Can Roll", &(pool_stats->canroll), false); + root = api_add_bool(root, "Work Had Expire", &(pool_stats->hadexpire), false); + root = api_add_uint32(root, "Work Roll Time", &(pool_stats->rolltime), false); + root = api_add_diff(root, "Work Diff", &(pool_stats->last_diff), false); + root = api_add_diff(root, "Min Diff", &(pool_stats->min_diff), false); + root = api_add_diff(root, "Max Diff", &(pool_stats->max_diff), false); + root = api_add_uint32(root, "Min Diff Count", &(pool_stats->min_diff_count), false); + root = api_add_uint32(root, "Max Diff Count", &(pool_stats->max_diff_count), false); + root = api_add_uint64(root, "Times Sent", &(pool_stats->times_sent), false); + root = api_add_uint64(root, "Bytes Sent", &(pool_stats->bytes_sent), false); + root = api_add_uint64(root, "Times Recv", &(pool_stats->times_received), false); + root = api_add_uint64(root, "Bytes Recv", &(pool_stats->bytes_received), false); + root = api_add_uint64(root, "Net Bytes Sent", &(pool_stats->net_bytes_sent), false); + root = api_add_uint64(root, "Net Bytes Recv", &(pool_stats->net_bytes_received), false); + }*/ + + if (extra) + root = api_add_extra(root, extra); 
+ + if (cgpu) { +#ifdef USE_USBUTILS + char details[256]; + + if (cgpu->usbinfo.pipe_count) + snprintf(details, sizeof(details), + "%"PRIu64" %"PRIu64"/%"PRIu64"/%"PRIu64" %lu", + cgpu->usbinfo.pipe_count, + cgpu->usbinfo.clear_err_count, + cgpu->usbinfo.retry_err_count, + cgpu->usbinfo.clear_fail_count, + (unsigned long)(cgpu->usbinfo.last_pipe)); + else + strcpy(details, "0"); + + root = api_add_string(root, "USB Pipe", details, true); + + /* + snprintf(details, sizeof(details), + "r%"PRIu64" %.6f w%"PRIu64" %.6f", + cgpu->usbinfo.read_delay_count, + cgpu->usbinfo.total_read_delay, + cgpu->usbinfo.write_delay_count, + cgpu->usbinfo.total_write_delay); + + root = api_add_string(root, "USB Delay", details, true); + + if (cgpu->usbinfo.usb_tmo[0].count == 0 && + cgpu->usbinfo.usb_tmo[1].count == 0 && + cgpu->usbinfo.usb_tmo[2].count == 0) { + snprintf(details, sizeof(details), + "%"PRIu64" 0", cgpu->usbinfo.tmo_count); + } else { + snprintf(details, sizeof(details), + "%"PRIu64" %d=%d/%d/%d/%"PRIu64"/%"PRIu64 + " %d=%d/%d/%d/%"PRIu64"/%"PRIu64 + " %d=%d/%d/%d/%"PRIu64"/%"PRIu64" ", + cgpu->usbinfo.tmo_count, + USB_TMO_0, cgpu->usbinfo.usb_tmo[0].count, + cgpu->usbinfo.usb_tmo[0].min_tmo, + cgpu->usbinfo.usb_tmo[0].max_tmo, + cgpu->usbinfo.usb_tmo[0].total_over, + cgpu->usbinfo.usb_tmo[0].total_tmo, + USB_TMO_1, cgpu->usbinfo.usb_tmo[1].count, + cgpu->usbinfo.usb_tmo[1].min_tmo, + cgpu->usbinfo.usb_tmo[1].max_tmo, + cgpu->usbinfo.usb_tmo[1].total_over, + cgpu->usbinfo.usb_tmo[1].total_tmo, + USB_TMO_2, cgpu->usbinfo.usb_tmo[2].count, + cgpu->usbinfo.usb_tmo[2].min_tmo, + cgpu->usbinfo.usb_tmo[2].max_tmo, + cgpu->usbinfo.usb_tmo[2].total_over, + cgpu->usbinfo.usb_tmo[2].total_tmo); + } + + root = api_add_string(root, "USB tmo", details, true);*/ +#endif + } + + root = print_data(io_data, root, isjson, isjson && (i > 0)); + + return ++i; +} + +static void minerstats(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, 
__maybe_unused char group) +{ + struct api_data *root = NULL; + struct cgpu_info *cgpu; + bool io_open = false; + struct api_data *extra; + char id[20]; + int i, j; + + message(io_data, MSG_MINESTATS, 0, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR JSON_MINESTATS); + + root = api_add_string(root, "BMMiner", VERSION, false); + root = api_add_string(root, "Miner", g_miner_version, false); + root = api_add_string(root, "CompileTime", g_miner_compiletime, false); + root = api_add_string(root, "Type", g_miner_type, false); + root = print_data(io_data, root, isjson, false); + + i = 0; + for (j = 0; j < total_devices; j++) { + cgpu = get_devices(j); + + if (cgpu && cgpu->drv) { + if (cgpu->drv->get_api_stats) + extra = cgpu->drv->get_api_stats(cgpu); + else + extra = NULL; + + sprintf(id, "%s%d", cgpu->drv->name, cgpu->device_id); + i = itemstats(io_data, i, id, &(cgpu->cgminer_stats), NULL, extra, cgpu, isjson); + } + } + + if (isjson && io_open) + io_close(io_data); +} + +static void minerestats(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + bool io_open = false; + struct api_data *extra; + char id[20]; + int i, j; +#ifdef USE_USBUTILS + time_t howoldsec = 0; + + if (param && *param) + howoldsec = (time_t)atoi(param); +#endif + + message(io_data, MSG_MINESTATS, 0, NULL, isjson); + if (isjson) + io_open = io_add(io_data, COMSTR JSON_MINESTATS); + + i = 0; + for (j = 0; j < total_devices; j++) { + cgpu = get_devices(j); + if (!cgpu) + continue; +#ifdef USE_USBUTILS + if (cgpu->blacklisted) + continue; + if (cgpu->usbinfo.nodev) { + if (howoldsec <= 0) + continue; + if ((when - cgpu->usbinfo.last_nodev.tv_sec) >= howoldsec) + continue; + } +#endif + if (cgpu->drv) { + if (cgpu->drv->get_api_stats) + extra = cgpu->drv->get_api_stats(cgpu); + else + extra = NULL; + + sprintf(id, "%s%d", cgpu->drv->name, cgpu->device_id); + i = itemstats(io_data, i, 
id, &(cgpu->cgminer_stats), NULL, extra, cgpu, isjson); + } + } + + if (isjson && io_open) + io_close(io_data); +} + +static void failoveronly(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISBOOL, 0, NULL, isjson); + return; + } + + *param = tolower(*param); + + if (*param != 't' && *param != 'f') { + message(io_data, MSG_INVBOOL, 0, NULL, isjson); + return; + } + + bool tf = (*param == 't'); + + opt_fail_only = tf; + + message(io_data, MSG_FOO, tf, NULL, isjson); +} + +static void minecoin(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open; + + message(io_data, MSG_MINECOIN, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? COMSTR JSON_MINECOIN : _MINECOIN COMSTR); + + root = api_add_const(root, "Hash Method", SHA256STR, false); + + cg_rlock(&ch_lock); + root = api_add_timeval(root, "Current Block Time", &block_timeval, true); + root = api_add_string(root, "Current Block Hash", current_hash, true); + cg_runlock(&ch_lock); + + root = api_add_bool(root, "LP", &have_longpoll, false); + root = api_add_diff(root, "Network Difficulty", ¤t_diff, true); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static void debugstate(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open; + + if (param == NULL) + param = (char *)BLANK; + else + *param = tolower(*param); + + switch(*param) { + case 's': + opt_realquiet = true; + break; + case 'q': + opt_quiet ^= true; + break; + case 'v': + opt_log_output ^= true; + if (opt_log_output) + opt_quiet = false; + break; + case 'd': + opt_debug ^= true; + opt_log_output = opt_debug; + if (opt_debug) + opt_quiet = false; + break; + 
case 'r': + opt_protocol ^= true; + if (opt_protocol) + opt_quiet = false; + break; + case 'p': + want_per_device_stats ^= true; + opt_log_output = want_per_device_stats; + break; + case 'n': + opt_log_output = false; + opt_debug = false; + opt_quiet = false; + opt_protocol = false; + want_per_device_stats = false; + opt_worktime = false; + break; + case 'w': + opt_worktime ^= true; + break; +#ifdef _MEMORY_DEBUG + case 'y': + cgmemspeedup(); + break; + case 'z': + cgmemrpt(); + break; +#endif + default: + // anything else just reports the settings + break; + } + + message(io_data, MSG_DEBUGSET, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? COMSTR JSON_DEBUGSET : _DEBUGSET COMSTR); + + root = api_add_bool(root, "Silent", &opt_realquiet, false); + root = api_add_bool(root, "Quiet", &opt_quiet, false); + root = api_add_bool(root, "Verbose", &opt_log_output, false); + root = api_add_bool(root, "Debug", &opt_debug, false); + root = api_add_bool(root, "RPCProto", &opt_protocol, false); + root = api_add_bool(root, "PerDevice", &want_per_device_stats, false); + root = api_add_bool(root, "WorkTime", &opt_worktime, false); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static void setconfig(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + char *comma; + int value; + + if (param == NULL || *param == '\0') { + message(io_data, MSG_CONPAR, 0, NULL, isjson); + return; + } + + comma = strchr(param, ','); + if (!comma) { + message(io_data, MSG_CONVAL, 0, param, isjson); + return; + } + + *(comma++) = '\0'; + value = atoi(comma); + if (value < 0 || value > 9999) { + message(io_data, MSG_INVNUM, value, param, isjson); + return; + } + + if (strcasecmp(param, "queue") == 0) + opt_queue = value; + else if (strcasecmp(param, "scantime") == 0) + opt_scantime = value; + else if (strcasecmp(param, "expiry") == 0) + opt_expiry = value; + else { + 
message(io_data, MSG_UNKCON, 0, param, isjson); + return; + } + + message(io_data, MSG_SETCONFIG, value, param, isjson); +} + +static void usbstats(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + +#ifdef USE_USBUTILS + bool io_open = false; + int count = 0; + + root = api_usb_stats(&count); +#endif + + if (!root) { + message(io_data, MSG_NOUSTA, 0, NULL, isjson); + return; + } + +#ifdef USE_USBUTILS + message(io_data, MSG_USBSTA, 0, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR JSON_USBSTATS); + + root = print_data(io_data, root, isjson, false); + + while (42) { + root = api_usb_stats(&count); + if (!root) + break; + + root = print_data(io_data, root, isjson, isjson); + } + + if (isjson && io_open) + io_close(io_data); +#endif +} + +#ifdef HAVE_AN_FPGA +static void pgaset(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + struct device_drv *drv; + char buf[TMPBUFSIZ]; + int numpga = numpgas(); + + if (numpga == 0) { + message(io_data, MSG_PGANON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + char *opt = strchr(param, ','); + if (opt) + *(opt++) = '\0'; + if (!opt || !*opt) { + message(io_data, MSG_MISPGAOPT, 0, NULL, isjson); + return; + } + + int id = atoi(param); + if (id < 0 || id >= numpga) { + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + int dev = pgadevice(id); + if (dev < 0) { // Should never happen + message(io_data, MSG_INVPGA, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + drv = cgpu->drv; + + char *set = strchr(opt, ','); + if (set) + *(set++) = '\0'; + + if (!drv->set_device) + message(io_data, MSG_PGANOSET, id, NULL, isjson); + else { + char *ret = drv->set_device(cgpu, opt, set, buf); + if 
(ret) { + if (strcasecmp(opt, "help") == 0) + message(io_data, MSG_PGAHELP, id, ret, isjson); + else + message(io_data, MSG_PGASETERR, id, ret, isjson); + } else + message(io_data, MSG_PGASETOK, id, NULL, isjson); + } +} +#endif + +static void dozero(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + if (param == NULL || *param == '\0') { + message(io_data, MSG_ZERMIS, 0, NULL, isjson); + return; + } + + char *sum = strchr(param, ','); + if (sum) + *(sum++) = '\0'; + if (!sum || !*sum) { + message(io_data, MSG_MISBOOL, 0, NULL, isjson); + return; + } + + bool all = false; + bool bs = false; + if (strcasecmp(param, "all") == 0) + all = true; + else if (strcasecmp(param, "bestshare") == 0) + bs = true; + + if (all == false && bs == false) { + message(io_data, MSG_ZERINV, 0, param, isjson); + return; + } + + *sum = tolower(*sum); + if (*sum != 't' && *sum != 'f') { + message(io_data, MSG_INVBOOL, 0, NULL, isjson); + return; + } + + bool dosum = (*sum == 't'); + if (dosum) + print_summary(); + + if (all) + zero_stats(); + if (bs) + zero_bestshare(); + + if (dosum) + message(io_data, MSG_ZERSUM, 0, all ? "All" : "BestShare", isjson); + else + message(io_data, MSG_ZERNOSUM, 0, all ? 
"All" : "BestShare", isjson); +} + +static void dohotplug(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ +#ifdef USE_USBUTILS + int value; + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISHPLG, 0, NULL, isjson); + return; + } + + value = atoi(param); + if (value < 0 || value > 9999) { + message(io_data, MSG_INVHPLG, 0, param, isjson); + return; + } + + hotplug_time = value; + + if (value) + message(io_data, MSG_HOTPLUG, value, NULL, isjson); + else + message(io_data, MSG_DISHPLG, 0, NULL, isjson); +#else + message(io_data, MSG_NOHPLG, 0, NULL, isjson); + return; +#endif +} + +#ifdef HAVE_AN_ASIC +static void ascdev(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + bool io_open = false; + int numasc = numascs(); + int id; + + if (numasc == 0) { + message(io_data, MSG_ASCNON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numasc) { + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + message(io_data, MSG_ASCDEV, id, NULL, isjson); + + if (isjson) + io_open = io_add(io_data, COMSTR JSON_ASC); + + ascstatus(io_data, id, isjson, false); + + if (isjson && io_open) + io_close(io_data); +} + +static void ascenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + int numasc = numascs(); + struct thr_info *thr; + int asc; + int id; + int i; + + if (numasc == 0) { + message(io_data, MSG_ASCNON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numasc) { + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + int dev = ascdevice(id); + if (dev < 
0) { // Should never happen + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + + applog(LOG_DEBUG, "API: request to ascenable ascid %d device %d %s%u", + id, dev, cgpu->drv->name, cgpu->device_id); + + if (cgpu->deven != DEV_DISABLED) { + message(io_data, MSG_ASCLRENA, id, NULL, isjson); + return; + } + +#if 0 /* A DISABLED device wont change status FIXME: should disabling make it WELL? */ + if (cgpu->status != LIFE_WELL) { + message(io_data, MSG_ASCUNW, id, NULL, isjson); + return; + } +#endif + +#ifdef USE_USBUTILS + if (cgpu->usbinfo.nodev) { + message(io_data, MSG_ASCUSBNODEV, id, NULL, isjson); + return; + } +#endif + + for (i = 0; i < mining_threads; i++) { + thr = get_thread(i); + asc = thr->cgpu->cgminer_id; + if (asc == dev) { + cgpu->deven = DEV_ENABLED; + applog(LOG_DEBUG, "API: Pushing sem post to thread %d", thr->id); + cgsem_post(&thr->sem); + } + } + + message(io_data, MSG_ASCENA, id, NULL, isjson); +} + +static void ascdisable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + int numasc = numascs(); + int id; + + if (numasc == 0) { + message(io_data, MSG_ASCNON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numasc) { + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + int dev = ascdevice(id); + if (dev < 0) { // Should never happen + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + + applog(LOG_DEBUG, "API: request to ascdisable ascid %d device %d %s%u", + id, dev, cgpu->drv->name, cgpu->device_id); + + if (cgpu->deven == DEV_DISABLED) { + message(io_data, MSG_ASCLRDIS, id, NULL, isjson); + return; + } + + cgpu->deven = DEV_DISABLED; + + message(io_data, MSG_ASCDIS, id, NULL, isjson); +} + +static void ascidentify(struct io_data 
*io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + struct device_drv *drv; + int numasc = numascs(); + int id; + + if (numasc == 0) { + message(io_data, MSG_ASCNON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + id = atoi(param); + if (id < 0 || id >= numasc) { + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + int dev = ascdevice(id); + if (dev < 0) { // Should never happen + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + drv = cgpu->drv; + + if (!drv->identify_device) + message(io_data, MSG_ASCNOID, id, NULL, isjson); + else { + drv->identify_device(cgpu); + message(io_data, MSG_ASCIDENT, id, NULL, isjson); + } +} +#endif + +static void asccount(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + bool io_open; + int count = 0; + +#ifdef HAVE_AN_ASIC + count = numascs(); +#endif + + message(io_data, MSG_NUMASC, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? 
COMSTR JSON_ASCS : _ASCS COMSTR); + + root = api_add_int(root, "Count", &count, false); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +#ifdef HAVE_AN_ASIC +static void ascset(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct cgpu_info *cgpu; + struct device_drv *drv; + char buf[TMPBUFSIZ]; + int numasc = numascs(); + + if (numasc == 0) { + message(io_data, MSG_ASCNON, 0, NULL, isjson); + return; + } + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISID, 0, NULL, isjson); + return; + } + + char *opt = strchr(param, ','); + if (opt) + *(opt++) = '\0'; + if (!opt || !*opt) { + message(io_data, MSG_MISASCOPT, 0, NULL, isjson); + return; + } + + int id = atoi(param); + if (id < 0 || id >= numasc) { + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + int dev = ascdevice(id); + if (dev < 0) { // Should never happen + message(io_data, MSG_INVASC, id, NULL, isjson); + return; + } + + cgpu = get_devices(dev); + drv = cgpu->drv; + + char *set = strchr(opt, ','); + if (set) + *(set++) = '\0'; + + if (!drv->set_device) + message(io_data, MSG_ASCNOSET, id, NULL, isjson); + else { + char *ret = drv->set_device(cgpu, opt, set, buf); + if (ret) { + if (strcasecmp(opt, "help") == 0) + message(io_data, MSG_ASCHELP, id, ret, isjson); + else + message(io_data, MSG_ASCSETERR, id, ret, isjson); + } else + message(io_data, MSG_ASCSETOK, id, NULL, isjson); + } +} +#endif + +static void lcddata(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) +{ + struct api_data *root = NULL; + struct cgpu_info *cgpu; + bool io_open; + double ghs = 0.0, last_share_diff = 0.0; + float temp = 0.0; + time_t last_share_time = 0; + time_t last_device_valid_work = 0; + struct pool *pool = NULL; + char *rpc_url = "none", *rpc_user = ""; + int i; + + message(io_data, 
MSG_LCD, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? COMSTR JSON_LCD : _LCD COMSTR); + + // stop hashmeter() changing some while copying + mutex_lock(&hash_lock); + + root = api_add_elapsed(root, "Elapsed", &(total_secs), true); + ghs = total_mhashes_done / total_secs / 1000.0; + root = api_add_mhs(root, "GHS av", &ghs, true); + ghs = rolling5 / 1000.0; + root = api_add_mhs(root, "GHS 5m", &ghs, true); + ghs = total_rolling / 1000.0; + root = api_add_mhs(root, "GHS 5s", &ghs, true); + + mutex_unlock(&hash_lock); + + temp = 0; + last_device_valid_work = 0; + for (i = 0; i < total_devices; i++) { + cgpu = get_devices(i); + if (last_device_valid_work == 0 || + last_device_valid_work < cgpu->last_device_valid_work) + last_device_valid_work = cgpu->last_device_valid_work; + if (temp < cgpu->temp) + temp = cgpu->temp; + } + + last_share_time = 0; + last_share_diff = 0; + for (i = 0; i < total_pools; i++) { + pool = pools[i]; + + if (pool->removed) + continue; + + if (last_share_time == 0 || last_share_time < pool->last_share_time) { + last_share_time = pool->last_share_time; + last_share_diff = pool->last_share_diff; + } + } + pool = current_pool(); + if (pool) { + rpc_url = pool->rpc_url; + rpc_user = pool->rpc_user; + } + + root = api_add_temp(root, "Temperature", &temp, false); + root = api_add_diff(root, "Last Share Difficulty", &last_share_diff, false); + root = api_add_time(root, "Last Share Time", &last_share_time, false); + root = api_add_uint64(root, "Best Share", &best_diff, true); + root = api_add_time(root, "Last Valid Work", &last_device_valid_work, false); + root = api_add_uint(root, "Found Blocks", &found_blocks, true); + root = api_add_escape(root, "Current Pool", rpc_url, true); + root = api_add_escape(root, "User", rpc_user, true); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static void checkcommand(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, char 
group); + +struct CMDS { + char *name; + void (*func)(struct io_data *, SOCKETTYPE, char *, bool, char); + bool iswritemode; + bool joinable; +} cmds[] = { + { "version", apiversion, false, true }, + { "config", minerconfig, false, true }, + { "devs", devstatus, false, true }, + { "edevs", edevstatus, false, true }, + { "pools", poolstatus, false, true }, + { "summary", summary, false, true }, +#ifdef HAVE_AN_FPGA + { "pga", pgadev, false, false }, + { "pgaenable", pgaenable, true, false }, + { "pgadisable", pgadisable, true, false }, + { "pgaidentify", pgaidentify, true, false }, +#endif + { "pgacount", pgacount, false, true }, + { "switchpool", switchpool, true, false }, + { "addpool", addpool, true, false }, + { "poolpriority", poolpriority, true, false }, + { "poolquota", poolquota, true, false }, + { "enablepool", enablepool, true, false }, + { "disablepool", disablepool, true, false }, + { "removepool", removepool, true, false }, + { "save", dosave, true, false }, + { "quit", doquit, true, false }, + { "privileged", privileged, true, false }, + { "notify", notify, false, true }, + { "devdetails", devdetails, false, true }, + { "restart", dorestart, true, false }, + { "stats", minerstats, false, true }, + { "estats", minerestats, false, true }, + { "check", checkcommand, false, false }, + { "failover-only", failoveronly, true, false }, + { "coin", minecoin, false, true }, + { "debug", debugstate, true, false }, + { "setconfig", setconfig, true, false }, + { "usbstats", usbstats, false, true }, +#ifdef HAVE_AN_FPGA + { "pgaset", pgaset, true, false }, +#endif + { "zero", dozero, true, false }, + { "hotplug", dohotplug, true, false }, +#ifdef HAVE_AN_ASIC + { "asc", ascdev, false, false }, + { "ascenable", ascenable, true, false }, + { "ascdisable", ascdisable, true, false }, + { "ascidentify", ascidentify, true, false }, + { "ascset", ascset, true, false }, +#endif + { "asccount", asccount, false, true }, + { "lcd", lcddata, false, true }, + { "lockstats", 
lockstats, true, true }, + { NULL, NULL, false, false } +}; + +static void checkcommand(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, char group) +{ + struct api_data *root = NULL; + bool io_open; + char cmdbuf[100]; + bool found, access; + int i; + + if (param == NULL || *param == '\0') { + message(io_data, MSG_MISCHK, 0, NULL, isjson); + return; + } + + found = false; + access = false; + for (i = 0; cmds[i].name != NULL; i++) { + if (strcmp(cmds[i].name, param) == 0) { + found = true; + + sprintf(cmdbuf, "|%s|", param); + if (ISPRIVGROUP(group) || strstr(COMMANDS(group), cmdbuf)) + access = true; + + break; + } + } + + message(io_data, MSG_CHECK, 0, NULL, isjson); + io_open = io_add(io_data, isjson ? COMSTR JSON_CHECK : _CHECK COMSTR); + + root = api_add_const(root, "Exists", found ? YES : NO, false); + root = api_add_const(root, "Access", access ? YES : NO, false); + + root = print_data(io_data, root, isjson, false); + if (isjson && io_open) + io_close(io_data); +} + +static void head_join(struct io_data *io_data, char *cmdptr, bool isjson, bool *firstjoin) +{ + char *ptr; + + if (*firstjoin) { + if (isjson) + io_add(io_data, JSON0); + *firstjoin = false; + } else { + if (isjson) + io_add(io_data, JSON_BETWEEN_JOIN); + } + + // External supplied string + ptr = escape_string(cmdptr, isjson); + + if (isjson) { + io_add(io_data, JSON1); + io_add(io_data, ptr); + io_add(io_data, JSON2); + } else { + io_add(io_data, JOIN_CMD); + io_add(io_data, ptr); + io_add(io_data, BETWEEN_JOIN); + } + + if (ptr != cmdptr) + free(ptr); +} + +static void tail_join(struct io_data *io_data, bool isjson) +{ + if (io_data->close) { + io_add(io_data, JSON_CLOSE); + io_data->close = false; + } + + if (isjson) { + io_add(io_data, JSON_END); + io_add(io_data, JSON3); + } +} + +static void send_result(struct io_data *io_data, SOCKETTYPE c, bool isjson) +{ + int count, sendc, res, tosend, len, n; + char *buf = io_data->ptr; + + strcpy(buf, io_data->ptr); + + 
if (io_data->close) + strcat(buf, JSON_CLOSE); + + if (isjson) + strcat(buf, JSON_END); + + len = strlen(buf); + tosend = len+1; + + applog(LOG_DEBUG, "API: send reply: (%d) '%.10s%s'", tosend, buf, len > 10 ? "..." : BLANK); + + count = sendc = 0; + while (count < 5 && tosend > 0) { + // allow 50ms per attempt + struct timeval timeout = {0, 50000}; + fd_set wd; + + FD_ZERO(&wd); + FD_SET(c, &wd); + if ((res = select(c + 1, NULL, &wd, NULL, &timeout)) < 1) { + applog(LOG_WARNING, "API: send select failed (%d)", res); + return; + } + + n = send(c, buf, tosend, 0); + sendc++; + + if (SOCKETFAIL(n)) { + count++; + if (sock_blocks()) + continue; + + applog(LOG_WARNING, "API: send (%d:%d) failed: %s", len+1, (len+1 - tosend), SOCKERRMSG); + + return; + } else { + if (sendc <= 1) { + if (n == tosend) + applog(LOG_DEBUG, "API: sent all of %d first go", tosend); + else + applog(LOG_DEBUG, "API: sent %d of %d first go", n, tosend); + } else { + if (n == tosend) + applog(LOG_DEBUG, "API: sent all of remaining %d (sendc=%d)", tosend, sendc); + else + applog(LOG_DEBUG, "API: sent %d of remaining %d (sendc=%d)", n, tosend, sendc); + } + + tosend -= n; + buf += n; + + if (n == 0) + count++; + } + } +} + +static void tidyup(__maybe_unused void *arg) +{ + mutex_lock(&quit_restart_lock); + + SOCKETTYPE *apisock = (SOCKETTYPE *)arg; + + bye = true; + + if (*apisock != INVSOCK) { + shutdown(*apisock, SHUT_RDWR); + CLOSESOCKET(*apisock); + *apisock = INVSOCK; + } + + if (ipaccess != NULL) { + free(ipaccess); + ipaccess = NULL; + } + + io_free(); + + mutex_unlock(&quit_restart_lock); +} + +/* + * Interpret --api-groups G:cmd1:cmd2:cmd3,P:cmd4,*,... + */ +static void setup_groups() +{ + char *api_groups = opt_api_groups ? 
opt_api_groups : (char *)BLANK; + char *buf, *ptr, *next, *colon; + char group; + char commands[TMPBUFSIZ]; + char cmdbuf[100]; + char *cmd; + bool addstar, did; + int i; + + buf = malloc(strlen(api_groups) + 1); + if (unlikely(!buf)) + quit(1, "Failed to malloc ipgroups buf"); + + strcpy(buf, api_groups); + + next = buf; + // for each group defined + while (next && *next) { + ptr = next; + next = strchr(ptr, ','); + if (next) + *(next++) = '\0'; + + // Validate the group + if (*(ptr+1) != ':') { + colon = strchr(ptr, ':'); + if (colon) + *colon = '\0'; + quit(1, "API invalid group name '%s'", ptr); + } + + group = GROUP(*ptr); + if (!VALIDGROUP(group)) + quit(1, "API invalid group name '%c'", *ptr); + + if (group == PRIVGROUP) + quit(1, "API group name can't be '%c'", PRIVGROUP); + + if (group == NOPRIVGROUP) + quit(1, "API group name can't be '%c'", NOPRIVGROUP); + + if (apigroups[GROUPOFFSET(group)].commands != NULL) + quit(1, "API duplicate group name '%c'", *ptr); + + ptr += 2; + + // Validate the command list (and handle '*') + cmd = &(commands[0]); + *(cmd++) = SEPARATOR; + *cmd = '\0'; + addstar = false; + while (ptr && *ptr) { + colon = strchr(ptr, ':'); + if (colon) + *(colon++) = '\0'; + + if (strcmp(ptr, "*") == 0) + addstar = true; + else { + did = false; + for (i = 0; cmds[i].name != NULL; i++) { + if (strcasecmp(ptr, cmds[i].name) == 0) { + did = true; + break; + } + } + if (did) { + // skip duplicates + sprintf(cmdbuf, "|%s|", cmds[i].name); + if (strstr(commands, cmdbuf) == NULL) { + strcpy(cmd, cmds[i].name); + cmd += strlen(cmds[i].name); + *(cmd++) = SEPARATOR; + *cmd = '\0'; + } + } else { + quit(1, "API unknown command '%s' in group '%c'", ptr, group); + } + } + + ptr = colon; + } + + // * = allow all non-iswritemode commands + if (addstar) { + for (i = 0; cmds[i].name != NULL; i++) { + if (cmds[i].iswritemode == false) { + // skip duplicates + sprintf(cmdbuf, "|%s|", cmds[i].name); + if (strstr(commands, cmdbuf) == NULL) { + strcpy(cmd, 
cmds[i].name); + cmd += strlen(cmds[i].name); + *(cmd++) = SEPARATOR; + *cmd = '\0'; + } + } + } + } + + ptr = apigroups[GROUPOFFSET(group)].commands = malloc(strlen(commands) + 1); + if (unlikely(!ptr)) + quit(1, "Failed to malloc group commands buf"); + + strcpy(ptr, commands); + } + + // Now define R (NOPRIVGROUP) as all non-iswritemode commands + cmd = &(commands[0]); + *(cmd++) = SEPARATOR; + *cmd = '\0'; + for (i = 0; cmds[i].name != NULL; i++) { + if (cmds[i].iswritemode == false) { + strcpy(cmd, cmds[i].name); + cmd += strlen(cmds[i].name); + *(cmd++) = SEPARATOR; + *cmd = '\0'; + } + } + + ptr = apigroups[GROUPOFFSET(NOPRIVGROUP)].commands = malloc(strlen(commands) + 1); + if (unlikely(!ptr)) + quit(1, "Failed to malloc noprivgroup commands buf"); + + strcpy(ptr, commands); + + // W (PRIVGROUP) is handled as a special case since it simply means all commands + + free(buf); + return; +} + +/* + * Interpret [W:]IP[/Prefix][,[R|W:]IP2[/Prefix2][,...]] --api-allow option + * ipv6 address should be enclosed with a pair of square brackets and the prefix left outside + * special case of 0/0 allows /0 (means all IP addresses) + */ +#define ALLIP "0/0" +/* + * N.B. 
IP4 addresses are by Definition 32bit big endian on all platforms + */ +static void setup_ipaccess() +{ + char *buf, *ptr, *comma, *slash, *end; + int ipcount, mask, i, shift; + bool ipv6 = false; + char group; + char tmp[30]; + + buf = malloc(strlen(opt_api_allow) + 1); + if (unlikely(!buf)) + quit(1, "Failed to malloc ipaccess buf"); + + strcpy(buf, opt_api_allow); + + ipcount = 1; + ptr = buf; + while (*ptr) + if (*(ptr++) == ',') + ipcount++; + + // possibly more than needed, but never less + ipaccess = calloc(ipcount, sizeof(struct IPACCESS)); + if (unlikely(!ipaccess)) + quit(1, "Failed to calloc ipaccess"); + + ips = 0; + ptr = buf; + while (ptr && *ptr) { + while (*ptr == ' ' || *ptr == '\t') + ptr++; + + if (*ptr == ',') { + ptr++; + continue; + } + + comma = strchr(ptr, ','); + if (comma) + *(comma++) = '\0'; + + group = NOPRIVGROUP; + + if (isalpha(*ptr) && *(ptr+1) == ':') { + if (DEFINEDGROUP(*ptr)) + group = GROUP(*ptr); + + ptr += 2; + } + + ipaccess[ips].group = group; + + if (strcmp(ptr, ALLIP) == 0) { + for (i = 0; i < 16; i++) { + ipaccess[ips].ip.s6_addr[i] = 0; + ipaccess[ips].mask.s6_addr[i] = 0; + } + } + else { + end = strchr(ptr, '/'); + if (!end) { + for (i = 0; i < 16; i++) + ipaccess[ips].mask.s6_addr[i] = 0xff; + end = ptr + strlen(ptr); + } + slash = end--; + if (*ptr == '[' && *end == ']') { + *(ptr++) = '\0'; + *(end--) = '\0'; + ipv6 = true; + } + else + ipv6 = false; + if (*slash) { + *(slash++) = '\0'; + mask = atoi(slash); + if (mask < 1 || (mask += ipv6 ? 
0 : 96) > 128 ) + goto popipo; // skip invalid/zero + + for (i = 0; i < 16; i++) + ipaccess[ips].mask.s6_addr[i] = 0; + + i = 0; + shift = 7; + while (mask-- > 0) { + ipaccess[ips].mask.s6_addr[i] |= 1 << shift; + if (shift-- == 0) { + i++; + shift = 7; + } + } + } + + for (i = 0; i < 16; i++) + ipaccess[ips].ip.s6_addr[i] = 0; // missing default to '[::]' + if (ipv6) { + if (INET_PTON(AF_INET6, ptr, &(ipaccess[ips].ip)) != 1) + goto popipo; + } + else { + // v4 mapped v6 address, such as "::ffff:255.255.255.255" + sprintf(tmp, "::ffff:%s", ptr); + if (INET_PTON(AF_INET6, tmp, &(ipaccess[ips].ip)) != 1) + goto popipo; + } + for (i = 0; i < 16; i++) + ipaccess[ips].ip.s6_addr[i] &= ipaccess[ips].mask.s6_addr[i]; + } + + ips++; +popipo: + ptr = comma; + } + + free(buf); +} + +static void *quit_thread(__maybe_unused void *userdata) +{ + // allow thread creator to finish whatever it's doing + mutex_lock(&quit_restart_lock); + mutex_unlock(&quit_restart_lock); + + if (opt_debug) + applog(LOG_DEBUG, "API: killing bmminer"); + + kill_work(); + + return NULL; +} + +static void *restart_thread(__maybe_unused void *userdata) +{ + // allow thread creator to finish whatever it's doing + mutex_lock(&quit_restart_lock); + mutex_unlock(&quit_restart_lock); + + if (opt_debug) + applog(LOG_DEBUG, "API: restarting bmminer"); + + app_restart(); + + return NULL; +} + +static bool check_connect(struct sockaddr_storage *cli, char **connectaddr, char *group) +{ + bool addrok = false; + int i, j; + bool match; + char tmp[30]; + struct in6_addr client_ip; + + *connectaddr = (char *)malloc(INET6_ADDRSTRLEN); + getnameinfo((struct sockaddr *)cli, sizeof(*cli), + *connectaddr, INET6_ADDRSTRLEN, NULL, 0, NI_NUMERICHOST); + + // v4 mapped v6 address, such as "::ffff:255.255.255.255" + if (cli->ss_family == AF_INET) { + sprintf(tmp, "::ffff:%s", *connectaddr); + INET_PTON(AF_INET6, tmp, &client_ip); + } + else + INET_PTON(AF_INET6, *connectaddr, &client_ip); + + *group = NOPRIVGROUP; + if 
(opt_api_allow) { + for (i = 0; i < ips; i++) { + match = true; + for (j = 0; j < 16; j++) + if ((client_ip.s6_addr[j] & ipaccess[i].mask.s6_addr[j]) + != ipaccess[i].ip.s6_addr[j]) { + match = false; + break; + } + if (match) { + addrok = true; + *group = ipaccess[i].group; + break; + } + } + } else { + if (opt_api_network) + addrok = true; + else + addrok = (strcmp(*connectaddr, localaddr) == 0) + || IN6_IS_ADDR_LOOPBACK(&client_ip); + } + + return addrok; +} + +static void mcast() +{ + struct sockaddr_storage came_from; + time_t bindstart; + char *binderror; + SOCKETTYPE mcast_sock = INVSOCK; + SOCKETTYPE reply_sock = INVSOCK; + socklen_t came_from_siz; + char *connectaddr; + ssize_t rep; + int bound; + int count; + int reply_port; + bool addrok; + char group; + + char port_s[10], came_from_port[10]; + struct addrinfo hints, *res, *host, *client; + + char expect[] = "bmminer-"; // first 8 bytes constant + char *expect_code; + size_t expect_code_len; + char buf[1024]; + char replybuf[1024]; + + sprintf(port_s, "%d", opt_api_mcast_port); + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + if (getaddrinfo(opt_api_mcast_addr, port_s, &hints, &res) != 0) + quit(1, "Invalid API Multicast Address"); + host = res; + while (host != NULL) { + mcast_sock = socket(res->ai_family, SOCK_DGRAM, 0); + if (mcast_sock > 0) + break; + host = host->ai_next; + } + if (mcast_sock == INVSOCK) { + freeaddrinfo(res); + quit(1, "API mcast could not open socket"); + } + + int optval = 1; + if (SOCKETFAIL(setsockopt(mcast_sock, SOL_SOCKET, SO_REUSEADDR, (void *)(&optval), sizeof(optval)))) { + applog(LOG_ERR, "API mcast setsockopt SO_REUSEADDR failed (%s)%s", SOCKERRMSG, MUNAVAILABLE); + goto die; + } + + // try for more than 1 minute ... 
in case the old one hasn't completely gone yet + bound = 0; + bindstart = time(NULL); + while (bound == 0) { + if (SOCKETFAIL(bind(mcast_sock, host->ai_addr, host->ai_addrlen))) { + binderror = SOCKERRMSG; + if ((time(NULL) - bindstart) > 61) + break; + else + cgsleep_ms(30000); + } else + bound = 1; + } + + if (bound == 0) { + applog(LOG_ERR, "API mcast bind to port %d failed (%s)%s", opt_api_mcast_port, binderror, MUNAVAILABLE); + goto die; + } + + switch (host->ai_family) { + case AF_INET: { + struct ip_mreq grp; + memset(&grp, 0, sizeof(grp)); + grp.imr_multiaddr.s_addr = ((struct sockaddr_in *)(host->ai_addr))->sin_addr.s_addr; + grp.imr_interface.s_addr = INADDR_ANY; + + if (SOCKETFAIL(setsockopt(mcast_sock, IPPROTO_IP, IP_ADD_MEMBERSHIP, + (void *)(&grp), sizeof(grp)))) { + applog(LOG_ERR, "API mcast join failed (%s)%s", SOCKERRMSG, MUNAVAILABLE); + goto die; + } + break; + } + case AF_INET6: { + struct ipv6_mreq grp; + memcpy(&grp.ipv6mr_multiaddr, &(((struct sockaddr_in6 *)(host->ai_addr))->sin6_addr), + sizeof(struct in6_addr)); + grp.ipv6mr_interface= 0; + + if (SOCKETFAIL(setsockopt(mcast_sock, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, + (void *)(&grp), sizeof(grp)))) { + applog(LOG_ERR, "API mcast join failed (%s)%s", SOCKERRMSG, MUNAVAILABLE); + goto die; + } + break; + } + default: + break; + } + freeaddrinfo(res); + + expect_code_len = sizeof(expect) + strlen(opt_api_mcast_code); + expect_code = malloc(expect_code_len+1); + if (!expect_code) + quit(1, "Failed to malloc mcast expect_code"); + snprintf(expect_code, expect_code_len+1, "%s%s-", expect, opt_api_mcast_code); + + count = 0; + while (80085) { + cgsleep_ms(1000); + + count++; + came_from_siz = sizeof(came_from); + if (SOCKETFAIL(rep = recvfrom(mcast_sock, buf, sizeof(buf) - 1, + 0, (struct sockaddr *)(&came_from), &came_from_siz))) { + applog(LOG_DEBUG, "API mcast failed count=%d (%s) (%d)", + count, SOCKERRMSG, (int)mcast_sock); + continue; + } + + addrok = check_connect(&came_from, &connectaddr, 
&group); + applog(LOG_DEBUG, "API mcast from %s - %s", + connectaddr, addrok ? "Accepted" : "Ignored"); + if (!addrok) + continue; + + buf[rep] = '\0'; + if (rep > 0 && buf[rep-1] == '\n') + buf[--rep] = '\0'; + + getnameinfo((struct sockaddr *)(&came_from), came_from_siz, + NULL, 0, came_from_port, sizeof(came_from_port), NI_NUMERICHOST); + + applog(LOG_DEBUG, "API mcast request rep=%d (%s) from [%s]:%s", + (int)rep, buf, connectaddr, came_from_port); + + if ((size_t)rep > expect_code_len && memcmp(buf, expect_code, expect_code_len) == 0) { + reply_port = atoi(&buf[expect_code_len]); + if (reply_port < 1 || reply_port > 65535) { + applog(LOG_DEBUG, "API mcast request ignored - invalid port (%s)", + &buf[expect_code_len]); + } else { + applog(LOG_DEBUG, "API mcast request OK port %s=%d", + &buf[expect_code_len], reply_port); + + if (getaddrinfo(connectaddr, &buf[expect_code_len], &hints, &res) != 0) { + applog(LOG_ERR, "Invalid client address %s", connectaddr); + continue; + } + client = res; + while (client) { + reply_sock = socket(res->ai_family, SOCK_DGRAM, 0); + if (reply_sock > 0) + break; + client = client->ai_next; + } + if (reply_sock == INVSOCK) { + freeaddrinfo(res); + applog(LOG_ERR, "API mcast could not open socket to client %s", connectaddr); + continue; + } + + snprintf(replybuf, sizeof(replybuf), + "cgm-" API_MCAST_CODE "-%d-%s", + opt_api_port, opt_api_mcast_des); + + rep = sendto(reply_sock, replybuf, strlen(replybuf)+1, + 0, client->ai_addr, client->ai_addrlen); + freeaddrinfo(res); + if (SOCKETFAIL(rep)) { + applog(LOG_DEBUG, "API mcast send reply failed (%s) (%d)", + SOCKERRMSG, (int)reply_sock); + } else { + applog(LOG_DEBUG, "API mcast send reply (%s) succeeded (%d) (%d)", + replybuf, (int)rep, (int)reply_sock); + } + + CLOSESOCKET(reply_sock); + } + } else + applog(LOG_DEBUG, "API mcast request was no good"); + } + +die: + + CLOSESOCKET(mcast_sock); +} + +static void *mcast_thread(void *userdata) +{ + struct thr_info *mythr = userdata; + + 
pthread_detach(pthread_self()); + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + RenameThread("APIMcast"); + + mcast(); + + PTH(mythr) = 0L; + + return NULL; +} + +void mcast_init() +{ + struct thr_info *thr; + + thr = calloc(1, sizeof(*thr)); + if (!thr) + quit(1, "Failed to calloc mcast thr"); + + if (thr_info_create(thr, NULL, mcast_thread, thr)) + quit(1, "API mcast thread create failed"); +} + +void api(int api_thr_id) +{ + struct io_data *io_data; + struct thr_info bye_thr; + char buf[TMPBUFSIZ]; + char param_buf[TMPBUFSIZ]; + SOCKETTYPE c; + int n, bound; + char *connectaddr; + char *binderror; + time_t bindstart; + short int port = opt_api_port; + char port_s[10]; + struct sockaddr_storage cli; + socklen_t clisiz; + char cmdbuf[100]; + char *cmd = NULL; + char *param; + bool addrok; + char group; + json_error_t json_err; + json_t *json_config = NULL; + json_t *json_val; + bool isjson; + bool did, isjoin = false, firstjoin; + int i; + struct addrinfo hints, *res, *host; + + SOCKETTYPE *apisock; + + apisock = malloc(sizeof(*apisock)); + *apisock = INVSOCK; + + if (!opt_api_listen) { + applog(LOG_DEBUG, "API not running%s", UNAVAILABLE); + free(apisock); + return; + } + + io_data = sock_io_new(); + + mutex_init(&quit_restart_lock); + + pthread_cleanup_push(tidyup, (void *)apisock); + my_thr_id = api_thr_id; + + setup_groups(); + + if (opt_api_allow) { + setup_ipaccess(); + + if (ips == 0) { + applog(LOG_WARNING, "API not running (no valid IPs specified)%s", UNAVAILABLE); + free(apisock); + return; + } + } + + /* This should be done before curl in needed + * to ensure curl has already called WSAStartup() in windows */ + cgsleep_ms(opt_log_interval*1000); + + sprintf(port_s, "%d", port); + memset(&hints, 0, sizeof(hints)); + hints.ai_flags = AI_PASSIVE; + hints.ai_family = AF_UNSPEC; + if (getaddrinfo(opt_api_host, port_s, &hints, &res) != 0) { + applog(LOG_ERR, "API failed to resolve %s", opt_api_host); + free(apisock); + return; + } + host = 
res; + while (host) { + *apisock = socket(res->ai_family, SOCK_STREAM, 0); + if (*apisock > 0) + break; + host = host->ai_next; + } + if (*apisock == INVSOCK) { + applog(LOG_ERR, "API initialisation failed (%s)%s", SOCKERRMSG, UNAVAILABLE); + freeaddrinfo(res); + free(apisock); + return; + } + +#ifndef WIN32 + // On linux with SO_REUSEADDR, bind will get the port if the previous + // socket is closed (even if it is still in TIME_WAIT) but fail if + // another program has it open - which is what we want + int optval = 1; + // If it doesn't work, we don't really care - just show a debug message + if (SOCKETFAIL(setsockopt(*apisock, SOL_SOCKET, SO_REUSEADDR, (void *)(&optval), sizeof(optval)))) + applog(LOG_DEBUG, "API setsockopt SO_REUSEADDR failed (ignored): %s", SOCKERRMSG); +#else + // On windows a 2nd program can bind to a port>1024 already in use unless + // SO_EXCLUSIVEADDRUSE is used - however then the bind to a closed port + // in TIME_WAIT will fail until the timeout - so we leave the options alone +#endif + + // try for more than 1 minute ... 
in case the old one hasn't completely gone yet + bound = 0; + bindstart = time(NULL); + while (bound == 0) { + if (SOCKETFAIL(bind(*apisock, host->ai_addr, host->ai_addrlen))) { + binderror = SOCKERRMSG; + if ((time(NULL) - bindstart) > 61) + break; + else { + applog(LOG_WARNING, "API bind to port %d failed - trying again in 30sec", port); + cgsleep_ms(30000); + } + } else + bound = 1; + } + freeaddrinfo(res); + + if (bound == 0) { + applog(LOG_ERR, "API bind to port %d failed (%s)%s", port, binderror, UNAVAILABLE); + free(apisock); + return; + } + + if (SOCKETFAIL(listen(*apisock, QUEUE))) { + applog(LOG_ERR, "API3 initialisation failed (%s)%s", SOCKERRMSG, UNAVAILABLE); + CLOSESOCKET(*apisock); + free(apisock); + return; + } + + if (opt_api_allow) + applog(LOG_WARNING, "API running in IP access mode on port %d (%d)", port, (int)*apisock); + else { + if (opt_api_network) + applog(LOG_WARNING, "API running in UNRESTRICTED read access mode on port %d (%d)", port, (int)*apisock); + else + applog(LOG_WARNING, "API running in local read access mode on port %d (%d)", port, (int)*apisock); + } + + if (opt_api_mcast) + mcast_init(); + + strbufs = k_new_list("StrBufs", sizeof(SBITEM), ALLOC_SBITEMS, LIMIT_SBITEMS, false); + + while (!bye) { + clisiz = sizeof(cli); + if (SOCKETFAIL(c = accept(*apisock, (struct sockaddr *)(&cli), &clisiz))) { + applog(LOG_ERR, "API failed (%s)%s (%d)", SOCKERRMSG, UNAVAILABLE, (int)*apisock); + goto die; + } + + addrok = check_connect((struct sockaddr_storage *)&cli, &connectaddr, &group); + applog(LOG_DEBUG, "API: connection from %s - %s", + connectaddr, addrok ? 
"Accepted" : "Ignored"); + + if (addrok) { + n = recv(c, &buf[0], TMPBUFSIZ-1, 0); + if (SOCKETFAIL(n)) + buf[0] = '\0'; + else + buf[n] = '\0'; + + if (opt_debug) { + if (SOCKETFAIL(n)) + applog(LOG_DEBUG, "API: recv failed: %s", SOCKERRMSG); + else + applog(LOG_DEBUG, "API: recv command: (%d) '%s'", n, buf); + } + + if (!SOCKETFAIL(n)) { + // the time of the request in now + when = time(NULL); + io_reinit(io_data); + + did = false; + + if (*buf != ISJSON) { + isjson = false; + + param = strchr(buf, SEPARATOR); + if (param != NULL) + *(param++) = '\0'; + + cmd = buf; + } + else { + isjson = true; + + param = NULL; + + json_config = json_loadb(buf, n, 0, &json_err); + + if (!json_is_object(json_config)) { + message(io_data, MSG_INVJSON, 0, NULL, isjson); + send_result(io_data, c, isjson); + did = true; + } else { + json_val = json_object_get(json_config, JSON_COMMAND); + if (json_val == NULL) { + message(io_data, MSG_MISCMD, 0, NULL, isjson); + send_result(io_data, c, isjson); + did = true; + } else { + if (!json_is_string(json_val)) { + message(io_data, MSG_INVCMD, 0, NULL, isjson); + send_result(io_data, c, isjson); + did = true; + } else { + cmd = (char *)json_string_value(json_val); + json_val = json_object_get(json_config, JSON_PARAMETER); + if (json_is_string(json_val)) + param = (char *)json_string_value(json_val); + else if (json_is_integer(json_val)) { + sprintf(param_buf, "%d", (int)json_integer_value(json_val)); + param = param_buf; + } else if (json_is_real(json_val)) { + sprintf(param_buf, "%f", (double)json_real_value(json_val)); + param = param_buf; + } + } + } + } + } + + if (!did) { + char *cmdptr, *cmdsbuf = NULL; + + if (strchr(cmd, CMDJOIN)) { + firstjoin = isjoin = true; + // cmd + leading+tailing '|' + '\0' + cmdsbuf = malloc(strlen(cmd) + 3); + if (!cmdsbuf) + quithere(1, "OOM cmdsbuf"); + strcpy(cmdsbuf, "|"); + param = NULL; + } else + firstjoin = isjoin = false; + + cmdptr = cmd; + do { + did = false; + if (isjoin) { + cmd = strchr(cmdptr, 
CMDJOIN); + if (cmd) + *(cmd++) = '\0'; + if (!*cmdptr) + goto inochi; + } + + for (i = 0; cmds[i].name != NULL; i++) { + if (strcmp(cmdptr, cmds[i].name) == 0) { + sprintf(cmdbuf, "|%s|", cmdptr); + if (isjoin) { + if (strstr(cmdsbuf, cmdbuf)) { + did = true; + break; + } + strcat(cmdsbuf, cmdptr); + strcat(cmdsbuf, "|"); + head_join(io_data, cmdptr, isjson, &firstjoin); + if (!cmds[i].joinable) { + message(io_data, MSG_ACCDENY, 0, cmds[i].name, isjson); + did = true; + tail_join(io_data, isjson); + break; + } + } + if (ISPRIVGROUP(group) || strstr(COMMANDS(group), cmdbuf)) + (cmds[i].func)(io_data, c, param, isjson, group); + else { + message(io_data, MSG_ACCDENY, 0, cmds[i].name, isjson); + applog(LOG_DEBUG, "API: access denied to '%s' for '%s' command", connectaddr, cmds[i].name); + } + + did = true; + if (!isjoin) + send_result(io_data, c, isjson); + else + tail_join(io_data, isjson); + break; + } + } + + if (!did) { + if (isjoin) + head_join(io_data, cmdptr, isjson, &firstjoin); + message(io_data, MSG_INVCMD, 0, NULL, isjson); + if (isjoin) + tail_join(io_data, isjson); + else + send_result(io_data, c, isjson); + } +inochi: + if (isjoin) + cmdptr = cmd; + } while (isjoin && cmdptr); + } + + if (isjoin) + send_result(io_data, c, isjson); + + if (isjson && json_is_object(json_config)) + json_decref(json_config); + } + } + CLOSESOCKET(c); + } +die: + /* Blank line fix for older compilers since pthread_cleanup_pop is a + * macro that gets confused by a label existing immediately before it + */ + ; + pthread_cleanup_pop(true); + + free(apisock); + + if (opt_debug) + applog(LOG_DEBUG, "API: terminating due to: %s", + do_a_quit ? "QUIT" : (do_a_restart ? "RESTART" : (bye ? 
"BYE" : "UNKNOWN!"))); + + mutex_lock(&quit_restart_lock); + + if (do_a_restart) { + if (thr_info_create(&bye_thr, NULL, restart_thread, &bye_thr)) { + mutex_unlock(&quit_restart_lock); + quit(1, "API failed to initiate a restart - aborting"); + } + pthread_detach(bye_thr.pth); + } else if (do_a_quit) { + if (thr_info_create(&bye_thr, NULL, quit_thread, &bye_thr)) { + mutex_unlock(&quit_restart_lock); + quit(1, "API failed to initiate a clean quit - aborting"); + } + pthread_detach(bye_thr.pth); + } + + mutex_unlock(&quit_restart_lock); +} diff --git a/arg-nonnull.h b/arg-nonnull.h new file mode 100644 index 0000000..6c2f1e8 --- /dev/null +++ b/arg-nonnull.h @@ -0,0 +1,26 @@ +/* A C macro for declaring that specific arguments must not be NULL. + Copyright (C) 2009-2011 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +/* _GL_ARG_NONNULL((n,...,m)) tells the compiler and static analyzer tools + that the values passed as arguments n, ..., m must be non-NULL pointers. + n = 1 stands for the first argument, n = 2 for the second argument etc. 
*/ +#ifndef _GL_ARG_NONNULL +# if (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) || __GNUC__ > 3 +# define _GL_ARG_NONNULL(params) __attribute__ ((__nonnull__ params)) +# else +# define _GL_ARG_NONNULL(params) +# endif +#endif diff --git a/autogen.sh b/autogen.sh new file mode 100644 index 0000000..cae99ba --- /dev/null +++ b/autogen.sh @@ -0,0 +1,11 @@ +#!/bin/sh +bs_dir=$(cd "$(dirname "$0")"; pwd) + +#Some versions of libtoolize don't like there being no ltmain.sh file already +touch "${bs_dir}"/ltmain.sh +autoreconf -fi "${bs_dir}" + +if test -n "$1" && test -z "$NOCONFIGURE" ; then + echo 'Configuring...' + "$bs_dir"/configure "$@" +fi diff --git a/bench_block.h b/bench_block.h new file mode 100644 index 0000000..729a95e --- /dev/null +++ b/bench_block.h @@ -0,0 +1,170 @@ +#ifndef __BENCH_BLOCK_H__ +#define __BENCH_BLOCK_H__ + +/* This contains 32 carefully chosen work items, 16 of which return diff >= 32 + * at nonces spaced ~ 0x10000000 apart and 16 < diff 32. */ + +const char bench_hidiffs[16][324] = { +// 0002108b diff 131 +"000000029c6bf469abe4ad37605c097a860cff3cf5c1ef4377618f74000000000000000082b1514e7b6565941e5824f084292164ec5f97e7ea20c494bd96e524d478977b536dd2261900896c8b100200" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"64e4e3becc01064d808269b330f40f4de82dc92e894d635025daa3e2e2c410b4", + +// 1003dacf diff 37 +"00000002e790c23987181950eeb144591c3ac4d06c0705f2801d097600000000000000009ebbce2f5f0d6cc0aca284ecb1059c856ef2f7f42e7edd403d246754ee4c905a536dd2a91900896ccfda0310" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"4a78daf1b5eb3397af1c00dbd9b06659cdc04183c8baaf5be1dbf32f79e00459", + +// 200e57b4 diff 3866 +"000000023e91fce7300a792bfbaa0c76e1aa5f9b546c1db582aee4ff0000000000000000f04650a8e748d2e6fde86a8a920b285f3e22398f583700236958323ef9ea8321536dcf431900896cb4570e20" 
+"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"0a1d654ae2b06f219ccf4601933fab408de1c3b7c8c9c85e03231d4aaf5a26cd", + +// 300f71e2 diff 335 +"000000023e91fce7300a792bfbaa0c76e1aa5f9b546c1db582aee4ff000000000000000074b39134c2930d2f2e7339f9d502c776c44d6ee599f7efebec6c9bbd04787aae536dce561900896ce2710f30" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"94e60c1180022f337232ab3d298f838304b6008ab237cf7e1717f1933407e592", + +// 400548ed diff 2670 +"000000023e91fce7300a792bfbaa0c76e1aa5f9b546c1db582aee4ff0000000000000000c5b821fb0b26d63b00cc26e7ac4d6cfd1d3fc109b0db188e7e792e3d18342919536dce501900896ced480540" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"a290eac61642949c00d17f7cd5980abedb8647fc5df9955dcfe4d56a50a0c564", + +// 5001f760 diff 60 +"00000002e790c23987181950eeb144591c3ac4d06c0705f2801d097600000000000000006e9d94bf5a0ab7b202d39e1200af96074e4f641f4e55e3e9e3aee72aa00a70e9536dd2ae1900896c60f70150" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"1477ca8536702eacbd65a6a162cfe90d62016a14ffe58d52b7dd4c3628a27e5b", + +// 600c9816 diff 35 +"00000002194bb5b4f8ac3392fbd66f3dd3e9dcdb22370e380837fe44000000000000000003bbb250f2dc23717e8192c0b8bec6a175cd059e4089d325006eaee3446254c9536dd39e1900896c16980c60" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"68db599d6b7a55fd61d4244a3dfa465055ead6b5c0a37c7a3d4555b58e99065e", + +// 70092d5f diff 114 +"000000023e91fce7300a792bfbaa0c76e1aa5f9b546c1db582aee4ff000000000000000072e17babd4089b204797cebda7dc6e277950eab1b2908991ae1d72335f82d204536dcf441900896c5f2d0970" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"c7d601ce3b01e569a49508d541bbcba9b3c8394b1834523ef1e5cb2c60bd34a3", + +// 800eeaa8 diff 159 
+"000000029c6bf469abe4ad37605c097a860cff3cf5c1ef4377618f74000000000000000022388b6f022144db134af1bc8e61b385ca37cae038c1d165ae98c496b3b41e8b536dd2101900896ca8ea0e80" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"410761e97e67b494fd547cfe9ffbb36893da7aec75c6b51b8d5f38f87b5d63cf", + +// 900f600d diff 144 +"000000029c6bf469abe4ad37605c097a860cff3cf5c1ef4377618f7400000000000000000e1f0cfdf5ad8248fc4520f3bb0b2040226430348cddeff5ca9181beeb78870d536dd2161900896c0d600f90" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"ad1a8d354a7e8b13ec47f4c3d907d00945a61e86059f4943e42c1e52398eba5d", + +// a00210bf diff 1055 +"00000002194bb5b4f8ac3392fbd66f3dd3e9dcdb22370e380837fe4400000000000000002232a16d38cc0e13e4b16d917bff4c34727deb3b5c50e424fb8453ff9b2adcb4536dd4231900896cbf1002a0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"adc67d38f31f589b18b9d8e531b994ce5733c021a03d88d38611ee6b4c2710a5", + +// b004309d diff 43 +"00000002194bb5b4f8ac3392fbd66f3dd3e9dcdb22370e380837fe440000000000000000a2860471277b4a93fea2a8b6d8c281fab7bde3b78f2acd1bfdc89d464ed3bb3c536dd35e1900896c9d3004b0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"537686c611aae4397c7c04b2c190708453d00e8c9563525610c31ba46e80dbc2", + +// c00b7537 diff 64 +"00000002194bb5b4f8ac3392fbd66f3dd3e9dcdb22370e380837fe440000000000000000f370230607998fbbd10275c5890885fcd81b68018ba2373abf0f93a06d02ab28536dd33e1900896c37750bc0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"1fdda952da6abd70022a6e5f2b9dc5e1b66011128c3fa249f0b7439f00d5943e", + +// d0005bd5 diff 1539 +"00000002c0a2c91fc41254539a5b2a27be28de2a6187e2af3f129d6300000000000000005e45ffc512d5ca3bc4d2063dd3af1669c296ae126a5a2ef896d1e190cedf67b9536dd46b1900896cd55b00d0" 
+"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"07094d6cbe76538a88612624fc5e655cc405cb8198dcad516b88dbac5bf8b906", + +// e00a7796 diff 41 +"00000002194bb5b4f8ac3392fbd66f3dd3e9dcdb22370e380837fe44000000000000000027c548815127c125147af91c356c293f0defbd2771f8dc3b1142b367528656db536dd37c1900896c96770ae0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"1bdbb3b1be7216872ea787627b03c389a527451f6dd832d8540874306f9c07c6", + +// f001f029 diff 77 +"00000002194bb5b4f8ac3392fbd66f3dd3e9dcdb22370e380837fe440000000000000000adef758770bb90c5b13769c5b61affb322b24c747573b38ebe2ee81748d0b557536dd4071900896c29f001f0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"1ac8eea63285353944e40eec54d2dd6cd0994b447429bb0ed0598d38f42da0e2" +}; + +const char bench_lodiffs[16][324] = { +// 000ed6b6 diff 2 +"00000002c01f502cb3e9fdb053230ec12a4954c1021a6b35862b5e29000000000000000084d1b83ae44057025e8c5b5756b44f04df5fffe4a7a30e5c12d12a97a7a4c2ea536dce431900896cb6d60e00" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"d08f7e14c50dad77dc238b4db2901a0578e657b1954779ab9cd82a73829edf7f", + +// 1000818f diff 5 +"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a44630000000000000000c1a174254a6593ffba987f68fe26e716e3c129a7f33a9c43ae7ecf90c8cd0d2c536dc4e61900896c8f810010" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"6700aeedada2b3877900b58a183c42c40949956bb8b4a8d21481f8936b572922", + +// 20006be9 diff 7 +"00000002138cf4b61dff74e3c26b2d80045064e8ab4802521bab2cda000000000000000071eef64a7ef4e47cda16e96673197d36c7235a4aadd23c21a38ce53827d1f8bc536dc4d71900896ce96b0020" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"0730cf7a6b8a85eb1cc017b109d23c392464f99aa8c020ea107c525b671adde0", + +// 
300029f4 diff 2 +"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a44630000000000000000fe2c6b926468565e524ab7c2f111035dcde7c60955842111930589eccb410f83536dc66b1900896cf4290030" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"27dbb374a97f15c59587256662f36904d075d0e61f749618182711288ac617c7", + +// 40001d82 diff 2 +"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a4463000000000000000003073385e05c29f0435a6001c8eca9c8d5602890aeff9d4d103d3383cf80dae5536dc57c1900896c821d0040" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"7da3b97e82c0c3125a58dad8a0d1d0369244731f3b096e972484298d15b843d9", + +// 50003ce6 diff 1 +"000000029ca55e5f1bc0328c84f358fddadc13cb232599bc2ca9dbe10000000000000000b5b4d19c20a7fc2b174ff673c006edd2247c4b2336571864df93eb7ec0c8c276536dfe041900896ce63c0050" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"1514bd586511e531e2b6277a6d112b171f9e008d56ef4a971e619acf22e75072", + +// 60004314 diff 2 +"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a446300000000000000003e3030629ff4258056dc9efaf922bd173a65f65ee799b0c765097d3deeddef10536dc4d81900896c14430060" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"dcb77a9c36d894d2dbc31437e5c2a1564e927937848ea2eb20b38638afc64b96", + +// 700041d4 diff 7 +"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a446300000000000000005513c22bb99e9daa9936b0df5dce64d7737e3706be99e5098d112002492cf81b536dc5691900896cd4410070" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"0d896267fda5dda0f85308e77f754c8b94b7b88e3cb315475cd9efd16401e3ce", + +// 80009d99 diff 1 +"00000002e155f07e652e4d671ca4db51bbde14d2b5ae34ee67ecc74400000000000000004af5cffd7e5a7087f1b484b526c7350c86d8389283509ca878502f792115e8dc536dc6ad1900896c999d0080" 
+"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"339354568f506ac3cd69bb427b1af83a0473b87c16bf3b562a93d0a2ffc53e54", + +// 9000fb14 diff 4 +"000000029ca55e5f1bc0328c84f358fddadc13cb232599bc2ca9dbe100000000000000005925a624e5c84f96d2c34dce3b6a736addb891724b48a36320c7494435f9c915536dfe621900896c14fb0090" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"8362009c07cf48249f481be6b79e67247cab1d20050cf11c276085b90732110c", + +// a000eb5e diff 2 +"00000002e155f07e652e4d671ca4db51bbde14d2b5ae34ee67ecc74400000000000000001e69f1d6507f4b7b50980930f7d8089834fbe65f0980b8592d53cdda08e50d24536dc7da1900896c5eeb00a0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"21e4f654d26ab8c9164ff311657a9f9c4cdc0e8a09334925f7c02138819d7e61", + +// b0002ec5 diff 2 +"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a4463000000000000000064923b63f53c72c04ebe6c1c9140b6377132b6e50865814fe562291bd023d348536dc65a1900896cc52e00b0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"55db91a25401a89daf9ff7d7954bab722b894ba480fefaf1f0a95aaf5f600567", + +// c0001f6e diff 2 +"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a44630000000000000000ee9817160e35d4410601c8dc741c1a810c485f3b40a0859be5f58f0bf6ef1694536dc6321900896c6e1f00c0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"ae215785178ff6350064060ebbb219a71716a10e88528fc4bb1cb5c8fdd0cf60", + +// d0005f26 diff 7 +"000000029ca55e5f1bc0328c84f358fddadc13cb232599bc2ca9dbe100000000000000001e514cf738455a54f004ec86edafcfd9fd2022017bb31c245340353911744fb7536dfe1f1900896c265f00d0" +"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" +"dcafaa86defe850b057ae74f7218a79b0ede086a196f18f0e7c585eb88d1139a", + +// e0008993 diff 2 
+"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a446300000000000000005edbd53fcc64850b5334678199d769514818fbcc79861fc77e572bb4753b7fe2536dc5d91900896c938900e0"
+"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000"
+"5e653df5956ece518a78a5d11297431af94ce8ba91d80cfb2aa8c5b3095fa256",
+
+// f000709e diff 1
+"000000023bf53ef343a50f7599601f849c93ecce63530b0b449a44630000000000000000596fc4aa5da839ba267c36aa1a5b29d813747b2273dc03aa9e404c4da0238e2b536dc4cc1900896c9e7000f0"
+"000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000"
+"0e23806a533bd956787eef52dd8edee456c60d6cecbb6175458ee53fc8c6c813"
+};
+#endif /* __BENCH_BLOCK_H__ */
diff --git a/bitforce-firmware-flash.c b/bitforce-firmware-flash.c
new file mode 100644
index 0000000..b4f6aca
--- /dev/null
+++ b/bitforce-firmware-flash.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Luke Dashjr
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your option)
+ * any later version. See COPYING for more details.
+ */
+
+#define _BSD_SOURCE
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <libgen.h>
+#include <arpa/inet.h>
+
+#define BFL_FILE_MAGIC "BFLDATA"
+#define BFL_UPLOAD_MAGIC "NGH-STREAM"
+
+#define myassert(expr, n, ...)  \
+do {  \
+	if (!(expr)) {  \
+		fprintf(stderr, __VA_ARGS__);  \
+		return n;  \
+	}  \
+} while(0)
+
+#define ERRRESP(buf) buf, (buf[strlen(buf)-1] == '\n' ? "" : "\n")
+
+#define WAITFOROK(n, msg)  \
+do {  \
+	myassert(fgets(buf, sizeof(buf), BFL), n, "Error reading response from " msg "\n");  \
+	myassert(!strcmp(buf, "OK\n"), n, "Invalid response from " msg ": %s%s", ERRRESP(buf));  \
+} while(0)
+
+int main(int argc, char**argv)
+{
+	myassert(argc == 3, 1, "Usage: %s <device> <firmware.bfl>\n", argv[0]);
+	setbuf(stdout, NULL);
+
+	// Check filename
+	char *FWname = basename(strdup(argv[2]));
+	size_t FWnameLen = strlen(FWname);
+	myassert(FWnameLen <= 255, 0x0f, "Firmware filename '%s' is too long\n", FWname);
+	uint8_t n8 = FWnameLen;
+
+	// Open and check firmware file
+	FILE *FW = fopen(argv[2], "r");
+	myassert(FW, 0x10, "Failed to open '%s' for reading\n", argv[2]);
+	char buf[0x20];
+	myassert(1 == fread(buf, 7, 1, FW), 0x10, "Failed to read from '%s'\n", argv[2]);
+	myassert(!memcmp(buf, BFL_FILE_MAGIC, sizeof(BFL_FILE_MAGIC)-1), 0x11, "'%s' doesn't look like a BFL firmware\n", argv[2]);
+	myassert(!fseek(FW, 0, SEEK_END), 0x12, "Failed to find end of '%s'\n", argv[2]);
+	long FWlen = ftell(FW);
+	myassert(FWlen > 0, 0x12, "Couldn't get size of '%s'\n", argv[2]);
+	myassert(!fseek(FW, 7, SEEK_SET), 0x12, "Failed to rewind firmware file after getting size\n");
+	FWlen -= 7;
+	printf("Firmware file looks OK :)\n");
+
+	// Open device
+	FILE *BFL = fopen(argv[1], "r+");
+	myassert(BFL, 0x20, "Failed to open '%s' for read/write\n", argv[1]);
+	myassert(!setvbuf(BFL, NULL, _IOFBF, 1032), 0x21, "Failed to setup buffer for device");
+
+	// ZAX: Start firmware upload
+	printf("Starting firmware upload... ");
+	myassert(1 == fwrite("ZAX", 3, 1, BFL), 0x22, "Failed to issue ZAX command\n");
+	WAITFOROK(0x22, "ZAX");
+
+	// Firmware upload header
+	myassert(1 == fwrite(BFL_UPLOAD_MAGIC, sizeof(BFL_UPLOAD_MAGIC)-1, 1, BFL), 0x23, "Failed to send firmware upload header (magic)\n");
+	uint32_t n32 = htonl(FWlen - FWlen / 6);
+	myassert(1 == fwrite(&n32, sizeof(n32), 1, BFL), 0x23, "Failed to send firmware upload header (size)\n");
+	myassert(1 == fwrite("\0\0", 2, 1, BFL), 0x23, "Failed to send firmware upload header (padding 1)\n");
+	myassert(1 == fwrite(&n8, sizeof(n8), 1, BFL), 0x23, "Failed to send firmware upload header (filename length)\n");
+	myassert(1 == fwrite(FWname, n8, 1, BFL), 0x23, "Failed to send firmware upload header (filename)\n");
+	myassert(1 == fwrite("\0>>>>>>>>", 9, 1, BFL), 0x23, "Failed to send firmware upload header (padding 2)\n");
+	WAITFOROK(0x23, "firmware upload header");
+	printf("OK, sending...\n");
+
+	// Actual firmware upload
+	long i, j;
+	for (i = 0, j = 0; i < FWlen; ++i) {
+		myassert(1 == fread(&n8, sizeof(n8), 1, FW), 0x30, "Error reading data from firmware file\n");
+		if (5 == i % 6)
+			continue;
+		n8 ^= 0x2f;
+		myassert(1 == fwrite(&n8, sizeof(n8), 1, BFL), 0x31, "Error sending data to device\n");
+		if (!(++j % 0x400)) {
+			myassert(1 == fwrite(">>>>>>>>", 8, 1, BFL), 0x32, "Error sending block-finish to device\n");
+			printf("\r%5.2f%% complete", (double)i * 100. / (double)FWlen);
+			WAITFOROK(0x32, "block-finish");
+		}
+	}
+	printf("\r100%% complete :)\n");
+	myassert(1 == fwrite(">>>>>>>>", 8, 1, BFL), 0x3f, "Error sending upload-finished to device\n");
+	myassert(fgets(buf, sizeof(buf), BFL), 0x3f, "Error reading response from upload-finished\n");
+	myassert(!strcmp(buf, "DONE\n"), 0x3f, "Invalid response from upload-finished: %s%s", ERRRESP(buf));
+
+	// ZBX: Finish programming
+	printf("Waiting for device... ");
+	myassert(1 == fwrite("ZBX", 3, 1, BFL), 0x40, "Failed to issue ZBX command\n");
+	WAITFOROK(0x40, "ZBX");
+	printf("All done! Try mining to test the flash succeeded.\n");
+
+	return 0;
+}
diff --git a/bitmain-readme.txt b/bitmain-readme.txt
new file mode 100644
index 0000000..b3e6e5a
--- /dev/null
+++ b/bitmain-readme.txt
@@ -0,0 +1,58 @@
+######################################################################################
+#                                                                                    #
+#           BitMain setup and build instructions (on mingw32/Windows):               #
+#                                                                                    #
+######################################################################################
+
+**************************************************************************************
+* Build cgminer.exe                                                                  *
+**************************************************************************************
+Run the MinGW MSYS shell
+(Start Icon/keyboard key ==> All Programs ==> MinGW ==> MinGW Shell).
+Change the working directory to your CGMiner project folder.
+Example: cd cgminer-2.1.2 [Enter Key] if you are unsure then type "ls -la"
+Another way is to type "cd cg" and then press the tab key; It will auto fill.
+Type the lines below one at a time. Look for problems after each one before going on
+to the next.
+ + adl.sh (optional - see below) + autoreconf -fvi + CFLAGS="-O2 -msse2" ./configure (additional config options, see below) + make + strip cgminer.exe <== only do this if you are not compiling for debugging + +For bitmain mode: + autoreconf -fvi + CFLAGS="-O2 -msse2" ./configure --enable-bmsc + make + +************************************************************************************** +* Some ./configure options * +************************************************************************************** +--enable-cpumining Build with cpu mining support(default disabled) +--disable-opencl Override detection and disable building with opencl +--disable-adl Override detection and disable building with adl +--enable-bitforce Compile support for BitForce FPGAs(default disabled) +--enable-icarus Compile support for Icarus Board(default disabled) +--enable-bitmain Compile support for BitMain Devices(default disabled) +--enable-modminer Compile support for ModMiner FPGAs(default disabled) +--enable-ztex Compile support for Ztex Board(default disabled) +--enable-scrypt Compile support for scrypt litecoin mining (default disabled) +--without-curses Compile support for curses TUI (default enabled) +--without-libudev Autodetect FPGAs using libudev (default enabled) +--enable-forcecombo Allow combinations of drivers not intended to be built together(default disabled) + +************************************************************************************** +* Run cgminer for bitmain mode * +************************************************************************************** +BitMain options: +--bitmain-options baud:miner_count:asic_count:timeout:frequency + +For example: +cgminer --bitmain-options 115200:24:10:30:300 -o http://stratum.btcguild.com:3333 -u xlc1985_1 -p abc123456 -D + +###################################################################################### +# # +# BitMain setup and build instructions (on mingw32/Windows) complete # +# # 
+###################################################################################### diff --git a/bitstreams/COPYING_fpgaminer b/bitstreams/COPYING_fpgaminer new file mode 100644 index 0000000..9db2c5f --- /dev/null +++ b/bitstreams/COPYING_fpgaminer @@ -0,0 +1,23 @@ +All the bitstream files included in this directory that follow the name pattern fpgaminer_*.ncd are: + +---- + +Copyright (c) 2011-2012 fpgaminer@bitcoin-mining.com + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +---- + +You can find the original sources at the Open Source FPGA Bitcoin Miner project GitHub repository: +https://github.com/progranism/Open-Source-FPGA-Bitcoin-Miner/tree/master/projects/X6000_ztex_comm4/hdl diff --git a/bitstreams/README b/bitstreams/README new file mode 100644 index 0000000..a00d01a --- /dev/null +++ b/bitstreams/README @@ -0,0 +1 @@ +You must put the file fpgaminer_top_fixed7_197MHz.ncd in here for modminer to work. \ No newline at end of file diff --git a/bitstreams/fpgaminer_top_fixed7_197MHz.ncd b/bitstreams/fpgaminer_top_fixed7_197MHz.ncd new file mode 100644 index 0000000..1df4e1d Binary files /dev/null and b/bitstreams/fpgaminer_top_fixed7_197MHz.ncd differ diff --git a/c++defs.h b/c++defs.h new file mode 100644 index 0000000..b6821a6 --- /dev/null +++ b/c++defs.h @@ -0,0 +1,271 @@ +/* C++ compatible function declaration macros. + Copyright (C) 2010-2011 Free Software Foundation, Inc. 
+ + This program is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#ifndef _GL_CXXDEFS_H +#define _GL_CXXDEFS_H + +/* The three most frequent use cases of these macros are: + + * For providing a substitute for a function that is missing on some + platforms, but is declared and works fine on the platforms on which + it exists: + + #if @GNULIB_FOO@ + # if !@HAVE_FOO@ + _GL_FUNCDECL_SYS (foo, ...); + # endif + _GL_CXXALIAS_SYS (foo, ...); + _GL_CXXALIASWARN (foo); + #elif defined GNULIB_POSIXCHECK + ... + #endif + + * For providing a replacement for a function that exists on all platforms, + but is broken/insufficient and needs to be replaced on some platforms: + + #if @GNULIB_FOO@ + # if @REPLACE_FOO@ + # if !(defined __cplusplus && defined GNULIB_NAMESPACE) + # undef foo + # define foo rpl_foo + # endif + _GL_FUNCDECL_RPL (foo, ...); + _GL_CXXALIAS_RPL (foo, ...); + # else + _GL_CXXALIAS_SYS (foo, ...); + # endif + _GL_CXXALIASWARN (foo); + #elif defined GNULIB_POSIXCHECK + ... 
+ #endif + + * For providing a replacement for a function that exists on some platforms + but is broken/insufficient and needs to be replaced on some of them and + is additionally either missing or undeclared on some other platforms: + + #if @GNULIB_FOO@ + # if @REPLACE_FOO@ + # if !(defined __cplusplus && defined GNULIB_NAMESPACE) + # undef foo + # define foo rpl_foo + # endif + _GL_FUNCDECL_RPL (foo, ...); + _GL_CXXALIAS_RPL (foo, ...); + # else + # if !@HAVE_FOO@ or if !@HAVE_DECL_FOO@ + _GL_FUNCDECL_SYS (foo, ...); + # endif + _GL_CXXALIAS_SYS (foo, ...); + # endif + _GL_CXXALIASWARN (foo); + #elif defined GNULIB_POSIXCHECK + ... + #endif +*/ + +/* _GL_EXTERN_C declaration; + performs the declaration with C linkage. */ +#if defined __cplusplus +# define _GL_EXTERN_C extern "C" +#else +# define _GL_EXTERN_C extern +#endif + +/* _GL_FUNCDECL_RPL (func, rettype, parameters_and_attributes); + declares a replacement function, named rpl_func, with the given prototype, + consisting of return type, parameters, and attributes. + Example: + _GL_FUNCDECL_RPL (open, int, (const char *filename, int flags, ...) + _GL_ARG_NONNULL ((1))); + */ +#define _GL_FUNCDECL_RPL(func,rettype,parameters_and_attributes) \ + _GL_FUNCDECL_RPL_1 (rpl_##func, rettype, parameters_and_attributes) +#define _GL_FUNCDECL_RPL_1(rpl_func,rettype,parameters_and_attributes) \ + _GL_EXTERN_C rettype rpl_func parameters_and_attributes + +/* _GL_FUNCDECL_SYS (func, rettype, parameters_and_attributes); + declares the system function, named func, with the given prototype, + consisting of return type, parameters, and attributes. + Example: + _GL_FUNCDECL_SYS (open, int, (const char *filename, int flags, ...) 
+ _GL_ARG_NONNULL ((1))); + */ +#define _GL_FUNCDECL_SYS(func,rettype,parameters_and_attributes) \ + _GL_EXTERN_C rettype func parameters_and_attributes + +/* _GL_CXXALIAS_RPL (func, rettype, parameters); + declares a C++ alias called GNULIB_NAMESPACE::func + that redirects to rpl_func, if GNULIB_NAMESPACE is defined. + Example: + _GL_CXXALIAS_RPL (open, int, (const char *filename, int flags, ...)); + */ +#define _GL_CXXALIAS_RPL(func,rettype,parameters) \ + _GL_CXXALIAS_RPL_1 (func, rpl_##func, rettype, parameters) +#if defined __cplusplus && defined GNULIB_NAMESPACE +# define _GL_CXXALIAS_RPL_1(func,rpl_func,rettype,parameters) \ + namespace GNULIB_NAMESPACE \ + { \ + rettype (*const func) parameters = ::rpl_func; \ + } \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#else +# define _GL_CXXALIAS_RPL_1(func,rpl_func,rettype,parameters) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#endif + +/* _GL_CXXALIAS_RPL_CAST_1 (func, rpl_func, rettype, parameters); + is like _GL_CXXALIAS_RPL_1 (func, rpl_func, rettype, parameters); + except that the C function rpl_func may have a slightly different + declaration. A cast is used to silence the "invalid conversion" error + that would otherwise occur. */ +#if defined __cplusplus && defined GNULIB_NAMESPACE +# define _GL_CXXALIAS_RPL_CAST_1(func,rpl_func,rettype,parameters) \ + namespace GNULIB_NAMESPACE \ + { \ + rettype (*const func) parameters = \ + reinterpret_cast(::rpl_func); \ + } \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#else +# define _GL_CXXALIAS_RPL_CAST_1(func,rpl_func,rettype,parameters) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#endif + +/* _GL_CXXALIAS_SYS (func, rettype, parameters); + declares a C++ alias called GNULIB_NAMESPACE::func + that redirects to the system provided function func, if GNULIB_NAMESPACE + is defined. 
+ Example: + _GL_CXXALIAS_SYS (open, int, (const char *filename, int flags, ...)); + */ +#if defined __cplusplus && defined GNULIB_NAMESPACE + /* If we were to write + rettype (*const func) parameters = ::func; + like above in _GL_CXXALIAS_RPL_1, the compiler could optimize calls + better (remove an indirection through a 'static' pointer variable), + but then the _GL_CXXALIASWARN macro below would cause a warning not only + for uses of ::func but also for uses of GNULIB_NAMESPACE::func. */ +# define _GL_CXXALIAS_SYS(func,rettype,parameters) \ + namespace GNULIB_NAMESPACE \ + { \ + static rettype (*func) parameters = ::func; \ + } \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#else +# define _GL_CXXALIAS_SYS(func,rettype,parameters) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#endif + +/* _GL_CXXALIAS_SYS_CAST (func, rettype, parameters); + is like _GL_CXXALIAS_SYS (func, rettype, parameters); + except that the C function func may have a slightly different declaration. + A cast is used to silence the "invalid conversion" error that would + otherwise occur. */ +#if defined __cplusplus && defined GNULIB_NAMESPACE +# define _GL_CXXALIAS_SYS_CAST(func,rettype,parameters) \ + namespace GNULIB_NAMESPACE \ + { \ + static rettype (*func) parameters = \ + reinterpret_cast(::func); \ + } \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#else +# define _GL_CXXALIAS_SYS_CAST(func,rettype,parameters) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#endif + +/* _GL_CXXALIAS_SYS_CAST2 (func, rettype, parameters, rettype2, parameters2); + is like _GL_CXXALIAS_SYS (func, rettype, parameters); + except that the C function is picked among a set of overloaded functions, + namely the one with rettype2 and parameters2. Two consecutive casts + are used to silence the "cannot find a match" and "invalid conversion" + errors that would otherwise occur. */ +#if defined __cplusplus && defined GNULIB_NAMESPACE + /* The outer cast must be a reinterpret_cast. 
+ The inner cast: When the function is defined as a set of overloaded + functions, it works as a static_cast<>, choosing the designated variant. + When the function is defined as a single variant, it works as a + reinterpret_cast<>. The parenthesized cast syntax works both ways. */ +# define _GL_CXXALIAS_SYS_CAST2(func,rettype,parameters,rettype2,parameters2) \ + namespace GNULIB_NAMESPACE \ + { \ + static rettype (*func) parameters = \ + reinterpret_cast( \ + (rettype2(*)parameters2)(::func)); \ + } \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#else +# define _GL_CXXALIAS_SYS_CAST2(func,rettype,parameters,rettype2,parameters2) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#endif + +/* _GL_CXXALIASWARN (func); + causes a warning to be emitted when ::func is used but not when + GNULIB_NAMESPACE::func is used. func must be defined without overloaded + variants. */ +#if defined __cplusplus && defined GNULIB_NAMESPACE +# define _GL_CXXALIASWARN(func) \ + _GL_CXXALIASWARN_1 (func, GNULIB_NAMESPACE) +# define _GL_CXXALIASWARN_1(func,namespace) \ + _GL_CXXALIASWARN_2 (func, namespace) +/* To work around GCC bug , + we enable the warning only when not optimizing. */ +# if !__OPTIMIZE__ +# define _GL_CXXALIASWARN_2(func,namespace) \ + _GL_WARN_ON_USE (func, \ + "The symbol ::" #func " refers to the system function. " \ + "Use " #namespace "::" #func " instead.") +# elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING +# define _GL_CXXALIASWARN_2(func,namespace) \ + extern __typeof__ (func) func +# else +# define _GL_CXXALIASWARN_2(func,namespace) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +# endif +#else +# define _GL_CXXALIASWARN(func) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#endif + +/* _GL_CXXALIASWARN1 (func, rettype, parameters_and_attributes); + causes a warning to be emitted when the given overloaded variant of ::func + is used but not when GNULIB_NAMESPACE::func is used. 
*/ +#if defined __cplusplus && defined GNULIB_NAMESPACE +# define _GL_CXXALIASWARN1(func,rettype,parameters_and_attributes) \ + _GL_CXXALIASWARN1_1 (func, rettype, parameters_and_attributes, \ + GNULIB_NAMESPACE) +# define _GL_CXXALIASWARN1_1(func,rettype,parameters_and_attributes,namespace) \ + _GL_CXXALIASWARN1_2 (func, rettype, parameters_and_attributes, namespace) +/* To work around GCC bug , + we enable the warning only when not optimizing. */ +# if !__OPTIMIZE__ +# define _GL_CXXALIASWARN1_2(func,rettype,parameters_and_attributes,namespace) \ + _GL_WARN_ON_USE_CXX (func, rettype, parameters_and_attributes, \ + "The symbol ::" #func " refers to the system function. " \ + "Use " #namespace "::" #func " instead.") +# elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING +# define _GL_CXXALIASWARN1_2(func,rettype,parameters_and_attributes,namespace) \ + extern __typeof__ (func) func +# else +# define _GL_CXXALIASWARN1_2(func,rettype,parameters_and_attributes,namespace) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +# endif +#else +# define _GL_CXXALIASWARN1(func,rettype,parameters_and_attributes) \ + _GL_EXTERN_C int _gl_cxxalias_dummy +#endif + +#endif /* _GL_CXXDEFS_H */ diff --git a/ccan/Makefile.am b/ccan/Makefile.am new file mode 100644 index 0000000..a667df3 --- /dev/null +++ b/ccan/Makefile.am @@ -0,0 +1,4 @@ +noinst_LIBRARIES = libccan.a + +libccan_a_SOURCES = compiler/compiler.h opt/helpers.c opt/opt.c opt/opt.h opt/parse.c opt/private.h opt/usage.c typesafe_cb/typesafe_cb.h +libccan_a_CPPFLAGS = -I$(top_srcdir) diff --git a/ccan/compiler/LICENSE b/ccan/compiler/LICENSE new file mode 100644 index 0000000..fc8a5de --- /dev/null +++ b/ccan/compiler/LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. 
+ + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. 
+ + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/ccan/compiler/_info b/ccan/compiler/_info new file mode 100644 index 0000000..c55ba22 --- /dev/null +++ b/ccan/compiler/_info @@ -0,0 +1,64 @@ +#include +#include +#include "config.h" + +/** + * compiler - macros for common compiler extensions + * + * Abstracts away some compiler hints. Currently these include: + * - COLD + * For functions not called in fast paths (aka. cold functions) + * - PRINTF_FMT + * For functions which take printf-style parameters. + * - IDEMPOTENT + * For functions which return the same value for same parameters. + * - NEEDED + * For functions and variables which must be emitted even if unused. + * - UNNEEDED + * For functions and variables which need not be emitted if unused. + * - UNUSED + * For parameters which are not used. + * - IS_COMPILE_CONSTANT + * For using different tradeoffs for compiletime vs runtime evaluation. + * + * License: LGPL (3 or any later version) + * Author: Rusty Russell + * + * Example: + * #include + * #include + * #include + * + * // Example of a (slow-path) logging function. + * static int log_threshold = 2; + * static void COLD PRINTF_FMT(2,3) + * logger(int level, const char *fmt, ...) + * { + * va_list ap; + * va_start(ap, fmt); + * if (level >= log_threshold) + * vfprintf(stderr, fmt, ap); + * va_end(ap); + * } + * + * int main(int argc, char *argv[]) + * { + * if (argc != 1) { + * logger(3, "Don't want %i arguments!\n", argc-1); + * return 1; + * } + * return 0; + * } + */ +int main(int argc, char *argv[]) +{ + /* Expect exactly one argument */ + if (argc != 2) + return 1; + + if (strcmp(argv[1], "depends") == 0) { + return 0; + } + + return 1; +} diff --git a/ccan/compiler/compiler.h b/ccan/compiler/compiler.h new file mode 100644 index 0000000..74e0f18 --- /dev/null +++ b/ccan/compiler/compiler.h @@ -0,0 +1,216 @@ +#ifndef CCAN_COMPILER_H +#define CCAN_COMPILER_H +#include "config.h" + +#ifndef COLD +#if HAVE_ATTRIBUTE_COLD +/** + * COLD - a function is unlikely to be called. 
+ * + * Used to mark an unlikely code path and optimize appropriately. + * It is usually used on logging or error routines. + * + * Example: + * static void COLD moan(const char *reason) + * { + * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno)); + * } + */ +#define COLD __attribute__((cold)) +#else +#define COLD +#endif +#endif + +#ifndef NORETURN +#if HAVE_ATTRIBUTE_NORETURN +/** + * NORETURN - a function does not return + * + * Used to mark a function which exits; useful for suppressing warnings. + * + * Example: + * static void NORETURN fail(const char *reason) + * { + * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno)); + * exit(1); + * } + */ +#define NORETURN __attribute__((noreturn)) +#else +#define NORETURN +#endif +#endif + +#ifndef PRINTF_FMT +#if HAVE_ATTRIBUTE_PRINTF +/** + * PRINTF_FMT - a function takes printf-style arguments + * @nfmt: the 1-based number of the function's format argument. + * @narg: the 1-based number of the function's first variable argument. + * + * This allows the compiler to check your parameters as it does for printf(). + * + * Example: + * void PRINTF_FMT(2,3) my_printf(const char *prefix, const char *fmt, ...); + */ +#define PRINTF_FMT(nfmt, narg) \ + __attribute__((format(__printf__, nfmt, narg))) +#else +#define PRINTF_FMT(nfmt, narg) +#endif +#endif + +#ifndef IDEMPOTENT +#if HAVE_ATTRIBUTE_CONST +/** + * IDEMPOTENT - a function's return depends only on its argument + * + * This allows the compiler to assume that the function will return the exact + * same value for the exact same arguments. This implies that the function + * must not use global variables, or dereference pointer arguments. 
+ */ +#define IDEMPOTENT __attribute__((const)) +#else +#define IDEMPOTENT +#endif +#endif + +#if HAVE_ATTRIBUTE_UNUSED +#ifndef UNNEEDED +/** + * UNNEEDED - a variable/function may not be needed + * + * This suppresses warnings about unused variables or functions, but tells + * the compiler that if it is unused it need not emit it into the source code. + * + * Example: + * // With some preprocessor options, this is unnecessary. + * static UNNEEDED int counter; + * + * // With some preprocessor options, this is unnecessary. + * static UNNEEDED void add_to_counter(int add) + * { + * counter += add; + * } + */ +#define UNNEEDED __attribute__((unused)) +#endif + +#ifndef NEEDED +#if HAVE_ATTRIBUTE_USED +/** + * NEEDED - a variable/function is needed + * + * This suppresses warnings about unused variables or functions, but tells + * the compiler that it must exist even if it (seems) unused. + * + * Example: + * // Even if this is unused, these are vital for debugging. + * static NEEDED int counter; + * static NEEDED void dump_counter(void) + * { + * printf("Counter is %i\n", counter); + * } + */ +#define NEEDED __attribute__((used)) +#else +/* Before used, unused functions and vars were always emitted. */ +#define NEEDED __attribute__((unused)) +#endif +#endif + +#ifndef UNUSED +/** + * UNUSED - a parameter is unused + * + * Some compilers (eg. gcc with -W or -Wunused) warn about unused + * function parameters. This suppresses such warnings and indicates + * to the reader that it's deliberate. + * + * Example: + * // This is used as a callback, so needs to have this prototype. 
+ * static int some_callback(void *unused UNUSED) + * { + * return 0; + * } + */ +#define UNUSED __attribute__((unused)) +#endif +#else +#ifndef UNNEEDED +#define UNNEEDED +#endif +#ifndef NEEDED +#define NEEDED +#endif +#ifndef UNUSED +#define UNUSED +#endif +#endif + +#ifndef IS_COMPILE_CONSTANT +#if HAVE_BUILTIN_CONSTANT_P +/** + * IS_COMPILE_CONSTANT - does the compiler know the value of this expression? + * @expr: the expression to evaluate + * + * When an expression manipulation is complicated, it is usually better to + * implement it in a function. However, if the expression being manipulated is + * known at compile time, it is better to have the compiler see the entire + * expression so it can simply substitute the result. + * + * This can be done using the IS_COMPILE_CONSTANT() macro. + * + * Example: + * enum greek { ALPHA, BETA, GAMMA, DELTA, EPSILON }; + * + * // Out-of-line version. + * const char *greek_name(enum greek greek); + * + * // Inline version. + * static inline const char *_greek_name(enum greek greek) + * { + * switch (greek) { + * case ALPHA: return "alpha"; + * case BETA: return "beta"; + * case GAMMA: return "gamma"; + * case DELTA: return "delta"; + * case EPSILON: return "epsilon"; + * default: return "**INVALID**"; + * } + * } + * + * // Use inline if compiler knows answer. Otherwise call function + * // to avoid copies of the same code everywhere. + * #define greek_name(g) \ + * (IS_COMPILE_CONSTANT(greek) ? _greek_name(g) : greek_name(g)) + */ +#define IS_COMPILE_CONSTANT(expr) __builtin_constant_p(expr) +#else +/* If we don't know, assume it's not. */ +#define IS_COMPILE_CONSTANT(expr) 0 +#endif +#endif + +#ifndef WARN_UNUSED_RESULT +#if HAVE_WARN_UNUSED_RESULT +/** + * WARN_UNUSED_RESULT - warn if a function return value is unused. + * + * Used to mark a function where it is extremely unlikely that the caller + * can ignore the result, eg realloc(). + * + * Example: + * // buf param may be freed by this; need return value! 
+ * static char *WARN_UNUSED_RESULT enlarge(char *buf, unsigned *size) + * { + * return realloc(buf, (*size) *= 2); + * } + */ +#define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) +#else +#define WARN_UNUSED_RESULT +#endif +#endif +#endif /* CCAN_COMPILER_H */ diff --git a/ccan/compiler/test/compile_fail-printf.c b/ccan/compiler/test/compile_fail-printf.c new file mode 100644 index 0000000..8f34ae5 --- /dev/null +++ b/ccan/compiler/test/compile_fail-printf.c @@ -0,0 +1,22 @@ +#include + +static void PRINTF_FMT(2,3) my_printf(int x, const char *fmt, ...) +{ +} + +int main(int argc, char *argv[]) +{ + unsigned int i = 0; + + my_printf(1, "Not a pointer " +#ifdef FAIL + "%p", +#if !HAVE_ATTRIBUTE_PRINTF +#error "Unfortunately we don't fail if !HAVE_ATTRIBUTE_PRINTF." +#endif +#else + "%i", +#endif + i); + return 0; +} diff --git a/ccan/compiler/test/run-is_compile_constant.c b/ccan/compiler/test/run-is_compile_constant.c new file mode 100644 index 0000000..a66f2e1 --- /dev/null +++ b/ccan/compiler/test/run-is_compile_constant.c @@ -0,0 +1,15 @@ +#include +#include + +int main(int argc, char *argv[]) +{ + plan_tests(2); + + ok1(!IS_COMPILE_CONSTANT(argc)); +#if HAVE_BUILTIN_CONSTANT_P + ok1(IS_COMPILE_CONSTANT(7)); +#else + pass("If !HAVE_BUILTIN_CONSTANT_P, IS_COMPILE_CONSTANT always false"); +#endif + return exit_status(); +} diff --git a/ccan/opt/LICENSE b/ccan/opt/LICENSE new file mode 100644 index 0000000..d511905 --- /dev/null +++ b/ccan/opt/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. 
By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. 
Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/ccan/opt/_info b/ccan/opt/_info new file mode 100644 index 0000000..97b98f2 --- /dev/null +++ b/ccan/opt/_info @@ -0,0 +1,67 @@ +#include +#include +#include "config.h" + +/** + * opt - simple command line parsing + * + * Simple but powerful command line parsing. + * + * Example: + * #include + * #include + * #include + * + * static bool someflag; + * static int verbose; + * static char *somestring; + * + * static struct opt_table opts[] = { + * OPT_WITHOUT_ARG("--verbose|-v", opt_inc_intval, &verbose, + * "Verbose mode (can be specified more than once)"), + * OPT_WITHOUT_ARG("--someflag", opt_set_bool, &someflag, + * "Set someflag"), + * OPT_WITH_ARG("--somefile=", opt_set_charp, opt_show_charp, + * &somestring, "Set somefile to "), + * OPT_WITHOUT_ARG("--usage|--help|-h", opt_usage_and_exit, + * "args...\nA silly test program.", + * "Print this message."), + * OPT_ENDTABLE + * }; + * + * int main(int argc, char *argv[]) + * { + * int i; + * + * opt_register_table(opts, NULL); + * // For fun, register an extra one. 
+ * opt_register_noarg("--no-someflag", opt_set_invbool, &someflag, + * "Unset someflag"); + * if (!opt_parse(&argc, argv, opt_log_stderr)) + * exit(1); + * + * printf("someflag = %i, verbose = %i, somestring = %s\n", + * someflag, verbose, somestring); + * printf("%u args left over:", argc - 1); + * for (i = 1; i < argc; i++) + * printf(" %s", argv[i]); + * printf("\n"); + * return 0; + * } + * + * License: GPL (2 or any later version) + * Author: Rusty Russell + */ +int main(int argc, char *argv[]) +{ + if (argc != 2) + return 1; + + if (strcmp(argv[1], "depends") == 0) { + printf("ccan/typesafe_cb\n"); + printf("ccan/compiler\n"); + return 0; + } + + return 1; +} diff --git a/ccan/opt/helpers.c b/ccan/opt/helpers.c new file mode 100644 index 0000000..8d0ac54 --- /dev/null +++ b/ccan/opt/helpers.c @@ -0,0 +1,193 @@ +#include +#include +#include +#include +#include +#include "private.h" + +/* Upper bound to sprintf this simple type? Each 3 bits < 1 digit. */ +#define CHAR_SIZE(type) (((sizeof(type)*CHAR_BIT + 2) / 3) + 1) + +/* FIXME: asprintf module? */ +static char *arg_bad(const char *fmt, const char *arg) +{ + char *str = malloc(strlen(fmt) + strlen(arg)); + sprintf(str, fmt, arg); + return str; +} + +char *opt_set_bool(bool *b) +{ + *b = true; + return NULL; +} + +char *opt_set_invbool(bool *b) +{ + *b = false; + return NULL; +} + +char *opt_set_bool_arg(const char *arg, bool *b) +{ + if (!strcasecmp(arg, "yes") || !strcasecmp(arg, "true")) + return opt_set_bool(b); + if (!strcasecmp(arg, "no") || !strcasecmp(arg, "false")) + return opt_set_invbool(b); + + return opt_invalid_argument(arg); +} + +char *opt_set_invbool_arg(const char *arg, bool *b) +{ + char *err = opt_set_bool_arg(arg, b); + + if (!err) + *b = !*b; + return err; +} + +/* Set a char *. */ +char *opt_set_charp(const char *arg, char **p) +{ + *p = (char *)arg; + return NULL; +} + +/* Set an integer value, various forms. Sets to 1 on arg == NULL. 
*/ +char *opt_set_intval(const char *arg, int *i) +{ + long l; + char *err = opt_set_longval(arg, &l); + + if (err) + return err; + *i = l; + /* Beware truncation... */ + if (*i != l) + return arg_bad("value '%s' does not fit into an integer", arg); + return err; +} + +char *opt_set_floatval(const char *arg, float *f) +{ + char *endp; + + errno = 0; + *f = strtof(arg, &endp); + if (*endp || !arg[0]) + return arg_bad("'%s' is not a number", arg); + if (errno) + return arg_bad("'%s' is out of range", arg); + return NULL; +} + +char *opt_set_uintval(const char *arg, unsigned int *ui) +{ + int i; + char *err = opt_set_intval(arg, &i); + + if (err) + return err; + if (i < 0) + return arg_bad("'%s' is negative", arg); + *ui = i; + return NULL; +} + +char *opt_set_longval(const char *arg, long *l) +{ + char *endp; + + /* This is how the manpage says to do it. Yech. */ + errno = 0; + *l = strtol(arg, &endp, 0); + if (*endp || !arg[0]) + return arg_bad("'%s' is not a number", arg); + if (errno) + return arg_bad("'%s' is out of range", arg); + return NULL; +} + +char *opt_set_ulongval(const char *arg, unsigned long *ul) +{ + long int l; + char *err; + + err = opt_set_longval(arg, &l); + if (err) + return err; + *ul = l; + if (l < 0) + return arg_bad("'%s' is negative", arg); + return NULL; +} + +char *opt_inc_intval(int *i) +{ + (*i)++; + return NULL; +} + +/* Display version string. */ +char *opt_version_and_exit(const char *version) +{ + printf("%s\n", version); + fflush(stdout); + exit(0); +} + +char *opt_usage_and_exit(const char *extra) +{ + printf("%s", opt_usage(opt_argv0, extra)); + fflush(stdout); + exit(0); +} + +void opt_show_bool(char buf[OPT_SHOW_LEN], const bool *b) +{ + strncpy(buf, *b ? "true" : "false", OPT_SHOW_LEN); +} + +void opt_show_invbool(char buf[OPT_SHOW_LEN], const bool *b) +{ + strncpy(buf, *b ? 
"false" : "true", OPT_SHOW_LEN); +} + +void opt_show_charp(char buf[OPT_SHOW_LEN], char *const *p) +{ + size_t len = strlen(*p); + buf[0] = '"'; + if (len > OPT_SHOW_LEN - 2) + len = OPT_SHOW_LEN - 2; + strncpy(buf+1, *p, len); + buf[1+len] = '"'; + if (len < OPT_SHOW_LEN - 2) + buf[2+len] = '\0'; +} + +/* Set an integer value, various forms. Sets to 1 on arg == NULL. */ +void opt_show_intval(char buf[OPT_SHOW_LEN], const int *i) +{ + snprintf(buf, OPT_SHOW_LEN, "%i", *i); +} + +void opt_show_floatval(char buf[OPT_SHOW_LEN], const float *f) +{ + snprintf(buf, OPT_SHOW_LEN, "%.1f", *f); +} + +void opt_show_uintval(char buf[OPT_SHOW_LEN], const unsigned int *ui) +{ + snprintf(buf, OPT_SHOW_LEN, "%u", *ui); +} + +void opt_show_longval(char buf[OPT_SHOW_LEN], const long *l) +{ + snprintf(buf, OPT_SHOW_LEN, "%li", *l); +} + +void opt_show_ulongval(char buf[OPT_SHOW_LEN], const unsigned long *ul) +{ + snprintf(buf, OPT_SHOW_LEN, "%lu", *ul); +} diff --git a/ccan/opt/opt.c b/ccan/opt/opt.c new file mode 100644 index 0000000..48cd386 --- /dev/null +++ b/ccan/opt/opt.c @@ -0,0 +1,255 @@ +#include +#include +#include +#include +#include + +#ifndef WIN32 + #include +#else +#include + #define errx(status, fmt, ...) { \ + fprintf(stderr, fmt, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(status); } +#endif + +#include +#include +#include +#include "private.h" + +struct opt_table *opt_table; +unsigned int opt_count, opt_num_short, opt_num_short_arg, opt_num_long; +const char *opt_argv0; + +/* Returns string after first '-'. 
*/ +static const char *first_name(const char *names, unsigned *len) +{ + *len = strcspn(names + 1, "|= "); + return names + 1; +} + +static const char *next_name(const char *names, unsigned *len) +{ + names += *len; + if (names[0] == ' ' || names[0] == '=' || names[0] == '\0') + return NULL; + return first_name(names + 1, len); +} + +static const char *first_opt(unsigned *i, unsigned *len) +{ + for (*i = 0; *i < opt_count; (*i)++) { + if (opt_table[*i].type == OPT_SUBTABLE) + continue; + return first_name(opt_table[*i].names, len); + } + return NULL; +} + +static const char *next_opt(const char *p, unsigned *i, unsigned *len) +{ + for (; *i < opt_count; (*i)++) { + if (opt_table[*i].type == OPT_SUBTABLE) + continue; + if (!p) + return first_name(opt_table[*i].names, len); + p = next_name(p, len); + if (p) + return p; + } + return NULL; +} + +const char *first_lopt(unsigned *i, unsigned *len) +{ + const char *p; + for (p = first_opt(i, len); p; p = next_opt(p, i, len)) { + if (p[0] == '-') { + /* Skip leading "-" */ + (*len)--; + p++; + break; + } + } + return p; +} + +const char *next_lopt(const char *p, unsigned *i, unsigned *len) +{ + for (p = next_opt(p, i, len); p; p = next_opt(p, i, len)) { + if (p[0] == '-') { + /* Skip leading "-" */ + (*len)--; + p++; + break; + } + } + return p; +} + +const char *first_sopt(unsigned *i) +{ + const char *p; + unsigned int len = 0 /* GCC bogus warning */; + + for (p = first_opt(i, &len); p; p = next_opt(p, i, &len)) { + if (p[0] != '-') + break; + } + return p; +} + +const char *next_sopt(const char *p, unsigned *i) +{ + unsigned int len = 1; + for (p = next_opt(p, i, &len); p; p = next_opt(p, i, &len)) { + if (p[0] != '-') + break; + } + return p; +} + +static void check_opt(const struct opt_table *entry) +{ + const char *p; + unsigned len; + + if (entry->type != OPT_HASARG && entry->type != OPT_NOARG && entry->type != OPT_PROCESSARG) + errx(1, "Option %s: unknown entry type %u", + entry->names, entry->type); + + if 
(!entry->desc) + errx(1, "Option %s: description cannot be NULL", entry->names); + + + if (entry->names[0] != '-') + errx(1, "Option %s: does not begin with '-'", entry->names); + + for (p = first_name(entry->names, &len); p; p = next_name(p, &len)) { + if (*p == '-') { + if (len == 1) + errx(1, "Option %s: invalid long option '--'", + entry->names); + opt_num_long++; + } else { + if (len != 1) + errx(1, "Option %s: invalid short option" + " '%.*s'", entry->names, len+1, p-1); + opt_num_short++; + if (entry->type == OPT_HASARG || entry->type == OPT_PROCESSARG) + opt_num_short_arg++; + } + /* Don't document args unless there are some. */ + if (entry->type == OPT_NOARG) { + if (p[len] == ' ' || p[len] == '=') + errx(1, "Option %s: does not take arguments" + " '%s'", entry->names, p+len+1); + } + } +} + +static void add_opt(const struct opt_table *entry) +{ + opt_table = realloc(opt_table, sizeof(opt_table[0]) * (opt_count+1)); + opt_table[opt_count++] = *entry; +} + +void _opt_register(const char *names, enum opt_type type, + char *(*cb)(void *arg), + char *(*cb_arg)(const char *optarg, void *arg), + void (*show)(char buf[OPT_SHOW_LEN], const void *arg), + const void *arg, const char *desc) +{ + struct opt_table opt; + opt.names = names; + opt.type = type; + opt.cb = cb; + opt.cb_arg = cb_arg; + opt.show = show; + opt.u.carg = arg; + opt.desc = desc; + check_opt(&opt); + add_opt(&opt); +} + +void opt_register_table(const struct opt_table entry[], const char *desc) +{ + unsigned int i, start = opt_count; + + if (desc) { + struct opt_table heading = OPT_SUBTABLE(NULL, desc); + add_opt(&heading); + } + for (i = 0; entry[i].type != OPT_END; i++) { + if (entry[i].type == OPT_SUBTABLE) + opt_register_table(subtable_of(&entry[i]), + entry[i].desc); + else { + check_opt(&entry[i]); + add_opt(&entry[i]); + } + } + /* We store the table length in arg ptr. */ + if (desc) + opt_table[start].u.tlen = (opt_count - start); +} + +/* Parse your arguments. 
*/ +bool opt_parse(int *argc, char *argv[], void (*errlog)(const char *fmt, ...)) +{ + int ret; + unsigned offset = 0; + + #ifdef WIN32 + char *original_argv0 = argv[0]; + argv[0] = (char*)basename(argv[0]); + #endif + + /* This helps opt_usage. */ + opt_argv0 = argv[0]; + + while ((ret = parse_one(argc, argv, &offset, errlog)) == 1); + + #ifdef WIN32 + argv[0] = original_argv0; + #endif + + /* parse_one returns 0 on finish, -1 on error */ + return (ret == 0); +} + +void opt_free_table(void) +{ + free(opt_table); + opt_table=0; +} + +void opt_log_stderr(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + va_end(ap); +} + +void opt_log_stderr_exit(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n"); + va_end(ap); + exit(1); +} + +char *opt_invalid_argument(const char *arg) +{ + char *str = malloc(sizeof("Invalid argument '%s'") + strlen(arg)); + sprintf(str, "Invalid argument '%s'", arg); + return str; +} diff --git a/ccan/opt/opt.h b/ccan/opt/opt.h new file mode 100644 index 0000000..90ff680 --- /dev/null +++ b/ccan/opt/opt.h @@ -0,0 +1,365 @@ +#ifndef CCAN_OPT_H +#define CCAN_OPT_H +#include +#include +#include +#include + +struct opt_table; + +/** + * OPT_WITHOUT_ARG() - macro for initializing an opt_table entry (without arg) + * @names: the names of the option eg. "--foo", "-f" or "--foo|-f|--foobar". + * @cb: the callback when the option is found. + * @arg: the argument to hand to @cb. + * @desc: the description for opt_usage(), or opt_hidden. + * + * This is a typesafe wrapper for initializing a struct opt_table. The callback + * of type "char *cb(type *)", "char *cb(const type *)" or "char *cb(void *)", + * where "type" is the type of the @arg argument. + * + * If the @cb returns non-NULL, opt_parse() will stop parsing, use the + * returned string to form an error message for errlog(), free() the + * string and return false. 
+ * + * Any number of equivalent short or long options can be listed in @names, + * separated by '|'. Short options are a single hyphen followed by a single + * character, long options are two hyphens followed by one or more characters. + * + * See Also: + * OPT_WITH_ARG() + */ +#define OPT_WITHOUT_ARG(names, cb, arg, desc) \ + { (names), OPT_CB_NOARG((cb), (arg)), { (arg) }, (desc) } + +/** + * OPT_WITH_ARG() - macro for initializing long and short option (with arg) + * @names: the option names eg. "--foo=", "-f" or "-f|--foo ". + * @cb: the callback when the option is found (along with ). + * @show: the callback to print the value in get_usage (or NULL) + * @arg: the argument to hand to @cb and @show + * @desc: the description for opt_usage(), or opt_hidden. + * + * This is a typesafe wrapper for initializing a struct opt_table. The callback + * is of type "char *cb(const char *, type *)", + * "char *cb(const char *, const type *)" or "char *cb(const char *, void *)", + * where "type" is the type of the @arg argument. The first argument to the + * @cb is the argument found on the commandline. + * + * Similarly, if @show is not NULL, it should be of type "void *show(char *, + * const type *)". It should write up to OPT_SHOW_LEN bytes into the first + * argument; unless it uses the entire OPT_SHOW_LEN bytes it should + * nul-terminate that buffer. + * + * Any number of equivalent short or long options can be listed in @names, + * separated by '|'. Short options are a single hyphen followed by a single + * character, long options are two hyphens followed by one or more characters. + * A space or equals in @names is ignored for parsing, and only used + * for printing the usage. + * + * If the @cb returns non-NULL, opt_parse() will stop parsing, use the + * returned string to form an error message for errlog(), free() the + * string and return false. 
+ * + * See Also: + * OPT_WITHOUT_ARG() + */ +#define OPT_WITH_ARG(name, cb, show, arg, desc) \ + { (name), OPT_CB_ARG((cb), (show), (arg)), { (arg) }, (desc) } + +/** + * OPT_WITH_CBARG() - variant of OPT_WITH_ARG which assigns arguments to arg + * and then performs the callback function on the args as well. + */ +#define OPT_WITH_CBARG(name, cb, show, arg, desc) \ + { (name), OPT_CB_WITHARG((cb), (show), (arg)), { (arg) }, (desc) } + +/** + * OPT_SUBTABLE() - macro for including another table inside a table. + * @table: the table to include in this table. + * @desc: description of this subtable (for opt_usage()) or NULL. + */ +#define OPT_SUBTABLE(table, desc) \ + { (const char *)(table), OPT_SUBTABLE, \ + sizeof(_check_is_entry(table)) ? NULL : NULL, NULL, NULL, \ + { NULL }, (desc) } + +/** + * OPT_ENDTABLE - macro to create final entry in table. + * + * This must be the final element in the opt_table array. + */ +#define OPT_ENDTABLE { NULL, OPT_END, NULL, NULL, NULL, { NULL }, NULL } + +/** + * opt_register_table - register a table of options + * @table: the table of options + * @desc: description of this subtable (for opt_usage()) or NULL. + * + * The table must be terminated by OPT_ENDTABLE. + * + * Example: + * static int verbose = 0; + * static struct opt_table opts[] = { + * OPT_WITHOUT_ARG("--verbose", opt_inc_intval, &verbose, + * "Verbose mode (can be specified more than once)"), + * OPT_WITHOUT_ARG("-v", opt_inc_intval, &verbose, + * "Verbose mode (can be specified more than once)"), + * OPT_WITHOUT_ARG("--usage", opt_usage_and_exit, + * "args...\nA silly test program.", + * "Print this message."), + * OPT_ENDTABLE + * }; + * + * ... + * opt_register_table(opts, NULL); + */ +void opt_register_table(const struct opt_table *table, const char *desc); + +/** + * opt_register_noarg - register an option with no arguments + * @names: the names of the option eg. "--foo", "-f" or "--foo|-f|--foobar". + * @cb: the callback when the option is found. 
+ * @arg: the argument to hand to @cb. + * @desc: the verbose description of the option (for opt_usage()), or NULL. + * + * This is used for registering a single commandline option which takes + * no argument. + * + * The callback is of type "char *cb(type *)", "char *cb(const type *)" + * or "char *cb(void *)", where "type" is the type of the @arg + * argument. + * + * If the @cb returns non-NULL, opt_parse() will stop parsing, use the + * returned string to form an error message for errlog(), free() the + * string and return false. + */ +#define opt_register_noarg(names, cb, arg, desc) \ + _opt_register((names), OPT_CB_NOARG((cb), (arg)), (arg), (desc)) + +/** + * opt_register_arg - register an option with an arguments + * @names: the names of the option eg. "--foo", "-f" or "--foo|-f|--foobar". + * @cb: the callback when the option is found. + * @show: the callback to print the value in get_usage (or NULL) + * @arg: the argument to hand to @cb. + * @desc: the verbose description of the option (for opt_usage()), or NULL. + * + * This is used for registering a single commandline option which takes + * an argument. + * + * The callback is of type "char *cb(const char *, type *)", + * "char *cb(const char *, const type *)" or "char *cb(const char *, void *)", + * where "type" is the type of the @arg argument. The first argument to the + * @cb is the argument found on the commandline. + * + * At least one of @longopt and @shortopt must be non-zero. If the + * @cb returns false, opt_parse() will stop parsing and return false. + * + * Example: + * static char *explode(const char *optarg, void *unused) + * { + * errx(1, "BOOM! %s", optarg); + * } + * ... + * opt_register_arg("--explode|--boom", explode, NULL, NULL, opt_hidden); + */ +#define opt_register_arg(names, cb, show, arg, desc) \ + _opt_register((names), OPT_CB_ARG((cb), (show), (arg)), (arg), (desc)) + +/** + * opt_parse - parse arguments. + * @argc: pointer to argc + * @argv: argv array. 
+ * @errlog: the function to print errors + * + * This iterates through the command line and calls callbacks registered with + * opt_register_table()/opt_register_arg()/opt_register_noarg(). If there + * are unknown options, missing arguments or a callback returns false, then + * an error message is printed and false is returned. + * + * On success, argc and argv are adjusted so only the non-option elements + * remain, and true is returned. + * + * Example: + * if (!opt_parse(&argc, argv, opt_log_stderr)) { + * printf("You screwed up, aborting!\n"); + * exit(1); + * } + * + * See Also: + * opt_log_stderr, opt_log_stderr_exit + */ +bool opt_parse(int *argc, char *argv[], void (*errlog)(const char *fmt, ...)); + +/** + * opt_free_table - free the table. + * + * This frees the internal memory. Call this as the last + * opt function. + */ +void opt_free_table(void); + +/** + * opt_log_stderr - print message to stderr. + * @fmt: printf-style format. + * + * This is a helper for opt_parse, to print errors to stderr. + * + * See Also: + * opt_log_stderr_exit + */ +void opt_log_stderr(const char *fmt, ...); + +/** + * opt_log_stderr_exit - print message to stderr, then exit(1) + * @fmt: printf-style format. + * + * Just like opt_log_stderr, only then does exit(1). This means that + * when handed to opt_parse, opt_parse will never return false. + * + * Example: + * // This never returns false; just exits if there's an erorr. + * opt_parse(&argc, argv, opt_log_stderr_exit); + */ +void opt_log_stderr_exit(const char *fmt, ...); + +/** + * opt_invalid_argument - helper to allocate an "Invalid argument '%s'" string + * @arg: the argument which was invalid. + * + * This is a helper for callbacks to return a simple error string. + */ +char *opt_invalid_argument(const char *arg); + +/** + * opt_usage - create usage message + * @argv0: the program name + * @extra: extra details to print after the initial command, or NULL. 
+ * + * Creates a usage message, with the program name, arguments, some extra details + * and a table of all the options with their descriptions. If an option has + * description opt_hidden, it is not shown here. + * + * If "extra" is NULL, then the extra information is taken from any + * registered option which calls opt_usage_and_exit(). This avoids duplicating + * that string in the common case. + * + * The result should be passed to free(). + */ +char *opt_usage(const char *argv0, const char *extra); + +/** + * opt_hidden - string for undocumented options. + * + * This can be used as the desc parameter if you want an option not to be + * shown by opt_usage(). + */ +extern const char opt_hidden[]; + +/* Maximum length of arg to show in opt_usage */ +#define OPT_SHOW_LEN 80 + +/* Standard helpers. You can write your own: */ +/* Sets the @b to true. */ +char *opt_set_bool(bool *b); +/* Sets @b based on arg: (yes/no/true/false). */ +char *opt_set_bool_arg(const char *arg, bool *b); +void opt_show_bool(char buf[OPT_SHOW_LEN], const bool *b); +/* The inverse */ +char *opt_set_invbool(bool *b); +void opt_show_invbool(char buf[OPT_SHOW_LEN], const bool *b); +/* Sets @b based on !arg: (yes/no/true/false). */ +char *opt_set_invbool_arg(const char *arg, bool *b); + +/* Set a char *. */ +char *opt_set_charp(const char *arg, char **p); +void opt_show_charp(char buf[OPT_SHOW_LEN], char *const *p); + +/* Set an integer value, various forms. Sets to 1 on arg == NULL. 
*/ +char *opt_set_intval(const char *arg, int *i); +void opt_show_intval(char buf[OPT_SHOW_LEN], const int *i); +char *opt_set_floatval(const char *arg, float *f); +void opt_show_floatval(char buf[OPT_SHOW_LEN], const float *f); +char *opt_set_uintval(const char *arg, unsigned int *ui); +void opt_show_uintval(char buf[OPT_SHOW_LEN], const unsigned int *ui); +char *opt_set_longval(const char *arg, long *l); +void opt_show_longval(char buf[OPT_SHOW_LEN], const long *l); +char *opt_set_ulongval(const char *arg, unsigned long *ul); +void opt_show_ulongval(char buf[OPT_SHOW_LEN], const unsigned long *ul); + +/* Increment. */ +char *opt_inc_intval(int *i); + +/* Display version string to stdout, exit(0). */ +char *opt_version_and_exit(const char *version); + +/* Display usage string to stdout, exit(0). */ +char *opt_usage_and_exit(const char *extra); + +/* Below here are private declarations. */ +/* You can use this directly to build tables, but the macros will ensure + * consistency and type safety. */ +enum opt_type { + OPT_NOARG = 1, /* -f|--foo */ + OPT_HASARG = 2, /* -f arg|--foo=arg|--foo arg */ + OPT_PROCESSARG = 4, + OPT_SUBTABLE = 8, /* Actually, longopt points to a subtable... */ + OPT_END = 16, /* End of the table. */ +}; + +struct opt_table { + const char *names; /* pipe-separated names, --longopt or -s */ + enum opt_type type; + char *(*cb)(void *arg); /* OPT_NOARG */ + char *(*cb_arg)(const char *optarg, void *arg); /* OPT_HASARG */ + void (*show)(char buf[OPT_SHOW_LEN], const void *arg); + union { + const void *carg; + void *arg; + size_t tlen; + } u; + const char *desc; +}; + +/* Resolves to the four parameters for non-arg callbacks. */ +#define OPT_CB_NOARG(cb, arg) \ + OPT_NOARG, \ + typesafe_cb_cast3(char *(*)(void *), \ + char *(*)(typeof(*(arg))*), \ + char *(*)(const typeof(*(arg))*), \ + char *(*)(const void *), (cb)), \ + NULL, NULL + +/* Resolves to the four parameters for arg callbacks. 
*/ +#define OPT_CB_ARG(cb, show, arg) \ + OPT_HASARG, NULL, \ + typesafe_cb_cast3(char *(*)(const char *,void *), \ + char *(*)(const char *, typeof(*(arg))*), \ + char *(*)(const char *, const typeof(*(arg))*), \ + char *(*)(const char *, const void *), \ + (cb)), \ + typesafe_cb_cast(void (*)(char buf[], const void *), \ + void (*)(char buf[], const typeof(*(arg))*), (show)) + +#define OPT_CB_WITHARG(cb, show, arg) \ + OPT_PROCESSARG, NULL, \ + typesafe_cb_cast3(char *(*)(const char *,void *), \ + char *(*)(const char *, typeof(*(arg))*), \ + char *(*)(const char *, const typeof(*(arg))*), \ + char *(*)(const char *, const void *), \ + (cb)), \ + typesafe_cb_cast(void (*)(char buf[], const void *), \ + void (*)(char buf[], const typeof(*(arg))*), (show)) + +/* Non-typesafe register function. */ +void _opt_register(const char *names, enum opt_type type, + char *(*cb)(void *arg), + char *(*cb_arg)(const char *optarg, void *arg), + void (*show)(char buf[OPT_SHOW_LEN], const void *arg), + const void *arg, const char *desc); + +/* We use this to get typechecking for OPT_SUBTABLE */ +static inline int _check_is_entry(struct opt_table *e UNUSED) { return 0; } + +#endif /* CCAN_OPT_H */ diff --git a/ccan/opt/parse.c b/ccan/opt/parse.c new file mode 100644 index 0000000..18af157 --- /dev/null +++ b/ccan/opt/parse.c @@ -0,0 +1,132 @@ +/* Actual code to parse commandline. 
*/ +#include +#include +#include +#include +#include "private.h" + +/* glibc does this as: +/tmp/opt-example: invalid option -- 'x' +/tmp/opt-example: unrecognized option '--long' +/tmp/opt-example: option '--someflag' doesn't allow an argument +/tmp/opt-example: option '--s' is ambiguous +/tmp/opt-example: option requires an argument -- 's' +*/ +static int parse_err(void (*errlog)(const char *fmt, ...), + const char *argv0, const char *arg, unsigned len, + const char *problem) +{ + errlog("%s: %.*s: %s", argv0, len, arg, problem); + return -1; +} + +static void consume_option(int *argc, char *argv[], unsigned optnum) +{ + memmove(&argv[optnum], &argv[optnum+1], + sizeof(argv[optnum]) * (*argc-optnum)); + (*argc)--; +} + +/* Returns 1 if argument consumed, 0 if all done, -1 on error. */ +int parse_one(int *argc, char *argv[], unsigned *offset, + void (*errlog)(const char *fmt, ...)) +{ + unsigned i, arg, len; + const char *o, *optarg = NULL; + char *problem; + + if (getenv("POSIXLY_CORRECT")) { + /* Don't find options after non-options. */ + arg = 1; + } else { + for (arg = 1; argv[arg]; arg++) { + if (argv[arg][0] == '-') + break; + } + } + + if (!argv[arg] || argv[arg][0] != '-') + return 0; + + /* Special arg terminator option. 
*/ + if (strcmp(argv[arg], "--") == 0) { + consume_option(argc, argv, arg); + return 0; + } + + /* Long options start with -- */ + if (argv[arg][1] == '-') { + assert(*offset == 0); + for (o = first_lopt(&i, &len); o; o = next_lopt(o, &i, &len)) { + if (strncmp(argv[arg] + 2, o, len) != 0) + continue; + if (argv[arg][2 + len] == '=') + optarg = argv[arg] + 2 + len + 1; + else if (argv[arg][2 + len] != '\0') + continue; + break; + } + if (!o) + return parse_err(errlog, argv[0], + argv[arg], strlen(argv[arg]), + "unrecognized option"); + /* For error messages, we include the leading '--' */ + o -= 2; + len += 2; + } else { + /* offset allows us to handle -abc */ + for (o = first_sopt(&i); o; o = next_sopt(o, &i)) { + if (argv[arg][*offset + 1] != *o) + continue; + (*offset)++; + break; + } + if (!o) + return parse_err(errlog, argv[0], + argv[arg], strlen(argv[arg]), + "unrecognized option"); + /* For error messages, we include the leading '-' */ + o--; + len = 2; + } + + if (opt_table[i].type == OPT_NOARG) { + if (optarg) + return parse_err(errlog, argv[0], o, len, + "doesn't allow an argument"); + problem = opt_table[i].cb(opt_table[i].u.arg); + } else { + if (!optarg) { + /* Swallow any short options as optarg, eg -afile */ + if (*offset && argv[arg][*offset + 1]) { + optarg = argv[arg] + *offset + 1; + *offset = 0; + } else + optarg = argv[arg+1]; + } + if (!optarg) + return parse_err(errlog, argv[0], o, len, + "requires an argument"); + if (opt_table[i].type == OPT_PROCESSARG) + opt_set_charp(optarg, opt_table[i].u.arg); + problem = opt_table[i].cb_arg(optarg, opt_table[i].u.arg); + } + + if (problem) { + parse_err(errlog, argv[0], o, len, problem); + free(problem); + return -1; + } + + /* If no more letters in that short opt, reset offset. */ + if (*offset && !argv[arg][*offset + 1]) + *offset = 0; + + /* All finished with that option? 
*/ + if (*offset == 0) { + consume_option(argc, argv, arg); + if (optarg && optarg == argv[arg]) + consume_option(argc, argv, arg); + } + return 1; +} diff --git a/ccan/opt/private.h b/ccan/opt/private.h new file mode 100644 index 0000000..048951e --- /dev/null +++ b/ccan/opt/private.h @@ -0,0 +1,19 @@ +#ifndef CCAN_OPT_PRIVATE_H +#define CCAN_OPT_PRIVATE_H + +extern struct opt_table *opt_table; +extern unsigned int opt_count, opt_num_short, opt_num_short_arg, opt_num_long; + +extern const char *opt_argv0; + +#define subtable_of(entry) ((struct opt_table *)((entry)->names)) + +const char *first_sopt(unsigned *i); +const char *next_sopt(const char *names, unsigned *i); +const char *first_lopt(unsigned *i, unsigned *len); +const char *next_lopt(const char *p, unsigned *i, unsigned *len); + +int parse_one(int *argc, char *argv[], unsigned *offset, + void (*errlog)(const char *fmt, ...)); + +#endif /* CCAN_OPT_PRIVATE_H */ diff --git a/ccan/opt/test/compile_ok-const-arg.c b/ccan/opt/test/compile_ok-const-arg.c new file mode 100644 index 0000000..f1d10da --- /dev/null +++ b/ccan/opt/test/compile_ok-const-arg.c @@ -0,0 +1,13 @@ +#include +#include +#include +#include +#include + +int main(int argc, char *argv[]) +{ + opt_register_noarg("-v", opt_version_and_exit, + (const char *)"1.2.3", + (const char *)"Print version"); + return 0; +} diff --git a/ccan/opt/test/run-checkopt.c b/ccan/opt/test/run-checkopt.c new file mode 100644 index 0000000..71ee3c4 --- /dev/null +++ b/ccan/opt/test/run-checkopt.c @@ -0,0 +1,144 @@ +#include "config.h" +#include +#include +#include +#include +#include +#include +#include "utils.h" + +/* We don't actually want it to exit... 
*/ +static jmp_buf exited; +#define errx save_and_jump + +static void save_and_jump(int ecode, const char *fmt, ...); + +#include +#include +#include +#include + +static char *output = NULL; + +static int saved_vprintf(const char *fmt, va_list ap) +{ + char *p; + int ret = vasprintf(&p, fmt, ap); + + if (output) { + output = realloc(output, strlen(output) + strlen(p) + 1); + strcat(output, p); + free(p); + } else + output = p; + return ret; +} + +static void save_and_jump(int ecode, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + saved_vprintf(fmt, ap); + va_end(ap); + longjmp(exited, ecode + 1); +} + +static void reset(void) +{ + free(output); + output = NULL; + free(opt_table); + opt_table = NULL; + opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; +} + +int main(int argc, char *argv[]) +{ + int exitval; + + plan_tests(14); + + exitval = setjmp(exited); + if (exitval == 0) { + /* Bad type. */ + _opt_register("-a", OPT_SUBTABLE, (void *)opt_version_and_exit, + NULL, NULL, "1.2.3", ""); + fail("_opt_register returned?"); + } else { + ok1(exitval - 1 == 1); + ok1(strstr(output, "Option -a: unknown entry type")); + } + reset(); + + exitval = setjmp(exited); + if (exitval == 0) { + /* NULL description. */ + opt_register_noarg("-a", test_noarg, "", NULL); + fail("_opt_register returned?"); + } else { + ok1(exitval - 1 == 1); + ok1(strstr(output, "Option -a: description cannot be NULL")); + } + reset(); + + exitval = setjmp(exited); + if (exitval == 0) { + /* Bad option name. */ + opt_register_noarg("a", test_noarg, "", ""); + fail("_opt_register returned?"); + } else { + ok1(exitval - 1 == 1); + ok1(strstr(output, "Option a: does not begin with '-'")); + } + + reset(); + + exitval = setjmp(exited); + if (exitval == 0) { + /* Bad option name. 
*/ + opt_register_noarg("--", test_noarg, "", ""); + fail("_opt_register returned?"); + } else { + ok1(exitval - 1 == 1); + ok1(strstr(output, "Option --: invalid long option '--'")); + } + + reset(); + + exitval = setjmp(exited); + if (exitval == 0) { + /* Bad option name. */ + opt_register_noarg("--a|-aaa", test_noarg, "", ""); + fail("_opt_register returned?"); + } else { + ok1(exitval - 1 == 1); + ok1(strstr(output, + "Option --a|-aaa: invalid short option '-aaa'")); + } + reset(); + + exitval = setjmp(exited); + if (exitval == 0) { + /* Documentation for non-optios. */ + opt_register_noarg("--a foo", test_noarg, "", ""); + fail("_opt_register returned?"); + } else { + ok1(exitval - 1 == 1); + ok1(strstr(output, + "Option --a foo: does not take arguments 'foo'")); + } + reset(); + + exitval = setjmp(exited); + if (exitval == 0) { + /* Documentation for non-optios. */ + opt_register_noarg("--a=foo", test_noarg, "", ""); + fail("_opt_register returned?"); + } else { + ok1(exitval - 1 == 1); + ok1(strstr(output, + "Option --a=foo: does not take arguments 'foo'")); + } + return exit_status(); +} diff --git a/ccan/opt/test/run-correct-reporting.c b/ccan/opt/test/run-correct-reporting.c new file mode 100644 index 0000000..8534f29 --- /dev/null +++ b/ccan/opt/test/run-correct-reporting.c @@ -0,0 +1,49 @@ +/* Make sure when multiple equivalent options, correct one is used for errors */ + +#include +#include +#include +#include +#include +#include +#include "utils.h" + +int main(int argc, char *argv[]) +{ + plan_tests(12); + + /* --aaa without args. 
*/ + opt_register_arg("-a|--aaa", test_arg, NULL, "aaa", ""); + ok1(!parse_args(&argc, &argv, "--aaa", NULL)); + ok1(strstr(err_output, ": --aaa: requires an argument")); + free(err_output); + err_output = NULL; + ok1(!parse_args(&argc, &argv, "-a", NULL)); + ok1(strstr(err_output, ": -a: requires an argument")); + free(err_output); + err_output = NULL; + + /* Multiple */ + opt_register_arg("--bbb|-b|-c|--ccc", test_arg, NULL, "aaa", ""); + ok1(!parse_args(&argc, &argv, "--bbb", NULL)); + ok1(strstr(err_output, ": --bbb: requires an argument")); + free(err_output); + err_output = NULL; + ok1(!parse_args(&argc, &argv, "-b", NULL)); + ok1(strstr(err_output, ": -b: requires an argument")); + free(err_output); + err_output = NULL; + ok1(!parse_args(&argc, &argv, "-c", NULL)); + ok1(strstr(err_output, ": -c: requires an argument")); + free(err_output); + err_output = NULL; + ok1(!parse_args(&argc, &argv, "--ccc", NULL)); + ok1(strstr(err_output, ": --ccc: requires an argument")); + free(err_output); + err_output = NULL; + + /* parse_args allocates argv */ + free(argv); + return exit_status(); +} + diff --git a/ccan/opt/test/run-helpers.c b/ccan/opt/test/run-helpers.c new file mode 100644 index 0000000..a58e4d9 --- /dev/null +++ b/ccan/opt/test/run-helpers.c @@ -0,0 +1,440 @@ +#include "config.h" +#include +#include +#include +#include +#include +#include "utils.h" + +/* We don't actually want it to exit... 
*/ +static jmp_buf exited; +#define exit(status) longjmp(exited, (status) + 1) + +#define printf saved_printf +static int saved_printf(const char *fmt, ...); + +#define fprintf saved_fprintf +static int saved_fprintf(FILE *ignored, const char *fmt, ...); + +#define vfprintf(f, fmt, ap) saved_vprintf(fmt, ap) +static int saved_vprintf(const char *fmt, va_list ap); + +#define malloc(size) saved_malloc(size) +static void *saved_malloc(size_t size); + +#include +#include +#include +#include + +static void reset_options(void) +{ + free(opt_table); + opt_table = NULL; + opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; +} + +static char *output = NULL; + +static int saved_vprintf(const char *fmt, va_list ap) +{ + char *p; + int ret = vasprintf(&p, fmt, ap); + + if (output) { + output = realloc(output, strlen(output) + strlen(p) + 1); + strcat(output, p); + free(p); + } else + output = p; + return ret; +} + +static int saved_printf(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = saved_vprintf(fmt, ap); + va_end(ap); + return ret; +} + +static int saved_fprintf(FILE *ignored, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = saved_vprintf(fmt, ap); + va_end(ap); + return ret; +} + +#undef malloc +static void *last_allocation; +static void *saved_malloc(size_t size) +{ + return last_allocation = malloc(size); +} + +/* Test helpers. 
*/ +int main(int argc, char *argv[]) +{ + plan_tests(100); + + /* opt_set_bool */ + { + bool arg = false; + reset_options(); + opt_register_noarg("-a", opt_set_bool, &arg, ""); + ok1(parse_args(&argc, &argv, "-a", NULL)); + ok1(arg); + opt_register_arg("-b", opt_set_bool_arg, NULL, &arg, ""); + ok1(parse_args(&argc, &argv, "-b", "no", NULL)); + ok1(!arg); + ok1(parse_args(&argc, &argv, "-b", "yes", NULL)); + ok1(arg); + ok1(parse_args(&argc, &argv, "-b", "false", NULL)); + ok1(!arg); + ok1(parse_args(&argc, &argv, "-b", "true", NULL)); + ok1(arg); + ok1(!parse_args(&argc, &argv, "-b", "unknown", NULL)); + ok1(arg); + ok1(strstr(err_output, ": -b: Invalid argument 'unknown'")); + } + /* opt_set_invbool */ + { + bool arg = true; + reset_options(); + opt_register_noarg("-a", opt_set_invbool, &arg, ""); + ok1(parse_args(&argc, &argv, "-a", NULL)); + ok1(!arg); + opt_register_arg("-b", opt_set_invbool_arg, NULL, + &arg, ""); + ok1(parse_args(&argc, &argv, "-b", "no", NULL)); + ok1(arg); + ok1(parse_args(&argc, &argv, "-b", "yes", NULL)); + ok1(!arg); + ok1(parse_args(&argc, &argv, "-b", "false", NULL)); + ok1(arg); + ok1(parse_args(&argc, &argv, "-b", "true", NULL)); + ok1(!arg); + ok1(!parse_args(&argc, &argv, "-b", "unknown", NULL)); + ok1(!arg); + ok1(strstr(err_output, ": -b: Invalid argument 'unknown'")); + } + /* opt_set_charp */ + { + char *arg = (char *)"wrong"; + reset_options(); + opt_register_arg("-a", opt_set_charp, NULL, &arg, "All"); + ok1(parse_args(&argc, &argv, "-a", "string", NULL)); + ok1(strcmp(arg, "string") == 0); + } + /* opt_set_intval */ + { + int arg = 1000; + reset_options(); + opt_register_arg("-a", opt_set_intval, NULL, &arg, "All"); + ok1(parse_args(&argc, &argv, "-a", "9999", NULL)); + ok1(arg == 9999); + ok1(parse_args(&argc, &argv, "-a", "-9999", NULL)); + ok1(arg == -9999); + ok1(parse_args(&argc, &argv, "-a", "0", NULL)); + ok1(arg == 0); + ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); + if (sizeof(int) == 4) + 
ok1(!parse_args(&argc, &argv, "-a", "4294967296", NULL)); + else + fail("Handle other int sizes"); + } + /* opt_set_uintval */ + { + unsigned int arg = 1000; + reset_options(); + opt_register_arg("-a", opt_set_uintval, NULL, &arg, "All"); + ok1(parse_args(&argc, &argv, "-a", "9999", NULL)); + ok1(arg == 9999); + ok1(!parse_args(&argc, &argv, "-a", "-9999", NULL)); + ok1(parse_args(&argc, &argv, "-a", "0", NULL)); + ok1(arg == 0); + ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); + ok1(!parse_args(&argc, &argv, "-a", "4294967296", NULL)); + if (ULONG_MAX == UINT_MAX) { + pass("Can't test overflow"); + pass("Can't test error message"); + } else { + char buf[30]; + sprintf(buf, "%lu", ULONG_MAX); + ok1(!parse_args(&argc, &argv, "-a", buf, NULL)); + ok1(strstr(err_output, ": -a: value '") + && strstr(err_output, buf) + && strstr(err_output, "' does not fit into an integer")); + } + } + /* opt_set_longval */ + { + long int arg = 1000; + reset_options(); + opt_register_arg("-a", opt_set_longval, NULL, &arg, "All"); + ok1(parse_args(&argc, &argv, "-a", "9999", NULL)); + ok1(arg == 9999); + ok1(parse_args(&argc, &argv, "-a", "-9999", NULL)); + ok1(arg == -9999); + ok1(parse_args(&argc, &argv, "-a", "0", NULL)); + ok1(arg == 0); + ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); + if (sizeof(long) == 4) + ok1(!parse_args(&argc, &argv, "-a", "4294967296", NULL)); + else if (sizeof(long)== 8) + ok1(!parse_args(&argc, &argv, "-a", "18446744073709551616", NULL)); + else + fail("FIXME: Handle other long sizes"); + } + /* opt_set_ulongval */ + { + unsigned long int arg = 1000; + reset_options(); + opt_register_arg("-a", opt_set_ulongval, NULL, &arg, "All"); + ok1(parse_args(&argc, &argv, "-a", "9999", NULL)); + ok1(arg == 9999); + ok1(!parse_args(&argc, &argv, "-a", "-9999", NULL)); + ok1(parse_args(&argc, &argv, "-a", "0", NULL)); + ok1(arg == 0); + ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); + if (sizeof(long) == 4) + ok1(!parse_args(&argc, &argv, "-a", 
"4294967296", NULL)); + else if (sizeof(long)== 8) + ok1(!parse_args(&argc, &argv, "-a", "18446744073709551616", NULL)); + else + fail("FIXME: Handle other long sizes"); + } + /* opt_inc_intval */ + { + int arg = 1000; + reset_options(); + opt_register_noarg("-a", opt_inc_intval, &arg, ""); + ok1(parse_args(&argc, &argv, "-a", NULL)); + ok1(arg == 1001); + ok1(parse_args(&argc, &argv, "-a", "-a", NULL)); + ok1(arg == 1003); + ok1(parse_args(&argc, &argv, "-aa", NULL)); + ok1(arg == 1005); + } + + /* opt_show_version_and_exit. */ + { + int exitval; + reset_options(); + opt_register_noarg("-a", + opt_version_and_exit, "1.2.3", ""); + /* parse_args allocates argv */ + free(argv); + + argc = 2; + argv = malloc(sizeof(argv[0]) * 3); + argv[0] = "thisprog"; + argv[1] = "-a"; + argv[2] = NULL; + + exitval = setjmp(exited); + if (exitval == 0) { + opt_parse(&argc, argv, save_err_output); + fail("opt_show_version_and_exit returned?"); + } else { + ok1(exitval - 1 == 0); + } + ok1(strcmp(output, "1.2.3\n") == 0); + free(output); + free(argv); + output = NULL; + } + + /* opt_usage_and_exit. */ + { + int exitval; + reset_options(); + opt_register_noarg("-a", + opt_usage_and_exit, "[args]", ""); + + argc = 2; + argv = malloc(sizeof(argv[0]) * 3); + argv[0] = "thisprog"; + argv[1] = "-a"; + argv[2] = NULL; + + exitval = setjmp(exited); + if (exitval == 0) { + opt_parse(&argc, argv, save_err_output); + fail("opt_usage_and_exit returned?"); + } else { + ok1(exitval - 1 == 0); + } + ok1(strstr(output, "[args]")); + ok1(strstr(output, argv[0])); + ok1(strstr(output, "[-a]")); + free(output); + free(argv); + /* It exits without freeing usage string. 
*/ + free(last_allocation); + output = NULL; + } + + /* opt_show_bool */ + { + bool b; + char buf[OPT_SHOW_LEN+2] = { 0 }; + buf[OPT_SHOW_LEN] = '!'; + + b = true; + opt_show_bool(buf, &b); + ok1(strcmp(buf, "true") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + + b = false; + opt_show_bool(buf, &b); + ok1(strcmp(buf, "false") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + } + + /* opt_show_invbool */ + { + bool b; + char buf[OPT_SHOW_LEN+2] = { 0 }; + buf[OPT_SHOW_LEN] = '!'; + + b = true; + opt_show_invbool(buf, &b); + ok1(strcmp(buf, "false") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + + b = false; + opt_show_invbool(buf, &b); + ok1(strcmp(buf, "true") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + } + + /* opt_show_charp */ + { + char str[OPT_SHOW_LEN*2], *p; + char buf[OPT_SHOW_LEN+2] = { 0 }; + buf[OPT_SHOW_LEN] = '!'; + + /* Short test. */ + p = str; + strcpy(p, "short"); + opt_show_charp(buf, &p); + ok1(strcmp(buf, "\"short\"") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + + /* Truncate test. */ + memset(p, 'x', OPT_SHOW_LEN*2); + p[OPT_SHOW_LEN*2-1] = '\0'; + opt_show_charp(buf, &p); + ok1(buf[0] == '"'); + ok1(buf[OPT_SHOW_LEN-1] == '"'); + ok1(buf[OPT_SHOW_LEN] == '!'); + ok1(strspn(buf+1, "x") == OPT_SHOW_LEN-2); + } + + /* opt_show_intval */ + { + int i; + char buf[OPT_SHOW_LEN+2] = { 0 }; + buf[OPT_SHOW_LEN] = '!'; + + i = -77; + opt_show_intval(buf, &i); + ok1(strcmp(buf, "-77") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + + i = 77; + opt_show_intval(buf, &i); + ok1(strcmp(buf, "77") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + } + + /* opt_show_uintval */ + { + unsigned int ui; + char buf[OPT_SHOW_LEN+2] = { 0 }; + buf[OPT_SHOW_LEN] = '!'; + + ui = 4294967295U; + opt_show_uintval(buf, &ui); + ok1(strcmp(buf, "4294967295") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + } + + /* opt_show_longval */ + { + long l; + char buf[OPT_SHOW_LEN+2] = { 0 }; + buf[OPT_SHOW_LEN] = '!'; + + l = 1234567890L; + opt_show_longval(buf, &l); + ok1(strcmp(buf, "1234567890") == 0); + 
ok1(buf[OPT_SHOW_LEN] == '!'); + } + + /* opt_show_ulongval */ + { + unsigned long ul; + char buf[OPT_SHOW_LEN+2] = { 0 }; + buf[OPT_SHOW_LEN] = '!'; + + ul = 4294967295UL; + opt_show_ulongval(buf, &ul); + ok1(strcmp(buf, "4294967295") == 0); + ok1(buf[OPT_SHOW_LEN] == '!'); + } + + /* opt_log_stderr. */ + { + reset_options(); + opt_register_noarg("-a", + opt_usage_and_exit, "[args]", ""); + + argc = 2; + argv = malloc(sizeof(argv[0]) * 3); + argv[0] = "thisprog"; + argv[1] = "--garbage"; + argv[2] = NULL; + ok1(!opt_parse(&argc, argv, opt_log_stderr)); + ok1(!strcmp(output, + "thisprog: --garbage: unrecognized option\n")); + free(output); + free(argv); + output = NULL; + } + + /* opt_log_stderr_exit. */ + { + int exitval; + reset_options(); + opt_register_noarg("-a", + opt_usage_and_exit, "[args]", ""); + argc = 2; + argv = malloc(sizeof(argv[0]) * 3); + argv[0] = "thisprog"; + argv[1] = "--garbage"; + argv[2] = NULL; + exitval = setjmp(exited); + if (exitval == 0) { + opt_parse(&argc, argv, opt_log_stderr_exit); + fail("opt_log_stderr_exit returned?"); + } else { + ok1(exitval - 1 == 1); + } + free(argv); + ok1(!strcmp(output, + "thisprog: --garbage: unrecognized option\n")); + free(output); + output = NULL; + } + + return exit_status(); +} diff --git a/ccan/opt/test/run-iter.c b/ccan/opt/test/run-iter.c new file mode 100644 index 0000000..66fd5db --- /dev/null +++ b/ccan/opt/test/run-iter.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include +#include "utils.h" +#include +#include +#include +#include + +static void reset_options(void) +{ + free(opt_table); + opt_table = NULL; + opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; +} + +/* Test iterators. */ +int main(int argc, char *argv[]) +{ + unsigned j, i, len = 0; + const char *p; + + plan_tests(37 * 2); + for (j = 0; j < 2; j ++) { + reset_options(); + /* Giving subtable a title makes an extra entry! */ + opt_register_table(subtables, j == 0 ? 
NULL : "subtable"); + + p = first_lopt(&i, &len); + ok1(i == j + 0); + ok1(len == 3); + ok1(strncmp(p, "jjj", len) == 0); + p = next_lopt(p, &i, &len); + ok1(i == j + 0); + ok1(len == 3); + ok1(strncmp(p, "lll", len) == 0); + p = next_lopt(p, &i, &len); + ok1(i == j + 1); + ok1(len == 3); + ok1(strncmp(p, "mmm", len) == 0); + p = next_lopt(p, &i, &len); + ok1(i == j + 5); + ok1(len == 3); + ok1(strncmp(p, "ddd", len) == 0); + p = next_lopt(p, &i, &len); + ok1(i == j + 6); + ok1(len == 3); + ok1(strncmp(p, "eee", len) == 0); + p = next_lopt(p, &i, &len); + ok1(i == j + 7); + ok1(len == 3); + ok1(strncmp(p, "ggg", len) == 0); + p = next_lopt(p, &i, &len); + ok1(i == j + 8); + ok1(len == 3); + ok1(strncmp(p, "hhh", len) == 0); + p = next_lopt(p, &i, &len); + ok1(!p); + + p = first_sopt(&i); + ok1(i == j + 0); + ok1(*p == 'j'); + p = next_sopt(p, &i); + ok1(i == j + 0); + ok1(*p == 'l'); + p = next_sopt(p, &i); + ok1(i == j + 1); + ok1(*p == 'm'); + p = next_sopt(p, &i); + ok1(i == j + 2); + ok1(*p == 'a'); + p = next_sopt(p, &i); + ok1(i == j + 3); + ok1(*p == 'b'); + p = next_sopt(p, &i); + ok1(i == j + 7); + ok1(*p == 'g'); + p = next_sopt(p, &i); + ok1(i == j + 8); + ok1(*p == 'h'); + p = next_sopt(p, &i); + ok1(!p); + } + + return exit_status(); +} diff --git a/ccan/opt/test/run-no-options.c b/ccan/opt/test/run-no-options.c new file mode 100644 index 0000000..cf255fe --- /dev/null +++ b/ccan/opt/test/run-no-options.c @@ -0,0 +1,33 @@ +/* Make sure we still work with no options registered */ +#include +#include +#include +#include +#include +#include +#include "utils.h" + +int main(int argc, char *argv[]) +{ + const char *myname = argv[0]; + + plan_tests(7); + + /* Simple short arg.*/ + ok1(!parse_args(&argc, &argv, "-a", NULL)); + /* Simple long arg.*/ + ok1(!parse_args(&argc, &argv, "--aaa", NULL)); + + /* Extra arguments preserved. 
*/ + ok1(parse_args(&argc, &argv, "extra", "args", NULL)); + ok1(argc == 3); + ok1(argv[0] == myname); + ok1(strcmp(argv[1], "extra") == 0); + ok1(strcmp(argv[2], "args") == 0); + + /* parse_args allocates argv */ + free(argv); + + return exit_status(); +} + diff --git a/ccan/opt/test/run-usage.c b/ccan/opt/test/run-usage.c new file mode 100644 index 0000000..2af2c7e --- /dev/null +++ b/ccan/opt/test/run-usage.c @@ -0,0 +1,112 @@ +#include +#include +#include +#include +#include +#include "utils.h" +#include +#include +#include +#include + +static char *my_cb(void *p) +{ + return NULL; +} + +static void reset_options(void) +{ + free(opt_table); + opt_table = NULL; + opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; +} + +/* Test helpers. */ +int main(int argc, char *argv[]) +{ + char *output; + char *longname = strdup("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); + char *shortname = strdup("shortname"); + + plan_tests(48); + opt_register_table(subtables, NULL); + opt_register_noarg("--kkk|-k", my_cb, NULL, "magic kkk option"); + opt_register_noarg("-?", opt_usage_and_exit, "...", + "This message"); + opt_register_arg("--longname", opt_set_charp, opt_show_charp, + &longname, "a really long option default"); + opt_register_arg("--shortname", opt_set_charp, opt_show_charp, + &shortname, "a short option default"); + output = opt_usage("my name", "ExTrA Args"); + diag("%s", output); + ok1(strstr(output, "Usage: my name")); + ok1(strstr(output, "--jjj|-j|--lll|-l ")); + ok1(strstr(output, "ExTrA Args")); + ok1(strstr(output, "-a ")); + ok1(strstr(output, " Description of a\n")); + ok1(strstr(output, "-b ")); + ok1(strstr(output, " Description of b (default: b)\n")); + ok1(strstr(output, "--ddd ")); + ok1(strstr(output, " Description of ddd\n")); + ok1(strstr(output, "--eee ")); + ok1(strstr(output, " (default: eee)\n")); + ok1(strstr(output, "long table options:\n")); + ok1(strstr(output, "--ggg|-g ")); + 
ok1(strstr(output, " Description of ggg\n")); + ok1(strstr(output, "-h|--hhh ")); + ok1(strstr(output, " Description of hhh\n")); + ok1(strstr(output, "--kkk|-k")); + ok1(strstr(output, "magic kkk option")); + /* This entry is hidden. */ + ok1(!strstr(output, "--mmm|-m")); + free(output); + + /* NULL should use string from registered options. */ + output = opt_usage("my name", NULL); + diag("%s", output); + ok1(strstr(output, "Usage: my name")); + ok1(strstr(output, "--jjj|-j|--lll|-l ")); + ok1(strstr(output, "...")); + ok1(strstr(output, "-a ")); + ok1(strstr(output, " Description of a\n")); + ok1(strstr(output, "-b ")); + ok1(strstr(output, " Description of b (default: b)\n")); + ok1(strstr(output, "--ddd ")); + ok1(strstr(output, " Description of ddd\n")); + ok1(strstr(output, "--eee ")); + ok1(strstr(output, " (default: eee)\n")); + ok1(strstr(output, "long table options:\n")); + ok1(strstr(output, "--ggg|-g ")); + ok1(strstr(output, " Description of ggg\n")); + ok1(strstr(output, "-h|--hhh ")); + ok1(strstr(output, " Description of hhh\n")); + ok1(strstr(output, "--kkk|-k")); + ok1(strstr(output, "magic kkk option")); + ok1(strstr(output, "--longname")); + ok1(strstr(output, "a really long option default")); + ok1(strstr(output, "(default: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"...)")); + ok1(strstr(output, "--shortname")); + ok1(strstr(output, "a short option default")); + ok1(strstr(output, "(default: \"shortname\")")); + /* This entry is hidden. */ + ok1(!strstr(output, "--mmm|-m")); + free(output); + + reset_options(); + /* Empty table test. */ + output = opt_usage("nothing", NULL); + ok1(strstr(output, "Usage: nothing \n")); + free(output); + + /* No short args. 
*/ + opt_register_noarg("--aaa", test_noarg, NULL, "AAAAll"); + output = opt_usage("onearg", NULL); + ok1(strstr(output, "Usage: onearg \n")); + ok1(strstr(output, "--aaa")); + ok1(strstr(output, "AAAAll")); + free(output); + + free(shortname); + free(longname); + return exit_status(); +} diff --git a/ccan/opt/test/run.c b/ccan/opt/test/run.c new file mode 100644 index 0000000..9a769ba --- /dev/null +++ b/ccan/opt/test/run.c @@ -0,0 +1,297 @@ +#include +#include +#include +#include +#include +#include +#include "utils.h" + +static void reset_options(void) +{ + free(opt_table); + opt_table = NULL; + opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; + free(err_output); + err_output = NULL; +} + +int main(int argc, char *argv[]) +{ + const char *myname = argv[0]; + + plan_tests(215); + + /* Simple short arg.*/ + opt_register_noarg("-a", test_noarg, NULL, "All"); + ok1(parse_args(&argc, &argv, "-a", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 1); + + /* Simple long arg. */ + opt_register_noarg("--aaa", test_noarg, NULL, "AAAAll"); + ok1(parse_args(&argc, &argv, "--aaa", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 2); + + /* Both long and short args. */ + opt_register_noarg("--aaa|-a", test_noarg, NULL, "AAAAAAll"); + ok1(parse_args(&argc, &argv, "--aaa", "-a", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 4); + + /* Extra arguments preserved. */ + ok1(parse_args(&argc, &argv, "--aaa", "-a", "extra", "args", NULL)); + ok1(argc == 3); + ok1(argv[0] == myname); + ok1(strcmp(argv[1], "extra") == 0); + ok1(strcmp(argv[2], "args") == 0); + ok1(test_cb_called == 6); + + /* Malformed versions. 
*/ + ok1(!parse_args(&argc, &argv, "--aaa=arg", NULL)); + ok1(strstr(err_output, ": --aaa: doesn't allow an argument")); + ok1(!parse_args(&argc, &argv, "--aa", NULL)); + ok1(strstr(err_output, ": --aa: unrecognized option")); + ok1(!parse_args(&argc, &argv, "--aaargh", NULL)); + ok1(strstr(err_output, ": --aaargh: unrecognized option")); + + /* Argument variants. */ + reset_options(); + test_cb_called = 0; + opt_register_arg("-a|--aaa", test_arg, NULL, "aaa", "AAAAAAll"); + ok1(parse_args(&argc, &argv, "--aaa", "aaa", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(test_cb_called == 1); + + ok1(parse_args(&argc, &argv, "--aaa=aaa", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(test_cb_called == 2); + + ok1(parse_args(&argc, &argv, "-a", "aaa", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(test_cb_called == 3); + + /* Malformed versions. */ + ok1(!parse_args(&argc, &argv, "-a", NULL)); + ok1(strstr(err_output, ": -a: requires an argument")); + ok1(!parse_args(&argc, &argv, "--aaa", NULL)); + ok1(strstr(err_output, ": --aaa: requires an argument")); + ok1(!parse_args(&argc, &argv, "--aa", NULL)); + ok1(strstr(err_output, ": --aa: unrecognized option")); + ok1(!parse_args(&argc, &argv, "--aaargh", NULL)); + ok1(strstr(err_output, ": --aaargh: unrecognized option")); + + /* Now, tables. */ + /* Short table: */ + reset_options(); + test_cb_called = 0; + opt_register_table(short_table, NULL); + ok1(parse_args(&argc, &argv, "-a", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 1); + /* This one needs an arg. 
*/ + ok1(parse_args(&argc, &argv, "-b", NULL) == false); + ok1(test_cb_called == 1); + ok1(parse_args(&argc, &argv, "-b", "b", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 2); + + /* Long table: */ + reset_options(); + test_cb_called = 0; + opt_register_table(long_table, NULL); + ok1(parse_args(&argc, &argv, "--ddd", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 1); + /* This one needs an arg. */ + ok1(parse_args(&argc, &argv, "--eee", NULL) == false); + ok1(test_cb_called == 1); + ok1(parse_args(&argc, &argv, "--eee", "eee", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 2); + + /* Short and long, both. */ + reset_options(); + test_cb_called = 0; + opt_register_table(long_and_short_table, NULL); + ok1(parse_args(&argc, &argv, "-g", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 1); + ok1(parse_args(&argc, &argv, "--ggg", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 2); + /* This one needs an arg. */ + ok1(parse_args(&argc, &argv, "-h", NULL) == false); + ok1(test_cb_called == 2); + ok1(parse_args(&argc, &argv, "-h", "hhh", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 3); + ok1(parse_args(&argc, &argv, "--hhh", NULL) == false); + ok1(test_cb_called == 3); + ok1(parse_args(&argc, &argv, "--hhh", "hhh", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 4); + + /* Those will all work as tables. */ + test_cb_called = 0; + reset_options(); + opt_register_table(subtables, NULL); + ok1(parse_args(&argc, &argv, "-a", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 1); + /* This one needs an arg. 
*/ + ok1(parse_args(&argc, &argv, "-b", NULL) == false); + ok1(test_cb_called == 1); + ok1(parse_args(&argc, &argv, "-b", "b", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 2); + + ok1(parse_args(&argc, &argv, "--ddd", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 3); + /* This one needs an arg. */ + ok1(parse_args(&argc, &argv, "--eee", NULL) == false); + ok1(test_cb_called == 3); + ok1(parse_args(&argc, &argv, "--eee", "eee", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 4); + + /* Short and long, both. */ + ok1(parse_args(&argc, &argv, "-g", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 5); + ok1(parse_args(&argc, &argv, "--ggg", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 6); + /* This one needs an arg. */ + ok1(parse_args(&argc, &argv, "-h", NULL) == false); + ok1(test_cb_called == 6); + ok1(parse_args(&argc, &argv, "-h", "hhh", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 7); + ok1(parse_args(&argc, &argv, "--hhh", NULL) == false); + ok1(test_cb_called == 7); + ok1(parse_args(&argc, &argv, "--hhh", "hhh", NULL)); + ok1(argc == 1); + ok1(argv[0] == myname); + ok1(argv[1] == NULL); + ok1(test_cb_called == 8); + + /* Now the tricky one: -? must not be confused with an unknown option */ + test_cb_called = 0; + reset_options(); + + /* glibc's getopt does not handle ? with arguments. 
*/ + opt_register_noarg("-?", test_noarg, NULL, "Help"); + ok1(parse_args(&argc, &argv, "-?", NULL)); + ok1(test_cb_called == 1); + ok1(parse_args(&argc, &argv, "-a", NULL) == false); + ok1(test_cb_called == 1); + ok1(strstr(err_output, ": -a: unrecognized option")); + ok1(parse_args(&argc, &argv, "--aaaa", NULL) == false); + ok1(test_cb_called == 1); + ok1(strstr(err_output, ": --aaaa: unrecognized option")); + + test_cb_called = 0; + reset_options(); + + /* Corner cases involving short arg parsing weirdness. */ + opt_register_noarg("-a|--aaa", test_noarg, NULL, "a"); + opt_register_arg("-b|--bbb", test_arg, NULL, "bbb", "b"); + opt_register_arg("-c|--ccc", test_arg, NULL, "aaa", "c"); + /* -aa == -a -a */ + ok1(parse_args(&argc, &argv, "-aa", NULL)); + ok1(test_cb_called == 2); + ok1(parse_args(&argc, &argv, "-aab", NULL) == false); + ok1(test_cb_called == 4); + ok1(strstr(err_output, ": -b: requires an argument")); + ok1(parse_args(&argc, &argv, "-bbbb", NULL)); + ok1(test_cb_called == 5); + ok1(parse_args(&argc, &argv, "-aabbbb", NULL)); + ok1(test_cb_called == 8); + ok1(parse_args(&argc, &argv, "-aabbbb", "-b", "bbb", NULL)); + ok1(test_cb_called == 12); + ok1(parse_args(&argc, &argv, "-aabbbb", "--bbb", "bbb", NULL)); + ok1(test_cb_called == 16); + ok1(parse_args(&argc, &argv, "-aabbbb", "--bbb=bbb", NULL)); + ok1(test_cb_called == 20); + ok1(parse_args(&argc, &argv, "-aacaaa", NULL)); + ok1(test_cb_called == 23); + ok1(parse_args(&argc, &argv, "-aacaaa", "-a", NULL)); + ok1(test_cb_called == 27); + ok1(parse_args(&argc, &argv, "-aacaaa", "--bbb", "bbb", "-aacaaa", + NULL)); + ok1(test_cb_called == 34); + + test_cb_called = 0; + reset_options(); + + /* -- and POSIXLY_CORRECT */ + opt_register_noarg("-a|--aaa", test_noarg, NULL, "a"); + ok1(parse_args(&argc, &argv, "-a", "--", "-a", NULL)); + ok1(test_cb_called == 1); + ok1(argc == 2); + ok1(strcmp(argv[1], "-a") == 0); + ok1(!argv[2]); + + unsetenv("POSIXLY_CORRECT"); + ok1(parse_args(&argc, &argv, "-a", 
"somearg", "-a", "--", "-a", NULL)); + ok1(test_cb_called == 3); + ok1(argc == 3); + ok1(strcmp(argv[1], "somearg") == 0); + ok1(strcmp(argv[2], "-a") == 0); + ok1(!argv[3]); + + setenv("POSIXLY_CORRECT", "1", 1); + ok1(parse_args(&argc, &argv, "-a", "somearg", "-a", "--", "-a", NULL)); + ok1(test_cb_called == 4); + ok1(argc == 5); + ok1(strcmp(argv[1], "somearg") == 0); + ok1(strcmp(argv[2], "-a") == 0); + ok1(strcmp(argv[3], "--") == 0); + ok1(strcmp(argv[4], "-a") == 0); + ok1(!argv[5]); + + /* parse_args allocates argv */ + free(argv); + return exit_status(); +} diff --git a/ccan/opt/test/utils.c b/ccan/opt/test/utils.c new file mode 100644 index 0000000..9544fa7 --- /dev/null +++ b/ccan/opt/test/utils.c @@ -0,0 +1,110 @@ +#include "config.h" +#include +#include +#include +#include +#include +#include +#include +#include "utils.h" + +unsigned int test_cb_called; +char *test_noarg(void *arg) +{ + test_cb_called++; + return NULL; +} + +char *test_arg(const char *optarg, const char *arg) +{ + test_cb_called++; + ok1(strcmp(optarg, arg) == 0); + return NULL; +} + +void show_arg(char buf[OPT_SHOW_LEN], const char *arg) +{ + strncpy(buf, arg, OPT_SHOW_LEN); +} + +char *err_output = NULL; + +void save_err_output(const char *fmt, ...) +{ + va_list ap; + char *p; + + va_start(ap, fmt); + /* Check return, for fascist gcc */ + if (vasprintf(&p, fmt, ap) == -1) + p = NULL; + va_end(ap); + + if (err_output) { + err_output = realloc(err_output, + strlen(err_output) + strlen(p) + 1); + strcat(err_output, p); + free(p); + } else + err_output = p; +} + +static bool allocated = false; + +bool parse_args(int *argc, char ***argv, ...) +{ + char **a; + va_list ap; + + va_start(ap, argv); + *argc = 1; + a = malloc(sizeof(*a) * (*argc + 1)); + a[0] = (*argv)[0]; + while ((a[*argc] = va_arg(ap, char *)) != NULL) { + (*argc)++; + a = realloc(a, sizeof(*a) * (*argc + 1)); + } + + if (allocated) + free(*argv); + + *argv = a; + allocated = true; + /* Re-set before parsing. 
*/ + optind = 0; + + return opt_parse(argc, *argv, save_err_output); +} + +struct opt_table short_table[] = { + /* Short opts, different args. */ + OPT_WITHOUT_ARG("-a", test_noarg, "a", "Description of a"), + OPT_WITH_ARG("-b", test_arg, show_arg, "b", "Description of b"), + OPT_ENDTABLE +}; + +struct opt_table long_table[] = { + /* Long opts, different args. */ + OPT_WITHOUT_ARG("--ddd", test_noarg, "ddd", "Description of ddd"), + OPT_WITH_ARG("--eee ", test_arg, show_arg, "eee", ""), + OPT_ENDTABLE +}; + +struct opt_table long_and_short_table[] = { + /* Short and long, different args. */ + OPT_WITHOUT_ARG("--ggg|-g", test_noarg, "ggg", "Description of ggg"), + OPT_WITH_ARG("-h|--hhh", test_arg, NULL, "hhh", "Description of hhh"), + OPT_ENDTABLE +}; + +/* Sub-table test. */ +struct opt_table subtables[] = { + /* Two short, and two long long, no description */ + OPT_WITH_ARG("--jjj|-j|--lll|-l", test_arg, show_arg, "jjj", ""), + /* Hidden option */ + OPT_WITH_ARG("--mmm|-m", test_arg, show_arg, "mmm", opt_hidden), + OPT_SUBTABLE(short_table, NULL), + OPT_SUBTABLE(long_table, "long table options"), + OPT_SUBTABLE(long_and_short_table, NULL), + OPT_ENDTABLE +}; diff --git a/ccan/opt/test/utils.h b/ccan/opt/test/utils.h new file mode 100644 index 0000000..f7c1896 --- /dev/null +++ b/ccan/opt/test/utils.h @@ -0,0 +1,19 @@ +#ifndef CCAN_OPT_TEST_UTILS_H +#define CCAN_OPT_TEST_UTILS_H +#include +#include + +bool parse_args(int *argc, char ***argv, ...); +extern char *err_output; +void save_err_output(const char *fmt, ...); + +extern unsigned int test_cb_called; +char *test_noarg(void *arg); +char *test_arg(const char *optarg, const char *arg); +void show_arg(char buf[OPT_SHOW_LEN], const char *arg); + +extern struct opt_table short_table[]; +extern struct opt_table long_table[]; +extern struct opt_table long_and_short_table[]; +extern struct opt_table subtables[]; +#endif /* CCAN_OPT_TEST_UTILS_H */ diff --git a/ccan/opt/usage.c b/ccan/opt/usage.c new file mode 100644 
index 0000000..4d784bc --- /dev/null +++ b/ccan/opt/usage.c @@ -0,0 +1,111 @@ +#include +#include +#include +#include +#include +#include "private.h" + +/* We only use this for pointer comparisons. */ +const char opt_hidden[1]; + +static unsigned write_short_options(char *str) +{ + unsigned int i, num = 0; + const char *p; + + for (p = first_sopt(&i); p; p = next_sopt(p, &i)) { + if (opt_table[i].desc != opt_hidden) + str[num++] = *p; + } + return num; +} + +#define OPT_SPACE_PAD " " + +/* FIXME: Get all purdy. */ +char *opt_usage(const char *argv0, const char *extra) +{ + unsigned int i, num, len; + char *ret, *p; + + if (!extra) { + extra = ""; + for (i = 0; i < opt_count; i++) { + if (opt_table[i].cb == (void *)opt_usage_and_exit + && opt_table[i].u.carg) { + extra = opt_table[i].u.carg; + break; + } + } + } + + /* An overestimate of our length. */ + len = strlen("Usage: %s ") + strlen(argv0) + + strlen("[-%.*s]") + opt_num_short + 1 + + strlen(" ") + strlen(extra) + + strlen("\n"); + + for (i = 0; i < opt_count; i++) { + if (opt_table[i].type == OPT_SUBTABLE) { + len += strlen("\n") + strlen(opt_table[i].desc) + + strlen(":\n"); + } else if (opt_table[i].desc != opt_hidden) { + len += strlen(opt_table[i].names) + strlen(" "); + len += strlen(OPT_SPACE_PAD) + + strlen(opt_table[i].desc) + 1; + if (opt_table[i].show) { + len += strlen("(default: %s)") + + OPT_SHOW_LEN + sizeof("..."); + } + len += strlen("\n"); + } + } + + p = ret = malloc(len); + if (!ret) + return NULL; + + p += sprintf(p, "Usage: %s", argv0); + p += sprintf(p, " [-"); + num = write_short_options(p); + if (num) { + p += num; + p += sprintf(p, "]"); + } else { + /* Remove start of single-entry options */ + p -= 3; + } + if (extra) + p += sprintf(p, " %s", extra); + p += sprintf(p, "\n"); + + for (i = 0; i < opt_count; i++) { + if (opt_table[i].desc == opt_hidden) + continue; + if (opt_table[i].type == OPT_SUBTABLE) { + p += sprintf(p, "%s:\n", opt_table[i].desc); + continue; + } + len = 
sprintf(p, "%s", opt_table[i].names); + if (opt_table[i].type == OPT_HASARG + && !strchr(opt_table[i].names, ' ') + && !strchr(opt_table[i].names, '=')) + len += sprintf(p + len, " "); + len += sprintf(p + len, "%.*s", + len < strlen(OPT_SPACE_PAD) + ? (unsigned)strlen(OPT_SPACE_PAD) - len : 1, + OPT_SPACE_PAD); + + len += sprintf(p + len, "%s", opt_table[i].desc); + if (opt_table[i].show) { + char buf[OPT_SHOW_LEN + sizeof("...")]; + strcpy(buf + OPT_SHOW_LEN, "..."); + opt_table[i].show(buf, opt_table[i].u.arg); + len += sprintf(p + len, " (default: %s)", buf); + } + p += len; + p += sprintf(p, "\n"); + } + *p = '\0'; + return ret; +} diff --git a/ccan/typesafe_cb/LICENSE b/ccan/typesafe_cb/LICENSE new file mode 100644 index 0000000..2d2d780 --- /dev/null +++ b/ccan/typesafe_cb/LICENSE @@ -0,0 +1,510 @@ + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. 
You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations +below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. 
+ + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it +becomes a de-facto standard. To achieve this, non-free programs must +be allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. 
In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. 
A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control +compilation and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. 
You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at least + three years, to give the same user the materials specified in + Subsection 6a, above, for a charge no more than the cost of + performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply, and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License +may add an explicit geographical distribution limitation excluding those +countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms +of the ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
+It is safest to attach them to the start of each source file to most +effectively convey the exclusion of warranty; and each file should +have at least the "copyright" line and a pointer to where the full +notice is found. + + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or +your school, if any, to sign a "copyright disclaimer" for the library, +if necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James + Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/ccan/typesafe_cb/_info b/ccan/typesafe_cb/_info new file mode 100644 index 0000000..2fe4fec --- /dev/null +++ b/ccan/typesafe_cb/_info @@ -0,0 +1,151 @@ +#include +#include +#include "config.h" + +/** + * typesafe_cb - macros for safe callbacks. + * + * The basis of the typesafe_cb header is typesafe_cb_cast(): a + * conditional cast macro. If an expression exactly matches a given + * type, it is cast to the target type, otherwise it is left alone. 
+ * + * This allows us to create functions which take a small number of + * specific types, rather than being forced to use a void *. In + * particular, it is useful for creating typesafe callbacks as the + * helpers typesafe_cb(), typesafe_cb_preargs() and + * typesafe_cb_postargs() demonstrate. + * + * The standard way of passing arguments to callback functions in C is + * to use a void pointer, which the callback then casts back to the + * expected type. This unfortunately subverts the type checking the + * compiler would perform if it were a direct call. Here's an example: + * + * static void my_callback(void *_obj) + * { + * struct obj *obj = _obj; + * ... + * } + * ... + * register_callback(my_callback, &my_obj); + * + * If we wanted to use the natural type for my_callback (ie. "void + * my_callback(struct obj *obj)"), we could make register_callback() + * take a void * as its first argument, but this would subvert all + * type checking. We really want register_callback() to accept only + * the exactly correct function type to match the argument, or a + * function which takes a void *. + * + * This is where typesafe_cb() comes in: it uses typesafe_cb_cast() to + * cast the callback function if it matches the argument type: + * + * void _register_callback(void (*cb)(void *arg), void *arg); + * #define register_callback(cb, arg) \ + * _register_callback(typesafe_cb(void, void *, (cb), (arg)), \ + * (arg)) + * + * On compilers which don't support the extensions required + * typesafe_cb_cast() and friend become an unconditional cast, so your + * code will compile but you won't get type checking. + * + * Example: + * #include + * #include + * #include + * + * // Generic callback infrastructure. 
+ * struct callback { + * struct callback *next; + * int value; + * int (*callback)(int value, void *arg); + * void *arg; + * }; + * static struct callback *callbacks; + * + * static void _register_callback(int value, int (*cb)(int, void *), + * void *arg) + * { + * struct callback *new = malloc(sizeof(*new)); + * new->next = callbacks; + * new->value = value; + * new->callback = cb; + * new->arg = arg; + * callbacks = new; + * } + * #define register_callback(value, cb, arg) \ + * _register_callback(value, \ + * typesafe_cb_preargs(int, void *, \ + * (cb), (arg), int),\ + * (arg)) + * + * static struct callback *find_callback(int value) + * { + * struct callback *i; + * + * for (i = callbacks; i; i = i->next) + * if (i->value == value) + * return i; + * return NULL; + * } + * + * // Define several silly callbacks. Note they don't use void *! + * #define DEF_CALLBACK(name, op) \ + * static int name(int val, int *arg) \ + * { \ + * printf("%s", #op); \ + * return val op *arg; \ + * } + * DEF_CALLBACK(multiply, *); + * DEF_CALLBACK(add, +); + * DEF_CALLBACK(divide, /); + * DEF_CALLBACK(sub, -); + * DEF_CALLBACK(or, |); + * DEF_CALLBACK(and, &); + * DEF_CALLBACK(xor, ^); + * DEF_CALLBACK(assign, =); + * + * // Silly game to find the longest chain of values. + * int main(int argc, char *argv[]) + * { + * int i, run = 1, num = argv[1] ? atoi(argv[1]) : 0; + * + * for (i = 1; i < 1024;) { + * // Since run is an int, compiler checks "add" does too. 
+ * register_callback(i++, add, &run); + * register_callback(i++, divide, &run); + * register_callback(i++, sub, &run); + * register_callback(i++, multiply, &run); + * register_callback(i++, or, &run); + * register_callback(i++, and, &run); + * register_callback(i++, xor, &run); + * register_callback(i++, assign, &run); + * } + * + * printf("%i ", num); + * while (run < 56) { + * struct callback *cb = find_callback(num % i); + * if (!cb) { + * printf("-> STOP\n"); + * return 1; + * } + * num = cb->callback(num, cb->arg); + * printf("->%i ", num); + * run++; + * } + * printf("-> Winner!\n"); + * return 0; + * } + * + * License: LGPL (2 or any later version) + * Author: Rusty Russell + */ +int main(int argc, char *argv[]) +{ + if (argc != 2) + return 1; + + if (strcmp(argv[1], "depends") == 0) { + return 0; + } + + return 1; +} diff --git a/ccan/typesafe_cb/test/compile_fail-cast_if_type-promotable.c b/ccan/typesafe_cb/test/compile_fail-cast_if_type-promotable.c new file mode 100644 index 0000000..11d42f4 --- /dev/null +++ b/ccan/typesafe_cb/test/compile_fail-cast_if_type-promotable.c @@ -0,0 +1,23 @@ +#include +#include + +static void _set_some_value(void *val) +{ +} + +#define set_some_value(expr) \ + _set_some_value(typesafe_cb_cast(void *, long, (expr))) + +int main(int argc, char *argv[]) +{ +#ifdef FAIL + bool x = 0; +#if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P +#error "Unfortunately we don't fail if typesafe_cb_cast is a noop." 
+#endif +#else + long x = 0; +#endif + set_some_value(x); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_fail-typesafe_cb-int.c b/ccan/typesafe_cb/test/compile_fail-typesafe_cb-int.c new file mode 100644 index 0000000..c403336 --- /dev/null +++ b/ccan/typesafe_cb/test/compile_fail-typesafe_cb-int.c @@ -0,0 +1,27 @@ +#include +#include + +void _callback(void (*fn)(void *arg), void *arg); +void _callback(void (*fn)(void *arg), void *arg) +{ + fn(arg); +} + +/* Callback is set up to warn if arg isn't a pointer (since it won't + * pass cleanly to _callback's second arg. */ +#define callback(fn, arg) \ + _callback(typesafe_cb(void, (fn), (arg)), (arg)) + +void my_callback(int something); +void my_callback(int something) +{ +} + +int main(int argc, char *argv[]) +{ +#ifdef FAIL + /* This fails due to arg, not due to cast. */ + callback(my_callback, 100); +#endif + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_fail-typesafe_cb.c b/ccan/typesafe_cb/test/compile_fail-typesafe_cb.c new file mode 100644 index 0000000..81e36d7 --- /dev/null +++ b/ccan/typesafe_cb/test/compile_fail-typesafe_cb.c @@ -0,0 +1,34 @@ +#include +#include + +static void _register_callback(void (*cb)(void *arg), void *arg) +{ +} + +#define register_callback(cb, arg) \ + _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) + +static void my_callback(char *p) +{ +} + +int main(int argc, char *argv[]) +{ + char str[] = "hello world"; +#ifdef FAIL + int *p; +#if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P +#error "Unfortunately we don't fail if typesafe_cb_cast is a noop." +#endif +#else + char *p; +#endif + p = NULL; + + /* This should work always. 
*/ + register_callback(my_callback, str); + + /* This will fail with FAIL defined */ + register_callback(my_callback, p); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast-multi.c b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast-multi.c new file mode 100644 index 0000000..62b5f91 --- /dev/null +++ b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast-multi.c @@ -0,0 +1,43 @@ +#include +#include + +struct foo { + int x; +}; + +struct bar { + int x; +}; + +struct baz { + int x; +}; + +struct any { + int x; +}; + +struct other { + int x; +}; + +static void take_any(struct any *any) +{ +} + +int main(int argc, char *argv[]) +{ +#ifdef FAIL + struct other +#if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P +#error "Unfortunately we don't fail if typesafe_cb_cast is a noop." +#endif +#else + struct foo +#endif + *arg = NULL; + take_any(typesafe_cb_cast3(struct any *, + struct foo *, struct bar *, struct baz *, + arg)); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast.c b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast.c new file mode 100644 index 0000000..d2e6f2a --- /dev/null +++ b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast.c @@ -0,0 +1,25 @@ +#include + +void _set_some_value(void *val); + +void _set_some_value(void *val) +{ +} + +#define set_some_value(expr) \ + _set_some_value(typesafe_cb_cast(void *, unsigned long, (expr))) + +int main(int argc, char *argv[]) +{ +#ifdef FAIL + int x = 0; + set_some_value(x); +#if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P +#error "Unfortunately we don't fail if typesafe_cb_cast is a noop." 
+#endif +#else + void *p = 0; + set_some_value(p); +#endif + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_fail-typesafe_cb_postargs.c b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_postargs.c new file mode 100644 index 0000000..7d35308 --- /dev/null +++ b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_postargs.c @@ -0,0 +1,27 @@ +#include +#include + +static void _register_callback(void (*cb)(void *arg, int x), void *arg) +{ +} +#define register_callback(cb, arg) \ + _register_callback(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) + +static void my_callback(char *p, int x) +{ +} + +int main(int argc, char *argv[]) +{ +#ifdef FAIL + int *p; +#if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P +#error "Unfortunately we don't fail if typesafe_cb_cast is a noop." +#endif +#else + char *p; +#endif + p = NULL; + register_callback(my_callback, p); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_fail-typesafe_cb_preargs.c b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_preargs.c new file mode 100644 index 0000000..bd55c67 --- /dev/null +++ b/ccan/typesafe_cb/test/compile_fail-typesafe_cb_preargs.c @@ -0,0 +1,28 @@ +#include +#include + +static void _register_callback(void (*cb)(int x, void *arg), void *arg) +{ +} + +#define register_callback(cb, arg) \ + _register_callback(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) + +static void my_callback(int x, char *p) +{ +} + +int main(int argc, char *argv[]) +{ +#ifdef FAIL + int *p; +#if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P +#error "Unfortunately we don't fail if typesafe_cb_cast is a noop." 
+#endif +#else + char *p; +#endif + p = NULL; + register_callback(my_callback, p); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_ok-typesafe_cb-NULL.c b/ccan/typesafe_cb/test/compile_ok-typesafe_cb-NULL.c new file mode 100644 index 0000000..265de8b --- /dev/null +++ b/ccan/typesafe_cb/test/compile_ok-typesafe_cb-NULL.c @@ -0,0 +1,17 @@ +#include +#include + +/* NULL args for callback function should be OK for normal and _def. */ + +static void _register_callback(void (*cb)(const void *arg), const void *arg) +{ +} + +#define register_callback(cb, arg) \ + _register_callback(typesafe_cb(void, const void *, (cb), (arg)), (arg)) + +int main(int argc, char *argv[]) +{ + register_callback(NULL, "hello world"); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_ok-typesafe_cb-undefined.c b/ccan/typesafe_cb/test/compile_ok-typesafe_cb-undefined.c new file mode 100644 index 0000000..aa50bad --- /dev/null +++ b/ccan/typesafe_cb/test/compile_ok-typesafe_cb-undefined.c @@ -0,0 +1,49 @@ +#include +#include + +/* const args in callbacks should be OK. 
*/ + +static void _register_callback(void (*cb)(void *arg), void *arg) +{ +} + +#define register_callback(cb, arg) \ + _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) + +static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) +{ +} + +#define register_callback_pre(cb, arg) \ + _register_callback_pre(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) + +static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) +{ +} + +#define register_callback_post(cb, arg) \ + _register_callback_post(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) + +struct undefined; + +static void my_callback(struct undefined *undef) +{ +} + +static void my_callback_pre(int x, struct undefined *undef) +{ +} + +static void my_callback_post(struct undefined *undef, int x) +{ +} + +int main(int argc, char *argv[]) +{ + struct undefined *handle = NULL; + + register_callback(my_callback, handle); + register_callback_pre(my_callback_pre, handle); + register_callback_post(my_callback_post, handle); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_ok-typesafe_cb-vars.c b/ccan/typesafe_cb/test/compile_ok-typesafe_cb-vars.c new file mode 100644 index 0000000..f6a2bfe --- /dev/null +++ b/ccan/typesafe_cb/test/compile_ok-typesafe_cb-vars.c @@ -0,0 +1,52 @@ +#include +#include + +/* const args in callbacks should be OK. 
*/ + +static void _register_callback(void (*cb)(void *arg), void *arg) +{ +} + +#define register_callback(cb, arg) \ + _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) + +static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) +{ +} + +#define register_callback_pre(cb, arg) \ + _register_callback_pre(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) + +static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) +{ +} + +#define register_callback_post(cb, arg) \ + _register_callback_post(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) + +struct undefined; + +static void my_callback(struct undefined *undef) +{ +} + +static void my_callback_pre(int x, struct undefined *undef) +{ +} + +static void my_callback_post(struct undefined *undef, int x) +{ +} + +int main(int argc, char *argv[]) +{ + struct undefined *handle = NULL; + void (*cb)(struct undefined *undef) = my_callback; + void (*pre)(int x, struct undefined *undef) = my_callback_pre; + void (*post)(struct undefined *undef, int x) = my_callback_post; + + register_callback(cb, handle); + register_callback_pre(pre, handle); + register_callback_post(post, handle); + return 0; +} diff --git a/ccan/typesafe_cb/test/compile_ok-typesafe_cb_cast.c b/ccan/typesafe_cb/test/compile_ok-typesafe_cb_cast.c new file mode 100644 index 0000000..4bb3b8b --- /dev/null +++ b/ccan/typesafe_cb/test/compile_ok-typesafe_cb_cast.c @@ -0,0 +1,41 @@ +#include +#include + +struct foo { + int x; +}; + +struct bar { + int x; +}; + +struct baz { + int x; +}; + +struct any { + int x; +}; + +static void take_any(struct any *any) +{ +} + +int main(int argc, char *argv[]) +{ + /* Otherwise we get unused warnings for these. 
*/ + struct foo *foo = NULL; + struct bar *bar = NULL; + struct baz *baz = NULL; + + take_any(typesafe_cb_cast3(struct any *, + struct foo *, struct bar *, struct baz *, + foo)); + take_any(typesafe_cb_cast3(struct any *, + struct foo *, struct bar *, struct baz *, + bar)); + take_any(typesafe_cb_cast3(struct any *, + struct foo *, struct bar *, struct baz *, + baz)); + return 0; +} diff --git a/ccan/typesafe_cb/test/run.c b/ccan/typesafe_cb/test/run.c new file mode 100644 index 0000000..79863db --- /dev/null +++ b/ccan/typesafe_cb/test/run.c @@ -0,0 +1,109 @@ +#include +#include +#include +#include + +static char dummy = 0; + +/* The example usage. */ +static void _set_some_value(void *val) +{ + ok1(val == &dummy); +} + +#define set_some_value(expr) \ + _set_some_value(typesafe_cb_cast(void *, unsigned long, (expr))) + +static void _callback_onearg(void (*fn)(void *arg), void *arg) +{ + fn(arg); +} + +static void _callback_preargs(void (*fn)(int a, int b, void *arg), void *arg) +{ + fn(1, 2, arg); +} + +static void _callback_postargs(void (*fn)(void *arg, int a, int b), void *arg) +{ + fn(arg, 1, 2); +} + +#define callback_onearg(cb, arg) \ + _callback_onearg(typesafe_cb(void, void *, (cb), (arg)), (arg)) + +#define callback_preargs(cb, arg) \ + _callback_preargs(typesafe_cb_preargs(void, void *, (cb), (arg), int, int), (arg)) + +#define callback_postargs(cb, arg) \ + _callback_postargs(typesafe_cb_postargs(void, void *, (cb), (arg), int, int), (arg)) + +static void my_callback_onearg(char *p) +{ + ok1(strcmp(p, "hello world") == 0); +} + +static void my_callback_preargs(int a, int b, char *p) +{ + ok1(a == 1); + ok1(b == 2); + ok1(strcmp(p, "hello world") == 0); +} + +static void my_callback_postargs(char *p, int a, int b) +{ + ok1(a == 1); + ok1(b == 2); + ok1(strcmp(p, "hello world") == 0); +} + +/* This is simply a compile test; we promised typesafe_cb_cast can be in a + * static initializer. 
*/ +struct callback_onearg +{ + void (*fn)(void *arg); + const void *arg; +}; + +struct callback_onearg cb_onearg += { typesafe_cb(void, void *, my_callback_onearg, (char *)(intptr_t)"hello world"), + "hello world" }; + +struct callback_preargs +{ + void (*fn)(int a, int b, void *arg); + const void *arg; +}; + +struct callback_preargs cb_preargs += { typesafe_cb_preargs(void, void *, my_callback_preargs, + (char *)(intptr_t)"hi", int, int), "hi" }; + +struct callback_postargs +{ + void (*fn)(void *arg, int a, int b); + const void *arg; +}; + +struct callback_postargs cb_postargs += { typesafe_cb_postargs(void, void *, my_callback_postargs, + (char *)(intptr_t)"hi", int, int), "hi" }; + +int main(int argc, char *argv[]) +{ + void *p = &dummy; + unsigned long l = (unsigned long)p; + char str[] = "hello world"; + + plan_tests(2 + 1 + 3 + 3); + set_some_value(p); + set_some_value(l); + + callback_onearg(my_callback_onearg, str); + + callback_preargs(my_callback_preargs, str); + + callback_postargs(my_callback_postargs, str); + + return exit_status(); +} diff --git a/ccan/typesafe_cb/typesafe_cb.h b/ccan/typesafe_cb/typesafe_cb.h new file mode 100644 index 0000000..40cfa39 --- /dev/null +++ b/ccan/typesafe_cb/typesafe_cb.h @@ -0,0 +1,133 @@ +#ifndef CCAN_TYPESAFE_CB_H +#define CCAN_TYPESAFE_CB_H +#include "config.h" + +#if HAVE_TYPEOF && HAVE_BUILTIN_CHOOSE_EXPR && HAVE_BUILTIN_TYPES_COMPATIBLE_P +/** + * typesafe_cb_cast - only cast an expression if it matches a given type + * @desttype: the type to cast to + * @oktype: the type we allow + * @expr: the expression to cast + * + * This macro is used to create functions which allow multiple types. + * The result of this macro is used somewhere that a @desttype type is + * expected: if @expr is exactly of type @oktype, then it will be + * cast to @desttype type, otherwise left alone. + * + * This macro can be used in static initializers. 
+ * + * This is merely useful for warnings: if the compiler does not + * support the primitives required for typesafe_cb_cast(), it becomes an + * unconditional cast, and the @oktype argument is not used. In + * particular, this means that @oktype can be a type which uses the + * "typeof": it will not be evaluated if typeof is not supported. + * + * Example: + * // We can take either an unsigned long or a void *. + * void _set_some_value(void *val); + * #define set_some_value(e) \ + * _set_some_value(typesafe_cb_cast(void *, (e), unsigned long)) + */ +#define typesafe_cb_cast(desttype, oktype, expr) \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(__typeof__(0?(expr):(expr)), \ + oktype), \ + (desttype)(expr), (expr)) +#else +#define typesafe_cb_cast(desttype, oktype, expr) ((desttype)(expr)) +#endif + +/** + * typesafe_cb_cast3 - only cast an expression if it matches given types + * @desttype: the type to cast to + * @ok1: the first type we allow + * @ok2: the second type we allow + * @ok3: the third type we allow + * @expr: the expression to cast + * + * This is a convenient wrapper for multiple typesafe_cb_cast() calls. + * You can chain them inside each other (ie. use typesafe_cb_cast() + * for expr) if you need more than 3 arguments. + * + * Example: + * // We can take either a long, unsigned long, void * or a const void *. + * void _set_some_value(void *val); + * #define set_some_value(expr) \ + * _set_some_value(typesafe_cb_cast3(void *,, \ + * long, unsigned long, const void *,\ + * (expr))) + */ +#define typesafe_cb_cast3(desttype, ok1, ok2, ok3, expr) \ + typesafe_cb_cast(desttype, ok1, \ + typesafe_cb_cast(desttype, ok2, \ + typesafe_cb_cast(desttype, ok3, \ + (expr)))) + +/** + * typesafe_cb - cast a callback function if it matches the arg + * @rtype: the return type of the callback function + * @atype: the (pointer) type which the callback function expects. 
+ * @fn: the callback function to cast + * @arg: the (pointer) argument to hand to the callback function. + * + * If a callback function takes a single argument, this macro does + * appropriate casts to a function which takes a single atype argument if the + * callback provided matches the @arg. + * + * It is assumed that @arg is of pointer type: usually @arg is passed + * or assigned to a void * elsewhere anyway. + * + * Example: + * void _register_callback(void (*fn)(void *arg), void *arg); + * #define register_callback(fn, arg) \ + * _register_callback(typesafe_cb(void, (fn), void*, (arg)), (arg)) + */ +#define typesafe_cb(rtype, atype, fn, arg) \ + typesafe_cb_cast(rtype (*)(atype), \ + rtype (*)(__typeof__(arg)), \ + (fn)) + +/** + * typesafe_cb_preargs - cast a callback function if it matches the arg + * @rtype: the return type of the callback function + * @atype: the (pointer) type which the callback function expects. + * @fn: the callback function to cast + * @arg: the (pointer) argument to hand to the callback function. + * + * This is a version of typesafe_cb() for callbacks that take other arguments + * before the @arg. + * + * Example: + * void _register_callback(void (*fn)(int, void *arg), void *arg); + * #define register_callback(fn, arg) \ + * _register_callback(typesafe_cb_preargs(void, (fn), void *, \ + * (arg), int), \ + * (arg)) + */ +#define typesafe_cb_preargs(rtype, atype, fn, arg, ...) \ + typesafe_cb_cast(rtype (*)(__VA_ARGS__, atype), \ + rtype (*)(__VA_ARGS__, __typeof__(arg)), \ + (fn)) + +/** + * typesafe_cb_postargs - cast a callback function if it matches the arg + * @rtype: the return type of the callback function + * @atype: the (pointer) type which the callback function expects. + * @fn: the callback function to cast + * @arg: the (pointer) argument to hand to the callback function. + * + * This is a version of typesafe_cb() for callbacks that take other arguments + * after the @arg. 
+ * + * Example: + * void _register_callback(void (*fn)(void *arg, int), void *arg); + * #define register_callback(fn, arg) \ + * _register_callback(typesafe_cb_postargs(void, (fn), void *, \ + * (arg), int), \ + * (arg)) + */ +#define typesafe_cb_postargs(rtype, atype, fn, arg, ...) \ + typesafe_cb_cast(rtype (*)(atype, __VA_ARGS__), \ + rtype (*)(__typeof__(arg), __VA_ARGS__), \ + (fn)) +#endif /* CCAN_CAST_IF_TYPE_H */ diff --git a/cgminer.c b/cgminer.c new file mode 100644 index 0000000..e9a0683 --- /dev/null +++ b/cgminer.c @@ -0,0 +1,11279 @@ +/* + * Copyright 2011-2014 Con Kolivas + * Copyright 2011-2012 Luke Dashjr + * Copyright 2010 Jeff Garzik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#ifdef HAVE_CURSES +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef USE_USBUTILS +#include +#endif + +#include +#include + +#ifndef WIN32 +#include +#else +#include +#endif +#include +#include +#ifdef HAVE_LIBCURL +#include +#else +char *curly = ":D"; +#endif +#include +#include + +#include "compat.h" +#include "miner.h" +#include "bench_block.h" +#ifdef USE_USBUTILS +#include "usbutils.h" +#endif + +#if defined(unix) || defined(__APPLE__) +#include +#include +#include +#endif + +#ifdef USE_AVALON +#include "driver-avalon.h" +#endif + +#ifdef USE_AVALON2 +#include "driver-avalon2.h" +#endif + +#ifdef USE_AVALON4 +#include "driver-avalon4.h" +#endif + +#ifdef USE_BFLSC +#include "driver-bflsc.h" +#endif + +#ifdef USE_SP10 +#include "driver-spondoolies-sp10.h" +#endif + +#ifdef USE_SP30 +#include "driver-spondoolies-sp30.h" +#endif + +#ifdef USE_BLOCK_ERUPTER +#include "driver-blockerupter.h" 
+#endif + +#ifdef USE_BITFURY +#include "driver-bitfury.h" +#endif + +#ifdef USE_COINTERRA +#include "driver-cointerra.h" +#endif + +#ifdef USE_HASHFAST +#include "driver-hashfast.h" +#endif + +#ifdef USE_BITMAIN +#include "driver-bitmain.h" +#endif + +#ifdef USE_BITMAIN_C5 +#include "driver-btm-c5.h" +#endif + +#if defined(USE_BITFORCE) || defined(USE_ICARUS) || defined(USE_AVALON) || defined(USE_AVALON2) || defined(USE_BMSC) || defined (USE_BITMAIN) || defined(USE_MODMINER) +# define USE_FPGA +#endif + +struct strategies strategies[] = +{ + { "Failover" }, + { "Round Robin" }, + { "Rotate" }, + { "Load Balance" }, + { "Balance" }, +}; + +static char packagename[256]; + +FILE * g_logwork_file = NULL; +FILE * g_logwork_files[65] = {0}; +FILE * g_logwork_diffs[65] = {0}; +int g_logwork_asicnum = 0; + +bool opt_work_update; +bool opt_protocol; +static struct benchfile_layout +{ + int length; + char *name; +} benchfile_data[] = +{ + { 1, "Version" }, + { 64, "MerkleRoot" }, + { 64, "PrevHash" }, + { 8, "DifficultyBits" }, + { 10, "NonceTime" } // 10 digits +}; +enum benchwork +{ + BENCHWORK_VERSION = 0, + BENCHWORK_MERKLEROOT, + BENCHWORK_PREVHASH, + BENCHWORK_DIFFBITS, + BENCHWORK_NONCETIME, + BENCHWORK_COUNT +}; + +#ifdef HAVE_LIBCURL +static char *opt_btc_address; +static char *opt_btc_sig; +#endif +static char *opt_benchfile; +static bool opt_benchfile_display; +static FILE *benchfile_in; +static int benchfile_line; +static int benchfile_work; +static bool opt_benchmark; +bool have_longpoll; +bool want_per_device_stats; +bool use_syslog; +bool opt_quiet; +bool opt_realquiet; +bool opt_loginput; +bool opt_compact; +const int opt_cutofftemp = 95; +int opt_log_interval = 5; +int opt_queue = 1; +static int max_queue = 1; +int opt_scantime = -1; +int opt_expiry = 120; +static const bool opt_time = true; +unsigned long long global_hashrate; +unsigned long global_quota_gcd = 1; +time_t last_getwork; + +#if defined(USE_USBUTILS) +int nDevs; +#endif +bool opt_restart = 
true; +bool opt_nogpu; + +struct list_head scan_devices; +static bool opt_display_devs; +int total_devices; +int zombie_devs; +static int most_devices; +struct cgpu_info **devices; +int mining_threads; +int num_processors; +#ifdef HAVE_CURSES +bool use_curses = true; +#else +bool use_curses; +#endif +static bool opt_widescreen; +static bool alt_status; +static bool switch_status; +static bool opt_submit_stale = true; +static int opt_shares; +bool opt_fail_only; +static bool opt_fix_protocol; +bool opt_lowmem; +bool opt_autofan; +bool opt_autoengine; +bool opt_noadl; +char *opt_version_path = NULL; +char *opt_logfile_path = NULL; +char *opt_logfile_openflag = NULL; +char *opt_logwork_path = NULL; +char *opt_logwork_asicnum = NULL; +bool opt_logwork_diff = false; +char *opt_api_allow = NULL; +char *opt_api_groups; +char *opt_api_description = PACKAGE_STRING; +int opt_api_port = 4028; +char *opt_api_host = API_LISTEN_ADDR; +bool opt_api_listen; +bool opt_api_mcast; +char *opt_api_mcast_addr = API_MCAST_ADDR; +char *opt_api_mcast_code = API_MCAST_CODE; +char *opt_api_mcast_des = ""; +int opt_api_mcast_port = 4028; +bool opt_api_network; +bool opt_delaynet; +bool opt_disable_pool; +static bool no_work; +#ifdef USE_ICARUS +char *opt_icarus_options = NULL; +char *opt_icarus_timing = NULL; +float opt_anu_freq = 250; +float opt_au3_freq = 225; +int opt_au3_volt = 750; +float opt_rock_freq = 270; +#endif +bool opt_worktime; +#ifdef USE_AVALON +char *opt_avalon_options; +char *opt_bitburner_fury_options; +static char *opt_set_avalon_fan; +static char *opt_set_avalon_freq; +#endif +#ifdef USE_AVALON2 +static char *opt_set_avalon2_freq; +static char *opt_set_avalon2_fan; +static char *opt_set_avalon2_voltage; +#endif +#ifdef USE_AVALON4 +static char *opt_set_avalon4_fan; +static char *opt_set_avalon4_voltage; +static char *opt_set_avalon4_freq; +#endif +#ifdef USE_BLOCKERUPTER +int opt_bet_clk = 0; +#endif +#ifdef USE_HASHRATIO +#include "driver-hashratio.h" +#endif +#ifdef 
USE_KLONDIKE +char *opt_klondike_options = NULL; +#endif +#ifdef USE_DRILLBIT +char *opt_drillbit_options = NULL; +char *opt_drillbit_auto = NULL; +#endif +char *opt_bab_options = NULL; +#ifdef USE_BITMINE_A1 +char *opt_bitmine_a1_options = NULL; +#endif +#ifdef USE_BMSC +char *opt_bmsc_options = NULL; +char *opt_bmsc_bandops = NULL; +char *opt_bmsc_timing = NULL; +bool opt_bmsc_gray = false; +char *opt_bmsc_freq = NULL; +char *opt_bmsc_rdreg = NULL; +char *opt_bmsc_voltage = NULL; +bool opt_bmsc_bootstart = false; +bool opt_bmsc_rdworktest = false; +#endif +#ifdef USE_BITMAIN +char *opt_bitmain_options = NULL; +char *opt_bitmain_freq = NULL; +char *opt_bitmain_voltage = NULL; +#endif +#ifdef USE_HASHFAST +static char *opt_set_hfa_fan; +#endif +static char *opt_set_null; +#ifdef USE_MINION +int opt_minion_chipreport; +char *opt_minion_cores; +bool opt_minion_extra; +char *opt_minion_freq; +int opt_minion_freqchange = 1000; +int opt_minion_freqpercent = 70; +bool opt_minion_idlecount; +int opt_minion_ledcount; +int opt_minion_ledlimit = 98; +bool opt_minion_noautofreq; +bool opt_minion_overheat; +int opt_minion_spidelay; +char *opt_minion_spireset; +int opt_minion_spisleep = 200; +int opt_minion_spiusec; +char *opt_minion_temp; +#endif + +#ifdef USE_USBUTILS +char *opt_usb_select = NULL; +int opt_usbdump = -1; +bool opt_usb_list_all; +cgsem_t usb_resource_sem; +static pthread_t usb_poll_thread; +static bool usb_polling; +#endif + +char *opt_kernel_path; +char *cgminer_path; + +#if defined(USE_BITFORCE) +bool opt_bfl_noncerange; +#endif +#define QUIET (opt_quiet || opt_realquiet) + +struct thr_info *control_thr; +struct thr_info **mining_thr; +static int gwsched_thr_id; +static int watchpool_thr_id; +static int watchdog_thr_id; +#ifdef HAVE_CURSES +static int input_thr_id; +#endif +int gpur_thr_id; +static int api_thr_id; +#ifdef USE_USBUTILS +static int usbres_thr_id; +static int hotplug_thr_id; +#endif +static int total_control_threads; +bool hotplug_mode; +static 
int new_devices; +static int new_threads; +int hotplug_time = 5; + +#if LOCK_TRACKING +pthread_mutex_t lockstat_lock; +#endif + +pthread_mutex_t hash_lock; +pthread_mutex_t update_job_lock; + +static pthread_mutex_t *stgd_lock; +pthread_mutex_t console_lock; +cglock_t ch_lock; +static pthread_rwlock_t blk_lock; +static pthread_mutex_t sshare_lock; + +pthread_rwlock_t netacc_lock; +pthread_rwlock_t mining_thr_lock; +pthread_rwlock_t devices_lock; + +static pthread_mutex_t lp_lock; +static pthread_cond_t lp_cond; + +pthread_mutex_t restart_lock; +pthread_cond_t restart_cond; + +pthread_cond_t gws_cond; + +#define CG_LOCAL_MHASHES_MAX_NUM 12 +double g_local_mhashes_dones[CG_LOCAL_MHASHES_MAX_NUM] = {0}; +int g_local_mhashes_index = 0; +double g_displayed_rolling = 0; +char g_miner_version[256] = {0}; +char g_miner_compiletime[256] = {0}; +char g_miner_type[256] = {0}; + +double rolling1, rolling5, rolling15; +double total_rolling; +double total_mhashes_done; +char displayed_hash_rate[16] = {0}; +static struct timeval total_tv_start, total_tv_end; +static struct timeval restart_tv_start, update_tv_start; + +cglock_t control_lock; +pthread_mutex_t stats_lock; + +int hw_errors; +int g_max_fan, g_max_temp; +int64_t total_accepted, total_rejected, total_diff1; +int64_t total_getworks, total_stale, total_discarded; +double total_diff_accepted, total_diff_rejected, total_diff_stale; +static int staged_rollable; +unsigned int new_blocks; +static unsigned int work_block = 0; +unsigned int found_blocks; + +unsigned int local_work; +unsigned int local_work_last = 0; +long local_work_lasttime = 0; +unsigned int total_go, total_ro; + +struct pool **pools; +static struct pool *currentpool = NULL; + +int total_pools, enabled_pools; +enum pool_strategy pool_strategy = POOL_FAILOVER; +int opt_rotate_period; +static int total_urls, total_users, total_passes, total_userpasses; + +static +#ifndef HAVE_CURSES +const +#endif +bool curses_active; + +/* Protected by ch_lock */ +char 
current_hash[68]; +static char prev_block[12]; +static char current_block[32]; + +static char datestamp[40]; +static char blocktime[32]; +struct timeval block_timeval; +static char best_share[8] = "0"; +double current_diff = 0xFFFFFFFFFFFFFFFFULL; +static char block_diff[8]; +uint64_t best_diff = 0; + +struct block +{ + char hash[68]; + UT_hash_handle hh; + int block_no; +}; + +static struct block *blocks = NULL; + + +int swork_id; + +/* For creating a hash database of stratum shares submitted that have not had + * a response yet */ +struct stratum_share +{ + UT_hash_handle hh; + bool block; + struct work *work; + int id; + time_t sshare_time; + time_t sshare_sent; +}; + +static struct stratum_share *stratum_shares = NULL; + +char *opt_socks_proxy = NULL; +int opt_suggest_diff; +int opt_multi_version = 1; +static const char def_conf[] = "bmminer.conf"; +static char *default_config; +static bool config_loaded; +static int include_count; +#define JSON_INCLUDE_CONF "include" +#define JSON_LOAD_ERROR "JSON decode of file '%s' failed\n %s" +#define JSON_LOAD_ERROR_LEN strlen(JSON_LOAD_ERROR) +#define JSON_MAX_DEPTH 10 +#define JSON_MAX_DEPTH_ERR "Too many levels of JSON includes (limit 10) or a loop" +#define JSON_WEB_ERROR "WEB config err" + +#if defined(unix) || defined(__APPLE__) +static char *opt_stderr_cmd = NULL; +static int forkpid; +#endif // defined(unix) + +struct sigaction termhandler, inthandler; + +struct thread_q *getq; + +static uint32_t total_work; +struct work *staged_work = NULL; + +struct schedtime +{ + bool enable; + struct tm tm; +}; + +struct schedtime schedstart; +struct schedtime schedstop; +bool sched_paused; + +static bool time_before(struct tm *tm1, struct tm *tm2) +{ + if (tm1->tm_hour < tm2->tm_hour) + return true; + if (tm1->tm_hour == tm2->tm_hour && tm1->tm_min < tm2->tm_min) + return true; + return false; +} + +static bool should_run(void) +{ + struct timeval tv; + struct tm *tm; + + if (!schedstart.enable && !schedstop.enable) + return 
true; + + cgtime(&tv); + const time_t tmp_time = tv.tv_sec; + tm = localtime(&tmp_time); + if (schedstart.enable) + { + if (!schedstop.enable) + { + if (time_before(tm, &schedstart.tm)) + return false; + + /* This is a once off event with no stop time set */ + schedstart.enable = false; + return true; + } + if (time_before(&schedstart.tm, &schedstop.tm)) + { + if (time_before(tm, &schedstop.tm) && !time_before(tm, &schedstart.tm)) + return true; + return false; + } /* Times are reversed */ + if (time_before(tm, &schedstart.tm)) + { + if (time_before(tm, &schedstop.tm)) + return true; + return false; + } + return true; + } + /* only schedstop.enable == true */ + if (!time_before(tm, &schedstop.tm)) + return false; + return true; +} + +void get_datestamp(char *f, size_t fsiz, struct timeval *tv) +{ + struct tm *tm; + + const time_t tmp_time = tv->tv_sec; + tm = localtime(&tmp_time); + snprintf(f, fsiz, "[%d-%02d-%02d %02d:%02d:%02d]", + tm->tm_year + 1900, + tm->tm_mon + 1, + tm->tm_mday, + tm->tm_hour, + tm->tm_min, + tm->tm_sec); +} + +static void get_timestamp(char *f, size_t fsiz, struct timeval *tv) +{ + struct tm *tm; + + const time_t tmp_time = tv->tv_sec; + tm = localtime(&tmp_time); + snprintf(f, fsiz, "[%02d:%02d:%02d]", + tm->tm_hour, + tm->tm_min, + tm->tm_sec); +} + +static char exit_buf[512]; + +static void applog_and_exit(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vsnprintf(exit_buf, sizeof(exit_buf), fmt, ap); + va_end(ap); + _applog(LOG_ERR, exit_buf, true); + exit(1); +} + +static pthread_mutex_t sharelog_lock; +static FILE *sharelog_file = NULL; + +static struct thr_info *__get_thread(int thr_id) +{ + return mining_thr[thr_id]; +} + +struct thr_info *get_thread(int thr_id) +{ + struct thr_info *thr; + + rd_lock(&mining_thr_lock); + thr = __get_thread(thr_id); + rd_unlock(&mining_thr_lock); + + return thr; +} + +static struct cgpu_info *get_thr_cgpu(int thr_id) +{ + struct thr_info *thr = get_thread(thr_id); + + return thr->cgpu; +} + +struct cgpu_info *get_devices(int id) +{ + struct cgpu_info *cgpu; + + rd_lock(&devices_lock); + cgpu = devices[id]; + rd_unlock(&devices_lock); + + return cgpu; +} + +static void sharelog(const char*disposition, const struct work*work) +{ + char *target, *hash, *data; + struct cgpu_info *cgpu; + unsigned long int t; + struct pool *pool; + int thr_id, rv; + char s[1024]; + size_t ret; + + if (!sharelog_file) + return; + + thr_id = work->thr_id; + cgpu = get_thr_cgpu(thr_id); + pool = work->pool; + t = (unsigned long int)(work->tv_work_found.tv_sec); + target = bin2hex(work->target, sizeof(work->target)); + hash = bin2hex(work->hash, sizeof(work->hash)); + data = bin2hex(work->data, sizeof(work->data)); + + // timestamp,disposition,target,pool,dev,thr,sharehash,sharedata + rv = snprintf(s, sizeof(s), "%lu,%s,%s,%s,%s%u,%u,%s,%s\n", t, disposition, target, pool->rpc_url, cgpu->drv->name, cgpu->device_id, thr_id, hash, data); + free(target); + free(hash); + free(data); + if (rv >= (int)(sizeof(s))) + s[sizeof(s) - 1] = '\0'; + else if (rv < 0) + { + applog(LOG_ERR, "sharelog printf error"); + return; + } + + mutex_lock(&sharelog_lock); + ret = fwrite(s, rv, 1, sharelog_file); + fflush(sharelog_file); + mutex_unlock(&sharelog_lock); + + if (ret != 1) + applog(LOG_ERR, "sharelog fwrite error"); +} + +static char *getwork_req = "{\"method\": \"getwork\", 
\"params\": [], \"id\":0}\n"; + +static char *gbt_req = "{\"id\": 0, \"method\": \"getblocktemplate\", \"params\": [{\"capabilities\": [\"coinbasetxn\", \"workid\", \"coinbase/append\"]}]}\n"; + +static char *gbt_solo_req = "{\"id\": 0, \"method\": \"getblocktemplate\"}\n"; + +/* Adjust all the pools' quota to the greatest common denominator after a pool + * has been added or the quotas changed. */ +void adjust_quota_gcd(void) +{ + unsigned long gcd, lowest_quota = ~0UL, quota; + struct pool *pool; + int i; + + for (i = 0; i < total_pools; i++) + { + pool = pools[i]; + quota = pool->quota; + if (!quota) + continue; + if (quota < lowest_quota) + lowest_quota = quota; + } + + if (likely(lowest_quota < ~0UL)) + { + gcd = lowest_quota; + for (i = 0; i < total_pools; i++) + { + pool = pools[i]; + quota = pool->quota; + if (!quota) + continue; + while (quota % gcd) + gcd--; + } + } + else + gcd = 1; + + for (i = 0; i < total_pools; i++) + { + pool = pools[i]; + pool->quota_used *= global_quota_gcd; + pool->quota_used /= gcd; + pool->quota_gcd = pool->quota / gcd; + } + + global_quota_gcd = gcd; + applog(LOG_DEBUG, "Global quota greatest common denominator set to %lu", gcd); +} + +/* Return value is ignored if not called from add_pool_details */ +struct pool *add_pool(void) +{ + struct pool *pool; + + pool = calloc(sizeof(struct pool), 1); +#ifdef USE_BITMAIN_C5 + pool->support_vil = false; +#endif + if (!pool) + quit(1, "Failed to malloc pool in add_pool"); + pool->pool_no = pool->prio = total_pools; + pools = realloc(pools, sizeof(struct pool *) * (total_pools + 2)); + pools[total_pools++] = pool; + mutex_init(&pool->pool_lock); + if (unlikely(pthread_cond_init(&pool->cr_cond, NULL))) + quit(1, "Failed to pthread_cond_init in add_pool"); + cglock_init(&pool->data_lock); + mutex_init(&pool->stratum_lock); + cglock_init(&pool->gbt_lock); + INIT_LIST_HEAD(&pool->curlring); + + /* Make sure the pool doesn't think we've been idle since time 0 */ + pool->tv_idle.tv_sec = 
~0UL; + + pool->rpc_req = getwork_req; + pool->rpc_proxy = NULL; + pool->quota = 1; + adjust_quota_gcd(); + + return pool; +} + +/* Pool variant of test and set */ +static bool pool_tset(struct pool *pool, bool *var) +{ + bool ret; + + mutex_lock(&pool->pool_lock); + ret = *var; + *var = true; + mutex_unlock(&pool->pool_lock); + + return ret; +} + +bool pool_tclear(struct pool *pool, bool *var) +{ + bool ret; + + mutex_lock(&pool->pool_lock); + ret = *var; + *var = false; + mutex_unlock(&pool->pool_lock); + + return ret; +} + +struct pool *current_pool(void) +{ + struct pool *pool; + + cg_rlock(&control_lock); + pool = currentpool; + cg_runlock(&control_lock); + + return pool; +} + +char *set_int_range(const char *arg, int *i, int min, int max) +{ + char *err = opt_set_intval(arg, i); + + if (err) + return err; + + if (*i < min || *i > max) + return "Value out of range"; + + return NULL; +} + +static char *set_int_0_to_9999(const char *arg, int *i) +{ + return set_int_range(arg, i, 0, 9999); +} + +static char *set_int_1_to_65535(const char *arg, int *i) +{ + return set_int_range(arg, i, 1, 65535); +} + +static char *set_int_0_to_10(const char *arg, int *i) +{ + return set_int_range(arg, i, 0, 10); +} + +static char *set_int_0_to_100(const char *arg, int *i) +{ + return set_int_range(arg, i, 0, 100); +} + +static char *set_int_0_to_255(const char *arg, int *i) +{ + return set_int_range(arg, i, 0, 255); +} + +static char *set_int_0_to_200(const char *arg, int *i) +{ + return set_int_range(arg, i, 0, 200); +} + +static char *set_int_32_to_63(const char *arg, int *i) +{ + return set_int_range(arg, i, 32, 63); +} + +static char *set_int_22_to_55(const char *arg, int *i) +{ + return set_int_range(arg, i, 22, 55); +} + +static char *set_int_42_to_65(const char *arg, int *i) +{ + return set_int_range(arg, i, 42, 62); +} + +static char *set_int_1_to_10(const char *arg, int *i) +{ + return set_int_range(arg, i, 1, 10); +} + +static char __maybe_unused *set_int_0_to_4(const 
char *arg, int *i) +{ + return set_int_range(arg, i, 0, 4); +} + +#ifdef USE_FPGA_SERIAL +static char *opt_add_serial; +static char *add_serial(char *arg) +{ + string_elist_add(arg, &scan_devices); + return NULL; +} +#endif + +void get_intrange(char *arg, int *val1, int *val2) +{ + if (sscanf(arg, "%d-%d", val1, val2) == 1) + *val2 = *val1; +} + +static char *set_balance(enum pool_strategy *strategy) +{ + *strategy = POOL_BALANCE; + return NULL; +} + +static char *set_loadbalance(enum pool_strategy *strategy) +{ + *strategy = POOL_LOADBALANCE; + return NULL; +} + +static char *set_rotate(const char *arg, char __maybe_unused *i) +{ + pool_strategy = POOL_ROTATE; + return set_int_range(arg, &opt_rotate_period, 0, 9999); +} + +static char *set_rr(enum pool_strategy *strategy) +{ + *strategy = POOL_ROUNDROBIN; + return NULL; +} + +/* Detect that url is for a stratum protocol either via the presence of + * stratum+tcp or by detecting a stratum server response */ +bool detect_stratum(struct pool *pool, char *url) +{ + check_extranonce_option(pool, url); + if (!extract_sockaddr(url, &pool->sockaddr_url, &pool->stratum_port)) + return false; + + if (!strncasecmp(url, "stratum+tcp://", 14)) + { + pool->rpc_url = strdup(url); + pool->has_stratum = true; + pool->stratum_url = pool->sockaddr_url; + return true; + } + + return false; +} + +static struct pool *add_url(void) +{ + total_urls++; + if (total_urls > total_pools) + add_pool(); + return pools[total_urls - 1]; +} + +static void setup_url(struct pool *pool, char *arg) +{ + arg = get_proxy(arg, pool); + + if (detect_stratum(pool, arg)) + return; + + opt_set_charp(arg, &pool->rpc_url); + if (strncmp(arg, "http://", 7) && + strncmp(arg, "https://", 8)) + { + char *httpinput; + + httpinput = malloc(256); + if (!httpinput) + quit(1, "Failed to malloc httpinput"); + strcpy(httpinput, "stratum+tcp://"); + strncat(httpinput, arg, 242); + detect_stratum(pool, httpinput); + } +} + +static char *set_url(char *arg) +{ + struct pool 
*pool = add_url(); + + setup_url(pool, arg); + return NULL; +} + +static char *set_quota(char *arg) +{ + char *semicolon = strchr(arg, ';'), *url; + int len, qlen, quota; + struct pool *pool; + + if (!semicolon) + return "No semicolon separated quota;URL pair found"; + len = strlen(arg); + *semicolon = '\0'; + qlen = strlen(arg); + if (!qlen) + return "No parameter for quota found"; + len -= qlen + 1; + if (len < 1) + return "No parameter for URL found"; + quota = atoi(arg); + if (quota < 0) + return "Invalid negative parameter for quota set"; + url = arg + qlen + 1; + pool = add_url(); + setup_url(pool, url); + pool->quota = quota; + applog(LOG_INFO, "Setting pool %d to quota %d", pool->pool_no, pool->quota); + adjust_quota_gcd(); + + return NULL; +} + +static char *set_user(const char *arg) +{ + struct pool *pool; + + if (total_userpasses) + return "Use only user + pass or userpass, but not both"; + total_users++; + if (total_users > total_pools) + add_pool(); + + pool = pools[total_users - 1]; + opt_set_charp(arg, &pool->rpc_user); + + return NULL; +} + +static char *set_pass(const char *arg) +{ + struct pool *pool; + + if (total_userpasses) + return "Use only user + pass or userpass, but not both"; + total_passes++; + if (total_passes > total_pools) + add_pool(); + + pool = pools[total_passes - 1]; + opt_set_charp(arg, &pool->rpc_pass); + + return NULL; +} + +static char *set_userpass(const char *arg) +{ + struct pool *pool; + char *updup; + + if (total_users || total_passes) + return "Use only user + pass or userpass, but not both"; + total_userpasses++; + if (total_userpasses > total_pools) + add_pool(); + + pool = pools[total_userpasses - 1]; + updup = strdup(arg); + opt_set_charp(arg, &pool->rpc_userpass); + pool->rpc_user = strtok(updup, ":"); + if (!pool->rpc_user) + return "Failed to find : delimited user info"; + pool->rpc_pass = strtok(NULL, ":"); + if (!pool->rpc_pass) + pool->rpc_pass = strdup(""); + + return NULL; +} + +static char 
*enable_debug(bool *flag) +{ + *flag = true; + /* Turn on verbose output, too. */ + opt_log_output = true; + return NULL; +} + +static char *opt_set_sched_start; +static char *opt_set_sched_stop; + +static char *set_schedtime(const char *arg, struct schedtime *st) +{ + if (sscanf(arg, "%d:%d", &st->tm.tm_hour, &st->tm.tm_min) != 2) + return "Invalid time set, should be HH:MM"; + if (st->tm.tm_hour > 23 || st->tm.tm_min > 59 || st->tm.tm_hour < 0 || st->tm.tm_min < 0) + return "Invalid time set."; + st->enable = true; + return NULL; +} + +static char *set_sched_start(const char *arg) +{ + return set_schedtime(arg, &schedstart); +} + +static char *set_sched_stop(const char *arg) +{ + return set_schedtime(arg, &schedstop); +} + +static char *opt_set_sharelog; +static char* set_sharelog(char *arg) +{ + char *r = ""; + long int i = strtol(arg, &r, 10); + + if ((!*r) && i >= 0 && i <= INT_MAX) + { + sharelog_file = fdopen((int)i, "a"); + if (!sharelog_file) + applog(LOG_ERR, "Failed to open fd %u for share log", (unsigned int)i); + } + else if (!strcmp(arg, "-")) + { + sharelog_file = stdout; + if (!sharelog_file) + applog(LOG_ERR, "Standard output missing for share log"); + } + else + { + sharelog_file = fopen(arg, "a"); + if (!sharelog_file) + applog(LOG_ERR, "Failed to open %s for share log", arg); + } + + return NULL; +} + +static char *temp_cutoff_str = NULL; +static char __maybe_unused *opt_set_temp_cutoff; + +char *set_temp_cutoff(char *arg) +{ + int val; + + if (!(arg && arg[0])) + return "Invalid parameters for set temp cutoff"; + val = atoi(arg); + if (val < 0 || val > 200) + return "Invalid value passed to set temp cutoff"; + temp_cutoff_str = arg; + + return NULL; +} + +static void load_temp_cutoffs() +{ + int i, val = 0, device = 0; + char *nextptr; + + if (temp_cutoff_str) + { + for (device = 0, nextptr = strtok(temp_cutoff_str, ","); nextptr; ++device, nextptr = strtok(NULL, ",")) + { + if (device >= total_devices) + quit(1, "Too many values passed to set 
temp cutoff"); + val = atoi(nextptr); + if (val < 0 || val > 200) + quit(1, "Invalid value passed to set temp cutoff"); + + rd_lock(&devices_lock); + devices[device]->cutofftemp = val; + rd_unlock(&devices_lock); + } + } + else + { + rd_lock(&devices_lock); + for (i = device; i < total_devices; ++i) + { + if (!devices[i]->cutofftemp) + devices[i]->cutofftemp = opt_cutofftemp; + } + rd_unlock(&devices_lock); + + return; + } + if (device <= 1) + { + rd_lock(&devices_lock); + for (i = device; i < total_devices; ++i) + devices[i]->cutofftemp = val; + rd_unlock(&devices_lock); + } +} + +static char *set_logfile_path(const char *arg) +{ + opt_set_charp(arg, &opt_logfile_path); + + return NULL; +} + +static char *set_logfile_openflag(const char *arg) +{ + opt_set_charp(arg, &opt_logfile_openflag); + + return NULL; +} + +static char *set_logwork_path(const char *arg) +{ + opt_set_charp(arg, &opt_logwork_path); + + return NULL; +} + +static char *set_logwork_asicnum(const char *arg) +{ + opt_set_charp(arg, &opt_logwork_asicnum); + + return NULL; +} + +static char *set_float_125_to_500(const char *arg, float *i) +{ + char *err = opt_set_floatval(arg, i); + + if (err) + return err; + + if (*i < 125 || *i > 500) + return "Value out of range"; + + return NULL; +} + +static char *set_float_100_to_250(const char *arg, float *i) +{ + char *err = opt_set_floatval(arg, i); + + if (err) + return err; + + if (*i < 100 || *i > 250) + return "Value out of range"; + + return NULL; +} +static char *set_version_path(const char *arg) +{ + opt_set_charp(arg, &opt_version_path); + + return NULL; +} + +#ifdef USE_BMSC +static char *set_bmsc_options(const char *arg) +{ + opt_set_charp(arg, &opt_bmsc_options); + + return NULL; +} + +static char *set_bmsc_bandops(const char *arg) +{ + opt_set_charp(arg, &opt_bmsc_bandops); + + return NULL; +} + +static char *set_bmsc_timing(const char *arg) +{ + opt_set_charp(arg, &opt_bmsc_timing); + + return NULL; +} + +static char *set_bmsc_freq(const char 
*arg) +{ + opt_set_charp(arg, &opt_bmsc_freq); + + return NULL; +} + +static char *set_bmsc_rdreg(const char *arg) +{ + opt_set_charp(arg, &opt_bmsc_rdreg); + + return NULL; +} + +static char *set_bmsc_voltage(const char *arg) +{ + opt_set_charp(arg, &opt_bmsc_voltage); + + return NULL; +} +#endif + +#ifdef USE_BITMAIN +static char *set_bitmain_options(const char *arg) +{ + opt_set_charp(arg, &opt_bitmain_options); + + return NULL; +} +static char *set_bitmain_freq(const char *arg) +{ + opt_set_charp(arg, &opt_bitmain_freq); + + return NULL; +} +static char *set_bitmain_voltage(const char *arg) +{ + opt_set_charp(arg, &opt_bitmain_voltage); + + return NULL; +} + +#endif + +static char *set_null(const char __maybe_unused *arg) +{ + return NULL; +} + +/* These options are available from config file or commandline */ +static struct opt_table opt_config_table[] = +{ +#ifdef USE_ICARUS + OPT_WITH_ARG("--anu-freq", + set_float_125_to_500, &opt_show_floatval, &opt_anu_freq, + "Set AntminerU1/2 frequency in MHz, range 125-500"), +#endif + + OPT_WITH_ARG("--version-file", + set_version_path, NULL, opt_hidden, + "Set miner version file"), + OPT_WITH_ARG("--logfile-openflag", + set_logfile_openflag, NULL, opt_hidden, + "Set log file open flag, default: a+"), + OPT_WITH_ARG("--logwork", + set_logwork_path, NULL, opt_hidden, + "Set log work file path, following: minertext"), + OPT_WITH_ARG("--logwork-asicnum", + set_logwork_asicnum, NULL, opt_hidden, + "Set log work asic num, following: 1, 32, 64"), + OPT_WITHOUT_ARG("--logwork-diff", + opt_set_bool, &opt_logwork_diff, + "Allow log work diff"), + OPT_WITH_ARG("--logfile", + set_logfile_path, NULL, opt_hidden, + "Set log file, default: bmminer.log"), + OPT_WITH_ARG("--api-allow", + opt_set_charp, NULL, &opt_api_allow, + "Allow API access only to the given list of [G:]IP[/Prefix] addresses[/subnets]"), + OPT_WITH_ARG("--api-description", + opt_set_charp, NULL, &opt_api_description, + "Description placed in the API status header, 
default: bmminer version"), + OPT_WITH_ARG("--api-groups", + opt_set_charp, NULL, &opt_api_groups, + "API one letter groups G:cmd:cmd[,P:cmd:*...] defining the cmds a groups can use"), + OPT_WITHOUT_ARG("--api-listen", + opt_set_bool, &opt_api_listen, + "Enable API, default: disabled"), + OPT_WITHOUT_ARG("--api-mcast", + opt_set_bool, &opt_api_mcast, + "Enable API Multicast listener, default: disabled"), + OPT_WITH_ARG("--api-mcast-addr", + opt_set_charp, NULL, &opt_api_mcast_addr, + "API Multicast listen address"), + OPT_WITH_ARG("--api-mcast-code", + opt_set_charp, NULL, &opt_api_mcast_code, + "Code expected in the API Multicast message, don't use '-'"), + OPT_WITH_ARG("--api-mcast-des", + opt_set_charp, NULL, &opt_api_mcast_des, + "Description appended to the API Multicast reply, default: ''"), + OPT_WITH_ARG("--api-mcast-port", + set_int_1_to_65535, opt_show_intval, &opt_api_mcast_port, + "API Multicast listen port"), + OPT_WITHOUT_ARG("--api-network", + opt_set_bool, &opt_api_network, + "Allow API (if enabled) to listen on/for any address, default: only 127.0.0.1"), + OPT_WITH_ARG("--api-port", + set_int_1_to_65535, opt_show_intval, &opt_api_port, + "Port number of miner API"), + OPT_WITH_ARG("--api-host", + opt_set_charp, NULL, &opt_api_host, + "Specify API listen address, default: 0.0.0.0"), +#ifdef USE_ICARUS + OPT_WITH_ARG("--au3-freq", + set_float_100_to_250, &opt_show_floatval, &opt_au3_freq, + "Set AntminerU3 frequency in MHz, range 100-250"), + OPT_WITH_ARG("--au3-volt", + set_int_0_to_9999, &opt_show_intval, &opt_au3_volt, + "Set AntminerU3 voltage in mv, range 725-850, 0 to not set"), +#endif +#ifdef USE_AVALON + OPT_WITHOUT_ARG("--avalon-auto", + opt_set_bool, &opt_avalon_auto, + "Adjust avalon overclock frequency dynamically for best hashrate"), + OPT_WITH_ARG("--avalon-cutoff", + set_int_0_to_100, opt_show_intval, &opt_avalon_overheat, + "Set avalon overheat cut off temperature"), + OPT_WITH_CBARG("--avalon-fan", + set_avalon_fan, NULL, 
&opt_set_avalon_fan, + "Set fanspeed percentage for avalon, single value or range (default: 20-100)"), + OPT_WITH_CBARG("--avalon-freq", + set_avalon_freq, NULL, &opt_set_avalon_freq, + "Set frequency range for avalon-auto, single value or range"), + OPT_WITH_ARG("--avalon-options", + opt_set_charp, NULL, &opt_avalon_options, + "Set avalon options baud:miners:asic:timeout:freq:tech"), + OPT_WITH_ARG("--avalon-temp", + set_int_0_to_100, opt_show_intval, &opt_avalon_temp, + "Set avalon target temperature"), +#endif +#ifdef USE_AVALON2 + OPT_WITH_CBARG("--avalon2-freq", + set_avalon2_freq, NULL, &opt_set_avalon2_freq, + "Set frequency range for Avalon2, single value or range, step: 25"), + OPT_WITH_CBARG("--avalon2-voltage", + set_avalon2_voltage, NULL, &opt_set_avalon2_voltage, + "Set Avalon2 core voltage, in millivolts, step: 125"), + OPT_WITH_CBARG("--avalon2-fan", + set_avalon2_fan, NULL, &opt_set_avalon2_fan, + "Set Avalon2 target fan speed"), + OPT_WITH_ARG("--avalon2-cutoff", + set_int_0_to_100, opt_show_intval, &opt_avalon2_overheat, + "Set Avalon2 overheat cut off temperature"), + OPT_WITHOUT_ARG("--avalon2-fixed-speed", + set_avalon2_fixed_speed, &opt_avalon2_fan_fixed, + "Set Avalon2 fan to fixed speed"), + OPT_WITH_ARG("--avalon2-polling-delay", + set_int_1_to_65535, opt_show_intval, &opt_avalon2_polling_delay, + "Set Avalon2 polling delay value (ms)"), +#endif +#ifdef USE_AVALON4 + OPT_WITHOUT_ARG("--avalon4-automatic-voltage", + opt_set_bool, &opt_avalon4_autov, + "Automatic adjust voltage base on module DH"), + OPT_WITH_CBARG("--avalon4-voltage", + set_avalon4_voltage, NULL, &opt_set_avalon4_voltage, + "Set Avalon4 core voltage, in millivolts, step: 125"), + OPT_WITH_CBARG("--avalon4-freq", + set_avalon4_freq, NULL, &opt_set_avalon4_freq, + "Set frequency for Avalon4, 1 to 3 values, example: 445:385:370"), + OPT_WITH_CBARG("--avalon4-fan", + set_avalon4_fan, NULL, &opt_set_avalon4_fan, + "Set Avalon4 target fan speed range"), + 
OPT_WITH_ARG("--avalon4-temp", + set_int_22_to_55, opt_show_intval, &opt_avalon4_temp_target, + "Set Avalon4 target temperature"), + OPT_WITH_ARG("--avalon4-cutoff", + set_int_42_to_65, opt_show_intval, &opt_avalon4_overheat, + "Set Avalon4 overheat cut off temperature"), + OPT_WITH_ARG("--avalon4-polling-delay", + set_int_1_to_65535, opt_show_intval, &opt_avalon4_polling_delay, + "Set Avalon4 polling delay value (ms)"), + OPT_WITH_ARG("--avalon4-ntime-offset", + opt_set_intval, opt_show_intval, &opt_avalon4_ntime_offset, + "Set Avalon4 MM ntime rolling max offset"), + OPT_WITH_ARG("--avalon4-aucspeed", + opt_set_intval, opt_show_intval, &opt_avalon4_aucspeed, + "Set Avalon4 AUC IIC bus speed"), + OPT_WITH_ARG("--avalon4-aucxdelay", + opt_set_intval, opt_show_intval, &opt_avalon4_aucxdelay, + "Set Avalon4 AUC IIC xfer read delay, 4800 ~= 1ms"), +#endif +#ifdef USE_BAB + OPT_WITH_ARG("--bab-options", + opt_set_charp, NULL, &opt_bab_options, + "Set bab options max:def:min:up:down:hz:delay:trf"), +#endif + OPT_WITHOUT_ARG("--balance", + set_balance, &pool_strategy, + "Change multipool strategy from failover to even share balance"), + OPT_WITH_ARG("--benchfile", + opt_set_charp, NULL, &opt_benchfile, + "Run bmminer in benchmark mode using a work file - produces no shares"), + OPT_WITHOUT_ARG("--benchfile-display", + opt_set_bool, &opt_benchfile_display, + "Display each benchfile nonce found"), + OPT_WITHOUT_ARG("--benchmark", + opt_set_bool, &opt_benchmark, + "Run bmminer in benchmark mode - produces no shares"), +#if defined(USE_BITFORCE) + OPT_WITHOUT_ARG("--bfl-range", + opt_set_bool, &opt_bfl_noncerange, + "Use nonce range on bitforce devices if supported"), +#endif +#ifdef USE_BFLSC + OPT_WITH_ARG("--bflsc-overheat", + set_int_0_to_200, opt_show_intval, &opt_bflsc_overheat, + "Set overheat temperature where BFLSC devices throttle, 0 to disable"), +#endif +#ifdef USE_AVALON + OPT_WITH_ARG("--bitburner-voltage", + opt_set_intval, NULL, &opt_bitburner_core_voltage, + 
"Set BitBurner (Avalon) core voltage, in millivolts"), + OPT_WITH_ARG("--bitburner-fury-voltage", + opt_set_intval, NULL, &opt_bitburner_fury_core_voltage, + "Set BitBurner Fury core voltage, in millivolts"), + OPT_WITH_ARG("--bitburner-fury-options", + opt_set_charp, NULL, &opt_bitburner_fury_options, + "Override avalon-options for BitBurner Fury boards baud:miners:asic:timeout:freq"), +#endif +#ifdef USE_BMSC + OPT_WITH_ARG("--bmsc-options", + set_bmsc_options, NULL, NULL, + opt_hidden), + OPT_WITH_ARG("--bmsc-bandops", + set_bmsc_bandops, NULL, NULL, + opt_hidden), + OPT_WITH_ARG("--bmsc-timing", + set_bmsc_timing, NULL, NULL, + opt_hidden), + OPT_WITHOUT_ARG("--bmsc-gray", + opt_set_bool, &opt_bmsc_gray, + "Use gray"), + OPT_WITH_ARG("--bmsc-freq", + set_bmsc_freq, NULL, NULL, + opt_hidden), + OPT_WITH_ARG("--bmsc-rdreg", + set_bmsc_rdreg, NULL, NULL, + opt_hidden), + OPT_WITH_ARG("--bmsc-voltage", + set_bmsc_voltage, NULL, NULL, + opt_hidden), + OPT_WITHOUT_ARG("--bmsc-bootstart", + opt_set_bool, &opt_bmsc_bootstart, + "Enable boot start, default: disabled"), + OPT_WITHOUT_ARG("--bmsc-rdworktest", + opt_set_bool, &opt_bmsc_rdworktest, + "Record work test data to file"), +#endif + +#ifdef USE_BITMAIN_C5 + OPT_WITHOUT_ARG("--bitmain-fan-ctrl", + opt_set_bool, &opt_bitmain_fan_ctrl, + "Enable bitmain miner fan controlling"), + OPT_WITH_ARG("--bitmain-fan-pwm", + set_int_0_to_100, opt_show_intval, &opt_bitmain_fan_pwm, + "Set bitmain fan pwm percentage 0~100"), + OPT_WITH_ARG("--bitmain-freq", + set_int_0_to_9999,opt_show_intval, &opt_bitmain_c5_freq, + "Set frequency"), + OPT_WITH_ARG("--bitmain-voltage", + set_int_0_to_9999,opt_show_intval, &opt_bitmain_c5_voltage, + "Set voltage"), + OPT_WITHOUT_ARG("--bitmain-use-vil", + opt_set_bool, &opt_bitmain_new_cmd_type_vil, + "Set bitmain miner use vil mode"), + +#endif + +#ifdef USE_BITMAIN + OPT_WITH_ARG("--bitmain-dev", + set_bitmain_dev, NULL, NULL, + "Set bitmain device (default: usb mode, other windows: COM1 or 
linux: /dev/bitmain-asic)"), + OPT_WITHOUT_ARG("--bitmain-hwerror", + opt_set_bool, &opt_bitmain_hwerror, + "Set bitmain device detect hardware error"), + OPT_WITHOUT_ARG("--bitmain-checkall", + opt_set_bool, &opt_bitmain_checkall, + "Set bitmain check all"), + OPT_WITHOUT_ARG("--bitmain-checkn2diff", + opt_set_bool, &opt_bitmain_checkn2diff, + "Set bitmain check not 2 pow diff"), + OPT_WITHOUT_ARG("--bitmain-nobeeper", + opt_set_bool, &opt_bitmain_nobeeper, + "Set bitmain beeper no ringing"), + OPT_WITHOUT_ARG("--bitmain-notempoverctrl", + opt_set_bool, &opt_bitmain_notempoverctrl, + "Set bitmain not stop runing when temprerature is over 80 degree Celsius"), + OPT_WITHOUT_ARG("--bitmain-auto", + opt_set_bool, &opt_bitmain_auto, + "Adjust bitmain overclock frequency dynamically for best hashrate"), + OPT_WITHOUT_ARG("--bitmain-homemode", + opt_set_bool, &opt_bitmain_homemode, + "Set bitmain miner to home mode"), + OPT_WITH_ARG("--bitmain-cutoff", + set_int_0_to_100, opt_show_intval, &opt_bitmain_overheat, + "Set bitmain overheat cut off temperature"), + OPT_WITH_ARG("--bitmain-fan", + set_bitmain_fan, NULL, NULL, + "Set fanspeed percentage for bitmain, single value or range (default: 20-100)"), + OPT_WITH_ARG("--bitmain-freq", + set_bitmain_freq, NULL, NULL, + "Set frequency"), + OPT_WITH_ARG("--bitmain-voltage", + set_bitmain_voltage, NULL, NULL, + "Set voltage"), + OPT_WITH_ARG("--bitmain-options", + set_bitmain_options, NULL, NULL, + "Set bitmain options baud:miners:asic:timeout:freq"), + OPT_WITH_ARG("--bitmain-temp", + set_int_0_to_100, opt_show_intval, &opt_bitmain_temp, + "Set bitmain target temperature"), +#endif +#ifdef USE_BITMINE_A1 + OPT_WITH_ARG("--bitmine-a1-options", + opt_set_charp, NULL, &opt_bitmine_a1_options, + "Bitmine A1 options ref_clk_khz:sys_clk_khz:spi_clk_khz:override_chip_num"), +#endif +#ifdef USE_BITFURY + OPT_WITH_ARG("--bxf-bits", + set_int_32_to_63, opt_show_intval, &opt_bxf_bits, + "Set max BXF/HXF bits for overclocking"), + 
OPT_WITH_ARG("--bxf-debug", + set_int_0_to_4, opt_show_intval, &opt_bxf_debug, + "BXF: Debug all USB I/O, > is to the board(s), < is from the board(s)"), + OPT_WITH_ARG("--bxf-temp-target", + set_int_0_to_200, opt_show_intval, &opt_bxf_temp_target, + "Set target temperature for BXF/HXF devices"), + OPT_WITH_ARG("--bxm-bits", + set_int_0_to_100, opt_show_intval, &opt_bxm_bits, + "Set BXM bits for overclocking"), +#endif +#ifdef USE_BLOCKERUPTER + OPT_WITH_ARG("--bet-clk", + opt_set_intval, opt_show_intval, &opt_bet_clk, + "Set Block Erupter clock"), +#endif +#ifdef HAVE_LIBCURL + OPT_WITH_ARG("--btc-address", + opt_set_charp, NULL, &opt_btc_address, + "Set bitcoin target address when solo mining to bitcoind (mandatory)"), + OPT_WITH_ARG("--btc-sig", + opt_set_charp, NULL, &opt_btc_sig, + "Set signature to add to coinbase when solo mining (optional)"), +#endif +#ifdef HAVE_CURSES + OPT_WITHOUT_ARG("--compact", + opt_set_bool, &opt_compact, + "Use compact display without per device statistics"), +#endif +#ifdef USE_COINTERRA + OPT_WITH_ARG("--cta-load", + set_int_0_to_255, opt_show_intval, &opt_cta_load, + "Set load for CTA devices, 0-255 range"), + OPT_WITH_ARG("--ps-load", + set_int_0_to_100, opt_show_intval, &opt_ps_load, + "Set power supply load for CTA devices, 0-100 range"), +#endif + OPT_WITHOUT_ARG("--debug|-D", + enable_debug, &opt_debug, + "Enable debug output"), + OPT_WITHOUT_ARG("--disable-rejecting", + opt_set_bool, &opt_disable_pool, + "Automatically disable pools that continually reject shares"), +#ifdef USE_DRILLBIT + OPT_WITH_ARG("--drillbit-options", + opt_set_charp, NULL, &opt_drillbit_options, + "Set drillbit options :clock[:clock_divider][:voltage]"), + OPT_WITH_ARG("--drillbit-auto", + opt_set_charp, NULL, &opt_drillbit_auto, + "Enable drillbit automatic tuning :[::]"), +#endif + OPT_WITH_ARG("--expiry|-E", + set_int_0_to_9999, opt_show_intval, &opt_expiry, + "Upper bound on how many seconds after getting work we consider a share from it stale"), 
+ OPT_WITHOUT_ARG("--failover-only", + opt_set_bool, &opt_fail_only, + "Don't leak work to backup pools when primary pool is lagging"), + OPT_WITHOUT_ARG("--fix-protocol", + opt_set_bool, &opt_fix_protocol, + "Do not redirect to a different getwork protocol (eg. stratum)"), +#ifdef USE_HASHFAST + OPT_WITHOUT_ARG("--hfa-dfu-boot", + opt_set_bool, &opt_hfa_dfu_boot, + opt_hidden), + OPT_WITH_ARG("--hfa-hash-clock", + set_int_0_to_9999, opt_show_intval, &opt_hfa_hash_clock, + "Set hashfast clock speed"), + OPT_WITH_ARG("--hfa-fail-drop", + set_int_0_to_100, opt_show_intval, &opt_hfa_fail_drop, + "Set how many MHz to drop clockspeed each failure on an overlocked hashfast device"), + OPT_WITH_CBARG("--hfa-fan", + set_hfa_fan, NULL, &opt_set_hfa_fan, + "Set fanspeed percentage for hashfast, single value or range (default: 10-85)"), + OPT_WITH_ARG("--hfa-name", + opt_set_charp, NULL, &opt_hfa_name, + "Set a unique name for a single hashfast device specified with --usb or the first device found"), + OPT_WITHOUT_ARG("--hfa-noshed", + opt_set_bool, &opt_hfa_noshed, + "Disable hashfast dynamic core disabling feature"), + OPT_WITH_ARG("--hfa-ntime-roll", + opt_set_intval, NULL, &opt_hfa_ntime_roll, + opt_hidden), + OPT_WITH_ARG("--hfa-options", + opt_set_charp, NULL, &opt_hfa_options, + "Set hashfast options name:clock (comma separated)"), + OPT_WITHOUT_ARG("--hfa-pll-bypass", + opt_set_bool, &opt_hfa_pll_bypass, + opt_hidden), + OPT_WITH_ARG("--hfa-temp-overheat", + set_int_0_to_200, opt_show_intval, &opt_hfa_overheat, + "Set the hashfast overheat throttling temperature"), + OPT_WITH_ARG("--hfa-temp-target", + set_int_0_to_200, opt_show_intval, &opt_hfa_target, + "Set the hashfast target temperature (0 to disable)"), +#endif +#ifdef USE_HASHRATIO + OPT_WITH_CBARG("--hro-freq", + set_hashratio_freq, NULL, &opt_hashratio_freq, + "Set the hashratio clock frequency"), +#endif + OPT_WITH_ARG("--hotplug", + set_int_0_to_9999, NULL, &hotplug_time, +#ifdef USE_USBUTILS + "Seconds 
between hotplug checks (0 means never check)" +#else + opt_hidden +#endif + ), +#ifdef USE_ICARUS + OPT_WITH_ARG("--icarus-options", + opt_set_charp, NULL, &opt_icarus_options, + opt_hidden), + OPT_WITH_ARG("--icarus-timing", + opt_set_charp, NULL, &opt_icarus_timing, + opt_hidden), +#endif +#if defined(HAVE_MODMINER) + OPT_WITH_ARG("--kernel-path|-K", + opt_set_charp, opt_show_charp, &opt_kernel_path, + "Specify a path to where bitstream files are"), +#endif +#ifdef USE_KLONDIKE + OPT_WITH_ARG("--klondike-options", + opt_set_charp, NULL, &opt_klondike_options, + "Set klondike options clock:temptarget"), +#endif + OPT_WITHOUT_ARG("--load-balance", + set_loadbalance, &pool_strategy, + "Change multipool strategy from failover to quota based balance"), + OPT_WITH_ARG("--log|-l", + set_int_0_to_9999, opt_show_intval, &opt_log_interval, + "Interval in seconds between log output"), + OPT_WITHOUT_ARG("--lowmem", + opt_set_bool, &opt_lowmem, + "Minimise caching of shares for low memory applications"), +#ifdef USE_MINION + OPT_WITH_ARG("--minion-chipreport", + set_int_0_to_100, opt_show_intval, &opt_minion_chipreport, + "Seconds to report chip 5min hashrate, range 0-100 (default: 0=disabled)"), + OPT_WITH_ARG("--minion-cores", + opt_set_charp, NULL, &opt_minion_cores, + opt_hidden), + OPT_WITHOUT_ARG("--minion-extra", + opt_set_bool, &opt_minion_extra, + opt_hidden), + OPT_WITH_ARG("--minion-freq", + opt_set_charp, NULL, &opt_minion_freq, + "Set minion chip frequencies in MHz, single value or comma list, range 100-1400 (default: 1200)"), + OPT_WITH_ARG("--minion-freqchange", + set_int_0_to_9999, opt_show_intval, &opt_minion_freqchange, + "Millisecond total time to do frequency changes (default: 1000)"), + OPT_WITH_ARG("--minion-freqpercent", + set_int_0_to_100, opt_show_intval, &opt_minion_freqpercent, + "Percentage to use when starting up a chip (default: 70%)"), + OPT_WITHOUT_ARG("--minion-idlecount", + opt_set_bool, &opt_minion_idlecount, + "Report when IdleCount is >0 
or changes"), + OPT_WITH_ARG("--minion-ledcount", + set_int_0_to_100, opt_show_intval, &opt_minion_ledcount, + "Turn off led when more than this many chips below the ledlimit (default: 0)"), + OPT_WITH_ARG("--minion-ledlimit", + set_int_0_to_200, opt_show_intval, &opt_minion_ledlimit, + "Turn off led when chips GHs are below this (default: 90)"), + OPT_WITHOUT_ARG("--minion-noautofreq", + opt_set_bool, &opt_minion_noautofreq, + "Disable automatic frequency adjustment"), + OPT_WITHOUT_ARG("--minion-overheat", + opt_set_bool, &opt_minion_overheat, + "Enable directly halting any chip when the status exceeds 100C"), + OPT_WITH_ARG("--minion-spidelay", + set_int_0_to_9999, opt_show_intval, &opt_minion_spidelay, + "Add a delay in microseconds after each SPI I/O"), + OPT_WITH_ARG("--minion-spireset", + opt_set_charp, NULL, &opt_minion_spireset, + "SPI regular reset: iNNN for I/O count or sNNN for seconds - 0 means none"), + OPT_WITH_ARG("--minion-spisleep", + set_int_0_to_9999, opt_show_intval, &opt_minion_spisleep, + "Sleep time in milliseconds when doing an SPI reset"), + OPT_WITH_ARG("--minion-spiusec", + set_int_0_to_9999, NULL, &opt_minion_spiusec, + opt_hidden), + OPT_WITH_ARG("--minion-temp", + opt_set_charp, NULL, &opt_minion_temp, + "Set minion chip temperature threshold, single value or comma list, range 120-160 (default: 135C)"), +#endif +#if defined(unix) || defined(__APPLE__) + OPT_WITH_ARG("--monitor|-m", + opt_set_charp, NULL, &opt_stderr_cmd, + "Use custom pipe cmd for output messages"), +#endif // defined(unix) +#ifdef USE_BITFURY + OPT_WITH_ARG("--nfu-bits", + set_int_32_to_63, opt_show_intval, &opt_nfu_bits, + "Set nanofury bits for overclocking, range 32-63"), +#endif + OPT_WITHOUT_ARG("--net-delay", + opt_set_bool, &opt_delaynet, + "Impose small delays in networking to not overload slow routers"), + OPT_WITHOUT_ARG("--no-pool-disable", + opt_set_invbool, &opt_disable_pool, + opt_hidden), + OPT_WITHOUT_ARG("--no-submit-stale", + opt_set_invbool, 
&opt_submit_stale, + "Don't submit shares if they are detected as stale"), +#ifdef USE_BITFURY + OPT_WITH_ARG("--osm-led-mode", + set_int_0_to_4, opt_show_intval, &opt_osm_led_mode, + "Set LED mode for OneStringMiner devices"), +#endif + OPT_WITH_ARG("--pass|-p", + set_pass, NULL, &opt_set_null, + "Password for bitcoin JSON-RPC server"), + OPT_WITHOUT_ARG("--per-device-stats", + opt_set_bool, &want_per_device_stats, + "Force verbose mode and output per-device statistics"), + OPT_WITH_ARG("--pools", + opt_set_bool, NULL, &opt_set_null, opt_hidden), + OPT_WITHOUT_ARG("--protocol-dump|-P", + opt_set_bool, &opt_protocol, + "Verbose dump of protocol-level activities"), + OPT_WITH_ARG("--queue|-Q", + set_int_0_to_9999, opt_show_intval, &opt_queue, + "Maximum number of work items to have queued"), + OPT_WITHOUT_ARG("--quiet|-q", + opt_set_bool, &opt_quiet, + "Disable logging output, display status and errors"), + OPT_WITH_ARG("--quota|-U", + set_quota, NULL, &opt_set_null, + "quota;URL combination for server with load-balance strategy quotas"), + OPT_WITHOUT_ARG("--real-quiet", + opt_set_bool, &opt_realquiet, + "Disable all output"), + OPT_WITH_ARG("--retries", + set_null, NULL, &opt_set_null, + opt_hidden), + OPT_WITH_ARG("--retry-pause", + set_null, NULL, &opt_set_null, + opt_hidden), +#ifdef USE_ICARUS + OPT_WITH_ARG("--rock-freq", + set_float_125_to_500, &opt_show_floatval, &opt_rock_freq, + "Set RockMiner frequency in MHz, range 125-500"), +#endif + OPT_WITH_ARG("--rotate", + set_rotate, NULL, &opt_set_null, + "Change multipool strategy from failover to regularly rotate at N minutes"), + OPT_WITHOUT_ARG("--round-robin", + set_rr, &pool_strategy, + "Change multipool strategy from failover to round robin on failure"), +#ifdef USE_FPGA_SERIAL + OPT_WITH_CBARG("--scan-serial|-S", + add_serial, NULL, &opt_add_serial, + "Serial port to probe for Serial FPGA Mining device"), +#endif + OPT_WITH_ARG("--scan-time|-s", + set_int_0_to_9999, opt_show_intval, &opt_scantime, + 
"Upper bound on time spent scanning current work, in seconds"), + OPT_WITH_CBARG("--sched-start", + set_sched_start, NULL, &opt_set_sched_start, + "Set a time of day in HH:MM to start mining (a once off without a stop time)"), + OPT_WITH_CBARG("--sched-stop", + set_sched_stop, NULL, &opt_set_sched_stop, + "Set a time of day in HH:MM to stop mining (will quit without a start time)"), + OPT_WITH_CBARG("--sharelog", + set_sharelog, NULL, &opt_set_sharelog, + "Append share log to file"), + OPT_WITH_ARG("--shares", + opt_set_intval, NULL, &opt_shares, + "Quit after mining N shares (default: unlimited)"), + OPT_WITH_ARG("--socks-proxy", + opt_set_charp, NULL, &opt_socks_proxy, + "Set socks4 proxy (host:port)"), + OPT_WITH_ARG("--suggest-diff", + opt_set_intval, NULL, &opt_suggest_diff, + "Suggest miner difficulty for pool to user (default: none)"), + OPT_WITH_ARG("--multi-version", + opt_set_intval, NULL, &opt_multi_version, + "Multi version"), +#ifdef HAVE_SYSLOG_H + OPT_WITHOUT_ARG("--syslog", + opt_set_bool, &use_syslog, + "Use system log for output messages (default: standard error)"), +#endif +#if defined(USE_BITFORCE) || defined(USE_MODMINER) || defined(USE_BFLSC) + OPT_WITH_CBARG("--temp-cutoff", + set_temp_cutoff, opt_show_intval, &opt_set_temp_cutoff, + "Temperature where a device will be automatically disabled, one value or comma separated list"), +#endif + OPT_WITHOUT_ARG("--text-only|-T", + opt_set_invbool, &use_curses, +#ifdef HAVE_CURSES + "Disable ncurses formatted screen output" +#else + opt_hidden +#endif + ), + OPT_WITH_ARG("--url|-o", + set_url, NULL, &opt_set_null, + "URL for bitcoin JSON-RPC server"), +#ifdef USE_USBUTILS + OPT_WITH_ARG("--usb", + opt_set_charp, NULL, &opt_usb_select, + "USB device selection"), + OPT_WITH_ARG("--usb-dump", + set_int_0_to_10, opt_show_intval, &opt_usbdump, + opt_hidden), + OPT_WITHOUT_ARG("--usb-list-all", + opt_set_bool, &opt_usb_list_all, + opt_hidden), +#endif + OPT_WITH_ARG("--user|-u", + set_user, NULL, 
&opt_set_null, + "Username for bitcoin JSON-RPC server"), + OPT_WITH_ARG("--userpass|-O", + set_userpass, NULL, &opt_set_null, + "Username:Password pair for bitcoin JSON-RPC server"), + OPT_WITHOUT_ARG("--verbose", + opt_set_bool, &opt_log_output, + "Log verbose output to stderr as well as status output"), + OPT_WITHOUT_ARG("--widescreen", + opt_set_bool, &opt_widescreen, + "Use extra wide display without toggling"), + OPT_WITHOUT_ARG("--worktime", + opt_set_bool, &opt_worktime, + "Display extra work time debug information"), + OPT_ENDTABLE +}; + +static char *load_config(const char *arg, void __maybe_unused *unused); + +static int fileconf_load; + +static char *parse_config(json_t *config, bool fileconf) +{ + static char err_buf[200]; + struct opt_table *opt; + const char *str; + json_t *val; + + if (fileconf && !fileconf_load) + fileconf_load = 1; + + for (opt = opt_config_table; opt->type != OPT_END; opt++) + { + char *p, *name; + + /* We don't handle subtables. */ + assert(!(opt->type & OPT_SUBTABLE)); + + if (!opt->names) + continue; + + /* Pull apart the option name(s). */ + name = strdup(opt->names); + for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) + { + char *err = NULL; + + /* Ignore short options. 
*/ + if (p[1] != '-') + continue; + + val = json_object_get(config, p+2); + if (!val) + continue; + + if ((opt->type & (OPT_HASARG | OPT_PROCESSARG)) && json_is_string(val)) + { + str = json_string_value(val); + err = opt->cb_arg(str, opt->u.arg); + if (opt->type == OPT_PROCESSARG) + opt_set_charp(str, opt->u.arg); + } + else if ((opt->type & (OPT_HASARG | OPT_PROCESSARG)) && json_is_array(val)) + { + json_t *arr_val; + size_t index; + + json_array_foreach(val, index, arr_val) + { + if (json_is_string(arr_val)) + { + str = json_string_value(arr_val); + err = opt->cb_arg(str, opt->u.arg); + if (opt->type == OPT_PROCESSARG) + opt_set_charp(str, opt->u.arg); + } + else if (json_is_object(arr_val)) + err = parse_config(arr_val, false); + if (err) + break; + } + } + else if ((opt->type & OPT_NOARG) && json_is_true(val)) + err = opt->cb(opt->u.arg); + else + err = "Invalid value"; + + if (err) + { + /* Allow invalid values to be in configuration + * file, just skipping over them provided the + * JSON is still valid after that. 
*/ + if (fileconf) + { + applog(LOG_ERR, "Invalid config option %s: %s", p, err); + fileconf_load = -1; + } + else + { + snprintf(err_buf, sizeof(err_buf), "Parsing JSON option %s: %s", + p, err); + return err_buf; + } + } + } + free(name); + } + + val = json_object_get(config, JSON_INCLUDE_CONF); + if (val && json_is_string(val)) + return load_config(json_string_value(val), NULL); + + return NULL; +} + +char *cnfbuf = NULL; + +#ifdef HAVE_LIBCURL +char conf_web1[] = "http://"; +char conf_web2[] = "https://"; + +static char *load_web_config(const char *arg) +{ + json_t *val = json_web_config(arg); + + if (!val || !json_is_object(val)) + return JSON_WEB_ERROR; + + if (!cnfbuf) + cnfbuf = strdup(arg); + + config_loaded = true; + + return parse_config(val, true); +} +#endif + +static char *load_config(const char *arg, void __maybe_unused *unused) +{ + json_error_t err; + json_t *config; + char *json_error; + size_t siz; + +#ifdef HAVE_LIBCURL + if (strncasecmp(arg, conf_web1, sizeof(conf_web1)-1) == 0 || + strncasecmp(arg, conf_web2, sizeof(conf_web2)-1) == 0) + return load_web_config(arg); +#endif + + if (!cnfbuf) + cnfbuf = strdup(arg); + + if (++include_count > JSON_MAX_DEPTH) + return JSON_MAX_DEPTH_ERR; + + config = json_load_file(arg, 0, &err); + if (!json_is_object(config)) + { + siz = JSON_LOAD_ERROR_LEN + strlen(arg) + strlen(err.text); + json_error = malloc(siz); + if (!json_error) + quit(1, "Malloc failure in json error"); + + snprintf(json_error, siz, JSON_LOAD_ERROR, arg, err.text); + return json_error; + } + + config_loaded = true; + + /* Parse the config now, so we can override it. That can keep pointers + * so don't free config object. 
*/ + return parse_config(config, true); +} + +static char *set_default_config(const char *arg) +{ + opt_set_charp(arg, &default_config); + + return NULL; +} + +void default_save_file(char *filename); + +static void load_default_config(void) +{ + cnfbuf = malloc(PATH_MAX); + + default_save_file(cnfbuf); + + if (!access(cnfbuf, R_OK)) + load_config(cnfbuf, NULL); + else + { + free(cnfbuf); + cnfbuf = NULL; + } +} + +extern const char *opt_argv0; + +static char *opt_verusage_and_exit(const char *extra) +{ + printf("%s\nBuilt with " +#ifdef USE_BMSC + "bmsc " +#endif +#ifdef USE_BITMAIN + "bitmain " +#endif +#ifdef USE_BITMAIN_C5 + "bitmain_c5 " +#endif +#ifdef USE_AVALON + "avalon " +#endif +#ifdef USE_AVALON2 + "avalon2 " +#endif +#ifdef USE_AVALON4 + "avalon4 " +#endif +#ifdef USE_BFLSC + "bflsc " +#endif +#ifdef USE_BITFORCE + "bitforce " +#endif +#ifdef USE_BITFURY + "bitfury " +#endif +#ifdef USE_COINTERRA + "cointerra " +#endif +#ifdef USE_DRILLBIT + "drillbit " +#endif +#ifdef USE_HASHFAST + "hashfast " +#endif +#ifdef USE_ICARUS + "icarus " +#endif +#ifdef USE_KLONDIKE + "klondike " +#endif +#ifdef USE_KNC + "KnC " +#endif +#ifdef USE_BAB + "BaB " +#endif +#ifdef USE_MINION + "minion " +#endif +#ifdef USE_MODMINER + "modminer " +#endif +#ifdef USE_BITMINE_A1 + "Bitmine.A1 " +#endif +#ifdef USE_SP10 + "spondoolies " +#endif +#ifdef USE_SP30 + "sp30 " +#endif + + "mining support.\n" + , packagename); + printf("%s", opt_usage(opt_argv0, extra)); + fflush(stdout); + exit(0); +} + +#if defined(USE_USBUTILS) +char *display_devs(int *ndevs) +{ + *ndevs = 0; + usb_all(0); + exit(*ndevs); +} +#endif + +/* These options are available from commandline only */ +static struct opt_table opt_cmdline_table[] = +{ + OPT_WITH_ARG("--config|-c", + load_config, NULL, &opt_set_null, + "Load a JSON-format configuration file\n" + "See example.conf for an example configuration."), + OPT_WITH_ARG("--default-config", + set_default_config, NULL, &opt_set_null, + "Specify the filename of 
the default config file\n" + "Loaded at start and used when saving without a name."), + OPT_WITHOUT_ARG("--help|-h", + opt_verusage_and_exit, NULL, + "Print this message"), +#if defined(USE_USBUTILS) + OPT_WITHOUT_ARG("--ndevs|-n", + display_devs, &nDevs, + "Display all USB devices and exit"), +#endif + OPT_WITHOUT_ARG("--version|-V", + opt_version_and_exit, packagename, + "Display version and exit"), + OPT_ENDTABLE +}; + +#ifdef HAVE_LIBCURL +static bool jobj_binary(const json_t *obj, const char *key, + void *buf, size_t buflen, bool required) +{ + const char *hexstr; + json_t *tmp; + + tmp = json_object_get(obj, key); + if (unlikely(!tmp)) + { + if (unlikely(required)) + applog(LOG_ERR, "JSON key '%s' not found", key); + return false; + } + hexstr = json_string_value(tmp); + if (unlikely(!hexstr)) + { + applog(LOG_ERR, "JSON key '%s' is not a string", key); + return false; + } + if (!hex2bin(buf, hexstr, buflen)) + return false; + + return true; +} +#endif + +static void calc_midstate(struct work *work) +{ + unsigned char data[64]; + uint32_t *data32 = (uint32_t *)data; + sha256_ctx ctx; + + flip64(data32, work->data); + sha256_init(&ctx); + sha256_update(&ctx, data, 64); + memcpy(work->midstate, ctx.h, 32); + endian_flip32(work->midstate, work->midstate); +} + +/* Returns the current value of total_work and increments it */ +static int total_work_inc(void) +{ + int ret; + + cg_wlock(&control_lock); + ret = total_work++; + cg_wunlock(&control_lock); + + return ret; +} + +static struct work *make_work(void) +{ + struct work *work = calloc(1, sizeof(struct work)); + + if (unlikely(!work)) + quit(1, "Failed to calloc work in make_work"); + + work->id = total_work_inc(); + + return work; +} + +/* This is the central place all work that is about to be retired should be + * cleaned to remove any dynamically allocated arrays within the struct */ +void clean_work(struct work *work) +{ + free(work->job_id); + free(work->ntime); + free(work->coinbase); + 
free(work->nonce1); + memset(work, 0, sizeof(struct work)); +} + +/* All dynamically allocated work structs should be freed here to not leak any + * ram from arrays allocated within the work struct */ +void _free_work(struct work *work) +{ + clean_work(work); + free(work); +} + +static void gen_hash(unsigned char *data, unsigned char *hash, int len); +static void calc_diff(struct work *work, double known); +char *workpadding = "000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000"; + +#ifdef HAVE_LIBCURL +/* Process transactions with GBT by storing the binary value of the first + * transaction, and the hashes of the remaining transactions since these + * remain constant with an altered coinbase when generating work. Must be + * entered under gbt_lock */ +static void gbt_merkle_bins(struct pool *pool, json_t *transaction_arr); + +static void __build_gbt_txns(struct pool *pool, json_t *res_val) +{ + json_t *txn_array; + + txn_array = json_object_get(res_val, "transactions"); + gbt_merkle_bins(pool, txn_array); +} + +static void __gbt_merkleroot(struct pool *pool, unsigned char *merkle_root) +{ + unsigned char merkle_sha[64]; + int i; + + gen_hash(pool->coinbase, merkle_root, pool->coinbase_len); + memcpy(merkle_sha, merkle_root, 32); + for (i = 0; i < pool->merkles; i++) + { + memcpy(merkle_sha + 32, pool->merklebin + i * 32, 32); + gen_hash(merkle_sha, merkle_root, 64); + memcpy(merkle_sha, merkle_root, 32); + } +} + +static bool work_decode(struct pool *pool, struct work *work, json_t *val); + +static void update_gbt(struct pool *pool) +{ + int rolltime; + json_t *val; + CURL *curl; + + curl = curl_easy_init(); + if (unlikely(!curl)) + quit (1, "CURL initialisation failed in update_gbt"); + + val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, + pool->rpc_req, true, false, &rolltime, pool, false); + + if (val) + { + struct work *work = make_work(); + bool rc = work_decode(pool, work, val); + + total_getworks++; + 
pool->getwork_requested++; + if (rc) + { + applog(LOG_DEBUG, "Successfully retrieved and updated GBT from pool %u %s", + pool->pool_no, pool->rpc_url); + if (pool == current_pool()) + opt_work_update = true; + } + else + { + applog(LOG_DEBUG, "Successfully retrieved but FAILED to decipher GBT from pool %u %s", + pool->pool_no, pool->rpc_url); + } + json_decref(val); + free_work(work); + } + else + { + applog(LOG_DEBUG, "FAILED to update GBT from pool %u %s", + pool->pool_no, pool->rpc_url); + } + curl_easy_cleanup(curl); +} + +static void gen_gbt_work(struct pool *pool, struct work *work) +{ + unsigned char merkleroot[32]; + struct timeval now; + uint64_t nonce2le; + + cgtime(&now); + if (now.tv_sec - pool->tv_lastwork.tv_sec > 60) + update_gbt(pool); + + cg_wlock(&pool->gbt_lock); + nonce2le = htole64(pool->nonce2); + memcpy(pool->coinbase + pool->nonce2_offset, &nonce2le, pool->n2size); + pool->nonce2++; + cg_dwlock(&pool->gbt_lock); + __gbt_merkleroot(pool, merkleroot); + + memcpy(work->data, &pool->gbt_version, 4); + memcpy(work->data + 4, pool->previousblockhash, 32); + memcpy(work->data + 4 + 32 + 32, &pool->curtime, 4); + memcpy(work->data + 4 + 32 + 32 + 4, &pool->gbt_bits, 4); + + memcpy(work->target, pool->gbt_target, 32); + + work->coinbase = bin2hex(pool->coinbase, pool->coinbase_len); + + /* For encoding the block data on submission */ + work->gbt_txns = pool->gbt_txns + 1; + + if (pool->gbt_workid) + work->job_id = strdup(pool->gbt_workid); + cg_runlock(&pool->gbt_lock); + + flip32(work->data + 4 + 32, merkleroot); + memset(work->data + 4 + 32 + 32 + 4 + 4, 0, 4); /* nonce */ + + hex2bin(work->data + 4 + 32 + 32 + 4 + 4 + 4, workpadding, 48); + + if (opt_debug) + { + char *header = bin2hex(work->data, 128); + + applog(LOG_DEBUG, "Generated GBT header %s", header); + applog(LOG_DEBUG, "Work coinbase %s", work->coinbase); + free(header); + } + + calc_midstate(work); + local_work++; + work->pool = pool; + work->gbt = true; + work->longpoll = false; + 
work->getwork_mode = GETWORK_MODE_GBT; + work->work_block = work_block; + /* Nominally allow a driver to ntime roll 60 seconds */ + work->drv_rolllimit = 60; + calc_diff(work, 0); + cgtime(&work->tv_staged); +} + +static bool gbt_decode(struct pool *pool, json_t *res_val) +{ + const char *previousblockhash; + const char *target; + const char *coinbasetxn; + const char *longpollid; + unsigned char hash_swap[32]; + int expires; + int version; + int curtime; + bool submitold; + const char *bits; + const char *workid; + int cbt_len, orig_len; + uint8_t *extra_len; + size_t cal_len; + + previousblockhash = json_string_value(json_object_get(res_val, "previousblockhash")); + target = json_string_value(json_object_get(res_val, "target")); + coinbasetxn = json_string_value(json_object_get(json_object_get(res_val, "coinbasetxn"), "data")); + longpollid = json_string_value(json_object_get(res_val, "longpollid")); + expires = json_integer_value(json_object_get(res_val, "expires")); + version = json_integer_value(json_object_get(res_val, "version")); + curtime = json_integer_value(json_object_get(res_val, "curtime")); + submitold = json_is_true(json_object_get(res_val, "submitold")); + bits = json_string_value(json_object_get(res_val, "bits")); + workid = json_string_value(json_object_get(res_val, "workid")); + + if (!previousblockhash || !target || !coinbasetxn || !longpollid || + !expires || !version || !curtime || !bits) + { + applog(LOG_ERR, "JSON failed to decode GBT"); + return false; + } + + applog(LOG_DEBUG, "previousblockhash: %s", previousblockhash); + applog(LOG_DEBUG, "target: %s", target); + applog(LOG_DEBUG, "coinbasetxn: %s", coinbasetxn); + applog(LOG_DEBUG, "longpollid: %s", longpollid); + applog(LOG_DEBUG, "expires: %d", expires); + applog(LOG_DEBUG, "version: %d", version); + applog(LOG_DEBUG, "curtime: %d", curtime); + applog(LOG_DEBUG, "submitold: %s", submitold ? 
"true" : "false"); + applog(LOG_DEBUG, "bits: %s", bits); + if (workid) + applog(LOG_DEBUG, "workid: %s", workid); + + cg_wlock(&pool->gbt_lock); + free(pool->coinbasetxn); + pool->coinbasetxn = strdup(coinbasetxn); + cbt_len = strlen(pool->coinbasetxn) / 2; + /* We add 8 bytes of extra data corresponding to nonce2 */ + pool->n2size = 8; + pool->coinbase_len = cbt_len + pool->n2size; + cal_len = pool->coinbase_len + 1; + align_len(&cal_len); + free(pool->coinbase); + pool->coinbase = calloc(cal_len, 1); + if (unlikely(!pool->coinbase)) + quit(1, "Failed to calloc pool coinbase in gbt_decode"); + hex2bin(pool->coinbase, pool->coinbasetxn, 42); + extra_len = (uint8_t *)(pool->coinbase + 41); + orig_len = *extra_len; + hex2bin(pool->coinbase + 42, pool->coinbasetxn + 84, orig_len); + *extra_len += pool->n2size; + hex2bin(pool->coinbase + 42 + *extra_len, pool->coinbasetxn + 84 + (orig_len * 2), + cbt_len - orig_len - 42); + pool->nonce2_offset = orig_len + 42; + + free(pool->longpollid); + pool->longpollid = strdup(longpollid); + free(pool->gbt_workid); + if (workid) + pool->gbt_workid = strdup(workid); + else + pool->gbt_workid = NULL; + + hex2bin(hash_swap, previousblockhash, 32); + swap256(pool->previousblockhash, hash_swap); + + hex2bin(hash_swap, target, 32); + swab256(pool->gbt_target, hash_swap); + + pool->gbt_expires = expires; + pool->gbt_version = htobe32(version); + pool->curtime = htobe32(curtime); + pool->submit_old = submitold; + + hex2bin((unsigned char *)&pool->gbt_bits, bits, 4); + + __build_gbt_txns(pool, res_val); + cg_wunlock(&pool->gbt_lock); + + return true; +} + +static bool getwork_decode(json_t *res_val, struct work *work) +{ + if (unlikely(!jobj_binary(res_val, "data", work->data, sizeof(work->data), true))) + { + applog(LOG_ERR, "JSON inval data"); + return false; + } + + if (!jobj_binary(res_val, "midstate", work->midstate, sizeof(work->midstate), false)) + { + // Calculate it ourselves + applog(LOG_DEBUG, "Calculating midstate locally"); + 
calc_midstate(work); + } + + if (unlikely(!jobj_binary(res_val, "target", work->target, sizeof(work->target), true))) + { + applog(LOG_ERR, "JSON inval target"); + return false; + } + return true; +} + +/* Returns whether the pool supports local work generation or not. */ +static bool pool_localgen(struct pool *pool) +{ + return (pool->has_stratum || pool->has_gbt || pool->gbt_solo); +} + +static void gbt_merkle_bins(struct pool *pool, json_t *transaction_arr) +{ + unsigned char *hashbin; + json_t *arr_val; + int i, j, binleft, binlen; + + free(pool->txn_data); + pool->txn_data = NULL; + pool->transactions = 0; + pool->merkles = 0; + pool->transactions = json_array_size(transaction_arr); + binlen = pool->transactions * 32 + 32; + hashbin = alloca(binlen + 32); + memset(hashbin, 0, 32); + binleft = binlen / 32; + if (pool->transactions) + { + int len = 0, ofs = 0; + const char *txn; + + for (i = 0; i < pool->transactions; i++) + { + arr_val = json_array_get(transaction_arr, i); + txn = json_string_value(json_object_get(arr_val, "data")); + if (!txn) + { + applog(LOG_ERR, "Pool %d json_string_value fail - cannot find transaction data", + pool->pool_no); + return; + } + len += strlen(txn); + } + + pool->txn_data = malloc(len + 1); + if (unlikely(!pool->txn_data)) + quit(1, "Failed to calloc txn_data in gbt_merkle_bins"); + pool->txn_data[len] = '\0'; + + for (i = 0; i < pool->transactions; i++) + { + unsigned char binswap[32]; + const char *hash; + + arr_val = json_array_get(transaction_arr, i); + hash = json_string_value(json_object_get(arr_val, "hash")); + txn = json_string_value(json_object_get(arr_val, "data")); + len = strlen(txn); + memcpy(pool->txn_data + ofs, txn, len); + ofs += len; + if (!hash) + { + unsigned char *txn_bin; + int txn_len; + + txn_len = len / 2; + txn_bin = malloc(txn_len); + if (!txn_bin) + quit(1, "Failed to malloc txn_bin in gbt_merkle_bins"); + hex2bin(txn_bin, txn, txn_len); + /* This is needed for pooled mining since only + * 
transaction data and not hashes are sent */ + gen_hash(txn_bin, hashbin + 32 + 32 * i, txn_len); + continue; + } + if (!hex2bin(binswap, hash, 32)) + { + applog(LOG_ERR, "Failed to hex2bin hash in gbt_merkle_bins"); + return; + } + swab256(hashbin + 32 + 32 * i, binswap); + } + } + if (binleft > 1) + { + while (42) + { + if (binleft == 1) + break; + memcpy(pool->merklebin + (pool->merkles * 32), hashbin + 32, 32); + pool->merkles++; + if (binleft % 2) + { + memcpy(hashbin + binlen, hashbin + binlen - 32, 32); + binlen += 32; + binleft++; + } + for (i = 32, j = 64; j < binlen; i += 32, j += 64) + { + gen_hash(hashbin + j, hashbin + i, 64); + } + binleft /= 2; + binlen = binleft * 32; + } + } + if (opt_debug) + { + char hashhex[68]; + + for (i = 0; i < pool->merkles; i++) + { + __bin2hex(hashhex, pool->merklebin + i * 32, 32); + applog(LOG_DEBUG, "MH%d %s",i, hashhex); + } + } + applog(LOG_INFO, "Stored %d transactions from pool %d", pool->transactions, + pool->pool_no); +} + +static double diff_from_target(void *target); + +static const char scriptsig_header[] = "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff"; +static unsigned char scriptsig_header_bin[41]; + +static bool gbt_solo_decode(struct pool *pool, json_t *res_val) +{ + json_t *transaction_arr, *coinbase_aux; + const char *previousblockhash; + unsigned char hash_swap[32]; + struct timeval now; + const char *target; + uint64_t coinbasevalue; + const char *flags; + const char *bits; + char header[228]; + int ofs = 0, len; + uint64_t *u64; + uint32_t *u32; + int version; + int curtime; + int height; + + previousblockhash = json_string_value(json_object_get(res_val, "previousblockhash")); + target = json_string_value(json_object_get(res_val, "target")); + transaction_arr = json_object_get(res_val, "transactions"); + version = json_integer_value(json_object_get(res_val, "version")); + curtime = json_integer_value(json_object_get(res_val, "curtime")); + bits = 
json_string_value(json_object_get(res_val, "bits")); + height = json_integer_value(json_object_get(res_val, "height")); + coinbasevalue = json_integer_value(json_object_get(res_val, "coinbasevalue")); + coinbase_aux = json_object_get(res_val, "coinbaseaux"); + flags = json_string_value(json_object_get(coinbase_aux, "flags")); + + if (!previousblockhash || !target || !version || !curtime || !bits || !coinbase_aux || !flags) + { + applog(LOG_ERR, "Pool %d JSON failed to decode GBT", pool->pool_no); + return false; + } + + applog(LOG_DEBUG, "previousblockhash: %s", previousblockhash); + applog(LOG_DEBUG, "target: %s", target); + applog(LOG_DEBUG, "version: %d", version); + applog(LOG_DEBUG, "curtime: %d", curtime); + applog(LOG_DEBUG, "bits: %s", bits); + applog(LOG_DEBUG, "height: %d", height); + applog(LOG_DEBUG, "flags: %s", flags); + + cg_wlock(&pool->gbt_lock); + hex2bin(hash_swap, previousblockhash, 32); + swap256(pool->previousblockhash, hash_swap); + __bin2hex(pool->prev_hash, pool->previousblockhash, 32); + + hex2bin(hash_swap, target, 32); + swab256(pool->gbt_target, hash_swap); + pool->sdiff = diff_from_target(pool->gbt_target); + + pool->gbt_version = htobe32(version); + pool->curtime = htobe32(curtime); + snprintf(pool->ntime, 9, "%08x", curtime); + snprintf(pool->bbversion, 9, "%08x", version); + snprintf(pool->nbit, 9, "%s", bits); + pool->nValue = coinbasevalue; + hex2bin((unsigned char *)&pool->gbt_bits, bits, 4); + gbt_merkle_bins(pool, transaction_arr); + pool->height = height; + + memset(pool->scriptsig_base, 0, 42); + ofs++; // Leave room for template length + + /* Put block height at start of template. 
*/ + ofs += ser_number(pool->scriptsig_base + ofs, height); // max 5 + + /* Followed by flags */ + len = strlen(flags) / 2; + pool->scriptsig_base[ofs++] = len; + hex2bin(pool->scriptsig_base + ofs, flags, len); + ofs += len; + + /* Followed by timestamp */ + cgtime(&now); + pool->scriptsig_base[ofs++] = 0xfe; // Encode seconds as u32 + u32 = (uint32_t *)&pool->scriptsig_base[ofs]; + *u32 = htole32(now.tv_sec); + ofs += 4; // sizeof uint32_t + pool->scriptsig_base[ofs++] = 0xfe; // Encode usecs as u32 + u32 = (uint32_t *)&pool->scriptsig_base[ofs]; + *u32 = htole32(now.tv_usec); + ofs += 4; // sizeof uint32_t + + memcpy(pool->scriptsig_base + ofs, "\x09\x63\x67\x6d\x69\x6e\x65\x72\x34\x32", 10); + ofs += 10; + + /* Followed by extranonce size, fixed at 8 */ + pool->scriptsig_base[ofs++] = 8; + pool->nonce2_offset = 41 + ofs; + ofs += 8; + + if (opt_btc_sig) + { + len = strlen(opt_btc_sig); + if (len > 32) + len = 32; + pool->scriptsig_base[ofs++] = len; + memcpy(pool->scriptsig_base + ofs, opt_btc_sig, len); + ofs += len; + } + + pool->scriptsig_base[0] = ofs++; // Template length + pool->n1_len = ofs; + + len = 41 // prefix + + ofs // Template length + + 4 // txin sequence no + + 1 // transactions + + 8 // value + + 1 + 25 // txout + + 4; // lock + free(pool->coinbase); + pool->coinbase = calloc(len, 1); + if (unlikely(!pool->coinbase)) + quit(1, "Failed to calloc coinbase in gbt_solo_decode"); + + memcpy(pool->coinbase + 41, pool->scriptsig_base, ofs); + memcpy(pool->coinbase + 41 + ofs, "\xff\xff\xff\xff", 4); + pool->coinbase[41 + ofs + 4] = 1; + u64 = (uint64_t *)&(pool->coinbase[41 + ofs + 4 + 1]); + *u64 = htole64(coinbasevalue); + + pool->nonce2 = 0; + pool->n2size = 4; + pool->coinbase_len = 41 + ofs + 4 + 1 + 8 + 1 + 25 + 4; + cg_wunlock(&pool->gbt_lock); + + snprintf(header, 225, "%s%s%s%s%s%s%s", + pool->bbversion, + pool->prev_hash, + "0000000000000000000000000000000000000000000000000000000000000000", + pool->ntime, + pool->nbit, + "00000000", /* nonce 
*/ + workpadding); + if (unlikely(!hex2bin(pool->header_bin, header, 112))) + quit(1, "Failed to hex2bin header in gbt_solo_decode"); + + return true; +} + +static bool work_decode(struct pool *pool, struct work *work, json_t *val) +{ + json_t *res_val = json_object_get(val, "result"); + bool ret = false; + + cgtime(&pool->tv_lastwork); + if (!res_val || json_is_null(res_val)) + { + applog(LOG_ERR, "JSON Failed to decode result"); + goto out; + } + + if (pool->gbt_solo) + { + if (unlikely(!gbt_solo_decode(pool, res_val))) + goto out; + ret = true; + goto out; + } + else if (pool->has_gbt) + { + if (unlikely(!gbt_decode(pool, res_val))) + goto out; + work->gbt = true; + ret = true; + goto out; + } + else if (unlikely(!getwork_decode(res_val, work))) + goto out; + + memset(work->hash, 0, sizeof(work->hash)); + + cgtime(&work->tv_staged); + + ret = true; + +out: + return ret; +} +#else /* HAVE_LIBCURL */ +/* Always true with stratum */ +#define pool_localgen(pool) (true) +#define json_rpc_call(curl, url, userpass, rpc_req, probe, longpoll, rolltime, pool, share) (NULL) +#define work_decode(pool, work, val) (false) +#define gen_gbt_work(pool, work) {} +#endif /* HAVE_LIBCURL */ + +int dev_from_id(int thr_id) +{ + struct cgpu_info *cgpu = get_thr_cgpu(thr_id); + + return cgpu->device_id; +} + +/* Create an exponentially decaying average over the opt_log_interval */ +void decay_time(double *f, double fadd, double fsecs, double interval) +{ + double ftotal, fprop; + + if (fsecs <= 0) + return; + fprop = 1.0 - 1 / (exp(fsecs / interval)); + ftotal = 1.0 + fprop; + *f += (fadd / fsecs * fprop); + *f /= ftotal; +} + +static int __total_staged(void) +{ + return HASH_COUNT(staged_work); +} + +static int total_staged(void) +{ + int ret; + + mutex_lock(stgd_lock); + ret = __total_staged(); + mutex_unlock(stgd_lock); + + return ret; +} + +#ifdef HAVE_CURSES +WINDOW *mainwin, *statuswin, *logwin; +#endif +double total_secs = 1.0; +double last_total_secs = 1.0; +static char 
statusline[256]; +/* logstart is where the log window should start */ +static int devcursor, logstart, logcursor; +#ifdef HAVE_CURSES +/* statusy is where the status window goes up to in cases where it won't fit at startup */ +static int statusy; +#endif + +#ifdef HAVE_CURSES +static inline void unlock_curses(void) +{ + mutex_unlock(&console_lock); +} + +static inline void lock_curses(void) +{ + mutex_lock(&console_lock); +} + +static bool curses_active_locked(void) +{ + bool ret; + + lock_curses(); + ret = curses_active; + if (!ret) + unlock_curses(); + return ret; +} +#endif + +/* Convert a uint64_t value into a truncated string for displaying with its + * associated suitable for Mega, Giga etc. Buf array needs to be long enough */ +static void suffix_string(uint64_t val, char *buf, size_t bufsiz, int sigdigits) +{ + const double dkilo = 1000.0; + const uint64_t kilo = 1000ull; + const uint64_t mega = 1000000ull; + const uint64_t giga = 1000000000ull; + const uint64_t tera = 1000000000000ull; + const uint64_t peta = 1000000000000000ull; + const uint64_t exa = 1000000000000000000ull; + char suffix[2] = ""; + bool decimal = true; + double dval; + + if (val >= exa) + { + val /= peta; + dval = (double)val / dkilo; + strcpy(suffix, "E"); + } + else if (val >= peta) + { + val /= tera; + dval = (double)val / dkilo; + strcpy(suffix, "P"); + } + else if (val >= tera) + { + val /= giga; + dval = (double)val / dkilo; + strcpy(suffix, "T"); + } + else if (val >= giga) + { + val /= mega; + dval = (double)val / dkilo; + strcpy(suffix, "G"); + } + else if (val >= mega) + { + val /= kilo; + dval = (double)val / dkilo; + strcpy(suffix, "M"); + } + else if (val >= kilo) + { + dval = (double)val / dkilo; + strcpy(suffix, "K"); + } + else + { + dval = val; + decimal = false; + } + + if (!sigdigits) + { + if (decimal) + snprintf(buf, bufsiz, "%.3g%s", dval, suffix); + else + snprintf(buf, bufsiz, "%d%s", (unsigned int)dval, suffix); + } + else + { + /* Always show sigdigits + 1, 
padded on right with zeroes + * followed by suffix */ + int ndigits = sigdigits - 1 - (dval > 0.0 ? floor(log10(dval)) : 0); + snprintf(buf, bufsiz, "%*.*f%s", sigdigits + 1, ndigits, dval, suffix); + + } +} + +double cgpu_runtime(struct cgpu_info *cgpu) +{ + struct timeval now; + double dev_runtime; + + if (cgpu->dev_start_tv.tv_sec == 0) + dev_runtime = total_secs; + else + { + cgtime(&now); + dev_runtime = tdiff(&now, &(cgpu->dev_start_tv)); + } + + if (dev_runtime < 1.0) + dev_runtime = 1.0; + return dev_runtime; +} + +double tsince_restart(void) +{ + struct timeval now; + + cgtime(&now); + return tdiff(&now, &restart_tv_start); +} + +double tsince_update(void) +{ + struct timeval now; + + cgtime(&now); + return tdiff(&now, &update_tv_start); +} + +static void get_statline(char *buf, size_t bufsiz, struct cgpu_info *cgpu) +{ + char displayed_hashes[16], displayed_rolling[16]; + double dev_runtime, wu; + uint64_t dh64, dr64; + + dev_runtime = cgpu_runtime(cgpu); + + wu = cgpu->diff1 / dev_runtime * 60.0; + + dh64 = (double)cgpu->total_mhashes / dev_runtime * 1000000ull; + dr64 = (double)cgpu->rolling * 1000000ull; + suffix_string(dh64, displayed_hashes, sizeof(displayed_hashes), 4); + suffix_string(dr64, displayed_rolling, sizeof(displayed_rolling), 4); + + snprintf(buf, bufsiz, "%s%d ", cgpu->drv->name, cgpu->device_id); + cgpu->drv->get_statline_before(buf, bufsiz, cgpu); + tailsprintf(buf, bufsiz, "(%ds):%s (avg):%sh/s | A:%.0f R:%.0f HW:%d WU:%.1f/m", + opt_log_interval, + displayed_rolling, + displayed_hashes, + cgpu->diff_accepted, + cgpu->diff_rejected, + cgpu->hw_errors, + wu); + cgpu->drv->get_statline(buf, bufsiz, cgpu); +} + +static bool shared_strategy(void) +{ + return (pool_strategy == POOL_LOADBALANCE || pool_strategy == POOL_BALANCE); +} + +#ifdef HAVE_CURSES +#define CURBUFSIZ 256 +#define cg_mvwprintw(win, y, x, fmt, ...) 
do { \ + char tmp42[CURBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + mvwprintw(win, y, x, "%s", tmp42); \ +} while (0) +#define cg_wprintw(win, fmt, ...) do { \ + char tmp42[CURBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + wprintw(win, "%s", tmp42); \ +} while (0) + +/* Must be called with curses mutex lock held and curses_active */ +static void curses_print_status(void) +{ + struct pool *pool = current_pool(); + int linewidth = opt_widescreen ? 100 : 80; + + wattron(statuswin, A_BOLD); + cg_mvwprintw(statuswin, 0, 0, " " PACKAGE " version " VERSION " - Started: %s", datestamp); + wattroff(statuswin, A_BOLD); + mvwhline(statuswin, 1, 0, '-', linewidth); + cg_mvwprintw(statuswin, 2, 0, " %s", statusline); + wclrtoeol(statuswin); + if (opt_widescreen) + { + cg_mvwprintw(statuswin, 3, 0, " A:%.0f R:%.0f HW:%d WU:%.1f/m |" + " ST: %d SS: %"PRId64" NB: %d LW: %d GF: %d RF: %d", + total_diff_accepted, total_diff_rejected, hw_errors, + total_diff1 / total_secs * 60, + total_staged(), total_stale, new_blocks, local_work, total_go, total_ro); + } + else if (alt_status) + { + cg_mvwprintw(statuswin, 3, 0, " ST: %d SS: %"PRId64" NB: %d LW: %d GF: %d RF: %d", + total_staged(), total_stale, new_blocks, local_work, total_go, total_ro); + } + else + { + cg_mvwprintw(statuswin, 3, 0, " A:%.0f R:%.0f HW:%d WU:%.1f/m", + total_diff_accepted, total_diff_rejected, hw_errors, + total_diff1 / total_secs * 60); + } + wclrtoeol(statuswin); + if (shared_strategy() && total_pools > 1) + { + cg_mvwprintw(statuswin, 4, 0, " Connected to multiple pools with%s block change notify", + have_longpoll ? "": "out"); + } + else if (pool->has_stratum) + { + cg_mvwprintw(statuswin, 4, 0, " Connected to %s diff %s with stratum as user %s", + pool->sockaddr_url, pool->diff, pool->rpc_user); + } + else + { + cg_mvwprintw(statuswin, 4, 0, " Connected to %s diff %s with%s %s as user %s", + pool->sockaddr_url, pool->diff, have_longpoll ? 
"": "out", + pool->has_gbt ? "GBT" : "LP", pool->rpc_user); + } + wclrtoeol(statuswin); + cg_mvwprintw(statuswin, 5, 0, " Block: %s... Diff:%s Started: %s Best share: %s ", + prev_block, block_diff, blocktime, best_share); + mvwhline(statuswin, 6, 0, '-', linewidth); + mvwhline(statuswin, statusy - 1, 0, '-', linewidth); +#ifdef USE_USBUTILS + cg_mvwprintw(statuswin, devcursor - 1, 1, "[U]SB management [P]ool management [S]ettings [D]isplay options [Q]uit"); +#else + cg_mvwprintw(statuswin, devcursor - 1, 1, "[P]ool management [S]ettings [D]isplay options [Q]uit"); +#endif +} + +static void adj_width(int var, int *length) +{ + if ((int)(log10(var) + 1) > *length) + (*length)++; +} + +static void adj_fwidth(float var, int *length) +{ + if ((int)(log10(var) + 1) > *length) + (*length)++; +} + +#define STATBEFORELEN 23 +const char blanks[] = " "; + +static void curses_print_devstatus(struct cgpu_info *cgpu, int devno, int count) +{ + static int devno_width = 1, dawidth = 1, drwidth = 1, hwwidth = 1, wuwidth = 1; + char logline[256], unique_id[12]; + struct timeval now; + double dev_runtime, wu; + unsigned int devstatlen; + + if (opt_compact) + return; + + if (devcursor + count > LINES - 2) + return; + + if (count >= most_devices) + return; + + if (cgpu->dev_start_tv.tv_sec == 0) + dev_runtime = total_secs; + else + { + cgtime(&now); + dev_runtime = tdiff(&now, &(cgpu->dev_start_tv)); + } + + if (dev_runtime < 1.0) + dev_runtime = 1.0; + + cgpu->utility = cgpu->accepted / dev_runtime * 60; + wu = cgpu->diff1 / dev_runtime * 60; + + wmove(statuswin,devcursor + count, 0); + adj_width(devno, &devno_width); + if (cgpu->unique_id) + { + unique_id[8] = '\0'; + memcpy(unique_id, blanks, 8); + strncpy(unique_id, cgpu->unique_id, 8); + } + else + sprintf(unique_id, "%-8d", cgpu->device_id); + cg_wprintw(statuswin, " %*d: %s %-8s: ", devno_width, devno, cgpu->drv->name, + unique_id); + logline[0] = '\0'; + cgpu->drv->get_statline_before(logline, sizeof(logline), cgpu); + 
	devstatlen = strlen(logline);
	/* Pad the driver prefix to a fixed column so device rows line up. */
	if (devstatlen < STATBEFORELEN)
		strncat(logline, blanks, STATBEFORELEN - devstatlen);
	cg_wprintw(statuswin, "%s | ", logline);

#ifdef USE_USBUTILS
	/* A device that has vanished from the USB bus is shown as ZOMBIE. */
	if (cgpu->usbinfo.nodev)
		cg_wprintw(statuswin, "ZOMBIE");
	else
#endif
	if (cgpu->status == LIFE_DEAD)
		cg_wprintw(statuswin, "DEAD ");
	else if (cgpu->status == LIFE_SICK)
		cg_wprintw(statuswin, "SICK ");
	else if (cgpu->deven == DEV_DISABLED)
		cg_wprintw(statuswin, "OFF ");
	else if (cgpu->deven == DEV_RECOVER)
		cg_wprintw(statuswin, "REST ");
	else if (opt_widescreen) {
		/* Wide terminal: hashrates plus share counters on one row. */
		char displayed_hashes[16], displayed_rolling[16];
		uint64_t d64;

		d64 = (double)cgpu->total_mhashes / dev_runtime * 1000000ull;
		suffix_string(d64, displayed_hashes, sizeof(displayed_hashes), 4);
		d64 = (double)cgpu->rolling * 1000000ull;
		suffix_string(d64, displayed_rolling, sizeof(displayed_rolling), 4);
		adj_width(wu, &wuwidth);
		adj_fwidth(cgpu->diff_accepted, &dawidth);
		adj_fwidth(cgpu->diff_rejected, &drwidth);
		adj_width(cgpu->hw_errors, &hwwidth);
		cg_wprintw(statuswin, "%6s / %6sh/s WU:%*.1f/m "
				"A:%*.0f R:%*.0f HW:%*d",
				displayed_rolling,
				displayed_hashes, wuwidth + 2, wu,
				dawidth, cgpu->diff_accepted,
				drwidth, cgpu->diff_rejected,
				hwwidth, cgpu->hw_errors);
	} else if (!alt_status) {
		/* Default 80 column view: hashrates and utility only. */
		char displayed_hashes[16], displayed_rolling[16];
		uint64_t d64;

		d64 = (double)cgpu->total_mhashes / dev_runtime * 1000000ull;
		suffix_string(d64, displayed_hashes, sizeof(displayed_hashes), 4);
		d64 = (double)cgpu->rolling * 1000000ull;
		suffix_string(d64, displayed_rolling, sizeof(displayed_rolling), 4);
		adj_width(wu, &wuwidth);
		cg_wprintw(statuswin, "%6s / %6sh/s WU:%*.1f/m", displayed_rolling,
				displayed_hashes, wuwidth + 2, wu);
	} else {
		/* Alternate view: share and hardware error counters. */
		adj_fwidth(cgpu->diff_accepted, &dawidth);
		adj_fwidth(cgpu->diff_rejected, &drwidth);
		adj_width(cgpu->hw_errors, &hwwidth);
		cg_wprintw(statuswin, "A:%*.0f R:%*.0f HW:%*d",
				dawidth, cgpu->diff_accepted,
				drwidth, cgpu->diff_rejected,
				hwwidth, cgpu->hw_errors);
	}

	logline[0] = '\0';
	cgpu->drv->get_statline(logline, sizeof(logline), cgpu);
	cg_wprintw(statuswin, "%s", logline);

	wclrtoeol(statuswin);
}
#endif

#ifdef HAVE_CURSES
/* Check for window resize. Called with curses mutex locked */
static inline void change_logwinsize(void)
{
	int x, y, logx, logy;

	getmaxyx(mainwin, y, x);
	/* Ignore terminals smaller than the 80x25 layout assumes. */
	if (x < 80 || y < 25)
		return;

	if (y > statusy + 2 && statusy < logstart) {
		if (y - 2 < logstart)
			statusy = y - 2;
		else
			statusy = logstart;
		logcursor = statusy + 1;
		mvwin(logwin, logcursor, 0);
		wresize(statuswin, statusy, x);
	}

	y -= logcursor;
	getmaxyx(logwin, logy, logx);
	/* Detect screen size change */
	if (x != logx || y != logy)
		wresize(logwin, y, x);
}

/* Re-derive the status and log window geometry after an erase or resize. */
static void check_winsizes(void)
{
	if (!use_curses)
		return;
	if (curses_active_locked()) {
		int y, x;

		erase();
		x = getmaxx(statuswin);
		if (logstart > LINES - 2)
			statusy = LINES - 2;
		else
			statusy = logstart;
		logcursor = statusy;
		wresize(statuswin, statusy, x);
		getmaxyx(mainwin, y, x);
		y -= logcursor;
		wresize(logwin, y, x);
		mvwin(logwin, logcursor, 0);
		unlock_curses();
	}
}

static void disable_curses_windows(void);
static void enable_curses_windows(void);

/* Recompute where the scrolling log region starts when the device list
 * changes size or compact mode is toggled. */
static void switch_logsize(bool __maybe_unused newdevs)
{
	if (curses_active_locked()) {
#ifdef WIN32
		/* NOTE(review): on WIN32 the subwindows are torn down and
		 * rebuilt around the resize — presumably the curses port
		 * cannot resize them in place; confirm. */
		if (newdevs)
			disable_curses_windows();
#endif
		if (opt_compact) {
			logstart = devcursor + 1;
			logcursor = logstart + 1;
		} else {
			logstart = devcursor + most_devices + 1;
			logcursor = logstart + 1;
		}
#ifdef WIN32
		if (newdevs)
			enable_curses_windows();
#endif
		unlock_curses();
		check_winsizes();
	}
}

/* For mandatory printing when mutex is already locked */
void _wlog(const char *str)
{
	wprintw(logwin, "%s", str);
}

/* Mandatory printing */
void _wlogprint(const char *str)
{
	if (curses_active_locked()) {
		wprintw(logwin, "%s",
			str);
		unlock_curses();
	}
}
#endif

#ifdef HAVE_CURSES
/* Print a log message to the curses log window only.  Returns false when
 * curses is not active so the caller can fall back to plain output.
 * Warning/error messages are shown even while input is being captured and
 * force an immediate refresh. */
bool log_curses_only(int prio, const char *datetime, const char *str)
{
	bool high_prio;

	high_prio = (prio == LOG_WARNING || prio == LOG_ERR);

	if (curses_active_locked()) {
		if (!opt_loginput || high_prio) {
			wprintw(logwin, "%s%s\n", datetime, str);
			if (high_prio) {
				touchwin(logwin);
				wrefresh(logwin);
			}
		}
		unlock_curses();
		return true;
	}
	return false;
}

/* Blank the scrolling log window. */
void clear_logwin(void)
{
	if (curses_active_locked()) {
		erase();
		wclear(logwin);
		unlock_curses();
	}
}

/* Force a redraw of the log window. */
void logwin_update(void)
{
	if (curses_active_locked()) {
		touchwin(logwin);
		wrefresh(logwin);
		unlock_curses();
	}
}
#endif

/* Mark a pool usable, maintaining the enabled_pools count. */
static void enable_pool(struct pool *pool)
{
	if (pool->enabled != POOL_ENABLED) {
		enabled_pools++;
		pool->enabled = POOL_ENABLED;
	}
}

#ifdef HAVE_CURSES
/* Mark a pool unusable at the user's request. */
static void disable_pool(struct pool *pool)
{
	if (pool->enabled == POOL_ENABLED)
		enabled_pools--;
	pool->enabled = POOL_DISABLED;
}
#endif

/* Mark a pool unusable because it keeps rejecting shares. */
static void reject_pool(struct pool *pool)
{
	if (pool->enabled == POOL_ENABLED)
		enabled_pools--;
	pool->enabled = POOL_REJECTING;
}

static void restart_threads(void);

/* Theoretically threads could race when modifying accepted and
 * rejected values but the chance of two submits completing at the
 * same time is zero so there is no point adding extra locking */
static void
share_result(json_t *val, json_t *res, json_t *err, const struct work *work,
	     char *hashshow, bool resubmit, char *worktime)
{
	struct pool *pool = work->pool;
	struct cgpu_info *cgpu;

	cgpu = get_thr_cgpu(work->thr_id);

	/* GBT submitblock returns null on success; getwork returns true. */
	if (json_is_true(res) || (work->gbt && json_is_null(res))) {
		mutex_lock(&stats_lock);
		cgpu->accepted++;
		total_accepted++;
		pool->accepted++;
		cgpu->diff_accepted += work->work_difficulty;
		total_diff_accepted += work->work_difficulty;
		pool->diff_accepted += work->work_difficulty;
		mutex_unlock(&stats_lock);

		pool->seq_rejects = 0;
		cgpu->last_share_pool = pool->pool_no;
		cgpu->last_share_pool_time = time(NULL);
		cgpu->last_share_diff = work->work_difficulty;
		pool->last_share_time = cgpu->last_share_pool_time;
		pool->last_share_diff = work->work_difficulty;
		applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)");
		if (!QUIET) {
			if (total_pools > 1)
				applog(LOG_NOTICE, "Accepted %s %s %d pool %d %s%s",
				       hashshow, cgpu->drv->name, cgpu->device_id, work->pool->pool_no, resubmit ? "(resubmit)" : "", worktime);
			else
				applog(LOG_NOTICE, "Accepted %s %s %d %s%s",
				       hashshow, cgpu->drv->name, cgpu->device_id, resubmit ? "(resubmit)" : "", worktime);
		}
		sharelog("accept", work);
		/* Stop mining once the requested share count is reached. */
		if (opt_shares && total_diff_accepted >= opt_shares) {
			applog(LOG_WARNING, "Successfully mined %d accepted shares as requested and exiting.", opt_shares);
			kill_work();
			return;
		}

		/* Detect if a pool that has been temporarily disabled for
		 * continually rejecting shares has started accepting shares.
		 * This will only happen with the work returned from a
		 * longpoll */
		if (unlikely(pool->enabled == POOL_REJECTING)) {
			applog(LOG_WARNING, "Rejecting pool %d now accepting shares, re-enabling!", pool->pool_no);
			enable_pool(pool);
			switch_pools(NULL);
		}
		/* If we know we found the block we know better than anyone
		 * that new work is needed. */
		if (unlikely(work->block))
			restart_threads();
	} else {
		mutex_lock(&stats_lock);
		cgpu->rejected++;
		total_rejected++;
		pool->rejected++;
		cgpu->diff_rejected += work->work_difficulty;
		total_diff_rejected += work->work_difficulty;
		pool->diff_rejected += work->work_difficulty;
		pool->seq_rejects++;
		mutex_unlock(&stats_lock);

		applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)");
		if (!QUIET) {
			char where[20];
			char disposition[36] = "reject";
			char reason[32];

			strcpy(reason, "");
			if (total_pools > 1)
				snprintf(where, sizeof(where), "pool %d", work->pool->pool_no);
			else
				strcpy(where, "");

			/* Extract a human readable reject reason, from the
			 * GBT reject-reason field or a stratum error array. */
			if (!work->gbt)
				res = json_object_get(val, "reject-reason");
			if (res) {
				const char *reasontmp = json_string_value(res);

				size_t reasonLen = strlen(reasontmp);
				if (reasonLen > 28)
					reasonLen = 28;
				reason[0] = ' ';
				reason[1] = '(';
				memcpy(2 + reason, reasontmp, reasonLen);
				reason[reasonLen + 2] = ')';
				reason[reasonLen + 3] = '\0';
				memcpy(disposition + 7, reasontmp, reasonLen);
				disposition[6] = ':';
				disposition[reasonLen + 7] = '\0';
			} else if (work->stratum && err) {
				if (json_is_array(err)) {
					json_t *reason_val = json_array_get(err, 1);
					char *reason_str;

					if (reason_val && json_is_string(reason_val)) {
						reason_str = (char *)json_string_value(reason_val);
						snprintf(reason, 31, " (%s)", reason_str);
					}
				} else if (json_is_string(err)) {
					const char *s = json_string_value(err);

					snprintf(reason, 31, " (%s)", s);
				}
			}

			applog(LOG_NOTICE, "Rejected %s %s %d %s%s %s%s",
			       hashshow, cgpu->drv->name, cgpu->device_id, where, reason, resubmit ? "(resubmit)" : "", worktime);
			sharelog(disposition, work);
		}

		/* Once we have more than a nominal amount of sequential rejects,
		 * at least 10 and more than 3 mins at the current utility,
		 * disable the pool because some pool error is likely to have
		 * ensued. Do not do this if we know the share just happened to
		 * be stale due to networking delays.
		 */
		if (pool->seq_rejects > 10 && !work->stale && opt_disable_pool && enabled_pools > 1) {
			double utility = total_accepted / total_secs * 60;

			if (pool->seq_rejects > utility * 3 && enabled_pools > 1) {
				applog(LOG_WARNING, "Pool %d rejected %d sequential shares, disabling!",
				       pool->pool_no, pool->seq_rejects);
				reject_pool(pool);
				if (pool == current_pool())
					switch_pools(NULL);
				pool->seq_rejects = 0;
			}
		}
	}
}

/* Render "hash Diff sharediff/workdiff" for share logging.  hashshow must
 * hold at least 64 bytes. */
static void show_hash(struct work *work, char *hashshow)
{
	unsigned char rhash[32];
	char diffdisp[16];
	unsigned long h32;
	uint32_t *hash32;
	uint64_t uintdiff;
	int ofs;

	swab256(rhash, work->hash);
	/* Skip leading zero bytes so the interesting 32 bits are shown. */
	for (ofs = 0; ofs <= 28; ofs ++) {
		if (rhash[ofs])
			break;
	}
	hash32 = (uint32_t *)(rhash + ofs);
	h32 = be32toh(*hash32);
	uintdiff = round(work->work_difficulty);
	suffix_string(work->share_diff, diffdisp, sizeof (diffdisp), 0);
	snprintf(hashshow, 64, "%08lx Diff %s/%"PRIu64"%s", h32, diffdisp, uintdiff,
		 work->block? " BLOCK!"
: ""); +} + +#ifdef HAVE_LIBCURL +static void text_print_status(int thr_id) +{ + struct cgpu_info *cgpu; + char logline[256]; + + cgpu = get_thr_cgpu(thr_id); + if (cgpu) + { + get_statline(logline, sizeof(logline), cgpu); + printf("%s\n", logline); + } +} + +static void print_status(int thr_id) +{ + if (!curses_active) + text_print_status(thr_id); +} + +static bool submit_upstream_work(struct work *work, CURL *curl, bool resubmit) +{ + json_t *val, *res, *err; + char *s; + bool rc = false; + int thr_id = work->thr_id; + struct cgpu_info *cgpu; + struct pool *pool = work->pool; + int rolltime; + struct timeval tv_submit, tv_submit_reply; + char hashshow[64 + 4] = ""; + char worktime[200] = ""; + struct timeval now; + double dev_runtime; + + cgpu = get_thr_cgpu(thr_id); + + /* build JSON-RPC request */ + if (work->gbt) + { + char gbt_block[1024], varint[12]; + unsigned char data[80]; + + flip80(data, work->data); + __bin2hex(gbt_block, data, 80); // 160 length + + if (work->gbt_txns < 0xfd) + { + uint8_t val8 = work->gbt_txns; + + __bin2hex(varint, (const unsigned char *)&val8, 1); + } + else if (work->gbt_txns <= 0xffff) + { + uint16_t val16 = htole16(work->gbt_txns); + + strcat(gbt_block, "fd"); // +2 + __bin2hex(varint, (const unsigned char *)&val16, 2); + } + else + { + uint32_t val32 = htole32(work->gbt_txns); + + strcat(gbt_block, "fe"); // +2 + __bin2hex(varint, (const unsigned char *)&val32, 4); + } + strcat(gbt_block, varint); // +8 max + strcat(gbt_block, work->coinbase); + + s = malloc(1024); + if (unlikely(!s)) + quit(1, "Failed to malloc s in submit_upstream_work"); + sprintf(s, "{\"id\": 0, \"method\": \"submitblock\", \"params\": [\"%s", gbt_block); + /* Has submit/coinbase support */ + if (!pool->has_gbt) + { + cg_rlock(&pool->gbt_lock); + if (pool->txn_data) + s = realloc_strcat(s, pool->txn_data); + cg_runlock(&pool->gbt_lock); + } + if (work->job_id) + { + s = realloc_strcat(s, "\", {\"workid\": \""); + s = realloc_strcat(s, work->job_id); + s = 
realloc_strcat(s, "\"}]}"); + } + else + s = realloc_strcat(s, "\"]}"); + } + else + { + char *hexstr; + + endian_flip128(work->data, work->data); + + /* build hex string */ + hexstr = bin2hex(work->data, 118); + s = strdup("{\"method\": \"getwork\", \"params\": [ \""); + s = realloc_strcat(s, hexstr); + s = realloc_strcat(s, "\" ], \"id\":1}"); + free(hexstr); + } + applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, s); + s = realloc_strcat(s, "\n"); + + cgtime(&tv_submit); + /* issue JSON-RPC request */ + val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, s, false, false, &rolltime, pool, true); + cgtime(&tv_submit_reply); + free(s); + + if (unlikely(!val)) + { + applog(LOG_INFO, "submit_upstream_work json_rpc_call failed"); + if (!pool_tset(pool, &pool->submit_fail)) + { + total_ro++; + pool->remotefail_occasions++; + if (opt_lowmem) + { + applog(LOG_WARNING, "Pool %d communication failure, discarding shares", pool->pool_no); + goto out; + } + applog(LOG_WARNING, "Pool %d communication failure, caching submissions", pool->pool_no); + } + cgsleep_ms(5000); + goto out; + } + else if (pool_tclear(pool, &pool->submit_fail)) + applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no); + + res = json_object_get(val, "result"); + err = json_object_get(val, "error"); + + if (!QUIET) + { + show_hash(work, hashshow); + + if (opt_worktime) + { + char workclone[20]; + struct tm *tm, tm_getwork, tm_submit_reply; + double getwork_time = tdiff((struct timeval *)&(work->tv_getwork_reply), + (struct timeval *)&(work->tv_getwork)); + double getwork_to_work = tdiff((struct timeval *)&(work->tv_work_start), + (struct timeval *)&(work->tv_getwork_reply)); + double work_time = tdiff((struct timeval *)&(work->tv_work_found), + (struct timeval *)&(work->tv_work_start)); + double work_to_submit = tdiff(&tv_submit, + (struct timeval *)&(work->tv_work_found)); + double submit_time = tdiff(&tv_submit_reply, &tv_submit); + int 
diffplaces = 3; + + time_t tmp_time = work->tv_getwork.tv_sec; + tm = localtime(&tmp_time); + memcpy(&tm_getwork, tm, sizeof(struct tm)); + tmp_time = tv_submit_reply.tv_sec; + tm = localtime(&tmp_time); + memcpy(&tm_submit_reply, tm, sizeof(struct tm)); + + if (work->clone) + { + snprintf(workclone, sizeof(workclone), "C:%1.3f", + tdiff((struct timeval *)&(work->tv_cloned), + (struct timeval *)&(work->tv_getwork_reply))); + } + else + strcpy(workclone, "O"); + + if (work->work_difficulty < 1) + diffplaces = 6; + + snprintf(worktime, sizeof(worktime), + " <-%08lx.%08lx M:%c D:%1.*f G:%02d:%02d:%02d:%1.3f %s (%1.3f) W:%1.3f (%1.3f) S:%1.3f R:%02d:%02d:%02d", + (unsigned long)be32toh(*(uint32_t *)&(work->data[28])), + (unsigned long)be32toh(*(uint32_t *)&(work->data[24])), + work->getwork_mode, diffplaces, work->work_difficulty, + tm_getwork.tm_hour, tm_getwork.tm_min, + tm_getwork.tm_sec, getwork_time, workclone, + getwork_to_work, work_time, work_to_submit, submit_time, + tm_submit_reply.tm_hour, tm_submit_reply.tm_min, + tm_submit_reply.tm_sec); + } + } + + share_result(val, res, err, work, hashshow, resubmit, worktime); + + if (cgpu->dev_start_tv.tv_sec == 0) + dev_runtime = total_secs; + else + { + cgtime(&now); + dev_runtime = tdiff(&now, &(cgpu->dev_start_tv)); + } + + if (dev_runtime < 1.0) + dev_runtime = 1.0; + + cgpu->utility = cgpu->accepted / dev_runtime * 60; + + if (!opt_realquiet) + print_status(thr_id); + if (!want_per_device_stats) + { + char logline[256]; + + get_statline(logline, sizeof(logline), cgpu); + applog(LOG_INFO, "%s", logline); + } + + json_decref(val); + + rc = true; +out: + return rc; +} + +static bool get_upstream_work(struct work *work, CURL *curl) +{ + struct pool *pool = work->pool; + struct cgminer_pool_stats *pool_stats = &(pool->cgminer_pool_stats); + struct timeval tv_elapsed; + json_t *val = NULL; + bool rc = false; + char *url; + + url = pool->rpc_url; + + applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", url, 
	       pool->rpc_req);

	cgtime(&work->tv_getwork);

	val = json_rpc_call(curl, url, pool->rpc_userpass, pool->rpc_req, false,
			    false, &work->rolltime, pool, false);
	pool_stats->getwork_attempts++;

	if (likely(val)) {
		rc = work_decode(pool, work, val);
		if (unlikely(!rc))
			applog(LOG_DEBUG, "Failed to decode work in get_upstream_work");
	} else
		applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work");

	cgtime(&work->tv_getwork_reply);
	timersub(&(work->tv_getwork_reply), &(work->tv_getwork), &tv_elapsed);
	/* Exponentially decaying rolling average of getwork latency. */
	pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63;
	pool_stats->getwork_wait_rolling /= 1.63;

	timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait));
	if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) {
		pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec;
		pool_stats->getwork_wait_max.tv_usec = tv_elapsed.tv_usec;
	}
	if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_min), <)) {
		pool_stats->getwork_wait_min.tv_sec = tv_elapsed.tv_sec;
		pool_stats->getwork_wait_min.tv_usec = tv_elapsed.tv_usec;
	}
	pool_stats->getwork_calls++;

	work->pool = pool;
	work->longpoll = false;
	work->getwork_mode = GETWORK_MODE_POOL;
	calc_diff(work, 0);
	total_getworks++;
	pool->getwork_requested++;

	if (likely(val))
		json_decref(val);

	return rc;
}
#endif /* HAVE_LIBCURL */

/* Specifies whether we can use this pool for work or not. */
static bool pool_unworkable(struct pool *pool)
{
	if (pool->idle)
		return true;
	if (pool->enabled != POOL_ENABLED)
		return true;
	if (pool->has_stratum && !pool->stratum_active)
		return true;
	return false;
}

/* In balanced mode, the amount of diff1 solutions per pool is monitored as a
 * rolling average per 10 minutes and if pools start getting more, it biases
 * away from them to distribute work evenly.
 The share count is reset to the
 * rolling average every 10 minutes to not send all work to one pool after it
 * has been disabled/out for an extended period. */
static struct pool *select_balanced(struct pool *cp)
{
	int i, lowest = cp->shares;
	struct pool *ret = cp;

	/* Pick the workable pool with the fewest shares so far. */
	for (i = 0; i < total_pools; i++) {
		struct pool *pool = pools[i];

		if (pool_unworkable(pool))
			continue;
		if (pool->shares < lowest) {
			lowest = pool->shares;
			ret = pool;
		}
	}

	ret->shares++;
	return ret;
}

static struct pool *priority_pool(int choice);
static bool pool_unusable(struct pool *pool);

/* Select any active pool in a rotating fashion when loadbalance is chosen if
 * it has any quota left. */
static inline struct pool *select_pool(bool lagging)
{
	static int rotating_pool = 0;
	struct pool *pool, *cp;
	bool avail = false;
	int tested, i;

	cp = current_pool();

	if (pool_strategy == POOL_BALANCE) {
		pool = select_balanced(cp);
		goto out;
	}

	/* Outside loadbalance, stick with the current pool unless we are
	 * lagging for work and allowed to stray from it. */
	if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only)) {
		pool = cp;
		goto out;
	} else
		pool = NULL;

	for (i = 0; i < total_pools; i++) {
		struct pool *tp = pools[i];

		if (tp->quota_used < tp->quota_gcd) {
			avail = true;
			break;
		}
	}

	/* There are no pools with quota, so reset them. */
	if (!avail) {
		for (i = 0; i < total_pools; i++)
			pools[i]->quota_used = 0;
		if (++rotating_pool >= total_pools)
			rotating_pool = 0;
	}

	/* Try to find the first pool in the rotation that is usable */
	tested = 0;
	while (!pool && tested++ < total_pools) {
		pool = pools[rotating_pool];
		if (pool->quota_used++ < pool->quota_gcd) {
			if (!pool_unworkable(pool))
				break;
			/* Failover-only flag for load-balance means distribute
			 * unused quota to priority pool 0.
*/ + if (opt_fail_only) + priority_pool(0)->quota_used--; + } + pool = NULL; + if (++rotating_pool >= total_pools) + rotating_pool = 0; + } + + /* If there are no alive pools with quota, choose according to + * priority. */ + if (!pool) + { + for (i = 0; i < total_pools; i++) + { + struct pool *tp = priority_pool(i); + + if (!pool_unusable(tp)) + { + pool = tp; + break; + } + } + } + + /* If still nothing is usable, use the current pool */ + if (!pool) + pool = cp; +out: + applog(LOG_DEBUG, "Selecting pool %d for work", pool->pool_no); + return pool; +} + +/* truediffone == 0x00000000FFFF0000000000000000000000000000000000000000000000000000 + * Generate a 256 bit binary LE target by cutting up diff into 64 bit sized + * portions or vice versa. */ +static const double truediffone = 26959535291011309493156476344723991336010898738574164086137773096960.0; +static const double bits192 = 6277101735386680763835789423207666416102355444464034512896.0; +static const double bits128 = 340282366920938463463374607431768211456.0; +static const double bits64 = 18446744073709551616.0; + +/* Converts a little endian 256 bit value to a double */ +static double le256todouble(const void *target) +{ + uint64_t *data64; + double dcut64; + + data64 = (uint64_t *)(target + 24); + dcut64 = le64toh(*data64) * bits192; + + data64 = (uint64_t *)(target + 16); + dcut64 += le64toh(*data64) * bits128; + + data64 = (uint64_t *)(target + 8); + dcut64 += le64toh(*data64) * bits64; + + data64 = (uint64_t *)(target); + dcut64 += le64toh(*data64); + + return dcut64; +} + +static double diff_from_target(void *target) +{ + double d64, dcut64; + + d64 = truediffone; + dcut64 = le256todouble(target); + if (unlikely(!dcut64)) + dcut64 = 1; + return d64 / dcut64; +} + +/* + * Calculate the work->work_difficulty based on the work->target + */ +static void calc_diff(struct work *work, double known) +{ + struct cgminer_pool_stats *pool_stats = &(work->pool->cgminer_pool_stats); + double difficulty; + uint64_t 
		 uintdiff;

	if (known)
		work->work_difficulty = known;
	else
		work->work_difficulty = diff_from_target(work->target);

	difficulty = work->work_difficulty;

	pool_stats->last_diff = difficulty;
	uintdiff = round(difficulty);
	suffix_string(uintdiff, work->pool->diff, sizeof(work->pool->diff), 0);

	/* Track min/max difficulty seen and how often each occurred. */
	if (difficulty == pool_stats->min_diff)
		pool_stats->min_diff_count++;
	else if (difficulty < pool_stats->min_diff || pool_stats->min_diff == 0) {
		pool_stats->min_diff = difficulty;
		pool_stats->min_diff_count = 1;
	}

	if (difficulty == pool_stats->max_diff)
		pool_stats->max_diff_count++;
	else if (difficulty > pool_stats->max_diff) {
		pool_stats->max_diff = difficulty;
		pool_stats->max_diff_count = 1;
	}
}

static unsigned char bench_hidiff_bins[16][160];
static unsigned char bench_lodiff_bins[16][160];
static unsigned char bench_target[32];

/* Iterate over the lo and hi diff benchmark work items such that we find one
 * diff 32+ share every 32 work items.
 */
static void get_benchmark_work(struct work *work)
{
	work->work_difficulty = 32;
	memcpy(work->target, bench_target, 32);
	work->drv_rolllimit = 0;
	work->mandatory = true;
	work->pool = pools[0];
	cgtime(&work->tv_getwork);
	copy_time(&work->tv_getwork_reply, &work->tv_getwork);
	work->getwork_mode = GETWORK_MODE_BENCHMARK;
}

/* Log a found nonce (byte reversed for display) with the work it solved. */
static void benchfile_dspwork(struct work *work, uint32_t nonce)
{
	char buf[1024];
	uint32_t dn;
	int i;

	/* Reverse the byte order of the nonce. */
	dn = 0;
	for (i = 0; i < 4; i++) {
		dn *= 0x100;
		dn += nonce & 0xff;
		nonce /= 0x100;
	}

	if ((sizeof(work->data) * 2 + 1) > sizeof(buf))
		quithere(1, "BENCHFILE Invalid buf size");

	__bin2hex(buf, work->data, sizeof(work->data));

	applog(LOG_ERR, "BENCHFILE nonce %u=0x%08x for work=%s",
	       (unsigned int)dn, (unsigned int)dn, buf);

}

/* Build the next work item from the --benchfile CSV, rewinding to the start
 * of the file when the end is reached.  Returns true when work was built;
 * quits on malformed input. */
static bool benchfile_get_work(struct work *work)
{
	char buf[1024];
	char item[1024];
	bool got = false;

	if (!benchfile_in) {
		if (opt_benchfile)
			benchfile_in = fopen(opt_benchfile, "r");
		else
			quit(1, "BENCHFILE Invalid benchfile NULL");

		if (!benchfile_in)
			quit(1, "BENCHFILE Failed to open benchfile '%s'", opt_benchfile);

		benchfile_line = 0;

		/* Skip the header line. */
		if (!fgets(buf, 1024, benchfile_in))
			quit(1, "BENCHFILE Failed to read benchfile '%s'", opt_benchfile);

		got = true;
		benchfile_work = 0;
	}

	if (!got) {
		if (!fgets(buf, 1024, benchfile_in)) {
			if (benchfile_work == 0)
				quit(1, "BENCHFILE No work in benchfile '%s'", opt_benchfile);
			fclose(benchfile_in);
			benchfile_in = NULL;
			return benchfile_get_work(work);
		}
	}

	do {
		benchfile_line++;

		// Empty lines and lines starting with '#' or '/' are ignored
		if (*buf != '\0' && *buf != '#' && *buf != '/') {
			char *commas[BENCHWORK_COUNT];
			int i, j, len;
			long nonce_time;

			/* Split the comma separated fields in place,
			 * validating each field's expected length. */
			commas[0] = buf;
			for (i = 1; i < BENCHWORK_COUNT; i++) {
				commas[i] = strchr(commas[i-1], ',');
				if (!commas[i]) {
					quit(1, "BENCHFILE Invalid input file line %d"
						" - field count is %d but should be %d",
						benchfile_line, i, BENCHWORK_COUNT);
				}
				len = commas[i] - commas[i-1];
				if (benchfile_data[i-1].length &&
				    (len != benchfile_data[i-1].length)) {
					quit(1, "BENCHFILE Invalid input file line %d "
						"field %d (%s) length is %d but should be %d",
						benchfile_line, i,
						benchfile_data[i-1].name,
						len, benchfile_data[i-1].length);
				}

				*(commas[i]++) = '\0';
			}

			// NonceTime may have LF's etc
			len = strlen(commas[BENCHWORK_NONCETIME]);
			if (len < benchfile_data[BENCHWORK_NONCETIME].length) {
				quit(1, "BENCHFILE Invalid input file line %d field %d"
					" (%s) length is %d but should be least %d",
					benchfile_line, BENCHWORK_NONCETIME+1,
					benchfile_data[BENCHWORK_NONCETIME].name, len,
					benchfile_data[BENCHWORK_NONCETIME].length);
			}

			sprintf(item, "0000000%c", commas[BENCHWORK_VERSION][0]);

			/* Reverse prevhash and merkle root in 32 bit chunks
			 * to assemble the header hex in hashing order. */
			j = strlen(item);
			for (i = benchfile_data[BENCHWORK_PREVHASH].length-8; i >= 0; i -= 8) {
				sprintf(&(item[j]), "%.8s", &commas[BENCHWORK_PREVHASH][i]);
				j += 8;
			}

			for (i = benchfile_data[BENCHWORK_MERKLEROOT].length-8; i >= 0; i -= 8) {
				sprintf(&(item[j]), "%.8s", &commas[BENCHWORK_MERKLEROOT][i]);
				j += 8;
			}

			nonce_time = atol(commas[BENCHWORK_NONCETIME]);

			sprintf(&(item[j]), "%08lx", nonce_time);
			j += 8;

			strcpy(&(item[j]), commas[BENCHWORK_DIFFBITS]);
			j += benchfile_data[BENCHWORK_DIFFBITS].length;

			memset(work, 0, sizeof(*work));

			hex2bin(work->data, item, j >> 1);

			calc_midstate(work);

			benchfile_work++;

			return true;
		}
	} while (fgets(buf, 1024, benchfile_in));

	if (benchfile_work == 0)
		quit(1, "BENCHFILE No work in benchfile '%s'", opt_benchfile);
	fclose(benchfile_in);
	benchfile_in = NULL;
	return benchfile_get_work(work);
}

/* Wrap benchfile_get_work, stamping the result as benchmark work belonging
 * to the first pool. */
static void get_benchfile_work(struct work *work)
{
	benchfile_get_work(work);
	work->mandatory = true;
	work->pool = pools[0];
	cgtime(&work->tv_getwork);
	copy_time(&work->tv_getwork_reply, &work->tv_getwork);
	work->getwork_mode = GETWORK_MODE_BENCHMARK;
	calc_diff(work, 0);
}

#ifdef HAVE_CURSES
/* Tear down the curses subwindows prior to a resize or shutdown. */
static void disable_curses_windows(void)
{
	leaveok(logwin, false);
	leaveok(statuswin, false);
	leaveok(mainwin, false);
	nocbreak();
	echo();
	delwin(logwin);
	delwin(statuswin);
}

/* Force locking of curses console_lock on shutdown since a dead thread might
 * have grabbed the lock. */
static bool curses_active_forcelocked(void)
{
	bool ret;

	mutex_trylock(&console_lock);
	ret = curses_active;
	if (!ret)
		unlock_curses();
	return ret;
}

/* Shut curses down and return the terminal to normal operation. */
static void disable_curses(void)
{
	if (curses_active_forcelocked()) {
		use_curses = false;
		curses_active = false;
		disable_curses_windows();
		delwin(mainwin);
		endwin();
#ifdef WIN32
		// Move the cursor to after curses output.
		HANDLE hout = GetStdHandle(STD_OUTPUT_HANDLE);
		CONSOLE_SCREEN_BUFFER_INFO csbi;
		COORD coord;

		if (GetConsoleScreenBufferInfo(hout, &csbi)) {
			coord.X = 0;
			coord.Y = csbi.dwSize.Y - 1;
			SetConsoleCursorPosition(hout, coord);
		}
#endif
		unlock_curses();
	}
}
#endif

/* Cancel a control thread, allowing it up to a second to exit. */
static void kill_timeout(struct thr_info *thr)
{
	cg_completion_timeout(&thr_info_cancel, thr, 1000);
}

/* Cancel and join every mining thread. */
static void kill_mining(void)
{
	struct thr_info *thr;
	int i;

	forcelog(LOG_DEBUG, "Killing off mining threads");
	/* Kill the mining threads*/
	for (i = 0; i < mining_threads; i++) {
		pthread_t *pth = NULL;

		thr = get_thread(i);
		if (thr && PTH(thr) != 0L)
			pth = &thr->pth;
		thr_info_cancel(thr);
#ifndef WIN32
		if (pth && *pth)
			pthread_join(*pth, NULL);
#else
		if (pth && pth->p)
			pthread_join(*pth, NULL);
#endif
	}
}

/* Stop all control and mining threads in a safe order, then release USB
 * resources.  Does nothing before the first successful pool connect. */
static void __kill_work(void)
{
	struct thr_info *thr;
	int i;

	if (!successful_connect)
		return;

	forcelog(LOG_INFO, "Received kill message");

#ifdef USE_USBUTILS
	/* Best to get rid of it first so it doesn't
	 * try to create any new devices */
	forcelog(LOG_DEBUG, "Killing off HotPlug thread");
	thr =
	      &control_thr[hotplug_thr_id];
	kill_timeout(thr);
#endif

	forcelog(LOG_DEBUG, "Killing off watchpool thread");
	/* Kill the watchpool thread */
	thr = &control_thr[watchpool_thr_id];
	kill_timeout(thr);

	forcelog(LOG_DEBUG, "Killing off watchdog thread");
	/* Kill the watchdog thread */
	thr = &control_thr[watchdog_thr_id];
	kill_timeout(thr);

	forcelog(LOG_DEBUG, "Shutting down mining threads");
	for (i = 0; i < mining_threads; i++) {
		struct cgpu_info *cgpu;

		thr = get_thread(i);
		if (!thr)
			continue;
		cgpu = thr->cgpu;
		if (!cgpu)
			continue;

		cgpu->shutdown = true;
	}

	/* Give the threads a moment to notice the shutdown flag. */
	sleep(1);

	cg_completion_timeout(&kill_mining, NULL, 3000);

	/* Stop the others */
	forcelog(LOG_DEBUG, "Killing off API thread");
	thr = &control_thr[api_thr_id];
	kill_timeout(thr);

#ifdef USE_USBUTILS
	/* Release USB resources in case it's a restart
	 * and not a QUIT */
	forcelog(LOG_DEBUG, "Releasing all USB devices");
	cg_completion_timeout(&usb_cleanup, NULL, 1000);

	forcelog(LOG_DEBUG, "Killing off usbres thread");
	thr = &control_thr[usbres_thr_id];
	kill_timeout(thr);
#endif

}

/* This should be the common exit path */
void kill_work(void)
{
	cg_completion_timeout(&__kill_work, NULL, 5000);

	quit(0, "Shutdown signal received.");
}

static
#ifdef WIN32
const
#endif
char **initial_args;

static void clean_up(bool restarting);

/* Re-exec the miner with its original arguments after a clean teardown. */
void app_restart(void)
{
	applog(LOG_WARNING, "Attempting to restart %s", packagename);

	cg_completion_timeout(&__kill_work, NULL, 5000);
	clean_up(true);

#if defined(unix) || defined(__APPLE__)
	/* Kill the forked parent that is waiting on us when daemonised. */
	if (forkpid > 0) {
		kill(forkpid, SIGTERM);
		forkpid = 0;
	}
#endif

	execv(initial_args[0], (EXECV_2ND_ARG_TYPE)initial_args);
	applog(LOG_WARNING, "Failed to restart application");
}

static void sighandler(int __maybe_unused sig)
{
	/* Restore signal handlers so we can still quit if kill_work fails */
	sigaction(SIGTERM, &termhandler, NULL);
	sigaction(SIGINT,
&inthandler, NULL); + kill_work(); +} + +static void _stage_work(struct work *work); + +#define stage_work(WORK) do { \ + _stage_work(WORK); \ + WORK = NULL; \ +} while (0) + +/* Adjust an existing char ntime field with a relative noffset */ +static void modify_ntime(char *ntime, int noffset) +{ + unsigned char bin[4]; + uint32_t h32, *be32 = (uint32_t *)bin; + + hex2bin(bin, ntime, 4); + h32 = be32toh(*be32) + noffset; + *be32 = htobe32(h32); + __bin2hex(ntime, bin, 4); +} + +void roll_work(struct work *work) +{ + uint32_t *work_ntime; + uint32_t ntime; + + work_ntime = (uint32_t *)(work->data + 68); + ntime = be32toh(*work_ntime); + ntime++; + *work_ntime = htobe32(ntime); + local_work++; + work->rolls++; + work->nonce = 0; + applog(LOG_DEBUG, "Successfully rolled work"); + /* Change the ntime field if this is stratum work */ + if (work->ntime) + modify_ntime(work->ntime, 1); + + /* This is now a different work item so it needs a different ID for the + * hashtable */ + work->id = total_work_inc(); +} + +struct work *make_clone(struct work *work) +{ + struct work *work_clone = copy_work(work); + + work_clone->clone = true; + cgtime((struct timeval *)&(work_clone->tv_cloned)); + work_clone->longpoll = false; + work_clone->mandatory = false; + /* Make cloned work appear slightly older to bias towards keeping the + * master work item which can be further rolled */ + work_clone->tv_staged.tv_sec -= 1; + + return work_clone; +} + +#ifdef HAVE_LIBCURL +/* Called with pool_lock held. Recruit an extra curl if none are available for + * this pool. */ +static void recruit_curl(struct pool *pool) +{ + struct curl_ent *ce = calloc(sizeof(struct curl_ent), 1); + + if (unlikely(!ce)) + quit(1, "Failed to calloc in recruit_curl"); + + ce->curl = curl_easy_init(); + if (unlikely(!ce->curl)) + quit(1, "Failed to init in recruit_curl"); + + list_add(&ce->node, &pool->curlring); + pool->curls++; +} + +/* Grab an available curl if there is one. 
If not, then recruit extra curls + * unless we are in a submit_fail situation, or we have opt_delaynet enabled + * and there are already 5 curls in circulation. Limit total number to the + * number of mining threads per pool as well to prevent blasting a pool during + * network delays/outages. */ +static struct curl_ent *pop_curl_entry(struct pool *pool) +{ + int curl_limit = opt_delaynet ? 5 : (mining_threads + opt_queue) * 2; + bool recruited = false; + struct curl_ent *ce; + + mutex_lock(&pool->pool_lock); +retry: + if (!pool->curls) + { + recruit_curl(pool); + recruited = true; + } + else if (list_empty(&pool->curlring)) + { + if (pool->curls >= curl_limit) + { + pthread_cond_wait(&pool->cr_cond, &pool->pool_lock); + goto retry; + } + else + { + recruit_curl(pool); + recruited = true; + } + } + ce = list_entry(pool->curlring.next, struct curl_ent, node); + list_del(&ce->node); + mutex_unlock(&pool->pool_lock); + + if (recruited) + applog(LOG_DEBUG, "Recruited curl for pool %d", pool->pool_no); + return ce; +} + +static void push_curl_entry(struct curl_ent *ce, struct pool *pool) +{ + mutex_lock(&pool->pool_lock); + list_add_tail(&ce->node, &pool->curlring); + cgtime(&ce->tv); + pthread_cond_broadcast(&pool->cr_cond); + mutex_unlock(&pool->pool_lock); +} + +static bool stale_work(struct work *work, bool share); + +static inline bool should_roll(struct work *work) +{ + struct timeval now; + time_t expiry; + + if (work->pool != current_pool() && pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) + return false; + + if (work->rolltime > opt_scantime) + expiry = work->rolltime; + else + expiry = opt_scantime; + expiry = expiry * 2 / 3; + + /* We shouldn't roll if we're unlikely to get one shares' duration + * work out of doing so */ + cgtime(&now); + if (now.tv_sec - work->tv_staged.tv_sec > expiry) + return false; + + return true; +} + +/* Limit rolls to 7000 to not beyond 2 hours in the future where bitcoind will + * reject blocks as invalid. 
*/ +static inline bool can_roll(struct work *work) +{ + return (!work->stratum && work->pool && work->rolltime && !work->clone && + work->rolls < 7000 && !stale_work(work, false)); +} + +static void *submit_work_thread(void *userdata) +{ + struct work *work = (struct work *)userdata; + struct pool *pool = work->pool; + bool resubmit = false; + struct curl_ent *ce; + + pthread_detach(pthread_self()); + + RenameThread("SubmitWork"); + + applog(LOG_DEBUG, "Creating extra submit work thread"); + + ce = pop_curl_entry(pool); + /* submit solution to bitcoin via JSON-RPC */ + while (!submit_upstream_work(work, ce->curl, resubmit)) + { + if (opt_lowmem) + { + applog(LOG_NOTICE, "Pool %d share being discarded to minimise memory cache", pool->pool_no); + break; + } + resubmit = true; + if (stale_work(work, true)) + { + applog(LOG_NOTICE, "Pool %d share became stale while retrying submit, discarding", pool->pool_no); + + mutex_lock(&stats_lock); + total_stale++; + pool->stale_shares++; + total_diff_stale += work->work_difficulty; + pool->diff_stale += work->work_difficulty; + mutex_unlock(&stats_lock); + + free_work(work); + break; + } + + /* pause, then restart work-request loop */ + applog(LOG_INFO, "json_rpc_call failed on submit_work, retrying"); + } + push_curl_entry(ce, pool); + + return NULL; +} + +static bool clone_available(void) +{ + struct work *work_clone = NULL, *work, *tmp; + bool cloned = false; + + mutex_lock(stgd_lock); + if (!staged_rollable) + goto out_unlock; + + HASH_ITER(hh, staged_work, work, tmp) + { + if (can_roll(work) && should_roll(work)) + { + roll_work(work); + work_clone = make_clone(work); + roll_work(work); + cloned = true; + break; + } + } + +out_unlock: + mutex_unlock(stgd_lock); + + if (cloned) + { + applog(LOG_DEBUG, "Pushing cloned available work to stage thread"); + stage_work(work_clone); + } + return cloned; +} + +/* Clones work by rolling it if possible, and returning a clone instead of the + * original work item which gets staged 
again to possibly be rolled again in + * the future */ +static struct work *clone_work(struct work *work) +{ + int mrs = mining_threads + opt_queue - total_staged(); + struct work *work_clone; + bool cloned; + + if (mrs < 1) + return work; + + cloned = false; + work_clone = make_clone(work); + while (mrs-- > 0 && can_roll(work) && should_roll(work)) + { + applog(LOG_DEBUG, "Pushing rolled converted work to stage thread"); + stage_work(work_clone); + roll_work(work); + work_clone = make_clone(work); + /* Roll it again to prevent duplicates should this be used + * directly later on */ + roll_work(work); + cloned = true; + } + + if (cloned) + { + stage_work(work); + return work_clone; + } + + free_work(work_clone); + + return work; +} + +#else /* HAVE_LIBCURL */ +static void *submit_work_thread(void __maybe_unused *userdata) +{ + pthread_detach(pthread_self()); + return NULL; +} +#endif /* HAVE_LIBCURL */ + +/* Return an adjusted ntime if we're submitting work that a device has + * internally offset the ntime. */ +static char *offset_ntime(const char *ntime, int noffset) +{ + unsigned char bin[4]; + uint32_t h32, *be32 = (uint32_t *)bin; + + hex2bin(bin, ntime, 4); + h32 = be32toh(*be32) + noffset; + *be32 = htobe32(h32); + + return bin2hex(bin, 4); +} + +/* Duplicates any dynamically allocated arrays within the work struct to + * prevent a copied work struct from freeing ram belonging to another struct */ +static void _copy_work(struct work *work, const struct work *base_work, int noffset) +{ + uint32_t id = work->id; + + clean_work(work); + memcpy(work, base_work, sizeof(struct work)); + /* Keep the unique new id assigned during make_work to prevent copied + * work from having the same id. 
*/ + work->id = id; + if (base_work->job_id) + work->job_id = strdup(base_work->job_id); + if (base_work->nonce1) + work->nonce1 = strdup(base_work->nonce1); + if (base_work->ntime) + { + /* If we are passed an noffset the binary work->data ntime and + * the work->ntime hex string need to be adjusted. */ + if (noffset) + { + uint32_t *work_ntime = (uint32_t *)(work->data + 68); + uint32_t ntime = be32toh(*work_ntime); + + ntime += noffset; + *work_ntime = htobe32(ntime); + work->ntime = offset_ntime(base_work->ntime, noffset); + } + else + work->ntime = strdup(base_work->ntime); + } + else if (noffset) + { + uint32_t *work_ntime = (uint32_t *)(work->data + 68); + uint32_t ntime = be32toh(*work_ntime); + + ntime += noffset; + *work_ntime = htobe32(ntime); + } + if (base_work->coinbase) + work->coinbase = strdup(base_work->coinbase); +#ifdef USE_BITMAIN_C5 + work->version = base_work->version; +#endif +} + +void set_work_ntime(struct work *work, int ntime) +{ + uint32_t *work_ntime = (uint32_t *)(work->data + 68); + + *work_ntime = htobe32(ntime); + if (work->ntime) + { + free(work->ntime); + work->ntime = bin2hex((unsigned char *)work_ntime, 4); + } +} + +/* Generates a copy of an existing work struct, creating fresh heap allocations + * for all dynamically allocated arrays within the struct. noffset is used for + * when a driver has internally rolled the ntime, noffset is a relative value. + * The macro copy_work() calls this function with an noffset of 0. 
*/ +struct work *copy_work_noffset(struct work *base_work, int noffset) +{ + struct work *work = make_work(); + + _copy_work(work, base_work, noffset); + + return work; +} + +void pool_died(struct pool *pool) +{ + if (!pool_tset(pool, &pool->idle)) + { + cgtime(&pool->tv_idle); + if (pool == current_pool()) + { + applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url); + switch_pools(NULL); + } + else + applog(LOG_INFO, "Pool %d %s failed to return work", pool->pool_no, pool->rpc_url); + } +} + +static bool stale_work(struct work *work, bool share) +{ + struct timeval now; + time_t work_expiry; + struct pool *pool; + int getwork_delay; + + if (opt_benchmark || opt_benchfile) + return false; + + if (work->work_block != work_block) + { + applog(LOG_DEBUG, "Work stale due to block mismatch"); + return true; + } + + /* Technically the rolltime should be correct but some pools + * advertise a broken expire= that is lower than a meaningful + * scantime */ + if (work->rolltime > opt_scantime) + work_expiry = work->rolltime; + else + work_expiry = opt_expiry; + + pool = work->pool; + + if (!share && pool->has_stratum) + { + bool same_job; + + if (!pool->stratum_active || !pool->stratum_notify) + { + applog(LOG_DEBUG, "Work stale due to stratum inactive"); + return true; + } + + same_job = true; + + cg_rlock(&pool->data_lock); + if (strcmp(work->job_id, pool->swork.job_id)) + same_job = false; + cg_runlock(&pool->data_lock); + + if (!same_job) + { + applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch"); + return true; + } + } + + /* Factor in the average getwork delay of this pool, rounding it up to + * the nearest second */ + getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1; + work_expiry -= getwork_delay; + if (unlikely(work_expiry < 5)) + work_expiry = 5; + + cgtime(&now); + if ((now.tv_sec - work->tv_staged.tv_sec) >= work_expiry) + { + applog(LOG_DEBUG, "Work stale due to expiry"); + return true; + } + + if 
(opt_fail_only && !share && pool != current_pool() && !work->mandatory && + pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) + { + applog(LOG_DEBUG, "Work stale due to fail only pool mismatch"); + return true; + } + + return false; +} + +uint64_t share_diff(const struct work *work) +{ + bool new_best = false; + double d64, s64; + uint64_t ret; + + d64 = truediffone; + s64 = le256todouble(work->hash); + if (unlikely(!s64)) + s64 = 0; + + ret = round(d64 / s64); + + cg_wlock(&control_lock); + if (unlikely(ret > best_diff)) + { + new_best = true; + best_diff = ret; + suffix_string(best_diff, best_share, sizeof(best_share), 0); + } + if (unlikely(ret > work->pool->best_diff)) + work->pool->best_diff = ret; + cg_wunlock(&control_lock); + + if (unlikely(new_best)) + applog(LOG_INFO, "New best share: %s", best_share); + + return ret; +} + +uint64_t share_ndiff(const struct work *work) +{ + double d64, s64; + uint64_t ret = 0; + + if(work != NULL) + { + d64 = truediffone; + s64 = le256todouble(work->hash); + if (unlikely(!s64)) + { + ret = 0; + } + else + { + ret = (d64 / s64); + } + } + return ret; +} + +static void regen_hash(struct work *work) +{ + uint32_t *data32 = (uint32_t *)(work->data); + unsigned char swap[80]; + uint32_t *swap32 = (uint32_t *)swap; + unsigned char hash1[32]; + + flip80(swap32, data32); + sha256(swap, 80, hash1); + sha256(hash1, 32, (unsigned char *)(work->hash)); +} + +static bool cnx_needed(struct pool *pool); + +/* Find the pool that currently has the highest priority */ +static struct pool *priority_pool(int choice) +{ + struct pool *ret = NULL; + int i; + + for (i = 0; i < total_pools; i++) + { + struct pool *pool = pools[i]; + + if (pool->prio == choice) + { + ret = pool; + break; + } + } + + if (unlikely(!ret)) + { + applog(LOG_ERR, "WTF No pool %d found!", choice); + return pools[choice]; + } + return ret; +} + +/* Specifies whether we can switch to this pool or not. 
*/ +static bool pool_unusable(struct pool *pool) +{ + if (pool->idle) + return true; + if (pool->enabled != POOL_ENABLED) + return true; + return false; +} + +void switch_pools(struct pool *selected) +{ + struct pool *pool, *last_pool; + int i, pool_no, next_pool; + + cg_wlock(&control_lock); + last_pool = currentpool; + pool_no = currentpool->pool_no; + + /* Switch selected to pool number 0 and move the rest down */ + if (selected) + { + if (selected->prio != 0) + { + for (i = 0; i < total_pools; i++) + { + pool = pools[i]; + if (pool->prio < selected->prio) + pool->prio++; + } + selected->prio = 0; + } + } + + switch (pool_strategy) + { + /* All of these set to the master pool */ + case POOL_BALANCE: + case POOL_FAILOVER: + case POOL_LOADBALANCE: + for (i = 0; i < total_pools; i++) + { + pool = priority_pool(i); + if (pool_unusable(pool)) + continue; + pool_no = pool->pool_no; + break; + } + break; + /* Both of these simply increment and cycle */ + case POOL_ROUNDROBIN: + case POOL_ROTATE: + if (selected && !selected->idle) + { + pool_no = selected->pool_no; + break; + } + next_pool = pool_no; + /* Select the next alive pool */ + for (i = 1; i < total_pools; i++) + { + next_pool++; + if (next_pool >= total_pools) + next_pool = 0; + pool = pools[next_pool]; + if (pool_unusable(pool)) + continue; + pool_no = next_pool; + break; + } + break; + default: + break; + } + + currentpool = pools[pool_no]; + pool = currentpool; + cg_wunlock(&control_lock); + + /* Set the lagging flag to avoid pool not providing work fast enough + * messages in failover only mode since we have to get all fresh work + * as in restart_threads */ + if (opt_fail_only) + pool_tset(pool, &pool->lagging); + + if (pool != last_pool && pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) + { + applog(LOG_WARNING, "Switching to pool %d %s", pool->pool_no, pool->rpc_url); + if (pool_localgen(pool) || opt_fail_only) + clear_pool_work(last_pool); + } + + mutex_lock(&lp_lock); + 
pthread_cond_broadcast(&lp_cond); + mutex_unlock(&lp_lock); + +} + +void _discard_work(struct work *work) +{ + if (!work->clone && !work->rolls && !work->mined) + { + if (work->pool) + { + work->pool->discarded_work++; + work->pool->quota_used--; + work->pool->works--; + } + total_discarded++; + //applog(LOG_DEBUG, "Discarded work"); + } + else + applog(LOG_DEBUG, "Discarded cloned or rolled work"); + free_work(work); +} + +static void wake_gws(void) +{ + mutex_lock(stgd_lock); + pthread_cond_signal(&gws_cond); + mutex_unlock(stgd_lock); +} + +static void discard_stale(void) +{ + struct work *work, *tmp; + int stale = 0; + + mutex_lock(stgd_lock); + HASH_ITER(hh, staged_work, work, tmp) + { + if (stale_work(work, false)) + { + HASH_DEL(staged_work, work); + discard_work(work); + stale++; + } + } + pthread_cond_signal(&gws_cond); + mutex_unlock(stgd_lock); + + if (stale) + applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale); +} + +/* A generic wait function for threads that poll that will wait a specified + * time tdiff waiting on the pthread conditional that is broadcast when a + * work restart is required. Returns the value of pthread_cond_timedwait + * which is zero if the condition was met or ETIMEDOUT if not. 
+ */
+int restart_wait(struct thr_info *thr, unsigned int mstime)
+{
+	struct timeval now, then, tdiff;
+	struct timespec abstime;
+	int rc;
+
+	/* Convert the relative millisecond timeout into the absolute
+	 * timespec required by pthread_cond_timedwait. */
+	tdiff.tv_sec = mstime / 1000;
+	tdiff.tv_usec = mstime * 1000 - (tdiff.tv_sec * 1000000);
+	cgtime(&now);
+	timeradd(&now, &tdiff, &then);
+	abstime.tv_sec = then.tv_sec;
+	abstime.tv_nsec = then.tv_usec * 1000;
+
+	mutex_lock(&restart_lock);
+	/* If a restart was already flagged for this thread there is no need
+	 * to wait at all; report the condition as met. */
+	if (thr->work_restart)
+		rc = 0;
+	else
+		rc = pthread_cond_timedwait(&restart_cond, &restart_lock, &abstime);
+	mutex_unlock(&restart_lock);
+
+	return rc;
+}
+
+static void *restart_thread(void __maybe_unused *arg)
+{
+	struct pool *cp = current_pool();
+	struct cgpu_info *cgpu;
+	int i, mt;
+	pthread_detach(pthread_self());
+	/* Artificially set the lagging flag to avoid pool not providing work
+	 * fast enough messages after every long poll */
+	pool_tset(cp, &cp->lagging);
+	/* Discard staged work that is now stale */
+	discard_stale();
+	/* Snapshot the thread count under the read lock so the loop below can
+	 * run without holding it. */
+	rd_lock(&mining_thr_lock);
+	mt = mining_threads;
+	rd_unlock(&mining_thr_lock);
+	for (i = 0; i < mt; i++)
+	{
+		cgpu = mining_thr[i]->cgpu;
+		if (unlikely(!cgpu))
+			continue;
+		if (cgpu->deven != DEV_ENABLED)
+			continue;
+		mining_thr[i]->work_restart = true;
+		flush_queue(cgpu);
+		cgpu->drv->flush_work(cgpu);
+	}
+	mutex_lock(&restart_lock);
+	pthread_cond_broadcast(&restart_cond);
+	mutex_unlock(&restart_lock);
+#ifdef USE_USBUTILS
+	/* Cancels any cancellable usb transfers. Flagged as such it means they
+	 * are usually waiting on a read result and it's safe to abort the read
+	 * early. */
+	cancel_usb_transfers();
+#endif
+	return NULL;
+}
+
+/* In order to prevent a deadlock via the various drv->flush_work
+ * implementations we send the restart messages via a separate thread.
*/ +static void restart_threads(void) +{ + pthread_t rthread; + + cgtime(&restart_tv_start); + if (unlikely(pthread_create(&rthread, NULL, restart_thread, NULL))) + quit(1, "Failed to create restart thread"); +} + +static void signal_work_update(void) +{ + int i; + + applog(LOG_INFO, "Work update message received"); + + cgtime(&update_tv_start); + rd_lock(&mining_thr_lock); + for (i = 0; i < mining_threads; i++) + mining_thr[i]->work_update = true; + rd_unlock(&mining_thr_lock); +} + +static void set_curblock(char *hexstr, unsigned char *bedata) +{ + int ofs; + + cg_wlock(&ch_lock); + cgtime(&block_timeval); + strcpy(current_hash, hexstr); + memcpy(current_block, bedata, 32); + get_timestamp(blocktime, sizeof(blocktime), &block_timeval); + cg_wunlock(&ch_lock); + + for (ofs = 0; ofs <= 56; ofs++) + { + if (memcmp(¤t_hash[ofs], "0", 1)) + break; + } + strncpy(prev_block, ¤t_hash[ofs], 8); + prev_block[8] = '\0'; + + applog(LOG_INFO, "New block: %s... diff %s", current_hash, block_diff); +} + +/* Search to see if this string is from a block that has been seen before */ +static bool block_exists(char *hexstr) +{ + struct block *s; + + rd_lock(&blk_lock); + HASH_FIND_STR(blocks, hexstr, s); + rd_unlock(&blk_lock); + + if (s) + return true; + return false; +} + +/* Tests if this work is from a block that has been seen before */ +static inline bool from_existing_block(struct work *work) +{ + char *hexstr = bin2hex(work->data + 8, 18); + bool ret; + + ret = block_exists(hexstr); + free(hexstr); + return ret; +} + +static int block_sort(struct block *blocka, struct block *blockb) +{ + return blocka->block_no - blockb->block_no; +} + +/* Decode the current block difficulty which is in packed form */ +static void set_blockdiff(const struct work *work) +{ + uint8_t pow = work->data[72]; + int powdiff = (8 * (0x1d - 3)) - (8 * (pow - 3)); + uint32_t diff32 = be32toh(*((uint32_t *)(work->data + 72))) & 0x00FFFFFF; + double numerator = 0xFFFFULL << powdiff; + double ddiff = 
numerator / (double)diff32; + + if (unlikely(current_diff != ddiff)) + { + suffix_string(ddiff, block_diff, sizeof(block_diff), 0); + current_diff = ddiff; + applog(LOG_NOTICE, "Network diff set to %s", block_diff); + } +} + +static bool test_work_current(struct work *work) +{ + struct pool *pool = work->pool; + unsigned char bedata[32]; + char hexstr[68]; + bool ret = true; + + if (work->mandatory) + return ret; + + swap256(bedata, work->data + 4); + __bin2hex(hexstr, bedata, 32); + + /* Search to see if this block exists yet and if not, consider it a + * new block and set the current block details to this one */ + if (!block_exists(hexstr)) + { + struct block *s = calloc(sizeof(struct block), 1); + int deleted_block = 0; + + if (unlikely(!s)) + quit (1, "test_work_current OOM"); + strcpy(s->hash, hexstr); + s->block_no = new_blocks++; + + wr_lock(&blk_lock); + /* Only keep the last hour's worth of blocks in memory since + * work from blocks before this is virtually impossible and we + * want to prevent memory usage from continually rising */ + if (HASH_COUNT(blocks) > 6) + { + struct block *oldblock; + + HASH_SORT(blocks, block_sort); + oldblock = blocks; + deleted_block = oldblock->block_no; + HASH_DEL(blocks, oldblock); + free(oldblock); + } + HASH_ADD_STR(blocks, hash, s); + set_blockdiff(work); + wr_unlock(&blk_lock); + + if (deleted_block) + applog(LOG_DEBUG, "Deleted block %d from database", deleted_block); + set_curblock(hexstr, bedata); + /* Copy the information to this pool's prev_block since it + * knows the new block exists. */ + memcpy(pool->prev_block, bedata, 32); + if (unlikely(new_blocks == 1)) + { + ret = false; + goto out; + } + + work->work_block = ++work_block; + + if (work->longpoll) + { + if (work->stratum) + { + applog(LOG_NOTICE, "Stratum from pool %d detected new block", + pool->pool_no); + } + else + { + applog(LOG_NOTICE, "%sLONGPOLL from pool %d detected new block", + work->gbt ? 
"GBT " : "", work->pool->pool_no); + } + } + else if (have_longpoll && !pool->gbt_solo) + applog(LOG_NOTICE, "New block detected on network before pool notification"); + else if (!pool->gbt_solo) + applog(LOG_NOTICE, "New block detected on network"); + restart_threads(); + } + else + { + if (memcmp(pool->prev_block, bedata, 32)) + { + /* Work doesn't match what this pool has stored as + * prev_block. Let's see if the work is from an old + * block or the pool is just learning about a new + * block. */ + if (memcmp(bedata, current_block, 32)) + { + /* Doesn't match current block. It's stale */ + applog(LOG_DEBUG, "Stale data from pool %d", pool->pool_no); + ret = false; + } + else + { + /* Work is from new block and pool is up now + * current. */ + applog(LOG_INFO, "Pool %d now up to date", pool->pool_no); + memcpy(pool->prev_block, bedata, 32); + } + } +#if 0 + /* This isn't ideal, this pool is still on an old block but + * accepting shares from it. To maintain fair work distribution + * we work on it anyway. */ + if (memcmp(bedata, current_block, 32)) + applog(LOG_DEBUG, "Pool %d still on old block", pool->pool_no); +#endif + if (work->longpoll) + { + work->work_block = ++work_block; + if (shared_strategy() || work->pool == current_pool()) + { + if (work->stratum) + { + applog(LOG_NOTICE, "Stratum from pool %d requested work restart", + pool->pool_no); + } + else + { + applog(LOG_NOTICE, "%sLONGPOLL from pool %d requested work restart", + work->gbt ? 
"GBT " : "", work->pool->pool_no); + } + restart_threads(); + } + } + } +out: + work->longpoll = false; + + return ret; +} + +static int tv_sort(struct work *worka, struct work *workb) +{ + return worka->tv_staged.tv_sec - workb->tv_staged.tv_sec; +} + +static bool work_rollable(struct work *work) +{ + return (!work->clone && work->rolltime); +} + +static bool hash_push(struct work *work) +{ + bool rc = true; + + mutex_lock(stgd_lock); + if (work_rollable(work)) + staged_rollable++; + if (likely(!getq->frozen)) + { + HASH_ADD_INT(staged_work, id, work); + HASH_SORT(staged_work, tv_sort); + } + else + rc = false; + pthread_cond_broadcast(&getq->cond); + mutex_unlock(stgd_lock); + + return rc; +} + +static void _stage_work(struct work *work) +{ + applog(LOG_DEBUG, "Pushing work from pool %d to hash queue", work->pool->pool_no); + work->work_block = work_block; + test_work_current(work); + work->pool->works++; + hash_push(work); +} + +#ifdef HAVE_CURSES +int curses_int(const char *query) +{ + int ret; + char *cvar; + + cvar = curses_input(query); + ret = atoi(cvar); + free(cvar); + return ret; +} +#endif + +#ifdef HAVE_CURSES +static bool input_pool(bool live); +#endif + +#ifdef HAVE_CURSES +static void display_pool_summary(struct pool *pool) +{ + double efficiency = 0.0; + + if (curses_active_locked()) + { + wlog("Pool: %s\n", pool->rpc_url); + if (pool->solved) + wlog("SOLVED %d BLOCK%s!\n", pool->solved, pool->solved > 1 ? "S" : ""); + if (!pool->has_stratum) + wlog("%s own long-poll support\n", pool->hdr_path ? 
"Has" : "Does not have"); + wlog(" Queued work requests: %d\n", pool->getwork_requested); + wlog(" Share submissions: %"PRId64"\n", pool->accepted + pool->rejected); + wlog(" Accepted shares: %"PRId64"\n", pool->accepted); + wlog(" Rejected shares: %"PRId64"\n", pool->rejected); + wlog(" Accepted difficulty shares: %1.f\n", pool->diff_accepted); + wlog(" Rejected difficulty shares: %1.f\n", pool->diff_rejected); + if (pool->accepted || pool->rejected) + wlog(" Reject ratio: %.1f%%\n", (double)(pool->rejected * 100) / (double)(pool->accepted + pool->rejected)); + efficiency = pool->getwork_requested ? pool->accepted * 100.0 / pool->getwork_requested : 0.0; + if (!pool_localgen(pool)) + wlog(" Efficiency (accepted / queued): %.0f%%\n", efficiency); + + wlog(" Items worked on: %d\n", pool->works); + wlog(" Discarded work due to new blocks: %d\n", pool->discarded_work); + wlog(" Stale submissions discarded due to new blocks: %d\n", pool->stale_shares); + wlog(" Unable to get work from server occasions: %d\n", pool->getfail_occasions); + wlog(" Submitting work remotely delay occasions: %d\n\n", pool->remotefail_occasions); + unlock_curses(); + } +} +#endif + +/* We can't remove the memory used for this struct pool because there may + * still be work referencing it. 
We just remove it from the pools list */ +void remove_pool(struct pool *pool) +{ + int i, last_pool = total_pools - 1; + struct pool *other; + + /* Boost priority of any lower prio than this one */ + for (i = 0; i < total_pools; i++) + { + other = pools[i]; + if (other->prio > pool->prio) + other->prio--; + } + + if (pool->pool_no < last_pool) + { + /* Swap the last pool for this one */ + (pools[last_pool])->pool_no = pool->pool_no; + pools[pool->pool_no] = pools[last_pool]; + } + /* Give it an invalid number */ + pool->pool_no = total_pools; + pool->removed = true; + total_pools--; +} + +/* add a mutex if this needs to be thread safe in the future */ +static struct JE +{ + char *buf; + struct JE *next; +} *jedata = NULL; + +static void json_escape_free() +{ + struct JE *jeptr = jedata; + struct JE *jenext; + + jedata = NULL; + + while (jeptr) + { + jenext = jeptr->next; + free(jeptr->buf); + free(jeptr); + jeptr = jenext; + } +} + +static char *json_escape(char *str) +{ + struct JE *jeptr; + char *buf, *ptr; + + /* 2x is the max, may as well just allocate that */ + ptr = buf = malloc(strlen(str) * 2 + 1); + + jeptr = malloc(sizeof(*jeptr)); + + jeptr->buf = buf; + jeptr->next = jedata; + jedata = jeptr; + + while (*str) + { + if (*str == '\\' || *str == '"') + *(ptr++) = '\\'; + + *(ptr++) = *(str++); + } + + *ptr = '\0'; + + return buf; +} + +void write_config(FILE *fcfg) +{ + struct opt_table *opt; + int i; + + /* Write pool values */ + fputs("{\n\"pools\" : [", fcfg); + for(i = 0; i < total_pools; i++) + { + struct pool *pool = priority_pool(i); + + if (pool->quota != 1) + { + fprintf(fcfg, "%s\n\t{\n\t\t\"quota\" : \"%s%s%s%d;%s\",", i > 0 ? "," : "", + pool->rpc_proxy ? json_escape((char *)proxytype(pool->rpc_proxytype)) : "", + pool->rpc_proxy ? json_escape(pool->rpc_proxy) : "", + pool->rpc_proxy ? "|" : "", + pool->quota, + json_escape(pool->rpc_url)); + } + else + { + fprintf(fcfg, "%s\n\t{\n\t\t\"url\" : \"%s%s%s%s\",", i > 0 ? 
"," : "", + pool->rpc_proxy ? json_escape((char *)proxytype(pool->rpc_proxytype)) : "", + pool->rpc_proxy ? json_escape(pool->rpc_proxy) : "", + pool->rpc_proxy ? "|" : "", + json_escape(pool->rpc_url)); + } + fprintf(fcfg, "\n\t\t\"user\" : \"%s\",", json_escape(pool->rpc_user)); + fprintf(fcfg, "\n\t\t\"pass\" : \"%s\"\n\t}", json_escape(pool->rpc_pass)); + } + fputs("\n]\n", fcfg); + + /* Simple bool,int and char options */ + for (opt = opt_config_table; opt->type != OPT_END; opt++) + { + char *p, *name = strdup(opt->names); + + for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) + { + if (p[1] != '-') + continue; + + if (opt->desc == opt_hidden) + continue; + + if (opt->type & OPT_NOARG && + ((void *)opt->cb == (void *)opt_set_bool || (void *)opt->cb == (void *)opt_set_invbool) && + (*(bool *)opt->u.arg == ((void *)opt->cb == (void *)opt_set_bool))) + { + fprintf(fcfg, ",\n\"%s\" : true", p+2); + continue; + } + + if (opt->type & OPT_HASARG && + ((void *)opt->cb_arg == (void *)opt_set_intval || + (void *)opt->cb_arg == (void *)set_int_0_to_9999 || + (void *)opt->cb_arg == (void *)set_int_1_to_65535 || + (void *)opt->cb_arg == (void *)set_int_0_to_10 || + (void *)opt->cb_arg == (void *)set_int_1_to_10 || + (void *)opt->cb_arg == (void *)set_int_0_to_100 || + (void *)opt->cb_arg == (void *)set_int_0_to_255 || + (void *)opt->cb_arg == (void *)set_int_0_to_200 || + (void *)opt->cb_arg == (void *)set_int_0_to_4 || + (void *)opt->cb_arg == (void *)set_int_32_to_63 || + (void *)opt->cb_arg == (void *)set_int_22_to_55 || + (void *)opt->cb_arg == (void *)set_int_42_to_65)) + { + fprintf(fcfg, ",\n\"%s\" : \"%d\"", p+2, *(int *)opt->u.arg); + continue; + } + + if (opt->type & OPT_HASARG && + (((void *)opt->cb_arg == (void *)set_float_125_to_500) || + (void *)opt->cb_arg == (void *)set_float_100_to_250)) + { + fprintf(fcfg, ",\n\"%s\" : \"%.1f\"", p+2, *(float *)opt->u.arg); + continue; + } + + if (opt->type & (OPT_HASARG | OPT_PROCESSARG) && + (opt->u.arg != 
&opt_set_null)) + { + char *carg = *(char **)opt->u.arg; + + if (carg) + fprintf(fcfg, ",\n\"%s\" : \"%s\"", p+2, json_escape(carg)); + continue; + } + } + free(name); + } + + /* Special case options */ + if (pool_strategy == POOL_BALANCE) + fputs(",\n\"balance\" : true", fcfg); + if (pool_strategy == POOL_LOADBALANCE) + fputs(",\n\"load-balance\" : true", fcfg); + if (pool_strategy == POOL_ROUNDROBIN) + fputs(",\n\"round-robin\" : true", fcfg); + if (pool_strategy == POOL_ROTATE) + fprintf(fcfg, ",\n\"rotate\" : \"%d\"", opt_rotate_period); + fputs("\n}\n", fcfg); + + json_escape_free(); +} + +void zero_bestshare(void) +{ + int i; + + best_diff = 0; + memset(best_share, 0, 8); + suffix_string(best_diff, best_share, sizeof(best_share), 0); + + for (i = 0; i < total_pools; i++) + { + struct pool *pool = pools[i]; + pool->best_diff = 0; + } +} + +static struct timeval tv_hashmeter; +static time_t hashdisplay_t; + +void zero_stats(void) +{ + int i; + + cgtime(&total_tv_start); + copy_time(&tv_hashmeter, &total_tv_start); + total_rolling = 0; + rolling1 = 0; + rolling5 = 0; + rolling15 = 0; + total_mhashes_done = 0; + + for(i = 0; i < CG_LOCAL_MHASHES_MAX_NUM; i++) + { + g_local_mhashes_dones[i] = 0; + } + g_local_mhashes_index = 0; + g_max_fan = 0; + g_max_temp = 0; + total_getworks = 0; + total_accepted = 0; + total_rejected = 0; + hw_errors = 0; + total_stale = 0; + total_discarded = 0; + local_work = 0; + total_go = 0; + total_ro = 0; + total_secs = 1.0; + last_total_secs = 1.0; + total_diff1 = 0; + found_blocks = 0; + total_diff_accepted = 0; + total_diff_rejected = 0; + total_diff_stale = 0; + + for (i = 0; i < total_pools; i++) + { + struct pool *pool = pools[i]; + + pool->getwork_requested = 0; + pool->accepted = 0; + pool->rejected = 0; + pool->stale_shares = 0; + pool->discarded_work = 0; + pool->getfail_occasions = 0; + pool->remotefail_occasions = 0; + pool->last_share_time = 0; + pool->diff1 = 0; + pool->diff_accepted = 0; + pool->diff_rejected = 0; + 
pool->diff_stale = 0; + pool->last_share_diff = 0; + } + + zero_bestshare(); + + for (i = 0; i < total_devices; ++i) + { + struct cgpu_info *cgpu = get_devices(i); + + copy_time(&cgpu->dev_start_tv, &total_tv_start); + + mutex_lock(&hash_lock); + cgpu->total_mhashes = 0; + cgpu->accepted = 0; + cgpu->rejected = 0; + cgpu->hw_errors = 0; + cgpu->utility = 0.0; + cgpu->last_share_pool_time = 0; + cgpu->diff1 = 0; + cgpu->diff_accepted = 0; + cgpu->diff_rejected = 0; + cgpu->last_share_diff = 0; + mutex_unlock(&hash_lock); + + /* Don't take any locks in the driver zero stats function, as + * it's called async from everything else and we don't want to + * deadlock. */ + cgpu->drv->zero_stats(cgpu); + } +} + +static void set_highprio(void) +{ +#ifndef WIN32 + int ret = nice(-10); + + if (!ret) + applog(LOG_DEBUG, "Unable to set thread to high priority"); +#else + SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST); +#endif +} + +static void set_lowprio(void) +{ +#ifndef WIN32 + int ret = nice(10); + + if (!ret) + applog(LOG_INFO, "Unable to set thread to low priority"); +#else + SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_LOWEST); +#endif +} + +#ifdef HAVE_CURSES +static void display_pools(void) +{ + struct pool *pool; + int selected, i; + char input; + + opt_loginput = true; + immedok(logwin, true); + clear_logwin(); +updated: + for (i = 0; i < total_pools; i++) + { + pool = pools[i]; + + if (pool == current_pool()) + wattron(logwin, A_BOLD); + if (pool->enabled != POOL_ENABLED) + wattron(logwin, A_DIM); + wlogprint("%d: ", pool->pool_no); + switch (pool->enabled) + { + case POOL_ENABLED: + wlogprint("Enabled "); + break; + case POOL_DISABLED: + wlogprint("Disabled "); + break; + case POOL_REJECTING: + wlogprint("Rejecting "); + break; + } + wlogprint("%s Quota %d Prio %d: %s User:%s\n", + pool->idle? 
"Dead" : "Alive", + pool->quota, + pool->prio, + pool->rpc_url, pool->rpc_user); + wattroff(logwin, A_BOLD | A_DIM); + } +retry: + wlogprint("\nCurrent pool management strategy: %s\n", + strategies[pool_strategy].s); + if (pool_strategy == POOL_ROTATE) + wlogprint("Set to rotate every %d minutes\n", opt_rotate_period); + wlogprint("[F]ailover only %s\n", opt_fail_only ? "enabled" : "disabled"); + wlogprint("Pool [A]dd [R]emove [D]isable [E]nable [Q]uota change\n"); + wlogprint("[C]hange management strategy [S]witch pool [I]nformation\n"); + wlogprint("Or press any other key to continue\n"); + logwin_update(); + input = getch(); + + if (!strncasecmp(&input, "a", 1)) + { + input_pool(true); + goto updated; + } + else if (!strncasecmp(&input, "r", 1)) + { + if (total_pools <= 1) + { + wlogprint("Cannot remove last pool"); + goto retry; + } + selected = curses_int("Select pool number"); + if (selected < 0 || selected >= total_pools) + { + wlogprint("Invalid selection\n"); + goto retry; + } + pool = pools[selected]; + if (pool == current_pool()) + switch_pools(NULL); + if (pool == current_pool()) + { + wlogprint("Unable to remove pool due to activity\n"); + goto retry; + } + disable_pool(pool); + remove_pool(pool); + goto updated; + } + else if (!strncasecmp(&input, "s", 1)) + { + selected = curses_int("Select pool number"); + if (selected < 0 || selected >= total_pools) + { + wlogprint("Invalid selection\n"); + goto retry; + } + pool = pools[selected]; + enable_pool(pool); + switch_pools(pool); + goto updated; + } + else if (!strncasecmp(&input, "d", 1)) + { + if (enabled_pools <= 1) + { + wlogprint("Cannot disable last pool"); + goto retry; + } + selected = curses_int("Select pool number"); + if (selected < 0 || selected >= total_pools) + { + wlogprint("Invalid selection\n"); + goto retry; + } + pool = pools[selected]; + disable_pool(pool); + if (pool == current_pool()) + switch_pools(NULL); + goto updated; + } + else if (!strncasecmp(&input, "e", 1)) + { + selected = 
curses_int("Select pool number"); + if (selected < 0 || selected >= total_pools) + { + wlogprint("Invalid selection\n"); + goto retry; + } + pool = pools[selected]; + enable_pool(pool); + if (pool->prio < current_pool()->prio) + switch_pools(pool); + goto updated; + } + else if (!strncasecmp(&input, "c", 1)) + { + for (i = 0; i <= TOP_STRATEGY; i++) + wlogprint("%d: %s\n", i, strategies[i].s); + selected = curses_int("Select strategy number type"); + if (selected < 0 || selected > TOP_STRATEGY) + { + wlogprint("Invalid selection\n"); + goto retry; + } + if (selected == POOL_ROTATE) + { + opt_rotate_period = curses_int("Select interval in minutes"); + + if (opt_rotate_period < 0 || opt_rotate_period > 9999) + { + opt_rotate_period = 0; + wlogprint("Invalid selection\n"); + goto retry; + } + } + pool_strategy = selected; + switch_pools(NULL); + goto updated; + } + else if (!strncasecmp(&input, "i", 1)) + { + selected = curses_int("Select pool number"); + if (selected < 0 || selected >= total_pools) + { + wlogprint("Invalid selection\n"); + goto retry; + } + pool = pools[selected]; + display_pool_summary(pool); + goto retry; + } + else if (!strncasecmp(&input, "q", 1)) + { + selected = curses_int("Select pool number"); + if (selected < 0 || selected >= total_pools) + { + wlogprint("Invalid selection\n"); + goto retry; + } + pool = pools[selected]; + selected = curses_int("Set quota"); + if (selected < 0) + { + wlogprint("Invalid negative quota\n"); + goto retry; + } + pool->quota = selected; + adjust_quota_gcd(); + goto updated; + } + else if (!strncasecmp(&input, "f", 1)) + { + opt_fail_only ^= true; + goto updated; + } + else + clear_logwin(); + + immedok(logwin, false); + opt_loginput = false; +} + +static void display_options(void) +{ + int selected; + char input; + + opt_loginput = true; + immedok(logwin, true); + clear_logwin(); +retry: + wlogprint("[N]ormal [C]lear [S]ilent mode (disable all output)\n"); + 
wlogprint("[D]ebug:%s\n[P]er-device:%s\n[Q]uiet:%s\n[V]erbose:%s\n" + "[R]PC debug:%s\n[W]orkTime details:%s\nco[M]pact: %s\n" + "[T]oggle status switching:%s\n" + "w[I]descreen:%s\n" + "[Z]ero statistics\n" + "[L]og interval:%d\n", + opt_debug ? "on" : "off", + want_per_device_stats? "on" : "off", + opt_quiet ? "on" : "off", + opt_log_output ? "on" : "off", + opt_protocol ? "on" : "off", + opt_worktime ? "on" : "off", + opt_compact ? "on" : "off", + switch_status ? "enabled" : "disabled", + opt_widescreen ? "enabled" : "disabled", + opt_log_interval); + wlogprint("Select an option or any other key to return\n"); + logwin_update(); + input = getch(); + if (!strncasecmp(&input, "q", 1)) + { + opt_quiet ^= true; + wlogprint("Quiet mode %s\n", opt_quiet ? "enabled" : "disabled"); + goto retry; + } + else if (!strncasecmp(&input, "v", 1)) + { + opt_log_output ^= true; + if (opt_log_output) + opt_quiet = false; + wlogprint("Verbose mode %s\n", opt_log_output ? "enabled" : "disabled"); + goto retry; + } + else if (!strncasecmp(&input, "n", 1)) + { + opt_log_output = false; + opt_debug = false; + opt_quiet = false; + opt_protocol = false; + opt_compact = false; + want_per_device_stats = false; + wlogprint("Output mode reset to normal\n"); + switch_logsize(false); + goto retry; + } + else if (!strncasecmp(&input, "d", 1)) + { + opt_debug ^= true; + opt_log_output = opt_debug; + if (opt_debug) + opt_quiet = false; + wlogprint("Debug mode %s\n", opt_debug ? "enabled" : "disabled"); + goto retry; + } + else if (!strncasecmp(&input, "m", 1)) + { + opt_compact ^= true; + wlogprint("Compact mode %s\n", opt_compact ? "enabled" : "disabled"); + switch_logsize(false); + goto retry; + } + else if (!strncasecmp(&input, "p", 1)) + { + want_per_device_stats ^= true; + opt_log_output = want_per_device_stats; + wlogprint("Per-device stats %s\n", want_per_device_stats ? 
"enabled" : "disabled"); + goto retry; + } + else if (!strncasecmp(&input, "r", 1)) + { + opt_protocol ^= true; + if (opt_protocol) + opt_quiet = false; + wlogprint("RPC protocol debugging %s\n", opt_protocol ? "enabled" : "disabled"); + goto retry; + } + else if (!strncasecmp(&input, "c", 1)) + clear_logwin(); + else if (!strncasecmp(&input, "l", 1)) + { + selected = curses_int("Interval in seconds"); + if (selected < 0 || selected > 9999) + { + wlogprint("Invalid selection\n"); + goto retry; + } + opt_log_interval = selected; + wlogprint("Log interval set to %d seconds\n", opt_log_interval); + goto retry; + } + else if (!strncasecmp(&input, "s", 1)) + { + opt_realquiet = true; + } + else if (!strncasecmp(&input, "w", 1)) + { + opt_worktime ^= true; + wlogprint("WorkTime details %s\n", opt_worktime ? "enabled" : "disabled"); + goto retry; + } + else if (!strncasecmp(&input, "t", 1)) + { + switch_status ^= true; + goto retry; + } + else if (!strncasecmp(&input, "i", 1)) + { + opt_widescreen ^= true; + goto retry; + } + else if (!strncasecmp(&input, "z", 1)) + { + zero_stats(); + goto retry; + } + else + clear_logwin(); + + immedok(logwin, false); + opt_loginput = false; +} +#endif + +void default_save_file(char *filename) +{ + if (default_config && *default_config) + { + strcpy(filename, default_config); + return; + } + +#if defined(unix) || defined(__APPLE__) + if (getenv("HOME") && *getenv("HOME")) + { + strcpy(filename, getenv("HOME")); + strcat(filename, "/"); + } + else + strcpy(filename, ""); + strcat(filename, ".bmminer/"); + mkdir(filename, 0777); +#else + strcpy(filename, ""); +#endif + strcat(filename, def_conf); +} + +#ifdef HAVE_CURSES +static void set_options(void) +{ + int selected; + char input; + + opt_loginput = true; + immedok(logwin, true); + clear_logwin(); +retry: + wlogprint("[Q]ueue: %d\n[S]cantime: %d\n[E]xpiry: %d\n" + "[W]rite config file\n[C]gminer restart\n", + opt_queue, opt_scantime, opt_expiry); + wlogprint("Select an option or any 
other key to return\n"); + logwin_update(); + input = getch(); + + if (!strncasecmp(&input, "q", 1)) + { + selected = curses_int("Extra work items to queue"); + if (selected < 0 || selected > 9999) + { + wlogprint("Invalid selection\n"); + goto retry; + } + opt_queue = selected; + if (opt_queue < max_queue) + max_queue = opt_queue; + goto retry; + } + else if (!strncasecmp(&input, "s", 1)) + { + selected = curses_int("Set scantime in seconds"); + if (selected < 0 || selected > 9999) + { + wlogprint("Invalid selection\n"); + goto retry; + } + opt_scantime = selected; + goto retry; + } + else if (!strncasecmp(&input, "e", 1)) + { + selected = curses_int("Set expiry time in seconds"); + if (selected < 0 || selected > 9999) + { + wlogprint("Invalid selection\n"); + goto retry; + } + opt_expiry = selected; + goto retry; + } + else if (!strncasecmp(&input, "w", 1)) + { + FILE *fcfg; + char *str, filename[PATH_MAX], prompt[PATH_MAX + 50]; + + default_save_file(filename); + snprintf(prompt, sizeof(prompt), "Config filename to write (Enter for default) [%s]", filename); + str = curses_input(prompt); + if (strcmp(str, "-1")) + { + struct stat statbuf; + + strcpy(filename, str); + free(str); + if (!stat(filename, &statbuf)) + { + wlogprint("File exists, overwrite?\n"); + input = getch(); + if (strncasecmp(&input, "y", 1)) + goto retry; + } + } + else + free(str); + fcfg = fopen(filename, "w"); + if (!fcfg) + { + wlogprint("Cannot open or create file\n"); + goto retry; + } + write_config(fcfg); + fclose(fcfg); + goto retry; + + } + else if (!strncasecmp(&input, "c", 1)) + { + wlogprint("Are you sure?\n"); + input = getch(); + if (!strncasecmp(&input, "y", 1)) + app_restart(); + else + clear_logwin(); + } + else + clear_logwin(); + + immedok(logwin, false); + opt_loginput = false; +} + +#ifdef USE_USBUTILS +static void mt_enable(struct thr_info *mythr) +{ + cgsem_post(&mythr->sem); +} + +static void set_usb(void) +{ + int selected, i, mt, enabled, disabled, zombie, total, 
blacklisted; + struct cgpu_info *cgpu; + struct thr_info *thr; + double val; + char input; + + opt_loginput = true; + immedok(logwin, true); + clear_logwin(); + +retry: + enabled = 0; + disabled = 0; + zombie = 0; + total = 0; + blacklisted = 0; + + rd_lock(&mining_thr_lock); + mt = mining_threads; + rd_unlock(&mining_thr_lock); + + for (i = 0; i < mt; i++) + { + cgpu = mining_thr[i]->cgpu; + if (unlikely(!cgpu)) + continue; + if (cgpu->usbinfo.nodev) + zombie++; + else if (cgpu->deven == DEV_DISABLED) + disabled++; + else + enabled++; + if (cgpu->blacklisted) + blacklisted++; + total++; + } + wlogprint("Hotplug interval:%d\n", hotplug_time); + wlogprint("%d USB devices, %d enabled, %d disabled, %d zombie\n", + total, enabled, disabled, zombie); + wlogprint("[S]ummary of device information\n"); + wlogprint("[E]nable device\n"); + wlogprint("[D]isable device\n"); + wlogprint("[U]nplug to allow hotplug restart\n"); + wlogprint("[R]eset device USB\n"); + wlogprint("[L]ist all known devices\n"); + wlogprint("[B]lacklist current device from current instance of bmminer\n"); + wlogprint("[W]hitelist previously blacklisted device\n"); + wlogprint("[H]otplug interval (0 to disable)\n"); + wlogprint("Select an option or any other key to return\n"); + logwin_update(); + input = getch(); + + if (!strncasecmp(&input, "s", 1)) + { + selected = curses_int("Select device number"); + if (selected < 0 || selected >= mt) + { + wlogprint("Invalid selection\n"); + goto retry; + } + cgpu = mining_thr[selected]->cgpu; + wlogprint("Device %03u:%03u\n", cgpu->usbinfo.bus_number, cgpu->usbinfo.device_address); + wlogprint("Name %s\n", cgpu->drv->name); + wlogprint("ID %d\n", cgpu->device_id); + wlogprint("Enabled: %s\n", cgpu->deven != DEV_DISABLED ? 
"Yes" : "No"); + wlogprint("Temperature %.1f\n", cgpu->temp); + wlogprint("MHS av %.0f\n", cgpu->total_mhashes / cgpu_runtime(cgpu)); + wlogprint("Accepted %d\n", cgpu->accepted); + wlogprint("Rejected %d\n", cgpu->rejected); + wlogprint("Hardware Errors %d\n", cgpu->hw_errors); + wlogprint("Last Share Pool %d\n", cgpu->last_share_pool_time > 0 ? cgpu->last_share_pool : -1); + wlogprint("Total MH %.1f\n", cgpu->total_mhashes); + wlogprint("Diff1 Work %"PRId64"\n", cgpu->diff1); + wlogprint("Difficulty Accepted %.1f\n", cgpu->diff_accepted); + wlogprint("Difficulty Rejected %.1f\n", cgpu->diff_rejected); + wlogprint("Last Share Difficulty %.1f\n", cgpu->last_share_diff); + wlogprint("No Device: %s\n", cgpu->usbinfo.nodev ? "True" : "False"); + wlogprint("Last Valid Work %"PRIu64"\n", (uint64_t)cgpu->last_device_valid_work); + val = 0; + if (cgpu->hw_errors + cgpu->diff1) + val = cgpu->hw_errors / (cgpu->hw_errors + cgpu->diff1); + wlogprint("Device Hardware %.1f%%\n", val); + val = 0; + if (cgpu->diff1) + val = cgpu->diff_rejected / cgpu->diff1; + wlogprint("Device Rejected %.1f%%\n", val); + goto retry; + } + else if (!strncasecmp(&input, "e", 1)) + { + selected = curses_int("Select device number"); + if (selected < 0 || selected >= mt) + { + wlogprint("Invalid selection\n"); + goto retry; + } + cgpu = mining_thr[selected]->cgpu; + if (cgpu->usbinfo.nodev) + { + wlogprint("Device removed, unable to re-enable!\n"); + goto retry; + } + thr = get_thread(selected); + cgpu->deven = DEV_ENABLED; + mt_enable(thr); + goto retry; + } + else if (!strncasecmp(&input, "d", 1)) + { + selected = curses_int("Select device number"); + if (selected < 0 || selected >= mt) + { + wlogprint("Invalid selection\n"); + goto retry; + } + cgpu = mining_thr[selected]->cgpu; + cgpu->deven = DEV_DISABLED; + goto retry; + } + else if (!strncasecmp(&input, "u", 1)) + { + selected = curses_int("Select device number"); + if (selected < 0 || selected >= mt) + { + wlogprint("Invalid selection\n"); + 
goto retry; + } + cgpu = mining_thr[selected]->cgpu; + if (cgpu->usbinfo.nodev) + { + wlogprint("Device already removed, unable to unplug!\n"); + goto retry; + } + usb_nodev(cgpu); + goto retry; + } + else if (!strncasecmp(&input, "r", 1)) + { + selected = curses_int("Select device number"); + if (selected < 0 || selected >= mt) + { + wlogprint("Invalid selection\n"); + goto retry; + } + cgpu = mining_thr[selected]->cgpu; + if (cgpu->usbinfo.nodev) + { + wlogprint("Device already removed, unable to reset!\n"); + goto retry; + } + usb_reset(cgpu); + goto retry; + } + else if (!strncasecmp(&input, "b", 1)) + { + selected = curses_int("Select device number"); + if (selected < 0 || selected >= mt) + { + wlogprint("Invalid selection\n"); + goto retry; + } + cgpu = mining_thr[selected]->cgpu; + if (cgpu->usbinfo.nodev) + { + wlogprint("Device already removed, unable to blacklist!\n"); + goto retry; + } + blacklist_cgpu(cgpu); + goto retry; + } + else if (!strncasecmp(&input, "w", 1)) + { + if (!blacklisted) + { + wlogprint("No blacklisted devices!\n"); + goto retry; + } + wlogprint("Blacklisted devices:\n"); + for (i = 0; i < mt; i++) + { + cgpu = mining_thr[i]->cgpu; + if (unlikely(!cgpu)) + continue; + if (cgpu->blacklisted) + { + wlogprint("%d: %s %d %03u:%03u\n", i, cgpu->drv->name, + cgpu->device_id, cgpu->usbinfo.bus_number, + cgpu->usbinfo.device_address); + } + } + selected = curses_int("Select device number"); + if (selected < 0 || selected >= mt) + { + wlogprint("Invalid selection\n"); + goto retry; + } + cgpu = mining_thr[selected]->cgpu; + if (!cgpu->blacklisted) + { + wlogprint("Device not blacklisted, unable to whitelist\n"); + goto retry; + } + whitelist_cgpu(cgpu); + goto retry; + } + else if (!strncasecmp(&input, "h", 1)) + { + selected = curses_int("Select hotplug interval in seconds (0 to disable)"); + if (selected < 0 || selected > 9999) + { + wlogprint("Invalid value\n"); + goto retry; + } + hotplug_time = selected; + goto retry; + } + else if 
(!strncasecmp(&input, "l", 1)) + { + usb_list(); + goto retry; + } + else + clear_logwin(); + + immedok(logwin, false); + opt_loginput = false; +} +#endif + +static void *input_thread(void __maybe_unused *userdata) +{ + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + RenameThread("Input"); + + if (!curses_active) + return NULL; + + while (1) + { + char input; + + input = getch(); + if (!strncasecmp(&input, "q", 1)) + { + kill_work(); + return NULL; + } + else if (!strncasecmp(&input, "d", 1)) + display_options(); + else if (!strncasecmp(&input, "p", 1)) + display_pools(); + else if (!strncasecmp(&input, "s", 1)) + set_options(); +#ifdef USE_USBUTILS + else if (!strncasecmp(&input, "u", 1)) + set_usb(); +#endif + if (opt_realquiet) + { + disable_curses(); + break; + } + } + + return NULL; +} +#endif + +static void *api_thread(void *userdata) +{ + struct thr_info *mythr = userdata; + + pthread_detach(pthread_self()); + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + RenameThread("API"); + + set_lowprio(); + api(api_thr_id); + + PTH(mythr) = 0L; + + return NULL; +} + +/* Sole work devices are serialised wrt calling get_work so they report in on + * each pass through their scanhash function as well as in get_work whereas + * queued work devices work asynchronously so get them to report in and out + * only across get_work. 
*/ +static void thread_reportin(struct thr_info *thr) +{ + thr->getwork = false; + cgtime(&thr->last); + thr->cgpu->status = LIFE_WELL; + thr->cgpu->device_last_well = time(NULL); +} + +/* Tell the watchdog thread this thread is waiting on get work and should not + * be restarted */ +static void thread_reportout(struct thr_info *thr) +{ + thr->getwork = true; + cgtime(&thr->last); + thr->cgpu->status = LIFE_WELL; + thr->cgpu->device_last_well = time(NULL); +} + +static void hashmeter(int thr_id, uint64_t hashes_done) +{ + bool showlog = false; + double tv_tdiff; + time_t now_t; + int diff_t; + + uint64_t local_mhashes_done = 0; + uint64_t local_mhashes_done_avg = 0; + int local_mhashes_done_count = 0; + int i = 0; + + cgtime(&total_tv_end); + tv_tdiff = tdiff(&total_tv_end, &tv_hashmeter); + now_t = total_tv_end.tv_sec; + diff_t = now_t - hashdisplay_t; + if (diff_t >= opt_log_interval) + { + alt_status ^= switch_status; + hashdisplay_t = now_t; + showlog = true; + } + else if (thr_id < 0) + { + /* hashmeter is called by non-mining threads in case nothing + * has reported in to allow hashrate to converge to zero , but + * we only update if it has been more than opt_log_interval */ + return; + } + copy_time(&tv_hashmeter, &total_tv_end); + + if (thr_id >= 0) + { + struct thr_info *thr = get_thread(thr_id); + struct cgpu_info *cgpu = thr->cgpu; + double device_tdiff, thr_mhs; + + /* Update the last time this thread reported in */ + copy_time(&thr->last, &total_tv_end); + cgpu->device_last_well = now_t; + device_tdiff = tdiff(&total_tv_end, &cgpu->last_message_tv); + copy_time(&cgpu->last_message_tv, &total_tv_end); + thr_mhs = (double)hashes_done / device_tdiff / 1000000; + applog(LOG_DEBUG, "[thread %d: %"PRIu64" hashes, %.1f mhash/sec]", + thr_id, hashes_done, thr_mhs); + hashes_done /= 1000000; + + mutex_lock(&hash_lock); + cgpu->total_mhashes += hashes_done; + decay_time(&cgpu->rolling, hashes_done, device_tdiff, opt_log_interval); + decay_time(&cgpu->rolling1, 
hashes_done, device_tdiff, 60.0); + decay_time(&cgpu->rolling5, hashes_done, device_tdiff, 300.0); + decay_time(&cgpu->rolling15, hashes_done, device_tdiff, 900.0); + mutex_unlock(&hash_lock); + + if (want_per_device_stats && showlog) + { + char logline[256]; + + get_statline(logline, sizeof(logline), cgpu); + if (!curses_active) + { + printf("%s \r", logline); + fflush(stdout); + } + else + applog(LOG_INFO, "%s", logline); + } + } + else + { + /* No device has reported in, we have been called from the + * watchdog thread so decay all the hashrates */ + mutex_lock(&hash_lock); + for (thr_id = 0; thr_id < mining_threads; thr_id++) + { + struct thr_info *thr = get_thread(thr_id); + struct cgpu_info *cgpu = thr->cgpu; + double device_tdiff = tdiff(&total_tv_end, &cgpu->last_message_tv); + + copy_time(&cgpu->last_message_tv, &total_tv_end); + decay_time(&cgpu->rolling, 0, device_tdiff, opt_log_interval); + decay_time(&cgpu->rolling1, 0, device_tdiff, 60.0); + decay_time(&cgpu->rolling5, 0, device_tdiff, 300.0); + decay_time(&cgpu->rolling15, 0, device_tdiff, 900.0); + } + mutex_unlock(&hash_lock); + } + + mutex_lock(&hash_lock); + total_mhashes_done += hashes_done; + if(showlog) + { + g_local_mhashes_index++; + if(g_local_mhashes_index >= CG_LOCAL_MHASHES_MAX_NUM) + g_local_mhashes_index = 0; + + for(i = 0; i < CG_LOCAL_MHASHES_MAX_NUM; i++) + { + if(g_local_mhashes_dones[i] >= 0) + { + local_mhashes_done_avg += g_local_mhashes_dones[i]; + //applog(LOG_DEBUG, "g_local_mhashes_dones[%d] = %f,%d", i, g_local_mhashes_dones[i],g_local_mhashes_index); + local_mhashes_done_count++; + } + } + + if(local_mhashes_done_count > 0) + { + local_mhashes_done = local_mhashes_done_avg / local_mhashes_done_count; + } + else + { + local_mhashes_done = hashes_done; + } + + decay_time(&total_rolling, local_mhashes_done, opt_log_interval, opt_log_interval); + decay_time(&rolling1, hashes_done, tv_tdiff, 60.0); + decay_time(&rolling5, hashes_done,tv_tdiff, 300.0); + decay_time(&rolling15, 
hashes_done, tv_tdiff, 900.0); + global_hashrate = llround(total_rolling) * 1000000; + g_local_mhashes_dones[g_local_mhashes_index] = 0; + } + g_local_mhashes_dones[g_local_mhashes_index] += hashes_done; + total_secs = tdiff(&total_tv_end, &total_tv_start); + + if(total_secs - last_total_secs > 86400) + { + applog(LOG_ERR, "bmminer time error total_secs = %d last_total_secs = %d", total_secs, last_total_secs); + mutex_unlock(&hash_lock); + zero_stats(); + mutex_lock(&hash_lock); + } + else + { + last_total_secs = total_secs; + } + if (showlog) + { + char displayed_hashes[16], displayed_rolling[16]; + char displayed_r1[16], displayed_r5[16], displayed_r15[16]; + uint64_t d64; + + d64 = (double)total_mhashes_done / total_secs * 1000000ull; + suffix_string(d64, displayed_hashes, sizeof(displayed_hashes), 4); + d64 = (double)total_rolling * 1000000ull; + g_displayed_rolling = total_rolling / 1000.0; + suffix_string(d64, displayed_rolling, sizeof(displayed_rolling), 4); + d64 = (double)rolling1 * 1000000ull; + suffix_string(d64, displayed_r1, sizeof(displayed_rolling), 4); + d64 = (double)rolling5 * 1000000ull; + suffix_string(d64, displayed_r5, sizeof(displayed_rolling), 4); + d64 = (double)rolling15 * 1000000ull; + suffix_string(d64, displayed_r15, sizeof(displayed_rolling), 4); + + snprintf(statusline, sizeof(statusline), + "(%ds):%s (1m):%s (5m):%s (15m):%s (avg):%sh/s", + opt_log_interval, displayed_rolling, displayed_r1, displayed_r5, + displayed_r15, displayed_hashes); + } + mutex_unlock(&hash_lock); + + if (showlog) + { + if (!curses_active) + { + printf("%s \r", statusline); + fflush(stdout); + } + else + applog(LOG_INFO, "%s", statusline); + } +} + +static void stratum_share_result(json_t *val, json_t *res_val, json_t *err_val, + struct stratum_share *sshare) +{ + struct work *work = sshare->work; + time_t now_t = time(NULL); + char hashshow[64]; + int srdiff; + + srdiff = now_t - sshare->sshare_sent; + if (opt_debug || srdiff > 0) + { + applog(LOG_INFO, "Pool 
%d stratum share result lag time %d seconds", + work->pool->pool_no, srdiff); + } + show_hash(work, hashshow); + share_result(val, res_val, err_val, work, hashshow, false, ""); +} + +/* Parses stratum json responses and tries to find the id that the request + * matched to and treat it accordingly. */ +static bool parse_stratum_response(struct pool *pool, char *s) +{ + json_t *val = NULL, *err_val, *res_val, *id_val; + struct stratum_share *sshare = NULL; + json_error_t err; + bool ret = false; + int id; + + val = JSON_LOADS(s, &err); + if (!val) + { + applog(LOG_INFO,"JSON decode failed(%d): %s", err.line, err.text); + goto out; + } + + res_val = json_object_get(val, "result"); + err_val = json_object_get(val, "error"); + id_val = json_object_get(val, "id"); + + if (json_is_null(id_val) || !id_val) + { + char *ss; + + if (err_val) + ss = json_dumps(err_val, JSON_INDENT(3)); + else + ss = strdup("(unknown reason)"); + + /*applog(LOG_INFO,*/printf("JSON-RPC non method decode failed: %s", ss); + + free(ss); + + goto out; + } + + id = json_integer_value(id_val); + + mutex_lock(&sshare_lock); + HASH_FIND_INT(stratum_shares, &id, sshare); + if (sshare) + { + HASH_DEL(stratum_shares, sshare); + pool->sshares--; + } + mutex_unlock(&sshare_lock); + + if (!sshare) + { + double pool_diff; + + if (!res_val) + goto out; + /* Since the share is untracked, we can only guess at what the + * work difficulty is based on the current pool diff. 
*/ + cg_rlock(&pool->data_lock); + pool_diff = pool->sdiff; + cg_runlock(&pool->data_lock); + + if (json_is_true(res_val)) + { + applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no); + + /* We don't know what device this came from so we can't + * attribute the work to the relevant cgpu */ + mutex_lock(&stats_lock); + total_accepted++; + pool->accepted++; + total_diff_accepted += pool_diff; + pool->diff_accepted += pool_diff; + mutex_unlock(&stats_lock); + } + else + { + applog(LOG_NOTICE, "Rejected untracked stratum share from pool %d", pool->pool_no); + + mutex_lock(&stats_lock); + total_rejected++; + pool->rejected++; + total_diff_rejected += pool_diff; + pool->diff_rejected += pool_diff; + mutex_unlock(&stats_lock); + } + goto out; + } + stratum_share_result(val, res_val, err_val, sshare); + free_work(sshare->work); + free(sshare); + + ret = true; +out: + if (val) + json_decref(val); + + return ret; +} + +void clear_stratum_shares(struct pool *pool) +{ + struct stratum_share *sshare, *tmpshare; + double diff_cleared = 0; + int cleared = 0; + + mutex_lock(&sshare_lock); + HASH_ITER(hh, stratum_shares, sshare, tmpshare) + { + if (sshare->work->pool == pool) + { + HASH_DEL(stratum_shares, sshare); + diff_cleared += sshare->work->work_difficulty; + free_work(sshare->work); + pool->sshares--; + free(sshare); + cleared++; + } + } + mutex_unlock(&sshare_lock); + + if (cleared) + { + applog(LOG_WARNING, "Lost %d shares due to stratum disconnect on pool %d", cleared, pool->pool_no); + pool->stale_shares += cleared; + total_stale += cleared; + pool->diff_stale += diff_cleared; + total_diff_stale += diff_cleared; + } +} + +void clear_pool_work(struct pool *pool) +{ + struct work *work, *tmp; + int cleared = 0; + + mutex_lock(stgd_lock); + HASH_ITER(hh, staged_work, work, tmp) + { + if (work->pool == pool) + { + HASH_DEL(staged_work, work); + free_work(work); + cleared++; + } + } + mutex_unlock(stgd_lock); + + if (cleared) + applog(LOG_INFO, 
"Cleared %d work items due to stratum disconnect on pool %d", cleared, pool->pool_no); +} + +static int cp_prio(void) +{ + int prio; + + cg_rlock(&control_lock); + prio = currentpool->prio; + cg_runlock(&control_lock); + + return prio; +} + +/* We only need to maintain a secondary pool connection when we need the + * capacity to get work from the backup pools while still on the primary */ +static bool cnx_needed(struct pool *pool) +{ + struct pool *cp; + + if (pool->enabled != POOL_ENABLED) + return false; + + /* Balance strategies need all pools online */ + if (pool_strategy == POOL_BALANCE) + return true; + if (pool_strategy == POOL_LOADBALANCE) + return true; + + /* Idle stratum pool needs something to kick it alive again */ + if (pool->has_stratum && pool->idle) + return true; + + /* Getwork pools without opt_fail_only need backup pools up to be able + * to leak shares */ + cp = current_pool(); + if (cp == pool) + return true; + if (!pool_localgen(cp) && (!opt_fail_only || !cp->hdr_path)) + return true; + /* If we're waiting for a response from shares submitted, keep the + * connection open. */ + if (pool->sshares) + return true; + /* If the pool has only just come to life and is higher priority than + * the current pool keep the connection open so we can fail back to + * it. */ + if (pool_strategy == POOL_FAILOVER && pool->prio < cp_prio()) + return true; + /* We've run out of work, bring anything back to life. 
*/ + if (no_work) + return true; + return false; +} + +static void wait_lpcurrent(struct pool *pool); +static void pool_resus(struct pool *pool); +static void gen_stratum_work(struct pool *pool, struct work *work); + +void stratum_resumed(struct pool *pool) +{ + if (pool_tclear(pool, &pool->idle)) + { + applog(LOG_INFO, "Stratum connection to pool %d resumed", pool->pool_no); + pool_resus(pool); + } +} + +static bool supports_resume(struct pool *pool) +{ + bool ret; + + cg_rlock(&pool->data_lock); + ret = (pool->sessionid != NULL); + cg_runlock(&pool->data_lock); + + return ret; +} + +/* One stratum receive thread per pool that has stratum waits on the socket + * checking for new messages and for the integrity of the socket connection. We + * reset the connection based on the integrity of the receive side only as the + * send side will eventually expire data it fails to send. */ +static void *stratum_rthread(void *userdata) +{ + struct pool *pool = (struct pool *)userdata; + char threadname[16]; + + pthread_detach(pthread_self()); + + snprintf(threadname, sizeof(threadname), "%d/RStratum", pool->pool_no); + RenameThread(threadname); + + while (42) + { + struct timeval timeout; + int sel_ret; + fd_set rd; + char *s; + + if (unlikely(pool->removed)) + { + suspend_stratum(pool); + break; + } + + /* Check to see whether we need to maintain this connection + * indefinitely or just bring it up when we switch to this + * pool */ + if (!sock_full(pool) && !cnx_needed(pool)) + { + suspend_stratum(pool); + clear_stratum_shares(pool); + clear_pool_work(pool); + + wait_lpcurrent(pool); + while (!restart_stratum(pool)) + { + if (pool->removed) + goto out; + if (enabled_pools > 1) + cgsleep_ms(30000); + else + cgsleep_ms(3000); + } + } + + FD_ZERO(&rd); + FD_SET(pool->sock, &rd); + timeout.tv_sec = 90; + timeout.tv_usec = 0; + + /* The protocol specifies that notify messages should be sent + * every minute so if we fail to receive any for 90 seconds we + * assume the connection 
has been dropped and treat this pool + * as dead */ + if (!sock_full(pool) && (sel_ret = select(pool->sock + 1, &rd, NULL, NULL, &timeout)) < 1) + { + applog(LOG_DEBUG, "Stratum select failed on pool %d with value %d", pool->pool_no, sel_ret); + s = NULL; + } + else + s = recv_line(pool); + if (!s) + { + applog(LOG_NOTICE, "Stratum connection to pool %d interrupted", pool->pool_no); + pool->getfail_occasions++; + total_go++; + + /* If the socket to our stratum pool disconnects, all + * tracked submitted shares are lost and we will leak + * the memory if we don't discard their records. */ + if (!supports_resume(pool) || opt_lowmem) + clear_stratum_shares(pool); + clear_pool_work(pool); + if (pool == current_pool()) + restart_threads(); + + while (!restart_stratum(pool)) + { + if (pool->removed) + goto out; + cgsleep_ms(30000); + } + continue; + } + + /* Check this pool hasn't died while being a backup pool and + * has not had its idle flag cleared */ + stratum_resumed(pool); + if (!parse_method(pool, s) && !parse_stratum_response(pool, s)) + applog(LOG_INFO, "Unknown stratum msg: %s", s); + else if (pool->swork.clean) + { + struct work *work = make_work(); + + /* Generate a single work item to update the current + * block database */ + pool->swork.clean = false; + gen_stratum_work(pool, work); + work->longpoll = true; + /* Return value doesn't matter. We're just informing + * that we may need to restart. */ + test_work_current(work); + free_work(work); + } + free(s); + } + +out: + return NULL; +} + +/* Each pool has one stratum send thread for sending shares to avoid many + * threads being created for submission since all sends need to be serialised + * anyway. 
*/ +static void *stratum_sthread(void *userdata) +{ + struct pool *pool = (struct pool *)userdata; + uint64_t last_nonce2 = 0; + uint32_t last_nonce = 0; + char threadname[16]; + + pthread_detach(pthread_self()); + + snprintf(threadname, sizeof(threadname), "%d/SStratum", pool->pool_no); + RenameThread(threadname); + + pool->stratum_q = tq_new(); + if (!pool->stratum_q) + quit(1, "Failed to create stratum_q in stratum_sthread"); + + while (42) + { + char noncehex[12], nonce2hex[20], s[1024]; + struct stratum_share *sshare; + uint32_t *hash32, nonce; + unsigned char nonce2[8]; + uint64_t *nonce2_64; + struct work *work; + bool submitted; + + if (unlikely(pool->removed)) + break; + + work = tq_pop(pool->stratum_q, NULL); + if (unlikely(!work)) + quit(1, "Stratum q returned empty work"); + + if (unlikely(work->nonce2_len > 8)) + { + applog(LOG_ERR, "Pool %d asking for inappropriately long nonce2 length %d", + pool->pool_no, (int)work->nonce2_len); + applog(LOG_ERR, "Not attempting to submit shares"); + free_work(work); + continue; + } + + nonce = *((uint32_t *)(work->data + 76)); + nonce2_64 = (uint64_t *)nonce2; + *nonce2_64 = htole64(work->nonce2); + /* Filter out duplicate shares */ + if (unlikely(nonce == last_nonce && *nonce2_64 == last_nonce2)) + { + applog(LOG_INFO, "Filtering duplicate share to pool %d", + pool->pool_no); + free_work(work); + continue; + } + last_nonce = nonce; + last_nonce2 = *nonce2_64; + __bin2hex(noncehex, (const unsigned char *)&nonce, 4); + __bin2hex(nonce2hex, nonce2, work->nonce2_len); + + sshare = calloc(sizeof(struct stratum_share), 1); + hash32 = (uint32_t *)work->hash; + submitted = false; + + sshare->sshare_time = time(NULL); + /* This work item is freed in parse_stratum_response */ + sshare->work = work; + memset(s, 0, 1024); + + mutex_lock(&sshare_lock); + /* Give the stratum share a unique id */ + sshare->id = swork_id++; + mutex_unlock(&sshare_lock); + if(pool->support_vil) + { + snprintf(s, sizeof(s), + "{\"params\": [\"%s\", 
\"%s\", \"%s\", \"%s\", \"%s\", \"%d\"], \"id\": %d, \"method\": \"mining.submit\"}", + pool->rpc_user, work->job_id, nonce2hex, work->ntime, noncehex,work->version,sshare->id); + } + else + snprintf(s, sizeof(s), + "{\"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\": %d, \"method\": \"mining.submit\"}", + pool->rpc_user, work->job_id, nonce2hex, work->ntime, noncehex, sshare->id); + + applog(LOG_INFO, "Submitting share %08lx to pool %d", + (long unsigned int)htole32(hash32[6]), pool->pool_no); + + /* Try resubmitting for up to 2 minutes if we fail to submit + * once and the stratum pool nonce1 still matches suggesting + * we may be able to resume. */ + while (time(NULL) < sshare->sshare_time + 120) + { + bool sessionid_match; + + if (likely(stratum_send(pool, s, strlen(s)))) + { + mutex_lock(&sshare_lock); + HASH_ADD_INT(stratum_shares, id, sshare); + pool->sshares++; + mutex_unlock(&sshare_lock); + + if (pool_tclear(pool, &pool->submit_fail)) + applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no); + applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db"); + submitted = true; + break; + } + if (!pool_tset(pool, &pool->submit_fail) && cnx_needed(pool)) + { + applog(LOG_WARNING, "Pool %d stratum share submission failure", pool->pool_no); + total_ro++; + pool->remotefail_occasions++; + } + + if (opt_lowmem) + { + applog(LOG_DEBUG, "Lowmem option prevents resubmitting stratum share"); + break; + } + + cg_rlock(&pool->data_lock); + sessionid_match = (pool->nonce1 && !strcmp(work->nonce1, pool->nonce1)); + cg_runlock(&pool->data_lock); + + if (!sessionid_match) + { + applog(LOG_DEBUG, "No matching session id for resubmitting stratum share"); + break; + } + /* Retry every 5 seconds */ + sleep(5); + } + + if (unlikely(!submitted)) + { + applog(LOG_DEBUG, "Failed to submit stratum share, discarding"); + free_work(work); + free(sshare); + pool->stale_shares++; + total_stale++; + } + else + { + int ssdiff; + + 
sshare->sshare_sent = time(NULL); + ssdiff = sshare->sshare_sent - sshare->sshare_time; + if (opt_debug || ssdiff > 0) + { + applog(LOG_INFO, "Pool %d stratum share submission lag time %d seconds", + pool->pool_no, ssdiff); + } + } + } + + /* Freeze the work queue but don't free up its memory in case there is + * work still trying to be submitted to the removed pool. */ + tq_freeze(pool->stratum_q); + + return NULL; +} + +static void init_stratum_threads(struct pool *pool) +{ + have_longpoll = true; + + if (unlikely(pthread_create(&pool->stratum_sthread, NULL, stratum_sthread, (void *)pool))) + quit(1, "Failed to create stratum sthread"); + if (unlikely(pthread_create(&pool->stratum_rthread, NULL, stratum_rthread, (void *)pool))) + quit(1, "Failed to create stratum rthread"); +} + +static void *longpoll_thread(void *userdata); + +static bool stratum_works(struct pool *pool) +{ + applog(LOG_INFO, "Testing pool %d stratum %s", pool->pool_no, pool->stratum_url); + check_extranonce_option(pool, pool->stratum_url); + if (!extract_sockaddr(pool->stratum_url, &pool->sockaddr_url, &pool->stratum_port)) + return false; + + if (!initiate_stratum(pool)) + return false; + + return true; +} + +#ifdef HAVE_LIBCURL +static void __setup_gbt_solo(struct pool *pool) +{ + cg_wlock(&pool->gbt_lock); + memcpy(pool->coinbase, scriptsig_header_bin, 41); + pool->coinbase[41 + pool->n1_len + 4 + 1 + 8] = 25; + memcpy(pool->coinbase + 41 + pool->n1_len + 4 + 1 + 8 + 1, pool->script_pubkey, 25); + cg_wunlock(&pool->gbt_lock); +} + +static bool setup_gbt_solo(CURL *curl, struct pool *pool) +{ + char s[256]; + int uninitialised_var(rolltime); + bool ret = false; + json_t *val = NULL, *res_val, *valid_val; + + if (!opt_btc_address) + { + applog(LOG_ERR, "No BTC address specified, unable to mine solo on %s", + pool->rpc_url); + goto out; + } + snprintf(s, 256, "{\"method\": \"validateaddress\", \"params\": [\"%s\"]}\n", opt_btc_address); + val = json_rpc_call(curl, pool->rpc_url, 
pool->rpc_userpass, s, true, + false, &rolltime, pool, false); + if (!val) + goto out; + res_val = json_object_get(val, "result"); + if (!res_val) + goto out; + valid_val = json_object_get(res_val, "isvalid"); + if (!valid_val) + goto out; + if (!json_is_true(valid_val)) + { + applog(LOG_ERR, "Bitcoin address %s is NOT valid", opt_btc_address); + goto out; + } + applog(LOG_NOTICE, "Solo mining to valid address: %s", opt_btc_address); + ret = true; + address_to_pubkeyhash(pool->script_pubkey, opt_btc_address); + hex2bin(scriptsig_header_bin, scriptsig_header, 41); + __setup_gbt_solo(pool); + + if (opt_debug) + { + char *cb = bin2hex(pool->coinbase, pool->coinbase_len); + + applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb); + free(cb); + } + pool->gbt_curl = curl_easy_init(); + if (unlikely(!pool->gbt_curl)) + quit(1, "GBT CURL initialisation failed"); + +out: + if (val) + json_decref(val); + return ret; +} +#else +static bool setup_gbt_solo(CURL __maybe_unused *curl, struct pool __maybe_unused *pool) +{ + return false; +} +#endif + +static void pool_start_lp(struct pool *pool) +{ + if (!pool->lp_started) + { + pool->lp_started = true; + if (unlikely(pthread_create(&pool->longpoll_thread, NULL, longpoll_thread, (void *)pool))) + quit(1, "Failed to create pool longpoll thread"); + } +} + +static bool pool_active(struct pool *pool, bool pinging) +{ + struct timeval tv_getwork, tv_getwork_reply; + json_t *val = NULL; + bool ret = false; + CURL *curl; + int uninitialised_var(rolltime); + + if (pool->has_gbt) + applog(LOG_DEBUG, "Retrieving block template from pool %s", pool->rpc_url); + else + applog(LOG_INFO, "Testing pool %s", pool->rpc_url); + + /* This is the central point we activate stratum when we can */ +retry_stratum: + if (pool->has_stratum) + { + /* We create the stratum thread for each pool just after + * successful authorisation. 
Once the init flag has been set + * we never unset it and the stratum thread is responsible for + * setting/unsetting the active flag */ + bool init = pool_tset(pool, &pool->stratum_init); + + if (!init) + { + bool ret = initiate_stratum(pool) && auth_stratum(pool); + extranonce_subscribe_stratum(pool); + if (ret) + init_stratum_threads(pool); + else + pool_tclear(pool, &pool->stratum_init); + return ret; + } + return pool->stratum_active; + } + + curl = curl_easy_init(); + if (unlikely(!curl)) + { + applog(LOG_ERR, "CURL initialisation failed"); + return false; + } + + /* Probe for GBT support on first pass */ + if (!pool->probed) + { + applog(LOG_DEBUG, "Probing for GBT support"); + val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, + gbt_req, true, false, &rolltime, pool, false); + if (val) + { + bool append = false, submit = false, transactions = false; + json_t *res_val, *mutables; + int i, mutsize = 0; + + res_val = json_object_get(val, "result"); + if (res_val) + { + mutables = json_object_get(res_val, "mutable"); + mutsize = json_array_size(mutables); + } + + for (i = 0; i < mutsize; i++) + { + json_t *arrval = json_array_get(mutables, i); + + if (json_is_string(arrval)) + { + const char *mutable = json_string_value(arrval); + + if (!strncasecmp(mutable, "coinbase/append", 15)) + append = true; + else if (!strncasecmp(mutable, "submit/coinbase", 15)) + submit = true; + else if (!strncasecmp(mutable, "transactions", 12)) + transactions = true; + } + } + json_decref(val); + + /* Only use GBT if it supports coinbase append and + * submit coinbase */ + if (append && submit) + { + pool->has_gbt = true; + pool->rpc_req = gbt_req; + } + else if (transactions) + { + pool->gbt_solo = true; + pool->rpc_req = gbt_solo_req; + } + } + /* Reset this so we can probe fully just after this. 
It will be + * set to true that time.*/ + pool->probed = false; + + if (pool->has_gbt) + applog(LOG_DEBUG, "GBT coinbase + append support found, switching to GBT protocol"); + else if (pool->gbt_solo) + applog(LOG_DEBUG, "GBT coinbase without append found, switching to GBT solo protocol"); + else + applog(LOG_DEBUG, "No GBT coinbase + append support found, using getwork protocol"); + } + + cgtime(&tv_getwork); + val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, + pool->rpc_req, true, false, &rolltime, pool, false); + cgtime(&tv_getwork_reply); + + /* Detect if a http getwork pool has an X-Stratum header at startup, + * and if so, switch to that in preference to getwork if it works */ + if (pool->stratum_url && !opt_fix_protocol && stratum_works(pool)) + { + applog(LOG_NOTICE, "Switching pool %d %s to %s", pool->pool_no, pool->rpc_url, pool->stratum_url); + if (!pool->rpc_url) + pool->rpc_url = strdup(pool->stratum_url); + pool->has_stratum = true; + curl_easy_cleanup(curl); + + goto retry_stratum; + } + + if (val) + { + struct work *work = make_work(); + bool rc; + + rc = work_decode(pool, work, val); + if (rc) + { + if (pool->gbt_solo) + { + ret = setup_gbt_solo(curl, pool); + if (ret) + pool_start_lp(pool); + free_work(work); + goto out; + } + applog(LOG_DEBUG, "Successfully retrieved and deciphered work from pool %u %s", + pool->pool_no, pool->rpc_url); + work->pool = pool; + work->rolltime = rolltime; + copy_time(&work->tv_getwork, &tv_getwork); + copy_time(&work->tv_getwork_reply, &tv_getwork_reply); + work->getwork_mode = GETWORK_MODE_TESTPOOL; + calc_diff(work, 0); + applog(LOG_DEBUG, "Pushing pooltest work to base pool"); + + stage_work(work); + total_getworks++; + pool->getwork_requested++; + ret = true; + } + else + { + applog(LOG_DEBUG, "Successfully retrieved but FAILED to decipher work from pool %u %s", + pool->pool_no, pool->rpc_url); + free_work(work); + } + + if (pool->lp_url) + goto out; + + /* Decipher the longpoll URL, if any, and 
store it in ->lp_url */ + if (pool->hdr_path) + { + char *copy_start, *hdr_path; + bool need_slash = false; + size_t siz; + + hdr_path = pool->hdr_path; + if (strstr(hdr_path, "://")) + { + pool->lp_url = hdr_path; + hdr_path = NULL; + } + else + { + /* absolute path, on current server */ + copy_start = (*hdr_path == '/') ? (hdr_path + 1) : hdr_path; + if (pool->rpc_url[strlen(pool->rpc_url) - 1] != '/') + need_slash = true; + + siz = strlen(pool->rpc_url) + strlen(copy_start) + 2; + pool->lp_url = malloc(siz); + if (!pool->lp_url) + { + applog(LOG_ERR, "Malloc failure in pool_active"); + return false; + } + + snprintf(pool->lp_url, siz, "%s%s%s", pool->rpc_url, need_slash ? "/" : "", copy_start); + } + } + else + pool->lp_url = NULL; + + pool_start_lp(pool); + } + else + { + /* If we failed to parse a getwork, this could be a stratum + * url without the prefix stratum+tcp:// so let's check it */ + if (initiate_stratum(pool)) + { + pool->has_stratum = true; + goto retry_stratum; + } + applog(LOG_DEBUG, "FAILED to retrieve work from pool %u %s", + pool->pool_no, pool->rpc_url); + if (!pinging && !pool->idle) + applog(LOG_WARNING, "Pool %u slow/down or URL or credentials invalid", pool->pool_no); + } +out: + if (val) + json_decref(val); + curl_easy_cleanup(curl); + return ret; +} + +static void pool_resus(struct pool *pool) +{ + pool->seq_getfails = 0; + if (pool_strategy == POOL_FAILOVER && pool->prio < cp_prio()) + applog(LOG_WARNING, "Pool %d %s alive, testing stability", pool->pool_no, pool->rpc_url); + else + applog(LOG_INFO, "Pool %d %s alive", pool->pool_no, pool->rpc_url); +} + +static bool work_filled; +static bool work_emptied; + +/* If this is called non_blocking, it will return NULL for work so that must + * be handled. 
*/ +static struct work *hash_pop(bool blocking) +{ + struct work *work = NULL, *tmp; + int hc; + + mutex_lock(stgd_lock); + if (!HASH_COUNT(staged_work)) + { + /* Increase the queue if we reach zero and we know we can reach + * the maximum we're asking for. */ + if (work_filled && max_queue < opt_queue) + { + max_queue++; + work_filled = false; + } + work_emptied = true; + if (!blocking) + goto out_unlock; + do + { + struct timespec then; + struct timeval now; + int rc; + + cgtime(&now); + then.tv_sec = now.tv_sec + 10; + then.tv_nsec = now.tv_usec * 1000; + pthread_cond_signal(&gws_cond); + rc = pthread_cond_timedwait(&getq->cond, stgd_lock, &then); + /* Check again for !no_work as multiple threads may be + * waiting on this condition and another may set the + * bool separately. */ + if (rc && !no_work) + { + no_work = true; + applog(LOG_WARNING, "Waiting for work to be available from pools."); + } + } + while (!HASH_COUNT(staged_work)); + } + + if (no_work) + { + applog(LOG_WARNING, "Work available from pools, resuming."); + no_work = false; + } + + hc = HASH_COUNT(staged_work); + /* Find clone work if possible, to allow masters to be reused */ + if (hc > staged_rollable) + { + HASH_ITER(hh, staged_work, work, tmp) + { + if (!work_rollable(work)) + break; + } + } + else + work = staged_work; + HASH_DEL(staged_work, work); + if (work_rollable(work)) + staged_rollable--; + + /* Signal the getwork scheduler to look for more work */ + pthread_cond_signal(&gws_cond); + + /* Signal hash_pop again in case there are mutliple hash_pop waiters */ + pthread_cond_signal(&getq->cond); + + /* Keep track of last getwork grabbed */ + last_getwork = time(NULL); +out_unlock: + mutex_unlock(stgd_lock); + + return work; +} + +static void gen_hash(unsigned char *data, unsigned char *hash, int len) +{ + unsigned char hash1[32]; + + sha256(data, len, hash1); + sha256(hash1, 32, hash); +} + +void set_target(unsigned char *dest_target, double diff) +{ + unsigned char target[32]; + 
uint64_t *data64, h64; + double d64, dcut64; + + if (unlikely(diff == 0.0)) + { + /* This shouldn't happen but best we check to prevent a crash */ + applog(LOG_ERR, "Diff zero passed to set_target"); + diff = 1.0; + } + + d64 = truediffone; + d64 /= diff; + + dcut64 = d64 / bits192; + h64 = dcut64; + data64 = (uint64_t *)(target + 24); + *data64 = htole64(h64); + dcut64 = h64; + dcut64 *= bits192; + d64 -= dcut64; + + dcut64 = d64 / bits128; + h64 = dcut64; + data64 = (uint64_t *)(target + 16); + *data64 = htole64(h64); + dcut64 = h64; + dcut64 *= bits128; + d64 -= dcut64; + + dcut64 = d64 / bits64; + h64 = dcut64; + data64 = (uint64_t *)(target + 8); + *data64 = htole64(h64); + dcut64 = h64; + dcut64 *= bits64; + d64 -= dcut64; + + h64 = d64; + data64 = (uint64_t *)(target); + *data64 = htole64(h64); + + if (opt_debug) + { + char *htarget = bin2hex(target, 32); + + applog(LOG_DEBUG, "Generated target %s", htarget); + free(htarget); + } + memcpy(dest_target, target, 32); +} + +#if defined (USE_AVALON2) || defined (USE_AVALON4) || defined (USE_HASHRATIO) +bool submit_nonce2_nonce(struct thr_info *thr, struct pool *pool, struct pool *real_pool, + uint32_t nonce2, uint32_t nonce, uint32_t ntime) +{ + const int thr_id = thr->id; + struct cgpu_info *cgpu = thr->cgpu; + struct device_drv *drv = cgpu->drv; + struct work *work = make_work(); + bool ret; + + cg_wlock(&pool->data_lock); + pool->nonce2 = nonce2; + cg_wunlock(&pool->data_lock); + + gen_stratum_work(pool, work); + while (ntime--) + { + roll_work(work); + } + + work->pool = real_pool; + + work->thr_id = thr_id; + work->work_block = work_block; + work->pool->works++; + + work->mined = true; + work->device_diff = MIN(drv->max_diff, work->work_difficulty); + work->device_diff = MAX(drv->min_diff, work->device_diff); + + ret = submit_nonce(thr, work, nonce); + free_work(work); + return ret; +} +#endif + + +#if defined USE_BITMAIN_C5 +void get_work_by_nonce2(struct thr_info *thr, struct work **work,struct pool *pool, 
struct pool *real_pool, + uint64_t nonce2, uint32_t ntime, uint32_t version) +{ + *work = make_work(); + const int thr_id = thr->id; + struct cgpu_info *cgpu = thr->cgpu; + struct device_drv *drv = cgpu->drv; + cg_wlock(&pool->data_lock); + pool->nonce2 = nonce2; + //if(pool->support_vil) + version = Swap32(version); + memcpy(pool->header_bin, &version, 4); + cg_wunlock(&pool->data_lock); + + gen_stratum_work(pool, *work); + + (*work)->pool = real_pool; + + (*work)->thr_id = thr_id; + (*work)->work_block = work_block; + (*work)->pool->works++; + + (*work)->mined = true; + (*work)->version = version; +} + +#endif + +/* Generates stratum based work based on the most recent notify information + * from the pool. This will keep generating work while a pool is down so we use + * other means to detect when the pool has died in stratum_thread */ +static void gen_stratum_work(struct pool *pool, struct work *work) +{ + unsigned char merkle_root[32], merkle_sha[64]; + uint32_t *data32, *swap32; + uint64_t nonce2le; + int i; + + cg_wlock(&pool->data_lock); + + /* Update coinbase. 
Always use an LE encoded nonce2 to fill in values + * from left to right and prevent overflow errors with small n2sizes */ + nonce2le = htole64(pool->nonce2); + memcpy(pool->coinbase + pool->nonce2_offset, &nonce2le, pool->n2size); + work->nonce2 = pool->nonce2++; + work->nonce2_len = pool->n2size; + + /* Downgrade to a read lock to read off the pool variables */ + cg_dwlock(&pool->data_lock); + + /* Generate merkle root */ + gen_hash(pool->coinbase, merkle_root, pool->coinbase_len); + memcpy(merkle_sha, merkle_root, 32); + for (i = 0; i < pool->merkles; i++) + { + memcpy(merkle_sha + 32, pool->swork.merkle_bin[i], 32); + gen_hash(merkle_sha, merkle_root, 64); + memcpy(merkle_sha, merkle_root, 32); + } + data32 = (uint32_t *)merkle_sha; + swap32 = (uint32_t *)merkle_root; + flip32(swap32, data32); + + /* Copy the data template from header_bin */ + memcpy(work->data, pool->header_bin, 112); + memcpy(work->data + 36, merkle_root, 32); + + /* Store the stratum work diff to check it still matches the pool's + * stratum diff when submitting shares */ + work->sdiff = pool->sdiff; + + /* Copy parameters required for share submission */ + work->job_id = strdup(pool->swork.job_id); + work->nonce1 = strdup(pool->nonce1); + work->ntime = strdup(pool->ntime); + cg_runlock(&pool->data_lock); + + if (opt_debug) + { + char *header, *merkle_hash; + + header = bin2hex(work->data, 112); + merkle_hash = bin2hex((const unsigned char *)merkle_root, 32); + //applog(LOG_DEBUG, "Generated stratum merkle %s", merkle_hash); + //applog(LOG_DEBUG, "Generated stratum header %s", header); + //applog(LOG_DEBUG, "Work job_id %s nonce2 %"PRIu64" ntime %s", work->job_id, + // work->nonce2, work->ntime); + free(header); + free(merkle_hash); + } + + calc_midstate(work); + set_target(work->target, work->sdiff); + + local_work++; + if((time(NULL) - local_work_lasttime) > 5) + { + int diff = local_work - local_work_last; + //applog(LOG_DEBUG, "local_work 5s gen work count:%d", diff/(time(NULL) - 
local_work_lasttime)); + local_work_lasttime = time(NULL); + local_work_last = local_work; + } + + work->pool = pool; + work->stratum = true; + work->nonce = 0; + work->longpoll = false; + work->getwork_mode = GETWORK_MODE_STRATUM; + work->work_block = work_block; + /* Nominally allow a driver to ntime roll 60 seconds */ + work->drv_rolllimit = 60; + calc_diff(work, work->sdiff); + + cgtime(&work->tv_staged); +} + +#ifdef HAVE_LIBCURL +static void gen_solo_work(struct pool *pool, struct work *work); + +/* Use the one instance of gbt_curl, protecting the bool with the gbt_lock but + * avoiding holding the lock once we've set the bool. */ +static void get_gbt_curl(struct pool *pool, int poll) +{ + cg_ilock(&pool->gbt_lock); + while (pool->gbt_curl_inuse) + { + cg_uilock(&pool->gbt_lock); + cgsleep_ms(poll); + cg_ilock(&pool->gbt_lock); + } + cg_ulock(&pool->gbt_lock); + pool->gbt_curl_inuse = true; + cg_wunlock(&pool->gbt_lock); +} + +/* No need for locking here */ +static inline void release_gbt_curl(struct pool *pool) +{ + pool->gbt_curl_inuse = false; +} + +static void update_gbt_solo(struct pool *pool) +{ + struct work *work = make_work(); + int rolltime; + json_t *val; + + get_gbt_curl(pool, 10); +retry: + /* Bitcoind doesn't like many open RPC connections. 
*/ + curl_easy_setopt(pool->gbt_curl, CURLOPT_FORBID_REUSE, 1); + val = json_rpc_call(pool->gbt_curl, pool->rpc_url, pool->rpc_userpass, pool->rpc_req, + true, false, &rolltime, pool, false); + + if (likely(val)) + { + bool rc = work_decode(pool, work, val); + + if (rc) + { + __setup_gbt_solo(pool); + gen_solo_work(pool, work); + stage_work(work); + } + else + free_work(work); + json_decref(val); + } + else + { + applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get gbt, retrying in 5s", + pool->pool_no); + if (++pool->seq_getfails > 5) + { + pool_died(pool); + goto out; + } + cgsleep_ms(5000); + goto retry; + } +out: + release_gbt_curl(pool); +} + +static void gen_solo_work(struct pool *pool, struct work *work) +{ + unsigned char merkle_root[32], merkle_sha[64]; + uint32_t *data32, *swap32; + struct timeval now; + uint64_t nonce2le; + int i; + + cgtime(&now); + if (now.tv_sec - pool->tv_lastwork.tv_sec > 60) + update_gbt_solo(pool); + + cg_wlock(&pool->gbt_lock); + + /* Update coinbase. 
Always use an LE encoded nonce2 to fill in values + * from left to right and prevent overflow errors with small n2sizes */ + nonce2le = htole64(pool->nonce2); + memcpy(pool->coinbase + pool->nonce2_offset, &nonce2le, pool->n2size); + work->nonce2 = pool->nonce2++; + work->nonce2_len = pool->n2size; + work->gbt_txns = pool->transactions + 1; + + /* Downgrade to a read lock to read off the pool variables */ + cg_dwlock(&pool->gbt_lock); + work->coinbase = bin2hex(pool->coinbase, pool->coinbase_len); + /* Generate merkle root */ + gen_hash(pool->coinbase, merkle_root, pool->coinbase_len); + memcpy(merkle_sha, merkle_root, 32); + for (i = 0; i < pool->merkles; i++) + { + unsigned char *merkle_bin; + + merkle_bin = pool->merklebin + (i * 32); + memcpy(merkle_sha + 32, merkle_bin, 32); + gen_hash(merkle_sha, merkle_root, 64); + memcpy(merkle_sha, merkle_root, 32); + } + data32 = (uint32_t *)merkle_sha; + swap32 = (uint32_t *)merkle_root; + flip32(swap32, data32); + + /* Copy the data template from header_bin */ + memcpy(work->data, pool->header_bin, 112); + memcpy(work->data + 36, merkle_root, 32); + + work->sdiff = pool->sdiff; + + /* Copy parameters required for share submission */ + work->ntime = strdup(pool->ntime); + memcpy(work->target, pool->gbt_target, 32); + cg_runlock(&pool->gbt_lock); + + if (opt_debug) + { + char *header, *merkle_hash; + + header = bin2hex(work->data, 112); + merkle_hash = bin2hex((const unsigned char *)merkle_root, 32); + applog(LOG_DEBUG, "Generated GBT solo merkle %s", merkle_hash); + applog(LOG_DEBUG, "Generated GBT solo header %s", header); + applog(LOG_DEBUG, "Work nonce2 %"PRIu64" ntime %s", work->nonce2, + work->ntime); + free(header); + free(merkle_hash); + } + + calc_midstate(work); + + local_work++; + work->gbt = true; + work->pool = pool; + work->nonce = 0; + work->longpoll = false; + work->getwork_mode = GETWORK_MODE_SOLO; + work->work_block = work_block; + /* Nominally allow a driver to ntime roll 60 seconds */ + 
work->drv_rolllimit = 60; + calc_diff(work, work->sdiff); + + cgtime(&work->tv_staged); +} +#endif + +/* The time difference in seconds between when this device last got work via + * get_work() and generated a valid share. */ +int share_work_tdiff(struct cgpu_info *cgpu) +{ + return last_getwork - cgpu->last_device_valid_work; +} + +static void set_benchmark_work(struct cgpu_info *cgpu, struct work *work) +{ + cgpu->lodiff += cgpu->direction; + if (cgpu->lodiff < 1) + cgpu->direction = 1; + if (cgpu->lodiff > 15) + { + cgpu->direction = -1; + if (++cgpu->hidiff > 15) + cgpu->hidiff = 0; + memcpy(work, &bench_hidiff_bins[cgpu->hidiff][0], 160); + } + else + memcpy(work, &bench_lodiff_bins[cgpu->lodiff][0], 160); +} + +struct work *get_work(struct thr_info *thr, const int thr_id) +{ + struct cgpu_info *cgpu = thr->cgpu; + struct work *work = NULL; + time_t diff_t; + + thread_reportout(thr); + applog(LOG_DEBUG, "Popping work from get queue to get work"); + diff_t = time(NULL); + while (!work) + { + work = hash_pop(true); + if (stale_work(work, false)) + { + discard_work(work); + wake_gws(); + } + } + diff_t = time(NULL) - diff_t; + /* Since this is a blocking function, we need to add grace time to + * the device's last valid work to not make outages appear to be + * device failures. 
*/ + if (diff_t > 0) + { + applog(LOG_DEBUG, "Get work blocked for %d seconds", (int)diff_t); + cgpu->last_device_valid_work += diff_t; + } + applog(LOG_DEBUG, "Got work from get queue to get work for thread %d", thr_id); + + work->thr_id = thr_id; + if (opt_benchmark) + set_benchmark_work(cgpu, work); + + thread_reportin(thr); + work->mined = true; + work->device_diff = MIN(cgpu->drv->max_diff, work->work_difficulty); + work->device_diff = MAX(cgpu->drv->min_diff, work->device_diff); + return work; +} + +/* Submit a copy of the tested, statistic recorded work item asynchronously */ +static void submit_work_async(struct work *work) +{ + struct pool *pool = work->pool; + pthread_t submit_thread; + + cgtime(&work->tv_work_found); + if (opt_benchmark) + { + struct cgpu_info *cgpu = get_thr_cgpu(work->thr_id); + + mutex_lock(&stats_lock); + cgpu->accepted++; + total_accepted++; + pool->accepted++; + cgpu->diff_accepted += work->work_difficulty; + total_diff_accepted += work->work_difficulty; + pool->diff_accepted += work->work_difficulty; + mutex_unlock(&stats_lock); + + applog(LOG_NOTICE, "Accepted %s %d benchmark share nonce %08x", + cgpu->drv->name, cgpu->device_id, *(uint32_t *)(work->data + 64 + 12)); + return; + } + + if (stale_work(work, true)) + { + if (opt_submit_stale) + applog(LOG_NOTICE, "Pool %d stale share detected, submitting as user requested", pool->pool_no); + else if (pool->submit_old) + applog(LOG_NOTICE, "Pool %d stale share detected, submitting as pool requested", pool->pool_no); + else + { + applog(LOG_NOTICE, "Pool %d stale share detected, discarding", pool->pool_no); + sharelog("discard", work); + + mutex_lock(&stats_lock); + total_stale++; + pool->stale_shares++; + total_diff_stale += work->work_difficulty; + pool->diff_stale += work->work_difficulty; + mutex_unlock(&stats_lock); + + free_work(work); + return; + } + work->stale = true; + } + + if (work->stratum) + { + applog(LOG_DEBUG, "Pushing pool %d work to stratum queue", pool->pool_no); + 
if (unlikely(!tq_push(pool->stratum_q, work))) + { + applog(LOG_DEBUG, "Discarding work from removed pool"); + free_work(work); + } + } + else + { + applog(LOG_DEBUG, "Pushing submit work to work thread"); + if (unlikely(pthread_create(&submit_thread, NULL, submit_work_thread, (void *)work))) + quit(1, "Failed to create submit_work_thread"); + } +} + +void inc_hw_errors(struct thr_info *thr) +{ + forcelog(LOG_INFO, "%s %d: invalid nonce - HW error", thr->cgpu->drv->name, + thr->cgpu->device_id); + + mutex_lock(&stats_lock); + hw_errors++; + thr->cgpu->hw_errors++; + mutex_unlock(&stats_lock); + + thr->cgpu->drv->hw_error(thr); +} + +void inc_hw_errors_with_diff(struct thr_info *thr, int diff) +{ + applog(LOG_ERR, "%s%d: invalid nonce - HW error", thr->cgpu->drv->name, + thr->cgpu->device_id); + + mutex_lock(&stats_lock); + hw_errors += diff ; + thr->cgpu->hw_errors += diff; + mutex_unlock(&stats_lock); + + thr->cgpu->drv->hw_error(thr); +} + + + +void inc_dev_status(int max_fan, int max_temp) +{ + mutex_lock(&stats_lock); + g_max_fan = max_fan; + g_max_temp = max_temp; + mutex_unlock(&stats_lock); +} + +/* Fills in the work nonce and builds the output data in work->hash */ +static void rebuild_nonce(struct work *work, uint32_t nonce) +{ + uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12); + + *work_nonce = htole32(nonce); + + regen_hash(work); +} + +/* For testing a nonce against diff 1 */ +bool test_nonce(struct work *work, uint32_t nonce) +{ + uint32_t *hash_32 = (uint32_t *)(work->hash + 28); + + rebuild_nonce(work, nonce); + return (*hash_32 == 0); +} + +/* For testing a nonce against an arbitrary diff */ +bool test_nonce_diff(struct work *work, uint32_t nonce, double diff) +{ + uint64_t *hash64 = (uint64_t *)(work->hash + 24), diff64; + + rebuild_nonce(work, nonce); + diff64 = 0x00000000ffff0000ULL; + diff64 /= diff; + + return (le64toh(*hash64) <= diff64); +} + +static void update_work_stats(struct thr_info *thr, struct work *work) +{ + double 
test_diff = current_diff; + + work->share_diff = share_diff(work); + + if (unlikely(work->share_diff >= test_diff)) + { + work->block = true; + work->pool->solved++; + found_blocks++; + work->mandatory = true; + applog(LOG_NOTICE, "Found block for pool %d!", work->pool->pool_no); + } + + mutex_lock(&stats_lock); + total_diff1 += work->device_diff; + thr->cgpu->diff1 += work->device_diff; + work->pool->diff1 += work->device_diff; + thr->cgpu->last_device_valid_work = time(NULL); + mutex_unlock(&stats_lock); +} + +void inc_work_stats(struct thr_info *thr, struct pool *pool, int diff1) +{ + mutex_lock(&stats_lock); + total_diff1 += diff1; + thr->cgpu->diff1 += diff1; + if(pool) + { + pool->diff1 += diff1; + } + else + { + pool = current_pool(); + pool->diff1 += diff1; + } + thr->cgpu->last_device_valid_work = time(NULL); + mutex_unlock(&stats_lock); +} + + +/* To be used once the work has been tested to be meet diff1 and has had its + * nonce adjusted. Returns true if the work target is met. */ +bool submit_tested_work(struct thr_info *thr, struct work *work) +{ + struct work *work_out; + update_work_stats(thr, work); + + if (!fulltest(work->hash, work->target)) + { + applog(LOG_INFO, "%s %s %d: Share above target",__FUNCTION__, thr->cgpu->drv->name, + thr->cgpu->device_id); + return false; + } + work_out = copy_work(work); + submit_work_async(work_out); + return true; +} + +/* Rudimentary test to see if cgpu has returned the same nonce twice in a row which is + * always going to be a duplicate which should be reported as a hw error. 
*/ +static bool new_nonce(struct thr_info *thr, uint32_t nonce) +{ + struct cgpu_info *cgpu = thr->cgpu; + + if (unlikely(cgpu->last_nonce == nonce)) + { + applog(LOG_INFO, "%s %d duplicate share detected as HW error", + cgpu->drv->name, cgpu->device_id); + return false; + } + cgpu->last_nonce = nonce; + return true; +} + +/* Returns true if nonce for work was a valid share and not a dupe of the very last + * nonce submitted by this device. */ +bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce) +{ + if (new_nonce(thr, nonce) && test_nonce(work, nonce)) + submit_tested_work(thr, work); + else + { + inc_hw_errors(thr); + return false; + } + + if (opt_benchfile && opt_benchfile_display) + benchfile_dspwork(work, nonce); + + return true; +} + +bool submit_nonce_1(struct thr_info *thr, struct work *work, uint32_t nonce, int * nofull) +{ + if(nofull) *nofull = 0; + if (test_nonce(work, nonce)) + { + update_work_stats(thr, work); + if (!fulltest(work->hash, work->target)) + { + if(nofull) *nofull = 1; + applog(LOG_INFO, "Share above target"); + return false; + } + } + else + { + inc_hw_errors(thr); + return false; + } + return true; +} + +void submit_nonce_2(struct work *work) +{ + struct work *work_out; + work_out = copy_work(work); + submit_work_async(work_out); +} + +bool submit_nonce_direct(struct thr_info *thr, struct work *work, uint32_t nonce) +{ + struct work *work_out; + uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12); + *work_nonce = htole32(nonce); + + work_out = copy_work(work); + submit_work_async(work_out); + return true; +} + +/* Allows drivers to submit work items where the driver has changed the ntime + * value by noffset. Must be only used with a work protocol that does not ntime + * roll itself intrinsically to generate work (eg stratum). We do not touch + * the original work struct, but the copy of it only. 
 */
bool submit_noffset_nonce(struct thr_info *thr, struct work *work_in, uint32_t nonce,
			  int noffset)
{
	struct work *work = make_work();
	bool ret = false;

	/* Work on an ntime-rolled copy; work_in is never modified. */
	_copy_work(work, work_in, noffset);
	if (!test_nonce(work, nonce))
	{
		free_work(work);
		inc_hw_errors(thr);
		goto out;
	}
	update_work_stats(thr, work);

	if (opt_benchfile && opt_benchfile_display)
		benchfile_dspwork(work, nonce);

	/* A diff1-valid share returns true even if it is above the work
	 * target and therefore not submitted. */
	ret = true;
	if (!fulltest(work->hash, work->target))
	{
		free_work(work);
		applog(LOG_INFO, "%s %d: Share above target", thr->cgpu->drv->name,
		       thr->cgpu->device_id);
		goto out;
	}
	/* Ownership of the copy passes to the async submitter. */
	submit_work_async(work);

out:
	return ret;
}

/* True when the current work item should be dropped: scanned past the
 * configured scantime, near nonce-range exhaustion, or gone stale. */
static inline bool abandon_work(struct work *work, struct timeval *wdiff, uint64_t hashes)
{
	if (wdiff->tv_sec > opt_scantime || hashes >= 0xfffffffe ||
	    stale_work(work, false))
		return true;
	return false;
}

/* Park a paused/disabled mining thread on its semaphore until re-enabled. */
static void mt_disable(struct thr_info *mythr, const int thr_id,
		       struct device_drv *drv)
{
	applog(LOG_WARNING, "Thread %d being disabled", thr_id);
	mythr->cgpu->rolling = 0;
	applog(LOG_DEBUG, "Waiting on sem in miner thread");
	cgsem_wait(&mythr->sem);
	applog(LOG_WARNING, "Thread %d being re-enabled", thr_id);
	drv->thread_enable(mythr);
}

/* The main hashing loop for devices that are slow enough to work on one work
 * item at a time, without a queue, aborting work before the entire nonce
 * range has been hashed if needed. */
static void hash_sole_work(struct thr_info *mythr)
{
	const int thr_id = mythr->id;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	struct timeval getwork_start, tv_start, *tv_end, tv_workstart, tv_lastupdate;
	struct cgminer_stats *dev_stats = &(cgpu->cgminer_stats);
	struct cgminer_stats *pool_stats;
	/* Try to cycle approximately 5 times before each log update */
	const long cycle = opt_log_interval / 5 ? : 1;
	const bool primary = (!mythr->device_thread) || mythr->primary_thread;
	struct timeval diff, sdiff, wdiff = {0, 0};
	uint32_t max_nonce = drv->can_limit_work(mythr);
	int64_t hashes_done = 0;

	/* tv_end deliberately aliases getwork_start so the cgtime() call after
	 * scanhash stamps both the scan end and the next getwork start. */
	tv_end = &getwork_start;
	cgtime(&getwork_start);
	sdiff.tv_sec = sdiff.tv_usec = 0;
	cgtime(&tv_lastupdate);

	while (likely(!cgpu->shutdown))
	{
		struct work *work = get_work(mythr, thr_id);
		int64_t hashes;

		mythr->work_restart = false;
		cgpu->new_work = true;

		cgtime(&tv_workstart);
		work->nonce = 0;
		cgpu->max_hashes = 0;
		if (!drv->prepare_work(mythr, work))
		{
			applog(LOG_ERR, "work prepare failed, exiting "
			       "mining thread %d", thr_id);
			break;
		}
		/* Clamp the target difficulty to what the device supports. */
		work->device_diff = MIN(drv->max_diff, work->work_difficulty);
		work->device_diff = MAX(drv->min_diff, work->device_diff);

		do
		{
			cgtime(&tv_start);

			/* getwork_start currently holds the time spent waiting
			 * for work; fold it into device and pool stats. */
			subtime(&tv_start, &getwork_start);

			addtime(&getwork_start, &dev_stats->getwork_wait);
			if (time_more(&getwork_start, &dev_stats->getwork_wait_max))
				copy_time(&dev_stats->getwork_wait_max, &getwork_start);
			if (time_less(&getwork_start, &dev_stats->getwork_wait_min))
				copy_time(&dev_stats->getwork_wait_min, &getwork_start);
			dev_stats->getwork_calls++;

			pool_stats = &(work->pool->cgminer_stats);

			addtime(&getwork_start, &pool_stats->getwork_wait);
			if (time_more(&getwork_start, &pool_stats->getwork_wait_max))
				copy_time(&pool_stats->getwork_wait_max, &getwork_start);
			if (time_less(&getwork_start, &pool_stats->getwork_wait_min))
				copy_time(&pool_stats->getwork_wait_min, &getwork_start);
			pool_stats->getwork_calls++;

			cgtime(&(work->tv_work_start));

			/* Only allow the mining thread to be cancelled when
			 * it is not in the driver code. */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);

			thread_reportin(mythr);
			hashes = drv->scanhash(mythr, work, work->nonce + max_nonce);
			thread_reportout(mythr);

			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
			pthread_testcancel();

			/* tv_end is == &getwork_start */
			cgtime(&getwork_start);

			if (unlikely(hashes == -1))
			{
				applog(LOG_ERR, "%s %d failure, disabling!", drv->name, cgpu->device_id);
				cgpu->deven = DEV_DISABLED;
				dev_error(cgpu, REASON_THREAD_ZERO_HASH);
				cgpu->shutdown = true;
				break;
			}

			hashes_done += hashes;
			if (hashes > cgpu->max_hashes)
				cgpu->max_hashes = hashes;

			/* sdiff accumulates time spent in this scan cycle. */
			timersub(tv_end, &tv_start, &diff);
			sdiff.tv_sec += diff.tv_sec;
			sdiff.tv_usec += diff.tv_usec;
			if (sdiff.tv_usec > 1000000)
			{
				++sdiff.tv_sec;
				sdiff.tv_usec -= 1000000;
			}

			timersub(tv_end, &tv_workstart, &wdiff);

			/* Adapt max_nonce so each scanhash call takes roughly
			 * one 'cycle' seconds: scale up when the scan finished
			 * early, down when it overran. */
			if (unlikely((long)sdiff.tv_sec < cycle))
			{
				int mult;

				if (likely(max_nonce == 0xffffffff))
					continue;

				mult = 1000000 / ((sdiff.tv_usec + 0x400) / 0x400) + 0x10;
				mult *= cycle;
				if (max_nonce > (0xffffffff * 0x400) / mult)
					max_nonce = 0xffffffff;
				else
					max_nonce = (max_nonce * mult) / 0x400;
			}
			else if (unlikely(sdiff.tv_sec > cycle))
				max_nonce = max_nonce * cycle / sdiff.tv_sec;
			else if (unlikely(sdiff.tv_usec > 100000))
				max_nonce = max_nonce * 0x400 / (((cycle * 1000000) + sdiff.tv_usec) / (cycle * 1000000 / 0x400));

			timersub(tv_end, &tv_lastupdate, &diff);
			/* Update the hashmeter at most 5 times per second */
			if ((hashes_done && (diff.tv_sec > 0 || diff.tv_usec > 200000)) ||
			    diff.tv_sec >= opt_log_interval)
			{
				hashmeter(thr_id, hashes_done);
				hashes_done = 0;
				copy_time(&tv_lastupdate, tv_end);
			}

			if (unlikely(mythr->work_restart))
			{
				/* Apart from device_thread 0, we stagger the
				 * starting of every next thread to try and get
				 * all devices busy before worrying about
				 * getting work for their extra threads */
				if (!primary)
				{
					struct timespec rgtp;

					rgtp.tv_sec = 0;
					rgtp.tv_nsec = 250 * mythr->device_thread * 1000000;
					nanosleep(&rgtp, NULL);
				}
				break;
			}

			if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
				mt_disable(mythr, thr_id, drv);

			sdiff.tv_sec = sdiff.tv_usec = 0;
		}
		while (!abandon_work(work, &wdiff, cgpu->max_hashes));
		free_work(work);
	}
	cgpu->deven = DEV_DISABLED;
}

/* Put a new unqueued work item in cgpu->unqueued_work under cgpu->qlock till
 * the driver tells us it's full so that it may extract the work item using
 * the get_queued() function which adds it to the hashtable on
 * cgpu->queued_work. */
static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id)
{
	do
	{
		bool need_work;

		/* Do this lockless just to know if we need more unqueued work. */
		need_work = (!cgpu->unqueued_work);

		/* get_work is a blocking function so do it outside of lock
		 * to prevent deadlocks with other locks. */
		if (need_work)
		{
			struct work *work = get_work(mythr, thr_id);

			wr_lock(&cgpu->qlock);
			/* Check we haven't grabbed work somehow between
			 * checking and picking up the lock. */
			if (likely(!cgpu->unqueued_work))
				cgpu->unqueued_work = work;
			else
				need_work = false;
			wr_unlock(&cgpu->qlock);

			if (unlikely(!need_work))
				discard_work(work);
		}
		/* The queue_full function should be used by the driver to
		 * actually place work items on the physical device if it
		 * does have a queue. */
	}
	while (!drv->queue_full(cgpu));
}

/* Add a work item to a cgpu's queued hashlist. Caller holds qlock. */
void __add_queued(struct cgpu_info *cgpu, struct work *work)
{
	cgpu->queued_count++;
	HASH_ADD_INT(cgpu->queued_work, id, work);
}

/* Move the unqueued work item onto the queued hashlist and return it, or
 * NULL if there is none (or it was stale and discarded). Caller holds qlock. */
struct work *__get_queued(struct cgpu_info *cgpu)
{
	struct work *work = NULL;

	if (cgpu->unqueued_work)
	{
		work = cgpu->unqueued_work;
		if (unlikely(stale_work(work, false)))
		{
			/* Stale work is dropped and the getwork scheduler is
			 * woken to generate fresh work. */
			discard_work(work);
			wake_gws();
		}
		else
			__add_queued(cgpu, work);
		cgpu->unqueued_work = NULL;
	}

	return work;
}

/* This function is for retrieving one work item from the unqueued pointer and
 * adding it to the hashtable of queued work. Code using this function must be
 * able to handle NULL as a return which implies there is no work available. */
struct work *get_queued(struct cgpu_info *cgpu)
{
	struct work *work;

	wr_lock(&cgpu->qlock);
	work = __get_queued(cgpu);
	wr_unlock(&cgpu->qlock);

	return work;
}

/* Locked wrapper around __add_queued. */
void add_queued(struct cgpu_info *cgpu, struct work *work)
{
	wr_lock(&cgpu->qlock);
	__add_queued(cgpu, work);
	wr_unlock(&cgpu->qlock);
}

/* Get fresh work and add it to cgpu's queued hashlist */
struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id)
{
	struct work *work = get_work(thr, thr_id);

	add_queued(cgpu, work);
	return work;
}

/* This function is for finding an already queued work item in the
 * given que hashtable. Code using this function must be able
 * to handle NULL as a return which implies there is no matching work.
 * The calling function must lock access to the que if it is required.
+ * The common values for midstatelen, offset, datalen are 32, 64, 12 */ +struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen) +{ + struct work *work, *tmp, *ret = NULL; + + HASH_ITER(hh, que, work, tmp) + { + if (memcmp(work->midstate, midstate, midstatelen) == 0 && + memcmp(work->data + offset, data, datalen) == 0) + { + ret = work; + break; + } + } + + return ret; +} + +/* This function is for finding an already queued work item in the + * device's queued_work hashtable. Code using this function must be able + * to handle NULL as a return which implies there is no matching work. + * The common values for midstatelen, offset, datalen are 32, 64, 12 */ +struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen) +{ + struct work *ret; + + rd_lock(&cgpu->qlock); + ret = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen); + rd_unlock(&cgpu->qlock); + + return ret; +} + +struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen) +{ + struct work *work, *ret = NULL; + + rd_lock(&cgpu->qlock); + work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen); + if (work) + ret = copy_work(work); + rd_unlock(&cgpu->qlock); + + return ret; +} + +/* This function is for finding an already queued work item in the + * given que hashtable. Code using this function must be able + * to handle NULL as a return which implies there is no matching work. + * The calling function must lock access to the que if it is required. 
*/ +struct work *__find_work_byid(struct work *que, uint32_t id) +{ + struct work *work, *tmp, *ret = NULL; + + HASH_ITER(hh, que, work, tmp) + { + if (work->id == id) + { + ret = work; + break; + } + } + + return ret; +} + +struct work *find_queued_work_byid(struct cgpu_info *cgpu, uint32_t id) +{ + struct work *ret; + + rd_lock(&cgpu->qlock); + ret = __find_work_byid(cgpu->queued_work, id); + rd_unlock(&cgpu->qlock); + + return ret; +} + +struct work *clone_queued_work_byid(struct cgpu_info *cgpu, uint32_t id) +{ + struct work *work, *ret = NULL; + + rd_lock(&cgpu->qlock); + work = __find_work_byid(cgpu->queued_work, id); + if (work) + ret = copy_work(work); + rd_unlock(&cgpu->qlock); + + return ret; +} + +void __work_completed(struct cgpu_info *cgpu, struct work *work) +{ + cgpu->queued_count--; + HASH_DEL(cgpu->queued_work, work); +} + +/* This iterates over a queued hashlist finding work started more than secs + * seconds ago and discards the work as completed. The driver must set the + * work->tv_work_start value appropriately. Returns the number of items aged. */ +int age_queued_work(struct cgpu_info *cgpu, double secs) +{ + struct work *work, *tmp; + struct timeval tv_now; + int aged = 0; + + cgtime(&tv_now); + + wr_lock(&cgpu->qlock); + HASH_ITER(hh, cgpu->queued_work, work, tmp) + { + if (tdiff(&tv_now, &work->tv_work_start) > secs) + { + __work_completed(cgpu, work); + free_work(work); + aged++; + } + } + wr_unlock(&cgpu->qlock); + + return aged; +} + +/* This function should be used by queued device drivers when they're sure + * the work struct is no longer in use. */ +void work_completed(struct cgpu_info *cgpu, struct work *work) +{ + wr_lock(&cgpu->qlock); + __work_completed(cgpu, work); + wr_unlock(&cgpu->qlock); + + free_work(work); +} + +/* Combines find_queued_work_bymidstate and work_completed in one function + * withOUT destroying the work so the driver must free it. 
 */
struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen)
{
	struct work *work;

	wr_lock(&cgpu->qlock);
	work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen);
	if (work)
		__work_completed(cgpu, work);
	wr_unlock(&cgpu->qlock);

	return work;
}

/* Discard any pending unqueued work item on the device. */
void flush_queue(struct cgpu_info *cgpu)
{
	struct work *work = NULL;

	if (unlikely(!cgpu))
		return;

	/* Use only a trylock in case we get into a deadlock with a queueing
	 * function holding the read lock when we're called. */
	if (wr_trylock(&cgpu->qlock))
		return;
	work = cgpu->unqueued_work;
	cgpu->unqueued_work = NULL;
	wr_unlock(&cgpu->qlock);

	if (work)
	{
		free_work(work);
		applog(LOG_DEBUG, "Discarded queued work item");
	}
}

/* This version of hash work is for devices that are fast enough to always
 * perform a full nonce range and need a queue to maintain the device busy.
 * Work creation and destruction is not done from within this function
 * directly. */
void hash_queued_work(struct thr_info *mythr)
{
	struct timeval tv_start = {0, 0}, tv_end;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	const int thr_id = mythr->id;
	int64_t hashes_done = 0;

	while (likely(!cgpu->shutdown))
	{
		struct timeval diff;
		int64_t hashes;

		mythr->work_update = false;

		fill_queue(mythr, cgpu, drv, thr_id);

		hashes = drv->scanwork(mythr);

		/* Reset the bool here in case the driver looks for it
		 * synchronously in the scanwork loop. */
		mythr->work_restart = false;

		/* -1 from scanwork signals a fatal device failure. */
		if (unlikely(hashes == -1 ))
		{
			applog(LOG_ERR, "%s %d failure, disabling!", drv->name, cgpu->device_id);
			cgpu->deven = DEV_DISABLED;
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
			break;
		}

		hashes_done += hashes;
		cgtime(&tv_end);
		timersub(&tv_end, &tv_start, &diff);
		/* Update the hashmeter at most 5 times per second */
		if ((hashes_done && (diff.tv_sec > 0 || diff.tv_usec > 200000)) ||
		    diff.tv_sec >= opt_log_interval)
		{
			hashmeter(thr_id, hashes_done);
			hashes_done = 0;
			copy_time(&tv_start, &tv_end);
		}

		if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
			mt_disable(mythr, thr_id, drv);

		if (mythr->work_update)
			drv->update_work(cgpu);
	}
	cgpu->deven = DEV_DISABLED;
}

/* This version of hash_work is for devices drivers that want to do their own
 * work management entirely, usually by using get_work(). Note that get_work
 * is a blocking function and will wait indefinitely if no work is available
 * so this must be taken into consideration in the driver. */
void hash_driver_work(struct thr_info *mythr)
{
	struct timeval tv_start = {0, 0}, tv_end;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	const int thr_id = mythr->id;
	int64_t hashes_done = 0;

	while (likely(!cgpu->shutdown))
	{
		struct timeval diff;
		int64_t hashes;

		mythr->work_update = false;

		hashes = drv->scanwork(mythr);

		/* Reset the bool here in case the driver looks for it
		 * synchronously in the scanwork loop. */
		mythr->work_restart = false;

		if (unlikely(hashes == -1 ))
		{
			applog(LOG_ERR, "%s %d failure, disabling!", drv->name, cgpu->device_id);
			cgpu->deven = DEV_DISABLED;
			dev_error(cgpu, REASON_THREAD_ZERO_HASH);
			break;
		}

		hashes_done += hashes;
		cgtime(&tv_end);
		timersub(&tv_end, &tv_start, &diff);
		/* Update the hashmeter at most 5 times per second */
		if ((hashes_done && (diff.tv_sec > 0 || diff.tv_usec > 200000)) ||
		    diff.tv_sec >= opt_log_interval)
		{
			hashmeter(thr_id, hashes_done);
			hashes_done = 0;
			copy_time(&tv_start, &tv_end);
		}

		if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED))
			mt_disable(mythr, thr_id, drv);

		if (mythr->work_update)
		{
			/* Unlike hash_queued_work, updates here are serialised
			 * across devices by update_job_lock. */
			mutex_lock(&update_job_lock);
			drv->update_work(cgpu);
			mutex_unlock(&update_job_lock);
		}
	}
	cgpu->deven = DEV_DISABLED;
}

/* Per-device mining thread entry point: initialise the driver, wait for the
 * start semaphore, then run the driver's chosen hash_work loop. */
void *miner_thread(void *userdata)
{
	struct thr_info *mythr = userdata;
	const int thr_id = mythr->id;
	struct cgpu_info *cgpu = mythr->cgpu;
	struct device_drv *drv = cgpu->drv;
	char threadname[16];

	snprintf(threadname, sizeof(threadname), "%d/Miner", thr_id);
	RenameThread(threadname);

	thread_reportout(mythr);
	if (!drv->thread_init(mythr))
	{
		dev_error(cgpu, REASON_THREAD_FAIL_INIT);
		goto out;
	}

	applog(LOG_DEBUG, "Waiting on sem in miner thread");
	cgsem_wait(&mythr->sem);

	cgpu->last_device_valid_work = time(NULL);
	drv->hash_work(mythr);
	drv->thread_shutdown(mythr);
out:
	return NULL;
}

enum
{
	STAT_SLEEP_INTERVAL = 1,
	STAT_CTR_INTERVAL = 10000000,
	FAILURE_INTERVAL = 30,
};

#ifdef HAVE_LIBCURL
/* Stage another work item from the work returned in a longpoll */
static void convert_to_work(json_t *val, int rolltime, struct pool *pool, struct timeval *tv_lp, struct timeval *tv_lp_reply)
{
	struct work *work;
	bool rc;

	work = make_work();

	rc = work_decode(pool, work, val);
	if (unlikely(!rc))
	{
		applog(LOG_ERR, "Could not convert longpoll data to work");
		free_work(work);
		return;
	}
	total_getworks++;
	pool->getwork_requested++;
	work->pool = pool;
	work->rolltime = rolltime;
	copy_time(&work->tv_getwork, tv_lp);
	copy_time(&work->tv_getwork_reply, tv_lp_reply);
	calc_diff(work, 0);

	if (pool->enabled == POOL_REJECTING)
		work->mandatory = true;

	if (pool->has_gbt)
		gen_gbt_work(pool, work);
	work->longpoll = true;
	work->getwork_mode = GETWORK_MODE_LP;

	/* We'll be checking this work item twice, but we already know it's
	 * from a new block so explicitly force the new block detection now
	 * rather than waiting for it to hit the stage thread. This also
	 * allows testwork to know whether LP discovered the block or not. */
	test_work_current(work);

	/* Don't use backup LPs as work if we have failover-only enabled. Use
	 * the longpoll work from a pool that has been rejecting shares as a
	 * way to detect when the pool has recovered.
	 */
	if (pool != current_pool() && opt_fail_only && pool->enabled != POOL_REJECTING)
	{
		free_work(work);
		return;
	}

	work = clone_work(work);

	applog(LOG_DEBUG, "Pushing converted work to stage thread");

	stage_work(work);
	applog(LOG_DEBUG, "Converted longpoll data to work");
}

/* If we want longpoll, enable it for the chosen default pool, or, if
 * the pool does not support longpoll, find the first one that does
 * and use its longpoll support */
static struct pool *select_longpoll_pool(struct pool *cp)
{
	int i;

	if (cp->hdr_path || cp->has_gbt || cp->gbt_solo)
		return cp;
	/* NOTE(review): the fallback scan accepts stratum or hdr_path pools
	 * but not has_gbt ones, unlike the check above — confirm intended. */
	for (i = 0; i < total_pools; i++)
	{
		struct pool *pool = pools[i];

		if (pool->has_stratum || pool->hdr_path)
			return pool;
	}
	return NULL;
}
#endif /* HAVE_LIBCURL */

/* This will make the longpoll thread wait till it's the current pool, or it
 * has been flagged as rejecting, before attempting to open any connections.
+ */ +static void wait_lpcurrent(struct pool *pool) +{ + while (!cnx_needed(pool) && (pool->enabled == POOL_DISABLED || + (pool != current_pool() && pool_strategy != POOL_LOADBALANCE && + pool_strategy != POOL_BALANCE))) + { + mutex_lock(&lp_lock); + pthread_cond_wait(&lp_cond, &lp_lock); + mutex_unlock(&lp_lock); + } +} + +#ifdef HAVE_LIBCURL +static void *longpoll_thread(void *userdata) +{ + struct pool *cp = (struct pool *)userdata; + /* This *pool is the source of the actual longpoll, not the pool we've + * tied it to */ + struct timeval start, reply, end; + struct pool *pool = NULL; + char threadname[16]; + CURL *curl = NULL; + int failures = 0; + char lpreq[1024]; + char *lp_url; + int rolltime; + + snprintf(threadname, sizeof(threadname), "%d/Longpoll", cp->pool_no); + RenameThread(threadname); + +retry_pool: + pool = select_longpoll_pool(cp); + if (!pool) + { + applog(LOG_WARNING, "No suitable long-poll found for %s", cp->rpc_url); + while (!pool) + { + cgsleep_ms(60000); + pool = select_longpoll_pool(cp); + } + } + + if (pool->has_stratum) + { + applog(LOG_WARNING, "Block change for %s detection via %s stratum", + cp->rpc_url, pool->rpc_url); + goto out; + } + + if (pool->gbt_solo) + { + applog(LOG_WARNING, "Block change for %s detection via getblockcount polling", + cp->rpc_url); + while (42) + { + json_t *val, *res_val = NULL; + + if (unlikely(pool->removed)) + return NULL; + + cgtime(&start); + wait_lpcurrent(cp); + sprintf(lpreq, "{\"id\": 0, \"method\": \"getblockcount\"}\n"); + + /* We will be making another call immediately after this + * one to get the height so allow this curl to be reused.*/ + get_gbt_curl(pool, 500); + curl_easy_setopt(pool->gbt_curl, CURLOPT_FORBID_REUSE, 0); + val = json_rpc_call(pool->gbt_curl, pool->rpc_url, pool->rpc_userpass, lpreq, true, + false, &rolltime, pool, false); + release_gbt_curl(pool); + + if (likely(val)) + res_val = json_object_get(val, "result"); + if (likely(res_val)) + { + int height = 
json_integer_value(res_val); + const char *prev_hash; + + failures = 0; + json_decref(val); + if (height >= cp->height) + { + applog(LOG_WARNING, "Block height change to %d detected on pool %d", + height, cp->pool_no); + update_gbt_solo(pool); + continue; + } + + sprintf(lpreq, "{\"id\": 0, \"method\": \"getblockhash\", \"params\": [%d]}\n", height); + get_gbt_curl(pool, 500); + curl_easy_setopt(pool->gbt_curl, CURLOPT_FORBID_REUSE, 1); + val = json_rpc_call(pool->gbt_curl, pool->rpc_url, pool->rpc_userpass, + lpreq, true, false, &rolltime, pool, false); + release_gbt_curl(pool); + + if (val) + { + /* Do a comparison on a short stretch of + * the hash to make sure it hasn't changed + * due to mining on an orphan branch. */ + prev_hash = json_string_value(json_object_get(val, "result")); + if (unlikely(prev_hash && strncasecmp(prev_hash + 56, pool->prev_hash, 8))) + { + applog(LOG_WARNING, "Mining on orphan branch detected, switching!"); + update_gbt_solo(pool); + } + json_decref(val); + } + + cgsleep_ms(500); + } + else + { + if (val) + json_decref(val); + cgtime(&end); + if (end.tv_sec - start.tv_sec > 30) + continue; + if (failures == 1) + applog(LOG_WARNING, "longpoll failed for %s, retrying every 30s", lp_url); + cgsleep_ms(30000); + } + } + } + + curl = curl_easy_init(); + if (unlikely(!curl)) + quit (1, "Longpoll CURL initialisation failed"); + + /* Any longpoll from any pool is enough for this to be true */ + have_longpoll = true; + + wait_lpcurrent(cp); + + if (pool->has_gbt) + { + lp_url = pool->rpc_url; + applog(LOG_WARNING, "GBT longpoll ID activated for %s", lp_url); + } + else + { + strcpy(lpreq, getwork_req); + + lp_url = pool->lp_url; + if (cp == pool) + applog(LOG_WARNING, "Long-polling activated for %s", lp_url); + else + applog(LOG_WARNING, "Long-polling activated for %s via %s", cp->rpc_url, lp_url); + } + + while (42) + { + json_t *val, *soval; + + wait_lpcurrent(cp); + + cgtime(&start); + + /* Update the longpollid every time, but do it under 
lock to + * avoid races */ + if (pool->has_gbt) + { + cg_rlock(&pool->gbt_lock); + snprintf(lpreq, sizeof(lpreq), + "{\"id\": 0, \"method\": \"getblocktemplate\", \"params\": " + "[{\"capabilities\": [\"coinbasetxn\", \"workid\", \"coinbase/append\"], " + "\"longpollid\": \"%s\"}]}\n", pool->longpollid); + cg_runlock(&pool->gbt_lock); + } + + /* Longpoll connections can be persistent for a very long time + * and any number of issues could have come up in the meantime + * so always establish a fresh connection instead of relying on + * a persistent one. */ + curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); + val = json_rpc_call(curl, lp_url, pool->rpc_userpass, + lpreq, false, true, &rolltime, pool, false); + + cgtime(&reply); + + if (likely(val)) + { + soval = json_object_get(json_object_get(val, "result"), "submitold"); + if (soval) + pool->submit_old = json_is_true(soval); + else + pool->submit_old = false; + convert_to_work(val, rolltime, pool, &start, &reply); + failures = 0; + json_decref(val); + } + else + { + /* Some pools regularly drop the longpoll request so + * only see this as longpoll failure if it happens + * immediately and just restart it the rest of the + * time. 
*/ + cgtime(&end); + if (end.tv_sec - start.tv_sec > 30) + continue; + if (failures == 1) + applog(LOG_WARNING, "longpoll failed for %s, retrying every 30s", lp_url); + cgsleep_ms(30000); + } + + if (pool != cp) + { + pool = select_longpoll_pool(cp); + if (pool->has_stratum) + { + applog(LOG_WARNING, "Block change for %s detection via %s stratum", + cp->rpc_url, pool->rpc_url); + break; + } + if (unlikely(!pool)) + goto retry_pool; + } + + if (unlikely(pool->removed)) + break; + } + +out: + curl_easy_cleanup(curl); + + return NULL; +} +#else /* HAVE_LIBCURL */ +static void *longpoll_thread(void __maybe_unused *userdata) +{ + pthread_detach(pthread_self()); + return NULL; +} +#endif /* HAVE_LIBCURL */ + +void reinit_device(struct cgpu_info *cgpu) +{ + if (cgpu->deven == DEV_DISABLED) + return; + +#ifdef USE_USBUTILS + /* Attempt a usb device reset if the device has gone sick */ + if (cgpu->usbdev && cgpu->usbdev->handle) + libusb_reset_device(cgpu->usbdev->handle); +#endif + cgpu->drv->reinit_device(cgpu); +} + +static struct timeval rotate_tv; + +/* We reap curls if they are unused for over a minute */ +static void reap_curl(struct pool *pool) +{ + struct curl_ent *ent, *iter; + struct timeval now; + int reaped = 0; + + cgtime(&now); + + mutex_lock(&pool->pool_lock); + list_for_each_entry_safe(ent, iter, &pool->curlring, node) + { + if (pool->curls < 2) + break; + if (now.tv_sec - ent->tv.tv_sec > 300) + { + reaped++; + pool->curls--; + list_del(&ent->node); + curl_easy_cleanup(ent->curl); + free(ent); + } + } + mutex_unlock(&pool->pool_lock); + + if (reaped) + applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no); +} + +/* Prune old shares we haven't had a response about for over 2 minutes in case + * the pool never plans to respond and we're just leaking memory. If we get a + * response beyond that time they will be seen as untracked shares. 
 */
static void prune_stratum_shares(struct pool *pool)
{
	struct stratum_share *sshare, *tmpshare;
	time_t current_time = time(NULL);
	int cleared = 0;

	mutex_lock(&sshare_lock);
	HASH_ITER(hh, stratum_shares, sshare, tmpshare)
	{
		/* Drop shares submitted to this pool over 120s ago with no
		 * response; they count as stale. */
		if (sshare->work->pool == pool && current_time > sshare->sshare_time + 120)
		{
			HASH_DEL(stratum_shares, sshare);
			free_work(sshare->work);
			free(sshare);
			cleared++;
		}
	}
	mutex_unlock(&sshare_lock);

	if (cleared)
	{
		applog(LOG_WARNING, "Lost %d shares due to no stratum share response from pool %d",
		       cleared, pool->pool_no);
		pool->stale_shares += cleared;
		total_stale += cleared;
	}
}

/* Low-priority housekeeping thread: reaps idle curls, prunes unanswered
 * stratum shares, maintains per-pool utility stats, retests idle pools and
 * performs failover/rotate pool switching. Runs every 30 seconds. */
static void *watchpool_thread(void __maybe_unused *userdata)
{
	int intervals = 0;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	RenameThread("Watchpool");

	set_lowprio();

	while (42)
	{
		struct timeval now;
		int i;

		if (++intervals > 20)
			intervals = 0;
		cgtime(&now);

		for (i = 0; i < total_pools; i++)
		{
			struct pool *pool = pools[i];

			if (!opt_benchmark && !opt_benchfile)
			{
				reap_curl(pool);
				prune_stratum_shares(pool);
			}

			/* Get a rolling utility per pool over 10 mins */
			if (intervals > 19)
			{
				double shares = pool->diff1 - pool->last_shares;

				pool->last_shares = pool->diff1;
				pool->utility = (pool->utility + shares * 0.63) / 1.63;
				pool->shares = pool->utility;
			}

			if (pool->enabled == POOL_DISABLED)
				continue;

			/* Don't start testing a pool if its test thread
			 * from startup is still doing its first attempt. */
			if (unlikely(pool->testing))
				continue;

			/* Test pool is idle once every minute */
			/* NOTE(review): threshold is 30s with a 30s loop sleep,
			 * so retests happen roughly each minute — confirm. */
			if (pool->idle && now.tv_sec - pool->tv_idle.tv_sec > 30)
			{
				if (pool_active(pool, true) && pool_tclear(pool, &pool->idle))
					pool_resus(pool);
				else
					cgtime(&pool->tv_idle);
			}

			/* Only switch pools if the failback pool has been
			 * alive for more than 5 minutes to prevent
			 * intermittently failing pools from being used. */
			if (!pool->idle && pool_strategy == POOL_FAILOVER && pool->prio < cp_prio() &&
			    now.tv_sec - pool->tv_idle.tv_sec > 300)
			{
				applog(LOG_WARNING, "Pool %d %s stable for 5 mins",
				       pool->pool_no, pool->rpc_url);
				switch_pools(NULL);
			}
		}

		if (current_pool()->idle)
			switch_pools(NULL);

		if (pool_strategy == POOL_ROTATE && now.tv_sec - rotate_tv.tv_sec > 60 * opt_rotate_period)
		{
			cgtime(&rotate_tv);
			switch_pools(NULL);
		}

		cgsleep_ms(30000);

	}
	return NULL;
}

/* Makes sure the hashmeter keeps going even if mining threads stall, updates
 * the screen at regular intervals, and restarts threads if they appear to have
 * died. */
#define WATCHDOG_INTERVAL 2
#define WATCHDOG_SICK_TIME 120
#define WATCHDOG_DEAD_TIME 600
#define WATCHDOG_SICK_COUNT (WATCHDOG_SICK_TIME/WATCHDOG_INTERVAL)
#define WATCHDOG_DEAD_COUNT (WATCHDOG_DEAD_TIME/WATCHDOG_INTERVAL)

static void *watchdog_thread(void __maybe_unused *userdata)
{
	const unsigned int interval = WATCHDOG_INTERVAL;
	struct timeval zero_tv;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	RenameThread("Watchdog");

	set_lowprio();
	memset(&zero_tv, 0, sizeof(struct timeval));
	cgtime(&rotate_tv);

	while (1)
	{
		int i;
		struct timeval now;

		sleep(interval);

		discard_stale();

		hashmeter(-1, 0);

#ifdef HAVE_CURSES
		if (curses_active_locked())
		{
			struct cgpu_info *cgpu;
			int count;

			change_logwinsize();
			curses_print_status();
			count = 0;
			/* Devices with a live USB connection are listed first,
			 * followed by unplugged USB devices. */
			for (i = 0; i < total_devices; i++)
			{
				cgpu = get_devices(i);
#ifndef USE_USBUTILS
				if (cgpu)
#else
				if (cgpu && !cgpu->usbinfo.nodev)
#endif
					curses_print_devstatus(cgpu, i, count++);
			}
#ifdef USE_USBUTILS
			for (i = 0; i < total_devices; i++)
			{
				cgpu = get_devices(i);
				if (cgpu && cgpu->usbinfo.nodev)
					curses_print_devstatus(cgpu, i, count++);
			}
#endif
			touchwin(statuswin);
			wrefresh(statuswin);
			touchwin(logwin);
			wrefresh(logwin);
			unlock_curses();
		}
#endif

		cgtime(&now);

		/* Scheduled start/stop handling: pause or resume all mining
		 * threads according to the configured schedule. */
		if (!sched_paused && !should_run())
		{
			applog(LOG_WARNING, "Pausing execution as per stop time %02d:%02d scheduled",
			       schedstop.tm.tm_hour, schedstop.tm.tm_min);
			if (!schedstart.enable)
			{
				quit(0, "Terminating execution as planned");
				break;
			}

			applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d",
			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
			sched_paused = true;

			rd_lock(&mining_thr_lock);
			for (i = 0; i < mining_threads; i++)
				mining_thr[i]->pause = true;
			rd_unlock(&mining_thr_lock);
		}
		else if (sched_paused && should_run())
		{
			applog(LOG_WARNING, "Restarting execution as per start time %02d:%02d scheduled",
			       schedstart.tm.tm_hour, schedstart.tm.tm_min);
			if (schedstop.enable)
				applog(LOG_WARNING, "Will pause execution as scheduled at %02d:%02d",
				       schedstop.tm.tm_hour, schedstop.tm.tm_min);
			sched_paused = false;

			for (i = 0; i < mining_threads; i++)
			{
				struct thr_info *thr;

				thr = get_thread(i);

				/* Don't touch disabled devices */
				if (thr->cgpu->deven == DEV_DISABLED)
					continue;
				thr->pause = false;
				applog(LOG_DEBUG, "Pushing sem post to thread %d", thr->id);
				cgsem_post(&thr->sem);
			}
		}

		/* Per-device liveness state machine: WELL -> SICK -> DEAD
		 * based on time since the thread last reported in. */
		for (i = 0; i < total_devices; ++i)
		{
			struct cgpu_info *cgpu = get_devices(i);
			struct thr_info *thr = cgpu->thr[0];
			enum dev_enable *denable;
			char dev_str[8];

			if (!thr)
				continue;

			cgpu->drv->get_stats(cgpu);

			denable = &cgpu->deven;
			snprintf(dev_str, sizeof(dev_str), "%s %d", cgpu->drv->name, cgpu->device_id);

			/* Thread is waiting on getwork or disabled */
			if (thr->getwork || *denable == DEV_DISABLED)
				continue;

			if (cgpu->status != LIFE_WELL && (now.tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME))
			{
				if (cgpu->status != LIFE_INIT)
					applog(LOG_ERR, "%s: Recovered, declaring WELL!", dev_str);
				cgpu->status = LIFE_WELL;
				cgpu->device_last_well = time(NULL);
			}
			else if (cgpu->status == LIFE_WELL && (now.tv_sec - thr->last.tv_sec > WATCHDOG_SICK_TIME))
			{
				cgpu->rolling = 0;
				cgpu->status = LIFE_SICK;
				/* NOTE(review): message says 60 seconds but
				 * WATCHDOG_SICK_TIME is 120 — confirm which is
				 * intended. */
				applog(LOG_ERR, "%s: Idle for more than 60 seconds, declaring SICK!", dev_str);
				cgtime(&thr->sick);

				dev_error(cgpu, REASON_DEV_SICK_IDLE_60);
				if (opt_restart)
				{
					applog(LOG_ERR, "%s: Attempting to restart", dev_str);
					reinit_device(cgpu);
				}
			}
			else if (cgpu->status == LIFE_SICK && (now.tv_sec - thr->last.tv_sec > WATCHDOG_DEAD_TIME))
			{
				cgpu->status = LIFE_DEAD;
				applog(LOG_ERR, "%s: Not responded for more than 10 minutes, declaring DEAD!", dev_str);
				cgtime(&thr->sick);

				dev_error(cgpu, REASON_DEV_DEAD_IDLE_600);
			}
			else if (now.tv_sec - thr->sick.tv_sec > 60 &&
				 (cgpu->status == LIFE_SICK || cgpu->status == LIFE_DEAD))
			{
				/* Attempt to restart a GPU that's sick or dead once every minute */
				cgtime(&thr->sick);
				if (opt_restart)
					reinit_device(cgpu);
			}
		}
	}

	return NULL;
}

/* Log a one-line status summary for a single device. */
static void log_print_status(struct cgpu_info *cgpu)
{
	char logline[255];

	get_statline(logline, sizeof(logline), cgpu);
	applog(LOG_WARNING, "%s", logline);
}

static void noop_get_statline(char __maybe_unused *buf, size_t __maybe_unused bufsiz, struct cgpu_info __maybe_unused *cgpu);
void blank_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info __maybe_unused *cgpu);

void print_summary(void)
{
	struct timeval diff;
	int hours, mins, secs, i;
	double utility, displayed_hashes, work_util;

	timersub(&total_tv_end, &total_tv_start, &diff);
	hours = diff.tv_sec / 3600;
	mins = (diff.tv_sec % 3600) / 60;
	secs = diff.tv_sec % 60;

	/* Utility is accepted shares per minute; work_util is diff1 shares
	 * per minute. */
	utility = total_accepted / total_secs * 60;
	work_util = total_diff1 / total_secs * 60;

	applog(LOG_WARNING, "\nSummary of runtime statistics:\n");
	applog(LOG_WARNING, "Started at %s", datestamp);
	if (total_pools == 1)
		applog(LOG_WARNING, "Pool: %s", pools[0]->rpc_url);
	applog(LOG_WARNING, "Runtime: %d hrs : %d mins : %d secs", hours, mins, secs);
	displayed_hashes = total_mhashes_done / total_secs;

	applog(LOG_WARNING, "Average hashrate: %.1f Mhash/s", displayed_hashes);
	applog(LOG_WARNING, "Solved blocks: %d", found_blocks);
	applog(LOG_WARNING, "Best share difficulty: %s", best_share);
	applog(LOG_WARNING, "Share submissions: %"PRId64, total_accepted + total_rejected);
	applog(LOG_WARNING, "Accepted shares: %"PRId64, total_accepted);
	applog(LOG_WARNING, "Rejected shares: %"PRId64, total_rejected);
	/* NOTE(review): "%1.f" is width 1 / precision 0 (no decimals); the
	 * nearby ratios use "%.1f" - confirm whether "%.1f" was intended. */
	applog(LOG_WARNING, "Accepted difficulty shares: %1.f", total_diff_accepted);
	applog(LOG_WARNING, "Rejected difficulty shares: %1.f", total_diff_rejected);
	if (total_accepted || total_rejected)
		applog(LOG_WARNING, "Reject ratio: %.1f%%", (double)(total_rejected * 100) / (double)(total_accepted + total_rejected));
	applog(LOG_WARNING, "Hardware errors: %d", hw_errors);
	applog(LOG_WARNING, "Utility (accepted shares / min): %.2f/min", utility);
	applog(LOG_WARNING, "Work Utility (diff1 shares solved / min): %.2f/min\n", work_util);

	applog(LOG_WARNING, "Stale submissions discarded due to new blocks: %"PRId64, total_stale);
	applog(LOG_WARNING, "Unable to get work from server occasions: %d", total_go);
	applog(LOG_WARNING, "Work items generated locally: %d", local_work);
	applog(LOG_WARNING, "Submitting work remotely delay occasions: %d", total_ro);
	applog(LOG_WARNING, "New blocks detected on network: %d\n", new_blocks);

	/* Per-pool breakdown only makes sense with more than one pool. */
	if (total_pools > 1)
	{
		for (i = 0; i < total_pools; i++)
		{
			struct pool *pool = pools[i];

			applog(LOG_WARNING, "Pool: %s", pool->rpc_url);
			if (pool->solved)
				applog(LOG_WARNING, "SOLVED %d BLOCK%s!", pool->solved, pool->solved > 1 ? "S" : "");
			applog(LOG_WARNING, " Share submissions: %"PRId64, pool->accepted + pool->rejected);
			applog(LOG_WARNING, " Accepted shares: %"PRId64, pool->accepted);
			applog(LOG_WARNING, " Rejected shares: %"PRId64, pool->rejected);
			applog(LOG_WARNING, " Accepted difficulty shares: %1.f", pool->diff_accepted);
			applog(LOG_WARNING, " Rejected difficulty shares: %1.f", pool->diff_rejected);
			if (pool->accepted || pool->rejected)
				applog(LOG_WARNING, " Reject ratio: %.1f%%", (double)(pool->rejected * 100) / (double)(pool->accepted + pool->rejected));

			applog(LOG_WARNING, " Items worked on: %d", pool->works);
			applog(LOG_WARNING, " Stale submissions discarded due to new blocks: %d", pool->stale_shares);
			applog(LOG_WARNING, " Unable to get work from server occasions: %d", pool->getfail_occasions);
			applog(LOG_WARNING, " Submitting work remotely delay occasions: %d\n", pool->remotefail_occasions);
		}
	}

	applog(LOG_WARNING, "Summary of per device statistics:\n");
	for (i = 0; i < total_devices; ++i)
	{
		struct cgpu_info *cgpu = get_devices(i);

		/* Strip the drivers' live-status decorations so the final
		 * per-device line is a plain statline. */
		cgpu->drv->get_statline_before = &blank_get_statline_before;
		cgpu->drv->get_statline = &noop_get_statline;
		log_print_status(cgpu);
	}

	/* --shares: report whether the requested quota was reached. */
	if (opt_shares)
	{
		applog(LOG_WARNING, "Mined %.0f accepted shares of %d requested\n", total_diff_accepted, opt_shares);
		if (opt_shares > total_diff_accepted)
			applog(LOG_WARNING, "WARNING - Mined only %.0f shares of %d requested.", total_diff_accepted, opt_shares);
	}
	applog(LOG_WARNING, " ");

	fflush(stderr);
	fflush(stdout);
}

/* Tear down global services at exit; when restarting is true, skip the
 * final summary since the process is about to come back up. */
static void clean_up(bool restarting)
{
#ifdef USE_USBUTILS
	/* Stop the poll thread before libusb_exit to avoid events on a
	 * torn-down context. */
	usb_polling = false;
	pthread_join(usb_poll_thread, NULL);
	libusb_exit(NULL);
#endif

	cgtime(&total_tv_end);
#ifdef WIN32
	timeEndPeriod(1);
#endif
#ifdef HAVE_CURSES
	disable_curses();
#endif
	if (!restarting && !opt_realquiet && successful_connect)
		print_summary();

	curl_global_cleanup();
}

/* Should all else fail and we're unable to
clean up threads due to locking + * issues etc, just silently exit. */ +static void *killall_thread(void __maybe_unused *arg) +{ + pthread_detach(pthread_self()); + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + sleep(5); + exit(1); + return NULL; +} + +void __quit(int status, bool clean) +{ + pthread_t killall_t; + + if (unlikely(pthread_create(&killall_t, NULL, killall_thread, NULL))) + exit(1); + + if (clean) + clean_up(false); +#ifdef HAVE_CURSES + else + disable_curses(); +#endif + +#if defined(unix) || defined(__APPLE__) + if (forkpid > 0) + { + kill(forkpid, SIGTERM); + forkpid = 0; + } +#endif + pthread_cancel(killall_t); + + exit(status); +} + +void _quit(int status) +{ + __quit(status, true); +} + +#ifdef HAVE_CURSES +char *curses_input(const char *query) +{ + char *input; + + echo(); + input = malloc(255); + if (!input) + quit(1, "Failed to malloc input"); + leaveok(logwin, false); + wlogprint("%s:\n", query); + wgetnstr(logwin, input, 255); + if (!strlen(input)) + strcpy(input, "-1"); + leaveok(logwin, true); + noecho(); + return input; +} +#endif + +static bool pools_active = false; + +static void *test_pool_thread(void *arg) +{ + struct pool *pool = (struct pool *)arg; + + if (!pool->blocking) + pthread_detach(pthread_self()); +retry: + if (pool_active(pool, false)) + { + pool_tset(pool, &pool->lagging); + pool_tclear(pool, &pool->idle); + bool first_pool = false; + + cg_wlock(&control_lock); + if (!pools_active) + { + currentpool = pool; + if (pool->pool_no != 0) + first_pool = true; + pools_active = true; + } + cg_wunlock(&control_lock); + + if (unlikely(first_pool)) + applog(LOG_NOTICE, "Switching to pool %d %s - first alive pool", pool->pool_no, pool->rpc_url); + + pool_resus(pool); + switch_pools(NULL); + } + else + { + pool_died(pool); + sleep(5); + goto retry; + } + + pool->testing = false; + + return NULL; +} + +/* Always returns true that the pool details were added unless we are not + * live, implying this is the only pool being 
added, so if no pools are + * active it returns false. */ +bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass) +{ + size_t siz; + + url = get_proxy(url, pool); + + pool->rpc_url = url; + pool->rpc_user = user; + pool->rpc_pass = pass; + siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2; + pool->rpc_userpass = malloc(siz); + if (!pool->rpc_userpass) + quit(1, "Failed to malloc userpass"); + snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass); + + pool->testing = true; + pool->idle = true; + pool->blocking = !live; + enable_pool(pool); + + pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool); + if (!live) + { + pthread_join(pool->test_thread, NULL); + return pools_active; + } + return true; +} + +#ifdef HAVE_CURSES +static bool input_pool(bool live) +{ + char *url = NULL, *user = NULL, *pass = NULL; + struct pool *pool; + bool ret = false; + + immedok(logwin, true); + wlogprint("Input server details.\n"); + + url = curses_input("URL"); + if (!strcmp(url, "-1")) + goto out; + + user = curses_input("Username"); + if (!strcmp(user, "-1")) + goto out; + + pass = curses_input("Password"); + if (!strcmp(pass, "-1")) + { + free(pass); + pass = strdup(""); + } + + pool = add_pool(); + + if (!detect_stratum(pool, url) && strncmp(url, "http://", 7) && + strncmp(url, "https://", 8)) + { + char *httpinput; + + httpinput = malloc(256); + if (!httpinput) + quit(1, "Failed to malloc httpinput"); + strcpy(httpinput, "http://"); + strncat(httpinput, url, 248); + free(url); + url = httpinput; + } + + ret = add_pool_details(pool, live, url, user, pass); +out: + immedok(logwin, false); + + if (!ret) + { + free(url); + free(user); + free(pass); + } + return ret; +} +#endif + +#if defined(unix) || defined(__APPLE__) +static void fork_monitor() +{ + // Make a pipe: [readFD, writeFD] + int pfd[2]; + int r = pipe(pfd); + + if (r < 0) + { + perror("pipe - failed to create pipe for --monitor"); + exit(1); + } 
+ + // Make stderr write end of pipe + fflush(stderr); + r = dup2(pfd[1], 2); + if (r < 0) + { + perror("dup2 - failed to alias stderr to write end of pipe for --monitor"); + exit(1); + } + r = close(pfd[1]); + if (r < 0) + { + perror("close - failed to close write end of pipe for --monitor"); + exit(1); + } + + // Don't allow a dying monitor to kill the main process + sighandler_t sr0 = signal(SIGPIPE, SIG_IGN); + sighandler_t sr1 = signal(SIGPIPE, SIG_IGN); + if (SIG_ERR == sr0 || SIG_ERR == sr1) + { + perror("signal - failed to edit signal mask for --monitor"); + exit(1); + } + + // Fork a child process + forkpid = fork(); + if (forkpid < 0) + { + perror("fork - failed to fork child process for --monitor"); + exit(1); + } + + // Child: launch monitor command + if (0 == forkpid) + { + // Make stdin read end of pipe + r = dup2(pfd[0], 0); + if (r < 0) + { + perror("dup2 - in child, failed to alias read end of pipe to stdin for --monitor"); + exit(1); + } + close(pfd[0]); + if (r < 0) + { + perror("close - in child, failed to close read end of pipe for --monitor"); + exit(1); + } + + // Launch user specified command + execl("/bin/bash", "/bin/bash", "-c", opt_stderr_cmd, (char*)NULL); + perror("execl - in child failed to exec user specified command for --monitor"); + exit(1); + } + + // Parent: clean up unused fds and bail + r = close(pfd[0]); + if (r < 0) + { + perror("close - failed to close read end of pipe for --monitor"); + exit(1); + } +} +#endif // defined(unix) + +#ifdef HAVE_CURSES +static void enable_curses_windows(void) +{ + int x,y; + + getmaxyx(mainwin, y, x); + statuswin = newwin(logstart, x, 0, 0); + leaveok(statuswin, true); + logwin = newwin(y - logcursor, 0, logcursor, 0); + idlok(logwin, true); + scrollok(logwin, true); + leaveok(logwin, true); + cbreak(); + noecho(); +} +void enable_curses(void) +{ + lock_curses(); + if (curses_active) + { + unlock_curses(); + return; + } + + mainwin = initscr(); + enable_curses_windows(); + curses_active = 
true;
	statusy = logstart;
	unlock_curses();
}
#endif

/* Monotonic id handed out to each enabled device. */
static int cgminer_id_count = 0;

/* Various noop functions for drivers that don't support or need their
 * variants. */
static void noop_reinit_device(struct cgpu_info __maybe_unused *cgpu)
{
}

/* Intentionally writes nothing before the statline. */
void blank_get_statline_before(char __maybe_unused *buf,size_t __maybe_unused bufsiz, struct cgpu_info __maybe_unused *cgpu)
{
}

static void noop_get_statline(char __maybe_unused *buf, size_t __maybe_unused bufsiz, struct cgpu_info __maybe_unused *cgpu)
{
}

static bool noop_get_stats(struct cgpu_info __maybe_unused *cgpu)
{
	return true;
}

static bool noop_thread_prepare(struct thr_info __maybe_unused *thr)
{
	return true;
}

/* Default work limit: effectively unlimited (full 32-bit nonce range). */
static uint64_t noop_can_limit_work(struct thr_info __maybe_unused *thr)
{
	return 0xffffffff;
}

static bool noop_thread_init(struct thr_info __maybe_unused *thr)
{
	return true;
}

static bool noop_prepare_work(struct thr_info __maybe_unused *thr, struct work __maybe_unused *work)
{
	return true;
}

static void noop_hw_error(struct thr_info __maybe_unused *thr)
{
}

static void noop_thread_shutdown(struct thr_info __maybe_unused *thr)
{
}

static void noop_thread_enable(struct thr_info __maybe_unused *thr)
{
}

static void noop_detect(bool __maybe_unused hotplug)
{
}

static struct api_data *noop_get_api_stats(struct cgpu_info __maybe_unused *cgpu)
{
	return NULL;
}

/* Used by null_device_drv so the miner thread exits immediately. */
static void noop_hash_work(struct thr_info __maybe_unused *thr)
{
}

/* Aliases: these hooks share a compatible signature with an existing noop. */
#define noop_flush_work noop_reinit_device
#define noop_update_work noop_reinit_device
#define noop_queue_full noop_get_stats
#define noop_zero_stats noop_reinit_device
#define noop_identify_device noop_reinit_device

/* Fill missing driver drv functions with noops */
void fill_device_drv(struct device_drv *drv)
{
	if (!drv->drv_detect)
		drv->drv_detect = &noop_detect;
	if (!drv->reinit_device)
		drv->reinit_device = &noop_reinit_device;
	if (!drv->get_statline_before)
		drv->get_statline_before = &blank_get_statline_before;
	if (!drv->get_statline)
		drv->get_statline = &noop_get_statline;
	if (!drv->get_stats)
		drv->get_stats = &noop_get_stats;
	if (!drv->thread_prepare)
		drv->thread_prepare = &noop_thread_prepare;
	if (!drv->can_limit_work)
		drv->can_limit_work = &noop_can_limit_work;
	if (!drv->thread_init)
		drv->thread_init = &noop_thread_init;
	if (!drv->prepare_work)
		drv->prepare_work = &noop_prepare_work;
	if (!drv->hw_error)
		drv->hw_error = &noop_hw_error;
	if (!drv->thread_shutdown)
		drv->thread_shutdown = &noop_thread_shutdown;
	if (!drv->thread_enable)
		drv->thread_enable = &noop_thread_enable;
	/* hash_work defaults to the real sole-work loop, not a noop. */
	if (!drv->hash_work)
		drv->hash_work = &hash_sole_work;
	if (!drv->flush_work)
		drv->flush_work = &noop_flush_work;
	if (!drv->update_work)
		drv->update_work = &noop_update_work;
	if (!drv->queue_full)
		drv->queue_full = &noop_queue_full;
	if (!drv->zero_stats)
		drv->zero_stats = &noop_zero_stats;
	/* If drivers support internal diff they should set a max_diff or
	 * we will assume they don't and set max to 1.
*/ + if (!drv->max_diff) + drv->max_diff = 1; +} + +void null_device_drv(struct device_drv *drv) +{ + drv->drv_detect = &noop_detect; + drv->reinit_device = &noop_reinit_device; + drv->get_statline_before = &blank_get_statline_before; + drv->get_statline = &noop_get_statline; + drv->get_api_stats = &noop_get_api_stats; + drv->get_stats = &noop_get_stats; + drv->identify_device = &noop_identify_device; + drv->set_device = NULL; + + drv->thread_prepare = &noop_thread_prepare; + drv->can_limit_work = &noop_can_limit_work; + drv->thread_init = &noop_thread_init; + drv->prepare_work = &noop_prepare_work; + + /* This should make the miner thread just exit */ + drv->hash_work = &noop_hash_work; + + drv->hw_error = &noop_hw_error; + drv->thread_shutdown = &noop_thread_shutdown; + drv->thread_enable = &noop_thread_enable; + + drv->zero_stats = &noop_zero_stats; + + drv->hash_work = &noop_hash_work; + + drv->queue_full = &noop_queue_full; + drv->flush_work = &noop_flush_work; + drv->update_work = &noop_update_work; + + drv->zero_stats = &noop_zero_stats; + drv->max_diff = 1; + drv->min_diff = 1; +} + +void enable_device(struct cgpu_info *cgpu) +{ + cgpu->deven = DEV_ENABLED; + + wr_lock(&devices_lock); + devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu; + wr_unlock(&devices_lock); + + if (hotplug_mode) + new_threads += cgpu->threads; + else + mining_threads += cgpu->threads; + + rwlock_init(&cgpu->qlock); + cgpu->queued_work = NULL; +} + +struct _cgpu_devid_counter +{ + char name[4]; + int lastid; + UT_hash_handle hh; +}; + +static void adjust_mostdevs(void) +{ + if (total_devices - zombie_devs > most_devices) + most_devices = total_devices - zombie_devs; +} + +#ifdef USE_ICARUS +bool icarus_get_device_id(struct cgpu_info *cgpu) +{ + static struct _cgpu_devid_counter *devids = NULL; + struct _cgpu_devid_counter *d; + + HASH_FIND_STR(devids, cgpu->drv->name, d); + if (d) + return (d->lastid + 1); + else + return 0; +} +#endif + +bool add_cgpu(struct cgpu_info *cgpu) +{ + 
static struct _cgpu_devid_counter *devids = NULL; + struct _cgpu_devid_counter *d; + + HASH_FIND_STR(devids, cgpu->drv->name, d); + if (d) + cgpu->device_id = ++d->lastid; + else + { + d = malloc(sizeof(*d)); + memcpy(d->name, cgpu->drv->name, sizeof(d->name)); + cgpu->device_id = d->lastid = 0; + HASH_ADD_STR(devids, name, d); + } + + wr_lock(&devices_lock); + devices = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + new_devices + 2)); + wr_unlock(&devices_lock); + + mutex_lock(&stats_lock); + cgpu->last_device_valid_work = time(NULL); + mutex_unlock(&stats_lock); + + if (hotplug_mode) + devices[total_devices + new_devices++] = cgpu; + else + devices[total_devices++] = cgpu; + + adjust_mostdevs(); +#ifdef USE_USBUTILS + if (cgpu->usbdev && !cgpu->unique_id && cgpu->usbdev->serial_string && + strlen(cgpu->usbdev->serial_string) > 4) + cgpu->unique_id = str_text(cgpu->usbdev->serial_string); +#endif + return true; +} + +struct device_drv *copy_drv(struct device_drv *drv) +{ + struct device_drv *copy; + + if (unlikely(!(copy = malloc(sizeof(*copy))))) + { + quit(1, "Failed to allocate device_drv copy of %s (%s)", + drv->name, drv->copy ? 
"copy" : "original"); + } + memcpy(copy, drv, sizeof(*copy)); + copy->copy = true; + return copy; +} + +#ifdef USE_USBUTILS +static void hotplug_process(void) +{ + struct thr_info *thr; + int i, j; + + for (i = 0; i < new_devices; i++) + { + struct cgpu_info *cgpu; + int dev_no = total_devices + i; + + cgpu = devices[dev_no]; + enable_device(cgpu); + cgpu->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; + cgpu->rolling = cgpu->total_mhashes = 0; + } + + wr_lock(&mining_thr_lock); + mining_thr = realloc(mining_thr, sizeof(thr) * (mining_threads + new_threads + 1)); + + if (!mining_thr) + quit(1, "Failed to hotplug realloc mining_thr"); + for (i = 0; i < new_threads; i++) + { + mining_thr[mining_threads + i] = calloc(1, sizeof(*thr)); + if (!mining_thr[mining_threads + i]) + quit(1, "Failed to hotplug calloc mining_thr[%d]", i); + } + + // Start threads + for (i = 0; i < new_devices; ++i) + { + struct cgpu_info *cgpu = devices[total_devices]; + cgpu->thr = malloc(sizeof(*cgpu->thr) * (cgpu->threads+1)); + cgpu->thr[cgpu->threads] = NULL; + cgpu->status = LIFE_INIT; + cgtime(&(cgpu->dev_start_tv)); + + for (j = 0; j < cgpu->threads; ++j) + { + thr = __get_thread(mining_threads); + thr->id = mining_threads; + thr->cgpu = cgpu; + thr->device_thread = j; + + if (!cgpu->drv->thread_prepare(thr)) + { + null_device_drv(cgpu->drv); + cgpu->deven = DEV_DISABLED; + continue; + } + + if (unlikely(thr_info_create(thr, NULL, miner_thread, thr))) + quit(1, "hotplug thread %d create failed", thr->id); + + cgpu->thr[j] = thr; + + /* Enable threads for devices set not to mine but disable + * their queue in case we wish to enable them later */ + if (cgpu->deven != DEV_DISABLED) + { + applog(LOG_DEBUG, "Pushing sem post to thread %d", thr->id); + cgsem_post(&thr->sem); + } + + mining_threads++; + } + total_devices++; + applog(LOG_WARNING, "Hotplug: %s added %s %i", cgpu->drv->dname, cgpu->drv->name, cgpu->device_id); + } + wr_unlock(&mining_thr_lock); + + adjust_mostdevs(); 
+#ifdef HAVE_CURSES + switch_logsize(true); +#endif +} + +#define DRIVER_DRV_DETECT_HOTPLUG(X) X##_drv.drv_detect(true); + +static void *hotplug_thread(void __maybe_unused *userdata) +{ + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + RenameThread("Hotplug"); + + hotplug_mode = true; + + cgsleep_ms(5000); + + while (0x2a) + { +// Version 0.1 just add the devices on - worry about using nodev later + + if (hotplug_time == 0) + cgsleep_ms(5000); + else + { + new_devices = 0; + new_threads = 0; + + /* Use the DRIVER_PARSE_COMMANDS macro to detect all + * devices */ + DRIVER_PARSE_COMMANDS(DRIVER_DRV_DETECT_HOTPLUG) + + if (new_devices) + hotplug_process(); + + // hotplug_time >0 && <=9999 + cgsleep_ms(hotplug_time * 1000); + } + } + + return NULL; +} +#endif + +static void probe_pools(void) +{ + int i; + + for (i = 0; i < total_pools; i++) + { + struct pool *pool = pools[i]; + + pool->testing = true; + pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool); + } +} + +#define DRIVER_FILL_DEVICE_DRV(X) fill_device_drv(&X##_drv); +#define DRIVER_DRV_DETECT_ALL(X) X##_drv.drv_detect(false); + +#ifdef USE_USBUTILS +static void *libusb_poll_thread(void __maybe_unused *arg) +{ + struct timeval tv_end = {1, 0}; + + RenameThread("USBPoll"); + + while (usb_polling) + libusb_handle_events_timeout_completed(NULL, &tv_end, NULL); + + /* Cancel any cancellable usb transfers */ + cancel_usb_transfers(); + + /* Keep event handling going until there are no async transfers in + * flight. 
*/ + do + { + libusb_handle_events_timeout_completed(NULL, &tv_end, NULL); + } + while (async_usb_transfers()); + + return NULL; +} + +static void initialise_usb(void) +{ + int err = libusb_init(NULL); + + if (err) + { + fprintf(stderr, "libusb_init() failed err %d", err); + fflush(stderr); + quit(1, "libusb_init() failed"); + } + initialise_usblocks(); + usb_polling = true; + pthread_create(&usb_poll_thread, NULL, libusb_poll_thread, NULL); +} +#else +#define initialise_usb() {} +#endif + +int main(int argc, char *argv[]) +{ + struct sigaction handler; + struct work *work = NULL; + bool pool_msg = false; + struct thr_info *thr; + struct block *block; + int i, j, slept = 0; + unsigned int k; + char *s; + + g_logfile_enable = false; + strcpy(g_logfile_path, "bmminer.log"); + strcpy(g_logfile_openflag, "a+"); + /* This dangerous functions tramples random dynamically allocated + * variables so do it before anything at all */ + if (unlikely(curl_global_init(CURL_GLOBAL_ALL))) + early_quit(1, "Failed to curl_global_init"); + +# ifdef __linux + /* If we're on a small lowspec platform with only one CPU, we should + * yield after dropping a lock to allow a thread waiting for it to be + * able to get CPU time to grab the lock. 
*/ + if (sysconf(_SC_NPROCESSORS_ONLN) == 1) + selective_yield = &sched_yield; +#endif + +#if LOCK_TRACKING + // Must be first + if (unlikely(pthread_mutex_init(&lockstat_lock, NULL))) + quithere(1, "Failed to pthread_mutex_init lockstat_lock errno=%d", errno); +#endif + + initial_args = malloc(sizeof(char *) * (argc + 1)); + for (i = 0; i < argc; i++) + initial_args[i] = strdup(argv[i]); + initial_args[argc] = NULL; + + mutex_init(&hash_lock); + mutex_init(&update_job_lock); + mutex_init(&console_lock); + cglock_init(&control_lock); + mutex_init(&stats_lock); + mutex_init(&sharelog_lock); + cglock_init(&ch_lock); + mutex_init(&sshare_lock); + rwlock_init(&blk_lock); + rwlock_init(&netacc_lock); + rwlock_init(&mining_thr_lock); + rwlock_init(&devices_lock); + + mutex_init(&lp_lock); + if (unlikely(pthread_cond_init(&lp_cond, NULL))) + early_quit(1, "Failed to pthread_cond_init lp_cond"); + + mutex_init(&restart_lock); + if (unlikely(pthread_cond_init(&restart_cond, NULL))) + early_quit(1, "Failed to pthread_cond_init restart_cond"); + + if (unlikely(pthread_cond_init(&gws_cond, NULL))) + early_quit(1, "Failed to pthread_cond_init gws_cond"); + + /* Create a unique get work queue */ + getq = tq_new(); + if (!getq) + early_quit(1, "Failed to create getq"); + /* We use the getq mutex as the staged lock */ + stgd_lock = &getq->mutex; + + initialise_usb(); + + snprintf(packagename, sizeof(packagename), "%s %s", PACKAGE, VERSION); + + handler.sa_handler = &sighandler; + handler.sa_flags = 0; + sigemptyset(&handler.sa_mask); + sigaction(SIGTERM, &handler, &termhandler); + sigaction(SIGINT, &handler, &inthandler); +#ifndef WIN32 + signal(SIGPIPE, SIG_IGN); +#else + timeBeginPeriod(1); +#endif + opt_kernel_path = alloca(PATH_MAX); + strcpy(opt_kernel_path, CGMINER_PREFIX); + cgminer_path = alloca(PATH_MAX); + s = strdup(argv[0]); + strcpy(cgminer_path, dirname(s)); + free(s); + strcat(cgminer_path, "/"); + + devcursor = 8; + logstart = devcursor + 1; + logcursor = logstart 
+ 1; + + block = calloc(sizeof(struct block), 1); + if (unlikely(!block)) + quit (1, "main OOM"); + for (i = 0; i < 36; i++) + strcat(block->hash, "0"); + HASH_ADD_STR(blocks, hash, block); + strcpy(current_hash, block->hash); + + INIT_LIST_HEAD(&scan_devices); + + /* parse command line */ + opt_register_table(opt_config_table, + "Options for both config file and command line"); + opt_register_table(opt_cmdline_table, + "Options for command line only"); + + opt_parse(&argc, argv, applog_and_exit); + if (argc != 1) + early_quit(1, "Unexpected extra commandline arguments"); + + if (!config_loaded) + load_default_config(); + + if (opt_benchmark || opt_benchfile) + { + struct pool *pool; + + pool = add_pool(); + pool->rpc_url = malloc(255); + if (opt_benchfile) + strcpy(pool->rpc_url, "Benchfile"); + else + strcpy(pool->rpc_url, "Benchmark"); + pool->rpc_user = pool->rpc_url; + pool->rpc_pass = pool->rpc_url; + pool->rpc_userpass = pool->rpc_url; + pool->sockaddr_url = pool->rpc_url; + strncpy(pool->diff, "?", sizeof(pool->diff)-1); + pool->diff[sizeof(pool->diff)-1] = '\0'; + enable_pool(pool); + pool->idle = false; + successful_connect = true; + + for (i = 0; i < 16; i++) + { + hex2bin(&bench_hidiff_bins[i][0], &bench_hidiffs[i][0], 160); + hex2bin(&bench_lodiff_bins[i][0], &bench_lodiffs[i][0], 160); + } + set_target(bench_target, 32); + } + + if(opt_version_path) + { + FILE * fpversion = fopen(opt_version_path, "rb"); + char tmp[256] = {0}; + int len = 0; + char * start = 0; + if(fpversion == NULL) + { + applog(LOG_ERR, "Open miner version file %s error", opt_version_path); + } + else + { + len = fread(tmp, 1, 256, fpversion); + if(len <= 0) + { + applog(LOG_ERR, "Read miner version file %s error %d", opt_version_path, len); + } + else + { + start = strstr(tmp, "\n"); + if(start == NULL) + { + strcpy(g_miner_compiletime, tmp); + } + else + { + memcpy(g_miner_compiletime, tmp, start-tmp); + strcpy(g_miner_type, start+1); + } + 
if(g_miner_compiletime[strlen(g_miner_compiletime)-1] == '\n') + g_miner_compiletime[strlen(g_miner_compiletime)-1] = 0; + if(g_miner_compiletime[strlen(g_miner_compiletime)-1] == '\r') + g_miner_compiletime[strlen(g_miner_compiletime)-1] = 0; + if(g_miner_type[strlen(g_miner_type)-1] == '\n') + g_miner_type[strlen(g_miner_type)-1] = 0; + if(g_miner_type[strlen(g_miner_type)-1] == '\r') + g_miner_type[strlen(g_miner_type)-1] = 0; + } + } + applog(LOG_ERR, "Miner compile time: %s type: %s", g_miner_compiletime, g_miner_type); + } + + if(opt_logfile_path) + { + g_logfile_enable = true; + strcpy(g_logfile_path, opt_logfile_path); + if(opt_logfile_openflag) + { + strcpy(g_logfile_openflag, opt_logfile_openflag); + } + applog(LOG_ERR, "Log file path: %s Open flag: %s", g_logfile_path, g_logfile_openflag); + } + + if(opt_logwork_path) + { + char szfilepath[256] = {0}; + if(opt_logwork_asicnum) + { + if(strlen(opt_logwork_asicnum) <= 0) + { + quit(1, "Log work asic num empty"); + } + g_logwork_asicnum = atoi(opt_logwork_asicnum); + if(g_logwork_asicnum != 1 && g_logwork_asicnum != 32 && g_logwork_asicnum != 64) + { + quit(1, "Log work asic num must be 1, 32, 64"); + } + applog(LOG_ERR, "Log work path: %s Asic num: %s", opt_logwork_path, opt_logwork_asicnum); + } + else + { + applog(LOG_ERR, "Log work path: %s", opt_logwork_path); + } + + sprintf(szfilepath, "%s.txt", opt_logwork_path); + g_logwork_file = fopen(szfilepath, "a+"); + applog(LOG_ERR, "Log work open file %s", szfilepath); + + if(g_logwork_asicnum == 1) + { + sprintf(szfilepath, "%s%02d.txt", opt_logwork_path, g_logwork_asicnum); + g_logwork_files[0] = fopen(szfilepath, "a+"); + applog(LOG_ERR, "Log work open asic %d file %s", g_logwork_asicnum, szfilepath); + } + else if(g_logwork_asicnum == 32 || g_logwork_asicnum == 64) + { + for(i = 0; i <= g_logwork_asicnum; i++) + { + sprintf(szfilepath, "%s%02d_%02d.txt", opt_logwork_path, g_logwork_asicnum, i); + g_logwork_files[i] = fopen(szfilepath, "a+"); + 
applog(LOG_ERR, "Log work open asic %d file %s", g_logwork_asicnum, szfilepath); + } + } + + if(opt_logwork_diff) + { + for(i = 0; i <= 64; i++) + { + sprintf(szfilepath, "%s_diff_%02d.txt", opt_logwork_path, i); + g_logwork_diffs[i] = fopen(szfilepath, "a+"); + applog(LOG_ERR, "Log work open diff file %s", szfilepath); + } + } + } + +#ifdef HAVE_CURSES + if (opt_realquiet || opt_display_devs) + use_curses = false; + + if (use_curses) + enable_curses(); +#endif + + applog(LOG_WARNING, "Started %s", packagename); + if (cnfbuf) + { + applog(LOG_NOTICE, "Loaded configuration file %s", cnfbuf); + switch (fileconf_load) + { + case 0: + applog(LOG_WARNING, "Fatal JSON error in configuration file."); + applog(LOG_WARNING, "Configuration file could not be used."); + break; + case -1: + applog(LOG_WARNING, "Error in configuration file, partially loaded."); + if (use_curses) + applog(LOG_WARNING, "Start bmminer with -T to see what failed to load."); + break; + default: + break; + } + free(cnfbuf); + cnfbuf = NULL; + } + + strcat(opt_kernel_path, "/"); + + if (want_per_device_stats) + opt_log_output = true; + + if (opt_scantime < 0) + opt_scantime = 60; + + total_control_threads = 8; + control_thr = calloc(total_control_threads, sizeof(*thr)); + if (!control_thr) + early_quit(1, "Failed to calloc control_thr"); + + gwsched_thr_id = 0; + +#ifdef USE_USBUTILS + usb_initialise(); + + // before device detection + cgsem_init(&usb_resource_sem); + usbres_thr_id = 1; + thr = &control_thr[usbres_thr_id]; + if (thr_info_create(thr, NULL, usb_resource_thread, thr)) + early_quit(1, "usb resource thread create failed"); + pthread_detach(thr->pth); +#endif + + /* Use the DRIVER_PARSE_COMMANDS macro to fill all the device_drvs */ + DRIVER_PARSE_COMMANDS(DRIVER_FILL_DEVICE_DRV) + + /* Use the DRIVER_PARSE_COMMANDS macro to detect all devices */ + DRIVER_PARSE_COMMANDS(DRIVER_DRV_DETECT_ALL) + + if (opt_display_devs) + { + applog(LOG_ERR, "Devices detected:"); + for (i = 0; i < 
total_devices; ++i) + { + struct cgpu_info *cgpu = devices[i]; + if (cgpu->name) + applog(LOG_ERR, " %2d. %s %d: %s (driver: %s)", i, cgpu->drv->name, cgpu->device_id, cgpu->name, cgpu->drv->dname); + else + applog(LOG_ERR, " %2d. %s %d (driver: %s)", i, cgpu->drv->name, cgpu->device_id, cgpu->drv->dname); + } + early_quit(0, "%d devices listed", total_devices); + } + + mining_threads = 0; + for (i = 0; i < total_devices; ++i) + enable_device(devices[i]); + +#ifdef USE_USBUTILS + if (!total_devices) + { + applog(LOG_WARNING, "No devices detected!"); + applog(LOG_WARNING, "Waiting for USB hotplug devices or press q to quit"); + } +#else + if (!total_devices) + early_quit(1, "All devices disabled, cannot mine!"); +#endif + + most_devices = total_devices; + + load_temp_cutoffs(); + + for (i = 0; i < total_devices; ++i) + devices[i]->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; + + if (!opt_compact) + { + logstart += most_devices; + logcursor = logstart + 1; +#ifdef HAVE_CURSES + check_winsizes(); +#endif + } + + if (!total_pools) + { + applog(LOG_WARNING, "Need to specify at least one pool server."); +#ifdef HAVE_CURSES + if (!use_curses || !input_pool(false)) +#endif + early_quit(1, "Pool setup failed"); + } + + for (i = 0; i < total_pools; i++) + { + struct pool *pool = pools[i]; + size_t siz; + + pool->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; + pool->cgminer_pool_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; + + if (!pool->rpc_userpass) + { + if (!pool->rpc_pass) + pool->rpc_pass = strdup(""); + if (!pool->rpc_user) + early_quit(1, "No login credentials supplied for pool %u %s", i, pool->rpc_url); + siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2; + pool->rpc_userpass = malloc(siz); + if (!pool->rpc_userpass) + early_quit(1, "Failed to malloc userpass"); + snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass); + } + } + /* Set the currentpool to pool 0 */ + currentpool = pools[0]; + +#ifdef HAVE_SYSLOG_H + if 
(use_syslog) + openlog(PACKAGE, LOG_PID, LOG_USER); +#endif + +#if defined(unix) || defined(__APPLE__) + if (opt_stderr_cmd) + fork_monitor(); +#endif // defined(unix) + + mining_thr = calloc(mining_threads, sizeof(thr)); + if (!mining_thr) + early_quit(1, "Failed to calloc mining_thr"); + for (i = 0; i < mining_threads; i++) + { + mining_thr[i] = calloc(1, sizeof(*thr)); + if (!mining_thr[i]) + early_quit(1, "Failed to calloc mining_thr[%d]", i); + } + + // Start threads + k = 0; + for (i = 0; i < total_devices; ++i) + { + struct cgpu_info *cgpu = devices[i]; + cgpu->thr = malloc(sizeof(*cgpu->thr) * (cgpu->threads+1)); + cgpu->thr[cgpu->threads] = NULL; + cgpu->status = LIFE_INIT; + + for (j = 0; j < cgpu->threads; ++j, ++k) + { + thr = get_thread(k); + thr->id = k; + thr->cgpu = cgpu; + thr->device_thread = j; + + if (!cgpu->drv->thread_prepare(thr)) + continue; + + if (unlikely(thr_info_create(thr, NULL, miner_thread, thr))) + early_quit(1, "thread %d create failed", thr->id); + + cgpu->thr[j] = thr; + + /* Enable threads for devices set not to mine but disable + * their queue in case we wish to enable them later */ + if (cgpu->deven != DEV_DISABLED) + { + applog(LOG_DEBUG, "Pushing sem post to thread %d", thr->id); + cgsem_post(&thr->sem); + } + } + } + + if (opt_benchmark || opt_benchfile) + goto begin_bench; + + for (i = 0; i < total_pools; i++) + { + struct pool *pool = pools[i]; + + enable_pool(pool); + pool->idle = true; + } + + /* Look for at least one active pool before starting */ + applog(LOG_NOTICE, "Probing for an alive pool"); + probe_pools(); + do + { + sleep(1); + slept++; + } + while (!pools_active && slept < 60); + + while (!pools_active) + { + if (!pool_msg) + { + applog(LOG_ERR, "No servers were found that could be used to get work from."); + applog(LOG_ERR, "Please check the details from the list below of the servers you have input"); + applog(LOG_ERR, "Most likely you have input the wrong URL, forgotten to add a port, or have not set up 
workers"); + for (i = 0; i < total_pools; i++) + { + struct pool *pool = pools[i]; + + applog(LOG_WARNING, "Pool: %d URL: %s User: %s Password: %s", + i, pool->rpc_url, pool->rpc_user, pool->rpc_pass); + } + pool_msg = true; + if (use_curses) + applog(LOG_ERR, "Press any key to exit, or bmminer will wait indefinitely for an alive pool."); + } + if (!use_curses) + early_quit(0, "No servers could be used! Exiting."); +#ifdef HAVE_CURSES + touchwin(logwin); + wrefresh(logwin); + halfdelay(10); + if (getch() != ERR) + early_quit(0, "No servers could be used! Exiting."); + cbreak(); +#endif + }; + +begin_bench: + total_mhashes_done = 0; + for(i = 0; i < CG_LOCAL_MHASHES_MAX_NUM; i++) + { + g_local_mhashes_dones[i] = 0; + } + g_local_mhashes_index = 0; + for (i = 0; i < total_devices; i++) + { + struct cgpu_info *cgpu = devices[i]; + + cgpu->rolling = cgpu->total_mhashes = 0; + } + + cgtime(&total_tv_start); + cgtime(&total_tv_end); + cgtime(&tv_hashmeter); + get_datestamp(datestamp, sizeof(datestamp), &total_tv_start); + + watchpool_thr_id = 2; + thr = &control_thr[watchpool_thr_id]; + /* start watchpool thread */ + if (thr_info_create(thr, NULL, watchpool_thread, NULL)) + early_quit(1, "watchpool thread create failed"); + pthread_detach(thr->pth); + + watchdog_thr_id = 3; + thr = &control_thr[watchdog_thr_id]; + /* start watchdog thread */ + if (thr_info_create(thr, NULL, watchdog_thread, NULL)) + early_quit(1, "watchdog thread create failed"); + pthread_detach(thr->pth); + + /* Create API socket thread */ + api_thr_id = 5; + thr = &control_thr[api_thr_id]; + if (thr_info_create(thr, NULL, api_thread, thr)) + early_quit(1, "API thread create failed"); + +#ifdef USE_USBUTILS + hotplug_thr_id = 6; + thr = &control_thr[hotplug_thr_id]; + if (thr_info_create(thr, NULL, hotplug_thread, thr)) + early_quit(1, "hotplug thread create failed"); + pthread_detach(thr->pth); +#endif + +#ifdef HAVE_CURSES + /* Create curses input thread for keyboard input. 
Create this last so + * that we know all threads are created since this can call kill_work + * to try and shut down all previous threads. */ + input_thr_id = 7; + thr = &control_thr[input_thr_id]; + if (thr_info_create(thr, NULL, input_thread, thr)) + early_quit(1, "input thread create failed"); + pthread_detach(thr->pth); +#endif + + /* Just to be sure */ + if (total_control_threads != 8) + early_quit(1, "incorrect total_control_threads (%d) should be 8", total_control_threads); + + set_highprio(); + + /* Once everything is set up, main() becomes the getwork scheduler */ + while (42) + { + int ts, max_staged = max_queue; + struct pool *pool, *cp; + bool lagging = false; + + if (opt_work_update) + signal_work_update(); + opt_work_update = false; + cp = current_pool(); + + /* If the primary pool is a getwork pool and cannot roll work, + * try to stage one extra work per mining thread */ + if (!pool_localgen(cp) && !staged_rollable) + max_staged += mining_threads; + + mutex_lock(stgd_lock); + ts = __total_staged(); + + if (!pool_localgen(cp) && !ts && !opt_fail_only) + lagging = true; + + /* Wait until hash_pop tells us we need to create more work */ + if (ts > max_staged) + { + if (work_emptied && max_queue < opt_queue) + { + max_queue++; + work_emptied = false; + } + work_filled = true; + pthread_cond_wait(&gws_cond, stgd_lock); + ts = __total_staged(); + } + mutex_unlock(stgd_lock); + + if (ts > max_staged) + { + /* Keeps slowly generating work even if it's not being + * used to keep last_getwork incrementing and to see + * if pools are still alive. 
*/ + if (work_emptied && max_queue < opt_queue) + { + max_queue++; + work_emptied = false; + } + work_filled = true; + work = hash_pop(false); + if (work) + discard_work(work); + continue; + } + + if (work) + discard_work(work); + work = make_work(); + + if (lagging && !pool_tset(cp, &cp->lagging)) + { + applog(LOG_WARNING, "Pool %d not providing work fast enough", cp->pool_no); + cp->getfail_occasions++; + total_go++; + if (!pool_localgen(cp) && max_queue < opt_queue) + applog(LOG_INFO, "Increasing queue to %d", ++max_queue); + } + pool = select_pool(lagging); + retry: + if (pool->has_stratum) + { + while (!pool->stratum_active || !pool->stratum_notify) + { + struct pool *altpool = select_pool(true); + + cgsleep_ms(5000); + if (altpool != pool) + { + pool = altpool; + goto retry; + } + } + gen_stratum_work(pool, work); + applog(LOG_DEBUG, "Generated stratum work"); + stage_work(work); + continue; + } + + if (opt_benchfile) + { + get_benchfile_work(work); + applog(LOG_DEBUG, "Generated benchfile work"); + stage_work(work); + continue; + } + else if (opt_benchmark) + { + get_benchmark_work(work); + applog(LOG_DEBUG, "Generated benchmark work"); + stage_work(work); + continue; + } + +#ifdef HAVE_LIBCURL + struct curl_ent *ce; + + if (pool->gbt_solo) + { + while (pool->idle) + { + struct pool *altpool = select_pool(true); + + cgsleep_ms(5000); + if (altpool != pool) + { + pool = altpool; + goto retry; + } + } + gen_solo_work(pool, work); + applog(LOG_DEBUG, "Generated GBT SOLO work"); + stage_work(work); + continue; + } + + if (pool->has_gbt) + { + while (pool->idle) + { + struct pool *altpool = select_pool(true); + + cgsleep_ms(5000); + if (altpool != pool) + { + pool = altpool; + goto retry; + } + } + gen_gbt_work(pool, work); + applog(LOG_DEBUG, "Generated GBT work"); + stage_work(work); + continue; + } + + if (clone_available()) + { + applog(LOG_DEBUG, "Cloned getwork work"); + free_work(work); + continue; + } + + work->pool = pool; + ce = pop_curl_entry(pool); + 
/* obtain new work from bitcoin via JSON-RPC */ + if (!get_upstream_work(work, ce->curl)) + { + applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, retrying in 5s", pool->pool_no); + /* Make sure the pool just hasn't stopped serving + * requests but is up as we'll keep hammering it */ + if (++pool->seq_getfails > mining_threads + opt_queue) + pool_died(pool); + cgsleep_ms(5000); + push_curl_entry(ce, pool); + pool = select_pool(!opt_fail_only); + free_work(work); + goto retry; + } + if (ts >= max_staged) + pool_tclear(pool, &pool->lagging); + if (pool_tclear(pool, &pool->idle)) + pool_resus(pool); + + applog(LOG_DEBUG, "Generated getwork work"); + stage_work(work); + push_curl_entry(ce, pool); +#endif + } + + return 0; +} diff --git a/compat.h b/compat.h new file mode 100644 index 0000000..ed18baa --- /dev/null +++ b/compat.h @@ -0,0 +1,88 @@ +#ifndef __COMPAT_H__ +#define __COMPAT_H__ + +#ifdef WIN32 +#include "config.h" +#include +#include +#include +#include + +#include "miner.h" // for timersub +#include "util.h" + +#include + +#ifndef HAVE_LIBWINPTHREAD +static inline int nanosleep(const struct timespec *req, struct timespec *rem) +{ + struct timeval tstart; + DWORD msecs; + + cgtime(&tstart); + msecs = (req->tv_sec * 1000) + ((999999 + req->tv_nsec) / 1000000); + + if (SleepEx(msecs, true) == WAIT_IO_COMPLETION) { + if (rem) { + struct timeval tdone, tnow, tleft; + tdone.tv_sec = tstart.tv_sec + req->tv_sec; + tdone.tv_usec = tstart.tv_usec + ((999 + req->tv_nsec) / 1000); + if (tdone.tv_usec > 1000000) { + tdone.tv_usec -= 1000000; + ++tdone.tv_sec; + } + + cgtime(&tnow); + if (timercmp(&tnow, &tdone, >)) + return 0; + timersub(&tdone, &tnow, &tleft); + + rem->tv_sec = tleft.tv_sec; + rem->tv_nsec = tleft.tv_usec * 1000; + } + errno = EINTR; + return -1; + } + return 0; +} +#endif + +static inline int sleep(unsigned int secs) +{ + struct timespec req, rem; + req.tv_sec = secs; + req.tv_nsec = 0; + if (!nanosleep(&req, &rem)) + return 0; + return 
rem.tv_sec + (rem.tv_nsec ? 1 : 0); +} + +enum { + PRIO_PROCESS = 0, +}; + +static inline int setpriority(__maybe_unused int which, __maybe_unused int who, __maybe_unused int prio) +{ + /* FIXME - actually do something */ + return 0; +} + +typedef unsigned long int ulong; +typedef unsigned short int ushort; +typedef unsigned int uint; + +#ifndef __SUSECONDS_T_TYPE +typedef long suseconds_t; +#endif + +#ifdef HAVE_LIBWINPTHREAD +#define PTH(thr) ((thr)->pth) +#else +#define PTH(thr) ((thr)->pth.p) +#endif + +#else +#define PTH(thr) ((thr)->pth) +#endif /* WIN32 */ + +#endif /* __COMPAT_H__ */ diff --git a/compat/.gitignore b/compat/.gitignore new file mode 100644 index 0000000..8e687f8 --- /dev/null +++ b/compat/.gitignore @@ -0,0 +1,3 @@ +libusb-1.0/libusb/libusb-1.0.la +libusb-1.0/libusb/*.lo +libusb-1.0/libusb/os/*.lo diff --git a/compat/Makefile.am b/compat/Makefile.am new file mode 100644 index 0000000..5a32214 --- /dev/null +++ b/compat/Makefile.am @@ -0,0 +1,8 @@ + +SUBDIRS = jansson-2.6 + +if WANT_USBUTILS +if WANT_STATIC_LIBUSB +SUBDIRS += libusb-1.0 +endif +endif diff --git a/compat/jansson-2.6/CHANGES b/compat/jansson-2.6/CHANGES new file mode 100644 index 0000000..99d1647 --- /dev/null +++ b/compat/jansson-2.6/CHANGES @@ -0,0 +1,583 @@ +Version 2.6 +=========== + +Released 2014-02-11 + +* Security: + + - CVE-2013-6401: The hash function used by the hashtable + implementation has been changed, and is automatically seeded with + random data when the first JSON object is created. This prevents + an attacker from causing large JSON objects with specially crafted + keys perform poorly. + +* New features: + + - `json_object_seed()`: Set the seed value of the hash function. + +* Bug fixes: + + - Include CMake specific files in the release tarball. + +* Documentation: + + - Fix tutorial source to send a User-Agent header, which is now + required by the GitHub API. + + - Set all memory to zero in secure_free() example. 
+ + +Version 2.5 +=========== + +Released 2013-09-19 + +* New features: + + - `json_pack()` and friends: Add format specifiers ``s#``, ``+`` and + ``+#``. + + - Add ``JSON_DECODE_INT_AS_REAL`` decoding flag to treat all numbers + as real in the decoder (#123). + + - Add `json_array_foreach()`, paralleling `json_object_foreach()` + (#118). + +* Bug fixes: + + - `json_dumps()` and friends: Don't crash if json is *NULL* and + ``JSON_ENCODE_ANY`` is set. + + - Fix a theoretical integer overflow in `jsonp_strdup()`. + + - Fix `l_isxdigit()` macro (#97). + + - Fix an off-by-one error in `json_array_remove()`. + +* Build: + + - Support CMake in addition to GNU Autotools (#106, #107, #112, + #115, #120, #127). + + - Support building for Android (#109). + + - Don't use ``-Werror`` by default. + + - Support building and testing with VPATH (#93). + + - Fix compilation when ``NDEBUG`` is defined (#128) + +* Tests: + + - Fix a refleak in ``test/bin/json_process.c``. + +* Documentation: + + - Clarify the return value of `json_load_callback_t`. + + - Document how to circumvent problems with separate heaps on Windows. + + - Fix memory leaks and warnings in ``github_commits.c``. + + - Use `json_decref()` properly in tutorial. + +* Other: + + - Make it possible to forward declare ``struct json_t``. + + +Version 2.4 +=========== + +Released 2012-09-23 + +* New features: + + - Add `json_boolean()` macro that returns the JSON true or false + value based on its argument (#86). + + - Add `json_load_callback()` that calls a callback function + repeatedly to read the JSON input (#57). + + - Add JSON_ESCAPE_SLASH encoding flag to escape all occurences of + ``/`` with ``\/``. + +* Bug fixes: + + - Check for and reject NaN and Inf values for reals. Encoding these + values resulted in invalid JSON. + + - Fix `json_real_set()` to return -1 on error. 
+ +* Build: + + - Jansson now builds on Windows with Visual Studio 2010, and + includes solution and project files in ``win32/vs2010/`` + directory. + + - Fix build warnings (#77, #78). + + - Add ``-no-undefined`` to LDFLAGS (#90). + +* Tests: + + - Fix the symbol exports test on Linux/PPC64 (#88). + +* Documentation: + + - Fix typos (#73, #84). + + +Version 2.3.1 +============= + +Released 2012-04-20 + +* Build issues: + + - Only use ``long long`` if ``strtoll()`` is also available. + +* Documentation: + + - Fix the names of library version constants in documentation. (#52) + + - Change the tutorial to use GitHub API v3. (#65) + +* Tests: + + - Make some tests locale independent. (#51) + + - Distribute the library exports test in the tarball. + + - Make test run on shells that don't support the ``export FOO=bar`` + syntax. + + +Version 2.3 +=========== + +Released 2012-01-27 + +* New features: + + - `json_unpack()` and friends: Add support for optional object keys + with the ``{s?o}`` syntax. + + - Add `json_object_update_existing()` and + `json_object_update_missing()`, for updating only existing keys or + only adding missing keys to an object. (#37) + + - Add `json_object_foreach()` for more convenient iteration over + objects. (#45, #46) + + - When decoding JSON, write the number of bytes that were read from + input to ``error.position`` also on success. This is handy with + ``JSON_DISABLE_EOF_CHECK``. + + - Add support for decoding any JSON value, not just arrays or + objects. The support is enabled with the new ``JSON_DECODE_ANY`` + flag. Patch by Andrea Marchesini. (#4) + +* Bug fixes + + - Avoid problems with object's serial number growing too big. (#40, + #41) + + - Decoding functions now return NULL if the first argument is NULL. + Patch by Andrea Marchesini. + + - Include ``jansson_config.h.win32`` in the distribution tarball. + + - Remove ``+`` and leading zeros from exponents in the encoder. + (#39) + + - Make Jansson build and work on MinGW. 
(#39, #38) + +* Documentation + + - Note that the same JSON values must not be encoded in parallel by + separate threads. (#42) + + - Document MinGW support. + + +Version 2.2.1 +============= + +Released 2011-10-06 + +* Bug fixes: + + - Fix real number encoding and decoding under non-C locales. (#32) + + - Fix identifier decoding under non-UTF-8 locales. (#35) + + - `json_load_file()`: Open the input file in binary mode for maximum + compatiblity. + +* Documentation: + + - Clarify the lifecycle of the result of the ``s`` fromat of + `json_unpack()`. (#31) + + - Add some portability info. (#36) + + - Little clarifications here and there. + +* Other: + + - Some style fixes, issues detected by static analyzers. + + +Version 2.2 +=========== + +Released 2011-09-03 + +* New features: + + - `json_dump_callback()`: Pass the encoder output to a callback + function in chunks. + +* Bug fixes: + + - `json_string_set()`: Check that target is a string and value is + not NULL. + +* Other: + + - Documentation typo fixes and clarifications. + + +Version 2.1 +=========== + +Released 2011-06-10 + +* New features: + + - `json_loadb()`: Decode a string with a given size, useful if the + string is not null terminated. + + - Add ``JSON_ENCODE_ANY`` encoding flag to allow encoding any JSON + value. By default, only arrays and objects can be encoded. (#19) + + - Add ``JSON_REJECT_DUPLICATES`` decoding flag to issue a decoding + error if any JSON object in the input contins duplicate keys. (#3) + + - Add ``JSON_DISABLE_EOF_CHECK`` decoding flag to stop decoding after a + valid JSON input. This allows other data after the JSON data. + +* Bug fixes: + + - Fix an additional memory leak when memory allocation fails in + `json_object_set()` and friends. + + - Clear errno before calling `strtod()` for better portability. (#27) + +* Building: + + - Avoid set-but-not-used warning/error in a test. (#20) + +* Other: + + - Minor clarifications to documentation. 
+ + +Version 2.0.1 +============= + +Released 2011-03-31 + +* Bug fixes: + + - Replace a few `malloc()` and `free()` calls with their + counterparts that support custom memory management. + + - Fix object key hashing in json_unpack() strict checking mode. + + - Fix the parentheses in ``JANSSON_VERSION_HEX`` macro. + + - Fix `json_object_size()` return value. + + - Fix a few compilation issues. + +* Portability: + + - Enhance portability of `va_copy()`. + + - Test framework portability enhancements. + +* Documentation: + + - Distribute ``doc/upgrading.rst`` with the source tarball. + + - Build documentation in strict mode in ``make distcheck``. + + +Version 2.0 +=========== + +Released 2011-02-28 + +This release is backwards incompatible with the 1.x release series. +See the chapter "Upgrading from older versions" in documentation for +details. + +* Backwards incompatible changes: + + - Unify unsigned integer usage in the API: All occurences of + unsigned int and unsigned long have been replaced with size_t. + + - Change JSON integer's underlying type to the widest signed integer + type available, i.e. long long if it's supported, otherwise long. + Add a typedef json_int_t that defines the type. + + - Change the maximum indentation depth to 31 spaces in encoder. This + frees up bits from the flags parameter of encoding functions + `json_dumpf()`, `json_dumps()` and `json_dump_file()`. + + - For future needs, add a flags parameter to all decoding functions + `json_loadf()`, `json_loads()` and `json_load_file()`. + +* New features + + - `json_pack()`, `json_pack_ex()`, `json_vpack_ex()`: Create JSON + values based on a format string. + + - `json_unpack()`, `json_unpack_ex()`, `json_vunpack_ex()`: Simple + value extraction and validation functionality based on a format + string. + + - Add column, position and source fields to the ``json_error_t`` + struct. + + - Enhance error reporting in the decoder. 
+ + - ``JANSSON_VERSION`` et al.: Preprocessor constants that define the + library version. + + - `json_set_alloc_funcs()`: Set custom memory allocation functions. + +* Fix many portability issues, especially on Windows. + +* Configuration + + - Add file ``jansson_config.h`` that contains site specific + configuration. It's created automatically by the configure script, + or can be created by hand if the configure script cannot be used. + The file ``jansson_config.h.win32`` can be used without + modifications on Windows systems. + + - Add a section to documentation describing how to build Jansson on + Windows. + + - Documentation now requires Sphinx 1.0 or newer. + + +Version 1.3 +=========== + +Released 2010-06-13 + +* New functions: + + - `json_object_iter_set()`, `json_object_iter_set_new()`: Change + object contents while iterating over it. + + - `json_object_iter_at()`: Return an iterator that points to a + specific object item. + +* New encoding flags: + + - ``JSON_PRESERVE_ORDER``: Preserve the insertion order of object + keys. 
+ +* Bug fixes: + + - Fix an error that occured when an array or object was first + encoded as empty, then populated with some data, and then + re-encoded + + - Fix the situation like above, but when the first encoding resulted + in an error + +* Documentation: + + - Clarify the documentation on reference stealing, providing an + example usage pattern + + +Version 1.2.1 +============= + +Released 2010-04-03 + +* Bug fixes: + + - Fix reference counting on ``true``, ``false`` and ``null`` + - Estimate real number underflows in decoder with 0.0 instead of + issuing an error + +* Portability: + + - Make ``int32_t`` available on all systems + - Support compilers that don't have the ``inline`` keyword + - Require Autoconf 2.60 (for ``int32_t``) + +* Tests: + + - Print test names correctly when ``VERBOSE=1`` + - ``test/suites/api``: Fail when a test fails + - Enhance tests for iterators + - Enhance tests for decoding texts that contain null bytes + +* Documentation: + + - Don't remove ``changes.rst`` in ``make clean`` + - Add a chapter on RFC conformance + + +Version 1.2 +=========== + +Released 2010-01-21 + +* New functions: + + - `json_equal()`: Test whether two JSON values are equal + - `json_copy()` and `json_deep_copy()`: Make shallow and deep copies + of JSON values + - Add a version of all functions taking a string argument that + doesn't check for valid UTF-8: `json_string_nocheck()`, + `json_string_set_nocheck()`, `json_object_set_nocheck()`, + `json_object_set_new_nocheck()` + +* New encoding flags: + + - ``JSON_SORT_KEYS``: Sort objects by key + - ``JSON_ENSURE_ASCII``: Escape all non-ASCII Unicode characters + - ``JSON_COMPACT``: Use a compact representation with all unneeded + whitespace stripped + +* Bug fixes: + + - Revise and unify whitespace usage in encoder: Add spaces between + array and object items, never append newline to output. 
+ - Remove const qualifier from the ``json_t`` parameter in + `json_string_set()`, `json_integer_set()` and `json_real_set`. + - Use ``int32_t`` internally for representing Unicode code points + (int is not enough on all platforms) + +* Other changes: + + - Convert ``CHANGES`` (this file) to reStructured text and add it to + HTML documentation + - The test system has been refactored. Python is no longer required + to run the tests. + - Documentation can now be built by invoking ``make html`` + - Support for pkg-config + + +Version 1.1.3 +============= + +Released 2009-12-18 + +* Encode reals correctly, so that first encoding and then decoding a + real always produces the same value +* Don't export private symbols in ``libjansson.so`` + + +Version 1.1.2 +============= + +Released 2009-11-08 + +* Fix a bug where an error message was not produced if the input file + could not be opened in `json_load_file()` +* Fix an assertion failure in decoder caused by a minus sign without a + digit after it +* Remove an unneeded include of ``stdint.h`` in ``jansson.h`` + + +Version 1.1.1 +============= + +Released 2009-10-26 + +* All documentation files were not distributed with v1.1; build + documentation in make distcheck to prevent this in the future +* Fix v1.1 release date in ``CHANGES`` + + +Version 1.1 +=========== + +Released 2009-10-20 + +* API additions and improvements: + + - Extend array and object APIs + - Add functions to modify integer, real and string values + - Improve argument validation + - Use unsigned int instead of ``uint32_t`` for encoding flags + +* Enhance documentation + + - Add getting started guide and tutorial + - Fix some typos + - General clarifications and cleanup + +* Check for integer and real overflows and underflows in decoder +* Make singleton values thread-safe (``true``, ``false`` and ``null``) +* Enhance circular reference handling +* Don't define ``-std=c99`` in ``AM_CFLAGS`` +* Add C++ guards to ``jansson.h`` +* Minor performance and 
portability improvements +* Expand test coverage + + +Version 1.0.4 +============= + +Released 2009-10-11 + +* Relax Autoconf version requirement to 2.59 +* Make Jansson compile on platforms where plain ``char`` is unsigned +* Fix API tests for object + + +Version 1.0.3 +============= + +Released 2009-09-14 + +* Check for integer and real overflows and underflows in decoder +* Use the Python json module for tests, or simplejson if the json + module is not found +* Distribute changelog (this file) + + +Version 1.0.2 +============= + +Released 2009-09-08 + +* Handle EOF correctly in decoder + + +Version 1.0.1 +============= + +Released 2009-09-04 + +* Fixed broken `json_is_boolean()` + + +Version 1.0 +=========== + +Released 2009-08-25 + +* Initial release diff --git a/compat/jansson-2.6/LICENSE b/compat/jansson-2.6/LICENSE new file mode 100644 index 0000000..a8fb5b8 --- /dev/null +++ b/compat/jansson-2.6/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2009-2013 Petri Lehtinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/compat/jansson-2.6/Makefile.am b/compat/jansson-2.6/Makefile.am new file mode 100644 index 0000000..a6efeb0 --- /dev/null +++ b/compat/jansson-2.6/Makefile.am @@ -0,0 +1,17 @@ +ACLOCAL_AMFLAGS = -I m4 + +EXTRA_DIST = CHANGES LICENSE README.rst +SUBDIRS = src + +# "make distcheck" builds the dvi target, so use it to check that the +# documentation is built correctly. +dvi: + $(MAKE) SPHINXOPTS_EXTRA=-W html + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = jansson.pc + +if GCC +# These flags are gcc specific +export AM_CFLAGS = -Wall -Wextra -Wdeclaration-after-statement +endif diff --git a/compat/jansson-2.6/README.rst b/compat/jansson-2.6/README.rst new file mode 100644 index 0000000..a01cbc0 --- /dev/null +++ b/compat/jansson-2.6/README.rst @@ -0,0 +1,63 @@ +Jansson README +============== + +.. image:: https://travis-ci.org/akheron/jansson.png + :alt: Build status + :target: https://travis-ci.org/akheron/jansson + +Jansson_ is a C library for encoding, decoding and manipulating JSON +data. Its main features and design principles are: + +- Simple and intuitive API and data model + +- Comprehensive documentation + +- No dependencies on other libraries + +- Full Unicode support (UTF-8) + +- Extensive test suite + +Jansson is licensed under the `MIT license`_; see LICENSE in the +source distribution for details. + + +Compilation and Installation +---------------------------- + +If you obtained a source tarball, just use the standard autotools +commands:: + + $ ./configure + $ make + $ make install + +To run the test suite, invoke:: + + $ make check + +If the source has been checked out from a Git repository, the +./configure script has to be generated first. 
The easiest way is to +use autoreconf:: + + $ autoreconf -i + + +Documentation +------------- + +Prebuilt HTML documentation is available at +http://www.digip.org/jansson/doc/. + +The documentation source is in the ``doc/`` subdirectory. To generate +HTML documentation, invoke:: + + $ make html + +Then, point your browser to ``doc/_build/html/index.html``. Sphinx_ +1.0 or newer is required to generate the documentation. + + +.. _Jansson: http://www.digip.org/jansson/ +.. _`MIT license`: http://www.opensource.org/licenses/mit-license.php +.. _Sphinx: http://sphinx.pocoo.org/ diff --git a/compat/jansson-2.6/configure.ac b/compat/jansson-2.6/configure.ac new file mode 100644 index 0000000..1977aa6 --- /dev/null +++ b/compat/jansson-2.6/configure.ac @@ -0,0 +1,101 @@ +AC_PREREQ([2.60]) +AC_INIT([jansson], [2.6], [petri@digip.org]) + +AC_CONFIG_MACRO_DIR([m4]) + +AM_INIT_AUTOMAKE([1.10 foreign]) +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) + +AC_CONFIG_SRCDIR([src/value.c]) +AC_CONFIG_HEADERS([config.h]) + +# Checks for programs. +AC_PROG_CC +AC_PROG_LIBTOOL +AM_CONDITIONAL([GCC], [test x$GCC = xyes]) + +# Checks for libraries. + +# Checks for header files. +AC_CHECK_HEADERS([endian.h fcntl.h locale.h sched.h unistd.h sys/param.h sys/stat.h sys/time.h sys/types.h]) + +# Checks for typedefs, structures, and compiler characteristics. +AC_TYPE_INT32_T +AC_TYPE_UINT32_T +AC_TYPE_LONG_LONG_INT + +AC_C_INLINE +case $ac_cv_c_inline in + yes) json_inline=inline;; + no) json_inline=;; + *) json_inline=$ac_cv_c_inline;; +esac +AC_SUBST([json_inline]) + +# Checks for library functions. 
+AC_CHECK_FUNCS([close getpid gettimeofday localeconv open read sched_yield strtoll]) + +AC_MSG_CHECKING([for gcc __sync builtins]) +have_sync_builtins=no +AC_TRY_LINK( + [], [unsigned long val; __sync_bool_compare_and_swap(&val, 0, 1);], + [have_sync_builtins=yes], +) +if test "x$have_sync_builtins" = "xyes"; then + AC_DEFINE([HAVE_SYNC_BUILTINS], [1], + [Define to 1 if gcc's __sync builtins are available]) +fi +AC_MSG_RESULT([$have_sync_builtins]) + +AC_MSG_CHECKING([for gcc __atomic builtins]) +have_atomic_builtins=no +AC_TRY_LINK( + [], [char l; unsigned long v; __atomic_test_and_set(&l, __ATOMIC_RELAXED); __atomic_store_n(&v, 1, __ATOMIC_ACQ_REL); __atomic_load_n(&v, __ATOMIC_ACQUIRE);], + [have_atomic_builtins=yes], +) +if test "x$have_atomic_builtins" = "xyes"; then + AC_DEFINE([HAVE_ATOMIC_BUILTINS], [1], + [Define to 1 if gcc's __atomic builtins are available]) +fi +AC_MSG_RESULT([$have_atomic_builtins]) + +case "$ac_cv_type_long_long_int$ac_cv_func_strtoll" in + yesyes) json_have_long_long=1;; + *) json_have_long_long=0;; +esac +AC_SUBST([json_have_long_long]) + +case "$ac_cv_header_locale_h$ac_cv_func_localeconv" in + yesyes) json_have_localeconv=1;; + *) json_have_localeconv=0;; +esac +AC_SUBST([json_have_localeconv]) + +# Features +AC_ARG_ENABLE([urandom], + [AS_HELP_STRING([--disable-urandom], + [Don't use /dev/urandom to seed the hash function])], + [use_urandom=$enableval], [use_urandom=yes]) + +if test "x$use_urandom" = xyes; then +AC_DEFINE([USE_URANDOM], [1], + [Define to 1 if /dev/urandom should be used for seeding the hash function]) +fi + +AC_ARG_ENABLE([windows-cryptoapi], + [AS_HELP_STRING([--disable-windows-cryptoapi], + [Don't use CryptGenRandom to seed the hash function])], + [use_windows_cryptoapi=$enableval], [use_windows_cryptoapi=yes]) + +if test "x$use_windows_cryptoapi" = xyes; then +AC_DEFINE([USE_WINDOWS_CRYPTOAPI], [1], + [Define to 1 if CryptGenRandom should be used for seeding the hash function]) +fi + +AC_CONFIG_FILES([ + 
jansson.pc + Makefile + src/Makefile + src/jansson_config.h +]) +AC_OUTPUT diff --git a/compat/jansson-2.6/jansson.pc.in b/compat/jansson-2.6/jansson.pc.in new file mode 100644 index 0000000..d9bf4da --- /dev/null +++ b/compat/jansson-2.6/jansson.pc.in @@ -0,0 +1,10 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=${prefix}/include + +Name: Jansson +Description: Library for encoding, decoding and manipulating JSON data +Version: @VERSION@ +Libs: -L${libdir} -ljansson +Cflags: -I${includedir} diff --git a/compat/jansson-2.6/m4/.gitignore b/compat/jansson-2.6/m4/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/compat/jansson-2.6/src/Makefile.am b/compat/jansson-2.6/src/Makefile.am new file mode 100644 index 0000000..e1a5493 --- /dev/null +++ b/compat/jansson-2.6/src/Makefile.am @@ -0,0 +1,24 @@ +EXTRA_DIST = jansson.def + +include_HEADERS = jansson.h jansson_config.h + +lib_LTLIBRARIES = libjansson.la +libjansson_la_SOURCES = \ + dump.c \ + error.c \ + hashtable.c \ + hashtable.h \ + jansson_private.h \ + load.c \ + memory.c \ + pack_unpack.c \ + strbuffer.c \ + strbuffer.h \ + strconv.c \ + utf.c \ + utf.h \ + value.c +libjansson_la_LDFLAGS = \ + -no-undefined \ + -export-symbols-regex '^json_' \ + -version-info 9:0:5 diff --git a/compat/jansson-2.6/src/dump.c b/compat/jansson-2.6/src/dump.c new file mode 100644 index 0000000..3b19c73 --- /dev/null +++ b/compat/jansson-2.6/src/dump.c @@ -0,0 +1,456 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include +#include +#include +#include + +#include "jansson.h" +#include "jansson_private.h" +#include "strbuffer.h" +#include "utf.h" + +#define MAX_INTEGER_STR_LENGTH 100 +#define MAX_REAL_STR_LENGTH 100 + +struct object_key { + size_t serial; + const char *key; +}; + +static int dump_to_strbuffer(const char *buffer, size_t size, void *data) +{ + return strbuffer_append_bytes((strbuffer_t *)data, buffer, size); +} + +static int dump_to_file(const char *buffer, size_t size, void *data) +{ + FILE *dest = (FILE *)data; + if(fwrite(buffer, size, 1, dest) != 1) + return -1; + return 0; +} + +/* 32 spaces (the maximum indentation size) */ +static const char whitespace[] = " "; + +static int dump_indent(size_t flags, int depth, int space, json_dump_callback_t dump, void *data) +{ + if(JSON_INDENT(flags) > 0) + { + int i, ws_count = JSON_INDENT(flags); + + if(dump("\n", 1, data)) + return -1; + + for(i = 0; i < depth; i++) + { + if(dump(whitespace, ws_count, data)) + return -1; + } + } + else if(space && !(flags & JSON_COMPACT)) + { + return dump(" ", 1, data); + } + return 0; +} + +static int dump_string(const char *str, json_dump_callback_t dump, void *data, size_t flags) +{ + const char *pos, *end; + int32_t codepoint; + + if(dump("\"", 1, data)) + return -1; + + end = pos = str; + while(1) + { + const char *text; + char seq[13]; + int length; + + while(*end) + { + end = utf8_iterate(pos, &codepoint); + if(!end) + return -1; + + /* mandatory escape or control char */ + if(codepoint == '\\' || codepoint == '"' || codepoint < 0x20) + break; + + /* slash */ + if((flags & JSON_ESCAPE_SLASH) && codepoint == '/') + break; + + /* non-ASCII */ + if((flags & JSON_ENSURE_ASCII) && codepoint > 0x7F) + break; + + pos = end; + } + + if(pos != str) { + if(dump(str, pos - str, data)) + return -1; + } + + if(end == pos) + break; + + /* handle \, /, ", and control codes */ + length = 2; + switch(codepoint) + { + case '\\': 
text = "\\\\"; break; + case '\"': text = "\\\""; break; + case '\b': text = "\\b"; break; + case '\f': text = "\\f"; break; + case '\n': text = "\\n"; break; + case '\r': text = "\\r"; break; + case '\t': text = "\\t"; break; + case '/': text = "\\/"; break; + default: + { + /* codepoint is in BMP */ + if(codepoint < 0x10000) + { + sprintf(seq, "\\u%04x", codepoint); + length = 6; + } + + /* not in BMP -> construct a UTF-16 surrogate pair */ + else + { + int32_t first, last; + + codepoint -= 0x10000; + first = 0xD800 | ((codepoint & 0xffc00) >> 10); + last = 0xDC00 | (codepoint & 0x003ff); + + sprintf(seq, "\\u%04x\\u%04x", first, last); + length = 12; + } + + text = seq; + break; + } + } + + if(dump(text, length, data)) + return -1; + + str = pos = end; + } + + return dump("\"", 1, data); +} + +static int object_key_compare_keys(const void *key1, const void *key2) +{ + return strcmp(((const struct object_key *)key1)->key, + ((const struct object_key *)key2)->key); +} + +static int object_key_compare_serials(const void *key1, const void *key2) +{ + size_t a = ((const struct object_key *)key1)->serial; + size_t b = ((const struct object_key *)key2)->serial; + + return a < b ? -1 : a == b ? 
0 : 1; +} + +static int do_dump(const json_t *json, size_t flags, int depth, + json_dump_callback_t dump, void *data) +{ + if(!json) + return -1; + + switch(json_typeof(json)) { + case JSON_NULL: + return dump("null", 4, data); + + case JSON_TRUE: + return dump("true", 4, data); + + case JSON_FALSE: + return dump("false", 5, data); + + case JSON_INTEGER: + { + char buffer[MAX_INTEGER_STR_LENGTH]; + int size; + + size = snprintf(buffer, MAX_INTEGER_STR_LENGTH, + "%" JSON_INTEGER_FORMAT, + json_integer_value(json)); + if(size < 0 || size >= MAX_INTEGER_STR_LENGTH) + return -1; + + return dump(buffer, size, data); + } + + case JSON_REAL: + { + char buffer[MAX_REAL_STR_LENGTH]; + int size; + double value = json_real_value(json); + + size = jsonp_dtostr(buffer, MAX_REAL_STR_LENGTH, value); + if(size < 0) + return -1; + + return dump(buffer, size, data); + } + + case JSON_STRING: + return dump_string(json_string_value(json), dump, data, flags); + + case JSON_ARRAY: + { + int i; + int n; + json_array_t *array; + + /* detect circular references */ + array = json_to_array(json); + if(array->visited) + goto array_error; + array->visited = 1; + + n = json_array_size(json); + + if(dump("[", 1, data)) + goto array_error; + if(n == 0) { + array->visited = 0; + return dump("]", 1, data); + } + if(dump_indent(flags, depth + 1, 0, dump, data)) + goto array_error; + + for(i = 0; i < n; ++i) { + if(do_dump(json_array_get(json, i), flags, depth + 1, + dump, data)) + goto array_error; + + if(i < n - 1) + { + if(dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + goto array_error; + } + else + { + if(dump_indent(flags, depth, 0, dump, data)) + goto array_error; + } + } + + array->visited = 0; + return dump("]", 1, data); + + array_error: + array->visited = 0; + return -1; + } + + case JSON_OBJECT: + { + json_object_t *object; + void *iter; + const char *separator; + int separator_length; + + if(flags & JSON_COMPACT) { + separator = ":"; + separator_length = 1; + } + 
else { + separator = ": "; + separator_length = 2; + } + + /* detect circular references */ + object = json_to_object(json); + if(object->visited) + goto object_error; + object->visited = 1; + + iter = json_object_iter((json_t *)json); + + if(dump("{", 1, data)) + goto object_error; + if(!iter) { + object->visited = 0; + return dump("}", 1, data); + } + if(dump_indent(flags, depth + 1, 0, dump, data)) + goto object_error; + + if(flags & JSON_SORT_KEYS || flags & JSON_PRESERVE_ORDER) + { + struct object_key *keys; + size_t size, i; + int (*cmp_func)(const void *, const void *); + + size = json_object_size(json); + keys = jsonp_malloc(size * sizeof(struct object_key)); + if(!keys) + goto object_error; + + i = 0; + while(iter) + { + keys[i].serial = hashtable_iter_serial(iter); + keys[i].key = json_object_iter_key(iter); + iter = json_object_iter_next((json_t *)json, iter); + i++; + } + assert(i == size); + + if(flags & JSON_SORT_KEYS) + cmp_func = object_key_compare_keys; + else + cmp_func = object_key_compare_serials; + + qsort(keys, size, sizeof(struct object_key), cmp_func); + + for(i = 0; i < size; i++) + { + const char *key; + json_t *value; + + key = keys[i].key; + value = json_object_get(json, key); + assert(value); + + dump_string(key, dump, data, flags); + if(dump(separator, separator_length, data) || + do_dump(value, flags, depth + 1, dump, data)) + { + jsonp_free(keys); + goto object_error; + } + + if(i < size - 1) + { + if(dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + { + jsonp_free(keys); + goto object_error; + } + } + else + { + if(dump_indent(flags, depth, 0, dump, data)) + { + jsonp_free(keys); + goto object_error; + } + } + } + + jsonp_free(keys); + } + else + { + /* Don't sort keys */ + + while(iter) + { + void *next = json_object_iter_next((json_t *)json, iter); + + dump_string(json_object_iter_key(iter), dump, data, flags); + if(dump(separator, separator_length, data) || + do_dump(json_object_iter_value(iter), flags, 
depth + 1, + dump, data)) + goto object_error; + + if(next) + { + if(dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + goto object_error; + } + else + { + if(dump_indent(flags, depth, 0, dump, data)) + goto object_error; + } + + iter = next; + } + } + + object->visited = 0; + return dump("}", 1, data); + + object_error: + object->visited = 0; + return -1; + } + + default: + /* not reached */ + return -1; + } +} + +char *json_dumps(const json_t *json, size_t flags) +{ + strbuffer_t strbuff; + char *result; + + if(strbuffer_init(&strbuff)) + return NULL; + + if(json_dump_callback(json, dump_to_strbuffer, (void *)&strbuff, flags)) + result = NULL; + else + result = jsonp_strdup(strbuffer_value(&strbuff)); + + strbuffer_close(&strbuff); + return result; +} + +int json_dumpf(const json_t *json, FILE *output, size_t flags) +{ + return json_dump_callback(json, dump_to_file, (void *)output, flags); +} + +int json_dump_file(const json_t *json, const char *path, size_t flags) +{ + int result; + + FILE *output = fopen(path, "w"); + if(!output) + return -1; + + result = json_dumpf(json, output, flags); + + fclose(output); + return result; +} + +int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, size_t flags) +{ + if(!(flags & JSON_ENCODE_ANY)) { + if(!json_is_array(json) && !json_is_object(json)) + return -1; + } + + return do_dump(json, flags, 0, callback, data); +} diff --git a/compat/jansson-2.6/src/error.c b/compat/jansson-2.6/src/error.c new file mode 100644 index 0000000..a544a59 --- /dev/null +++ b/compat/jansson-2.6/src/error.c @@ -0,0 +1,63 @@ +#include +#include "jansson_private.h" + +void jsonp_error_init(json_error_t *error, const char *source) +{ + if(error) + { + error->text[0] = '\0'; + error->line = -1; + error->column = -1; + error->position = 0; + if(source) + jsonp_error_set_source(error, source); + else + error->source[0] = '\0'; + } +} + +void jsonp_error_set_source(json_error_t *error, const char 
*source) +{ + size_t length; + + if(!error || !source) + return; + + length = strlen(source); + if(length < JSON_ERROR_SOURCE_LENGTH) + strcpy(error->source, source); + else { + size_t extra = length - JSON_ERROR_SOURCE_LENGTH + 4; + strcpy(error->source, "..."); + strcpy(error->source + 3, source + extra); + } +} + +void jsonp_error_set(json_error_t *error, int line, int column, + size_t position, const char *msg, ...) +{ + va_list ap; + + va_start(ap, msg); + jsonp_error_vset(error, line, column, position, msg, ap); + va_end(ap); +} + +void jsonp_error_vset(json_error_t *error, int line, int column, + size_t position, const char *msg, va_list ap) +{ + if(!error) + return; + + if(error->text[0] != '\0') { + /* error already set */ + return; + } + + error->line = line; + error->column = column; + error->position = position; + + vsnprintf(error->text, JSON_ERROR_TEXT_LENGTH, msg, ap); + error->text[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; +} diff --git a/compat/jansson-2.6/src/hashtable.c b/compat/jansson-2.6/src/hashtable.c new file mode 100644 index 0000000..5fb0467 --- /dev/null +++ b/compat/jansson-2.6/src/hashtable.c @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#include +#include +#include /* for JSON_INLINE */ +#include "jansson_private.h" /* for container_of() */ +#include "hashtable.h" + +typedef struct hashtable_list list_t; +typedef struct hashtable_pair pair_t; +typedef struct hashtable_bucket bucket_t; + +#define list_to_pair(list_) container_of(list_, pair_t, list) + +/* From http://www.cse.yorku.ca/~oz/hash.html */ +static size_t hash_str(const void *ptr) +{ + const char *str = (const char *)ptr; + + size_t hash = 5381; + size_t c; + + while((c = (size_t)*str)) + { + hash = ((hash << 5) + hash) + c; + str++; + } + + return hash; +} + +static JSON_INLINE void list_init(list_t *list) +{ + list->next = list; + list->prev = list; +} + +static JSON_INLINE void list_insert(list_t *list, list_t *node) +{ + node->next = list; + node->prev = list->prev; + list->prev->next = node; + list->prev = node; +} + +static JSON_INLINE void list_remove(list_t *list) +{ + list->prev->next = list->next; + list->next->prev = list->prev; +} + +static JSON_INLINE int bucket_is_empty(hashtable_t *hashtable, bucket_t *bucket) +{ + return bucket->first == &hashtable->list && bucket->first == bucket->last; +} + +static void insert_to_bucket(hashtable_t *hashtable, bucket_t *bucket, + list_t *list) +{ + if(bucket_is_empty(hashtable, bucket)) + { + list_insert(&hashtable->list, list); + bucket->first = bucket->last = list; + } + else + { + list_insert(bucket->first, list); + bucket->first = list; + } +} + +static const size_t primes[] = { + 5, 13, 23, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593, + 49157, 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469, + 12582917, 25165843, 50331653, 100663319, 201326611, 402653189, + 805306457, 1610612741 +}; + +static JSON_INLINE size_t num_buckets(hashtable_t *hashtable) +{ + return primes[hashtable->num_buckets]; +} + + +static pair_t *hashtable_find_pair(hashtable_t *hashtable, bucket_t *bucket, + const char *key, size_t hash) +{ + list_t *list; + pair_t *pair; + + 
if(bucket_is_empty(hashtable, bucket)) + return NULL; + + list = bucket->first; + while(1) + { + pair = list_to_pair(list); + if(pair->hash == hash && strcmp(pair->key, key) == 0) + return pair; + + if(list == bucket->last) + break; + + list = list->next; + } + + return NULL; +} + +/* returns 0 on success, -1 if key was not found */ +static int hashtable_do_del(hashtable_t *hashtable, + const char *key, size_t hash) +{ + pair_t *pair; + bucket_t *bucket; + size_t index; + + index = hash % num_buckets(hashtable); + bucket = &hashtable->buckets[index]; + + pair = hashtable_find_pair(hashtable, bucket, key, hash); + if(!pair) + return -1; + + if(&pair->list == bucket->first && &pair->list == bucket->last) + bucket->first = bucket->last = &hashtable->list; + + else if(&pair->list == bucket->first) + bucket->first = pair->list.next; + + else if(&pair->list == bucket->last) + bucket->last = pair->list.prev; + + list_remove(&pair->list); + json_decref(pair->value); + + jsonp_free(pair); + hashtable->size--; + + return 0; +} + +static void hashtable_do_clear(hashtable_t *hashtable) +{ + list_t *list, *next; + pair_t *pair; + + for(list = hashtable->list.next; list != &hashtable->list; list = next) + { + next = list->next; + pair = list_to_pair(list); + json_decref(pair->value); + jsonp_free(pair); + } +} + +static int hashtable_do_rehash(hashtable_t *hashtable) +{ + list_t *list, *next; + pair_t *pair; + size_t i, index, new_size; + + jsonp_free(hashtable->buckets); + + hashtable->num_buckets++; + new_size = num_buckets(hashtable); + + hashtable->buckets = jsonp_malloc(new_size * sizeof(bucket_t)); + if(!hashtable->buckets) + return -1; + + for(i = 0; i < num_buckets(hashtable); i++) + { + hashtable->buckets[i].first = hashtable->buckets[i].last = + &hashtable->list; + } + + list = hashtable->list.next; + list_init(&hashtable->list); + + for(; list != &hashtable->list; list = next) { + next = list->next; + pair = list_to_pair(list); + index = pair->hash % new_size; + 
insert_to_bucket(hashtable, &hashtable->buckets[index], &pair->list); + } + + return 0; +} + + +int hashtable_init(hashtable_t *hashtable) +{ + size_t i; + + hashtable->size = 0; + hashtable->num_buckets = 0; /* index to primes[] */ + hashtable->buckets = jsonp_malloc(num_buckets(hashtable) * sizeof(bucket_t)); + if(!hashtable->buckets) + return -1; + + list_init(&hashtable->list); + + for(i = 0; i < num_buckets(hashtable); i++) + { + hashtable->buckets[i].first = hashtable->buckets[i].last = + &hashtable->list; + } + + return 0; +} + +void hashtable_close(hashtable_t *hashtable) +{ + hashtable_do_clear(hashtable); + jsonp_free(hashtable->buckets); +} + +int hashtable_set(hashtable_t *hashtable, + const char *key, size_t serial, + json_t *value) +{ + pair_t *pair; + bucket_t *bucket; + size_t hash, index; + + /* rehash if the load ratio exceeds 1 */ + if(hashtable->size >= num_buckets(hashtable)) + if(hashtable_do_rehash(hashtable)) + return -1; + + hash = hash_str(key); + index = hash % num_buckets(hashtable); + bucket = &hashtable->buckets[index]; + pair = hashtable_find_pair(hashtable, bucket, key, hash); + + if(pair) + { + json_decref(pair->value); + pair->value = value; + } + else + { + /* offsetof(...) returns the size of pair_t without the last, + flexible member. This way, the correct amount is + allocated. 
*/ + pair = jsonp_malloc(offsetof(pair_t, key) + strlen(key) + 1); + if(!pair) + return -1; + + pair->hash = hash; + pair->serial = serial; + strcpy(pair->key, key); + pair->value = value; + list_init(&pair->list); + + insert_to_bucket(hashtable, bucket, &pair->list); + + hashtable->size++; + } + return 0; +} + +void *hashtable_get(hashtable_t *hashtable, const char *key) +{ + pair_t *pair; + size_t hash; + bucket_t *bucket; + + hash = hash_str(key); + bucket = &hashtable->buckets[hash % num_buckets(hashtable)]; + + pair = hashtable_find_pair(hashtable, bucket, key, hash); + if(!pair) + return NULL; + + return pair->value; +} + +int hashtable_del(hashtable_t *hashtable, const char *key) +{ + size_t hash = hash_str(key); + return hashtable_do_del(hashtable, key, hash); +} + +void hashtable_clear(hashtable_t *hashtable) +{ + size_t i; + + hashtable_do_clear(hashtable); + + for(i = 0; i < num_buckets(hashtable); i++) + { + hashtable->buckets[i].first = hashtable->buckets[i].last = + &hashtable->list; + } + + list_init(&hashtable->list); + hashtable->size = 0; +} + +void *hashtable_iter(hashtable_t *hashtable) +{ + return hashtable_iter_next(hashtable, &hashtable->list); +} + +void *hashtable_iter_at(hashtable_t *hashtable, const char *key) +{ + pair_t *pair; + size_t hash; + bucket_t *bucket; + + hash = hash_str(key); + bucket = &hashtable->buckets[hash % num_buckets(hashtable)]; + + pair = hashtable_find_pair(hashtable, bucket, key, hash); + if(!pair) + return NULL; + + return &pair->list; +} + +void *hashtable_iter_next(hashtable_t *hashtable, void *iter) +{ + list_t *list = (list_t *)iter; + if(list->next == &hashtable->list) + return NULL; + return list->next; +} + +void *hashtable_iter_key(void *iter) +{ + pair_t *pair = list_to_pair((list_t *)iter); + return pair->key; +} + +size_t hashtable_iter_serial(void *iter) +{ + pair_t *pair = list_to_pair((list_t *)iter); + return pair->serial; +} + +void *hashtable_iter_value(void *iter) +{ + pair_t *pair = 
list_to_pair((list_t *)iter); + return pair->value; +} + +void hashtable_iter_set(void *iter, json_t *value) +{ + pair_t *pair = list_to_pair((list_t *)iter); + + json_decref(pair->value); + pair->value = value; +} diff --git a/compat/jansson-2.6/src/hashtable.h b/compat/jansson-2.6/src/hashtable.h new file mode 100644 index 0000000..4a7ce6f --- /dev/null +++ b/compat/jansson-2.6/src/hashtable.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef HASHTABLE_H +#define HASHTABLE_H + +struct hashtable_list { + struct hashtable_list *prev; + struct hashtable_list *next; +}; + +/* "pair" may be a bit confusing a name, but think of it as a + key-value pair. In this case, it just encodes some extra data, + too */ +struct hashtable_pair { + size_t hash; + struct hashtable_list list; + json_t *value; + size_t serial; + char key[1]; +}; + +struct hashtable_bucket { + struct hashtable_list *first; + struct hashtable_list *last; +}; + +typedef struct hashtable { + size_t size; + struct hashtable_bucket *buckets; + size_t num_buckets; /* index to primes[] */ + struct hashtable_list list; +} hashtable_t; + + +#define hashtable_key_to_iter(key_) \ + (&(container_of(key_, struct hashtable_pair, key)->list)) + +/** + * hashtable_init - Initialize a hashtable object + * + * @hashtable: The (statically allocated) hashtable object + * + * Initializes a statically allocated hashtable object. The object + * should be cleared with hashtable_close when it's no longer used. + * + * Returns 0 on success, -1 on error (out of memory). + */ +int hashtable_init(hashtable_t *hashtable); + +/** + * hashtable_close - Release all resources used by a hashtable object + * + * @hashtable: The hashtable + * + * Destroys a statically allocated hashtable object. 
+ */ +void hashtable_close(hashtable_t *hashtable); + +/** + * hashtable_set - Add/modify value in hashtable + * + * @hashtable: The hashtable object + * @key: The key + * @serial: For addition order of keys + * @value: The value + * + * If a value with the given key already exists, its value is replaced + * with the new value. Value is "stealed" in the sense that hashtable + * doesn't increment its refcount but decreases the refcount when the + * value is no longer needed. + * + * Returns 0 on success, -1 on failure (out of memory). + */ +int hashtable_set(hashtable_t *hashtable, + const char *key, size_t serial, + json_t *value); + +/** + * hashtable_get - Get a value associated with a key + * + * @hashtable: The hashtable object + * @key: The key + * + * Returns value if it is found, or NULL otherwise. + */ +void *hashtable_get(hashtable_t *hashtable, const char *key); + +/** + * hashtable_del - Remove a value from the hashtable + * + * @hashtable: The hashtable object + * @key: The key + * + * Returns 0 on success, or -1 if the key was not found. + */ +int hashtable_del(hashtable_t *hashtable, const char *key); + +/** + * hashtable_clear - Clear hashtable + * + * @hashtable: The hashtable object + * + * Removes all items from the hashtable. + */ +void hashtable_clear(hashtable_t *hashtable); + +/** + * hashtable_iter - Iterate over hashtable + * + * @hashtable: The hashtable object + * + * Returns an opaque iterator to the first element in the hashtable. + * The iterator should be passed to hashtable_iter_* functions. + * The hashtable items are not iterated over in any particular order. + * + * There's no need to free the iterator in any way. The iterator is + * valid as long as the item that is referenced by the iterator is not + * deleted. Other values may be added or deleted. In particular, + * hashtable_iter_next() may be called on an iterator, and after that + * the key/value pair pointed by the old iterator may be deleted. 
+ */ +void *hashtable_iter(hashtable_t *hashtable); + +/** + * hashtable_iter_at - Return an iterator at a specific key + * + * @hashtable: The hashtable object + * @key: The key that the iterator should point to + * + * Like hashtable_iter() but returns an iterator pointing to a + * specific key. + */ +void *hashtable_iter_at(hashtable_t *hashtable, const char *key); + +/** + * hashtable_iter_next - Advance an iterator + * + * @hashtable: The hashtable object + * @iter: The iterator + * + * Returns a new iterator pointing to the next element in the + * hashtable or NULL if the whole hastable has been iterated over. + */ +void *hashtable_iter_next(hashtable_t *hashtable, void *iter); + +/** + * hashtable_iter_key - Retrieve the key pointed by an iterator + * + * @iter: The iterator + */ +void *hashtable_iter_key(void *iter); + +/** + * hashtable_iter_serial - Retrieve the serial number pointed to by an iterator + * + * @iter: The iterator + */ +size_t hashtable_iter_serial(void *iter); + +/** + * hashtable_iter_value - Retrieve the value pointed by an iterator + * + * @iter: The iterator + */ +void *hashtable_iter_value(void *iter); + +/** + * hashtable_iter_set - Set the value pointed by an iterator + * + * @iter: The iterator + * @value: The value to set + */ +void hashtable_iter_set(void *iter, json_t *value); + +#endif diff --git a/compat/jansson-2.6/src/jansson.def b/compat/jansson-2.6/src/jansson.def new file mode 100644 index 0000000..8cc2e9c --- /dev/null +++ b/compat/jansson-2.6/src/jansson.def @@ -0,0 +1,63 @@ +EXPORTS + json_delete + json_true + json_false + json_null + json_string + json_string_nocheck + json_string_value + json_string_set + json_string_set_nocheck + json_integer + json_integer_value + json_integer_set + json_real + json_real_value + json_real_set + json_number_value + json_array + json_array_size + json_array_get + json_array_set_new + json_array_append_new + json_array_insert_new + json_array_remove + json_array_clear + 
json_array_extend + json_object + json_object_size + json_object_get + json_object_set_new + json_object_set_new_nocheck + json_object_del + json_object_clear + json_object_update + json_object_update_existing + json_object_update_missing + json_object_iter + json_object_iter_at + json_object_iter_next + json_object_iter_key + json_object_iter_value + json_object_iter_set_new + json_object_key_to_iter + json_dumps + json_dumpf + json_dump_file + json_dump_callback + json_loads + json_loadb + json_loadf + json_load_file + json_load_callback + json_equal + json_copy + json_deep_copy + json_pack + json_pack_ex + json_vpack_ex + json_unpack + json_unpack_ex + json_vunpack_ex + json_set_alloc_funcs + diff --git a/compat/jansson-2.6/src/jansson.h b/compat/jansson-2.6/src/jansson.h new file mode 100644 index 0000000..52c8077 --- /dev/null +++ b/compat/jansson-2.6/src/jansson.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef JANSSON_H +#define JANSSON_H + +#include +#include /* for size_t */ +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* version */ + +#define JANSSON_MAJOR_VERSION 2 +#define JANSSON_MINOR_VERSION 5 +#define JANSSON_MICRO_VERSION 0 + +/* Micro version is omitted if it's 0 */ +#define JANSSON_VERSION "2.5" + +/* Version as a 3-byte hex number, e.g. 0x010201 == 1.2.1. Use this + for numeric comparisons, e.g. #if JANSSON_VERSION_HEX >= ... 
*/ +#define JANSSON_VERSION_HEX ((JANSSON_MAJOR_VERSION << 16) | \ + (JANSSON_MINOR_VERSION << 8) | \ + (JANSSON_MICRO_VERSION << 0)) + + +/* types */ + +typedef enum { + JSON_OBJECT, + JSON_ARRAY, + JSON_STRING, + JSON_INTEGER, + JSON_REAL, + JSON_TRUE, + JSON_FALSE, + JSON_NULL +} json_type; + +typedef struct json_t { + json_type type; + size_t refcount; +} json_t; + +#ifndef JANSSON_USING_CMAKE /* disabled if using cmake */ +#if JSON_INTEGER_IS_LONG_LONG +#ifdef _WIN32 +#define JSON_INTEGER_FORMAT "I64d" +#else +#define JSON_INTEGER_FORMAT "lld" +#endif +typedef long long json_int_t; +#else +#define JSON_INTEGER_FORMAT "ld" +typedef long json_int_t; +#endif /* JSON_INTEGER_IS_LONG_LONG */ +#endif + +#define json_typeof(json) ((json)->type) +#define json_is_object(json) (json && json_typeof(json) == JSON_OBJECT) +#define json_is_array(json) (json && json_typeof(json) == JSON_ARRAY) +#define json_is_string(json) (json && json_typeof(json) == JSON_STRING) +#define json_is_integer(json) (json && json_typeof(json) == JSON_INTEGER) +#define json_is_real(json) (json && json_typeof(json) == JSON_REAL) +#define json_is_number(json) (json_is_integer(json) || json_is_real(json)) +#define json_is_true(json) (json && json_typeof(json) == JSON_TRUE) +#define json_is_false(json) (json && json_typeof(json) == JSON_FALSE) +#define json_is_boolean(json) (json_is_true(json) || json_is_false(json)) +#define json_is_null(json) (json && json_typeof(json) == JSON_NULL) + +/* construction, destruction, reference counting */ + +json_t *json_object(void); +json_t *json_array(void); +json_t *json_string(const char *value); +json_t *json_string_nocheck(const char *value); +json_t *json_integer(json_int_t value); +json_t *json_real(double value); +json_t *json_true(void); +json_t *json_false(void); +#define json_boolean(val) ((val) ? 
json_true() : json_false()) +json_t *json_null(void); + +static JSON_INLINE +json_t *json_incref(json_t *json) +{ + if(json && json->refcount != (size_t)-1) + ++json->refcount; + return json; +} + +/* do not call json_delete directly */ +void json_delete(json_t *json); + +static JSON_INLINE +void json_decref(json_t *json) +{ + if(json && json->refcount != (size_t)-1 && --json->refcount == 0) + json_delete(json); +} + + +/* error reporting */ + +#define JSON_ERROR_TEXT_LENGTH 160 +#define JSON_ERROR_SOURCE_LENGTH 80 + +typedef struct { + int line; + int column; + int position; + char source[JSON_ERROR_SOURCE_LENGTH]; + char text[JSON_ERROR_TEXT_LENGTH]; +} json_error_t; + + +/* getters, setters, manipulation */ + +size_t json_object_size(const json_t *object); +json_t *json_object_get(const json_t *object, const char *key); +int json_object_set_new(json_t *object, const char *key, json_t *value); +int json_object_set_new_nocheck(json_t *object, const char *key, json_t *value); +int json_object_del(json_t *object, const char *key); +int json_object_clear(json_t *object); +int json_object_update(json_t *object, json_t *other); +int json_object_update_existing(json_t *object, json_t *other); +int json_object_update_missing(json_t *object, json_t *other); +void *json_object_iter(json_t *object); +void *json_object_iter_at(json_t *object, const char *key); +void *json_object_key_to_iter(const char *key); +void *json_object_iter_next(json_t *object, void *iter); +const char *json_object_iter_key(void *iter); +json_t *json_object_iter_value(void *iter); +int json_object_iter_set_new(json_t *object, void *iter, json_t *value); + +#define json_object_foreach(object, key, value) \ + for(key = json_object_iter_key(json_object_iter(object)); \ + key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ + key = json_object_iter_key(json_object_iter_next(object, json_object_key_to_iter(key)))) + +#define json_array_foreach(array, index, value) \ + for(index = 0; \ 
+ index < json_array_size(array) && (value = json_array_get(array, index)); \ + index++) + +static JSON_INLINE +int json_object_set(json_t *object, const char *key, json_t *value) +{ + return json_object_set_new(object, key, json_incref(value)); +} + +static JSON_INLINE +int json_object_set_nocheck(json_t *object, const char *key, json_t *value) +{ + return json_object_set_new_nocheck(object, key, json_incref(value)); +} + +static JSON_INLINE +int json_object_iter_set(json_t *object, void *iter, json_t *value) +{ + return json_object_iter_set_new(object, iter, json_incref(value)); +} + +size_t json_array_size(const json_t *array); +json_t *json_array_get(const json_t *array, size_t index); +int json_array_set_new(json_t *array, size_t index, json_t *value); +int json_array_append_new(json_t *array, json_t *value); +int json_array_insert_new(json_t *array, size_t index, json_t *value); +int json_array_remove(json_t *array, size_t index); +int json_array_clear(json_t *array); +int json_array_extend(json_t *array, json_t *other); + +static JSON_INLINE +int json_array_set(json_t *array, size_t ind, json_t *value) +{ + return json_array_set_new(array, ind, json_incref(value)); +} + +static JSON_INLINE +int json_array_append(json_t *array, json_t *value) +{ + return json_array_append_new(array, json_incref(value)); +} + +static JSON_INLINE +int json_array_insert(json_t *array, size_t ind, json_t *value) +{ + return json_array_insert_new(array, ind, json_incref(value)); +} + +const char *json_string_value(const json_t *string); +json_int_t json_integer_value(const json_t *integer); +double json_real_value(const json_t *real); +double json_number_value(const json_t *json); + +int json_string_set(json_t *string, const char *value); +int json_string_set_nocheck(json_t *string, const char *value); +int json_integer_set(json_t *integer, json_int_t value); +int json_real_set(json_t *real, double value); + + +/* pack, unpack */ + +json_t *json_pack(const char *fmt, ...); +json_t 
*json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...); +json_t *json_vpack_ex(json_error_t *error, size_t flags, const char *fmt, va_list ap); + +#define JSON_VALIDATE_ONLY 0x1 +#define JSON_STRICT 0x2 + +int json_unpack(json_t *root, const char *fmt, ...); +int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, ...); +int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, va_list ap); + + +/* equality */ + +int json_equal(json_t *value1, json_t *value2); + + +/* copying */ + +json_t *json_copy(json_t *value); +json_t *json_deep_copy(const json_t *value); + + +/* decoding */ + +#define JSON_REJECT_DUPLICATES 0x1 +#define JSON_DISABLE_EOF_CHECK 0x2 +#define JSON_DECODE_ANY 0x4 +#define JSON_DECODE_INT_AS_REAL 0x8 + +typedef size_t (*json_load_callback_t)(void *buffer, size_t buflen, void *data); + +json_t *json_loads(const char *input, size_t flags, json_error_t *error); +json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error); +json_t *json_loadf(FILE *input, size_t flags, json_error_t *error); +json_t *json_load_file(const char *path, size_t flags, json_error_t *error); +json_t *json_load_callback(json_load_callback_t callback, void *data, size_t flags, json_error_t *error); + + +/* encoding */ + +#define JSON_INDENT(n) (n & 0x1F) +#define JSON_COMPACT 0x20 +#define JSON_ENSURE_ASCII 0x40 +#define JSON_SORT_KEYS 0x80 +#define JSON_PRESERVE_ORDER 0x100 +#define JSON_ENCODE_ANY 0x200 +#define JSON_ESCAPE_SLASH 0x400 + +typedef int (*json_dump_callback_t)(const char *buffer, size_t size, void *data); + +char *json_dumps(const json_t *json, size_t flags); +int json_dumpf(const json_t *json, FILE *output, size_t flags); +int json_dump_file(const json_t *json, const char *path, size_t flags); +int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, size_t flags); + +/* custom memory allocation */ + +typedef void 
*(*json_malloc_t)(size_t); +typedef void (*json_free_t)(void *); + +void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/compat/jansson-2.6/src/jansson_config.h.in b/compat/jansson-2.6/src/jansson_config.h.in new file mode 100644 index 0000000..785801f --- /dev/null +++ b/compat/jansson-2.6/src/jansson_config.h.in @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2010-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + * + * + * This file specifies a part of the site-specific configuration for + * Jansson, namely those things that affect the public API in + * jansson.h. + * + * The configure script copies this file to jansson_config.h and + * replaces @var@ substitutions by values that fit your system. If you + * cannot run the configure script, you can do the value substitution + * by hand. + */ + +#ifndef JANSSON_CONFIG_H +#define JANSSON_CONFIG_H + +/* If your compiler supports the inline keyword in C, JSON_INLINE is + defined to `inline', otherwise empty. In C++, the inline is always + supported. */ +#ifdef __cplusplus +#define JSON_INLINE inline +#else +#define JSON_INLINE @json_inline@ +#endif + +/* If your compiler supports the `long long` type and the strtoll() + library function, JSON_INTEGER_IS_LONG_LONG is defined to 1, + otherwise to 0. */ +#define JSON_INTEGER_IS_LONG_LONG @json_have_long_long@ + +/* If locale.h and localeconv() are available, define to 1, + otherwise to 0. 
*/ +#define JSON_HAVE_LOCALECONV @json_have_localeconv@ + +#endif diff --git a/compat/jansson-2.6/src/jansson_private.h b/compat/jansson-2.6/src/jansson_private.h new file mode 100644 index 0000000..403b53a --- /dev/null +++ b/compat/jansson-2.6/src/jansson_private.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef JANSSON_PRIVATE_H +#define JANSSON_PRIVATE_H + +#include +#include "jansson.h" +#include "hashtable.h" +#include "strbuffer.h" + +#define container_of(ptr_, type_, member_) \ + ((type_ *)((char *)ptr_ - offsetof(type_, member_))) + +/* On some platforms, max() may already be defined */ +#ifndef max +#define max(a, b) ((a) > (b) ? (a) : (b)) +#endif + +/* va_copy is a C99 feature. In C89 implementations, it's sometimes + available as __va_copy. If not, memcpy() should do the trick. */ +#ifndef va_copy +#ifdef __va_copy +#define va_copy __va_copy +#else +#define va_copy(a, b) memcpy(&(a), &(b), sizeof(va_list)) +#endif +#endif + +typedef struct { + json_t json; + hashtable_t hashtable; + size_t serial; + int visited; +} json_object_t; + +typedef struct { + json_t json; + size_t size; + size_t entries; + json_t **table; + int visited; +} json_array_t; + +typedef struct { + json_t json; + char *value; +} json_string_t; + +typedef struct { + json_t json; + double value; +} json_real_t; + +typedef struct { + json_t json; + json_int_t value; +} json_integer_t; + +#define json_to_object(json_) container_of(json_, json_object_t, json) +#define json_to_array(json_) container_of(json_, json_array_t, json) +#define json_to_string(json_) container_of(json_, json_string_t, json) +#define json_to_real(json_) container_of(json_, json_real_t, json) +#define json_to_integer(json_) container_of(json_, json_integer_t, json) + +void jsonp_error_init(json_error_t *error, const char *source); +void 
jsonp_error_set_source(json_error_t *error, const char *source); +void jsonp_error_set(json_error_t *error, int line, int column, + size_t position, const char *msg, ...); +void jsonp_error_vset(json_error_t *error, int line, int column, + size_t position, const char *msg, va_list ap); + +/* Locale independent string<->double conversions */ +int jsonp_strtod(strbuffer_t *strbuffer, double *out); +int jsonp_dtostr(char *buffer, size_t size, double value); + +/* Wrappers for custom memory functions */ +void* jsonp_malloc(size_t size); +void jsonp_free(void *ptr); +char *jsonp_strndup(const char *str, size_t length); +char *jsonp_strdup(const char *str); + +/* Windows compatibility */ +#ifdef _WIN32 +#define snprintf _snprintf +#define vsnprintf _vsnprintf +#endif + +#endif diff --git a/compat/jansson-2.6/src/load.c b/compat/jansson-2.6/src/load.c new file mode 100644 index 0000000..c5536f5 --- /dev/null +++ b/compat/jansson-2.6/src/load.c @@ -0,0 +1,1077 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include +#include +#include +#include +#include +#include + +#include "jansson.h" +#include "jansson_private.h" +#include "strbuffer.h" +#include "utf.h" + +#define STREAM_STATE_OK 0 +#define STREAM_STATE_EOF -1 +#define STREAM_STATE_ERROR -2 + +#define TOKEN_INVALID -1 +#define TOKEN_EOF 0 +#define TOKEN_STRING 256 +#define TOKEN_INTEGER 257 +#define TOKEN_REAL 258 +#define TOKEN_TRUE 259 +#define TOKEN_FALSE 260 +#define TOKEN_NULL 261 + +/* Locale independent versions of isxxx() functions */ +#define l_isupper(c) ('A' <= (c) && (c) <= 'Z') +#define l_islower(c) ('a' <= (c) && (c) <= 'z') +#define l_isalpha(c) (l_isupper(c) || l_islower(c)) +#define l_isdigit(c) ('0' <= (c) && (c) <= '9') +#define l_isxdigit(c) \ + (l_isdigit(c) || ('A' <= (c) && (c) <= 'F') || ('a' <= (c) && (c) <= 'f')) + +/* Read one byte from stream, convert to unsigned char, then int, and + return. return EOF on end of file. This corresponds to the + behaviour of fgetc(). */ +typedef int (*get_func)(void *data); + +typedef struct { + get_func get; + void *data; + char buffer[5]; + size_t buffer_pos; + int state; + int line; + int column, last_column; + size_t position; +} stream_t; + +typedef struct { + stream_t stream; + strbuffer_t saved_text; + int token; + union { + char *string; + json_int_t integer; + double real; + } value; +} lex_t; + +#define stream_to_lex(stream) container_of(stream, lex_t, stream) + + +/*** error reporting ***/ + +static void error_set(json_error_t *error, const lex_t *lex, + const char *msg, ...) 
+{ + va_list ap; + char msg_text[JSON_ERROR_TEXT_LENGTH]; + char msg_with_context[JSON_ERROR_TEXT_LENGTH]; + + int line = -1, col = -1; + size_t pos = 0; + const char *result = msg_text; + + if(!error) + return; + + va_start(ap, msg); + vsnprintf(msg_text, JSON_ERROR_TEXT_LENGTH, msg, ap); + msg_text[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; + va_end(ap); + + if(lex) + { + const char *saved_text = strbuffer_value(&lex->saved_text); + + line = lex->stream.line; + col = lex->stream.column; + pos = lex->stream.position; + + if(saved_text && saved_text[0]) + { + if(lex->saved_text.length <= 20) { + snprintf(msg_with_context, JSON_ERROR_TEXT_LENGTH, + "%s near '%s'", msg_text, saved_text); + msg_with_context[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; + result = msg_with_context; + } + } + else + { + if(lex->stream.state == STREAM_STATE_ERROR) { + /* No context for UTF-8 decoding errors */ + result = msg_text; + } + else { + snprintf(msg_with_context, JSON_ERROR_TEXT_LENGTH, + "%s near end of file", msg_text); + msg_with_context[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; + result = msg_with_context; + } + } + } + + jsonp_error_set(error, line, col, pos, "%s", result); +} + + +/*** lexical analyzer ***/ + +static void +stream_init(stream_t *stream, get_func get, void *data) +{ + stream->get = get; + stream->data = data; + stream->buffer[0] = '\0'; + stream->buffer_pos = 0; + + stream->state = STREAM_STATE_OK; + stream->line = 1; + stream->column = 0; + stream->position = 0; +} + +static int stream_get(stream_t *stream, json_error_t *error) +{ + int c; + + if(stream->state != STREAM_STATE_OK) + return stream->state; + + if(!stream->buffer[stream->buffer_pos]) + { + c = stream->get(stream->data); + if(c == EOF) { + stream->state = STREAM_STATE_EOF; + return STREAM_STATE_EOF; + } + + stream->buffer[0] = c; + stream->buffer_pos = 0; + + if(0x80 <= c && c <= 0xFF) + { + /* multi-byte UTF-8 sequence */ + int i, count; + + count = utf8_check_first(c); + if(!count) + goto out; + + assert(count >= 
2); + + for(i = 1; i < count; i++) + stream->buffer[i] = stream->get(stream->data); + + if(!utf8_check_full(stream->buffer, count, NULL)) + goto out; + + stream->buffer[count] = '\0'; + } + else + stream->buffer[1] = '\0'; + } + + c = stream->buffer[stream->buffer_pos++]; + + stream->position++; + if(c == '\n') { + stream->line++; + stream->last_column = stream->column; + stream->column = 0; + } + else if(utf8_check_first(c)) { + /* track the Unicode character column, so increment only if + this is the first character of a UTF-8 sequence */ + stream->column++; + } + + return c; + +out: + stream->state = STREAM_STATE_ERROR; + error_set(error, stream_to_lex(stream), "unable to decode byte 0x%x", c); + return STREAM_STATE_ERROR; +} + +static void stream_unget(stream_t *stream, int c) +{ + if(c == STREAM_STATE_EOF || c == STREAM_STATE_ERROR) + return; + + stream->position--; + if(c == '\n') { + stream->line--; + stream->column = stream->last_column; + } + else if(utf8_check_first(c)) + stream->column--; + + assert(stream->buffer_pos > 0); + stream->buffer_pos--; + assert(stream->buffer[stream->buffer_pos] == c); +} + + +static int lex_get(lex_t *lex, json_error_t *error) +{ + return stream_get(&lex->stream, error); +} + +static void lex_save(lex_t *lex, int c) +{ + strbuffer_append_byte(&lex->saved_text, c); +} + +static int lex_get_save(lex_t *lex, json_error_t *error) +{ + int c = stream_get(&lex->stream, error); + if(c != STREAM_STATE_EOF && c != STREAM_STATE_ERROR) + lex_save(lex, c); + return c; +} + +static void lex_unget(lex_t *lex, int c) +{ + stream_unget(&lex->stream, c); +} + +static void lex_unget_unsave(lex_t *lex, int c) +{ + if(c != STREAM_STATE_EOF && c != STREAM_STATE_ERROR) { + /* Since we treat warnings as errors, when assertions are turned + * off the "d" variable would be set but never used. Which is + * treated as an error by GCC. 
+ */ + #ifndef NDEBUG + char d; + #endif + stream_unget(&lex->stream, c); + #ifndef NDEBUG + d = + #endif + strbuffer_pop(&lex->saved_text); + assert(c == d); + } +} + +static void lex_save_cached(lex_t *lex) +{ + while(lex->stream.buffer[lex->stream.buffer_pos] != '\0') + { + lex_save(lex, lex->stream.buffer[lex->stream.buffer_pos]); + lex->stream.buffer_pos++; + lex->stream.position++; + } +} + +/* assumes that str points to 'u' plus at least 4 valid hex digits */ +static int32_t decode_unicode_escape(const char *str) +{ + int i; + int32_t value = 0; + + assert(str[0] == 'u'); + + for(i = 1; i <= 4; i++) { + char c = str[i]; + value <<= 4; + if(l_isdigit(c)) + value += c - '0'; + else if(l_islower(c)) + value += c - 'a' + 10; + else if(l_isupper(c)) + value += c - 'A' + 10; + else + assert(0); + } + + return value; +} + +static void lex_scan_string(lex_t *lex, json_error_t *error) +{ + int c; + const char *p; + char *t; + int i; + + lex->value.string = NULL; + lex->token = TOKEN_INVALID; + + c = lex_get_save(lex, error); + + while(c != '"') { + if(c == STREAM_STATE_ERROR) + goto out; + + else if(c == STREAM_STATE_EOF) { + error_set(error, lex, "premature end of input"); + goto out; + } + + else if(0 <= c && c <= 0x1F) { + /* control character */ + lex_unget_unsave(lex, c); + if(c == '\n') + error_set(error, lex, "unexpected newline", c); + else + error_set(error, lex, "control character 0x%x", c); + goto out; + } + + else if(c == '\\') { + c = lex_get_save(lex, error); + if(c == 'u') { + c = lex_get_save(lex, error); + for(i = 0; i < 4; i++) { + if(!l_isxdigit(c)) { + error_set(error, lex, "invalid escape"); + goto out; + } + c = lex_get_save(lex, error); + } + } + else if(c == '"' || c == '\\' || c == '/' || c == 'b' || + c == 'f' || c == 'n' || c == 'r' || c == 't') + c = lex_get_save(lex, error); + else { + error_set(error, lex, "invalid escape"); + goto out; + } + } + else + c = lex_get_save(lex, error); + } + + /* the actual value is at most of the same 
length as the source + string, because: + - shortcut escapes (e.g. "\t") (length 2) are converted to 1 byte + - a single \uXXXX escape (length 6) is converted to at most 3 bytes + - two \uXXXX escapes (length 12) forming an UTF-16 surrogate pair + are converted to 4 bytes + */ + lex->value.string = jsonp_malloc(lex->saved_text.length + 1); + if(!lex->value.string) { + /* this is not very nice, since TOKEN_INVALID is returned */ + goto out; + } + + /* the target */ + t = lex->value.string; + + /* + 1 to skip the " */ + p = strbuffer_value(&lex->saved_text) + 1; + + while(*p != '"') { + if(*p == '\\') { + p++; + if(*p == 'u') { + char buffer[4]; + int length; + int32_t value; + + value = decode_unicode_escape(p); + p += 5; + + if(0xD800 <= value && value <= 0xDBFF) { + /* surrogate pair */ + if(*p == '\\' && *(p + 1) == 'u') { + int32_t value2 = decode_unicode_escape(++p); + p += 5; + + if(0xDC00 <= value2 && value2 <= 0xDFFF) { + /* valid second surrogate */ + value = + ((value - 0xD800) << 10) + + (value2 - 0xDC00) + + 0x10000; + } + else { + /* invalid second surrogate */ + error_set(error, lex, + "invalid Unicode '\\u%04X\\u%04X'", + value, value2); + goto out; + } + } + else { + /* no second surrogate */ + error_set(error, lex, "invalid Unicode '\\u%04X'", + value); + goto out; + } + } + else if(0xDC00 <= value && value <= 0xDFFF) { + error_set(error, lex, "invalid Unicode '\\u%04X'", value); + goto out; + } + else if(value == 0) + { + error_set(error, lex, "\\u0000 is not allowed"); + goto out; + } + + if(utf8_encode(value, buffer, &length)) + assert(0); + + memcpy(t, buffer, length); + t += length; + } + else { + switch(*p) { + case '"': case '\\': case '/': + *t = *p; break; + case 'b': *t = '\b'; break; + case 'f': *t = '\f'; break; + case 'n': *t = '\n'; break; + case 'r': *t = '\r'; break; + case 't': *t = '\t'; break; + default: assert(0); + } + t++; + p++; + } + } + else + *(t++) = *(p++); + } + *t = '\0'; + lex->token = TOKEN_STRING; + return; + +out: + 
jsonp_free(lex->value.string); +} + +#ifndef JANSSON_USING_CMAKE /* disabled if using cmake */ +#if JSON_INTEGER_IS_LONG_LONG +#ifdef _MSC_VER /* Microsoft Visual Studio */ +#define json_strtoint _strtoi64 +#else +#define json_strtoint strtoll +#endif +#else +#define json_strtoint strtol +#endif +#endif + +static int lex_scan_number(lex_t *lex, int c, json_error_t *error) +{ + const char *saved_text; + char *end; + double value; + + lex->token = TOKEN_INVALID; + + if(c == '-') + c = lex_get_save(lex, error); + + if(c == '0') { + c = lex_get_save(lex, error); + if(l_isdigit(c)) { + lex_unget_unsave(lex, c); + goto out; + } + } + else if(l_isdigit(c)) { + c = lex_get_save(lex, error); + while(l_isdigit(c)) + c = lex_get_save(lex, error); + } + else { + lex_unget_unsave(lex, c); + goto out; + } + + if(c != '.' && c != 'E' && c != 'e') { + json_int_t value; + + lex_unget_unsave(lex, c); + + saved_text = strbuffer_value(&lex->saved_text); + + errno = 0; + value = json_strtoint(saved_text, &end, 10); + if(errno == ERANGE) { + if(value < 0) + error_set(error, lex, "too big negative integer"); + else + error_set(error, lex, "too big integer"); + goto out; + } + + assert(end == saved_text + lex->saved_text.length); + + lex->token = TOKEN_INTEGER; + lex->value.integer = value; + return 0; + } + + if(c == '.') { + c = lex_get(lex, error); + if(!l_isdigit(c)) { + lex_unget(lex, c); + goto out; + } + lex_save(lex, c); + + c = lex_get_save(lex, error); + while(l_isdigit(c)) + c = lex_get_save(lex, error); + } + + if(c == 'E' || c == 'e') { + c = lex_get_save(lex, error); + if(c == '+' || c == '-') + c = lex_get_save(lex, error); + + if(!l_isdigit(c)) { + lex_unget_unsave(lex, c); + goto out; + } + + c = lex_get_save(lex, error); + while(l_isdigit(c)) + c = lex_get_save(lex, error); + } + + lex_unget_unsave(lex, c); + + if(jsonp_strtod(&lex->saved_text, &value)) { + error_set(error, lex, "real number overflow"); + goto out; + } + + lex->token = TOKEN_REAL; + lex->value.real = 
value; + return 0; + +out: + return -1; +} + +static int lex_scan(lex_t *lex, json_error_t *error) +{ + int c; + + strbuffer_clear(&lex->saved_text); + + if(lex->token == TOKEN_STRING) { + jsonp_free(lex->value.string); + lex->value.string = NULL; + } + + c = lex_get(lex, error); + while(c == ' ' || c == '\t' || c == '\n' || c == '\r') + c = lex_get(lex, error); + + if(c == STREAM_STATE_EOF) { + lex->token = TOKEN_EOF; + goto out; + } + + if(c == STREAM_STATE_ERROR) { + lex->token = TOKEN_INVALID; + goto out; + } + + lex_save(lex, c); + + if(c == '{' || c == '}' || c == '[' || c == ']' || c == ':' || c == ',') + lex->token = c; + + else if(c == '"') + lex_scan_string(lex, error); + + else if(l_isdigit(c) || c == '-') { + if(lex_scan_number(lex, c, error)) + goto out; + } + + else if(l_isalpha(c)) { + /* eat up the whole identifier for clearer error messages */ + const char *saved_text; + + c = lex_get_save(lex, error); + while(l_isalpha(c)) + c = lex_get_save(lex, error); + lex_unget_unsave(lex, c); + + saved_text = strbuffer_value(&lex->saved_text); + + if(strcmp(saved_text, "true") == 0) + lex->token = TOKEN_TRUE; + else if(strcmp(saved_text, "false") == 0) + lex->token = TOKEN_FALSE; + else if(strcmp(saved_text, "null") == 0) + lex->token = TOKEN_NULL; + else + lex->token = TOKEN_INVALID; + } + + else { + /* save the rest of the input UTF-8 sequence to get an error + message of valid UTF-8 */ + lex_save_cached(lex); + lex->token = TOKEN_INVALID; + } + +out: + return lex->token; +} + +static char *lex_steal_string(lex_t *lex) +{ + char *result = NULL; + if(lex->token == TOKEN_STRING) + { + result = lex->value.string; + lex->value.string = NULL; + } + return result; +} + +static int lex_init(lex_t *lex, get_func get, void *data) +{ + stream_init(&lex->stream, get, data); + if(strbuffer_init(&lex->saved_text)) + return -1; + + lex->token = TOKEN_INVALID; + return 0; +} + +static void lex_close(lex_t *lex) +{ + if(lex->token == TOKEN_STRING) + 
jsonp_free(lex->value.string); + strbuffer_close(&lex->saved_text); +} + + +/*** parser ***/ + +static json_t *parse_value(lex_t *lex, size_t flags, json_error_t *error); + +static json_t *parse_object(lex_t *lex, size_t flags, json_error_t *error) +{ + json_t *object = json_object(); + if(!object) + return NULL; + + lex_scan(lex, error); + if(lex->token == '}') + return object; + + while(1) { + char *key; + json_t *value; + + if(lex->token != TOKEN_STRING) { + error_set(error, lex, "string or '}' expected"); + goto error; + } + + key = lex_steal_string(lex); + if(!key) + return NULL; + + if(flags & JSON_REJECT_DUPLICATES) { + if(json_object_get(object, key)) { + jsonp_free(key); + error_set(error, lex, "duplicate object key"); + goto error; + } + } + + lex_scan(lex, error); + if(lex->token != ':') { + jsonp_free(key); + error_set(error, lex, "':' expected"); + goto error; + } + + lex_scan(lex, error); + value = parse_value(lex, flags, error); + if(!value) { + jsonp_free(key); + goto error; + } + + if(json_object_set_nocheck(object, key, value)) { + jsonp_free(key); + json_decref(value); + goto error; + } + + json_decref(value); + jsonp_free(key); + + lex_scan(lex, error); + if(lex->token != ',') + break; + + lex_scan(lex, error); + } + + if(lex->token != '}') { + error_set(error, lex, "'}' expected"); + goto error; + } + + return object; + +error: + json_decref(object); + return NULL; +} + +static json_t *parse_array(lex_t *lex, size_t flags, json_error_t *error) +{ + json_t *array = json_array(); + if(!array) + return NULL; + + lex_scan(lex, error); + if(lex->token == ']') + return array; + + while(lex->token) { + json_t *elem = parse_value(lex, flags, error); + if(!elem) + goto error; + + if(json_array_append(array, elem)) { + json_decref(elem); + goto error; + } + json_decref(elem); + + lex_scan(lex, error); + if(lex->token != ',') + break; + + lex_scan(lex, error); + } + + if(lex->token != ']') { + error_set(error, lex, "']' expected"); + goto error; + } + + 
return array; + +error: + json_decref(array); + return NULL; +} + +static json_t *parse_value(lex_t *lex, size_t flags, json_error_t *error) +{ + json_t *json; + double value; + + switch(lex->token) { + case TOKEN_STRING: { + json = json_string_nocheck(lex->value.string); + break; + } + + case TOKEN_INTEGER: { + if (flags & JSON_DECODE_INT_AS_REAL) { + if(jsonp_strtod(&lex->saved_text, &value)) { + error_set(error, lex, "real number overflow"); + return NULL; + } + json = json_real(value); + } else { + json = json_integer(lex->value.integer); + } + break; + } + + case TOKEN_REAL: { + json = json_real(lex->value.real); + break; + } + + case TOKEN_TRUE: + json = json_true(); + break; + + case TOKEN_FALSE: + json = json_false(); + break; + + case TOKEN_NULL: + json = json_null(); + break; + + case '{': + json = parse_object(lex, flags, error); + break; + + case '[': + json = parse_array(lex, flags, error); + break; + + case TOKEN_INVALID: + error_set(error, lex, "invalid token"); + return NULL; + + default: + error_set(error, lex, "unexpected token"); + return NULL; + } + + if(!json) + return NULL; + + return json; +} + +static json_t *parse_json(lex_t *lex, size_t flags, json_error_t *error) +{ + json_t *result; + + lex_scan(lex, error); + if(!(flags & JSON_DECODE_ANY)) { + if(lex->token != '[' && lex->token != '{') { + error_set(error, lex, "'[' or '{' expected"); + return NULL; + } + } + + result = parse_value(lex, flags, error); + if(!result) + return NULL; + + if(!(flags & JSON_DISABLE_EOF_CHECK)) { + lex_scan(lex, error); + if(lex->token != TOKEN_EOF) { + error_set(error, lex, "end of file expected"); + json_decref(result); + return NULL; + } + } + + if(error) { + /* Save the position even though there was no error */ + error->position = lex->stream.position; + } + + return result; +} + +typedef struct +{ + const char *data; + int pos; +} string_data_t; + +static int string_get(void *data) +{ + char c; + string_data_t *stream = (string_data_t *)data; + c = 
stream->data[stream->pos]; + if(c == '\0') + return EOF; + else + { + stream->pos++; + return (unsigned char)c; + } +} + +json_t *json_loads(const char *string, size_t flags, json_error_t *error) +{ + lex_t lex; + json_t *result; + string_data_t stream_data; + + jsonp_error_init(error, ""); + + if (string == NULL) { + error_set(error, NULL, "wrong arguments"); + return NULL; + } + + stream_data.data = string; + stream_data.pos = 0; + + if(lex_init(&lex, string_get, (void *)&stream_data)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} + +typedef struct +{ + const char *data; + size_t len; + size_t pos; +} buffer_data_t; + +static int buffer_get(void *data) +{ + char c; + buffer_data_t *stream = data; + if(stream->pos >= stream->len) + return EOF; + + c = stream->data[stream->pos]; + stream->pos++; + return (unsigned char)c; +} + +json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error) +{ + lex_t lex; + json_t *result; + buffer_data_t stream_data; + + jsonp_error_init(error, ""); + + if (buffer == NULL) { + error_set(error, NULL, "wrong arguments"); + return NULL; + } + + stream_data.data = buffer; + stream_data.pos = 0; + stream_data.len = buflen; + + if(lex_init(&lex, buffer_get, (void *)&stream_data)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} + +json_t *json_loadf(FILE *input, size_t flags, json_error_t *error) +{ + lex_t lex; + const char *source; + json_t *result; + + if(input == stdin) + source = ""; + else + source = ""; + + jsonp_error_init(error, source); + + if (input == NULL) { + error_set(error, NULL, "wrong arguments"); + return NULL; + } + + if(lex_init(&lex, (get_func)fgetc, input)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} + +json_t *json_load_file(const char *path, size_t flags, json_error_t *error) +{ + json_t *result; + FILE *fp; + + 
jsonp_error_init(error, path); + + if (path == NULL) { + error_set(error, NULL, "wrong arguments"); + return NULL; + } + + fp = fopen(path, "rb"); + if(!fp) + { + error_set(error, NULL, "unable to open %s: %s", + path, strerror(errno)); + return NULL; + } + + result = json_loadf(fp, flags, error); + + fclose(fp); + return result; +} + +#define MAX_BUF_LEN 1024 + +typedef struct +{ + char data[MAX_BUF_LEN]; + size_t len; + size_t pos; + json_load_callback_t callback; + void *arg; +} callback_data_t; + +static int callback_get(void *data) +{ + char c; + callback_data_t *stream = data; + + if(stream->pos >= stream->len) { + stream->pos = 0; + stream->len = stream->callback(stream->data, MAX_BUF_LEN, stream->arg); + if(stream->len == 0 || stream->len == (size_t)-1) + return EOF; + } + + c = stream->data[stream->pos]; + stream->pos++; + return (unsigned char)c; +} + +json_t *json_load_callback(json_load_callback_t callback, void *arg, size_t flags, json_error_t *error) +{ + lex_t lex; + json_t *result; + + callback_data_t stream_data; + + memset(&stream_data, 0, sizeof(stream_data)); + stream_data.callback = callback; + stream_data.arg = arg; + + jsonp_error_init(error, ""); + + if (callback == NULL) { + error_set(error, NULL, "wrong arguments"); + return NULL; + } + + if(lex_init(&lex, (get_func)callback_get, &stream_data)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} diff --git a/compat/jansson-2.6/src/memory.c b/compat/jansson-2.6/src/memory.c new file mode 100644 index 0000000..eb6cec5 --- /dev/null +++ b/compat/jansson-2.6/src/memory.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * Copyright (c) 2011-2012 Basile Starynkevitch + * + * Jansson is free software; you can redistribute it and/or modify it + * under the terms of the MIT license. See LICENSE for details. 
+ */ + +#include +#include + +#include "jansson.h" +#include "jansson_private.h" + +/* memory function pointers */ +static json_malloc_t do_malloc = malloc; +static json_free_t do_free = free; + +void *jsonp_malloc(size_t size) +{ + if(!size) + return NULL; + + return (*do_malloc)(size); +} + +void jsonp_free(void *ptr) +{ + if(!ptr) + return; + + (*do_free)(ptr); +} + +char *jsonp_strdup(const char *str) +{ + char *new_str; + size_t len; + + len = strlen(str); + if(len == (size_t)-1) + return NULL; + + new_str = jsonp_malloc(len + 1); + if(!new_str) + return NULL; + + memcpy(new_str, str, len + 1); + return new_str; +} + +void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn) +{ + do_malloc = malloc_fn; + do_free = free_fn; +} diff --git a/compat/jansson-2.6/src/pack_unpack.c b/compat/jansson-2.6/src/pack_unpack.c new file mode 100644 index 0000000..0d932f7 --- /dev/null +++ b/compat/jansson-2.6/src/pack_unpack.c @@ -0,0 +1,762 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * Copyright (c) 2011-2012 Graeme Smecher + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#include +#include "jansson.h" +#include "jansson_private.h" +#include "utf.h" + +typedef struct { + int line; + int column; + size_t pos; + char token; +} token_t; + +typedef struct { + const char *start; + const char *fmt; + token_t prev_token; + token_t token; + token_t next_token; + json_error_t *error; + size_t flags; + int line; + int column; + size_t pos; +} scanner_t; + +#define token(scanner) ((scanner)->token.token) + +static const char * const type_names[] = { + "object", + "array", + "string", + "integer", + "real", + "true", + "false", + "null" +}; + +#define type_name(x) type_names[json_typeof(x)] + +static const char unpack_value_starters[] = "{[siIbfFOon"; + + +static void scanner_init(scanner_t *s, json_error_t *error, + size_t flags, const char *fmt) +{ + s->error = error; + s->flags = flags; + s->fmt = s->start = fmt; + memset(&s->prev_token, 0, sizeof(token_t)); + memset(&s->token, 0, sizeof(token_t)); + memset(&s->next_token, 0, sizeof(token_t)); + s->line = 1; + s->column = 0; + s->pos = 0; +} + +static void next_token(scanner_t *s) +{ + const char *t; + s->prev_token = s->token; + + if(s->next_token.line) { + s->token = s->next_token; + s->next_token.line = 0; + return; + } + + t = s->fmt; + s->column++; + s->pos++; + + /* skip space and ignored chars */ + while(*t == ' ' || *t == '\t' || *t == '\n' || *t == ',' || *t == ':') { + if(*t == '\n') { + s->line++; + s->column = 1; + } + else + s->column++; + + s->pos++; + t++; + } + + s->token.token = *t; + s->token.line = s->line; + s->token.column = s->column; + s->token.pos = s->pos; + + t++; + s->fmt = t; +} + +static void prev_token(scanner_t *s) +{ + s->next_token = s->token; + s->token = s->prev_token; +} + +static void set_error(scanner_t *s, const char *source, const char *fmt, ...) 
+{ + va_list ap; + va_start(ap, fmt); + + jsonp_error_vset(s->error, s->token.line, s->token.column, s->token.pos, + fmt, ap); + + jsonp_error_set_source(s->error, source); + + va_end(ap); +} + +static json_t *pack(scanner_t *s, va_list *ap); + + +/* ours will be set to 1 if jsonp_free() must be called for the result + afterwards */ +static char *read_string(scanner_t *s, va_list *ap, + const char *purpose, int *ours) +{ + char t; + strbuffer_t strbuff; + const char *str; + size_t length; + char *result; + + next_token(s); + t = token(s); + prev_token(s); + + if(t != '#' && t != '+') { + /* Optimize the simple case */ + str = va_arg(*ap, const char *); + + if(!str) { + set_error(s, "", "NULL string argument"); + return NULL; + } + + if(!utf8_check_string(str, -1)) { + set_error(s, "", "Invalid UTF-8 %s", purpose); + return NULL; + } + + *ours = 0; + return (char *)str; + } + + strbuffer_init(&strbuff); + + while(1) { + str = va_arg(*ap, const char *); + if(!str) { + set_error(s, "", "NULL string argument"); + strbuffer_close(&strbuff); + return NULL; + } + + next_token(s); + + if(token(s) == '#') { + length = va_arg(*ap, int); + } + else { + prev_token(s); + length = strlen(str); + } + + if(strbuffer_append_bytes(&strbuff, str, length) == -1) { + set_error(s, "", "Out of memory"); + strbuffer_close(&strbuff); + return NULL; + } + + next_token(s); + if(token(s) != '+') { + prev_token(s); + break; + } + } + + result = strbuffer_steal_value(&strbuff); + + if(!utf8_check_string(result, -1)) { + set_error(s, "", "Invalid UTF-8 %s", purpose); + return NULL; + } + + *ours = 1; + return result; +} + +static json_t *pack_object(scanner_t *s, va_list *ap) +{ + json_t *object = json_object(); + next_token(s); + + while(token(s) != '}') { + char *key; + int ours; + json_t *value; + + if(!token(s)) { + set_error(s, "", "Unexpected end of format string"); + goto error; + } + + if(token(s) != 's') { + set_error(s, "", "Expected format 's', got '%c'", token(s)); + goto error; + } 
+ + key = read_string(s, ap, "object key", &ours); + if(!key) + goto error; + + next_token(s); + + value = pack(s, ap); + if(!value) + goto error; + + if(json_object_set_new_nocheck(object, key, value)) { + if(ours) + jsonp_free(key); + + set_error(s, "", "Unable to add key \"%s\"", key); + goto error; + } + + if(ours) + jsonp_free(key); + + next_token(s); + } + + return object; + +error: + json_decref(object); + return NULL; +} + +static json_t *pack_array(scanner_t *s, va_list *ap) +{ + json_t *array = json_array(); + next_token(s); + + while(token(s) != ']') { + json_t *value; + + if(!token(s)) { + set_error(s, "", "Unexpected end of format string"); + goto error; + } + + value = pack(s, ap); + if(!value) + goto error; + + if(json_array_append_new(array, value)) { + set_error(s, "", "Unable to append to array"); + goto error; + } + + next_token(s); + } + return array; + +error: + json_decref(array); + return NULL; +} + +static json_t *pack(scanner_t *s, va_list *ap) +{ + switch(token(s)) { + case '{': + return pack_object(s, ap); + + case '[': + return pack_array(s, ap); + + case 's': { /* string */ + char *str; + int ours; + json_t *result; + + str = read_string(s, ap, "string", &ours); + if(!str) + return NULL; + + result = json_string_nocheck(str); + if(ours) + jsonp_free(str); + + return result; + } + + case 'n': /* null */ + return json_null(); + + case 'b': /* boolean */ + return va_arg(*ap, int) ? 
json_true() : json_false(); + + case 'i': /* integer from int */ + return json_integer(va_arg(*ap, int)); + + case 'I': /* integer from json_int_t */ + return json_integer(va_arg(*ap, json_int_t)); + + case 'f': /* real */ + return json_real(va_arg(*ap, double)); + + case 'O': /* a json_t object; increments refcount */ + return json_incref(va_arg(*ap, json_t *)); + + case 'o': /* a json_t object; doesn't increment refcount */ + return va_arg(*ap, json_t *); + + default: + set_error(s, "", "Unexpected format character '%c'", + token(s)); + return NULL; + } +} + +static int unpack(scanner_t *s, json_t *root, va_list *ap); + +static int unpack_object(scanner_t *s, json_t *root, va_list *ap) +{ + int ret = -1; + int strict = 0; + + /* Use a set (emulated by a hashtable) to check that all object + keys are accessed. Checking that the correct number of keys + were accessed is not enough, as the same key can be unpacked + multiple times. + */ + hashtable_t key_set; + + if(hashtable_init(&key_set)) { + set_error(s, "", "Out of memory"); + return -1; + } + + if(root && !json_is_object(root)) { + set_error(s, "", "Expected object, got %s", + type_name(root)); + goto out; + } + next_token(s); + + while(token(s) != '}') { + const char *key; + json_t *value; + int opt = 0; + + if(strict != 0) { + set_error(s, "", "Expected '}' after '%c', got '%c'", + (strict == 1 ? '!' : '*'), token(s)); + goto out; + } + + if(!token(s)) { + set_error(s, "", "Unexpected end of format string"); + goto out; + } + + if(token(s) == '!' || token(s) == '*') { + strict = (token(s) == '!' ? 
1 : -1); + next_token(s); + continue; + } + + if(token(s) != 's') { + set_error(s, "", "Expected format 's', got '%c'", token(s)); + goto out; + } + + key = va_arg(*ap, const char *); + if(!key) { + set_error(s, "", "NULL object key"); + goto out; + } + + next_token(s); + + if(token(s) == '?') { + opt = 1; + next_token(s); + } + + if(!root) { + /* skipping */ + value = NULL; + } + else { + value = json_object_get(root, key); + if(!value && !opt) { + set_error(s, "", "Object item not found: %s", key); + goto out; + } + } + + if(unpack(s, value, ap)) + goto out; + + hashtable_set(&key_set, key, 0, json_null()); + next_token(s); + } + + if(strict == 0 && (s->flags & JSON_STRICT)) + strict = 1; + + if(root && strict == 1 && key_set.size != json_object_size(root)) { + long diff = (long)json_object_size(root) - (long)key_set.size; + set_error(s, "", "%li object item(s) left unpacked", diff); + goto out; + } + + ret = 0; + +out: + hashtable_close(&key_set); + return ret; +} + +static int unpack_array(scanner_t *s, json_t *root, va_list *ap) +{ + size_t i = 0; + int strict = 0; + + if(root && !json_is_array(root)) { + set_error(s, "", "Expected array, got %s", type_name(root)); + return -1; + } + next_token(s); + + while(token(s) != ']') { + json_t *value; + + if(strict != 0) { + set_error(s, "", "Expected ']' after '%c', got '%c'", + (strict == 1 ? '!' : '*'), + token(s)); + return -1; + } + + if(!token(s)) { + set_error(s, "", "Unexpected end of format string"); + return -1; + } + + if(token(s) == '!' || token(s) == '*') { + strict = (token(s) == '!' ? 
1 : -1); + next_token(s); + continue; + } + + if(!strchr(unpack_value_starters, token(s))) { + set_error(s, "", "Unexpected format character '%c'", + token(s)); + return -1; + } + + if(!root) { + /* skipping */ + value = NULL; + } + else { + value = json_array_get(root, i); + if(!value) { + set_error(s, "", "Array index %lu out of range", + (unsigned long)i); + return -1; + } + } + + if(unpack(s, value, ap)) + return -1; + + next_token(s); + i++; + } + + if(strict == 0 && (s->flags & JSON_STRICT)) + strict = 1; + + if(root && strict == 1 && i != json_array_size(root)) { + long diff = (long)json_array_size(root) - (long)i; + set_error(s, "", "%li array item(s) left unpacked", diff); + return -1; + } + + return 0; +} + +static int unpack(scanner_t *s, json_t *root, va_list *ap) +{ + switch(token(s)) + { + case '{': + return unpack_object(s, root, ap); + + case '[': + return unpack_array(s, root, ap); + + case 's': + if(root && !json_is_string(root)) { + set_error(s, "", "Expected string, got %s", + type_name(root)); + return -1; + } + + if(!(s->flags & JSON_VALIDATE_ONLY)) { + const char **target; + + target = va_arg(*ap, const char **); + if(!target) { + set_error(s, "", "NULL string argument"); + return -1; + } + + if(root) + *target = json_string_value(root); + } + return 0; + + case 'i': + if(root && !json_is_integer(root)) { + set_error(s, "", "Expected integer, got %s", + type_name(root)); + return -1; + } + + if(!(s->flags & JSON_VALIDATE_ONLY)) { + int *target = va_arg(*ap, int*); + if(root) + *target = (int)json_integer_value(root); + } + + return 0; + + case 'I': + if(root && !json_is_integer(root)) { + set_error(s, "", "Expected integer, got %s", + type_name(root)); + return -1; + } + + if(!(s->flags & JSON_VALIDATE_ONLY)) { + json_int_t *target = va_arg(*ap, json_int_t*); + if(root) + *target = json_integer_value(root); + } + + return 0; + + case 'b': + if(root && !json_is_boolean(root)) { + set_error(s, "", "Expected true or false, got %s", + 
type_name(root)); + return -1; + } + + if(!(s->flags & JSON_VALIDATE_ONLY)) { + int *target = va_arg(*ap, int*); + if(root) + *target = json_is_true(root); + } + + return 0; + + case 'f': + if(root && !json_is_real(root)) { + set_error(s, "", "Expected real, got %s", + type_name(root)); + return -1; + } + + if(!(s->flags & JSON_VALIDATE_ONLY)) { + double *target = va_arg(*ap, double*); + if(root) + *target = json_real_value(root); + } + + return 0; + + case 'F': + if(root && !json_is_number(root)) { + set_error(s, "", "Expected real or integer, got %s", + type_name(root)); + return -1; + } + + if(!(s->flags & JSON_VALIDATE_ONLY)) { + double *target = va_arg(*ap, double*); + if(root) + *target = json_number_value(root); + } + + return 0; + + case 'O': + if(root && !(s->flags & JSON_VALIDATE_ONLY)) + json_incref(root); + /* Fall through */ + + case 'o': + if(!(s->flags & JSON_VALIDATE_ONLY)) { + json_t **target = va_arg(*ap, json_t**); + if(root) + *target = root; + } + + return 0; + + case 'n': + /* Never assign, just validate */ + if(root && !json_is_null(root)) { + set_error(s, "", "Expected null, got %s", + type_name(root)); + return -1; + } + return 0; + + default: + set_error(s, "", "Unexpected format character '%c'", + token(s)); + return -1; + } +} + +json_t *json_vpack_ex(json_error_t *error, size_t flags, + const char *fmt, va_list ap) +{ + scanner_t s; + va_list ap_copy; + json_t *value; + + if(!fmt || !*fmt) { + jsonp_error_init(error, ""); + jsonp_error_set(error, -1, -1, 0, "NULL or empty format string"); + return NULL; + } + jsonp_error_init(error, NULL); + + scanner_init(&s, error, flags, fmt); + next_token(&s); + + va_copy(ap_copy, ap); + value = pack(&s, &ap_copy); + va_end(ap_copy); + + if(!value) + return NULL; + + next_token(&s); + if(token(&s)) { + json_decref(value); + set_error(&s, "", "Garbage after format string"); + return NULL; + } + + return value; +} + +json_t *json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...) 
+{ + json_t *value; + va_list ap; + + va_start(ap, fmt); + value = json_vpack_ex(error, flags, fmt, ap); + va_end(ap); + + return value; +} + +json_t *json_pack(const char *fmt, ...) +{ + json_t *value; + va_list ap; + + va_start(ap, fmt); + value = json_vpack_ex(NULL, 0, fmt, ap); + va_end(ap); + + return value; +} + +int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, + const char *fmt, va_list ap) +{ + scanner_t s; + va_list ap_copy; + + if(!root) { + jsonp_error_init(error, ""); + jsonp_error_set(error, -1, -1, 0, "NULL root value"); + return -1; + } + + if(!fmt || !*fmt) { + jsonp_error_init(error, ""); + jsonp_error_set(error, -1, -1, 0, "NULL or empty format string"); + return -1; + } + jsonp_error_init(error, NULL); + + scanner_init(&s, error, flags, fmt); + next_token(&s); + + va_copy(ap_copy, ap); + if(unpack(&s, root, &ap_copy)) { + va_end(ap_copy); + return -1; + } + va_end(ap_copy); + + next_token(&s); + if(token(&s)) { + set_error(&s, "", "Garbage after format string"); + return -1; + } + + return 0; +} + +int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, ...) +{ + int ret; + va_list ap; + + va_start(ap, fmt); + ret = json_vunpack_ex(root, error, flags, fmt, ap); + va_end(ap); + + return ret; +} + +int json_unpack(json_t *root, const char *fmt, ...) +{ + int ret; + va_list ap; + + va_start(ap, fmt); + ret = json_vunpack_ex(root, NULL, 0, fmt, ap); + va_end(ap); + + return ret; +} diff --git a/compat/jansson-2.6/src/strbuffer.c b/compat/jansson-2.6/src/strbuffer.c new file mode 100644 index 0000000..2d6ff31 --- /dev/null +++ b/compat/jansson-2.6/src/strbuffer.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include "jansson_private.h"
+#include "strbuffer.h"
+
+#define STRBUFFER_MIN_SIZE  16
+#define STRBUFFER_FACTOR    2
+#define STRBUFFER_SIZE_MAX  ((size_t)-1)
+
+/* Initialize an empty, NUL-terminated growable buffer.
+   Returns 0 on success, -1 on allocation failure. */
+int strbuffer_init(strbuffer_t *strbuff)
+{
+    strbuff->size = STRBUFFER_MIN_SIZE;
+    strbuff->length = 0;
+
+    strbuff->value = jsonp_malloc(strbuff->size);
+    if(!strbuff->value)
+        return -1;
+
+    /* initialize to empty */
+    strbuff->value[0] = '\0';
+    return 0;
+}
+
+/* Release the buffer's storage and reset it to a closed state.
+   Safe to call on an already-closed buffer (value == NULL). */
+void strbuffer_close(strbuffer_t *strbuff)
+{
+    if(strbuff->value)
+        jsonp_free(strbuff->value);
+
+    strbuff->size = 0;
+    strbuff->length = 0;
+    strbuff->value = NULL;
+}
+
+/* Truncate to the empty string without releasing the allocation */
+void strbuffer_clear(strbuffer_t *strbuff)
+{
+    strbuff->length = 0;
+    strbuff->value[0] = '\0';
+}
+
+const char *strbuffer_value(const strbuffer_t *strbuff)
+{
+    return strbuff->value;
+}
+
+/* Transfer ownership of the value to the caller (who must
+   jsonp_free() it); the buffer is left closed */
+char *strbuffer_steal_value(strbuffer_t *strbuff)
+{
+    char *result = strbuff->value;
+    strbuff->value = NULL;
+    return result;
+}
+
+int strbuffer_append(strbuffer_t *strbuff, const char *string)
+{
+    return strbuffer_append_bytes(strbuff, string, strlen(string));
+}
+
+int strbuffer_append_byte(strbuffer_t *strbuff, char byte)
+{
+    return strbuffer_append_bytes(strbuff, &byte, 1);
+}
+
+/* Append size bytes of data, growing the storage geometrically as
+   needed.  The value stays NUL-terminated.  Returns 0 on success,
+   -1 on overflow or allocation failure. */
+int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, size_t size)
+{
+    /* ">=" leaves room for the terminating NUL byte */
+    if(size >= strbuff->size - strbuff->length)
+    {
+        size_t new_size;
+        char *new_value;
+
+        /* avoid integer overflow */
+        if (strbuff->size > STRBUFFER_SIZE_MAX / STRBUFFER_FACTOR
+            || size > STRBUFFER_SIZE_MAX - 1
+            || strbuff->length > STRBUFFER_SIZE_MAX - 1 - size)
+            return -1;
+
+        /* grow by at least a constant factor so repeated appends
+           stay amortized O(1) */
+        new_size = max(strbuff->size * STRBUFFER_FACTOR,
+                       strbuff->length + size + 1);
+
+        new_value = jsonp_malloc(new_size);
+        if(!new_value)
+            return -1;
+
+        memcpy(new_value, strbuff->value, strbuff->length);
+
+        jsonp_free(strbuff->value);
+        strbuff->value = new_value;
+        strbuff->size = new_size;
+    }
+
+    memcpy(strbuff->value + strbuff->length, data, size);
+
strbuff->length += size; + strbuff->value[strbuff->length] = '\0'; + + return 0; +} + +char strbuffer_pop(strbuffer_t *strbuff) +{ + if(strbuff->length > 0) { + char c = strbuff->value[--strbuff->length]; + strbuff->value[strbuff->length] = '\0'; + return c; + } + else + return '\0'; +} diff --git a/compat/jansson-2.6/src/strbuffer.h b/compat/jansson-2.6/src/strbuffer.h new file mode 100644 index 0000000..06fd065 --- /dev/null +++ b/compat/jansson-2.6/src/strbuffer.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef STRBUFFER_H +#define STRBUFFER_H + +typedef struct { + char *value; + size_t length; /* bytes used */ + size_t size; /* bytes allocated */ +} strbuffer_t; + +int strbuffer_init(strbuffer_t *strbuff); +void strbuffer_close(strbuffer_t *strbuff); + +void strbuffer_clear(strbuffer_t *strbuff); + +const char *strbuffer_value(const strbuffer_t *strbuff); + +/* Steal the value and close the strbuffer */ +char *strbuffer_steal_value(strbuffer_t *strbuff); + +int strbuffer_append(strbuffer_t *strbuff, const char *string); +int strbuffer_append_byte(strbuffer_t *strbuff, char byte); +int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, size_t size); + +char strbuffer_pop(strbuffer_t *strbuff); + +#endif diff --git a/compat/jansson-2.6/src/strconv.c b/compat/jansson-2.6/src/strconv.c new file mode 100644 index 0000000..3e2cb7c --- /dev/null +++ b/compat/jansson-2.6/src/strconv.c @@ -0,0 +1,134 @@ +#include +#include +#include +#include +#include "jansson_private.h" +#include "strbuffer.h" + +/* need config.h to get the correct snprintf */ +#ifdef HAVE_CONFIG_H +#include +#endif + +#if JSON_HAVE_LOCALECONV +#include + +/* + - This code assumes that the decimal separator is exactly one + character. 
+ + - If setlocale() is called by another thread between the call to + localeconv() and the call to sprintf() or strtod(), the result may + be wrong. setlocale() is not thread-safe and should not be used + this way. Multi-threaded programs should use uselocale() instead. +*/ + +static void to_locale(strbuffer_t *strbuffer) +{ + const char *point; + char *pos; + + point = localeconv()->decimal_point; + if(*point == '.') { + /* No conversion needed */ + return; + } + + pos = strchr(strbuffer->value, '.'); + if(pos) + *pos = *point; +} + +static void from_locale(char *buffer) +{ + const char *point; + char *pos; + + point = localeconv()->decimal_point; + if(*point == '.') { + /* No conversion needed */ + return; + } + + pos = strchr(buffer, *point); + if(pos) + *pos = '.'; +} +#endif + +int jsonp_strtod(strbuffer_t *strbuffer, double *out) +{ + double value; + char *end; + +#if JSON_HAVE_LOCALECONV + to_locale(strbuffer); +#endif + + errno = 0; + value = strtod(strbuffer->value, &end); + assert(end == strbuffer->value + strbuffer->length); + + if(errno == ERANGE && value != 0) { + /* Overflow */ + return -1; + } + + *out = value; + return 0; +} + +int jsonp_dtostr(char *buffer, size_t size, double value) +{ + int ret; + char *start, *end; + size_t length; + + ret = snprintf(buffer, size, "%.17g", value); + if(ret < 0) + return -1; + + length = (size_t)ret; + if(length >= size) + return -1; + +#if JSON_HAVE_LOCALECONV + from_locale(buffer); +#endif + + /* Make sure there's a dot or 'e' in the output. Otherwise + a real is converted to an integer when decoding */ + if(strchr(buffer, '.') == NULL && + strchr(buffer, 'e') == NULL) + { + if(length + 3 >= size) { + /* No space to append ".0" */ + return -1; + } + buffer[length] = '.'; + buffer[length + 1] = '0'; + buffer[length + 2] = '\0'; + length += 2; + } + + /* Remove leading '+' from positive exponent. 
Also remove leading + zeros from exponents (added by some printf() implementations) */ + start = strchr(buffer, 'e'); + if(start) { + start++; + end = start + 1; + + if(*start == '-') + start++; + + while(*end == '0') + end++; + + if(end != start) { + memmove(start, end, length - (size_t)(end - buffer)); + length -= (size_t)(end - start); + } + } + + return (int)length; +} diff --git a/compat/jansson-2.6/src/utf.c b/compat/jansson-2.6/src/utf.c new file mode 100644 index 0000000..65b849b --- /dev/null +++ b/compat/jansson-2.6/src/utf.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include "utf.h" + +int utf8_encode(int32_t codepoint, char *buffer, int *size) +{ + if(codepoint < 0) + return -1; + else if(codepoint < 0x80) + { + buffer[0] = (char)codepoint; + *size = 1; + } + else if(codepoint < 0x800) + { + buffer[0] = 0xC0 + ((codepoint & 0x7C0) >> 6); + buffer[1] = 0x80 + ((codepoint & 0x03F)); + *size = 2; + } + else if(codepoint < 0x10000) + { + buffer[0] = 0xE0 + ((codepoint & 0xF000) >> 12); + buffer[1] = 0x80 + ((codepoint & 0x0FC0) >> 6); + buffer[2] = 0x80 + ((codepoint & 0x003F)); + *size = 3; + } + else if(codepoint <= 0x10FFFF) + { + buffer[0] = 0xF0 + ((codepoint & 0x1C0000) >> 18); + buffer[1] = 0x80 + ((codepoint & 0x03F000) >> 12); + buffer[2] = 0x80 + ((codepoint & 0x000FC0) >> 6); + buffer[3] = 0x80 + ((codepoint & 0x00003F)); + *size = 4; + } + else + return -1; + + return 0; +} + +int utf8_check_first(char byte) +{ + unsigned char u = (unsigned char)byte; + + if(u < 0x80) + return 1; + + if(0x80 <= u && u <= 0xBF) { + /* second, third or fourth byte of a multi-byte + sequence, i.e. 
a "continuation byte" */ + return 0; + } + else if(u == 0xC0 || u == 0xC1) { + /* overlong encoding of an ASCII byte */ + return 0; + } + else if(0xC2 <= u && u <= 0xDF) { + /* 2-byte sequence */ + return 2; + } + + else if(0xE0 <= u && u <= 0xEF) { + /* 3-byte sequence */ + return 3; + } + else if(0xF0 <= u && u <= 0xF4) { + /* 4-byte sequence */ + return 4; + } + else { /* u >= 0xF5 */ + /* Restricted (start of 4-, 5- or 6-byte sequence) or invalid + UTF-8 */ + return 0; + } +} + +int utf8_check_full(const char *buffer, int size, int32_t *codepoint) +{ + int i; + int32_t value = 0; + unsigned char u = (unsigned char)buffer[0]; + + if(size == 2) + { + value = u & 0x1F; + } + else if(size == 3) + { + value = u & 0xF; + } + else if(size == 4) + { + value = u & 0x7; + } + else + return 0; + + for(i = 1; i < size; i++) + { + u = (unsigned char)buffer[i]; + + if(u < 0x80 || u > 0xBF) { + /* not a continuation byte */ + return 0; + } + + value = (value << 6) + (u & 0x3F); + } + + if(value > 0x10FFFF) { + /* not in Unicode range */ + return 0; + } + + else if(0xD800 <= value && value <= 0xDFFF) { + /* invalid code point (UTF-16 surrogate halves) */ + return 0; + } + + else if((size == 2 && value < 0x80) || + (size == 3 && value < 0x800) || + (size == 4 && value < 0x10000)) { + /* overlong encoding */ + return 0; + } + + if(codepoint) + *codepoint = value; + + return 1; +} + +const char *utf8_iterate(const char *buffer, int32_t *codepoint) +{ + int count; + int32_t value; + + if(!*buffer) + return buffer; + + count = utf8_check_first(buffer[0]); + if(count <= 0) + return NULL; + + if(count == 1) + value = (unsigned char)buffer[0]; + else + { + if(!utf8_check_full(buffer, count, &value)) + return NULL; + } + + if(codepoint) + *codepoint = value; + + return buffer + count; +} + +int utf8_check_string(const char *string, int length) +{ + int i; + + if(length == -1) + length = strlen(string); + + for(i = 0; i < length; i++) + { + int count = utf8_check_first(string[i]); + 
if(count == 0) + return 0; + else if(count > 1) + { + if(i + count > length) + return 0; + + if(!utf8_check_full(&string[i], count, NULL)) + return 0; + + i += count - 1; + } + } + + return 1; +} diff --git a/compat/jansson-2.6/src/utf.h b/compat/jansson-2.6/src/utf.h new file mode 100644 index 0000000..cb10c24 --- /dev/null +++ b/compat/jansson-2.6/src/utf.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef UTF_H +#define UTF_H + +#ifdef HAVE_CONFIG_H +#include + +#ifdef HAVE_INTTYPES_H +/* inttypes.h includes stdint.h in a standard environment, so there's +no need to include stdint.h separately. If inttypes.h doesn't define +int32_t, it's defined in config.h. */ +#include +#endif /* HAVE_INTTYPES_H */ + +#else /* !HAVE_CONFIG_H */ +#ifdef _WIN32 +typedef int int32_t; +#else /* !_WIN32 */ +/* Assume a standard environment */ +#include +#endif /* _WIN32 */ + +#endif /* HAVE_CONFIG_H */ + +int utf8_encode(int codepoint, char *buffer, int *size); + +int utf8_check_first(char byte); +int utf8_check_full(const char *buffer, int size, int32_t *codepoint); +const char *utf8_iterate(const char *buffer, int32_t *codepoint); + +int utf8_check_string(const char *string, int length); + +#endif diff --git a/compat/jansson-2.6/src/value.c b/compat/jansson-2.6/src/value.c new file mode 100644 index 0000000..582849b --- /dev/null +++ b/compat/jansson-2.6/src/value.c @@ -0,0 +1,950 @@ +/* + * Copyright (c) 2009-2013 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include +#include +#include +#include + +#include "jansson.h" +#include "hashtable.h" +#include "jansson_private.h" +#include "utf.h" + +/* Work around nonstandard isnan() and isinf() implementations */ +#ifndef isnan +static JSON_INLINE int isnan(double x) { return x != x; } +#endif +#ifndef isinf +static JSON_INLINE int isinf(double x) { return !isnan(x) && isnan(x - x); } +#endif + +static JSON_INLINE void json_init(json_t *json, json_type type) +{ + json->type = type; + json->refcount = 1; +} + + +/*** object ***/ + +json_t *json_object(void) +{ + json_object_t *object = jsonp_malloc(sizeof(json_object_t)); + if(!object) + return NULL; + json_init(&object->json, JSON_OBJECT); + + if(hashtable_init(&object->hashtable)) + { + jsonp_free(object); + return NULL; + } + + object->serial = 0; + object->visited = 0; + + return &object->json; +} + +static void json_delete_object(json_object_t *object) +{ + hashtable_close(&object->hashtable); + jsonp_free(object); +} + +size_t json_object_size(const json_t *json) +{ + json_object_t *object; + + if(!json_is_object(json)) + return 0; + + object = json_to_object(json); + return object->hashtable.size; +} + +json_t *json_object_get(const json_t *json, const char *key) +{ + json_object_t *object; + + if(!json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_get(&object->hashtable, key); +} + +int json_object_set_new_nocheck(json_t *json, const char *key, json_t *value) +{ + json_object_t *object; + + if(!value) + return -1; + + if(!key || !json_is_object(json) || json == value) + { + json_decref(value); + return -1; + } + object = json_to_object(json); + + if(hashtable_set(&object->hashtable, key, object->serial++, value)) + { + json_decref(value); + return -1; + } + + return 0; +} + +int json_object_set_new(json_t *json, const char *key, json_t *value) +{ + if(!key || !utf8_check_string(key, -1)) + { + json_decref(value); + 
return -1; + } + + return json_object_set_new_nocheck(json, key, value); +} + +int json_object_del(json_t *json, const char *key) +{ + json_object_t *object; + + if(!json_is_object(json)) + return -1; + + object = json_to_object(json); + return hashtable_del(&object->hashtable, key); +} + +int json_object_clear(json_t *json) +{ + json_object_t *object; + + if(!json_is_object(json)) + return -1; + + object = json_to_object(json); + + hashtable_clear(&object->hashtable); + object->serial = 0; + + return 0; +} + +int json_object_update(json_t *object, json_t *other) +{ + const char *key; + json_t *value; + + if(!json_is_object(object) || !json_is_object(other)) + return -1; + + json_object_foreach(other, key, value) { + if(json_object_set_nocheck(object, key, value)) + return -1; + } + + return 0; +} + +int json_object_update_existing(json_t *object, json_t *other) +{ + const char *key; + json_t *value; + + if(!json_is_object(object) || !json_is_object(other)) + return -1; + + json_object_foreach(other, key, value) { + if(json_object_get(object, key)) + json_object_set_nocheck(object, key, value); + } + + return 0; +} + +int json_object_update_missing(json_t *object, json_t *other) +{ + const char *key; + json_t *value; + + if(!json_is_object(object) || !json_is_object(other)) + return -1; + + json_object_foreach(other, key, value) { + if(!json_object_get(object, key)) + json_object_set_nocheck(object, key, value); + } + + return 0; +} + +void *json_object_iter(json_t *json) +{ + json_object_t *object; + + if(!json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_iter(&object->hashtable); +} + +void *json_object_iter_at(json_t *json, const char *key) +{ + json_object_t *object; + + if(!key || !json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_iter_at(&object->hashtable, key); +} + +void *json_object_iter_next(json_t *json, void *iter) +{ + json_object_t *object; + + if(!json_is_object(json) 
|| iter == NULL) + return NULL; + + object = json_to_object(json); + return hashtable_iter_next(&object->hashtable, iter); +} + +const char *json_object_iter_key(void *iter) +{ + if(!iter) + return NULL; + + return hashtable_iter_key(iter); +} + +json_t *json_object_iter_value(void *iter) +{ + if(!iter) + return NULL; + + return (json_t *)hashtable_iter_value(iter); +} + +int json_object_iter_set_new(json_t *json, void *iter, json_t *value) +{ + if(!json_is_object(json) || !iter || !value) + return -1; + + hashtable_iter_set(iter, value); + return 0; +} + +void *json_object_key_to_iter(const char *key) +{ + if(!key) + return NULL; + + return hashtable_key_to_iter(key); +} + +static int json_object_equal(json_t *object1, json_t *object2) +{ + const char *key; + json_t *value1, *value2; + + if(json_object_size(object1) != json_object_size(object2)) + return 0; + + json_object_foreach(object1, key, value1) { + value2 = json_object_get(object2, key); + + if(!json_equal(value1, value2)) + return 0; + } + + return 1; +} + +static json_t *json_object_copy(json_t *object) +{ + json_t *result; + + const char *key; + json_t *value; + + result = json_object(); + if(!result) + return NULL; + + json_object_foreach(object, key, value) + json_object_set_nocheck(result, key, value); + + return result; +} + +static json_t *json_object_deep_copy(const json_t *object) +{ + json_t *result; + void *iter; + + result = json_object(); + if(!result) + return NULL; + + /* Cannot use json_object_foreach because object has to be cast + non-const */ + iter = json_object_iter((json_t *)object); + while(iter) { + const char *key; + const json_t *value; + key = json_object_iter_key(iter); + value = json_object_iter_value(iter); + + json_object_set_new_nocheck(result, key, json_deep_copy(value)); + iter = json_object_iter_next((json_t *)object, iter); + } + + return result; +} + + +/*** array ***/ + +json_t *json_array(void) +{ + json_array_t *array = jsonp_malloc(sizeof(json_array_t)); + 
if(!array) + return NULL; + json_init(&array->json, JSON_ARRAY); + + array->entries = 0; + array->size = 8; + + array->table = jsonp_malloc(array->size * sizeof(json_t *)); + if(!array->table) { + jsonp_free(array); + return NULL; + } + + array->visited = 0; + + return &array->json; +} + +static void json_delete_array(json_array_t *array) +{ + size_t i; + + for(i = 0; i < array->entries; i++) + json_decref(array->table[i]); + + jsonp_free(array->table); + jsonp_free(array); +} + +size_t json_array_size(const json_t *json) +{ + if(!json_is_array(json)) + return 0; + + return json_to_array(json)->entries; +} + +json_t *json_array_get(const json_t *json, size_t index) +{ + json_array_t *array; + if(!json_is_array(json)) + return NULL; + array = json_to_array(json); + + if(index >= array->entries) + return NULL; + + return array->table[index]; +} + +int json_array_set_new(json_t *json, size_t index, json_t *value) +{ + json_array_t *array; + + if(!value) + return -1; + + if(!json_is_array(json) || json == value) + { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if(index >= array->entries) + { + json_decref(value); + return -1; + } + + json_decref(array->table[index]); + array->table[index] = value; + + return 0; +} + +static void array_move(json_array_t *array, size_t dest, + size_t src, size_t count) +{ + memmove(&array->table[dest], &array->table[src], count * sizeof(json_t *)); +} + +static void array_copy(json_t **dest, size_t dpos, + json_t **src, size_t spos, + size_t count) +{ + memcpy(&dest[dpos], &src[spos], count * sizeof(json_t *)); +} + +static json_t **json_array_grow(json_array_t *array, + size_t amount, + int copy) +{ + size_t new_size; + json_t **old_table, **new_table; + + if(array->entries + amount <= array->size) + return array->table; + + old_table = array->table; + + new_size = max(array->size + amount, array->size * 2); + new_table = jsonp_malloc(new_size * sizeof(json_t *)); + if(!new_table) + return NULL; + + 
array->size = new_size; + array->table = new_table; + + if(copy) { + array_copy(array->table, 0, old_table, 0, array->entries); + jsonp_free(old_table); + return array->table; + } + + return old_table; +} + +int json_array_append_new(json_t *json, json_t *value) +{ + json_array_t *array; + + if(!value) + return -1; + + if(!json_is_array(json) || json == value) + { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if(!json_array_grow(array, 1, 1)) { + json_decref(value); + return -1; + } + + array->table[array->entries] = value; + array->entries++; + + return 0; +} + +int json_array_insert_new(json_t *json, size_t index, json_t *value) +{ + json_array_t *array; + json_t **old_table; + + if(!value) + return -1; + + if(!json_is_array(json) || json == value) { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if(index > array->entries) { + json_decref(value); + return -1; + } + + old_table = json_array_grow(array, 1, 0); + if(!old_table) { + json_decref(value); + return -1; + } + + if(old_table != array->table) { + array_copy(array->table, 0, old_table, 0, index); + array_copy(array->table, index + 1, old_table, index, + array->entries - index); + jsonp_free(old_table); + } + else + array_move(array, index + 1, index, array->entries - index); + + array->table[index] = value; + array->entries++; + + return 0; +} + +int json_array_remove(json_t *json, size_t index) +{ + json_array_t *array; + + if(!json_is_array(json)) + return -1; + array = json_to_array(json); + + if(index >= array->entries) + return -1; + + json_decref(array->table[index]); + + /* If we're removing the last element, nothing has to be moved */ + if(index < array->entries - 1) + array_move(array, index, index + 1, array->entries - index - 1); + + array->entries--; + + return 0; +} + +int json_array_clear(json_t *json) +{ + json_array_t *array; + size_t i; + + if(!json_is_array(json)) + return -1; + array = json_to_array(json); + + for(i = 0; i < 
array->entries; i++) + json_decref(array->table[i]); + + array->entries = 0; + return 0; +} + +int json_array_extend(json_t *json, json_t *other_json) +{ + json_array_t *array, *other; + size_t i; + + if(!json_is_array(json) || !json_is_array(other_json)) + return -1; + array = json_to_array(json); + other = json_to_array(other_json); + + if(!json_array_grow(array, other->entries, 1)) + return -1; + + for(i = 0; i < other->entries; i++) + json_incref(other->table[i]); + + array_copy(array->table, array->entries, other->table, 0, other->entries); + + array->entries += other->entries; + return 0; +} + +static int json_array_equal(json_t *array1, json_t *array2) +{ + size_t i, size; + + size = json_array_size(array1); + if(size != json_array_size(array2)) + return 0; + + for(i = 0; i < size; i++) + { + json_t *value1, *value2; + + value1 = json_array_get(array1, i); + value2 = json_array_get(array2, i); + + if(!json_equal(value1, value2)) + return 0; + } + + return 1; +} + +static json_t *json_array_copy(json_t *array) +{ + json_t *result; + size_t i; + + result = json_array(); + if(!result) + return NULL; + + for(i = 0; i < json_array_size(array); i++) + json_array_append(result, json_array_get(array, i)); + + return result; +} + +static json_t *json_array_deep_copy(const json_t *array) +{ + json_t *result; + size_t i; + + result = json_array(); + if(!result) + return NULL; + + for(i = 0; i < json_array_size(array); i++) + json_array_append_new(result, json_deep_copy(json_array_get(array, i))); + + return result; +} + +/*** string ***/ + +json_t *json_string_nocheck(const char *value) +{ + json_string_t *string; + + if(!value) + return NULL; + + string = jsonp_malloc(sizeof(json_string_t)); + if(!string) + return NULL; + json_init(&string->json, JSON_STRING); + + string->value = jsonp_strdup(value); + if(!string->value) { + jsonp_free(string); + return NULL; + } + + return &string->json; +} + +json_t *json_string(const char *value) +{ + if(!value || 
!utf8_check_string(value, -1)) + return NULL; + + return json_string_nocheck(value); +} + +const char *json_string_value(const json_t *json) +{ + if(!json_is_string(json)) + return NULL; + + return json_to_string(json)->value; +} + +int json_string_set_nocheck(json_t *json, const char *value) +{ + char *dup; + json_string_t *string; + + if(!json_is_string(json) || !value) + return -1; + + dup = jsonp_strdup(value); + if(!dup) + return -1; + + string = json_to_string(json); + jsonp_free(string->value); + string->value = dup; + + return 0; +} + +int json_string_set(json_t *json, const char *value) +{ + if(!value || !utf8_check_string(value, -1)) + return -1; + + return json_string_set_nocheck(json, value); +} + +static void json_delete_string(json_string_t *string) +{ + jsonp_free(string->value); + jsonp_free(string); +} + +static int json_string_equal(json_t *string1, json_t *string2) +{ + return strcmp(json_string_value(string1), json_string_value(string2)) == 0; +} + +static json_t *json_string_copy(const json_t *string) +{ + return json_string_nocheck(json_string_value(string)); +} + + +/*** integer ***/ + +json_t *json_integer(json_int_t value) +{ + json_integer_t *integer = jsonp_malloc(sizeof(json_integer_t)); + if(!integer) + return NULL; + json_init(&integer->json, JSON_INTEGER); + + integer->value = value; + return &integer->json; +} + +json_int_t json_integer_value(const json_t *json) +{ + if(!json_is_integer(json)) + return 0; + + return json_to_integer(json)->value; +} + +int json_integer_set(json_t *json, json_int_t value) +{ + if(!json_is_integer(json)) + return -1; + + json_to_integer(json)->value = value; + + return 0; +} + +static void json_delete_integer(json_integer_t *integer) +{ + jsonp_free(integer); +} + +static int json_integer_equal(json_t *integer1, json_t *integer2) +{ + return json_integer_value(integer1) == json_integer_value(integer2); +} + +static json_t *json_integer_copy(const json_t *integer) +{ + return 
json_integer(json_integer_value(integer)); +} + + +/*** real ***/ + +json_t *json_real(double value) +{ + json_real_t *real; + + if(isnan(value) || isinf(value)) + return NULL; + + real = jsonp_malloc(sizeof(json_real_t)); + if(!real) + return NULL; + json_init(&real->json, JSON_REAL); + + real->value = value; + return &real->json; +} + +double json_real_value(const json_t *json) +{ + if(!json_is_real(json)) + return 0; + + return json_to_real(json)->value; +} + +int json_real_set(json_t *json, double value) +{ + if(!json_is_real(json) || isnan(value) || isinf(value)) + return -1; + + json_to_real(json)->value = value; + + return 0; +} + +static void json_delete_real(json_real_t *real) +{ + jsonp_free(real); +} + +static int json_real_equal(json_t *real1, json_t *real2) +{ + return json_real_value(real1) == json_real_value(real2); +} + +static json_t *json_real_copy(const json_t *real) +{ + return json_real(json_real_value(real)); +} + + +/*** number ***/ + +double json_number_value(const json_t *json) +{ + if(json_is_integer(json)) + return (double)json_integer_value(json); + else if(json_is_real(json)) + return json_real_value(json); + else + return 0.0; +} + + +/*** simple values ***/ + +json_t *json_true(void) +{ + static json_t the_true = {JSON_TRUE, (size_t)-1}; + return &the_true; +} + + +json_t *json_false(void) +{ + static json_t the_false = {JSON_FALSE, (size_t)-1}; + return &the_false; +} + + +json_t *json_null(void) +{ + static json_t the_null = {JSON_NULL, (size_t)-1}; + return &the_null; +} + + +/*** deletion ***/ + +void json_delete(json_t *json) +{ + if(json_is_object(json)) + json_delete_object(json_to_object(json)); + + else if(json_is_array(json)) + json_delete_array(json_to_array(json)); + + else if(json_is_string(json)) + json_delete_string(json_to_string(json)); + + else if(json_is_integer(json)) + json_delete_integer(json_to_integer(json)); + + else if(json_is_real(json)) + json_delete_real(json_to_real(json)); + + /* json_delete is not 
called for true, false or null */ +} + + +/*** equality ***/ + +int json_equal(json_t *json1, json_t *json2) +{ + if(!json1 || !json2) + return 0; + + if(json_typeof(json1) != json_typeof(json2)) + return 0; + + /* this covers true, false and null as they are singletons */ + if(json1 == json2) + return 1; + + if(json_is_object(json1)) + return json_object_equal(json1, json2); + + if(json_is_array(json1)) + return json_array_equal(json1, json2); + + if(json_is_string(json1)) + return json_string_equal(json1, json2); + + if(json_is_integer(json1)) + return json_integer_equal(json1, json2); + + if(json_is_real(json1)) + return json_real_equal(json1, json2); + + return 0; +} + + +/*** copying ***/ + +json_t *json_copy(json_t *json) +{ + if(!json) + return NULL; + + if(json_is_object(json)) + return json_object_copy(json); + + if(json_is_array(json)) + return json_array_copy(json); + + if(json_is_string(json)) + return json_string_copy(json); + + if(json_is_integer(json)) + return json_integer_copy(json); + + if(json_is_real(json)) + return json_real_copy(json); + + if(json_is_true(json) || json_is_false(json) || json_is_null(json)) + return json; + + return NULL; +} + +json_t *json_deep_copy(const json_t *json) +{ + if(!json) + return NULL; + + if(json_is_object(json)) + return json_object_deep_copy(json); + + if(json_is_array(json)) + return json_array_deep_copy(json); + + /* for the rest of the types, deep copying doesn't differ from + shallow copying */ + + if(json_is_string(json)) + return json_string_copy(json); + + if(json_is_integer(json)) + return json_integer_copy(json); + + if(json_is_real(json)) + return json_real_copy(json); + + if(json_is_true(json) || json_is_false(json) || json_is_null(json)) + return (json_t *)json; + + return NULL; +} diff --git a/compat/libusb-1.0/AUTHORS b/compat/libusb-1.0/AUTHORS new file mode 100644 index 0000000..32d6948 --- /dev/null +++ b/compat/libusb-1.0/AUTHORS @@ -0,0 +1,46 @@ +Copyright (C) 2007-2009 Daniel Drake 
+Copyright (c) 2001 Johannes Erdfelt +Copyright (C) 2008-2013 Nathan Hjelm +Copyright (C) 2009-2012 Pete Batard +Copyright (C) 2010 Michael Plante +Copyright (C) 2010-2012 Peter Stuge +Copyright (C) 2011-2012 Hans de Goede +Copyright (C) 2012 Martin Pieuchot + +Other contributors: +Alan Ott +Alan Stern +Alex Vatchenko +Artem Egorkine +Aurelien Jarno +Bastien Nocera +Brian Shirley +David Engraf +David Moore +Felipe Balbi +Graeme Gill +Hans de Goede +Hans Ulrich Niedermann +Hector Martin +Hoi-Ho Chan +James Hanko +Konrad Rzepecki +Ludovic Rousseau +Martin Koegler +Martin Pieuchot +Maya Erez +Mike Frysinger +Mikhail Gusarov +Orin Eman +Pekka Nikander +Peter Stuge +Rob Walker +Sean McBride +Sebastian Pipping +Stephan Meyer +Thomas Röfer +Toby Peterson +Trygve Laugstøl +Vasily Khoruzhick +Vitali Lovich +Xiaofan Chen diff --git a/compat/libusb-1.0/COPYING b/compat/libusb-1.0/COPYING new file mode 100644 index 0000000..5ab7695 --- /dev/null +++ b/compat/libusb-1.0/COPYING @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. 
You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. 
+ + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. 
In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. 
A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. 
You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! 
+ + diff --git a/compat/libusb-1.0/Makefile.am b/compat/libusb-1.0/Makefile.am new file mode 100644 index 0000000..3f1e7d2 --- /dev/null +++ b/compat/libusb-1.0/Makefile.am @@ -0,0 +1,24 @@ +AUTOMAKE_OPTIONS = dist-bzip2 no-dist-gzip +ACLOCAL_AMFLAGS = -I m4 +DISTCLEANFILES = libusb-1.0.pc +MAINTAINERCLEANFILES = ChangeLog +EXTRA_DIST = TODO PORTING +SUBDIRS = libusb + +pkgconfigdir=$(libdir)/pkgconfig +pkgconfig_DATA=libusb-1.0.pc + +.PHONY: ChangeLog dist-up +ChangeLog: + git --git-dir $(top_srcdir)/.git log > ChangeLog || touch ChangeLog + +dist-hook: ChangeLog + +reldir = .release/$(distdir) +dist-up: dist + rm -rf $(reldir) + mkdir -p $(reldir) + cp $(distdir).tar.bz2 $(reldir) + rsync -rv $(reldir) frs.sourceforge.net:/home/frs/project/l/li/libusb/libusb-1.0/ + rm -rf $(reldir) + diff --git a/compat/libusb-1.0/NEWS b/compat/libusb-1.0/NEWS new file mode 100644 index 0000000..f948700 --- /dev/null +++ b/compat/libusb-1.0/NEWS @@ -0,0 +1,65 @@ +This file lists notable changes in each release. For the full history of all +changes, see ChangeLog. 
+ +2012-04-20: 1.0.9 +* Numerous bug fixes and improvements +* Backend for Windows, for devices using the WinUSB.sys driver +* Backend for OpenBSD and NetBSD, for devices using the ugen driver +* Add libusb_get_device_speed() +* Add libusb_has_capability() +* Add libusb_error_name() +* Add libusb_get_version() + +2010-05-07: v1.0.8 +* Bug fixes + +2010-04-19: v1.0.7 +* Bug fixes and documentation tweaks +* Add more interface class definitions + +2009-11-22: v1.0.6 +* Bug fixes +* Increase libusb_handle_events() timeout to 60s for powersaving + +2009-11-15: v1.0.5 + * Use timerfd when available for timer management + * Small fixes/updates + +2009-11-06: v1.0.4 release + * Bug fixes including transfer locking to fix some potential threading races + * More flexibility with clock types on Linux + * Use new bulk continuation tracking in Linux 2.6.32 for improved handling + of short/failed transfers + +2009-08-27: v1.0.3 release + * Bug fixes + * Add libusb_get_max_iso_packet_size() + +2009-06-13: v1.0.2 release + * Bug fixes + +2009-05-12: v1.0.1 release + * Bug fixes + * Darwin backend + +2008-12-13: v1.0.0 release + * Bug fixes + +2008-11-21: v0.9.4 release + * Bug fixes + * Add libusb_attach_kernel_driver() + +2008-08-23: v0.9.3 release + * Bug fixes + +2008-07-19: v0.9.2 release + * Bug fixes + +2008-06-28: v0.9.1 release + * Bug fixes + * Introduce contexts to the API + * Compatibility with new Linux kernel features + +2008-05-25: v0.9.0 release + * First libusb-1.0 beta release + diff --git a/compat/libusb-1.0/PORTING b/compat/libusb-1.0/PORTING new file mode 100644 index 0000000..7070784 --- /dev/null +++ b/compat/libusb-1.0/PORTING @@ -0,0 +1,95 @@ +PORTING LIBUSB TO OTHER PLATFORMS + +Introduction +============ + +This document is aimed at developers wishing to port libusb to unsupported +platforms. I believe the libusb API is OS-independent, so by supporting +multiple operating systems we pave the way for cross-platform USB device +drivers. 
+ +Implementation-wise, the basic idea is that you provide an interface to +libusb's internal "backend" API, which performs the appropriate operations on +your target platform. + +In terms of USB I/O, your backend provides functionality to submit +asynchronous transfers (synchronous transfers are implemented in the higher +layers, based on the async interface). Your backend must also provide +functionality to cancel those transfers. + +Your backend must also provide an event handling function to "reap" ongoing +transfers and process their results. + +The backend must also provide standard functions for other USB operations, +e.g. setting configuration, obtaining descriptors, etc. + + +File descriptors for I/O polling +================================ + +For libusb to work, your event handling function obviously needs to be called +at various points in time. Your backend must provide a set of file descriptors +which libusb and its users can pass to poll() or select() to determine when +it is time to call the event handling function. + +On Linux, this is easy: the usbfs kernel interface exposes a file descriptor +which can be passed to poll(). If something similar is not true for your +platform, you can emulate this using an internal library thread to reap I/O as +necessary, and a pipe() with the main library to raise events. The file +descriptor of the pipe can then be provided to libusb as an event source. + + +Interface semantics and documentation +===================================== + +Documentation of the backend interface can be found in libusbi.h inside the +usbi_os_backend structure definition. + +Your implementations of these functions will need to call various internal +libusb functions, prefixed with "usbi_". Documentation for these functions +can be found in the .c files where they are implemented. + +You probably want to skim over *all* the documentation before starting your +implementation. 
For example, you probably need to allocate and store private +OS-specific data for device handles, but the documentation for the mechanism +for doing so is probably not the first thing you will see. + +The Linux backend acts as a good example - view it as a reference +implementation which you should try to match the behaviour of. + + +Getting started +=============== + +1. Modify configure.ac to detect your platform appropriately (see the OS_LINUX +stuff for an example). + +2. Implement your backend in the libusb/os/ directory, modifying +libusb/os/Makefile.am appropriately. + +3. Add preprocessor logic to the top of libusb/core.c to statically assign the +right usbi_backend for your platform. + +4. Produce and test your implementation. + +5. Send your implementation to libusb-devel mailing list. + + +Implementation difficulties? Questions? +======================================= + +If you encounter difficulties porting libusb to your platform, please raise +these issues on the libusb-devel mailing list. Where possible and sensible, I +am interested in solving problems preventing libusb from operating on other +platforms. + +The libusb-devel mailing list is also a good place to ask questions and +make suggestions about the internal API. Hopefully we can produce some +better documentation based on your questions and other input. + +You are encouraged to get involved in the process; if the library needs +some infrastructure additions/modifications to better support your platform, +you are encouraged to make such changes (in cleanly distinct patch +submissions). Even if you do not make such changes yourself, please do raise +the issues on the mailing list at the very minimum. + diff --git a/compat/libusb-1.0/README b/compat/libusb-1.0/README new file mode 100644 index 0000000..08ae169 --- /dev/null +++ b/compat/libusb-1.0/README @@ -0,0 +1,22 @@ +libusb +====== + +libusb is a library for USB device access from Linux, Mac OS X, +OpenBSD, NetBSD, and Windows userspace. 
+It is written in C and licensed under the LGPL-2.1 (see COPYING). + +libusb is abstracted internally in such a way that it can hopefully +be ported to other operating systems. See the PORTING file for some +information, if you fancy a challenge. :) + +libusb homepage: +http://libusb.org/ + +Developers will wish to consult the API documentation: +http://libusb.sourceforge.net/api-1.0/ + +Use the mailing list for questions, comments, etc: +http://libusb.org/wiki/MailingList + +- Peter Stuge +(use the mailing list rather than mailing developers directly) diff --git a/compat/libusb-1.0/THANKS b/compat/libusb-1.0/THANKS new file mode 100644 index 0000000..d926126 --- /dev/null +++ b/compat/libusb-1.0/THANKS @@ -0,0 +1,8 @@ +Development contributors are listed in the AUTHORS file. Other community +members who have made significant contributions in other areas are listed +in this file: + +Alan Stern +Ludovic Rousseau +Tim Roberts +Xiaofan Chen diff --git a/compat/libusb-1.0/TODO b/compat/libusb-1.0/TODO new file mode 100644 index 0000000..6c162a3 --- /dev/null +++ b/compat/libusb-1.0/TODO @@ -0,0 +1,9 @@ +for 1.1 or future +================== +optional timerfd support (runtime detection) +notifications of hotplugged/unplugged devices +offer API to create/destroy handle_events thread +isochronous sync I/O? +exposing of parent-child device relationships +"usb primer" introduction docs +more examples diff --git a/compat/libusb-1.0/configure.ac b/compat/libusb-1.0/configure.ac new file mode 100644 index 0000000..1fccea7 --- /dev/null +++ b/compat/libusb-1.0/configure.ac @@ -0,0 +1,229 @@ +dnl These m4 macros are whitespace sensitive and break if moved around much. 
+m4_define([LU_VERSION_H], m4_include([libusb/version.h])) +m4_define([LU_DEFINE_VERSION_ATOM], + [m4_define([$1], m4_bregexp(LU_VERSION_H, + [^#define\s*$1\s*\([0-9]*\).*], [\1]))]) +m4_define([LU_DEFINE_VERSION_RC_ATOM], + [m4_define([$1], m4_bregexp(LU_VERSION_H, + [^#define\s*$1\s*"\(-rc[0-9]*\)".*], [\1]))]) +dnl The m4_bregexp() returns (only) the numbers following the #define named +dnl in the first macro parameter. m4_define() then defines the name for use +dnl in AC_INIT(). + +LU_DEFINE_VERSION_ATOM([LIBUSB_MAJOR]) +LU_DEFINE_VERSION_ATOM([LIBUSB_MINOR]) +LU_DEFINE_VERSION_ATOM([LIBUSB_MICRO]) +LU_DEFINE_VERSION_RC_ATOM([LIBUSB_RC]) + +AC_INIT([libusb], LIBUSB_MAJOR[.]LIBUSB_MINOR[.]LIBUSB_MICRO[]LIBUSB_RC, [libusb-devel@lists.sourceforge.net], [libusb], [http://www.libusb.org/]) + +# Library versioning +# These numbers should be tweaked on every release. Read carefully: +# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html +# http://sourceware.org/autobook/autobook/autobook_91.html +lt_current="2" +lt_revision="0" +lt_age="0" +LTLDFLAGS="-version-info ${lt_current}:${lt_revision}:${lt_age}" + +AM_INIT_AUTOMAKE([foreign subdir-objects]) +AM_MAINTAINER_MODE + +AC_CONFIG_SRCDIR([libusb/core.c]) +AC_CONFIG_MACRO_DIR([m4]) +AC_CONFIG_HEADERS([config.h]) +m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) + +AC_PREREQ([2.50]) +AC_PROG_CC +AC_PROG_LIBTOOL +LT_LANG([Windows Resource]) +AC_C_INLINE +AM_PROG_CC_C_O +AC_DEFINE([_GNU_SOURCE], 1, [Use GNU extensions]) + +LTLDFLAGS="${LTLDFLAGS} -no-undefined" + +AC_MSG_CHECKING([operating system]) +case $host in +*-linux*) + AC_MSG_RESULT([Linux]) + backend="linux" + ;; +*-darwin*) + AC_MSG_RESULT([Darwin/Mac OS X]) + backend="darwin" + ;; +*-openbsd*) + AC_MSG_RESULT([OpenBSD]) + backend="openbsd" + ;; +*-netbsd*) + AC_MSG_RESULT([NetBSD (using OpenBSD backend)]) + backend="openbsd" + ;; +*-mingw*) + AC_MSG_RESULT([Windows]) + backend="windows" + ;; +*-cygwin*) + AC_MSG_RESULT([Cygwin 
(using Windows backend)]) + backend="windows" + threads="posix" + ;; +*) + AC_MSG_ERROR([unsupported operating system]) +esac +case $backend in +linux) + AC_DEFINE(OS_LINUX, 1, [Linux backend]) + AC_SUBST(OS_LINUX) + AC_CHECK_LIB(rt, clock_gettime, -pthread) + AC_ARG_ENABLE([udev], + [AC_HELP_STRING([--with-udev], [use udev for device enumeration and hotplug support (recommended, default: yes)])], + [], [enable_udev="yes"]) + if test "x$enable_udev" = "xyes" ; then + # system has udev. use it or fail! + AC_CHECK_HEADERS([libudev.h],[],[AC_ERROR(["udev support requested but libudev not installed"])]) + AC_CHECK_LIB([udev], [udev_new], [], [AC_ERROR(["udev support requested but libudev not installed"])]) + AC_DEFINE(USE_UDEV, 1, [Use udev for device enumeration/hotplug]) + else + AC_CHECK_HEADERS([linux/netlink.h linux/filter.h], [], [AC_ERROR(["Linux netlink headers not found"])]) + fi + AC_SUBST(USE_UDEV) + threads="posix" + THREAD_CFLAGS="-pthread" + LIBS="${LIBS} -pthread" + AC_CHECK_HEADERS([poll.h]) + AC_DEFINE([POLL_NFDS_TYPE],[nfds_t],[type of second poll() argument]) + ;; +darwin) + AC_DEFINE(OS_DARWIN, 1, [Darwin backend]) + AC_SUBST(OS_DARWIN) + threads="posix" + LIBS="-lobjc -Wl,-framework,IOKit -Wl,-framework,CoreFoundation" + LTLDFLAGS="${LTLDFLAGS} -Wl,-prebind" + AC_CHECK_HEADERS([poll.h]) + AC_CHECK_TYPE([nfds_t], + [AC_DEFINE([POLL_NFDS_TYPE],[nfds_t],[type of second poll() argument])], + [AC_DEFINE([POLL_NFDS_TYPE],[unsigned int],[type of second poll() argument])], + [#include ]) + ;; +openbsd) + AC_DEFINE(OS_OPENBSD, 1, [OpenBSD backend]) + AC_SUBST(OS_OPENBSD) + threads="posix" + THREAD_CFLAGS="-pthread" + LIBS="-pthread" + AC_CHECK_HEADERS([poll.h]) + AC_DEFINE([POLL_NFDS_TYPE],[nfds_t],[type of second poll() argument]) + ;; +windows) + AC_DEFINE(OS_WINDOWS, 1, [Windows backend]) + AC_SUBST(OS_WINDOWS) + LIBS="" + LTLDFLAGS="${LTLDFLAGS} -avoid-version -Wl,--add-stdcall-alias" + AC_DEFINE([POLL_NFDS_TYPE],[unsigned int],[type of second poll() 
argument]) + ;; +esac +AC_SUBST(LIBS) + +AM_CONDITIONAL(OS_LINUX, test "x$backend" = xlinux) +AM_CONDITIONAL(OS_DARWIN, test "x$backend" = xdarwin) +AM_CONDITIONAL(OS_OPENBSD, test "x$backend" = xopenbsd) +AM_CONDITIONAL(OS_WINDOWS, test "x$backend" = xwindows) +AM_CONDITIONAL(THREADS_POSIX, test "x$threads" = xposix) +AM_CONDITIONAL(USE_UDEV, test "x$enable_udev" = xyes) +if test "$threads" = posix; then + AC_DEFINE(THREADS_POSIX, 1, [Use POSIX Threads]) +fi + +# timerfd +AC_CHECK_HEADER([sys/timerfd.h], [timerfd_h=1], [timerfd_h=0]) +AC_ARG_ENABLE([timerfd], + [AS_HELP_STRING([--enable-timerfd], + [use timerfd for timing (default auto)])], + [use_timerfd=$enableval], [use_timerfd='auto']) + +if test "x$use_timerfd" = "xyes" -a "x$timerfd_h" = "x0"; then + AC_MSG_ERROR([timerfd header not available; glibc 2.9+ required]) +fi + +AC_CHECK_DECL([TFD_NONBLOCK], [tfd_hdr_ok=yes], [tfd_hdr_ok=no], [#include ]) +if test "x$use_timerfd" = "xyes" -a "x$tfd_hdr_ok" = "xno"; then + AC_MSG_ERROR([timerfd header not usable; glibc 2.9+ required]) +fi + +AC_MSG_CHECKING([whether to use timerfd for timing]) +if test "x$use_timerfd" = "xno"; then + AC_MSG_RESULT([no (disabled by user)]) +else + if test "x$timerfd_h" = "x1" -a "x$tfd_hdr_ok" = "xyes"; then + AC_MSG_RESULT([yes]) + AC_DEFINE(USBI_TIMERFD_AVAILABLE, 1, [timerfd headers available]) + else + AC_MSG_RESULT([no (header not available)]) + fi +fi + +AC_CHECK_TYPES(struct timespec) + +# Message logging +AC_ARG_ENABLE([log], [AS_HELP_STRING([--disable-log], [disable all logging])], + [log_enabled=$enableval], + [log_enabled='yes']) +if test "x$log_enabled" != "xno"; then + AC_DEFINE([ENABLE_LOGGING], 1, [Message logging]) +fi + +AC_ARG_ENABLE([debug-log], [AS_HELP_STRING([--enable-debug-log], + [enable debug logging (default n)])], + [debug_log_enabled=$enableval], + [debug_log_enabled='no']) +if test "x$debug_log_enabled" != "xno"; then + AC_DEFINE([ENABLE_DEBUG_LOGGING], 1, [Debug message logging]) +fi + +# Examples build 
+AC_ARG_ENABLE([examples-build], [AS_HELP_STRING([--enable-examples-build], + [build example applications (default n)])], + [build_examples=$enableval], + [build_examples='no']) +AM_CONDITIONAL([BUILD_EXAMPLES], [test "x$build_examples" != "xno"]) + +# check for -fvisibility=hidden compiler support (GCC >= 3.4) +saved_cflags="$CFLAGS" +# -Werror required for cygwin +CFLAGS="$CFLAGS -Werror -fvisibility=hidden" +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])], + [VISIBILITY_CFLAGS="-fvisibility=hidden" + AC_DEFINE([DEFAULT_VISIBILITY], [__attribute__((visibility("default")))], [Default visibility]) ], + [ VISIBILITY_CFLAGS="" + AC_DEFINE([DEFAULT_VISIBILITY], [], [Default visibility]) ], + ]) +CFLAGS="$saved_cflags" + +# check for -Wno-pointer-sign compiler support (GCC >= 4) +saved_cflags="$CFLAGS" +CFLAGS="$CFLAGS -Wno-pointer-sign" +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([])], + nopointersign_cflags="-Wno-pointer-sign", nopointersign_cflags="") +CFLAGS="$saved_cflags" + +# sigaction not available on MinGW +AC_CHECK_FUNC([sigaction], [have_sigaction=yes], [have_sigaction=no]) +AM_CONDITIONAL([HAVE_SIGACTION], [test "x$have_sigaction" = "xyes"]) + +# headers not available on all platforms but required on others +AC_CHECK_HEADERS([sys/time.h]) +AC_CHECK_FUNCS(gettimeofday) + +AM_CFLAGS="-std=gnu99 -Wall -Wundef -Wunused -Wstrict-prototypes -Werror-implicit-function-declaration $nopointersign_cflags -Wshadow ${THREAD_CFLAGS} ${VISIBILITY_CFLAGS}" + +AC_SUBST(AM_CFLAGS) +AC_SUBST(LTLDFLAGS) + +AC_CONFIG_FILES([libusb-1.0.pc]) +AC_CONFIG_FILES([Makefile]) +AC_CONFIG_FILES([libusb/Makefile]) +AC_OUTPUT diff --git a/compat/libusb-1.0/libusb-1.0.pc.in b/compat/libusb-1.0/libusb-1.0.pc.in new file mode 100644 index 0000000..bb371d1 --- /dev/null +++ b/compat/libusb-1.0/libusb-1.0.pc.in @@ -0,0 +1,12 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libusb-1.0 +Description: C API for USB device access from Linux, Mac OS X, OpenBSD, NetBSD 
and Windows userspace +Version: @VERSION@ +Libs: -L${libdir} -lusb-1.0 +Libs.private: @LIBS@ +Cflags: -I${includedir}/libusb-1.0 + diff --git a/compat/libusb-1.0/libusb/Makefile.am b/compat/libusb-1.0/libusb/Makefile.am new file mode 100644 index 0000000..2f3f95f --- /dev/null +++ b/compat/libusb-1.0/libusb/Makefile.am @@ -0,0 +1,55 @@ +lib_LTLIBRARIES = libusb-1.0.la + +LINUX_USBFS_SRC = os/linux_usbfs.c +DARWIN_USB_SRC = os/darwin_usb.c +OPENBSD_USB_SRC = os/openbsd_usb.c +WINDOWS_USB_SRC = os/poll_windows.c os/windows_usb.c libusb-1.0.rc \ + libusb-1.0.def + +EXTRA_DIST = $(LINUX_USBFS_SRC) $(DARWIN_USB_SRC) $(OPENBSD_USB_SRC) \ + $(WINDOWS_USB_SRC) os/threads_posix.c os/threads_windows.c \ + os/linux_udev.c os/linux_netlink.c + +if OS_LINUX + +if USE_UDEV +OS_SRC = $(LINUX_USBFS_SRC) os/linux_udev.c +else +OS_SRC = $(LINUX_USBFS_SRC) os/linux_netlink.c +endif + +endif + +if OS_DARWIN +OS_SRC = $(DARWIN_USB_SRC) +AM_CFLAGS_EXT = -no-cpp-precomp +endif + +if OS_OPENBSD +OS_SRC = $(OPENBSD_USB_SRC) +endif + +if OS_WINDOWS +OS_SRC = $(WINDOWS_USB_SRC) + +.rc.lo: + $(AM_V_GEN)$(LIBTOOL) $(AM_V_lt) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --tag=RC --mode=compile $(RC) $(RCFLAGS) -i $< -o $@ + +libusb-1.0.rc: version.h +endif + +if THREADS_POSIX +THREADS_SRC = os/threads_posix.h os/threads_posix.c +else +THREADS_SRC = os/threads_windows.h os/threads_windows.c +endif + +libusb_1_0_la_CFLAGS = $(AM_CFLAGS) \ + -DLIBUSB_DESCRIBE=\"`git --git-dir "$(top_srcdir)/.git" describe --tags 2>/dev/null`\" +libusb_1_0_la_LDFLAGS = $(LTLDFLAGS) +libusb_1_0_la_SOURCES = libusbi.h core.c descriptor.c io.c sync.c $(OS_SRC) \ + hotplug.h hotplug.c os/linux_usbfs.h os/darwin_usb.h os/windows_usb.h \ + $(THREADS_SRC) os/poll_posix.h os/poll_windows.h + +hdrdir = $(includedir)/libusb-1.0 +hdr_HEADERS = libusb.h diff --git a/compat/libusb-1.0/libusb/core.c b/compat/libusb-1.0/libusb/core.c new file mode 100644 index 0000000..ed8c326 --- /dev/null +++ b/compat/libusb-1.0/libusb/core.c @@ -0,0 
+1,2049 @@
+/* -*- Mode: C; indent-tabs-mode:nil ; c-basic-offset:8 -*- */
+/*
+ * Core functions for libusb
+ * Copyright (c) 2012-2013 Nathan Hjelm
+ * Copyright (C) 2007-2008 Daniel Drake
+ * Copyright (c) 2001 Johannes Erdfelt
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <config.h>
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include "libusbi.h"
+#include "hotplug.h"
+
+#if defined(OS_LINUX)
+const struct usbi_os_backend * const usbi_backend = &linux_usbfs_backend;
+#elif defined(OS_DARWIN)
+const struct usbi_os_backend * const usbi_backend = &darwin_backend;
+#elif defined(OS_OPENBSD)
+const struct usbi_os_backend * const usbi_backend = &openbsd_backend;
+#elif defined(OS_WINDOWS)
+const struct usbi_os_backend * const usbi_backend = &windows_backend;
+#else
+#error "Unsupported OS"
+#endif
+
+const struct libusb_version libusb_version_internal = {
+	LIBUSB_MAJOR, LIBUSB_MINOR, LIBUSB_MICRO, LIBUSB_NANO, LIBUSB_RC,
+	LIBUSB_DESCRIBE
+};
+
+struct libusb_context *usbi_default_context = NULL;
+static int default_context_refcnt = 0;
+static usbi_mutex_static_t default_context_lock = USBI_MUTEX_INITIALIZER;
+
+usbi_mutex_static_t active_contexts_lock = USBI_MUTEX_INITIALIZER;
+struct list_head
active_contexts_list; + +/** + * \mainpage libusb-1.0 API Reference + * + * \section intro Introduction + * + * libusb is an open source library that allows you to communicate with USB + * devices from userspace. For more info, see the + * libusb homepage. + * + * This documentation is aimed at application developers wishing to + * communicate with USB peripherals from their own software. After reviewing + * this documentation, feedback and questions can be sent to the + * libusb-devel mailing + * list. + * + * This documentation assumes knowledge of how to operate USB devices from + * a software standpoint (descriptors, configurations, interfaces, endpoints, + * control/bulk/interrupt/isochronous transfers, etc). Full information + * can be found in the USB 2.0 + * Specification which is available for free download. You can probably + * find less verbose introductions by searching the web. + * + * \section features Library features + * + * - All transfer types supported (control/bulk/interrupt/isochronous) + * - 2 transfer interfaces: + * -# Synchronous (simple) + * -# Asynchronous (more complicated, but more powerful) + * - Thread safe (although the asynchronous interface means that you + * usually won't need to thread) + * - Lightweight with lean API + * - Compatible with libusb-0.1 through the libusb-compat-0.1 translation layer + * - Hotplug support (see \ref hotplug) + * + * \section gettingstarted Getting Started + * + * To begin reading the API documentation, start with the Modules page which + * links to the different categories of libusb's functionality. + * + * One decision you will have to make is whether to use the synchronous + * or the asynchronous data transfer interface. The \ref io documentation + * provides some insight into this topic. + * + * Some example programs can be found in the libusb source distribution under + * the "examples" subdirectory. The libusb homepage includes a list of + * real-life project examples which use libusb. 
+ * + * \section errorhandling Error handling + * + * libusb functions typically return 0 on success or a negative error code + * on failure. These negative error codes relate to LIBUSB_ERROR constants + * which are listed on the \ref misc "miscellaneous" documentation page. + * + * \section msglog Debug message logging + * + * libusb does not log any messages by default. Your application is therefore + * free to close stdout/stderr and those descriptors may be reused without + * worry. + * + * The libusb_set_debug() function can be used to enable stdout/stderr logging + * of certain messages. Under standard configuration, libusb doesn't really + * log much at all, so you are advised to use this function to enable all + * error/warning/informational messages. It will help you debug problems with + * your software. + * + * The logged messages are unstructured. There is no one-to-one correspondence + * between messages being logged and success or failure return codes from + * libusb functions. There is no format to the messages, so you should not + * try to capture or parse them. They are not and will not be localized. + * These messages are not suitable for being passed to your application user; + * instead, you should interpret the error codes returned from libusb functions + * and provide appropriate notification to the user. The messages are simply + * there to aid you as a programmer, and if you're confused because you're + * getting a strange error code from a libusb function, enabling message + * logging may give you a suitable explanation. + * + * The LIBUSB_DEBUG environment variable can be used to enable message logging + * at run-time. This environment variable should be set to a number, which is + * interpreted the same as the libusb_set_debug() parameter. When this + * environment variable is set, the message logging verbosity level is fixed + * and libusb_set_debug() effectively does nothing. 
+ * + * libusb can be compiled without any logging functions, useful for embedded + * systems. In this case, libusb_set_debug() and the LIBUSB_DEBUG environment + * variable have no effects. + * + * libusb can also be compiled with verbose debugging messages. When the + * library is compiled in this way, all messages of all verbosities are always + * logged. libusb_set_debug() and the LIBUSB_DEBUG environment variable have + * no effects. + * + * \section remarks Other remarks + * + * libusb does have imperfections. The \ref caveats "caveats" page attempts + * to document these. + */ + +/** + * \page caveats Caveats + * + * \section devresets Device resets + * + * The libusb_reset_device() function allows you to reset a device. If your + * program has to call such a function, it should obviously be aware that + * the reset will cause device state to change (e.g. register values may be + * reset). + * + * The problem is that any other program could reset the device your program + * is working with, at any time. libusb does not offer a mechanism to inform + * you when this has happened, so if someone else resets your device it will + * not be clear to your own program why the device state has changed. + * + * Ultimately, this is a limitation of writing drivers in userspace. + * Separation from the USB stack in the underlying kernel makes it difficult + * for the operating system to deliver such notifications to your program. + * The Linux kernel USB stack allows such reset notifications to be delivered + * to in-kernel USB drivers, but it is not clear how such notifications could + * be delivered to second-class drivers that live in userspace. + * + * \section blockonly Blocking-only functionality + * + * The functionality listed below is only available through synchronous, + * blocking functions. There are no asynchronous/non-blocking alternatives, + * and no clear ways of implementing these. 
+ * + * - Configuration activation (libusb_set_configuration()) + * - Interface/alternate setting activation (libusb_set_interface_alt_setting()) + * - Releasing of interfaces (libusb_release_interface()) + * - Clearing of halt/stall condition (libusb_clear_halt()) + * - Device resets (libusb_reset_device()) + * + * \section configsel Configuration selection and handling + * + * When libusb presents a device handle to an application, there is a chance + * that the corresponding device may be in unconfigured state. For devices + * with multiple configurations, there is also a chance that the configuration + * currently selected is not the one that the application wants to use. + * + * The obvious solution is to add a call to libusb_set_configuration() early + * on during your device initialization routines, but there are caveats to + * be aware of: + * -# If the device is already in the desired configuration, calling + * libusb_set_configuration() using the same configuration value will cause + * a lightweight device reset. This may not be desirable behaviour. + * -# libusb will be unable to change configuration if the device is in + * another configuration and other programs or drivers have claimed + * interfaces under that configuration. + * -# In the case where the desired configuration is already active, libusb + * may not even be able to perform a lightweight device reset. For example, + * take my USB keyboard with fingerprint reader: I'm interested in driving + * the fingerprint reader interface through libusb, but the kernel's + * USB-HID driver will almost always have claimed the keyboard interface. + * Because the kernel has claimed an interface, it is not even possible to + * perform the lightweight device reset, so libusb_set_configuration() will + * fail. (Luckily the device in question only has a single configuration.) + * + * One solution to some of the above problems is to consider the currently + * active configuration. 
If the configuration we want is already active, then + * we don't have to select any configuration: +\code +cfg = libusb_get_configuration(dev); +if (cfg != desired) + libusb_set_configuration(dev, desired); +\endcode + * + * This is probably suitable for most scenarios, but is inherently racy: + * another application or driver may change the selected configuration + * after the libusb_get_configuration() call. + * + * Even in cases where libusb_set_configuration() succeeds, consider that other + * applications or drivers may change configuration after your application + * calls libusb_set_configuration(). + * + * One possible way to lock your device into a specific configuration is as + * follows: + * -# Set the desired configuration (or use the logic above to realise that + * it is already in the desired configuration) + * -# Claim the interface that you wish to use + * -# Check that the currently active configuration is the one that you want + * to use. + * + * The above method works because once an interface is claimed, no application + * or driver is able to select another configuration. + * + * \section earlycomp Early transfer completion + * + * NOTE: This section is currently Linux-centric. I am not sure if any of these + * considerations apply to Darwin or other platforms. + * + * When a transfer completes early (i.e. when less data is received/sent in + * any one packet than the transfer buffer allows for) then libusb is designed + * to terminate the transfer immediately, not transferring or receiving any + * more data unless other transfers have been queued by the user. + * + * On legacy platforms, libusb is unable to do this in all situations. After + * the incomplete packet occurs, "surplus" data may be transferred. Prior to + * libusb v1.0.2, this information was lost (and for device-to-host transfers, + * the corresponding data was discarded). 
As of libusb v1.0.3, this information + * is kept (the data length of the transfer is updated) and, for device-to-host + * transfers, any surplus data was added to the buffer. Still, this is not + * a nice solution because it loses the information about the end of the short + * packet, and the user probably wanted that surplus data to arrive in the next + * logical transfer. + * + * A previous workaround was to only ever submit transfers of size 16kb or + * less. + * + * As of libusb v1.0.4 and Linux v2.6.32, this is fixed. A technical + * explanation of this issue follows. + * + * When you ask libusb to submit a bulk transfer larger than 16kb in size, + * libusb breaks it up into a number of smaller subtransfers. This is because + * the usbfs kernel interface only accepts transfers of up to 16kb in size. + * The subtransfers are submitted all at once so that the kernel can queue + * them at the hardware level, therefore maximizing bus throughput. + * + * On legacy platforms, this caused problems when transfers completed early. + * Upon this event, the kernel would terminate all further packets in that + * subtransfer (but not any following ones). libusb would note this event and + * immediately cancel any following subtransfers that had been queued, + * but often libusb was not fast enough, and the following subtransfers had + * started before libusb got around to cancelling them. + * + * Thanks to an API extension to usbfs, this is fixed with recent kernel and + * libusb releases. The solution was to allow libusb to communicate to the + * kernel where boundaries occur between logical libusb-level transfers. When + * a short transfer (or other error) occurs, the kernel will cancel all the + * subtransfers until the boundary without allowing those transfers to start. + * + * \section zlp Zero length packets + * + * - libusb is able to send a packet of zero length to an endpoint simply by + * submitting a transfer of zero length. 
On Linux, this did not work with + * libusb versions prior to 1.0.3 and kernel versions prior to 2.6.31. + * - The \ref libusb_transfer_flags::LIBUSB_TRANSFER_ADD_ZERO_PACKET + * "LIBUSB_TRANSFER_ADD_ZERO_PACKET" flag is currently only supported on Linux. + */ + +/** + * \page contexts Contexts + * + * It is possible that libusb may be used simultaneously from two independent + * libraries linked into the same executable. For example, if your application + * has a plugin-like system which allows the user to dynamically load a range + * of modules into your program, it is feasible that two independently + * developed modules may both use libusb. + * + * libusb is written to allow for these multiple user scenarios. The two + * "instances" of libusb will not interfere: libusb_set_debug() calls + * from one user will not affect the same settings for other users, other + * users can continue using libusb after one of them calls libusb_exit(), etc. + * + * This is made possible through libusb's context concept. When you + * call libusb_init(), you are (optionally) given a context. You can then pass + * this context pointer back into future libusb functions. + * + * In order to keep things simple for more simplistic applications, it is + * legal to pass NULL to all functions requiring a context pointer (as long as + * you're sure no other code will attempt to use libusb from the same process). + * When you pass NULL, the default context will be used. The default context + * is created the first time a process calls libusb_init() when no other + * context is alive. Contexts are destroyed during libusb_exit(). + * + * The default context is reference-counted and can be shared. That means that + * if libusb_init(NULL) is called twice within the same process, the two + * users end up sharing the same context. The deinitialization and freeing of + * the default context will only happen when the last user calls libusb_exit(). 
+ * In other words, the default context is created and initialized when its + * reference count goes from 0 to 1, and is deinitialized and destroyed when + * its reference count goes from 1 to 0. + * + * You may be wondering why only a subset of libusb functions require a + * context pointer in their function definition. Internally, libusb stores + * context pointers in other objects (e.g. libusb_device instances) and hence + * can infer the context from those objects. + */ + +/** + * @defgroup lib Library initialization/deinitialization + * This page details how to initialize and deinitialize libusb. Initialization + * must be performed before using any libusb functionality, and similarly you + * must not call any libusb functions after deinitialization. + */ + +/** + * @defgroup dev Device handling and enumeration + * The functionality documented below is designed to help with the following + * operations: + * - Enumerating the USB devices currently attached to the system + * - Choosing a device to operate from your software + * - Opening and closing the chosen device + * + * \section nutshell In a nutshell... + * + * The description below really makes things sound more complicated than they + * actually are. 
The following sequence of function calls will be suitable + * for almost all scenarios and does not require you to have such a deep + * understanding of the resource management issues: + * \code +// discover devices +libusb_device **list; +libusb_device *found = NULL; +ssize_t cnt = libusb_get_device_list(NULL, &list); +ssize_t i = 0; +int err = 0; +if (cnt < 0) + error(); + +for (i = 0; i < cnt; i++) { + libusb_device *device = list[i]; + if (is_interesting(device)) { + found = device; + break; + } +} + +if (found) { + libusb_device_handle *handle; + + err = libusb_open(found, &handle); + if (err) + error(); + // etc +} + +libusb_free_device_list(list, 1); +\endcode + * + * The two important points: + * - You asked libusb_free_device_list() to unreference the devices (2nd + * parameter) + * - You opened the device before freeing the list and unreferencing the + * devices + * + * If you ended up with a handle, you can now proceed to perform I/O on the + * device. + * + * \section devshandles Devices and device handles + * libusb has a concept of a USB device, represented by the + * \ref libusb_device opaque type. A device represents a USB device that + * is currently or was previously connected to the system. Using a reference + * to a device, you can determine certain information about the device (e.g. + * you can read the descriptor data). + * + * The libusb_get_device_list() function can be used to obtain a list of + * devices currently connected to the system. This is known as device + * discovery. + * + * Just because you have a reference to a device does not mean it is + * necessarily usable. The device may have been unplugged, you may not have + * permission to operate such device, or another program or driver may be + * using the device. + * + * When you've found a device that you'd like to operate, you must ask + * libusb to open the device using the libusb_open() function. 
Assuming + * success, libusb then returns you a device handle + * (a \ref libusb_device_handle pointer). All "real" I/O operations then + * operate on the handle rather than the original device pointer. + * + * \section devref Device discovery and reference counting + * + * Device discovery (i.e. calling libusb_get_device_list()) returns a + * freshly-allocated list of devices. The list itself must be freed when + * you are done with it. libusb also needs to know when it is OK to free + * the contents of the list - the devices themselves. + * + * To handle these issues, libusb provides you with two separate items: + * - A function to free the list itself + * - A reference counting system for the devices inside + * + * New devices presented by the libusb_get_device_list() function all have a + * reference count of 1. You can increase and decrease reference count using + * libusb_ref_device() and libusb_unref_device(). A device is destroyed when + * its reference count reaches 0. + * + * With the above information in mind, the process of opening a device can + * be viewed as follows: + * -# Discover devices using libusb_get_device_list(). + * -# Choose the device that you want to operate, and call libusb_open(). + * -# Unref all devices in the discovered device list. + * -# Free the discovered device list. + * + * The order is important - you must not unreference the device before + * attempting to open it, because unreferencing it may destroy the device. + * + * For convenience, the libusb_free_device_list() function includes a + * parameter to optionally unreference all the devices in the list before + * freeing the list itself. This combines steps 3 and 4 above. + * + * As an implementation detail, libusb_open() actually adds a reference to + * the device in question. This is because the device remains available + * through the handle via libusb_get_device(). The reference is deleted during + * libusb_close(). 
+ */ + +/** @defgroup misc Miscellaneous */ + +/* we traverse usbfs without knowing how many devices we are going to find. + * so we create this discovered_devs model which is similar to a linked-list + * which grows when required. it can be freed once discovery has completed, + * eliminating the need for a list node in the libusb_device structure + * itself. */ +#define DISCOVERED_DEVICES_SIZE_STEP 8 + +static struct discovered_devs *discovered_devs_alloc(void) +{ + struct discovered_devs *ret = + malloc(sizeof(*ret) + (sizeof(void *) * DISCOVERED_DEVICES_SIZE_STEP)); + + if (ret) { + ret->len = 0; + ret->capacity = DISCOVERED_DEVICES_SIZE_STEP; + } + return ret; +} + +/* append a device to the discovered devices collection. may realloc itself, + * returning new discdevs. returns NULL on realloc failure. */ +struct discovered_devs *discovered_devs_append( + struct discovered_devs *discdevs, struct libusb_device *dev) +{ + size_t len = discdevs->len; + size_t capacity; + + /* if there is space, just append the device */ + if (len < discdevs->capacity) { + discdevs->devices[len] = libusb_ref_device(dev); + discdevs->len++; + return discdevs; + } + + /* exceeded capacity, need to grow */ + usbi_dbg("need to increase capacity"); + capacity = discdevs->capacity + DISCOVERED_DEVICES_SIZE_STEP; + discdevs = realloc(discdevs, + sizeof(*discdevs) + (sizeof(void *) * capacity)); + if (discdevs) { + discdevs->capacity = capacity; + discdevs->devices[len] = libusb_ref_device(dev); + discdevs->len++; + } + + return discdevs; +} + +static void discovered_devs_free(struct discovered_devs *discdevs) +{ + size_t i; + + for (i = 0; i < discdevs->len; i++) + libusb_unref_device(discdevs->devices[i]); + + free(discdevs); +} + +/* Allocate a new device with a specific session ID. The returned device has + * a reference count of 1. 
*/ +struct libusb_device *usbi_alloc_device(struct libusb_context *ctx, + unsigned long session_id) +{ + size_t priv_size = usbi_backend->device_priv_size; + struct libusb_device *dev = calloc(1, sizeof(*dev) + priv_size); + int r; + + if (!dev) + return NULL; + + r = usbi_mutex_init(&dev->lock, NULL); + if (r) { + free(dev); + return NULL; + } + + dev->ctx = ctx; + dev->refcnt = 1; + dev->session_data = session_id; + dev->speed = LIBUSB_SPEED_UNKNOWN; + memset(&dev->os_priv, 0, priv_size); + + if (!libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) { + usbi_connect_device (dev); + } + + return dev; +} + +void usbi_connect_device(struct libusb_device *dev) +{ + libusb_hotplug_message message = {LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED, dev}; + int ret; + + dev->attached = 1; + + usbi_mutex_lock(&dev->ctx->usb_devs_lock); + list_add(&dev->list, &dev->ctx->usb_devs); + usbi_mutex_unlock(&dev->ctx->usb_devs_lock); + + /* Signal that an event has occurred for this device if we support hotplug AND + the hotplug pipe is ready. This prevents an event from getting raised during + initial enumeration. */ + if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG) && dev->ctx->hotplug_pipe[1] > 0) { + ret = usbi_write(dev->ctx->hotplug_pipe[1], &message, sizeof (message)); + if (sizeof (message) != ret) { + usbi_err(DEVICE_CTX(dev), "error writing hotplug message"); + } + } +} + +void usbi_disconnect_device(struct libusb_device *dev) +{ + libusb_hotplug_message message = {LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT, dev}; + struct libusb_context *ctx = dev->ctx; + int ret; + + usbi_mutex_lock(&dev->lock); + dev->attached = 0; + usbi_mutex_unlock(&dev->lock); + + /* Signal that an event has occurred for this device if we support hotplug AND + the hotplug pipe is ready. This prevents an event from getting raised during + initial enumeration. libusb_handle_events will take care of dereferencing the + device. 
*/ + if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG) && dev->ctx->hotplug_pipe[1] > 0) { + ret = usbi_write(dev->ctx->hotplug_pipe[1], &message, sizeof (message)); + if (sizeof(message) != ret) { + usbi_err(DEVICE_CTX(dev), "error writing hotplug message"); + } + } + + usbi_mutex_lock(&ctx->usb_devs_lock); + list_del(&dev->list); + usbi_mutex_unlock(&ctx->usb_devs_lock); +} + +/* Perform some final sanity checks on a newly discovered device. If this + * function fails (negative return code), the device should not be added + * to the discovered device list. */ +int usbi_sanitize_device(struct libusb_device *dev) +{ + int r; + uint8_t num_configurations; + + r = usbi_device_cache_descriptor(dev); + if (r < 0) + return r; + + num_configurations = dev->device_descriptor.bNumConfigurations; + if (num_configurations > USB_MAXCONFIG) { + usbi_err(DEVICE_CTX(dev), "too many configurations"); + return LIBUSB_ERROR_IO; + } else if (0 == num_configurations) + usbi_dbg("zero configurations, maybe an unauthorized device"); + + dev->num_configurations = num_configurations; + return 0; +} + +/* Examine libusb's internal list of known devices, looking for one with + * a specific session ID. Returns the matching device if it was found, and + * NULL otherwise. */ +struct libusb_device *usbi_get_device_by_session_id(struct libusb_context *ctx, + unsigned long session_id) +{ + struct libusb_device *dev; + struct libusb_device *ret = NULL; + + usbi_mutex_lock(&ctx->usb_devs_lock); + list_for_each_entry(dev, &ctx->usb_devs, list, struct libusb_device) + if (dev->session_data == session_id) { + ret = dev; + break; + } + usbi_mutex_unlock(&ctx->usb_devs_lock); + + return ret; +} + +/** @ingroup dev + * Returns a list of USB devices currently attached to the system. This is + * your entry point into finding a USB device to operate. + * + * You are expected to unreference all the devices when you are done with + * them, and then free the list with libusb_free_device_list(). 
Note that + * libusb_free_device_list() can unref all the devices for you. Be careful + * not to unreference a device you are about to open until after you have + * opened it. + * + * This return value of this function indicates the number of devices in + * the resultant list. The list is actually one element larger, as it is + * NULL-terminated. + * + * \param ctx the context to operate on, or NULL for the default context + * \param list output location for a list of devices. Must be later freed with + * libusb_free_device_list(). + * \returns The number of devices in the outputted list, or any + * \ref libusb_error according to errors encountered by the backend. + */ +ssize_t API_EXPORTED libusb_get_device_list(libusb_context *ctx, + libusb_device ***list) +{ + struct discovered_devs *discdevs = discovered_devs_alloc(); + struct libusb_device **ret; + int r = 0; + ssize_t i, len; + USBI_GET_CONTEXT(ctx); + usbi_dbg(""); + + if (!discdevs) + return LIBUSB_ERROR_NO_MEM; + + if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) { + /* backend provides hotplug support */ + struct libusb_device *dev; + + usbi_mutex_lock(&ctx->usb_devs_lock); + list_for_each_entry(dev, &ctx->usb_devs, list, struct libusb_device) { + discdevs = discovered_devs_append(discdevs, dev); + + if (!discdevs) { + r = LIBUSB_ERROR_NO_MEM; + break; + } + } + usbi_mutex_unlock(&ctx->usb_devs_lock); + } else { + /* backend does not provide hotplug support */ + r = usbi_backend->get_device_list(ctx, &discdevs); + } + + if (r < 0) { + len = r; + goto out; + } + + /* convert discovered_devs into a list */ + len = discdevs->len; + ret = malloc(sizeof(void *) * (len + 1)); + if (!ret) { + len = LIBUSB_ERROR_NO_MEM; + goto out; + } + + ret[len] = NULL; + for (i = 0; i < len; i++) { + struct libusb_device *dev = discdevs->devices[i]; + ret[i] = libusb_ref_device(dev); + } + *list = ret; + +out: + discovered_devs_free(discdevs); + return len; +} + +/** \ingroup dev + * Frees a list of devices previously 
discovered using + * libusb_get_device_list(). If the unref_devices parameter is set, the + * reference count of each device in the list is decremented by 1. + * \param list the list to free + * \param unref_devices whether to unref the devices in the list + */ +void API_EXPORTED libusb_free_device_list(libusb_device **list, + int unref_devices) +{ + if (!list) + return; + + if (unref_devices) { + int i = 0; + struct libusb_device *dev; + + while ((dev = list[i++]) != NULL) + libusb_unref_device(dev); + } + free(list); +} + +/** \ingroup dev + * Get the number of the bus that a device is connected to. + * \param dev a device + * \returns the bus number + */ +uint8_t API_EXPORTED libusb_get_bus_number(libusb_device *dev) +{ + return dev->bus_number; +} + +/** \ingroup dev + * Get the address of the device on the bus it is connected to. + * \param dev a device + * \returns the device address + */ +uint8_t API_EXPORTED libusb_get_device_address(libusb_device *dev) +{ + return dev->device_address; +} + +/** \ingroup dev + * Get the negotiated connection speed for a device. + * \param dev a device + * \returns a \ref libusb_speed code, where LIBUSB_SPEED_UNKNOWN means that + * the OS doesn't know or doesn't support returning the negotiated speed. 
+ */ +int API_EXPORTED libusb_get_device_speed(libusb_device *dev) +{ + return dev->speed; +} + +static const struct libusb_endpoint_descriptor *find_endpoint( + struct libusb_config_descriptor *config, unsigned char endpoint) +{ + int iface_idx; + for (iface_idx = 0; iface_idx < config->bNumInterfaces; iface_idx++) { + const struct libusb_interface *iface = &config->interface[iface_idx]; + int altsetting_idx; + + for (altsetting_idx = 0; altsetting_idx < iface->num_altsetting; + altsetting_idx++) { + const struct libusb_interface_descriptor *altsetting + = &iface->altsetting[altsetting_idx]; + int ep_idx; + + for (ep_idx = 0; ep_idx < altsetting->bNumEndpoints; ep_idx++) { + const struct libusb_endpoint_descriptor *ep = + &altsetting->endpoint[ep_idx]; + if (ep->bEndpointAddress == endpoint) + return ep; + } + } + } + return NULL; +} + +/** \ingroup dev + * Convenience function to retrieve the wMaxPacketSize value for a particular + * endpoint in the active device configuration. + * + * This function was originally intended to be of assistance when setting up + * isochronous transfers, but a design mistake resulted in this function + * instead. It simply returns the wMaxPacketSize value without considering + * its contents. If you're dealing with isochronous transfers, you probably + * want libusb_get_max_iso_packet_size() instead. 
+ * + * \param dev a device + * \param endpoint address of the endpoint in question + * \returns the wMaxPacketSize value + * \returns LIBUSB_ERROR_NOT_FOUND if the endpoint does not exist + * \returns LIBUSB_ERROR_OTHER on other failure + */ +int API_EXPORTED libusb_get_max_packet_size(libusb_device *dev, + unsigned char endpoint) +{ + struct libusb_config_descriptor *config; + const struct libusb_endpoint_descriptor *ep; + int r; + + r = libusb_get_active_config_descriptor(dev, &config); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), + "could not retrieve active config descriptor"); + return LIBUSB_ERROR_OTHER; + } + + ep = find_endpoint(config, endpoint); + if (!ep) + return LIBUSB_ERROR_NOT_FOUND; + + r = ep->wMaxPacketSize; + libusb_free_config_descriptor(config); + return r; +} + +/** \ingroup dev + * Calculate the maximum packet size which a specific endpoint is capable is + * sending or receiving in the duration of 1 microframe + * + * Only the active configution is examined. The calculation is based on the + * wMaxPacketSize field in the endpoint descriptor as described in section + * 9.6.6 in the USB 2.0 specifications. + * + * If acting on an isochronous or interrupt endpoint, this function will + * multiply the value found in bits 0:10 by the number of transactions per + * microframe (determined by bits 11:12). Otherwise, this function just + * returns the numeric value found in bits 0:10. + * + * This function is useful for setting up isochronous transfers, for example + * you might pass the return value from this function to + * libusb_set_iso_packet_lengths() in order to set the length field of every + * isochronous packet in a transfer. + * + * Since v1.0.3. 
+ * + * \param dev a device + * \param endpoint address of the endpoint in question + * \returns the maximum packet size which can be sent/received on this endpoint + * \returns LIBUSB_ERROR_NOT_FOUND if the endpoint does not exist + * \returns LIBUSB_ERROR_OTHER on other failure + */ +int API_EXPORTED libusb_get_max_iso_packet_size(libusb_device *dev, + unsigned char endpoint) +{ + struct libusb_config_descriptor *config; + const struct libusb_endpoint_descriptor *ep; + enum libusb_transfer_type ep_type; + uint16_t val; + int r; + + r = libusb_get_active_config_descriptor(dev, &config); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), + "could not retrieve active config descriptor"); + return LIBUSB_ERROR_OTHER; + } + + ep = find_endpoint(config, endpoint); + if (!ep) + return LIBUSB_ERROR_NOT_FOUND; + + val = ep->wMaxPacketSize; + ep_type = ep->bmAttributes & 0x3; + libusb_free_config_descriptor(config); + + r = val & 0x07ff; + if (ep_type == LIBUSB_TRANSFER_TYPE_ISOCHRONOUS + || ep_type == LIBUSB_TRANSFER_TYPE_INTERRUPT) + r *= (1 + ((val >> 11) & 3)); + return r; +} + +/** \ingroup dev + * Increment the reference count of a device. + * \param dev the device to reference + * \returns the same device + */ +DEFAULT_VISIBILITY +libusb_device * LIBUSB_CALL libusb_ref_device(libusb_device *dev) +{ + usbi_mutex_lock(&dev->lock); + dev->refcnt++; + usbi_mutex_unlock(&dev->lock); + return dev; +} + +/** \ingroup dev + * Decrement the reference count of a device. If the decrement operation + * causes the reference count to reach zero, the device shall be destroyed. 
+ * \param dev the device to unreference + */ +void API_EXPORTED libusb_unref_device(libusb_device *dev) +{ + int refcnt; + + if (!dev) + return; + + usbi_mutex_lock(&dev->lock); + refcnt = --dev->refcnt; + usbi_mutex_unlock(&dev->lock); + + if (refcnt == 0) { + usbi_dbg("destroy device %d.%d", dev->bus_number, dev->device_address); + + if (usbi_backend->destroy_device) + usbi_backend->destroy_device(dev); + + if (!libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) { + /* backend does not support hotplug */ + usbi_disconnect_device(dev); + } + + usbi_mutex_destroy(&dev->lock); + free(dev); + } +} + +/* + * Interrupt the iteration of the event handling thread, so that it picks + * up the new fd. + */ +void usbi_fd_notification(struct libusb_context *ctx) +{ + unsigned char dummy = 1; + ssize_t r; + + if (ctx == NULL) + return; + + /* record that we are messing with poll fds */ + usbi_mutex_lock(&ctx->pollfd_modify_lock); + ctx->pollfd_modify++; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + + /* write some data on control pipe to interrupt event handlers */ + r = usbi_write(ctx->ctrl_pipe[1], &dummy, sizeof(dummy)); + if (r <= 0) { + usbi_warn(ctx, "internal signalling write failed"); + usbi_mutex_lock(&ctx->pollfd_modify_lock); + ctx->pollfd_modify--; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + return; + } + + /* take event handling lock */ + libusb_lock_events(ctx); + + /* read the dummy data */ + r = usbi_read(ctx->ctrl_pipe[0], &dummy, sizeof(dummy)); + if (r <= 0) + usbi_warn(ctx, "internal signalling read failed"); + + /* we're done with modifying poll fds */ + usbi_mutex_lock(&ctx->pollfd_modify_lock); + ctx->pollfd_modify--; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + + /* Release event handling lock and wake up event waiters */ + libusb_unlock_events(ctx); +} + +/** \ingroup dev + * Open a device and obtain a device handle. A handle allows you to perform + * I/O on the device in question. 
+ * + * Internally, this function adds a reference to the device and makes it + * available to you through libusb_get_device(). This reference is removed + * during libusb_close(). + * + * This is a non-blocking function; no requests are sent over the bus. + * + * \param dev the device to open + * \param handle output location for the returned device handle pointer. Only + * populated when the return code is 0. + * \returns 0 on success + * \returns LIBUSB_ERROR_NO_MEM on memory allocation failure + * \returns LIBUSB_ERROR_ACCESS if the user has insufficient permissions + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns another LIBUSB_ERROR code on other failure + */ +int API_EXPORTED libusb_open(libusb_device *dev, + libusb_device_handle **handle) +{ + struct libusb_context *ctx = DEVICE_CTX(dev); + struct libusb_device_handle *_handle; + size_t priv_size = usbi_backend->device_handle_priv_size; + int r; + usbi_dbg("open %d.%d", dev->bus_number, dev->device_address); + + if (!dev->attached) { + return LIBUSB_ERROR_NO_DEVICE; + } + + _handle = malloc(sizeof(*_handle) + priv_size); + if (!_handle) + return LIBUSB_ERROR_NO_MEM; + + r = usbi_mutex_init(&_handle->lock, NULL); + if (r) { + free(_handle); + return LIBUSB_ERROR_OTHER; + } + + _handle->dev = libusb_ref_device(dev); + _handle->claimed_interfaces = 0; + memset(&_handle->os_priv, 0, priv_size); + + r = usbi_backend->open(_handle); + if (r < 0) { + usbi_dbg("open %d.%d returns %d", dev->bus_number, dev->device_address, r); + libusb_unref_device(dev); + usbi_mutex_destroy(&_handle->lock); + free(_handle); + return r; + } + + usbi_mutex_lock(&ctx->open_devs_lock); + list_add(&_handle->list, &ctx->open_devs); + usbi_mutex_unlock(&ctx->open_devs_lock); + *handle = _handle; + + /* At this point, we want to interrupt any existing event handlers so + * that they realise the addition of the new device's poll fd. 
One + * example when this is desirable is if the user is running a separate + * dedicated libusb events handling thread, which is running with a long + * or infinite timeout. We want to interrupt that iteration of the loop, + * so that it picks up the new fd, and then continues. */ + usbi_fd_notification(ctx); + + return 0; +} + +/** \ingroup dev + * Convenience function for finding a device with a particular + * idVendor/idProduct combination. This function is intended + * for those scenarios where you are using libusb to knock up a quick test + * application - it allows you to avoid calling libusb_get_device_list() and + * worrying about traversing/freeing the list. + * + * This function has limitations and is hence not intended for use in real + * applications: if multiple devices have the same IDs it will only + * give you the first one, etc. + * + * \param ctx the context to operate on, or NULL for the default context + * \param vendor_id the idVendor value to search for + * \param product_id the idProduct value to search for + * \returns a handle for the first found device, or NULL on error or if the + * device could not be found. 
*/ +DEFAULT_VISIBILITY +libusb_device_handle * LIBUSB_CALL libusb_open_device_with_vid_pid( + libusb_context *ctx, uint16_t vendor_id, uint16_t product_id) +{ + struct libusb_device **devs; + struct libusb_device *found = NULL; + struct libusb_device *dev; + struct libusb_device_handle *handle = NULL; + size_t i = 0; + int r; + + if (libusb_get_device_list(ctx, &devs) < 0) + return NULL; + + while ((dev = devs[i++]) != NULL) { + struct libusb_device_descriptor desc; + r = libusb_get_device_descriptor(dev, &desc); + if (r < 0) + goto out; + if (desc.idVendor == vendor_id && desc.idProduct == product_id) { + found = dev; + break; + } + } + + if (found) { + r = libusb_open(found, &handle); + if (r < 0) + handle = NULL; + } + +out: + libusb_free_device_list(devs, 1); + return handle; +} + +static void do_close(struct libusb_context *ctx, + struct libusb_device_handle *dev_handle) +{ + struct usbi_transfer *itransfer; + struct usbi_transfer *tmp; + + libusb_lock_events(ctx); + + /* remove any transfers in flight that are for this device */ + usbi_mutex_lock(&ctx->flying_transfers_lock); + + /* safe iteration because transfers may be being deleted */ + list_for_each_entry_safe(itransfer, tmp, &ctx->flying_transfers, list, struct usbi_transfer) { + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + if (transfer->dev_handle != dev_handle) + continue; + + if (!(itransfer->flags & USBI_TRANSFER_DEVICE_DISAPPEARED)) { + usbi_err(ctx, "Device handle closed while transfer was still being processed, but the device is still connected as far as we know"); + + if (itransfer->flags & USBI_TRANSFER_CANCELLING) + usbi_warn(ctx, "A cancellation for an in-flight transfer hasn't completed but closing the device handle"); + else + usbi_err(ctx, "A cancellation hasn't even been scheduled on the transfer for which the device is closing"); + } + + /* remove from the list of in-flight transfers and make sure + * we don't accidentally use the device handle in 
the future + * (or that such accesses will be easily caught and identified as a crash) + */ + usbi_mutex_lock(&itransfer->lock); + list_del(&itransfer->list); + transfer->dev_handle = NULL; + usbi_mutex_unlock(&itransfer->lock); + + /* it is up to the user to free up the actual transfer struct. this is + * just making sure that we don't attempt to process the transfer after + * the device handle is invalid + */ + usbi_dbg("Removed transfer %p from the in-flight list because device handle %p closed", + transfer, dev_handle); + } + usbi_mutex_unlock(&ctx->flying_transfers_lock); + + libusb_unlock_events(ctx); + + usbi_mutex_lock(&ctx->open_devs_lock); + list_del(&dev_handle->list); + usbi_mutex_unlock(&ctx->open_devs_lock); + + usbi_backend->close(dev_handle); + libusb_unref_device(dev_handle->dev); + usbi_mutex_destroy(&dev_handle->lock); + free(dev_handle); +} + +/** \ingroup dev + * Close a device handle. Should be called on all open handles before your + * application exits. + * + * Internally, this function destroys the reference that was added by + * libusb_open() on the given device. + * + * This is a non-blocking function; no requests are sent over the bus. + * + * \param dev_handle the handle to close + */ +void API_EXPORTED libusb_close(libusb_device_handle *dev_handle) +{ + struct libusb_context *ctx; + unsigned char dummy = 1; + ssize_t r; + + if (!dev_handle) + return; + usbi_dbg(""); + + ctx = HANDLE_CTX(dev_handle); + + /* Similarly to libusb_open(), we want to interrupt all event handlers + * at this point. More importantly, we want to perform the actual close of + * the device while holding the event handling lock (preventing any other + * thread from doing event handling) because we will be removing a file + * descriptor from the polling loop. 
*/ + + /* record that we are messing with poll fds */ + usbi_mutex_lock(&ctx->pollfd_modify_lock); + ctx->pollfd_modify++; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + + /* write some data on control pipe to interrupt event handlers */ + r = usbi_write(ctx->ctrl_pipe[1], &dummy, sizeof(dummy)); + if (r <= 0) { + usbi_warn(ctx, "internal signalling write failed, closing anyway"); + do_close(ctx, dev_handle); + usbi_mutex_lock(&ctx->pollfd_modify_lock); + ctx->pollfd_modify--; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + return; + } + + /* take event handling lock */ + libusb_lock_events(ctx); + + /* read the dummy data */ + r = usbi_read(ctx->ctrl_pipe[0], &dummy, sizeof(dummy)); + if (r <= 0) + usbi_warn(ctx, "internal signalling read failed, closing anyway"); + + /* Close the device */ + do_close(ctx, dev_handle); + + /* we're done with modifying poll fds */ + usbi_mutex_lock(&ctx->pollfd_modify_lock); + ctx->pollfd_modify--; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + + /* Release event handling lock and wake up event waiters */ + libusb_unlock_events(ctx); +} + +/** \ingroup dev + * Get the underlying device for a handle. This function does not modify + * the reference count of the returned device, so do not feel compelled to + * unreference it when you are done. + * \param dev_handle a device handle + * \returns the underlying device + */ +DEFAULT_VISIBILITY +libusb_device * LIBUSB_CALL libusb_get_device(libusb_device_handle *dev_handle) +{ + return dev_handle->dev; +} + +/** \ingroup dev + * Determine the bConfigurationValue of the currently active configuration. + * + * You could formulate your own control request to obtain this information, + * but this function has the advantage that it may be able to retrieve the + * information from operating system caches (no I/O involved). + * + * If the OS does not cache this information, then this function will block + * while a control transfer is submitted to retrieve the information. 
+ * + * This function will return a value of 0 in the config output + * parameter if the device is in unconfigured state. + * + * \param dev a device handle + * \param config output location for the bConfigurationValue of the active + * configuration (only valid for return code 0) + * \returns 0 on success + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns another LIBUSB_ERROR code on other failure + */ +int API_EXPORTED libusb_get_configuration(libusb_device_handle *dev, + int *config) +{ + int r = LIBUSB_ERROR_NOT_SUPPORTED; + + usbi_dbg(""); + if (usbi_backend->get_configuration) + r = usbi_backend->get_configuration(dev, config); + + if (r == LIBUSB_ERROR_NOT_SUPPORTED) { + uint8_t tmp = 0; + usbi_dbg("falling back to control message"); + r = libusb_control_transfer(dev, LIBUSB_ENDPOINT_IN, + LIBUSB_REQUEST_GET_CONFIGURATION, 0, 0, &tmp, 1, 1000); + if (r == 0) { + usbi_err(HANDLE_CTX(dev), "zero bytes returned in ctrl transfer?"); + r = LIBUSB_ERROR_IO; + } else if (r == 1) { + r = 0; + *config = tmp; + } else { + usbi_dbg("control failed, error %d", r); + } + } + + if (r == 0) + usbi_dbg("active config %d", *config); + + return r; +} + +/** \ingroup dev + * Set the active configuration for a device. + * + * The operating system may or may not have already set an active + * configuration on the device. It is up to your application to ensure the + * correct configuration is selected before you attempt to claim interfaces + * and perform other operations. + * + * If you call this function on a device already configured with the selected + * configuration, then this function will act as a lightweight device reset: + * it will issue a SET_CONFIGURATION request using the current configuration, + * causing most USB-related device state to be reset (altsetting reset to zero, + * endpoint halts cleared, toggles reset). 
+ * + * You cannot change/reset configuration if your application has claimed + * interfaces - you should free them with libusb_release_interface() first. + * You cannot change/reset configuration if other applications or drivers have + * claimed interfaces. + * + * A configuration value of -1 will put the device in unconfigured state. + * The USB specifications state that a configuration value of 0 does this, + * however buggy devices exist which actually have a configuration 0. + * + * You should always use this function rather than formulating your own + * SET_CONFIGURATION control request. This is because the underlying operating + * system needs to know when such changes happen. + * + * This is a blocking function. + * + * \param dev a device handle + * \param configuration the bConfigurationValue of the configuration you + * wish to activate, or -1 if you wish to put the device in unconfigured state + * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if the requested configuration does not exist + * \returns LIBUSB_ERROR_BUSY if interfaces are currently claimed + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns another LIBUSB_ERROR code on other failure + */ +int API_EXPORTED libusb_set_configuration(libusb_device_handle *dev, + int configuration) +{ + usbi_dbg("configuration %d", configuration); + return usbi_backend->set_configuration(dev, configuration); +} + +/** \ingroup dev + * Claim an interface on a given device handle. You must claim the interface + * you wish to use before you can perform I/O on any of its endpoints. + * + * It is legal to attempt to claim an already-claimed interface, in which + * case libusb just returns 0 without doing anything. + * + * Claiming of interfaces is a purely logical operation; it does not cause + * any requests to be sent over the bus. 
Interface claiming is used to + * instruct the underlying operating system that your application wishes + * to take ownership of the interface. + * + * This is a non-blocking function. + * + * \param dev a device handle + * \param interface_number the bInterfaceNumber of the interface you + * wish to claim + * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if the requested interface does not exist + * \returns LIBUSB_ERROR_BUSY if another program or driver has claimed the + * interface + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns a LIBUSB_ERROR code on other failure + */ +int API_EXPORTED libusb_claim_interface(libusb_device_handle *dev, + int interface_number) +{ + int r = 0; + + usbi_dbg("interface %d", interface_number); + if (interface_number >= USB_MAXINTERFACES) + return LIBUSB_ERROR_INVALID_PARAM; + + if (!dev->dev->attached) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_mutex_lock(&dev->lock); + if (dev->claimed_interfaces & (1 << interface_number)) + goto out; + + r = usbi_backend->claim_interface(dev, interface_number); + if (r == 0) + dev->claimed_interfaces |= 1 << interface_number; + +out: + usbi_mutex_unlock(&dev->lock); + return r; +} + +/** \ingroup dev + * Release an interface previously claimed with libusb_claim_interface(). You + * should release all claimed interfaces before closing a device handle. + * + * This is a blocking function. A SET_INTERFACE control request will be sent + * to the device, resetting interface state to the first alternate setting. 
+ * + * \param dev a device handle + * \param interface_number the bInterfaceNumber of the + * previously-claimed interface + * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if the interface was not claimed + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns another LIBUSB_ERROR code on other failure + */ +int API_EXPORTED libusb_release_interface(libusb_device_handle *dev, + int interface_number) +{ + int r; + + usbi_dbg("interface %d", interface_number); + if (interface_number >= USB_MAXINTERFACES) + return LIBUSB_ERROR_INVALID_PARAM; + + usbi_mutex_lock(&dev->lock); + if (!(dev->claimed_interfaces & (1 << interface_number))) { + r = LIBUSB_ERROR_NOT_FOUND; + goto out; + } + + r = usbi_backend->release_interface(dev, interface_number); + if (r == 0) + dev->claimed_interfaces &= ~(1 << interface_number); + +out: + usbi_mutex_unlock(&dev->lock); + return r; +} + +/** \ingroup dev + * Activate an alternate setting for an interface. The interface must have + * been previously claimed with libusb_claim_interface(). + * + * You should always use this function rather than formulating your own + * SET_INTERFACE control request. This is because the underlying operating + * system needs to know when such changes happen. + * + * This is a blocking function. 
+ *
+ * \param dev a device handle
+ * \param interface_number the bInterfaceNumber of the
+ * previously-claimed interface
+ * \param alternate_setting the bAlternateSetting of the alternate
+ * setting to activate
+ * \returns 0 on success
+ * \returns LIBUSB_ERROR_NOT_FOUND if the interface was not claimed, or the
+ * requested alternate setting does not exist
+ * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
+ * \returns another LIBUSB_ERROR code on other failure
+ */
+int API_EXPORTED libusb_set_interface_alt_setting(libusb_device_handle *dev,
+ int interface_number, int alternate_setting)
+{
+ usbi_dbg("interface %d altsetting %d",
+ interface_number, alternate_setting);
+ if (interface_number >= USB_MAXINTERFACES)
+ return LIBUSB_ERROR_INVALID_PARAM;
+
+ usbi_mutex_lock(&dev->lock);
+ if (!dev->dev->attached) {
+ usbi_mutex_unlock(&dev->lock);
+ return LIBUSB_ERROR_NO_DEVICE;
+ }
+
+ if (!(dev->claimed_interfaces & (1 << interface_number))) {
+ usbi_mutex_unlock(&dev->lock);
+ return LIBUSB_ERROR_NOT_FOUND;
+ }
+ usbi_mutex_unlock(&dev->lock);
+
+ return usbi_backend->set_interface_altsetting(dev, interface_number,
+ alternate_setting);
+}
+
+/** \ingroup dev
+ * Clear the halt/stall condition for an endpoint. Endpoints with halt status
+ * are unable to receive or transmit data until the halt condition is cleared.
+ *
+ * You should cancel all pending transfers before attempting to clear the halt
+ * condition.
+ *
+ * This is a blocking function. 
+ *
+ * \param dev a device handle
+ * \param endpoint the endpoint to clear halt status
+ * \returns 0 on success
+ * \returns LIBUSB_ERROR_NOT_FOUND if the endpoint does not exist
+ * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
+ * \returns another LIBUSB_ERROR code on other failure
+ */
+int API_EXPORTED libusb_clear_halt(libusb_device_handle *dev,
+ unsigned char endpoint)
+{
+ usbi_dbg("endpoint %x", endpoint);
+ if (!dev->dev->attached)
+ return LIBUSB_ERROR_NO_DEVICE;
+
+ return usbi_backend->clear_halt(dev, endpoint);
+}
+
+/** \ingroup dev
+ * Perform a USB port reset to reinitialize a device. The system will attempt
+ * to restore the previous configuration and alternate settings after the
+ * reset has completed.
+ *
+ * If the reset fails, the descriptors change, or the previous state cannot be
+ * restored, the device will appear to be disconnected and reconnected. This
+ * means that the device handle is no longer valid (you should close it) and
+ * you should rediscover the device. A return code of LIBUSB_ERROR_NOT_FOUND
+ * indicates when this is the case.
+ *
+ * This is a blocking function which usually incurs a noticeable delay.
+ *
+ * \param dev a handle of the device to reset
+ * \returns 0 on success
+ * \returns LIBUSB_ERROR_NOT_FOUND if re-enumeration is required, or if the
+ * device has been disconnected
+ * \returns another LIBUSB_ERROR code on other failure
+ */
+int API_EXPORTED libusb_reset_device(libusb_device_handle *dev)
+{
+ usbi_dbg("");
+ if (!dev->dev->attached)
+ return LIBUSB_ERROR_NO_DEVICE;
+
+ return usbi_backend->reset_device(dev);
+}
+
+/** \ingroup dev
+ * Determine if a kernel driver is active on an interface. If a kernel driver
+ * is active, you cannot claim the interface, and libusb will be unable to
+ * perform I/O.
+ *
+ * This functionality is not available on Windows. 
+ * + * \param dev a device handle + * \param interface_number the interface to check + * \returns 0 if no kernel driver is active + * \returns 1 if a kernel driver is active + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns LIBUSB_ERROR_NOT_SUPPORTED on platforms where the functionality + * is not available + * \returns another LIBUSB_ERROR code on other failure + * \see libusb_detach_kernel_driver() + */ +int API_EXPORTED libusb_kernel_driver_active(libusb_device_handle *dev, + int interface_number) +{ + usbi_dbg("interface %d", interface_number); + + if (!dev->dev->attached) + return LIBUSB_ERROR_NO_DEVICE; + + if (usbi_backend->kernel_driver_active) + return usbi_backend->kernel_driver_active(dev, interface_number); + else + return LIBUSB_ERROR_NOT_SUPPORTED; +} + +/** \ingroup dev + * Detach a kernel driver from an interface. If successful, you will then be + * able to claim the interface and perform I/O. + * + * This functionality is not available on Darwin or Windows. 
+ * + * \param dev a device handle + * \param interface_number the interface to detach the driver from + * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if no kernel driver was active + * \returns LIBUSB_ERROR_INVALID_PARAM if the interface does not exist + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns LIBUSB_ERROR_NOT_SUPPORTED on platforms where the functionality + * is not available + * \returns another LIBUSB_ERROR code on other failure + * \see libusb_kernel_driver_active() + */ +int API_EXPORTED libusb_detach_kernel_driver(libusb_device_handle *dev, + int interface_number) +{ + usbi_dbg("interface %d", interface_number); + + if (!dev->dev->attached) + return LIBUSB_ERROR_NO_DEVICE; + + if (usbi_backend->detach_kernel_driver) + return usbi_backend->detach_kernel_driver(dev, interface_number); + else + return LIBUSB_ERROR_NOT_SUPPORTED; +} + +/** \ingroup dev + * Re-attach an interface's kernel driver, which was previously detached + * using libusb_detach_kernel_driver(). This call is only effective on + * Linux and returns LIBUSB_ERROR_NOT_SUPPORTED on all other platforms. + * + * This functionality is not available on Darwin or Windows. 
+ *
+ * \param dev a device handle
+ * \param interface_number the interface to attach the driver to
+ * \returns 0 on success
+ * \returns LIBUSB_ERROR_NOT_FOUND if no kernel driver was active
+ * \returns LIBUSB_ERROR_INVALID_PARAM if the interface does not exist
+ * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
+ * \returns LIBUSB_ERROR_NOT_SUPPORTED on platforms where the functionality
+ * is not available
+ * \returns LIBUSB_ERROR_BUSY if the driver cannot be attached because the
+ * interface is claimed by a program or driver
+ * \returns another LIBUSB_ERROR code on other failure
+ * \see libusb_kernel_driver_active()
+ */
+int API_EXPORTED libusb_attach_kernel_driver(libusb_device_handle *dev,
+ int interface_number)
+{
+ usbi_dbg("interface %d", interface_number);
+
+ if (!dev->dev->attached)
+ return LIBUSB_ERROR_NO_DEVICE;
+
+ if (usbi_backend->attach_kernel_driver)
+ return usbi_backend->attach_kernel_driver(dev, interface_number);
+ else
+ return LIBUSB_ERROR_NOT_SUPPORTED;
+}
+
+/** \ingroup lib
+ * Set message verbosity.
+ * - Level 0: no messages ever printed by the library (default)
+ * - Level 1: error messages are printed to stderr
+ * - Level 2: warning and error messages are printed to stderr
+ * - Level 3: informational messages are printed to stdout, warning and error
+ * messages are printed to stderr
+ *
+ * The default level is 0, which means no messages are ever printed. If you
+ * choose to increase the message verbosity level, ensure that your
+ * application does not close the stdout/stderr file descriptors.
+ *
+ * You are advised to set level 3. libusb is conservative with its message
+ * logging and most of the time, will only log messages that explain error
+ * conditions and other oddities. This will help you debug your software. 
+ * + * If the LIBUSB_DEBUG environment variable was set when libusb was + * initialized, this function does nothing: the message verbosity is fixed + * to the value in the environment variable. + * + * If libusb was compiled without any message logging, this function does + * nothing: you'll never get any messages. + * + * If libusb was compiled with verbose debug message logging, this function + * does nothing: you'll always get messages from all levels. + * + * \param ctx the context to operate on, or NULL for the default context + * \param level debug level to set + */ +void API_EXPORTED libusb_set_debug(libusb_context *ctx, int level) +{ + USBI_GET_CONTEXT(ctx); + if (!ctx->debug_fixed) + ctx->debug = level; +} + +/** \ingroup lib + * Initialize libusb. This function must be called before calling any other + * libusb function. + * + * If you do not provide an output location for a context pointer, a default + * context will be created. If there was already a default context, it will + * be reused (and nothing will be initialized/reinitialized). + * + * \param context Optional output location for context pointer. + * Only valid on return code 0. 
+ * \returns 0 on success, or a LIBUSB_ERROR code on failure + * \see contexts + */ +int API_EXPORTED libusb_init(libusb_context **context) +{ + struct libusb_device *dev, *next; + char *dbg = getenv("LIBUSB_DEBUG"); + struct libusb_context *ctx; + static int first_init = 1; + int r = 0; + + usbi_mutex_static_lock(&default_context_lock); + if (!context && usbi_default_context) { + usbi_dbg("reusing default context"); + default_context_refcnt++; + usbi_mutex_static_unlock(&default_context_lock); + return 0; + } + + ctx = malloc(sizeof(*ctx)); + if (!ctx) { + r = LIBUSB_ERROR_NO_MEM; + goto err_unlock; + } + memset(ctx, 0, sizeof(*ctx)); + + if (dbg) { + ctx->debug = atoi(dbg); + if (ctx->debug) + ctx->debug_fixed = 1; + } + + usbi_dbg("libusb-%d.%d.%d%s%s%s", + libusb_version_internal.major, + libusb_version_internal.minor, + libusb_version_internal.micro, + libusb_version_internal.rc, + libusb_version_internal.describe[0] ? " git:" : "", + libusb_version_internal.describe); + + usbi_mutex_init(&ctx->usb_devs_lock, NULL); + usbi_mutex_init(&ctx->open_devs_lock, NULL); + usbi_mutex_init(&ctx->hotplug_cbs_lock, NULL); + list_init(&ctx->usb_devs); + list_init(&ctx->open_devs); + list_init(&ctx->hotplug_cbs); + + if (usbi_backend->init) { + r = usbi_backend->init(ctx); + if (r) + goto err_free_ctx; + } + + r = usbi_io_init(ctx); + if (r < 0) { + if (usbi_backend->exit) + usbi_backend->exit(); + goto err_destroy_mutex; + } + + if (context) { + *context = ctx; + } else if (!usbi_default_context) { + usbi_dbg("created default context"); + usbi_default_context = ctx; + default_context_refcnt++; + } + usbi_mutex_static_unlock(&default_context_lock); + + usbi_mutex_static_lock(&active_contexts_lock); + if (first_init) { + first_init = 0; + list_init (&active_contexts_list); + } + + list_add (&ctx->list, &active_contexts_list); + usbi_mutex_static_unlock(&active_contexts_lock); + + return 0; + +err_destroy_mutex: + usbi_mutex_destroy(&ctx->open_devs_lock); + 
usbi_mutex_destroy(&ctx->usb_devs_lock); +err_free_ctx: + usbi_mutex_lock(&ctx->usb_devs_lock); + list_for_each_entry_safe(dev, next, &ctx->usb_devs, list, struct libusb_device) { + list_del(&dev->list); + libusb_unref_device(dev); + } + usbi_mutex_unlock(&ctx->usb_devs_lock); + free(ctx); +err_unlock: + usbi_mutex_static_unlock(&default_context_lock); + return r; +} + +/** \ingroup lib + * Deinitialize libusb. Should be called after closing all open devices and + * before your application terminates. + * \param ctx the context to deinitialize, or NULL for the default context + */ +void API_EXPORTED libusb_exit(struct libusb_context *ctx) +{ + struct libusb_device *dev, *next; + + usbi_dbg(""); + USBI_GET_CONTEXT(ctx); + + /* if working with default context, only actually do the deinitialization + * if we're the last user */ + if (ctx == usbi_default_context) { + usbi_mutex_static_lock(&default_context_lock); + if (--default_context_refcnt > 0) { + usbi_dbg("not destroying default context"); + usbi_mutex_static_unlock(&default_context_lock); + return; + } + usbi_dbg("destroying default context"); + usbi_default_context = NULL; + usbi_mutex_static_unlock(&default_context_lock); + } + + usbi_mutex_static_lock(&active_contexts_lock); + list_del (&ctx->list); + usbi_mutex_static_unlock(&active_contexts_lock); + + usbi_hotplug_deregister_all(ctx); + + usbi_mutex_lock(&ctx->usb_devs_lock); + list_for_each_entry_safe(dev, next, &ctx->usb_devs, list, struct libusb_device) { + list_del(&dev->list); + libusb_unref_device(dev); + } + usbi_mutex_unlock(&ctx->usb_devs_lock); + + /* a little sanity check. doesn't bother with open_devs locking because + * unless there is an application bug, nobody will be accessing this. 
*/ + if (!list_empty(&ctx->open_devs)) + usbi_warn(ctx, "application left some devices open"); + + usbi_io_exit(ctx); + if (usbi_backend->exit) + usbi_backend->exit(); + + usbi_mutex_destroy(&ctx->open_devs_lock); + usbi_mutex_destroy(&ctx->usb_devs_lock); + usbi_mutex_destroy(&ctx->hotplug_cbs_lock); + free(ctx); +} + +/** \ingroup misc + * Check at runtime if the loaded library has a given capability. + * + * \param capability the \ref libusb_capability to check for + * \returns 1 if the running library has the capability, 0 otherwise + */ +int API_EXPORTED libusb_has_capability(uint32_t capability) +{ + enum libusb_capability cap = capability; + switch (cap) { + case LIBUSB_CAP_HAS_CAPABILITY: + return 1; + case LIBUSB_CAP_HAS_HOTPLUG: + return !(usbi_backend->get_device_list); + } + return 0; +} + +/* this is defined in libusbi.h if needed */ +#ifdef LIBUSB_GETTIMEOFDAY_WIN32 +/* + * gettimeofday + * Implementation according to: + * The Open Group Base Specifications Issue 6 + * IEEE Std 1003.1, 2004 Edition + */ + +/* + * THIS SOFTWARE IS NOT COPYRIGHTED + * + * This source code is offered for use in the public domain. You may + * use, modify or distribute it freely. + * + * This code is distributed in the hope that it will be useful but + * WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESS OR IMPLIED ARE HEREBY + * DISCLAIMED. This includes but is not limited to warranties of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * Contributed by: + * Danny Smith + */ + +/* Offset between 1/1/1601 and 1/1/1970 in 100 nanosec units */ +#define _W32_FT_OFFSET (116444736000000000) + +int usbi_gettimeofday(struct timeval *tp, void *tzp) + { + union { + unsigned __int64 ns100; /*time since 1 Jan 1601 in 100ns units */ + FILETIME ft; + } _now; + + if(tp) + { + GetSystemTimeAsFileTime (&_now.ft); + tp->tv_usec=(long)((_now.ns100 / 10) % 1000000 ); + tp->tv_sec= (long)((_now.ns100 - _W32_FT_OFFSET) / 10000000); + } + /* Always return 0 as per Open Group Base Specifications Issue 6. + Do not set errno on error. */ + return 0; +} +#endif + +void usbi_log_v(struct libusb_context *ctx, enum usbi_log_level level, + const char *function, const char *format, va_list args) +{ + FILE *stream = stdout; + const char *prefix; + struct timeval now; + static struct timeval first = { 0, 0 }; + +#ifndef ENABLE_DEBUG_LOGGING + USBI_GET_CONTEXT(ctx); + if (!ctx) + return; + if (!ctx->debug) + return; + if (level == LOG_LEVEL_WARNING && ctx->debug < 2) + return; + if (level == LOG_LEVEL_INFO && ctx->debug < 3) + return; +#endif + + usbi_gettimeofday(&now, NULL); + if (!first.tv_sec) { + first.tv_sec = now.tv_sec; + first.tv_usec = now.tv_usec; + } + if (now.tv_usec < first.tv_usec) { + now.tv_sec--; + now.tv_usec += 1000000; + } + now.tv_sec -= first.tv_sec; + now.tv_usec -= first.tv_usec; + + switch (level) { + case LOG_LEVEL_INFO: + prefix = "info"; + break; + case LOG_LEVEL_WARNING: + stream = stderr; + prefix = "warning"; + break; + case LOG_LEVEL_ERROR: + stream = stderr; + prefix = "error"; + break; + case LOG_LEVEL_DEBUG: + stream = stderr; + prefix = "debug"; + break; + default: + stream = stderr; + prefix = "unknown"; + break; + } + + fprintf(stream, "libusb: %d.%06d %s [%s] ", + (int)now.tv_sec, (int)now.tv_usec, prefix, function); + + vfprintf(stream, format, args); + + fprintf(stream, "\n"); +} + +void usbi_log(struct libusb_context *ctx, enum usbi_log_level level, + const char *function, const char 
*format, ...) +{ + va_list args; + + va_start (args, format); + usbi_log_v(ctx, level, function, format, args); + va_end (args); +} + +/** \ingroup misc + * Returns a constant NULL-terminated string with the ASCII name of a libusb + * error code. The caller must not free() the returned string. + * + * \param error_code The \ref libusb_error code to return the name of. + * \returns The error name, or the string **UNKNOWN** if the value of + * error_code is not a known error code. + */ +DEFAULT_VISIBILITY const char * LIBUSB_CALL libusb_error_name(int error_code) +{ + enum libusb_error error = error_code; + switch (error) { + case LIBUSB_SUCCESS: + return "LIBUSB_SUCCESS"; + case LIBUSB_ERROR_IO: + return "LIBUSB_ERROR_IO"; + case LIBUSB_ERROR_INVALID_PARAM: + return "LIBUSB_ERROR_INVALID_PARAM"; + case LIBUSB_ERROR_ACCESS: + return "LIBUSB_ERROR_ACCESS"; + case LIBUSB_ERROR_NO_DEVICE: + return "LIBUSB_ERROR_NO_DEVICE"; + case LIBUSB_ERROR_NOT_FOUND: + return "LIBUSB_ERROR_NOT_FOUND"; + case LIBUSB_ERROR_BUSY: + return "LIBUSB_ERROR_BUSY"; + case LIBUSB_ERROR_TIMEOUT: + return "LIBUSB_ERROR_TIMEOUT"; + case LIBUSB_ERROR_OVERFLOW: + return "LIBUSB_ERROR_OVERFLOW"; + case LIBUSB_ERROR_PIPE: + return "LIBUSB_ERROR_PIPE"; + case LIBUSB_ERROR_INTERRUPTED: + return "LIBUSB_ERROR_INTERRUPTED"; + case LIBUSB_ERROR_NO_MEM: + return "LIBUSB_ERROR_NO_MEM"; + case LIBUSB_ERROR_NOT_SUPPORTED: + return "LIBUSB_ERROR_NOT_SUPPORTED"; + case LIBUSB_ERROR_OTHER: + return "LIBUSB_ERROR_OTHER"; + } + return "**UNKNOWN**"; +} + +/** \ingroup misc + * Returns a constant string with an English short description of the given + * error code. The caller should never free() the returned pointer since it + * points to a constant string. + * The returned string is encoded in ASCII form and always starts with a capital + * letter and ends without any dot. 
+ * \param errcode the error code whose description is desired + * \returns a short description of the error code in English + */ +API_EXPORTED const char* libusb_strerror(enum libusb_error errcode) +{ + switch (errcode) { + case LIBUSB_SUCCESS: + return "Success"; + case LIBUSB_ERROR_IO: + return "Input/output error"; + case LIBUSB_ERROR_INVALID_PARAM: + return "Invalid parameter"; + case LIBUSB_ERROR_ACCESS: + return "Access denied (insufficient permissions)"; + case LIBUSB_ERROR_NO_DEVICE: + return "No such device (it may have been disconnected)"; + case LIBUSB_ERROR_NOT_FOUND: + return "Entity not found"; + case LIBUSB_ERROR_BUSY: + return "Resource busy"; + case LIBUSB_ERROR_TIMEOUT: + return "Operation timed out"; + case LIBUSB_ERROR_OVERFLOW: + return "Overflow"; + case LIBUSB_ERROR_PIPE: + return "Pipe error"; + case LIBUSB_ERROR_INTERRUPTED: + return "System call interrupted (perhaps due to signal)"; + case LIBUSB_ERROR_NO_MEM: + return "Insufficient memory"; + case LIBUSB_ERROR_NOT_SUPPORTED: + return "Operation not supported or unimplemented on this platform"; + case LIBUSB_ERROR_OTHER: + return "Other error"; + } + + return "Unknown error"; +} + +/** \ingroup misc + * Returns a pointer to const struct libusb_version with the version + * (major, minor, micro, rc, and nano) of the running library. 
+ */ +DEFAULT_VISIBILITY +const struct libusb_version * LIBUSB_CALL libusb_get_version(void) +{ + return &libusb_version_internal; +} diff --git a/compat/libusb-1.0/libusb/descriptor.c b/compat/libusb-1.0/libusb/descriptor.c new file mode 100644 index 0000000..4f81dab --- /dev/null +++ b/compat/libusb-1.0/libusb/descriptor.c @@ -0,0 +1,872 @@ +/* + * USB descriptor handling functions for libusb + * Copyright (C) 2007 Daniel Drake + * Copyright (c) 2001 Johannes Erdfelt + * Copyright (c) 2012-2013 Nathan Hjelm + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include + +#include "libusbi.h" + +#define DESC_HEADER_LENGTH 2 +#define DEVICE_DESC_LENGTH 18 +#define CONFIG_DESC_LENGTH 9 +#define INTERFACE_DESC_LENGTH 9 +#define ENDPOINT_DESC_LENGTH 7 +#define ENDPOINT_AUDIO_DESC_LENGTH 9 + +/** @defgroup desc USB descriptors + * This page details how to examine the various standard USB descriptors + * for detected devices + */ + +/* set host_endian if the w values are already in host endian format, + * as opposed to bus endian. 
*/ +int usbi_parse_descriptor(const unsigned char *source, const char *descriptor, + void *dest, int host_endian) +{ + const unsigned char *sp = source; + unsigned char *dp = dest; + uint16_t w; + const char *cp; + uint32_t d; + + for (cp = descriptor; *cp; cp++) { + switch (*cp) { + case 'b': /* 8-bit byte */ + *dp++ = *sp++; + break; + case 'w': /* 16-bit word, convert from little endian to CPU */ + dp += ((uintptr_t)dp & 1); /* Align to word boundary */ + + if (host_endian) { + memcpy(dp, sp, 2); + } else { + w = (sp[1] << 8) | sp[0]; + *((uint16_t *)dp) = w; + } + sp += 2; + dp += 2; + break; + /* 32-bit word, convert from little endian to CPU */ + case 'd': + /* Align to word boundary */ + dp += ((unsigned long)dp & 1); + + if (host_endian) { + memcpy(dp, sp, 4); + } else { + d = (sp[3] << 24) | (sp[2] << 16) | + (sp[1] << 8) | sp[0]; + *((uint32_t *)dp) = d; + } + sp += 4; + dp += 4; + break; + } + } + + return (int) (sp - source); +} + +static void clear_endpoint(struct libusb_endpoint_descriptor *endpoint) +{ + if (endpoint->extra) + free((unsigned char *) endpoint->extra); +} + +static int parse_endpoint(struct libusb_context *ctx, + struct libusb_endpoint_descriptor *endpoint, unsigned char *buffer, + int size, int host_endian) +{ + struct usb_descriptor_header header; + unsigned char *extra; + unsigned char *begin; + int parsed = 0; + int len; + + usbi_parse_descriptor(buffer, "bb", &header, 0); + + /* Everything should be fine being passed into here, but we sanity */ + /* check JIC */ + if (header.bLength > size) { + usbi_err(ctx, "ran out of descriptors parsing"); + return -1; + } + + if (header.bDescriptorType != LIBUSB_DT_ENDPOINT) { + usbi_err(ctx, "unexpected descriptor %x (expected %x)", + header.bDescriptorType, LIBUSB_DT_ENDPOINT); + return parsed; + } + + if (header.bLength >= ENDPOINT_AUDIO_DESC_LENGTH) + usbi_parse_descriptor(buffer, "bbbbwbbb", endpoint, host_endian); + else if (header.bLength >= ENDPOINT_DESC_LENGTH) + 
usbi_parse_descriptor(buffer, "bbbbwb", endpoint, host_endian); + + buffer += header.bLength; + size -= header.bLength; + parsed += header.bLength; + + /* Skip over the rest of the Class Specific or Vendor Specific */ + /* descriptors */ + begin = buffer; + while (size >= DESC_HEADER_LENGTH) { + usbi_parse_descriptor(buffer, "bb", &header, 0); + + if (header.bLength < 2) { + usbi_err(ctx, "invalid descriptor length %d", header.bLength); + return -1; + } + + /* If we find another "proper" descriptor then we're done */ + if ((header.bDescriptorType == LIBUSB_DT_ENDPOINT) || + (header.bDescriptorType == LIBUSB_DT_INTERFACE) || + (header.bDescriptorType == LIBUSB_DT_CONFIG) || + (header.bDescriptorType == LIBUSB_DT_DEVICE)) + break; + + usbi_dbg("skipping descriptor %x", header.bDescriptorType); + buffer += header.bLength; + size -= header.bLength; + parsed += header.bLength; + } + + /* Copy any unknown descriptors into a storage area for drivers */ + /* to later parse */ + len = (int)(buffer - begin); + if (!len) { + endpoint->extra = NULL; + endpoint->extra_length = 0; + return parsed; + } + + extra = malloc(len); + endpoint->extra = extra; + if (!extra) { + endpoint->extra_length = 0; + return LIBUSB_ERROR_NO_MEM; + } + + memcpy(extra, begin, len); + endpoint->extra_length = len; + + return parsed; +} + +static void clear_interface(struct libusb_interface *usb_interface) +{ + int i; + int j; + + if (usb_interface->altsetting) { + for (i = 0; i < usb_interface->num_altsetting; i++) { + struct libusb_interface_descriptor *ifp = + (struct libusb_interface_descriptor *) + usb_interface->altsetting + i; + if (ifp->extra) + free((void *) ifp->extra); + if (ifp->endpoint) { + for (j = 0; j < ifp->bNumEndpoints; j++) + clear_endpoint((struct libusb_endpoint_descriptor *) + ifp->endpoint + j); + free((void *) ifp->endpoint); + } + } + free((void *) usb_interface->altsetting); + usb_interface->altsetting = NULL; + } + +} + +static int parse_interface(libusb_context *ctx, + 
struct libusb_interface *usb_interface, unsigned char *buffer, int size, + int host_endian) +{ + int i; + int len; + int r; + int parsed = 0; + size_t tmp; + struct usb_descriptor_header header; + struct libusb_interface_descriptor *ifp; + unsigned char *begin; + + usb_interface->num_altsetting = 0; + + while (size >= INTERFACE_DESC_LENGTH) { + struct libusb_interface_descriptor *altsetting = + (struct libusb_interface_descriptor *) usb_interface->altsetting; + altsetting = realloc(altsetting, + sizeof(struct libusb_interface_descriptor) * + (usb_interface->num_altsetting + 1)); + if (!altsetting) { + r = LIBUSB_ERROR_NO_MEM; + goto err; + } + usb_interface->altsetting = altsetting; + + ifp = altsetting + usb_interface->num_altsetting; + usb_interface->num_altsetting++; + usbi_parse_descriptor(buffer, "bbbbbbbbb", ifp, 0); + ifp->extra = NULL; + ifp->extra_length = 0; + ifp->endpoint = NULL; + + /* Skip over the interface */ + buffer += ifp->bLength; + parsed += ifp->bLength; + size -= ifp->bLength; + + begin = buffer; + + /* Skip over any interface, class or vendor descriptors */ + while (size >= DESC_HEADER_LENGTH) { + usbi_parse_descriptor(buffer, "bb", &header, 0); + if (header.bLength < 2) { + usbi_err(ctx, "invalid descriptor of length %d", + header.bLength); + r = LIBUSB_ERROR_IO; + goto err; + } + + /* If we find another "proper" descriptor then we're done */ + if ((header.bDescriptorType == LIBUSB_DT_INTERFACE) || + (header.bDescriptorType == LIBUSB_DT_ENDPOINT) || + (header.bDescriptorType == LIBUSB_DT_CONFIG) || + (header.bDescriptorType == LIBUSB_DT_DEVICE) || + (header.bDescriptorType == + LIBUSB_DT_SS_ENDPOINT_COMPANION)) + break; + + buffer += header.bLength; + parsed += header.bLength; + size -= header.bLength; + } + + /* Copy any unknown descriptors into a storage area for */ + /* drivers to later parse */ + len = (int)(buffer - begin); + if (len) { + ifp->extra = malloc(len); + if (!ifp->extra) { + r = LIBUSB_ERROR_NO_MEM; + goto err; + } + 
memcpy((unsigned char *) ifp->extra, begin, len); + ifp->extra_length = len; + } + + /* Did we hit an unexpected descriptor? */ + if (size >= DESC_HEADER_LENGTH) { + usbi_parse_descriptor(buffer, "bb", &header, 0); + if ((header.bDescriptorType == LIBUSB_DT_CONFIG) || + (header.bDescriptorType == LIBUSB_DT_DEVICE)) { + return parsed; + } + } + + if (ifp->bNumEndpoints > USB_MAXENDPOINTS) { + usbi_err(ctx, "too many endpoints (%d)", ifp->bNumEndpoints); + r = LIBUSB_ERROR_IO; + goto err; + } + + if (ifp->bNumEndpoints > 0) { + struct libusb_endpoint_descriptor *endpoint; + tmp = ifp->bNumEndpoints * sizeof(struct libusb_endpoint_descriptor); + endpoint = malloc(tmp); + ifp->endpoint = endpoint; + if (!endpoint) { + r = LIBUSB_ERROR_NO_MEM; + goto err; + } + + memset(endpoint, 0, tmp); + for (i = 0; i < ifp->bNumEndpoints; i++) { + usbi_parse_descriptor(buffer, "bb", &header, 0); + + if (header.bLength > size) { + usbi_err(ctx, "ran out of descriptors parsing"); + r = LIBUSB_ERROR_IO; + goto err; + } + + r = parse_endpoint(ctx, endpoint + i, buffer, size, + host_endian); + if (r < 0) + goto err; + + buffer += r; + parsed += r; + size -= r; + } + } + + /* We check to see if it's an alternate to this one */ + ifp = (struct libusb_interface_descriptor *) buffer; + if (size < LIBUSB_DT_INTERFACE_SIZE || + ifp->bDescriptorType != LIBUSB_DT_INTERFACE || + !ifp->bAlternateSetting) + return parsed; + } + + return parsed; +err: + clear_interface(usb_interface); + return r; +} + +static void clear_configuration(struct libusb_config_descriptor *config) +{ + if (config->interface) { + int i; + for (i = 0; i < config->bNumInterfaces; i++) + clear_interface((struct libusb_interface *) + config->interface + i); + free((void *) config->interface); + } + if (config->extra) + free((void *) config->extra); +} + +static int parse_configuration(struct libusb_context *ctx, + struct libusb_config_descriptor *config, unsigned char *buffer, + int host_endian) +{ + int i; + int r; + int size; 
+ size_t tmp; + struct usb_descriptor_header header; + struct libusb_interface *usb_interface; + + usbi_parse_descriptor(buffer, "bbwbbbbb", config, host_endian); + size = config->wTotalLength; + + if (config->bNumInterfaces > USB_MAXINTERFACES) { + usbi_err(ctx, "too many interfaces (%d)", config->bNumInterfaces); + return LIBUSB_ERROR_IO; + } + + tmp = config->bNumInterfaces * sizeof(struct libusb_interface); + usb_interface = malloc(tmp); + config->interface = usb_interface; + if (!config->interface) + return LIBUSB_ERROR_NO_MEM; + + memset(usb_interface, 0, tmp); + buffer += config->bLength; + size -= config->bLength; + + config->extra = NULL; + config->extra_length = 0; + + for (i = 0; i < config->bNumInterfaces; i++) { + int len; + unsigned char *begin; + + /* Skip over the rest of the Class Specific or Vendor */ + /* Specific descriptors */ + begin = buffer; + while (size >= DESC_HEADER_LENGTH) { + usbi_parse_descriptor(buffer, "bb", &header, 0); + + if ((header.bLength > size) || + (header.bLength < DESC_HEADER_LENGTH)) { + usbi_err(ctx, "invalid descriptor length of %d", + header.bLength); + r = LIBUSB_ERROR_IO; + goto err; + } + + /* If we find another "proper" descriptor then we're done */ + if ((header.bDescriptorType == LIBUSB_DT_ENDPOINT) || + (header.bDescriptorType == LIBUSB_DT_INTERFACE) || + (header.bDescriptorType == LIBUSB_DT_CONFIG) || + (header.bDescriptorType == LIBUSB_DT_DEVICE) || + (header.bDescriptorType == + LIBUSB_DT_SS_ENDPOINT_COMPANION)) + break; + + usbi_dbg("skipping descriptor 0x%x\n", header.bDescriptorType); + buffer += header.bLength; + size -= header.bLength; + } + + /* Copy any unknown descriptors into a storage area for */ + /* drivers to later parse */ + len = (int)(buffer - begin); + if (len) { + /* FIXME: We should realloc and append here */ + if (!config->extra_length) { + config->extra = malloc(len); + if (!config->extra) { + r = LIBUSB_ERROR_NO_MEM; + goto err; + } + + memcpy((unsigned char *) config->extra, begin, 
len); + config->extra_length = len; + } + } + + r = parse_interface(ctx, usb_interface + i, buffer, size, host_endian); + if (r < 0) + goto err; + + buffer += r; + size -= r; + } + + return size; + +err: + clear_configuration(config); + return r; +} + +int usbi_device_cache_descriptor(libusb_device *dev) +{ + int r, host_endian; + + r = usbi_backend->get_device_descriptor(dev, (unsigned char *) &dev->device_descriptor, + &host_endian); + if (r < 0) + return r; + + if (!host_endian) { + dev->device_descriptor.bcdUSB = libusb_le16_to_cpu(dev->device_descriptor.bcdUSB); + dev->device_descriptor.idVendor = libusb_le16_to_cpu(dev->device_descriptor.idVendor); + dev->device_descriptor.idProduct = libusb_le16_to_cpu(dev->device_descriptor.idProduct); + dev->device_descriptor.bcdDevice = libusb_le16_to_cpu(dev->device_descriptor.bcdDevice); + } + + return LIBUSB_SUCCESS; +} + +/** \ingroup desc + * Get the USB device descriptor for a given device. + * + * This is a non-blocking function; the device descriptor is cached in memory. + * + * \param dev the device + * \param desc output location for the descriptor data + * \returns 0 on success or a LIBUSB_ERROR code on failure + */ +int API_EXPORTED libusb_get_device_descriptor(libusb_device *dev, + struct libusb_device_descriptor *desc) +{ + usbi_dbg(""); + memcpy((unsigned char *) desc, (unsigned char *) &dev->device_descriptor, + sizeof (dev->device_descriptor)); + return 0; +} + +/** \ingroup desc + * Get the USB configuration descriptor for the currently active configuration. + * This is a non-blocking function which does not involve any requests being + * sent to the device. + * + * \param dev a device + * \param config output location for the USB configuration descriptor. Only + * valid if 0 was returned. Must be freed with libusb_free_config_descriptor() + * after use. 
+ * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if the device is in unconfigured state + * \returns another LIBUSB_ERROR code on error + * \see libusb_get_config_descriptor + */ +int API_EXPORTED libusb_get_active_config_descriptor(libusb_device *dev, + struct libusb_config_descriptor **config) +{ + struct libusb_config_descriptor *_config = malloc(sizeof(*_config)); + unsigned char tmp[8]; + unsigned char *buf = NULL; + int host_endian = 0; + int r; + + usbi_dbg(""); + if (!_config) + return LIBUSB_ERROR_NO_MEM; + + r = usbi_backend->get_active_config_descriptor(dev, tmp, sizeof(tmp), + &host_endian); + if (r < 0) + goto err; + + usbi_parse_descriptor(tmp, "bbw", _config, host_endian); + buf = malloc(_config->wTotalLength); + if (!buf) { + r = LIBUSB_ERROR_NO_MEM; + goto err; + } + + r = usbi_backend->get_active_config_descriptor(dev, buf, + _config->wTotalLength, &host_endian); + if (r < 0) + goto err; + + r = parse_configuration(dev->ctx, _config, buf, host_endian); + if (r < 0) { + usbi_err(dev->ctx, "parse_configuration failed with error %d", r); + goto err; + } else if (r > 0) { + usbi_warn(dev->ctx, "descriptor data still left"); + } + + free(buf); + *config = _config; + return 0; + +err: + free(_config); + if (buf) + free(buf); + return r; +} + +/** \ingroup desc + * Get a USB configuration descriptor based on its index. + * This is a non-blocking function which does not involve any requests being + * sent to the device. + * + * \param dev a device + * \param config_index the index of the configuration you wish to retrieve + * \param config output location for the USB configuration descriptor. Only + * valid if 0 was returned. Must be freed with libusb_free_config_descriptor() + * after use. 
+ * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if the configuration does not exist + * \returns another LIBUSB_ERROR code on error + * \see libusb_get_active_config_descriptor() + * \see libusb_get_config_descriptor_by_value() + */ +int API_EXPORTED libusb_get_config_descriptor(libusb_device *dev, + uint8_t config_index, struct libusb_config_descriptor **config) +{ + struct libusb_config_descriptor *_config; + unsigned char tmp[8]; + unsigned char *buf = NULL; + int host_endian = 0; + int r; + + usbi_dbg("index %d", config_index); + if (config_index >= dev->num_configurations) + return LIBUSB_ERROR_NOT_FOUND; + + _config = malloc(sizeof(*_config)); + if (!_config) + return LIBUSB_ERROR_NO_MEM; + + r = usbi_backend->get_config_descriptor(dev, config_index, tmp, + sizeof(tmp), &host_endian); + if (r < 0) + goto err; + + usbi_parse_descriptor(tmp, "bbw", _config, host_endian); + buf = malloc(_config->wTotalLength); + if (!buf) { + r = LIBUSB_ERROR_NO_MEM; + goto err; + } + + host_endian = 0; + r = usbi_backend->get_config_descriptor(dev, config_index, buf, + _config->wTotalLength, &host_endian); + if (r < 0) + goto err; + + r = parse_configuration(dev->ctx, _config, buf, host_endian); + if (r < 0) { + usbi_err(dev->ctx, "parse_configuration failed with error %d", r); + goto err; + } else if (r > 0) { + usbi_warn(dev->ctx, "descriptor data still left"); + } + + free(buf); + *config = _config; + return 0; + +err: + free(_config); + if (buf) + free(buf); + return r; +} + +/* iterate through all configurations, returning the index of the configuration + * matching a specific bConfigurationValue in the idx output parameter, or -1 + * if the config was not found. 
+ * returns 0 or a LIBUSB_ERROR code + */ +int usbi_get_config_index_by_value(struct libusb_device *dev, + uint8_t bConfigurationValue, int *idx) +{ + uint8_t i; + + usbi_dbg("value %d", bConfigurationValue); + for (i = 0; i < dev->num_configurations; i++) { + unsigned char tmp[6]; + int host_endian; + int r = usbi_backend->get_config_descriptor(dev, i, tmp, sizeof(tmp), + &host_endian); + if (r < 0) + return r; + if (tmp[5] == bConfigurationValue) { + *idx = i; + return 0; + } + } + + *idx = -1; + return 0; +} + +/** \ingroup desc + * Get a USB configuration descriptor with a specific bConfigurationValue. + * This is a non-blocking function which does not involve any requests being + * sent to the device. + * + * \param dev a device + * \param bConfigurationValue the bConfigurationValue of the configuration you + * wish to retrieve + * \param config output location for the USB configuration descriptor. Only + * valid if 0 was returned. Must be freed with libusb_free_config_descriptor() + * after use. + * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if the configuration does not exist + * \returns another LIBUSB_ERROR code on error + * \see libusb_get_active_config_descriptor() + * \see libusb_get_config_descriptor() + */ +int API_EXPORTED libusb_get_config_descriptor_by_value(libusb_device *dev, + uint8_t bConfigurationValue, struct libusb_config_descriptor **config) +{ + int idx; + int r = usbi_get_config_index_by_value(dev, bConfigurationValue, &idx); + if (r < 0) + return r; + else if (idx == -1) + return LIBUSB_ERROR_NOT_FOUND; + else + return libusb_get_config_descriptor(dev, (uint8_t) idx, config); +} + +/** \ingroup desc + * Free a configuration descriptor obtained from + * libusb_get_active_config_descriptor() or libusb_get_config_descriptor(). + * It is safe to call this function with a NULL config parameter, in which + * case the function simply returns. 
+ * + * \param config the configuration descriptor to free + */ +void API_EXPORTED libusb_free_config_descriptor( + struct libusb_config_descriptor *config) +{ + if (!config) + return; + + clear_configuration(config); + free(config); +} + +/** \ingroup desc + * Retrieve a string descriptor in C style ASCII. + * + * Wrapper around libusb_get_string_descriptor(). Uses the first language + * supported by the device. + * + * \param dev a device handle + * \param desc_index the index of the descriptor to retrieve + * \param data output buffer for ASCII string descriptor + * \param length size of data buffer + * \returns number of bytes returned in data, or LIBUSB_ERROR code on failure + */ +int API_EXPORTED libusb_get_string_descriptor_ascii(libusb_device_handle *dev, + uint8_t desc_index, unsigned char *data, int length) +{ + unsigned char tbuf[255]; /* Some devices choke on size > 255 */ + int r, si, di; + uint16_t langid; + + /* Asking for the zero'th index is special - it returns a string + * descriptor that contains all the language IDs supported by the + * device. Typically there aren't many - often only one. Language + * IDs are 16 bit numbers, and they start at the third byte in the + * descriptor. There's also no point in trying to read descriptor 0 + * with this function. See USB 2.0 specification section 9.6.7 for + * more information. 
+ */ + + if (desc_index == 0) + return LIBUSB_ERROR_INVALID_PARAM; + + r = libusb_get_string_descriptor(dev, 0, 0, tbuf, sizeof(tbuf)); + if (r < 0) + return r; + + if (r < 4) + return LIBUSB_ERROR_IO; + + langid = tbuf[2] | (tbuf[3] << 8); + + r = libusb_get_string_descriptor(dev, desc_index, langid, tbuf, + sizeof(tbuf)); + if (r < 0) + return r; + + if (tbuf[1] != LIBUSB_DT_STRING) + return LIBUSB_ERROR_IO; + + if (tbuf[0] > r) + return LIBUSB_ERROR_IO; + + for (di = 0, si = 2; si < tbuf[0]; si += 2) { + if (di >= (length - 1)) + break; + + if (tbuf[si + 1]) /* high byte */ + data[di++] = '?'; + else + data[di++] = tbuf[si]; + } + + data[di] = 0; + return di; +} + +int API_EXPORTED libusb_parse_ss_endpoint_comp(const void *buf, int len, + struct libusb_ss_endpoint_companion_descriptor **ep_comp) +{ + struct libusb_ss_endpoint_companion_descriptor *ep_comp_desc; + struct usb_descriptor_header header; + + usbi_parse_descriptor(buf, "bb", &header, 0); + + /* Everything should be fine being passed into here, but we sanity */ + /* check JIC */ + if (header.bLength > len) { + usbi_err(NULL, "ran out of descriptors parsing"); + return LIBUSB_ERROR_NO_MEM; + } + + if (header.bDescriptorType != LIBUSB_DT_SS_ENDPOINT_COMPANION) { + usbi_err(NULL, "unexpected descriptor %x (expected %x)", + header.bDescriptorType, LIBUSB_DT_SS_ENDPOINT_COMPANION); + return LIBUSB_ERROR_INVALID_PARAM; + } + + ep_comp_desc = calloc(1, sizeof (*ep_comp_desc)); + if (!ep_comp_desc) { + return LIBUSB_ERROR_NO_MEM; + } + + if (header.bLength >= LIBUSB_DT_SS_ENDPOINT_COMPANION_SIZE) + usbi_parse_descriptor(buf, "bbbbw", ep_comp_desc, 0); + + *ep_comp = ep_comp_desc; + + return LIBUSB_SUCCESS; +} + +void API_EXPORTED libusb_free_ss_endpoint_comp(struct libusb_ss_endpoint_companion_descriptor *ep_comp) +{ + assert(ep_comp); + free(ep_comp); +} + +int API_EXPORTED libusb_parse_bos_descriptor(const void *buf, int len, + struct libusb_bos_descriptor **bos) +{ + const unsigned char *buffer = (const 
unsigned char *) buf; + struct libusb_bos_descriptor *bos_desc; + int i; + + len = len; + bos_desc = calloc (1, sizeof (*bos_desc)); + if (!bos_desc) { + return LIBUSB_ERROR_NO_MEM; + } + + usbi_parse_descriptor(buffer, "bbwb", bos_desc, 0); + buffer += LIBUSB_DT_BOS_SIZE; + + /* Get the device capability descriptors */ + for (i = 0; i < bos_desc->bNumDeviceCaps; ++i) { + if (buffer[2] == LIBUSB_USB_CAP_TYPE_EXT) { + if (!bos_desc->usb_2_0_ext_cap) { + bos_desc->usb_2_0_ext_cap = + (struct libusb_usb_2_0_device_capability_descriptor *) + malloc(sizeof(*bos_desc->usb_2_0_ext_cap)); + usbi_parse_descriptor(buffer, "bbbd", + bos_desc->usb_2_0_ext_cap, 0); + } else + usbi_warn(NULL, + "usb_2_0_ext_cap was already allocated"); + + /* move to the next device capability descriptor */ + buffer += LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY_SIZE; + } else if (buffer[2] == LIBUSB_SS_USB_CAP_TYPE) { + if (!bos_desc->ss_usb_cap) { + bos_desc->ss_usb_cap = + (struct libusb_ss_usb_device_capability_descriptor *) + malloc(sizeof(*bos_desc->ss_usb_cap)); + usbi_parse_descriptor(buffer, "bbbbwbbw", + bos_desc->ss_usb_cap, 0); + } else + usbi_warn(NULL, + "ss_usb_cap was already allocated"); + + /* move to the next device capability descriptor */ + buffer += LIBUSB_SS_USB_DEVICE_CAPABILITY_SIZE; + } else { + usbi_info(NULL, "wireless/container_id capability " + "descriptor"); + + /* move to the next device capability descriptor */ + buffer += buffer[0]; + } + } + + *bos = bos_desc; + + return LIBUSB_SUCCESS; +} + +void API_EXPORTED libusb_free_bos_descriptor(struct libusb_bos_descriptor *bos) +{ + assert(bos); + + if (bos->usb_2_0_ext_cap) { + free(bos->usb_2_0_ext_cap); + } + + if (bos->ss_usb_cap) { + free(bos->ss_usb_cap); + } + + free(bos); +} diff --git a/compat/libusb-1.0/libusb/hotplug.c b/compat/libusb-1.0/libusb/hotplug.c new file mode 100644 index 0000000..d00d0b9 --- /dev/null +++ b/compat/libusb-1.0/libusb/hotplug.c @@ -0,0 +1,298 @@ +/* -*- Mode: C; indent-tabs-mode:nil 
; c-basic-offset:8 -*- */ +/* + * Hotplug functions for libusb + * Copyright (C) 2012-2013 Nathan Hjelm + * Copyright (C) 2012-2013 Peter Stuge + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "libusbi.h" +#include "hotplug.h" + +/** + * @defgroup hotplug Device hotplug event notification + * This page details how to use the libusb hotplug interface. + * + * \page hotplug Device hotplug event notification + * + * \section intro Introduction + * + * Releases of libusb 1.0 newer than 1.X have added support for hotplug + * events. This interface allows you to request notification for the + * arrival and departure of matching USB devices. + * + * To receive hotplug notification you register a callback by calling + * libusb_hotplug_register_callback(). This function will optionally return + * a handle that can be passed to libusb_hotplug_deregister_callback(). + * + * A callback function must return an int (0 or 1) indicating whether the callback is + * expecting additional events. Returning 0 will rearm the callback and 1 will cause + * the callback to be deregistered. + * + * Callbacks for a particulat context are automatically deregistered by libusb_exit(). 
+ * + * As of 1.X there are two supported hotplug events: + * - LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED: A device has arrived and is ready to use + * - LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT: A device has left and is no longer available + * + * A hotplug event can listen for either or both of these events. + * + * Note: If you receive notification that a device has left and you have any + * a libusb_device_handles for the device it is up to you to call libusb_close() + * on each handle to free up any remaining resources associated with the device. + * Once a device has left any libusb_device_handle associated with the device + * are invalid and will remain so even if the device comes back. + * + * When handling a LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED event it is considered + * safe to call any libusb function that takes a libusb_device. On the other hand, + * when handling a LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT event the only safe function + * is libusb_get_device_descriptor(). + * + * The following code provides an example of the usage of the hotplug interface: +\code +static int count = 0; + +int hotplug_callback(struct libusb_context *ctx, struct libusb_device *dev, + libusb_hotplug_event event, void *user_data) { + static libusb_device_handle *handle = NULL; + struct libusb_device_descriptor desc; + int rc; + + (void)libusb_get_device_descriptor(dev, &desc); + + if (LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED == event) { + rc = libusb_open(dev, &handle); + if (LIBUSB_SUCCESS != rc) { + printf("Could not open USB device\n"); + } + } else if (LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT == event) { + if (handle) { + libusb_close(handle); + handle = NULL; + } + } else { + printf("Unhandled event %d\n", event); + } + count++; + + return 0; +} + +int main (void) { + libusb_hotplug_callback_handle handle; + int rc; + + libusb_init(NULL); + + rc = libusb_hotplug_register_callback(NULL, LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED | + LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT, 0, 0x045a, 0x5005, + 
LIBUSB_HOTPLUG_MATCH_ANY, hotplug_callback, NULL, + &handle); + if (LIBUSB_SUCCESS != rc) { + printf("Error creating a hotplug callback\n"); + libusb_exit(NULL); + return EXIT_FAILURE; + } + + while (count < 2) { + usleep(10000); + } + + libusb_hotplug_deregister_callback(handle); + libusb_exit(NULL); + + return 0; +} +\endcode + */ + +static int usbi_hotplug_match_cb (struct libusb_device *dev, libusb_hotplug_event event, + struct libusb_hotplug_callback *hotplug_cb) { + struct libusb_context *ctx = dev->ctx; + + /* Handle lazy deregistration of callback */ + if (hotplug_cb->needs_free) { + /* Free callback */ + return 1; + } + + if (!(hotplug_cb->events & event)) { + return 0; + } + + if (LIBUSB_HOTPLUG_MATCH_ANY != hotplug_cb->vendor_id && + hotplug_cb->vendor_id != dev->device_descriptor.idVendor) { + return 0; + } + + if (LIBUSB_HOTPLUG_MATCH_ANY != hotplug_cb->product_id && + hotplug_cb->product_id != dev->device_descriptor.idProduct) { + return 0; + } + + if (LIBUSB_HOTPLUG_MATCH_ANY != hotplug_cb->dev_class && + hotplug_cb->dev_class != dev->device_descriptor.bDeviceClass) { + return 0; + } + + return hotplug_cb->cb (ctx == usbi_default_context ? 
NULL : ctx, + dev, event, hotplug_cb->user_data); +} + +void usbi_hotplug_match(struct libusb_device *dev, libusb_hotplug_event event) { + struct libusb_hotplug_callback *hotplug_cb, *next; + struct libusb_context *ctx = dev->ctx; + + usbi_mutex_lock(&ctx->hotplug_cbs_lock); + + list_for_each_entry_safe(hotplug_cb, next, &ctx->hotplug_cbs, list, struct libusb_hotplug_callback) { + usbi_mutex_unlock(&ctx->hotplug_cbs_lock); + int ret = usbi_hotplug_match_cb (dev, event, hotplug_cb); + usbi_mutex_lock(&ctx->hotplug_cbs_lock); + + if (ret) { + list_del(&hotplug_cb->list); + free(hotplug_cb); + } + } + + usbi_mutex_unlock(&ctx->hotplug_cbs_lock); + + /* loop through and disconnect all open handles for this device */ + if (LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT == event) { + struct libusb_device_handle *handle; + + usbi_mutex_lock(&ctx->open_devs_lock); + list_for_each_entry(handle, &ctx->open_devs, list, struct libusb_device_handle) { + if (dev == handle->dev) { + usbi_handle_disconnect (handle); + } + } + usbi_mutex_unlock(&ctx->open_devs_lock); + } +} + +int API_EXPORTED libusb_hotplug_register_callback(libusb_context *ctx, + libusb_hotplug_event events, + libusb_hotplug_flag flags, + int vendor_id, int product_id, + int dev_class, + libusb_hotplug_callback_fn cb_fn, + void *user_data, libusb_hotplug_callback_handle *handle) { + libusb_hotplug_callback *new_callback; + static int handle_id = 1; + + /* check for hotplug support */ + if (!libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) { + return LIBUSB_ERROR_NOT_SUPPORTED; + } + + /* check for sane values */ + if ((LIBUSB_HOTPLUG_MATCH_ANY != vendor_id && (~0xffff & vendor_id)) || + (LIBUSB_HOTPLUG_MATCH_ANY != product_id && (~0xffff & product_id)) || + (LIBUSB_HOTPLUG_MATCH_ANY != dev_class && (~0xff & dev_class)) || + !cb_fn) { + return LIBUSB_ERROR_INVALID_PARAM; + } + + USBI_GET_CONTEXT(ctx); + + new_callback = (libusb_hotplug_callback *)calloc(1, sizeof (*new_callback)); + if (!new_callback) { + return 
LIBUSB_ERROR_NO_MEM; + } + + new_callback->ctx = ctx; + new_callback->vendor_id = vendor_id; + new_callback->product_id = product_id; + new_callback->dev_class = dev_class; + new_callback->flags = flags; + new_callback->events = events; + new_callback->cb = cb_fn; + new_callback->user_data = user_data; + new_callback->needs_free = 0; + + usbi_mutex_lock(&ctx->hotplug_cbs_lock); + + /* protect the handle by the context hotplug lock. it doesn't matter if the same handle is used for different + contexts only that the handle is unique for this context */ + new_callback->handle = handle_id++; + + list_add(&new_callback->list, &ctx->hotplug_cbs); + + if (flags & LIBUSB_HOTPLUG_ENUMERATE) { + struct libusb_device *dev; + + usbi_mutex_lock(&ctx->usb_devs_lock); + + list_for_each_entry(dev, &ctx->usb_devs, list, struct libusb_device) { + (void) usbi_hotplug_match_cb (dev, LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED, new_callback); + } + + usbi_mutex_unlock(&ctx->usb_devs_lock); + } + + usbi_mutex_unlock(&ctx->hotplug_cbs_lock); + + if (handle) { + *handle = new_callback->handle; + } + + return LIBUSB_SUCCESS; +} + +void API_EXPORTED libusb_hotplug_deregister_callback (struct libusb_context *ctx, libusb_hotplug_callback_handle handle) { + struct libusb_hotplug_callback *hotplug_cb; + + /* check for hotplug support */ + if (!libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG)) { + return; + } + + USBI_GET_CONTEXT(ctx); + + usbi_mutex_lock(&ctx->hotplug_cbs_lock); + list_for_each_entry(hotplug_cb, &ctx->hotplug_cbs, list, + struct libusb_hotplug_callback) { + if (handle == hotplug_cb->handle) { + /* Mark this callback for deregistration */ + hotplug_cb->needs_free = 1; + } + } + usbi_mutex_unlock(&ctx->hotplug_cbs_lock); +} + +void usbi_hotplug_deregister_all(struct libusb_context *ctx) { + struct libusb_hotplug_callback *hotplug_cb, *next; + + usbi_mutex_lock(&ctx->hotplug_cbs_lock); + list_for_each_entry_safe(hotplug_cb, next, &ctx->hotplug_cbs, list, + struct libusb_hotplug_callback) { + 
list_del(&hotplug_cb->list); + free(hotplug_cb); + } + + usbi_mutex_unlock(&ctx->hotplug_cbs_lock); +} diff --git a/compat/libusb-1.0/libusb/hotplug.h b/compat/libusb-1.0/libusb/hotplug.h new file mode 100644 index 0000000..64d4c74 --- /dev/null +++ b/compat/libusb-1.0/libusb/hotplug.h @@ -0,0 +1,77 @@ +/* -*- Mode: C; indent-tabs-mode:nil ; c-basic-offset:8 -*- */ +/* + * Hotplug support for libusb 1.0 + * Copyright (C) 2012 Nathan Hjelm + * Copyright (C) 2012 Peter Stuge + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if !defined(USBI_HOTPLUG_H) +#define USBI_HOTPLUG_H + +/** \ingroup hotplug + * The hotplug callback structure. The user populates this structure with + * libusb_hotplug_prepare_callback() and then calls libusb_hotplug_register_callback() + * to receive notification of hotplug events. 
+ */ +struct libusb_hotplug_callback { + /** Context this callback is associated with */ + struct libusb_context *ctx; + + /** Vendor ID to match or LIBUSB_HOTPLUG_MATCH_ANY */ + int vendor_id; + + /** Product ID to match or LIBUSB_HOTPLUG_MATCH_ANY */ + int product_id; + + /** Device class to match or LIBUSB_HOTPLUG_MATCH_ANY */ + int dev_class; + + /** Hotplug callback flags */ + libusb_hotplug_flag flags; + + /** Event(s) that will trigger this callback */ + libusb_hotplug_event events; + + /** Callback function to invoke for matching event/device */ + libusb_hotplug_callback_fn cb; + + /** Handle for this callback (used to match on deregister) */ + libusb_hotplug_callback_handle handle; + + /** User data that will be passed to the callback function */ + void *user_data; + + /** Callback is marked for deletion */ + int needs_free; + + /** List this callback is registered in (ctx->hotplug_cbs) */ + struct list_head list; +}; + +typedef struct libusb_hotplug_callback libusb_hotplug_callback; + +struct libusb_hotplug_message { + libusb_hotplug_event event; + struct libusb_device *device; +}; + +typedef struct libusb_hotplug_message libusb_hotplug_message; + +void usbi_hotplug_deregister_all(struct libusb_context *ctx); +void usbi_hotplug_match(struct libusb_device *dev, libusb_hotplug_event event); + +#endif diff --git a/compat/libusb-1.0/libusb/io.c b/compat/libusb-1.0/libusb/io.c new file mode 100644 index 0000000..55b17f1 --- /dev/null +++ b/compat/libusb-1.0/libusb/io.c @@ -0,0 +1,2503 @@ +/* + * I/O functions for libusb + * Copyright (C) 2007-2009 Daniel Drake + * Copyright (c) 2001 Johannes Erdfelt + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_SYS_TIME_H +#include +#endif + +#ifdef USBI_TIMERFD_AVAILABLE +#include +#endif + +#include "libusbi.h" +#include "hotplug.h" + +/** + * \page io Synchronous and asynchronous device I/O + * + * \section intro Introduction + * + * If you're using libusb in your application, you're probably wanting to + * perform I/O with devices - you want to perform USB data transfers. + * + * libusb offers two separate interfaces for device I/O. This page aims to + * introduce the two in order to help you decide which one is more suitable + * for your application. You can also choose to use both interfaces in your + * application by considering each transfer on a case-by-case basis. + * + * Once you have read through the following discussion, you should consult the + * detailed API documentation pages for the details: + * - \ref syncio + * - \ref asyncio + * + * \section theory Transfers at a logical level + * + * At a logical level, USB transfers typically happen in two parts. For + * example, when reading data from a endpoint: + * -# A request for data is sent to the device + * -# Some time later, the incoming data is received by the host + * + * or when writing data to an endpoint: + * + * -# The data is sent to the device + * -# Some time later, the host receives acknowledgement from the device that + * the data has been transferred. + * + * There may be an indefinite delay between the two steps. 
Consider a + * fictional USB input device with a button that the user can press. In order + * to determine when the button is pressed, you would likely submit a request + * to read data on a bulk or interrupt endpoint and wait for data to arrive. + * Data will arrive when the button is pressed by the user, which is + * potentially hours later. + * + * libusb offers both a synchronous and an asynchronous interface to performing + * USB transfers. The main difference is that the synchronous interface + * combines both steps indicated above into a single function call, whereas + * the asynchronous interface separates them. + * + * \section sync The synchronous interface + * + * The synchronous I/O interface allows you to perform a USB transfer with + * a single function call. When the function call returns, the transfer has + * completed and you can parse the results. + * + * If you have used the libusb-0.1 before, this I/O style will seem familar to + * you. libusb-0.1 only offered a synchronous interface. + * + * In our input device example, to read button presses you might write code + * in the following style: +\code +unsigned char data[4]; +int actual_length; +int r = libusb_bulk_transfer(handle, LIBUSB_ENDPOINT_IN, data, sizeof(data), &actual_length, 0); +if (r == 0 && actual_length == sizeof(data)) { + // results of the transaction can now be found in the data buffer + // parse them here and report button press +} else { + error(); +} +\endcode + * + * The main advantage of this model is simplicity: you did everything with + * a single simple function call. + * + * However, this interface has its limitations. Your application will sleep + * inside libusb_bulk_transfer() until the transaction has completed. If it + * takes the user 3 hours to press the button, your application will be + * sleeping for that long. Execution will be tied up inside the library - + * the entire thread will be useless for that duration. 
+ * + * Another issue is that by tieing up the thread with that single transaction + * there is no possibility of performing I/O with multiple endpoints and/or + * multiple devices simultaneously, unless you resort to creating one thread + * per transaction. + * + * Additionally, there is no opportunity to cancel the transfer after the + * request has been submitted. + * + * For details on how to use the synchronous API, see the + * \ref syncio "synchronous I/O API documentation" pages. + * + * \section async The asynchronous interface + * + * Asynchronous I/O is the most significant new feature in libusb-1.0. + * Although it is a more complex interface, it solves all the issues detailed + * above. + * + * Instead of providing which functions that block until the I/O has complete, + * libusb's asynchronous interface presents non-blocking functions which + * begin a transfer and then return immediately. Your application passes a + * callback function pointer to this non-blocking function, which libusb will + * call with the results of the transaction when it has completed. + * + * Transfers which have been submitted through the non-blocking functions + * can be cancelled with a separate function call. + * + * The non-blocking nature of this interface allows you to be simultaneously + * performing I/O to multiple endpoints on multiple devices, without having + * to use threads. + * + * This added flexibility does come with some complications though: + * - In the interest of being a lightweight library, libusb does not create + * threads and can only operate when your application is calling into it. Your + * application must call into libusb from it's main loop when events are ready + * to be handled, or you must use some other scheme to allow libusb to + * undertake whatever work needs to be done. + * - libusb also needs to be called into at certain fixed points in time in + * order to accurately handle transfer timeouts. + * - Memory handling becomes more complex. 
You cannot use stack memory unless + * the function with that stack is guaranteed not to return until the transfer + * callback has finished executing. + * - You generally lose some linearity from your code flow because submitting + * the transfer request is done in a separate function from where the transfer + * results are handled. This becomes particularly obvious when you want to + * submit a second transfer based on the results of an earlier transfer. + * + * Internally, libusb's synchronous interface is expressed in terms of function + * calls to the asynchronous interface. + * + * For details on how to use the asynchronous API, see the + * \ref asyncio "asynchronous I/O API" documentation pages. + */ + + +/** + * \page packetoverflow Packets and overflows + * + * \section packets Packet abstraction + * + * The USB specifications describe how data is transmitted in packets, with + * constraints on packet size defined by endpoint descriptors. The host must + * not send data payloads larger than the endpoint's maximum packet size. + * + * libusb and the underlying OS abstract out the packet concept, allowing you + * to request transfers of any size. Internally, the request will be divided + * up into correctly-sized packets. You do not have to be concerned with + * packet sizes, but there is one exception when considering overflows. + * + * \section overflow Bulk/interrupt transfer overflows + * + * When requesting data on a bulk endpoint, libusb requires you to supply a + * buffer and the maximum number of bytes of data that libusb can put in that + * buffer. However, the size of the buffer is not communicated to the device - + * the device is just asked to send any amount of data. + * + * There is no problem if the device sends an amount of data that is less than + * or equal to the buffer size. libusb reports this condition to you through + * the \ref libusb_transfer::actual_length "libusb_transfer.actual_length" + * field. 
+ * + * Problems may occur if the device attempts to send more data than can fit in + * the buffer. libusb reports LIBUSB_TRANSFER_OVERFLOW for this condition but + * other behaviour is largely undefined: actual_length may or may not be + * accurate, the chunk of data that can fit in the buffer (before overflow) + * may or may not have been transferred. + * + * Overflows are nasty, but can be avoided. Even though you were told to + * ignore packets above, think about the lower level details: each transfer is + * split into packets (typically small, with a maximum size of 512 bytes). + * Overflows can only happen if the final packet in an incoming data transfer + * is smaller than the actual packet that the device wants to transfer. + * Therefore, you will never see an overflow if your transfer buffer size is a + * multiple of the endpoint's packet size: the final packet will either + * fill up completely or will be only partially filled. + */ + +/** + * @defgroup asyncio Asynchronous device I/O + * + * This page details libusb's asynchronous (non-blocking) API for USB device + * I/O. This interface is very powerful but is also quite complex - you will + * need to read this page carefully to understand the necessary considerations + * and issues surrounding use of this interface. Simplistic applications + * may wish to consider the \ref syncio "synchronous I/O API" instead. + * + * The asynchronous interface is built around the idea of separating transfer + * submission and handling of transfer completion (the synchronous model + * combines both of these into one). There may be a long delay between + * submission and completion, however the asynchronous submission function + * is non-blocking so will return control to your application during that + * potentially long delay. 
+ * + * \section asyncabstraction Transfer abstraction + * + * For the asynchronous I/O, libusb implements the concept of a generic + * transfer entity for all types of I/O (control, bulk, interrupt, + * isochronous). The generic transfer object must be treated slightly + * differently depending on which type of I/O you are performing with it. + * + * This is represented by the public libusb_transfer structure type. + * + * \section asynctrf Asynchronous transfers + * + * We can view asynchronous I/O as a 5 step process: + * -# Allocation: allocate a libusb_transfer + * -# Filling: populate the libusb_transfer instance with information + * about the transfer you wish to perform + * -# Submission: ask libusb to submit the transfer + * -# Completion handling: examine transfer results in the + * libusb_transfer structure + * -# Deallocation: clean up resources + * + * + * \subsection asyncalloc Allocation + * + * This step involves allocating memory for a USB transfer. This is the + * generic transfer object mentioned above. At this stage, the transfer + * is "blank" with no details about what type of I/O it will be used for. + * + * Allocation is done with the libusb_alloc_transfer() function. You must use + * this function rather than allocating your own transfers. + * + * \subsection asyncfill Filling + * + * This step is where you take a previously allocated transfer and fill it + * with information to determine the message type and direction, data buffer, + * callback function, etc. + * + * You can either fill the required fields yourself or you can use the + * helper functions: libusb_fill_control_transfer(), libusb_fill_bulk_transfer() + * and libusb_fill_interrupt_transfer(). + * + * \subsection asyncsubmit Submission + * + * When you have allocated a transfer and filled it, you can submit it using + * libusb_submit_transfer(). This function returns immediately but can be + * regarded as firing off the I/O request in the background. 
+ * + * \subsection asynccomplete Completion handling + * + * After a transfer has been submitted, one of four things can happen to it: + * + * - The transfer completes (i.e. some data was transferred) + * - The transfer has a timeout and the timeout expires before all data is + * transferred + * - The transfer fails due to an error + * - The transfer is cancelled + * + * Each of these will cause the user-specified transfer callback function to + * be invoked. It is up to the callback function to determine which of the + * above actually happened and to act accordingly. + * + * The user-specified callback is passed a pointer to the libusb_transfer + * structure which was used to setup and submit the transfer. At completion + * time, libusb has populated this structure with results of the transfer: + * success or failure reason, number of bytes of data transferred, etc. See + * the libusb_transfer structure documentation for more information. + * + * \subsection Deallocation + * + * When a transfer has completed (i.e. the callback function has been invoked), + * you are advised to free the transfer (unless you wish to resubmit it, see + * below). Transfers are deallocated with libusb_free_transfer(). + * + * It is undefined behaviour to free a transfer which has not completed. + * + * \section asyncresubmit Resubmission + * + * You may be wondering why allocation, filling, and submission are all + * separated above where they could reasonably be combined into a single + * operation. + * + * The reason for separation is to allow you to resubmit transfers without + * having to allocate new ones every time. This is especially useful for + * common situations dealing with interrupt endpoints - you allocate one + * transfer, fill and submit it, and when it returns with results you just + * resubmit it for the next interrupt. 
+ * + * \section asynccancel Cancellation + * + * Another advantage of using the asynchronous interface is that you have + * the ability to cancel transfers which have not yet completed. This is + * done by calling the libusb_cancel_transfer() function. + * + * libusb_cancel_transfer() is asynchronous/non-blocking in itself. When the + * cancellation actually completes, the transfer's callback function will + * be invoked, and the callback function should check the transfer status to + * determine that it was cancelled. + * + * Freeing the transfer after it has been cancelled but before cancellation + * has completed will result in undefined behaviour. + * + * When a transfer is cancelled, some of the data may have been transferred. + * libusb will communicate this to you in the transfer callback. Do not assume + * that no data was transferred. + * + * \section bulk_overflows Overflows on device-to-host bulk/interrupt endpoints + * + * If your device does not have predictable transfer sizes (or it misbehaves), + * your application may submit a request for data on an IN endpoint which is + * smaller than the data that the device wishes to send. In some circumstances + * this will cause an overflow, which is a nasty condition to deal with. See + * the \ref packetoverflow page for discussion. + * + * \section asyncctrl Considerations for control transfers + * + * The libusb_transfer structure is generic and hence does not + * include specific fields for the control-specific setup packet structure. + * + * In order to perform a control transfer, you must place the 8-byte setup + * packet at the start of the data buffer. To simplify this, you could + * cast the buffer pointer to type struct libusb_control_setup, or you can + * use the helper function libusb_fill_control_setup(). 
+ * + * The wLength field placed in the setup packet must be the length you would + * expect to be sent in the setup packet: the length of the payload that + * follows (or the expected maximum number of bytes to receive). However, + * the length field of the libusb_transfer object must be the length of + * the data buffer - i.e. it should be wLength plus the size of + * the setup packet (LIBUSB_CONTROL_SETUP_SIZE). + * + * If you use the helper functions, this is simplified for you: + * -# Allocate a buffer of size LIBUSB_CONTROL_SETUP_SIZE plus the size of the + * data you are sending/requesting. + * -# Call libusb_fill_control_setup() on the data buffer, using the transfer + * request size as the wLength value (i.e. do not include the extra space you + * allocated for the control setup). + * -# If this is a host-to-device transfer, place the data to be transferred + * in the data buffer, starting at offset LIBUSB_CONTROL_SETUP_SIZE. + * -# Call libusb_fill_control_transfer() to associate the data buffer with + * the transfer (and to set the remaining details such as callback and timeout). + * - Note that there is no parameter to set the length field of the transfer. + * The length is automatically inferred from the wLength field of the setup + * packet. + * -# Submit the transfer. + * + * The multi-byte control setup fields (wValue, wIndex and wLength) must + * be given in little-endian byte order (the endianness of the USB bus). + * Endianness conversion is transparently handled by + * libusb_fill_control_setup() which is documented to accept host-endian + * values. + * + * Further considerations are needed when handling transfer completion in + * your callback function: + * - As you might expect, the setup packet will still be sitting at the start + * of the data buffer. + * - If this was a device-to-host transfer, the received data will be sitting + * at offset LIBUSB_CONTROL_SETUP_SIZE into the buffer. 
+ * - The actual_length field of the transfer structure is relative to the + * wLength of the setup packet, rather than the size of the data buffer. So, + * if your wLength was 4, your transfer's length was 12, then you + * should expect an actual_length of 4 to indicate that the data was + * transferred in entirity. + * + * To simplify parsing of setup packets and obtaining the data from the + * correct offset, you may wish to use the libusb_control_transfer_get_data() + * and libusb_control_transfer_get_setup() functions within your transfer + * callback. + * + * Even though control endpoints do not halt, a completed control transfer + * may have a LIBUSB_TRANSFER_STALL status code. This indicates the control + * request was not supported. + * + * \section asyncintr Considerations for interrupt transfers + * + * All interrupt transfers are performed using the polling interval presented + * by the bInterval value of the endpoint descriptor. + * + * \section asynciso Considerations for isochronous transfers + * + * Isochronous transfers are more complicated than transfers to + * non-isochronous endpoints. + * + * To perform I/O to an isochronous endpoint, allocate the transfer by calling + * libusb_alloc_transfer() with an appropriate number of isochronous packets. + * + * During filling, set \ref libusb_transfer::type "type" to + * \ref libusb_transfer_type::LIBUSB_TRANSFER_TYPE_ISOCHRONOUS + * "LIBUSB_TRANSFER_TYPE_ISOCHRONOUS", and set + * \ref libusb_transfer::num_iso_packets "num_iso_packets" to a value less than + * or equal to the number of packets you requested during allocation. + * libusb_alloc_transfer() does not set either of these fields for you, given + * that you might not even use the transfer on an isochronous endpoint. + * + * Next, populate the length field for the first num_iso_packets entries in + * the \ref libusb_transfer::iso_packet_desc "iso_packet_desc" array. 
Section + * 5.6.3 of the USB2 specifications describe how the maximum isochronous + * packet length is determined by the wMaxPacketSize field in the endpoint + * descriptor. + * Two functions can help you here: + * + * - libusb_get_max_iso_packet_size() is an easy way to determine the max + * packet size for an isochronous endpoint. Note that the maximum packet + * size is actually the maximum number of bytes that can be transmitted in + * a single microframe, therefore this function multiplies the maximum number + * of bytes per transaction by the number of transaction opportunities per + * microframe. + * - libusb_set_iso_packet_lengths() assigns the same length to all packets + * within a transfer, which is usually what you want. + * + * For outgoing transfers, you'll obviously fill the buffer and populate the + * packet descriptors in hope that all the data gets transferred. For incoming + * transfers, you must ensure the buffer has sufficient capacity for + * the situation where all packets transfer the full amount of requested data. + * + * Completion handling requires some extra consideration. The + * \ref libusb_transfer::actual_length "actual_length" field of the transfer + * is meaningless and should not be examined; instead you must refer to the + * \ref libusb_iso_packet_descriptor::actual_length "actual_length" field of + * each individual packet. + * + * The \ref libusb_transfer::status "status" field of the transfer is also a + * little misleading: + * - If the packets were submitted and the isochronous data microframes + * completed normally, status will have value + * \ref libusb_transfer_status::LIBUSB_TRANSFER_COMPLETED + * "LIBUSB_TRANSFER_COMPLETED". Note that bus errors and software-incurred + * delays are not counted as transfer errors; the transfer.status field may + * indicate COMPLETED even if some or all of the packets failed. 
Refer to + * the \ref libusb_iso_packet_descriptor::status "status" field of each + * individual packet to determine packet failures. + * - The status field will have value + * \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR + * "LIBUSB_TRANSFER_ERROR" only when serious errors were encountered. + * - Other transfer status codes occur with normal behaviour. + * + * The data for each packet will be found at an offset into the buffer that + * can be calculated as if each prior packet completed in full. The + * libusb_get_iso_packet_buffer() and libusb_get_iso_packet_buffer_simple() + * functions may help you here. + * + * \section asyncmem Memory caveats + * + * In most circumstances, it is not safe to use stack memory for transfer + * buffers. This is because the function that fired off the asynchronous + * transfer may return before libusb has finished using the buffer, and when + * the function returns it's stack gets destroyed. This is true for both + * host-to-device and device-to-host transfers. + * + * The only case in which it is safe to use stack memory is where you can + * guarantee that the function owning the stack space for the buffer does not + * return until after the transfer's callback function has completed. In every + * other case, you need to use heap memory instead. + * + * \section asyncflags Fine control + * + * Through using this asynchronous interface, you may find yourself repeating + * a few simple operations many times. 
You can apply a bitwise OR of certain + * flags to a transfer to simplify certain things: + * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_SHORT_NOT_OK + * "LIBUSB_TRANSFER_SHORT_NOT_OK" results in transfers which transferred + * less than the requested amount of data being marked with status + * \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR "LIBUSB_TRANSFER_ERROR" + * (they would normally be regarded as COMPLETED) + * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_BUFFER + * "LIBUSB_TRANSFER_FREE_BUFFER" allows you to ask libusb to free the transfer + * buffer when freeing the transfer. + * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_TRANSFER + * "LIBUSB_TRANSFER_FREE_TRANSFER" causes libusb to automatically free the + * transfer after the transfer callback returns. + * + * \section asyncevent Event handling + * + * In accordance of the aim of being a lightweight library, libusb does not + * create threads internally. This means that libusb code does not execute + * at any time other than when your application is calling a libusb function. + * However, an asynchronous model requires that libusb perform work at various + * points in time - namely processing the results of previously-submitted + * transfers and invoking the user-supplied callback function. + * + * This gives rise to the libusb_handle_events() function which your + * application must call into when libusb has work do to. This gives libusb + * the opportunity to reap pending transfers, invoke callbacks, etc. + * + * The first issue to discuss here is how your application can figure out + * when libusb has work to do. In fact, there are two naive options which + * do not actually require your application to know this: + * -# Periodically call libusb_handle_events() in non-blocking mode at fixed + * short intervals from your main loop + * -# Repeatedly call libusb_handle_events() in blocking mode from a dedicated + * thread. 
+ * + * The first option is plainly not very nice, and will cause unnecessary + * CPU wakeups leading to increased power usage and decreased battery life. + * The second option is not very nice either, but may be the nicest option + * available to you if the "proper" approach can not be applied to your + * application (read on...). + * + * The recommended option is to integrate libusb with your application main + * event loop. libusb exposes a set of file descriptors which allow you to do + * this. Your main loop is probably already calling poll() or select() or a + * variant on a set of file descriptors for other event sources (e.g. keyboard + * button presses, mouse movements, network sockets, etc). You then add + * libusb's file descriptors to your poll()/select() calls, and when activity + * is detected on such descriptors you know it is time to call + * libusb_handle_events(). + * + * There is one final event handling complication. libusb supports + * asynchronous transfers which time out after a specified time period, and + * this requires that libusb is called into at or after the timeout so that + * the timeout can be handled. So, in addition to considering libusb's file + * descriptors in your main event loop, you must also consider that libusb + * sometimes needs to be called into at fixed points in time even when there + * is no file descriptor activity. + * + * For the details on retrieving the set of file descriptors and determining + * the next timeout, see the \ref poll "polling and timing" API documentation. + */ + +/** + * @defgroup poll Polling and timing + * + * This page documents libusb's functions for polling events and timing. + * These functions are only necessary for users of the + * \ref asyncio "asynchronous API". If you are only using the simpler + * \ref syncio "synchronous API" then you do not need to ever call these + * functions. 
+ * + * The justification for the functionality described here has already been + * discussed in the \ref asyncevent "event handling" section of the + * asynchronous API documentation. In summary, libusb does not create internal + * threads for event processing and hence relies on your application calling + * into libusb at certain points in time so that pending events can be handled. + * In order to know precisely when libusb needs to be called into, libusb + * offers you a set of pollable file descriptors and information about when + * the next timeout expires. + * + * If you are using the asynchronous I/O API, you must take one of the two + * following options, otherwise your I/O will not complete. + * + * \section pollsimple The simple option + * + * If your application revolves solely around libusb and does not need to + * handle other event sources, you can have a program structure as follows: +\code +// initialize libusb +// find and open device +// maybe fire off some initial async I/O + +while (user_has_not_requested_exit) + libusb_handle_events(ctx); + +// clean up and exit +\endcode + * + * With such a simple main loop, you do not have to worry about managing + * sets of file descriptors or handling timeouts. libusb_handle_events() will + * handle those details internally. + * + * \section pollmain The more advanced option + * + * \note This functionality is currently only available on Unix-like platforms. + * On Windows, libusb_get_pollfds() simply returns NULL. Exposing event sources + * on Windows will require some further thought and design. + * + * In more advanced applications, you will already have a main loop which + * is monitoring other event sources: network sockets, X11 events, mouse + * movements, etc. Through exposing a set of file descriptors, libusb is + * designed to cleanly integrate into such main loops. 
+ * + * In addition to polling file descriptors for the other event sources, you + * take a set of file descriptors from libusb and monitor those too. When you + * detect activity on libusb's file descriptors, you call + * libusb_handle_events_timeout() in non-blocking mode. + * + * What's more, libusb may also need to handle events at specific moments in + * time. No file descriptor activity is generated at these times, so your + * own application needs to be continually aware of when the next one of these + * moments occurs (through calling libusb_get_next_timeout()), and then it + * needs to call libusb_handle_events_timeout() in non-blocking mode when + * these moments occur. This means that you need to adjust your + * poll()/select() timeout accordingly. + * + * libusb provides you with a set of file descriptors to poll and expects you + * to poll all of them, treating them as a single entity. The meaning of each + * file descriptor in the set is an internal implementation detail, + * platform-dependent and may vary from release to release. Don't try and + * interpret the meaning of the file descriptors, just do as libusb indicates, + * polling all of them at once. 
+ * + * In pseudo-code, you want something that looks like: +\code +// initialise libusb + +libusb_get_pollfds(ctx) +while (user has not requested application exit) { + libusb_get_next_timeout(ctx); + poll(on libusb file descriptors plus any other event sources of interest, + using a timeout no larger than the value libusb just suggested) + if (poll() indicated activity on libusb file descriptors) + libusb_handle_events_timeout(ctx, &zero_tv); + if (time has elapsed to or beyond the libusb timeout) + libusb_handle_events_timeout(ctx, &zero_tv); + // handle events from other sources here +} + +// clean up and exit +\endcode + * + * \subsection polltime Notes on time-based events + * + * The above complication with having to track time and call into libusb at + * specific moments is a bit of a headache. For maximum compatibility, you do + * need to write your main loop as above, but you may decide that you can + * restrict the supported platforms of your application and get away with + * a more simplistic scheme. 
+ * + * These time-based event complications are \b not required on the following + * platforms: + * - Darwin + * - Linux, provided that the following version requirements are satisfied: + * - Linux v2.6.27 or newer, compiled with timerfd support + * - glibc v2.9 or newer + * - libusb v1.0.5 or newer + * + * Under these configurations, libusb_get_next_timeout() will \em always return + * 0, so your main loop can be simplified to: +\code +// initialise libusb + +libusb_get_pollfds(ctx) +while (user has not requested application exit) { + poll(on libusb file descriptors plus any other event sources of interest, + using any timeout that you like) + if (poll() indicated activity on libusb file descriptors) + libusb_handle_events_timeout(ctx, &zero_tv); + // handle events from other sources here +} + +// clean up and exit +\endcode + * + * Do remember that if you simplify your main loop to the above, you will + * lose compatibility with some platforms (including legacy Linux platforms, + * and any future platforms supported by libusb which may have time-based + * event requirements). The resultant problems will likely appear as + * strange bugs in your application. + * + * You can use the libusb_pollfds_handle_timeouts() function to do a runtime + * check to see if it is safe to ignore the time-based event complications. + * If your application has taken the shortcut of ignoring libusb's next timeout + * in your main loop, then you are advised to check the return value of + * libusb_pollfds_handle_timeouts() during application startup, and to abort + * if the platform does suffer from these timing complications. + * + * \subsection fdsetchange Changes in the file descriptor set + * + * The set of file descriptors that libusb uses as event sources may change + * during the life of your application. 
Rather than having to repeatedly + * call libusb_get_pollfds(), you can set up notification functions for when + * the file descriptor set changes using libusb_set_pollfd_notifiers(). + * + * \subsection mtissues Multi-threaded considerations + * + * Unfortunately, the situation is complicated further when multiple threads + * come into play. If two threads are monitoring the same file descriptors, + * the fact that only one thread will be woken up when an event occurs causes + * some headaches. + * + * The events lock, event waiters lock, and libusb_handle_events_locked() + * entities are added to solve these problems. You do not need to be concerned + * with these entities otherwise. + * + * See the extra documentation: \ref mtasync + */ + +/** \page mtasync Multi-threaded applications and asynchronous I/O + * + * libusb is a thread-safe library, but extra considerations must be applied + * to applications which interact with libusb from multiple threads. + * + * The underlying issue that must be addressed is that all libusb I/O + * revolves around monitoring file descriptors through the poll()/select() + * system calls. This is directly exposed at the + * \ref asyncio "asynchronous interface" but it is important to note that the + * \ref syncio "synchronous interface" is implemented on top of the + * asynchonrous interface, therefore the same considerations apply. + * + * The issue is that if two or more threads are concurrently calling poll() + * or select() on libusb's file descriptors then only one of those threads + * will be woken up when an event arrives. The others will be completely + * oblivious that anything has happened. + * + * Consider the following pseudo-code, which submits an asynchronous transfer + * then waits for its completion. 
This style is one way you could implement a + * synchronous interface on top of the asynchronous interface (and libusb + * does something similar, albeit more advanced due to the complications + * explained on this page). + * +\code +void cb(struct libusb_transfer *transfer) +{ + int *completed = transfer->user_data; + *completed = 1; +} + +void myfunc() { + struct libusb_transfer *transfer; + unsigned char buffer[LIBUSB_CONTROL_SETUP_SIZE]; + int completed = 0; + + transfer = libusb_alloc_transfer(0); + libusb_fill_control_setup(buffer, + LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_ENDPOINT_OUT, 0x04, 0x01, 0, 0); + libusb_fill_control_transfer(transfer, dev, buffer, cb, &completed, 1000); + libusb_submit_transfer(transfer); + + while (!completed) { + poll(libusb file descriptors, 120*1000); + if (poll indicates activity) + libusb_handle_events_timeout(ctx, &zero_tv); + } + printf("completed!"); + // other code here +} +\endcode + * + * Here we are serializing completion of an asynchronous event + * against a condition - the condition being completion of a specific transfer. + * The poll() loop has a long timeout to minimize CPU usage during situations + * when nothing is happening (it could reasonably be unlimited). + * + * If this is the only thread that is polling libusb's file descriptors, there + * is no problem: there is no danger that another thread will swallow up the + * event that we are interested in. On the other hand, if there is another + * thread polling the same descriptors, there is a chance that it will receive + * the event that we were interested in. In this situation, myfunc() + * will only realise that the transfer has completed on the next iteration of + * the loop, up to 120 seconds later. Clearly a two-minute delay is + * undesirable, and don't even think about using short timeouts to circumvent + * this issue! + * + * The solution here is to ensure that no two threads are ever polling the + * file descriptors at the same time. 
A naive implementation of this would + * impact the capabilities of the library, so libusb offers the scheme + * documented below to ensure no loss of functionality. + * + * Before we go any further, it is worth mentioning that all libusb-wrapped + * event handling procedures fully adhere to the scheme documented below. + * This includes libusb_handle_events() and its variants, and all the + * synchronous I/O functions - libusb hides this headache from you. + * + * \section Using libusb_handle_events() from multiple threads + * + * Even when only using libusb_handle_events() and synchronous I/O functions, + * you can still have a race condition. You might be tempted to solve the + * above with libusb_handle_events() like so: + * +\code + libusb_submit_transfer(transfer); + + while (!completed) { + libusb_handle_events(ctx); + } + printf("completed!"); +\endcode + * + * This however has a race between the checking of completed and + * libusb_handle_events() acquiring the events lock, so another thread + * could have completed the transfer, resulting in this thread hanging + * until either a timeout or another event occurs. See also commit + * 6696512aade99bb15d6792af90ae329af270eba6 which fixes this in the + * synchronous API implementation of libusb. + * + * Fixing this race requires checking the variable completed only after + * taking the event lock, which defeats the concept of just calling + * libusb_handle_events() without worrying about locking. This is why + * libusb-1.0.9 introduces the new libusb_handle_events_timeout_completed() + * and libusb_handle_events_completed() functions, which handles doing the + * completion check for you after they have acquired the lock: + * +\code + libusb_submit_transfer(transfer); + + while (!completed) { + libusb_handle_events_completed(ctx, &completed); + } + printf("completed!"); +\endcode + * + * This nicely fixes the race in our example. 
Note that if all you want to + * do is submit a single transfer and wait for its completion, then using + * one of the synchronous I/O functions is much easier. + * + * \section eventlock The events lock + * + * The problem is when we consider the fact that libusb exposes file + * descriptors to allow for you to integrate asynchronous USB I/O into + * existing main loops, effectively allowing you to do some work behind + * libusb's back. If you do take libusb's file descriptors and pass them to + * poll()/select() yourself, you need to be aware of the associated issues. + * + * The first concept to be introduced is the events lock. The events lock + * is used to serialize threads that want to handle events, such that only + * one thread is handling events at any one time. + * + * You must take the events lock before polling libusb file descriptors, + * using libusb_lock_events(). You must release the lock as soon as you have + * aborted your poll()/select() loop, using libusb_unlock_events(). + * + * \section threadwait Letting other threads do the work for you + * + * Although the events lock is a critical part of the solution, it is not + * enough on it's own. You might wonder if the following is sufficient... +\code + libusb_lock_events(ctx); + while (!completed) { + poll(libusb file descriptors, 120*1000); + if (poll indicates activity) + libusb_handle_events_timeout(ctx, &zero_tv); + } + libusb_unlock_events(ctx); +\endcode + * ...and the answer is that it is not. This is because the transfer in the + * code shown above may take a long time (say 30 seconds) to complete, and + * the lock is not released until the transfer is completed. + * + * Another thread with similar code that wants to do event handling may be + * working with a transfer that completes after a few milliseconds. 
Despite + * having such a quick completion time, the other thread cannot check that + * status of its transfer until the code above has finished (30 seconds later) + * due to contention on the lock. + * + * To solve this, libusb offers you a mechanism to determine when another + * thread is handling events. It also offers a mechanism to block your thread + * until the event handling thread has completed an event (and this mechanism + * does not involve polling of file descriptors). + * + * After determining that another thread is currently handling events, you + * obtain the event waiters lock using libusb_lock_event_waiters(). + * You then re-check that some other thread is still handling events, and if + * so, you call libusb_wait_for_event(). + * + * libusb_wait_for_event() puts your application to sleep until an event + * occurs, or until a thread releases the events lock. When either of these + * things happen, your thread is woken up, and should re-check the condition + * it was waiting on. It should also re-check that another thread is handling + * events, and if not, it should start handling events itself. + * + * This looks like the following, as pseudo-code: +\code +retry: +if (libusb_try_lock_events(ctx) == 0) { + // we obtained the event lock: do our own event handling + while (!completed) { + if (!libusb_event_handling_ok(ctx)) { + libusb_unlock_events(ctx); + goto retry; + } + poll(libusb file descriptors, 120*1000); + if (poll indicates activity) + libusb_handle_events_locked(ctx, 0); + } + libusb_unlock_events(ctx); +} else { + // another thread is doing event handling. wait for it to signal us that + // an event has completed + libusb_lock_event_waiters(ctx); + + while (!completed) { + // now that we have the event waiters lock, double check that another + // thread is still handling events for us. 
(it may have ceased handling + // events in the time it took us to reach this point) + if (!libusb_event_handler_active(ctx)) { + // whoever was handling events is no longer doing so, try again + libusb_unlock_event_waiters(ctx); + goto retry; + } + + libusb_wait_for_event(ctx, NULL); + } + libusb_unlock_event_waiters(ctx); +} +printf("completed!\n"); +\endcode + * + * A naive look at the above code may suggest that this can only support + * one event waiter (hence a total of 2 competing threads, the other doing + * event handling), because the event waiter seems to have taken the event + * waiters lock while waiting for an event. However, the system does support + * multiple event waiters, because libusb_wait_for_event() actually drops + * the lock while waiting, and reaquires it before continuing. + * + * We have now implemented code which can dynamically handle situations where + * nobody is handling events (so we should do it ourselves), and it can also + * handle situations where another thread is doing event handling (so we can + * piggyback onto them). It is also equipped to handle a combination of + * the two, for example, another thread is doing event handling, but for + * whatever reason it stops doing so before our condition is met, so we take + * over the event handling. + * + * Four functions were introduced in the above pseudo-code. Their importance + * should be apparent from the code shown above. + * -# libusb_try_lock_events() is a non-blocking function which attempts + * to acquire the events lock but returns a failure code if it is contended. + * -# libusb_event_handling_ok() checks that libusb is still happy for your + * thread to be performing event handling. Sometimes, libusb needs to + * interrupt the event handler, and this is how you can check if you have + * been interrupted. If this function returns 0, the correct behaviour is + * for you to give up the event handling lock, and then to repeat the cycle. 
+ * The following libusb_try_lock_events() will fail, so you will become an + * events waiter. For more information on this, read \ref fullstory below. + * -# libusb_handle_events_locked() is a variant of + * libusb_handle_events_timeout() that you can call while holding the + * events lock. libusb_handle_events_timeout() itself implements similar + * logic to the above, so be sure not to call it when you are + * "working behind libusb's back", as is the case here. + * -# libusb_event_handler_active() determines if someone is currently + * holding the events lock + * + * You might be wondering why there is no function to wake up all threads + * blocked on libusb_wait_for_event(). This is because libusb can do this + * internally: it will wake up all such threads when someone calls + * libusb_unlock_events() or when a transfer completes (at the point after its + * callback has returned). + * + * \subsection fullstory The full story + * + * The above explanation should be enough to get you going, but if you're + * really thinking through the issues then you may be left with some more + * questions regarding libusb's internals. If you're curious, read on, and if + * not, skip to the next section to avoid confusing yourself! + * + * The immediate question that may spring to mind is: what if one thread + * modifies the set of file descriptors that need to be polled while another + * thread is doing event handling? + * + * There are 2 situations in which this may happen. + * -# libusb_open() will add another file descriptor to the poll set, + * therefore it is desirable to interrupt the event handler so that it + * restarts, picking up the new descriptor. + * -# libusb_close() will remove a file descriptor from the poll set. There + * are all kinds of race conditions that could arise here, so it is + * important that nobody is doing event handling at this time. 
+ *
+ * libusb handles these issues internally, so application developers do not
+ * have to stop their event handlers while opening/closing devices. Here's how
+ * it works, focusing on the libusb_close() situation first:
+ *
+ * -# During initialization, libusb opens an internal pipe, and it adds the read
+ *    end of this pipe to the set of file descriptors to be polled.
+ * -# During libusb_close(), libusb writes some dummy data on this control pipe.
+ *    This immediately interrupts the event handler. libusb also records
+ *    internally that it is trying to interrupt event handlers for this
+ *    high-priority event.
+ * -# At this point, some of the functions described above start behaving
+ *    differently:
+ *    - libusb_event_handling_ok() starts returning 0, indicating that it is NOT
+ *      OK for event handling to continue.
+ *    - libusb_try_lock_events() starts returning 1, indicating that another
+ *      thread holds the event handling lock, even if the lock is uncontended.
+ *    - libusb_event_handler_active() starts returning 1, indicating that
+ *      another thread is doing event handling, even if that is not true.
+ * -# The above changes in behaviour result in the event handler stopping and
+ *    giving up the events lock very quickly, giving the high-priority
+ *    libusb_close() operation a "free ride" to acquire the events lock. All
+ *    threads that are competing to do event handling become event waiters.
+ * -# With the events lock held inside libusb_close(), libusb can safely remove
+ *    a file descriptor from the poll set, in the safety of knowledge that
+ *    nobody is polling those descriptors or trying to access the poll set.
+ * -# After obtaining the events lock, the close operation completes very
+ *    quickly (usually a matter of milliseconds) and then immediately releases
+ *    the events lock.
+ * -# At the same time, the behaviour of libusb_event_handling_ok() and friends
+ *    reverts to the original, documented behaviour. 
+ * -# The release of the events lock causes the threads that are waiting for + * events to be woken up and to start competing to become event handlers + * again. One of them will succeed; it will then re-obtain the list of poll + * descriptors, and USB I/O will then continue as normal. + * + * libusb_open() is similar, and is actually a more simplistic case. Upon a + * call to libusb_open(): + * + * -# The device is opened and a file descriptor is added to the poll set. + * -# libusb sends some dummy data on the control pipe, and records that it + * is trying to modify the poll descriptor set. + * -# The event handler is interrupted, and the same behaviour change as for + * libusb_close() takes effect, causing all event handling threads to become + * event waiters. + * -# The libusb_open() implementation takes its free ride to the events lock. + * -# Happy that it has successfully paused the events handler, libusb_open() + * releases the events lock. + * -# The event waiter threads are all woken up and compete to become event + * handlers again. The one that succeeds will obtain the list of poll + * descriptors again, which will include the addition of the new device. + * + * \subsection concl Closing remarks + * + * The above may seem a little complicated, but hopefully I have made it clear + * why such complications are necessary. Also, do not forget that this only + * applies to applications that take libusb's file descriptors and integrate + * them into their own polling loops. + * + * You may decide that it is OK for your multi-threaded application to ignore + * some of the rules and locks detailed above, because you don't think that + * two threads can ever be polling the descriptors at the same time. If that + * is the case, then that's good news for you because you don't have to worry. + * But be careful here; remember that the synchronous I/O functions do event + * handling internally. 
If you have one thread doing event handling in a loop
+ * (without implementing the rules and locking semantics documented above)
+ * and another trying to send a synchronous USB transfer, you will end up with
+ * two threads monitoring the same descriptors, and the above-described
+ * undesirable behaviour occurring. The solution is for your polling thread to
+ * play by the rules; the synchronous I/O functions do so, and this will result
+ * in them getting along in perfect harmony.
+ *
+ * If you do have a dedicated thread doing event handling, it is perfectly
+ * legal for it to take the event handling lock for long periods of time. Any
+ * synchronous I/O functions you call from other threads will transparently
+ * fall back to the "event waiters" mechanism detailed above. The only
+ * consideration that your event handling thread must apply is the one related
+ * to libusb_event_handling_ok(): you must call this before every poll(), and
+ * give up the events lock if instructed.
+ */
+
+int usbi_io_init(struct libusb_context *ctx)
+{
+	int r;
+
+	usbi_mutex_init(&ctx->flying_transfers_lock, NULL);
+	usbi_mutex_init(&ctx->pollfds_lock, NULL);
+	usbi_mutex_init(&ctx->pollfd_modify_lock, NULL);
+	usbi_mutex_init_recursive(&ctx->events_lock, NULL);
+	usbi_mutex_init(&ctx->event_waiters_lock, NULL);
+	usbi_cond_init(&ctx->event_waiters_cond, NULL);
+	list_init(&ctx->flying_transfers);
+	list_init(&ctx->pollfds);
+
+	/* FIXME should use an eventfd on kernels that support it */
+	r = usbi_pipe(ctx->ctrl_pipe);
+	if (r < 0) {
+		r = LIBUSB_ERROR_OTHER;
+		goto err;
+	}
+
+	r = usbi_add_pollfd(ctx, ctx->ctrl_pipe[0], POLLIN);
+	if (r < 0)
+		goto err_close_pipe;
+
+	/* create hotplug pipe */
+	r = usbi_pipe(ctx->hotplug_pipe);
+	if (r < 0) {
+		r = LIBUSB_ERROR_OTHER;
+		goto err;
+	}
+
+#ifndef OS_WINDOWS
+	fcntl (ctx->hotplug_pipe[1], F_SETFL, O_NONBLOCK);
+#endif
+	r = usbi_add_pollfd(ctx, ctx->hotplug_pipe[0], POLLIN);
+	if (r < 0)
+		goto err_close_hp_pipe;
+
+#ifdef 
USBI_TIMERFD_AVAILABLE
+	ctx->timerfd = timerfd_create(usbi_backend->get_timerfd_clockid(),
+		TFD_NONBLOCK);
+	if (ctx->timerfd >= 0) {
+		usbi_dbg("using timerfd for timeouts");
+		r = usbi_add_pollfd(ctx, ctx->timerfd, POLLIN);
+		if (r < 0) {
+			usbi_remove_pollfd(ctx, ctx->ctrl_pipe[0]);
+			close(ctx->timerfd);
+			goto err_close_hp_pipe;
+		}
+	} else {
+		usbi_dbg("timerfd not available (code %d error %d)", ctx->timerfd, errno);
+		ctx->timerfd = -1;
+	}
+#endif
+
+	return 0;
+
+err_close_hp_pipe:
+	usbi_close(ctx->hotplug_pipe[0]);
+	usbi_close(ctx->hotplug_pipe[1]);
+err_close_pipe:
+	usbi_close(ctx->ctrl_pipe[0]);
+	usbi_close(ctx->ctrl_pipe[1]);
+err:
+	usbi_mutex_destroy(&ctx->flying_transfers_lock);
+	usbi_mutex_destroy(&ctx->pollfds_lock);
+	usbi_mutex_destroy(&ctx->pollfd_modify_lock);
+	usbi_mutex_destroy(&ctx->events_lock);
+	usbi_mutex_destroy(&ctx->event_waiters_lock);
+	usbi_cond_destroy(&ctx->event_waiters_cond);
+	return r;
+}
+
+void usbi_io_exit(struct libusb_context *ctx)
+{
+	usbi_remove_pollfd(ctx, ctx->ctrl_pipe[0]);
+	usbi_close(ctx->ctrl_pipe[0]);
+	usbi_close(ctx->ctrl_pipe[1]);
+	usbi_remove_pollfd(ctx, ctx->hotplug_pipe[0]);
+	usbi_close(ctx->hotplug_pipe[0]);
+	usbi_close(ctx->hotplug_pipe[1]);
+#ifdef USBI_TIMERFD_AVAILABLE
+	if (usbi_using_timerfd(ctx)) {
+		usbi_remove_pollfd(ctx, ctx->timerfd);
+		close(ctx->timerfd);
+	}
+#endif
+	usbi_mutex_destroy(&ctx->flying_transfers_lock);
+	usbi_mutex_destroy(&ctx->pollfds_lock);
+	usbi_mutex_destroy(&ctx->pollfd_modify_lock);
+	usbi_mutex_destroy(&ctx->events_lock);
+	usbi_mutex_destroy(&ctx->event_waiters_lock);
+	usbi_cond_destroy(&ctx->event_waiters_cond);
+}
+
+static int calculate_timeout(struct usbi_transfer *transfer)
+{
+	int r;
+	struct timespec current_time;
+	unsigned int timeout =
+		USBI_TRANSFER_TO_LIBUSB_TRANSFER(transfer)->timeout;
+
+	if (!timeout)
+		return 0;
+
+	r = usbi_backend->clock_gettime(USBI_CLOCK_MONOTONIC, &current_time);
+	if (r < 0) {
+		usbi_err(ITRANSFER_CTX(transfer),
+			
"failed to read monotonic clock, errno=%d", errno);
+		return r;
+	}
+
+	current_time.tv_sec += timeout / 1000;
+	current_time.tv_nsec += (timeout % 1000) * 1000000;
+
+	while (current_time.tv_nsec >= 1000000000) {
+		current_time.tv_nsec -= 1000000000;
+		current_time.tv_sec++;
+	}
+
+	TIMESPEC_TO_TIMEVAL(&transfer->timeout, &current_time);
+	return 0;
+}
+
+/* add a transfer to the (timeout-sorted) active transfers list.
+ * returns 1 if the transfer has a timeout and it is the timeout next to
+ * expire */
+static int add_to_flying_list(struct usbi_transfer *transfer)
+{
+	struct usbi_transfer *cur;
+	struct timeval *timeout = &transfer->timeout;
+	struct libusb_context *ctx = ITRANSFER_CTX(transfer);
+	int r = 0;
+	int first = 1;
+
+	usbi_mutex_lock(&ctx->flying_transfers_lock);
+
+	/* if we have no other flying transfers, start the list with this one */
+	if (list_empty(&ctx->flying_transfers)) {
+		list_add(&transfer->list, &ctx->flying_transfers);
+		if (timerisset(timeout))
+			r = 1;
+		goto out;
+	}
+
+	/* if we have infinite timeout, append to end of list */
+	if (!timerisset(timeout)) {
+		list_add_tail(&transfer->list, &ctx->flying_transfers);
+		goto out;
+	}
+
+	/* otherwise, find appropriate place in list */
+	list_for_each_entry(cur, &ctx->flying_transfers, list, struct usbi_transfer) {
+		/* find first timeout that occurs after the transfer in question */
+		struct timeval *cur_tv = &cur->timeout;
+
+		if (!timerisset(cur_tv) || (cur_tv->tv_sec > timeout->tv_sec) ||
+				(cur_tv->tv_sec == timeout->tv_sec &&
+					cur_tv->tv_usec > timeout->tv_usec)) {
+			list_add_tail(&transfer->list, &cur->list);
+			r = first;
+			goto out;
+		}
+		first = 0;
+	}
+
+	/* otherwise we need to be inserted at the end */
+	list_add_tail(&transfer->list, &ctx->flying_transfers);
+out:
+	usbi_mutex_unlock(&ctx->flying_transfers_lock);
+	return r;
+}
+
+/** \ingroup asyncio
+ * Allocate a libusb transfer with a specified number of isochronous packet
+ * descriptors. 
The returned transfer is pre-initialized for you. When the new + * transfer is no longer needed, it should be freed with + * libusb_free_transfer(). + * + * Transfers intended for non-isochronous endpoints (e.g. control, bulk, + * interrupt) should specify an iso_packets count of zero. + * + * For transfers intended for isochronous endpoints, specify an appropriate + * number of packet descriptors to be allocated as part of the transfer. + * The returned transfer is not specially initialized for isochronous I/O; + * you are still required to set the + * \ref libusb_transfer::num_iso_packets "num_iso_packets" and + * \ref libusb_transfer::type "type" fields accordingly. + * + * It is safe to allocate a transfer with some isochronous packets and then + * use it on a non-isochronous endpoint. If you do this, ensure that at time + * of submission, num_iso_packets is 0 and that type is set appropriately. + * + * \param iso_packets number of isochronous packet descriptors to allocate + * \returns a newly allocated transfer, or NULL on error + */ +DEFAULT_VISIBILITY +struct libusb_transfer * LIBUSB_CALL libusb_alloc_transfer( + int iso_packets) +{ + size_t os_alloc_size = usbi_backend->transfer_priv_size + + (usbi_backend->add_iso_packet_size * iso_packets); + size_t alloc_size = sizeof(struct usbi_transfer) + + sizeof(struct libusb_transfer) + + (sizeof(struct libusb_iso_packet_descriptor) * iso_packets) + + os_alloc_size; + struct usbi_transfer *itransfer = malloc(alloc_size); + if (!itransfer) + return NULL; + + memset(itransfer, 0, alloc_size); + itransfer->num_iso_packets = iso_packets; + usbi_mutex_init(&itransfer->lock, NULL); + return USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); +} + +/** \ingroup asyncio + * Free a transfer structure. This should be called for all transfers + * allocated with libusb_alloc_transfer(). 
+ * + * If the \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_BUFFER + * "LIBUSB_TRANSFER_FREE_BUFFER" flag is set and the transfer buffer is + * non-NULL, this function will also free the transfer buffer using the + * standard system memory allocator (e.g. free()). + * + * It is legal to call this function with a NULL transfer. In this case, + * the function will simply return safely. + * + * It is not legal to free an active transfer (one which has been submitted + * and has not yet completed). + * + * \param transfer the transfer to free + */ +void API_EXPORTED libusb_free_transfer(struct libusb_transfer *transfer) +{ + struct usbi_transfer *itransfer; + if (!transfer) + return; + + if (transfer->flags & LIBUSB_TRANSFER_FREE_BUFFER && transfer->buffer) + free(transfer->buffer); + + itransfer = LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); + usbi_mutex_destroy(&itransfer->lock); + free(itransfer); +} + +/** \ingroup asyncio + * Submit a transfer. This function will fire off the USB transfer and then + * return immediately. + * + * \param transfer the transfer to submit + * \returns 0 on success + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns LIBUSB_ERROR_BUSY if the transfer has already been submitted. + * \returns LIBUSB_ERROR_NOT_SUPPORTED if the transfer flags are not supported + * by the operating system. 
+ * \returns another LIBUSB_ERROR code on other failure + */ +int API_EXPORTED libusb_submit_transfer(struct libusb_transfer *transfer) +{ + struct libusb_context *ctx = TRANSFER_CTX(transfer); + struct usbi_transfer *itransfer = + LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); + int r; + int first; + int updated_fds; + + usbi_mutex_lock(&itransfer->lock); + itransfer->transferred = 0; + itransfer->flags = 0; + r = calculate_timeout(itransfer); + if (r < 0) { + r = LIBUSB_ERROR_OTHER; + goto out; + } + + first = add_to_flying_list(itransfer); + r = usbi_backend->submit_transfer(itransfer); + if (r) { + usbi_mutex_lock(&ctx->flying_transfers_lock); + list_del(&itransfer->list); + usbi_mutex_unlock(&ctx->flying_transfers_lock); + } +#ifdef USBI_TIMERFD_AVAILABLE + else if (first && usbi_using_timerfd(ctx)) { + /* if this transfer has the lowest timeout of all active transfers, + * rearm the timerfd with this transfer's timeout */ + const struct itimerspec it = { {0, 0}, + { itransfer->timeout.tv_sec, itransfer->timeout.tv_usec * 1000 } }; + usbi_dbg("arm timerfd for timeout in %dms (first in line)", transfer->timeout); + r = timerfd_settime(ctx->timerfd, TFD_TIMER_ABSTIME, &it, NULL); + if (r < 0) + r = LIBUSB_ERROR_OTHER; + } +#else + (void)first; +#endif + +out: + updated_fds = (itransfer->flags & USBI_TRANSFER_UPDATED_FDS); + usbi_mutex_unlock(&itransfer->lock); + if (updated_fds) + usbi_fd_notification(ctx); + return r; +} + +/** \ingroup asyncio + * Asynchronously cancel a previously submitted transfer. + * This function returns immediately, but this does not indicate cancellation + * is complete. Your callback function will be invoked at some later time + * with a transfer status of + * \ref libusb_transfer_status::LIBUSB_TRANSFER_CANCELLED + * "LIBUSB_TRANSFER_CANCELLED." + * + * \param transfer the transfer to cancel + * \returns 0 on success + * \returns LIBUSB_ERROR_NOT_FOUND if the transfer is already complete or + * cancelled. 
+ * \returns a LIBUSB_ERROR code on failure + */ +int API_EXPORTED libusb_cancel_transfer(struct libusb_transfer *transfer) +{ + struct usbi_transfer *itransfer = + LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer); + int r; + + usbi_dbg(""); + usbi_mutex_lock(&itransfer->lock); + r = usbi_backend->cancel_transfer(itransfer); + if (r < 0) { + if (r != LIBUSB_ERROR_NOT_FOUND) + usbi_err(TRANSFER_CTX(transfer), + "cancel transfer failed error %d", r); + else + usbi_dbg("cancel transfer failed error %d", r); + + if (r == LIBUSB_ERROR_NO_DEVICE) + itransfer->flags |= USBI_TRANSFER_DEVICE_DISAPPEARED; + } + + itransfer->flags |= USBI_TRANSFER_CANCELLING; + + usbi_mutex_unlock(&itransfer->lock); + return r; +} + +#ifdef USBI_TIMERFD_AVAILABLE +static int disarm_timerfd(struct libusb_context *ctx) +{ + const struct itimerspec disarm_timer = { { 0, 0 }, { 0, 0 } }; + int r; + + usbi_dbg(""); + r = timerfd_settime(ctx->timerfd, 0, &disarm_timer, NULL); + if (r < 0) + return LIBUSB_ERROR_OTHER; + else + return 0; +} + +/* iterates through the flying transfers, and rearms the timerfd based on the + * next upcoming timeout. + * must be called with flying_list locked. + * returns 0 if there was no timeout to arm, 1 if the next timeout was armed, + * or a LIBUSB_ERROR code on failure. 
+ */ +static int arm_timerfd_for_next_timeout(struct libusb_context *ctx) +{ + struct usbi_transfer *transfer; + + list_for_each_entry(transfer, &ctx->flying_transfers, list, struct usbi_transfer) { + struct timeval *cur_tv = &transfer->timeout; + + /* if we've reached transfers of infinite timeout, then we have no + * arming to do */ + if (!timerisset(cur_tv)) + return 0; + + /* act on first transfer that is not already cancelled */ + if (!(transfer->flags & USBI_TRANSFER_TIMED_OUT)) { + int r; + const struct itimerspec it = { {0, 0}, + { cur_tv->tv_sec, cur_tv->tv_usec * 1000 } }; + usbi_dbg("next timeout originally %dms", USBI_TRANSFER_TO_LIBUSB_TRANSFER(transfer)->timeout); + r = timerfd_settime(ctx->timerfd, TFD_TIMER_ABSTIME, &it, NULL); + if (r < 0) + return LIBUSB_ERROR_OTHER; + return 1; + } + } + + return 0; +} +#else +static int disarm_timerfd(struct libusb_context *ctx) +{ + (void)ctx; + return 0; +} +static int arm_timerfd_for_next_timeout(struct libusb_context *ctx) +{ + (void)ctx; + return 0; +} +#endif + +/* Handle completion of a transfer (completion might be an error condition). + * This will invoke the user-supplied callback function, which may end up + * freeing the transfer. Therefore you cannot use the transfer structure + * after calling this function, and you should free all backend-specific + * data before calling it. + * Do not call this function with the usbi_transfer lock held. User-specified + * callback functions may attempt to directly resubmit the transfer, which + * will attempt to take the lock. */ +int usbi_handle_transfer_completion(struct usbi_transfer *itransfer, + enum libusb_transfer_status status) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = TRANSFER_CTX(transfer); + uint8_t flags; + int r = 0; + + /* FIXME: could be more intelligent with the timerfd here. 
we don't need + * to disarm the timerfd if there was no timer running, and we only need + * to rearm the timerfd if the transfer that expired was the one with + * the shortest timeout. */ + + usbi_mutex_lock(&ctx->flying_transfers_lock); + /* FIXME: Sanity check for some race where this entry has already been + * removed! */ + if ((&itransfer->list)->next) + list_del(&itransfer->list); + if (usbi_using_timerfd(ctx)) { + r = arm_timerfd_for_next_timeout(ctx); + if (0 == r) + r = disarm_timerfd(ctx); + } + usbi_mutex_unlock(&ctx->flying_transfers_lock); + if (r < 0) + return r; + + if (status == LIBUSB_TRANSFER_COMPLETED + && transfer->flags & LIBUSB_TRANSFER_SHORT_NOT_OK) { + int rqlen = transfer->length; + if (transfer->type == LIBUSB_TRANSFER_TYPE_CONTROL) + rqlen -= LIBUSB_CONTROL_SETUP_SIZE; + if (rqlen != itransfer->transferred) { + usbi_dbg("interpreting short transfer as error"); + status = LIBUSB_TRANSFER_ERROR; + } + } + + flags = transfer->flags; + transfer->status = status; + transfer->actual_length = itransfer->transferred; + usbi_dbg("transfer %p has callback %p", transfer, transfer->callback); + if (transfer->callback) + transfer->callback(transfer); + /* transfer might have been freed by the above call, do not use from + * this point. */ + if (flags & LIBUSB_TRANSFER_FREE_TRANSFER) + libusb_free_transfer(transfer); + usbi_mutex_lock(&ctx->event_waiters_lock); + usbi_cond_broadcast(&ctx->event_waiters_cond); + usbi_mutex_unlock(&ctx->event_waiters_lock); + return 0; +} + +/* Similar to usbi_handle_transfer_completion() but exclusively for transfers + * that were asynchronously cancelled. The same concerns w.r.t. freeing of + * transfers exist here. + * Do not call this function with the usbi_transfer lock held. User-specified + * callback functions may attempt to directly resubmit the transfer, which + * will attempt to take the lock. 
*/ +int usbi_handle_transfer_cancellation(struct usbi_transfer *transfer) +{ + /* if the URB was cancelled due to timeout, report timeout to the user */ + if (transfer->flags & USBI_TRANSFER_TIMED_OUT) { + usbi_dbg("detected timeout cancellation"); + return usbi_handle_transfer_completion(transfer, LIBUSB_TRANSFER_TIMED_OUT); + } + + /* otherwise its a normal async cancel */ + return usbi_handle_transfer_completion(transfer, LIBUSB_TRANSFER_CANCELLED); +} + +/** \ingroup poll + * Attempt to acquire the event handling lock. This lock is used to ensure that + * only one thread is monitoring libusb event sources at any one time. + * + * You only need to use this lock if you are developing an application + * which calls poll() or select() on libusb's file descriptors directly. + * If you stick to libusb's event handling loop functions (e.g. + * libusb_handle_events()) then you do not need to be concerned with this + * locking. + * + * While holding this lock, you are trusted to actually be handling events. + * If you are no longer handling events, you must call libusb_unlock_events() + * as soon as possible. + * + * \param ctx the context to operate on, or NULL for the default context + * \returns 0 if the lock was obtained successfully + * \returns 1 if the lock was not obtained (i.e. another thread holds the lock) + * \see \ref mtasync + */ +int API_EXPORTED libusb_try_lock_events(libusb_context *ctx) +{ + int r; + USBI_GET_CONTEXT(ctx); + + /* is someone else waiting to modify poll fds? 
if so, don't let this thread + * start event handling */ + usbi_mutex_lock(&ctx->pollfd_modify_lock); + r = ctx->pollfd_modify; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + if (r) { + usbi_dbg("someone else is modifying poll fds"); + return 1; + } + + r = usbi_mutex_trylock(&ctx->events_lock); + if (r) + return 1; + + ctx->event_handler_active = 1; + return 0; +} + +/** \ingroup poll + * Acquire the event handling lock, blocking until successful acquisition if + * it is contended. This lock is used to ensure that only one thread is + * monitoring libusb event sources at any one time. + * + * You only need to use this lock if you are developing an application + * which calls poll() or select() on libusb's file descriptors directly. + * If you stick to libusb's event handling loop functions (e.g. + * libusb_handle_events()) then you do not need to be concerned with this + * locking. + * + * While holding this lock, you are trusted to actually be handling events. + * If you are no longer handling events, you must call libusb_unlock_events() + * as soon as possible. + * + * \param ctx the context to operate on, or NULL for the default context + * \see \ref mtasync + */ +void API_EXPORTED libusb_lock_events(libusb_context *ctx) +{ + USBI_GET_CONTEXT(ctx); + usbi_mutex_lock(&ctx->events_lock); + ctx->event_handler_active = 1; +} + +/** \ingroup poll + * Release the lock previously acquired with libusb_try_lock_events() or + * libusb_lock_events(). Releasing this lock will wake up any threads blocked + * on libusb_wait_for_event(). + * + * \param ctx the context to operate on, or NULL for the default context + * \see \ref mtasync + */ +void API_EXPORTED libusb_unlock_events(libusb_context *ctx) +{ + USBI_GET_CONTEXT(ctx); + ctx->event_handler_active = 0; + usbi_mutex_unlock(&ctx->events_lock); + + /* FIXME: perhaps we should be a bit more efficient by not broadcasting + * the availability of the events lock when we are modifying pollfds + * (check ctx->pollfd_modify)? 
*/ + usbi_mutex_lock(&ctx->event_waiters_lock); + usbi_cond_broadcast(&ctx->event_waiters_cond); + usbi_mutex_unlock(&ctx->event_waiters_lock); +} + +/** \ingroup poll + * Determine if it is still OK for this thread to be doing event handling. + * + * Sometimes, libusb needs to temporarily pause all event handlers, and this + * is the function you should use before polling file descriptors to see if + * this is the case. + * + * If this function instructs your thread to give up the events lock, you + * should just continue the usual logic that is documented in \ref mtasync. + * On the next iteration, your thread will fail to obtain the events lock, + * and will hence become an event waiter. + * + * This function should be called while the events lock is held: you don't + * need to worry about the results of this function if your thread is not + * the current event handler. + * + * \param ctx the context to operate on, or NULL for the default context + * \returns 1 if event handling can start or continue + * \returns 0 if this thread must give up the events lock + * \see \ref fullstory "Multi-threaded I/O: the full story" + */ +int API_EXPORTED libusb_event_handling_ok(libusb_context *ctx) +{ + int r; + USBI_GET_CONTEXT(ctx); + + /* is someone else waiting to modify poll fds? if so, don't let this thread + * continue event handling */ + usbi_mutex_lock(&ctx->pollfd_modify_lock); + r = ctx->pollfd_modify; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + if (r) { + usbi_dbg("someone else is modifying poll fds"); + return 0; + } + + return 1; +} + + +/** \ingroup poll + * Determine if an active thread is handling events (i.e. if anyone is holding + * the event handling lock). 
+ * + * \param ctx the context to operate on, or NULL for the default context + * \returns 1 if a thread is handling events + * \returns 0 if there are no threads currently handling events + * \see \ref mtasync + */ +int API_EXPORTED libusb_event_handler_active(libusb_context *ctx) +{ + int r; + USBI_GET_CONTEXT(ctx); + + /* is someone else waiting to modify poll fds? if so, don't let this thread + * start event handling -- indicate that event handling is happening */ + usbi_mutex_lock(&ctx->pollfd_modify_lock); + r = ctx->pollfd_modify; + usbi_mutex_unlock(&ctx->pollfd_modify_lock); + if (r) { + usbi_dbg("someone else is modifying poll fds"); + return 1; + } + + return ctx->event_handler_active; +} + +/** \ingroup poll + * Acquire the event waiters lock. This lock is designed to be obtained under + * the situation where you want to be aware when events are completed, but + * some other thread is event handling so calling libusb_handle_events() is not + * allowed. + * + * You then obtain this lock, re-check that another thread is still handling + * events, then call libusb_wait_for_event(). + * + * You only need to use this lock if you are developing an application + * which calls poll() or select() on libusb's file descriptors directly, + * and may potentially be handling events from 2 threads simultaenously. + * If you stick to libusb's event handling loop functions (e.g. + * libusb_handle_events()) then you do not need to be concerned with this + * locking. + * + * \param ctx the context to operate on, or NULL for the default context + * \see \ref mtasync + */ +void API_EXPORTED libusb_lock_event_waiters(libusb_context *ctx) +{ + USBI_GET_CONTEXT(ctx); + usbi_mutex_lock(&ctx->event_waiters_lock); +} + +/** \ingroup poll + * Release the event waiters lock. 
+ * \param ctx the context to operate on, or NULL for the default context + * \see \ref mtasync + */ +void API_EXPORTED libusb_unlock_event_waiters(libusb_context *ctx) +{ + USBI_GET_CONTEXT(ctx); + usbi_mutex_unlock(&ctx->event_waiters_lock); +} + +/** \ingroup poll + * Wait for another thread to signal completion of an event. Must be called + * with the event waiters lock held, see libusb_lock_event_waiters(). + * + * This function will block until any of the following conditions are met: + * -# The timeout expires + * -# A transfer completes + * -# A thread releases the event handling lock through libusb_unlock_events() + * + * Condition 1 is obvious. Condition 2 unblocks your thread after + * the callback for the transfer has completed. Condition 3 is important + * because it means that the thread that was previously handling events is no + * longer doing so, so if any events are to complete, another thread needs to + * step up and start event handling. + * + * This function releases the event waiters lock before putting your thread + * to sleep, and reacquires the lock as it is being woken up. + * + * \param ctx the context to operate on, or NULL for the default context + * \param tv maximum timeout for this blocking function. A NULL value + * indicates unlimited timeout. 
+ * \returns 0 after a transfer completes or another thread stops event handling + * \returns 1 if the timeout expired + * \see \ref mtasync + */ +int API_EXPORTED libusb_wait_for_event(libusb_context *ctx, struct timeval *tv) +{ + struct timespec timeout; + int r; + + USBI_GET_CONTEXT(ctx); + if (tv == NULL) { + usbi_cond_wait(&ctx->event_waiters_cond, &ctx->event_waiters_lock); + return 0; + } + + r = usbi_backend->clock_gettime(USBI_CLOCK_REALTIME, &timeout); + if (r < 0) { + usbi_err(ctx, "failed to read realtime clock, error %d", errno); + return LIBUSB_ERROR_OTHER; + } + + timeout.tv_sec += tv->tv_sec; + timeout.tv_nsec += tv->tv_usec * 1000; + while (timeout.tv_nsec >= 1000000000) { + timeout.tv_nsec -= 1000000000; + timeout.tv_sec++; + } + + r = usbi_cond_timedwait(&ctx->event_waiters_cond, + &ctx->event_waiters_lock, &timeout); + return (r == ETIMEDOUT); +} + +static void handle_timeout(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + int r; + + itransfer->flags |= USBI_TRANSFER_TIMED_OUT; + r = libusb_cancel_transfer(transfer); + if (r < 0) + usbi_warn(TRANSFER_CTX(transfer), + "async cancel failed %d errno=%d", r, errno); +} + +static int handle_timeouts_locked(struct libusb_context *ctx) +{ + int r; + struct timespec systime_ts; + struct timeval systime; + struct usbi_transfer *transfer; + + if (list_empty(&ctx->flying_transfers)) + return 0; + + /* get current time */ + r = usbi_backend->clock_gettime(USBI_CLOCK_MONOTONIC, &systime_ts); + if (r < 0) + return r; + + TIMESPEC_TO_TIMEVAL(&systime, &systime_ts); + + /* iterate through flying transfers list, finding all transfers that + * have expired timeouts */ + list_for_each_entry(transfer, &ctx->flying_transfers, list, struct usbi_transfer) { + struct timeval *cur_tv = &transfer->timeout; + + /* if we've reached transfers of infinite timeout, we're all done */ + if (!timerisset(cur_tv)) + return 0; + + /* ignore timeouts we've 
already handled */ + if (transfer->flags & (USBI_TRANSFER_TIMED_OUT | USBI_TRANSFER_OS_HANDLES_TIMEOUT)) + continue; + + /* if transfer has non-expired timeout, nothing more to do */ + if ((cur_tv->tv_sec > systime.tv_sec) || + (cur_tv->tv_sec == systime.tv_sec && + cur_tv->tv_usec > systime.tv_usec)) + return 0; + + /* otherwise, we've got an expired timeout to handle */ + handle_timeout(transfer); + } + return 0; +} + +static int handle_timeouts(struct libusb_context *ctx) +{ + int r; + USBI_GET_CONTEXT(ctx); + usbi_mutex_lock(&ctx->flying_transfers_lock); + r = handle_timeouts_locked(ctx); + usbi_mutex_unlock(&ctx->flying_transfers_lock); + return r; +} + +#ifdef USBI_TIMERFD_AVAILABLE +static int handle_timerfd_trigger(struct libusb_context *ctx) +{ + int r; + + r = disarm_timerfd(ctx); + if (r < 0) + return r; + + usbi_mutex_lock(&ctx->flying_transfers_lock); + + /* process the timeout that just happened */ + r = handle_timeouts_locked(ctx); + if (r < 0) + goto out; + + /* arm for next timeout*/ + r = arm_timerfd_for_next_timeout(ctx); + +out: + usbi_mutex_unlock(&ctx->flying_transfers_lock); + return r; +} +#endif + +/* do the actual event handling. assumes that no other thread is concurrently + * doing the same thing. 
*/ +static int handle_events(struct libusb_context *ctx, struct timeval *tv) +{ + int r; + struct usbi_pollfd *ipollfd; + POLL_NFDS_TYPE nfds = 0; + struct pollfd *fds; + int i = -1; + int timeout_ms; + + usbi_mutex_lock(&ctx->pollfds_lock); + list_for_each_entry(ipollfd, &ctx->pollfds, list, struct usbi_pollfd) + nfds++; + + /* TODO: malloc when number of fd's changes, not on every poll */ + fds = malloc(sizeof(*fds) * nfds); + if (!fds) { + usbi_mutex_unlock(&ctx->pollfds_lock); + return LIBUSB_ERROR_NO_MEM; + } + + list_for_each_entry(ipollfd, &ctx->pollfds, list, struct usbi_pollfd) { + struct libusb_pollfd *pollfd = &ipollfd->pollfd; + int fd = pollfd->fd; + i++; + fds[i].fd = fd; + fds[i].events = pollfd->events; + fds[i].revents = 0; + } + usbi_mutex_unlock(&ctx->pollfds_lock); + + timeout_ms = (tv->tv_sec * 1000) + (tv->tv_usec / 1000); + + /* round up to next millisecond */ + if (tv->tv_usec % 1000) + timeout_ms++; + + usbi_dbg("poll() %d fds with timeout in %dms", nfds, timeout_ms); + r = usbi_poll(fds, nfds, timeout_ms); + usbi_dbg("poll() returned %d", r); + if (r == 0) { + free(fds); + return handle_timeouts(ctx); + } else if (r == -1 && errno == EINTR) { + free(fds); + return LIBUSB_ERROR_INTERRUPTED; + } else if (r < 0) { + free(fds); + usbi_err(ctx, "poll failed %d err=%d\n", r, errno); + return LIBUSB_ERROR_IO; + } + + /* fd[0] is always the ctrl pipe */ + if (fds[0].revents) { + /* another thread wanted to interrupt event handling, and it succeeded! 
+ * handle any other events that cropped up at the same time, and + * simply return */ + usbi_dbg("caught a fish on the control pipe"); + + if (r == 1) { + r = 0; + goto handled; + } else { + /* prevent OS backend from trying to handle events on ctrl pipe */ + fds[0].revents = 0; + r--; + } + } + + /* fd[1] is always the hotplug pipe */ + if (libusb_has_capability(LIBUSB_CAP_HAS_HOTPLUG) && fds[1].revents) { + libusb_hotplug_message message; + unsigned int ret; + + /* read the message from the hotplug thread */ + ret = read(ctx->hotplug_pipe[0], &message, sizeof (message)); + if (ret < sizeof(message)) { + ret = LIBUSB_ERROR_OTHER; + goto handled; + } + + usbi_hotplug_match(message.device, message.event); + + /* the device left. dereference the device */ + if (LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT == message.event) + libusb_unref_device(message.device); + + fds[1].revents = 0; + if (1 == r--) + goto handled; + } /* else there shouldn't be anything on this pipe */ + +#ifdef USBI_TIMERFD_AVAILABLE + /* on timerfd configurations, fds[2] is the timerfd */ + if (usbi_using_timerfd(ctx) && fds[2].revents) { + /* timerfd indicates that a timeout has expired */ + int ret; + usbi_dbg("timerfd triggered"); + + ret = handle_timerfd_trigger(ctx); + if (ret < 0) { + /* return error code */ + r = ret; + goto handled; + } else if (r == 1) { + /* no more active file descriptors, nothing more to do */ + r = 0; + goto handled; + } else { + /* more events pending... + * prevent OS backend from trying to handle events on timerfd */ + fds[2].revents = 0; + r--; + } + } +#endif + + r = usbi_backend->handle_events(ctx, fds, nfds, r); + if (r) + usbi_err(ctx, "backend handle_events failed with error %d", r); + +handled: + free(fds); + return r; +} + +/* returns the smallest of: + * 1. timeout of next URB + * 2. 
user-supplied timeout + * returns 1 if there is an already-expired timeout, otherwise returns 0 + * and populates out + */ +static int get_next_timeout(libusb_context *ctx, struct timeval *tv, + struct timeval *out) +{ + struct timeval timeout; + int r = libusb_get_next_timeout(ctx, &timeout); + if (r) { + /* timeout already expired? */ + if (!timerisset(&timeout)) + return 1; + + /* choose the smallest of next URB timeout or user specified timeout */ + if (timercmp(&timeout, tv, <)) + *out = timeout; + else + *out = *tv; + } else { + *out = *tv; + } + return 0; +} + +/** \ingroup poll + * Handle any pending events. + * + * libusb determines "pending events" by checking if any timeouts have expired + * and by checking the set of file descriptors for activity. + * + * If a zero timeval is passed, this function will handle any already-pending + * events and then immediately return in non-blocking style. + * + * If a non-zero timeval is passed and no events are currently pending, this + * function will block waiting for events to handle up until the specified + * timeout. If an event arrives or a signal is raised, this function will + * return early. + * + * If the parameter completed is not NULL then after obtaining the event + * handling lock this function will return immediately if the integer + * pointed to is not 0. This allows for race free waiting for the completion + * of a specific transfer. 
+ * + * \param ctx the context to operate on, or NULL for the default context + * \param tv the maximum time to block waiting for events, or an all zero + * timeval struct for non-blocking mode + * \param completed pointer to completion integer to check, or NULL + * \returns 0 on success, or a LIBUSB_ERROR code on failure + * \see \ref mtasync + */ +int API_EXPORTED libusb_handle_events_timeout_completed(libusb_context *ctx, + struct timeval *tv, int *completed) +{ + int r; + struct timeval poll_timeout; + + USBI_GET_CONTEXT(ctx); + r = get_next_timeout(ctx, tv, &poll_timeout); + if (r) { + /* timeout already expired */ + return handle_timeouts(ctx); + } + +retry: + if (libusb_try_lock_events(ctx) == 0) { + if (completed == NULL || !*completed) { + /* we obtained the event lock: do our own event handling */ + usbi_dbg("doing our own event handling"); + r = handle_events(ctx, &poll_timeout); + } + libusb_unlock_events(ctx); + return r; + } + + /* another thread is doing event handling. wait for thread events that + * notify event completion. */ + libusb_lock_event_waiters(ctx); + + if (completed && *completed) + goto already_done; + + if (!libusb_event_handler_active(ctx)) { + /* we hit a race: whoever was event handling earlier finished in the + * time it took us to reach this point. try the cycle again. */ + libusb_unlock_event_waiters(ctx); + usbi_dbg("event handler was active but went away, retrying"); + goto retry; + } + + usbi_dbg("another thread is doing event handling"); + r = libusb_wait_for_event(ctx, &poll_timeout); + +already_done: + libusb_unlock_event_waiters(ctx); + + if (r < 0) + return r; + else if (r == 1) + return handle_timeouts(ctx); + else + return 0; +} + +/** \ingroup poll + * Handle any pending events + * + * Like libusb_handle_events_timeout_completed(), but without the completed + * parameter, calling this function is equivalent to calling + * libusb_handle_events_timeout_completed() with a NULL completed parameter. 
+ * + * This function is kept primarily for backwards compatibility. + * All new code should call libusb_handle_events_completed() or + * libusb_handle_events_timeout_completed() to avoid race conditions. + * + * \param ctx the context to operate on, or NULL for the default context + * \param tv the maximum time to block waiting for events, or an all zero + * timeval struct for non-blocking mode + * \returns 0 on success, or a LIBUSB_ERROR code on failure + */ +int API_EXPORTED libusb_handle_events_timeout(libusb_context *ctx, + struct timeval *tv) +{ + return libusb_handle_events_timeout_completed(ctx, tv, NULL); +} + +/** \ingroup poll + * Handle any pending events in blocking mode. There is currently a timeout + * hardcoded at 60 seconds but we plan to make it unlimited in future. For + * finer control over whether this function is blocking or non-blocking, or + * for control over the timeout, use libusb_handle_events_timeout_completed() + * instead. + * + * This function is kept primarily for backwards compatibility. + * All new code should call libusb_handle_events_completed() or + * libusb_handle_events_timeout_completed() to avoid race conditions. + * + * \param ctx the context to operate on, or NULL for the default context + * \returns 0 on success, or a LIBUSB_ERROR code on failure + */ +int API_EXPORTED libusb_handle_events(libusb_context *ctx) +{ + struct timeval tv; + tv.tv_sec = 60; + tv.tv_usec = 0; + return libusb_handle_events_timeout_completed(ctx, &tv, NULL); +} + +/** \ingroup poll + * Handle any pending events in blocking mode. + * + * Like libusb_handle_events(), with the addition of a completed parameter + * to allow for race free waiting for the completion of a specific transfer. + * + * See libusb_handle_events_timeout_completed() for details on the completed + * parameter. 
+ * + * \param ctx the context to operate on, or NULL for the default context + * \param completed pointer to completion integer to check, or NULL + * \returns 0 on success, or a LIBUSB_ERROR code on failure + * \see \ref mtasync + */ +int API_EXPORTED libusb_handle_events_completed(libusb_context *ctx, + int *completed) +{ + struct timeval tv; + tv.tv_sec = 60; + tv.tv_usec = 0; + return libusb_handle_events_timeout_completed(ctx, &tv, completed); +} + +/** \ingroup poll + * Handle any pending events by polling file descriptors, without checking if + * any other threads are already doing so. Must be called with the event lock + * held, see libusb_lock_events(). + * + * This function is designed to be called under the situation where you have + * taken the event lock and are calling poll()/select() directly on libusb's + * file descriptors (as opposed to using libusb_handle_events() or similar). + * You detect events on libusb's descriptors, so you then call this function + * with a zero timeout value (while still holding the event lock). + * + * \param ctx the context to operate on, or NULL for the default context + * \param tv the maximum time to block waiting for events, or zero for + * non-blocking mode + * \returns 0 on success, or a LIBUSB_ERROR code on failure + * \see \ref mtasync + */ +int API_EXPORTED libusb_handle_events_locked(libusb_context *ctx, + struct timeval *tv) +{ + int r; + struct timeval poll_timeout; + + USBI_GET_CONTEXT(ctx); + r = get_next_timeout(ctx, tv, &poll_timeout); + if (r) { + /* timeout already expired */ + return handle_timeouts(ctx); + } + + return handle_events(ctx, &poll_timeout); +} + +/** \ingroup poll + * Determines whether your application must apply special timing considerations + * when monitoring libusb's file descriptors. + * + * This function is only useful for applications which retrieve and poll + * libusb's file descriptors in their own main loop (\ref pollmain). 
+ * + * Ordinarily, libusb's event handler needs to be called into at specific + * moments in time (in addition to times when there is activity on the file + * descriptor set). The usual approach is to use libusb_get_next_timeout() + * to learn about when the next timeout occurs, and to adjust your + * poll()/select() timeout accordingly so that you can make a call into the + * library at that time. + * + * Some platforms supported by libusb do not come with this baggage - any + * events relevant to timing will be represented by activity on the file + * descriptor set, and libusb_get_next_timeout() will always return 0. + * This function allows you to detect whether you are running on such a + * platform. + * + * Since v1.0.5. + * + * \param ctx the context to operate on, or NULL for the default context + * \returns 0 if you must call into libusb at times determined by + * libusb_get_next_timeout(), or 1 if all timeout events are handled internally + * or through regular activity on the file descriptors. + * \see \ref pollmain "Polling libusb file descriptors for event handling" + */ +int API_EXPORTED libusb_pollfds_handle_timeouts(libusb_context *ctx) +{ +#if defined(USBI_TIMERFD_AVAILABLE) + USBI_GET_CONTEXT(ctx); + return usbi_using_timerfd(ctx); +#else + (void)ctx; + return 0; +#endif +} + +/** \ingroup poll + * Determine the next internal timeout that libusb needs to handle. You only + * need to use this function if you are calling poll() or select() or similar + * on libusb's file descriptors yourself - you do not need to use it if you + * are calling libusb_handle_events() or a variant directly. + * + * You should call this function in your main loop in order to determine how + * long to wait for select() or poll() to return results. libusb needs to be + * called into at this timeout, so you should use it as an upper bound on + * your select() or poll() call. 
+ * + * When the timeout has expired, call into libusb_handle_events_timeout() + * (perhaps in non-blocking mode) so that libusb can handle the timeout. + * + * This function may return 1 (success) and an all-zero timeval. If this is + * the case, it indicates that libusb has a timeout that has already expired + * so you should call libusb_handle_events_timeout() or similar immediately. + * A return code of 0 indicates that there are no pending timeouts. + * + * On some platforms, this function will always returns 0 (no pending + * timeouts). See \ref polltime. + * + * \param ctx the context to operate on, or NULL for the default context + * \param tv output location for a relative time against the current + * clock in which libusb must be called into in order to process timeout events + * \returns 0 if there are no pending timeouts, 1 if a timeout was returned, + * or LIBUSB_ERROR_OTHER on failure + */ +int API_EXPORTED libusb_get_next_timeout(libusb_context *ctx, + struct timeval *tv) +{ + struct usbi_transfer *transfer; + struct timespec cur_ts; + struct timeval cur_tv; + struct timeval *next_timeout; + int r; + int found = 0; + + USBI_GET_CONTEXT(ctx); + if (usbi_using_timerfd(ctx)) + return 0; + + usbi_mutex_lock(&ctx->flying_transfers_lock); + if (list_empty(&ctx->flying_transfers)) { + usbi_mutex_unlock(&ctx->flying_transfers_lock); + usbi_dbg("no URBs, no timeout!"); + return 0; + } + + /* find next transfer which hasn't already been processed as timed out */ + list_for_each_entry(transfer, &ctx->flying_transfers, list, struct usbi_transfer) { + if (transfer->flags & (USBI_TRANSFER_TIMED_OUT | USBI_TRANSFER_OS_HANDLES_TIMEOUT)) + continue; + + /* no timeout for this transfer? 
*/ + if (!timerisset(&transfer->timeout)) + continue; + + found = 1; + break; + } + usbi_mutex_unlock(&ctx->flying_transfers_lock); + + if (!found) { + usbi_dbg("no URB with timeout or all handled by OS; no timeout!"); + return 0; + } + + next_timeout = &transfer->timeout; + + r = usbi_backend->clock_gettime(USBI_CLOCK_MONOTONIC, &cur_ts); + if (r < 0) { + usbi_err(ctx, "failed to read monotonic clock, errno=%d", errno); + return LIBUSB_ERROR_OTHER; + } + TIMESPEC_TO_TIMEVAL(&cur_tv, &cur_ts); + + if (!timercmp(&cur_tv, next_timeout, <)) { + usbi_dbg("first timeout already expired"); + timerclear(tv); + } else { + timersub(next_timeout, &cur_tv, tv); + usbi_dbg("next timeout in %d.%06ds", tv->tv_sec, tv->tv_usec); + } + + return 1; +} + +/** \ingroup poll + * Register notification functions for file descriptor additions/removals. + * These functions will be invoked for every new or removed file descriptor + * that libusb uses as an event source. + * + * To remove notifiers, pass NULL values for the function pointers. + * + * Note that file descriptors may have been added even before you register + * these notifiers (e.g. at libusb_init() time). + * + * Additionally, note that the removal notifier may be called during + * libusb_exit() (e.g. when it is closing file descriptors that were opened + * and added to the poll set at libusb_init() time). If you don't want this, + * remove the notifiers immediately before calling libusb_exit(). 
+ * + * \param ctx the context to operate on, or NULL for the default context + * \param added_cb pointer to function for addition notifications + * \param removed_cb pointer to function for removal notifications + * \param user_data User data to be passed back to callbacks (useful for + * passing context information) + */ +void API_EXPORTED libusb_set_pollfd_notifiers(libusb_context *ctx, + libusb_pollfd_added_cb added_cb, libusb_pollfd_removed_cb removed_cb, + void *user_data) +{ + USBI_GET_CONTEXT(ctx); + ctx->fd_added_cb = added_cb; + ctx->fd_removed_cb = removed_cb; + ctx->fd_cb_user_data = user_data; +} + +/* Add a file descriptor to the list of file descriptors to be monitored. + * events should be specified as a bitmask of events passed to poll(), e.g. + * POLLIN and/or POLLOUT. */ +int usbi_add_pollfd(struct libusb_context *ctx, int fd, short events) +{ + struct usbi_pollfd *ipollfd = malloc(sizeof(*ipollfd)); + if (!ipollfd) + return LIBUSB_ERROR_NO_MEM; + + usbi_dbg("add fd %d events %d", fd, events); + ipollfd->pollfd.fd = fd; + ipollfd->pollfd.events = events; + usbi_mutex_lock(&ctx->pollfds_lock); + list_add_tail(&ipollfd->list, &ctx->pollfds); + usbi_mutex_unlock(&ctx->pollfds_lock); + + if (ctx->fd_added_cb) + ctx->fd_added_cb(fd, events, ctx->fd_cb_user_data); + return 0; +} + +/* Remove a file descriptor from the list of file descriptors to be polled. 
*/ +void usbi_remove_pollfd(struct libusb_context *ctx, int fd) +{ + struct usbi_pollfd *ipollfd; + int found = 0; + + usbi_dbg("remove fd %d", fd); + usbi_mutex_lock(&ctx->pollfds_lock); + list_for_each_entry(ipollfd, &ctx->pollfds, list, struct usbi_pollfd) + if (ipollfd->pollfd.fd == fd) { + found = 1; + break; + } + + if (!found) { + usbi_dbg("couldn't find fd %d to remove", fd); + usbi_mutex_unlock(&ctx->pollfds_lock); + return; + } + + list_del(&ipollfd->list); + usbi_mutex_unlock(&ctx->pollfds_lock); + free(ipollfd); + if (ctx->fd_removed_cb) + ctx->fd_removed_cb(fd, ctx->fd_cb_user_data); +} + +/** \ingroup poll + * Retrieve a list of file descriptors that should be polled by your main loop + * as libusb event sources. + * + * The returned list is NULL-terminated and should be freed with free() when + * done. The actual list contents must not be touched. + * + * As file descriptors are a Unix-specific concept, this function is not + * available on Windows and will always return NULL. 
+ * + * \param ctx the context to operate on, or NULL for the default context + * \returns a NULL-terminated list of libusb_pollfd structures + * \returns NULL on error + * \returns NULL on platforms where the functionality is not available + */ +DEFAULT_VISIBILITY +const struct libusb_pollfd ** LIBUSB_CALL libusb_get_pollfds( + libusb_context *ctx) +{ +#ifndef OS_WINDOWS + struct libusb_pollfd **ret = NULL; + struct usbi_pollfd *ipollfd; + size_t i = 0; + size_t cnt = 0; + USBI_GET_CONTEXT(ctx); + + usbi_mutex_lock(&ctx->pollfds_lock); + list_for_each_entry(ipollfd, &ctx->pollfds, list, struct usbi_pollfd) + cnt++; + + ret = calloc(cnt + 1, sizeof(struct libusb_pollfd *)); + if (!ret) + goto out; + + list_for_each_entry(ipollfd, &ctx->pollfds, list, struct usbi_pollfd) + ret[i++] = (struct libusb_pollfd *) ipollfd; + ret[cnt] = NULL; + +out: + usbi_mutex_unlock(&ctx->pollfds_lock); + return (const struct libusb_pollfd **) ret; +#else + usbi_err(ctx, "external polling of libusb's internal descriptors "\ + "is not yet supported on Windows platforms"); + return NULL; +#endif +} + +/* Backends call this from handle_events to report disconnection of a device. + * The transfers get cancelled appropriately. + */ +void usbi_handle_disconnect(struct libusb_device_handle *handle) +{ + struct usbi_transfer *cur; + struct usbi_transfer *to_cancel; + + usbi_dbg("device %d.%d", + handle->dev->bus_number, handle->dev->device_address); + + /* terminate all pending transfers with the LIBUSB_TRANSFER_NO_DEVICE + * status code. + * + * this is a bit tricky because: + * 1. we can't do transfer completion while holding flying_transfers_lock + * 2. the transfers list can change underneath us - if we were to build a + * list of transfers to complete (while holding look), the situation + * might be different by the time we come to free them + * + * so we resort to a loop-based approach as below + * FIXME: is this still potentially racy? 
+ */ + + while (1) { + usbi_mutex_lock(&HANDLE_CTX(handle)->flying_transfers_lock); + to_cancel = NULL; + list_for_each_entry(cur, &HANDLE_CTX(handle)->flying_transfers, list, struct usbi_transfer) + if (USBI_TRANSFER_TO_LIBUSB_TRANSFER(cur)->dev_handle == handle) { + to_cancel = cur; + break; + } + usbi_mutex_unlock(&HANDLE_CTX(handle)->flying_transfers_lock); + + if (!to_cancel) + break; + + usbi_backend->clear_transfer_priv(to_cancel); + usbi_handle_transfer_completion(to_cancel, LIBUSB_TRANSFER_NO_DEVICE); + } + +} diff --git a/compat/libusb-1.0/libusb/libusb-1.0.def b/compat/libusb-1.0/libusb/libusb-1.0.def new file mode 100644 index 0000000..1d6a5d2 --- /dev/null +++ b/compat/libusb-1.0/libusb/libusb-1.0.def @@ -0,0 +1,120 @@ +LIBRARY +EXPORTS + libusb_alloc_transfer + libusb_alloc_transfer@4 = libusb_alloc_transfer + libusb_attach_kernel_driver + libusb_attach_kernel_driver@8 = libusb_attach_kernel_driver + libusb_bulk_transfer + libusb_bulk_transfer@24 = libusb_bulk_transfer + libusb_cancel_transfer + libusb_cancel_transfer@4 = libusb_cancel_transfer + libusb_claim_interface + libusb_claim_interface@8 = libusb_claim_interface + libusb_clear_halt + libusb_clear_halt@8 = libusb_clear_halt + libusb_close + libusb_close@4 = libusb_close + libusb_control_transfer + libusb_control_transfer@32 = libusb_control_transfer + libusb_detach_kernel_driver + libusb_detach_kernel_driver@8 = libusb_detach_kernel_driver + libusb_error_name + libusb_error_name@4 = libusb_error_name + libusb_event_handler_active + libusb_event_handler_active@4 = libusb_event_handler_active + libusb_event_handling_ok + libusb_event_handling_ok@4 = libusb_event_handling_ok + libusb_exit + libusb_exit@4 = libusb_exit + libusb_free_config_descriptor + libusb_free_config_descriptor@4 = libusb_free_config_descriptor + libusb_free_device_list + libusb_free_device_list@8 = libusb_free_device_list + libusb_free_transfer + libusb_free_transfer@4 = libusb_free_transfer + 
libusb_get_active_config_descriptor + libusb_get_active_config_descriptor@8 = libusb_get_active_config_descriptor + libusb_get_bus_number + libusb_get_bus_number@4 = libusb_get_bus_number + libusb_get_config_descriptor + libusb_get_config_descriptor@12 = libusb_get_config_descriptor + libusb_get_config_descriptor_by_value + libusb_get_config_descriptor_by_value@12 = libusb_get_config_descriptor_by_value + libusb_get_configuration + libusb_get_configuration@8 = libusb_get_configuration + libusb_get_device + libusb_get_device@4 = libusb_get_device + libusb_get_device_address + libusb_get_device_address@4 = libusb_get_device_address + libusb_get_device_descriptor + libusb_get_device_descriptor@8 = libusb_get_device_descriptor + libusb_get_device_list + libusb_get_device_list@8 = libusb_get_device_list + libusb_get_device_speed + libusb_get_device_speed@4 = libusb_get_device_speed + libusb_get_max_iso_packet_size + libusb_get_max_iso_packet_size@8 = libusb_get_max_iso_packet_size + libusb_get_max_packet_size + libusb_get_max_packet_size@8 = libusb_get_max_packet_size + libusb_get_next_timeout + libusb_get_next_timeout@8 = libusb_get_next_timeout + libusb_get_pollfds + libusb_get_pollfds@4 = libusb_get_pollfds + libusb_get_string_descriptor_ascii + libusb_get_string_descriptor_ascii@16 = libusb_get_string_descriptor_ascii + libusb_get_version + libusb_get_version@0 = libusb_get_version + libusb_handle_events + libusb_handle_events@4 = libusb_handle_events + libusb_handle_events_completed + libusb_handle_events_completed@8 = libusb_handle_events_completed + libusb_handle_events_locked + libusb_handle_events_locked@8 = libusb_handle_events_locked + libusb_handle_events_timeout + libusb_handle_events_timeout@8 = libusb_handle_events_timeout + libusb_handle_events_timeout_completed + libusb_handle_events_timeout_completed@12 = libusb_handle_events_timeout_completed + libusb_has_capability + libusb_has_capability@4 = libusb_has_capability + libusb_init + libusb_init@4 = 
libusb_init + libusb_interrupt_transfer + libusb_interrupt_transfer@24 = libusb_interrupt_transfer + libusb_kernel_driver_active + libusb_kernel_driver_active@8 = libusb_kernel_driver_active + libusb_lock_event_waiters + libusb_lock_event_waiters@4 = libusb_lock_event_waiters + libusb_lock_events + libusb_lock_events@4 = libusb_lock_events + libusb_open + libusb_open@8 = libusb_open + libusb_open_device_with_vid_pid + libusb_open_device_with_vid_pid@12 = libusb_open_device_with_vid_pid + libusb_pollfds_handle_timeouts + libusb_pollfds_handle_timeouts@4 = libusb_pollfds_handle_timeouts + libusb_ref_device + libusb_ref_device@4 = libusb_ref_device + libusb_release_interface + libusb_release_interface@8 = libusb_release_interface + libusb_reset_device + libusb_reset_device@4 = libusb_reset_device + libusb_set_configuration + libusb_set_configuration@8 = libusb_set_configuration + libusb_set_debug + libusb_set_debug@8 = libusb_set_debug + libusb_set_interface_alt_setting + libusb_set_interface_alt_setting@12 = libusb_set_interface_alt_setting + libusb_set_pollfd_notifiers + libusb_set_pollfd_notifiers@16 = libusb_set_pollfd_notifiers + libusb_submit_transfer + libusb_submit_transfer@4 = libusb_submit_transfer + libusb_try_lock_events + libusb_try_lock_events@4 = libusb_try_lock_events + libusb_unlock_event_waiters + libusb_unlock_event_waiters@4 = libusb_unlock_event_waiters + libusb_unlock_events + libusb_unlock_events@4 = libusb_unlock_events + libusb_unref_device + libusb_unref_device@4 = libusb_unref_device + libusb_wait_for_event + libusb_wait_for_event@8 = libusb_wait_for_event diff --git a/compat/libusb-1.0/libusb/libusb-1.0.rc b/compat/libusb-1.0/libusb/libusb-1.0.rc new file mode 100644 index 0000000..a59a430 --- /dev/null +++ b/compat/libusb-1.0/libusb/libusb-1.0.rc @@ -0,0 +1,56 @@ +/* + * For Windows: input this file to the Resoure Compiler to produce a binary + * .res file. 
This is then embedded in the resultant library (like any other + * compilation object). + * The information can then be queried using standard APIs and can also be + * viewed with utilities such as Windows Explorer. + */ +#include "winresrc.h" + +#include "version.h" +#ifndef LIBUSB_VERSIONSTRING +#define LU_STR(s) #s +#define LU_XSTR(s) LU_STR(s) +#if LIBUSB_NANO > 0 +#define LIBUSB_VERSIONSTRING LU_XSTR(LIBUSB_MAJOR) "." LU_XSTR(LIBUSB_MINOR) "." LU_XSTR(LIBUSB_MICRO) "." LU_XSTR(LIBUSB_NANO) LIBUSB_RC "\0" +#else +#define LIBUSB_VERSIONSTRING LU_XSTR(LIBUSB_MAJOR) "." LU_XSTR(LIBUSB_MINOR) "." LU_XSTR(LIBUSB_MICRO) LIBUSB_RC "\0" +#endif +#endif + +VS_VERSION_INFO VERSIONINFO + FILEVERSION LIBUSB_MAJOR,LIBUSB_MINOR,LIBUSB_MICRO,LIBUSB_NANO + PRODUCTVERSION LIBUSB_MAJOR,LIBUSB_MINOR,LIBUSB_MICRO,LIBUSB_NANO + FILEFLAGSMASK 0x3fL +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x40004L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "Comments", "\0" + VALUE "CompanyName", "libusb.org\0" + VALUE "FileDescription", "C library for writing portable USB drivers in userspace\0" + VALUE "FileVersion", LIBUSB_VERSIONSTRING + VALUE "InternalName", "libusb\0" + VALUE "LegalCopyright", "See individual source files, GNU LGPL v2.1 or later.\0" + VALUE "LegalTrademarks", "http://www.gnu.org/licenses/lgpl-2.1.html\0" + VALUE "OriginalFilename", "libusb-1.0.dll\0" + VALUE "PrivateBuild", "\0" + VALUE "ProductName", "libusb-1.0\0" + VALUE "ProductVersion", LIBUSB_VERSIONSTRING + VALUE "SpecialBuild", "\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END diff --git a/compat/libusb-1.0/libusb/libusb.h b/compat/libusb-1.0/libusb/libusb.h new file mode 100644 index 0000000..f3a8f38 --- /dev/null +++ b/compat/libusb-1.0/libusb/libusb.h @@ -0,0 +1,1779 @@ +/* + * Public libusb header file + * Copyright (C) 2007-2008 Daniel Drake + * Copyright (c) 2001 Johannes 
Erdfelt + * Copyright (C) 2012-2013 Nathan Hjelm + * Copyright (C) 2012 Peter Stuge + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef LIBUSB_H +#define LIBUSB_H + +#ifdef _MSC_VER +/* on MS environments, the inline keyword is available in C++ only */ +#define inline __inline +/* ssize_t is also not available (copy/paste from MinGW) */ +#ifndef _SSIZE_T_DEFINED +#define _SSIZE_T_DEFINED +#undef ssize_t +#ifdef _WIN64 + typedef __int64 ssize_t; +#else + typedef int ssize_t; +#endif /* _WIN64 */ +#endif /* _SSIZE_T_DEFINED */ +#endif /* _MSC_VER */ + +/* stdint.h is also not usually available on MS */ +#if defined(_MSC_VER) && (_MSC_VER < 1600) && (!defined(_STDINT)) && (!defined(_STDINT_H)) +typedef unsigned __int8 uint8_t; +typedef unsigned __int16 uint16_t; +typedef unsigned __int32 uint32_t; +#else +#include +#endif + +#include +#include +#include + +#if defined(__linux) || defined(__APPLE__) || defined(__CYGWIN__) +#include +#endif + +/* 'interface' might be defined as a macro on Windows, so we need to + * undefine it so as not to break the current libusb API, because + * libusb_config_descriptor has an 'interface' member + * As this can be problematic if you include windows.h after libusb.h + * in your sources, we force windows.h to be included first. 
 */
+#if defined(_WIN32) || defined(__CYGWIN__)
+#include <windows.h>
+#if defined(interface)
+#undef interface
+#endif
+#endif
+
+/** \def LIBUSB_CALL
+ * \ingroup misc
+ * libusb's Windows calling convention.
+ *
+ * Under Windows, the selection of available compilers and configurations
+ * means that, unlike other platforms, there is not one true calling
+ * convention (calling convention: the manner in which parameters are
+ * passed to functions in the generated assembly code).
+ *
+ * Matching the Windows API itself, libusb uses the WINAPI convention (which
+ * translates to the stdcall convention) and guarantees that the
+ * library is compiled in this way. The public header file also includes
+ * appropriate annotations so that your own software will use the right
+ * convention, even if another convention is being used by default within
+ * your codebase.
+ *
+ * The one consideration that you must apply in your software is to mark
+ * all functions which you use as libusb callbacks with this LIBUSB_CALL
+ * annotation, so that they too get compiled for the correct calling
+ * convention.
+ *
+ * On non-Windows operating systems, this macro is defined as nothing. This
+ * means that you can apply it to your code without worrying about
+ * cross-platform compatibility.
+ */
+/* LIBUSB_CALL must be defined on both definition and declaration of libusb
+ * functions. You'd think that declaration would be enough, but cygwin will
+ * complain about conflicting types unless both are marked this way.
+ * The placement of this macro is important too; it must appear after the
+ * return type, before the function name. See internal documentation for
+ * API_EXPORTED.
+ */
+#if defined(_WIN32) || defined(__CYGWIN__)
+#define LIBUSB_CALL WINAPI
+#else
+#define LIBUSB_CALL
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** \def libusb_cpu_to_le16
+ * \ingroup misc
+ * Convert a 16-bit value from host-endian to little-endian format.
On + * little endian systems, this function does nothing. On big endian systems, + * the bytes are swapped. + * \param x the host-endian value to convert + * \returns the value in little-endian byte order + */ +static inline uint16_t libusb_cpu_to_le16(const uint16_t x) +{ + union { + uint8_t b8[2]; + uint16_t b16; + } _tmp; + _tmp.b8[1] = x >> 8; + _tmp.b8[0] = x & 0xff; + return _tmp.b16; +} + +/** \def libusb_le16_to_cpu + * \ingroup misc + * Convert a 16-bit value from little-endian to host-endian format. On + * little endian systems, this function does nothing. On big endian systems, + * the bytes are swapped. + * \param x the little-endian value to convert + * \returns the value in host-endian byte order + */ +#define libusb_le16_to_cpu libusb_cpu_to_le16 + +/* standard USB stuff */ + +/** \ingroup desc + * Device and/or Interface Class codes */ +enum libusb_class_code { + /** In the context of a \ref libusb_device_descriptor "device descriptor", + * this bDeviceClass value indicates that each interface specifies its + * own class information and all interfaces operate independently. 
+ */ + LIBUSB_CLASS_PER_INTERFACE = 0, + + /** Audio class */ + LIBUSB_CLASS_AUDIO = 1, + + /** Communications class */ + LIBUSB_CLASS_COMM = 2, + + /** Human Interface Device class */ + LIBUSB_CLASS_HID = 3, + + /** Physical */ + LIBUSB_CLASS_PHYSICAL = 5, + + /** Printer class */ + LIBUSB_CLASS_PRINTER = 7, + + /** Image class */ + LIBUSB_CLASS_PTP = 6, /* legacy name from libusb-0.1 usb.h */ + LIBUSB_CLASS_IMAGE = 6, + + /** Mass storage class */ + LIBUSB_CLASS_MASS_STORAGE = 8, + + /** Hub class */ + LIBUSB_CLASS_HUB = 9, + + /** Data class */ + LIBUSB_CLASS_DATA = 10, + + /** Smart Card */ + LIBUSB_CLASS_SMART_CARD = 0x0b, + + /** Content Security */ + LIBUSB_CLASS_CONTENT_SECURITY = 0x0d, + + /** Video */ + LIBUSB_CLASS_VIDEO = 0x0e, + + /** Personal Healthcare */ + LIBUSB_CLASS_PERSONAL_HEALTHCARE = 0x0f, + + /** Diagnostic Device */ + LIBUSB_CLASS_DIAGNOSTIC_DEVICE = 0xdc, + + /** Wireless class */ + LIBUSB_CLASS_WIRELESS = 0xe0, + + /** Application class */ + LIBUSB_CLASS_APPLICATION = 0xfe, + + /** Class is vendor-specific */ + LIBUSB_CLASS_VENDOR_SPEC = 0xff +}; + +/** \ingroup desc + * Descriptor types as defined by the USB specification. */ +enum libusb_descriptor_type { + /** Device descriptor. See libusb_device_descriptor. */ + LIBUSB_DT_DEVICE = 0x01, + + /** Configuration descriptor. See libusb_config_descriptor. */ + LIBUSB_DT_CONFIG = 0x02, + + /** String descriptor */ + LIBUSB_DT_STRING = 0x03, + + /** Interface descriptor. See libusb_interface_descriptor. */ + LIBUSB_DT_INTERFACE = 0x04, + + /** Endpoint descriptor. See libusb_endpoint_descriptor. 
*/ + LIBUSB_DT_ENDPOINT = 0x05, + + /** HID descriptor */ + LIBUSB_DT_HID = 0x21, + + /** HID report descriptor */ + LIBUSB_DT_REPORT = 0x22, + + /** Physical descriptor */ + LIBUSB_DT_PHYSICAL = 0x23, + + /** Hub descriptor */ + LIBUSB_DT_HUB = 0x29, + + /** BOS descriptor */ + LIBUSB_DT_BOS = 0x0f, + + /** Device Capability descriptor */ + LIBUSB_DT_DEVICE_CAPABILITY = 0x10, + + /** SuperSpeed Endpoint Companion descriptor */ + LIBUSB_DT_SS_ENDPOINT_COMPANION = 0x30 +}; + +/* Descriptor sizes per descriptor type */ +#define LIBUSB_DT_DEVICE_SIZE 18 +#define LIBUSB_DT_CONFIG_SIZE 9 +#define LIBUSB_DT_INTERFACE_SIZE 9 +#define LIBUSB_DT_ENDPOINT_SIZE 7 +#define LIBUSB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */ +#define LIBUSB_DT_HUB_NONVAR_SIZE 7 +#define LIBUSB_DT_SS_ENDPOINT_COMPANION_SIZE 6 +#define LIBUSB_DT_BOS_SIZE 5 +#define LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY_SIZE 7 +#define LIBUSB_SS_USB_DEVICE_CAPABILITY_SIZE 10 +#define LIBUSB_DT_BOS_MAX_SIZE ((LIBUSB_DT_BOS_SIZE) + \ + (LIBUSB_USB_2_0_EXTENSION_DEVICE_CAPABILITY_SIZE) + \ + (LIBUSB_SS_USB_DEVICE_CAPABILITY_SIZE)) + +#define LIBUSB_ENDPOINT_ADDRESS_MASK 0x0f /* in bEndpointAddress */ +#define LIBUSB_ENDPOINT_DIR_MASK 0x80 + +/** \ingroup desc + * Endpoint direction. Values for bit 7 of the + * \ref libusb_endpoint_descriptor::bEndpointAddress "endpoint address" scheme. + */ +enum libusb_endpoint_direction { + /** In: device-to-host */ + LIBUSB_ENDPOINT_IN = 0x80, + + /** Out: host-to-device */ + LIBUSB_ENDPOINT_OUT = 0x00 +}; + +#define LIBUSB_TRANSFER_TYPE_MASK 0x03 /* in bmAttributes */ + +/** \ingroup desc + * Endpoint transfer type. Values for bits 0:1 of the + * \ref libusb_endpoint_descriptor::bmAttributes "endpoint attributes" field. 
+ */ +enum libusb_transfer_type { + /** Control endpoint */ + LIBUSB_TRANSFER_TYPE_CONTROL = 0, + + /** Isochronous endpoint */ + LIBUSB_TRANSFER_TYPE_ISOCHRONOUS = 1, + + /** Bulk endpoint */ + LIBUSB_TRANSFER_TYPE_BULK = 2, + + /** Interrupt endpoint */ + LIBUSB_TRANSFER_TYPE_INTERRUPT = 3 +}; + +/** \ingroup misc + * Standard requests, as defined in table 9-3 of the USB2 specifications */ +enum libusb_standard_request { + /** Request status of the specific recipient */ + LIBUSB_REQUEST_GET_STATUS = 0x00, + + /** Clear or disable a specific feature */ + LIBUSB_REQUEST_CLEAR_FEATURE = 0x01, + + /* 0x02 is reserved */ + + /** Set or enable a specific feature */ + LIBUSB_REQUEST_SET_FEATURE = 0x03, + + /* 0x04 is reserved */ + + /** Set device address for all future accesses */ + LIBUSB_REQUEST_SET_ADDRESS = 0x05, + + /** Get the specified descriptor */ + LIBUSB_REQUEST_GET_DESCRIPTOR = 0x06, + + /** Used to update existing descriptors or add new descriptors */ + LIBUSB_REQUEST_SET_DESCRIPTOR = 0x07, + + /** Get the current device configuration value */ + LIBUSB_REQUEST_GET_CONFIGURATION = 0x08, + + /** Set device configuration */ + LIBUSB_REQUEST_SET_CONFIGURATION = 0x09, + + /** Return the selected alternate setting for the specified interface */ + LIBUSB_REQUEST_GET_INTERFACE = 0x0A, + + /** Select an alternate interface for the specified interface */ + LIBUSB_REQUEST_SET_INTERFACE = 0x0B, + + /** Set then report an endpoint's synchronization frame */ + LIBUSB_REQUEST_SYNCH_FRAME = 0x0C, +}; + +/** \ingroup misc + * Request type bits of the + * \ref libusb_control_setup::bmRequestType "bmRequestType" field in control + * transfers. 
*/ +enum libusb_request_type { + /** Standard */ + LIBUSB_REQUEST_TYPE_STANDARD = (0x00 << 5), + + /** Class */ + LIBUSB_REQUEST_TYPE_CLASS = (0x01 << 5), + + /** Vendor */ + LIBUSB_REQUEST_TYPE_VENDOR = (0x02 << 5), + + /** Reserved */ + LIBUSB_REQUEST_TYPE_RESERVED = (0x03 << 5) +}; + +/** \ingroup misc + * Recipient bits of the + * \ref libusb_control_setup::bmRequestType "bmRequestType" field in control + * transfers. Values 4 through 31 are reserved. */ +enum libusb_request_recipient { + /** Device */ + LIBUSB_RECIPIENT_DEVICE = 0x00, + + /** Interface */ + LIBUSB_RECIPIENT_INTERFACE = 0x01, + + /** Endpoint */ + LIBUSB_RECIPIENT_ENDPOINT = 0x02, + + /** Other */ + LIBUSB_RECIPIENT_OTHER = 0x03, +}; + +#define LIBUSB_ISO_SYNC_TYPE_MASK 0x0C + +/** \ingroup desc + * Synchronization type for isochronous endpoints. Values for bits 2:3 of the + * \ref libusb_endpoint_descriptor::bmAttributes "bmAttributes" field in + * libusb_endpoint_descriptor. + */ +enum libusb_iso_sync_type { + /** No synchronization */ + LIBUSB_ISO_SYNC_TYPE_NONE = 0, + + /** Asynchronous */ + LIBUSB_ISO_SYNC_TYPE_ASYNC = 1, + + /** Adaptive */ + LIBUSB_ISO_SYNC_TYPE_ADAPTIVE = 2, + + /** Synchronous */ + LIBUSB_ISO_SYNC_TYPE_SYNC = 3 +}; + +#define LIBUSB_ISO_USAGE_TYPE_MASK 0x30 + +/** \ingroup desc + * Usage type for isochronous endpoints. Values for bits 4:5 of the + * \ref libusb_endpoint_descriptor::bmAttributes "bmAttributes" field in + * libusb_endpoint_descriptor. + */ +enum libusb_iso_usage_type { + /** Data endpoint */ + LIBUSB_ISO_USAGE_TYPE_DATA = 0, + + /** Feedback endpoint */ + LIBUSB_ISO_USAGE_TYPE_FEEDBACK = 1, + + /** Implicit feedback Data endpoint */ + LIBUSB_ISO_USAGE_TYPE_IMPLICIT = 2, +}; + +/** \ingroup desc + * A structure representing the standard USB device descriptor. This + * descriptor is documented in section 9.6.1 of the USB 2.0 specification. + * All multiple-byte fields are represented in host-endian format. 
 */
+struct libusb_device_descriptor {
+	/** Size of this descriptor (in bytes) */
+	uint8_t bLength;
+
+	/** Descriptor type. Will have value
+	 * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE LIBUSB_DT_DEVICE in this
+	 * context. */
+	uint8_t bDescriptorType;
+
+	/** USB specification release number in binary-coded decimal. A value of
+	 * 0x0200 indicates USB 2.0, 0x0110 indicates USB 1.1, etc. */
+	uint16_t bcdUSB;
+
+	/** USB-IF class code for the device. See \ref libusb_class_code. */
+	uint8_t bDeviceClass;
+
+	/** USB-IF subclass code for the device, qualified by the bDeviceClass
+	 * value */
+	uint8_t bDeviceSubClass;
+
+	/** USB-IF protocol code for the device, qualified by the bDeviceClass and
+	 * bDeviceSubClass values */
+	uint8_t bDeviceProtocol;
+
+	/** Maximum packet size for endpoint 0 */
+	uint8_t bMaxPacketSize0;
+
+	/** USB-IF vendor ID */
+	uint16_t idVendor;
+
+	/** USB-IF product ID */
+	uint16_t idProduct;
+
+	/** Device release number in binary-coded decimal */
+	uint16_t bcdDevice;
+
+	/** Index of string descriptor describing manufacturer */
+	uint8_t iManufacturer;
+
+	/** Index of string descriptor describing product */
+	uint8_t iProduct;
+
+	/** Index of string descriptor containing device serial number */
+	uint8_t iSerialNumber;
+
+	/** Number of possible configurations */
+	uint8_t bNumConfigurations;
+};
+
+/** \ingroup desc
+ * A structure representing the superspeed endpoint companion
+ * descriptor. This descriptor is documented in section 9.6.7 of
+ * the USB 3.0 specification. All multiple-byte fields are represented in
+ * host-endian format.
+ */
+struct libusb_ss_endpoint_companion_descriptor {
+
+	/** Size of this descriptor (in bytes) */
+	uint8_t bLength;
+
+	/** Descriptor type. Will have value
+	 * \ref libusb_descriptor_type::LIBUSB_DT_SS_ENDPOINT_COMPANION in
+	 * this context. */
+	uint8_t bDescriptorType;
+
+
+	/** The maximum number of packets the endpoint can send or
+	 * receive as part of a burst.
*/ + uint8_t bMaxBurst; + + /** In bulk EP: bits 4:0 represents the maximum number of + * streams the EP supports. In isochronous EP: bits 1:0 + * represents the Mult - a zero based value that determines + * the maximum number of packets within a service interval */ + uint8_t bmAttributes; + + /** The total number of bytes this EP will transfer every + * service interval. valid only for periodic EPs. */ + uint16_t wBytesPerInterval; +}; + +/** \ingroup desc + * A structure representing the standard USB endpoint descriptor. This + * descriptor is documented in section 9.6.3 of the USB 2.0 specification. + * All multiple-byte fields are represented in host-endian format. + */ +struct libusb_endpoint_descriptor { + /** Size of this descriptor (in bytes) */ + uint8_t bLength; + + /** Descriptor type. Will have value + * \ref libusb_descriptor_type::LIBUSB_DT_ENDPOINT LIBUSB_DT_ENDPOINT in + * this context. */ + uint8_t bDescriptorType; + + /** The address of the endpoint described by this descriptor. Bits 0:3 are + * the endpoint number. Bits 4:6 are reserved. Bit 7 indicates direction, + * see \ref libusb_endpoint_direction. + */ + uint8_t bEndpointAddress; + + /** Attributes which apply to the endpoint when it is configured using + * the bConfigurationValue. Bits 0:1 determine the transfer type and + * correspond to \ref libusb_transfer_type. Bits 2:3 are only used for + * isochronous endpoints and correspond to \ref libusb_iso_sync_type. + * Bits 4:5 are also only used for isochronous endpoints and correspond to + * \ref libusb_iso_usage_type. Bits 6:7 are reserved. + */ + uint8_t bmAttributes; + + /** Maximum packet size this endpoint is capable of sending/receiving. */ + uint16_t wMaxPacketSize; + + /** Interval for polling endpoint for data transfers. */ + uint8_t bInterval; + + /** For audio devices only: the rate at which synchronization feedback + * is provided. 
 */
+	uint8_t bRefresh;
+
+	/** For audio devices only: the address of the synch endpoint */
+	uint8_t bSynchAddress;
+
+	/** Extra descriptors. If libusb encounters unknown endpoint descriptors,
+	 * it will store them here, should you wish to parse them. */
+	const unsigned char *extra;
+
+	/** Length of the extra descriptors, in bytes. */
+	int extra_length;
+};
+
+
+/** \ingroup desc
+ * A structure representing the standard USB interface descriptor. This
+ * descriptor is documented in section 9.6.5 of the USB 2.0 specification.
+ * All multiple-byte fields are represented in host-endian format.
+ */
+struct libusb_interface_descriptor {
+	/** Size of this descriptor (in bytes) */
+	uint8_t bLength;
+
+	/** Descriptor type. Will have value
+	 * \ref libusb_descriptor_type::LIBUSB_DT_INTERFACE LIBUSB_DT_INTERFACE
+	 * in this context. */
+	uint8_t bDescriptorType;
+
+	/** Number of this interface */
+	uint8_t bInterfaceNumber;
+
+	/** Value used to select this alternate setting for this interface */
+	uint8_t bAlternateSetting;
+
+	/** Number of endpoints used by this interface (excluding the control
+	 * endpoint). */
+	uint8_t bNumEndpoints;
+
+	/** USB-IF class code for this interface. See \ref libusb_class_code. */
+	uint8_t bInterfaceClass;
+
+	/** USB-IF subclass code for this interface, qualified by the
+	 * bInterfaceClass value */
+	uint8_t bInterfaceSubClass;
+
+	/** USB-IF protocol code for this interface, qualified by the
+	 * bInterfaceClass and bInterfaceSubClass values */
+	uint8_t bInterfaceProtocol;
+
+	/** Index of string descriptor describing this interface */
+	uint8_t iInterface;
+
+	/** Array of endpoint descriptors. The length of this array is determined
+	 * by the bNumEndpoints field. */
+	const struct libusb_endpoint_descriptor *endpoint;
+
+	/** Extra descriptors. If libusb encounters unknown interface descriptors,
+	 * it will store them here, should you wish to parse them.
 */
+	const unsigned char *extra;
+
+	/** Length of the extra descriptors, in bytes. */
+	int extra_length;
+};
+
+/** \ingroup desc
+ * A collection of alternate settings for a particular USB interface.
+ */
+struct libusb_interface {
+	/** Array of interface descriptors. The length of this array is determined
+	 * by the num_altsetting field. */
+	const struct libusb_interface_descriptor *altsetting;
+
+	/** The number of alternate settings that belong to this interface */
+	int num_altsetting;
+};
+
+/** \ingroup desc
+ * A structure representing the standard USB configuration descriptor. This
+ * descriptor is documented in section 9.6.3 of the USB 2.0 specification.
+ * All multiple-byte fields are represented in host-endian format.
+ */
+struct libusb_config_descriptor {
+	/** Size of this descriptor (in bytes) */
+	uint8_t bLength;
+
+	/** Descriptor type. Will have value
+	 * \ref libusb_descriptor_type::LIBUSB_DT_CONFIG LIBUSB_DT_CONFIG
+	 * in this context. */
+	uint8_t bDescriptorType;
+
+	/** Total length of data returned for this configuration */
+	uint16_t wTotalLength;
+
+	/** Number of interfaces supported by this configuration */
+	uint8_t bNumInterfaces;
+
+	/** Identifier value for this configuration */
+	uint8_t bConfigurationValue;
+
+	/** Index of string descriptor describing this configuration */
+	uint8_t iConfiguration;
+
+	/** Configuration characteristics */
+	uint8_t bmAttributes;
+
+	/** Maximum power consumption of the USB device from this bus in this
+	 * configuration when the device is fully operational. Expressed in units
+	 * of 2 mA. */
+	uint8_t MaxPower;
+
+	/** Array of interfaces supported by this configuration. The length of
+	 * this array is determined by the bNumInterfaces field. */
+	const struct libusb_interface *interface;
+
+	/** Extra descriptors. If libusb encounters unknown configuration
+	 * descriptors, it will store them here, should you wish to parse them.
 */
+	const unsigned char *extra;
+
+	/** Length of the extra descriptors, in bytes. */
+	int extra_length;
+};
+
+/** \ingroup desc
+ * A structure representing the BOS descriptor. This
+ * descriptor is documented in section 9.6.2 of the USB 3.0
+ * specification. All multiple-byte fields are represented in
+ * host-endian format.
+ */
+struct libusb_bos_descriptor {
+	/** Size of this descriptor (in bytes) */
+	uint8_t bLength;
+
+	/** Descriptor type. Will have value
+	 * \ref libusb_descriptor_type::LIBUSB_DT_BOS LIBUSB_DT_BOS
+	 * in this context. */
+	uint8_t bDescriptorType;
+
+	/** Length of this descriptor and all of its sub descriptors */
+	uint16_t wTotalLength;
+
+	/** The number of separate device capability descriptors in
+	 * the BOS */
+	uint8_t bNumDeviceCaps;
+
+	/** USB 2.0 extension capability descriptor */
+	struct libusb_usb_2_0_device_capability_descriptor *usb_2_0_ext_cap;
+
+	/** SuperSpeed capability descriptor */
+	struct libusb_ss_usb_device_capability_descriptor *ss_usb_cap;
+};
+
+/** \ingroup desc
+ * A structure representing the device capability descriptor for
+ * USB 2.0. This descriptor is documented in section 9.6.2.1 of
+ * the USB 3.0 specification. All multiple-byte fields are represented
+ * in host-endian format.
+ */
+struct libusb_usb_2_0_device_capability_descriptor {
+	/** Size of this descriptor (in bytes) */
+	uint8_t bLength;
+
+	/** Descriptor type. Will have value
+	 * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE_CAPABILITY
+	 * LIBUSB_DT_DEVICE_CAPABILITY in this context. */
+	uint8_t bDescriptorType;
+
+	/** Capability type. Will have value
+	 * \ref libusb_capability_type::LIBUSB_USB_CAP_TYPE_EXT
+	 * LIBUSB_USB_CAP_TYPE_EXT in this context. */
+	uint8_t bDevCapabilityType;
+
+	/** Bitmap encoding of supported device level features.
+	 * A value of one in a bit location indicates a feature is
+	 * supported; a value of zero indicates it is not supported.
+	 * See \ref libusb_capability_attributes.
 */
+	uint32_t bmAttributes;
+};
+
+/** \ingroup desc
+ * A structure representing the device capability descriptor for
+ * USB 3.0. This descriptor is documented in section 9.6.2.2 of
+ * the USB 3.0 specification. All multiple-byte fields are represented
+ * in host-endian format.
+ */
+struct libusb_ss_usb_device_capability_descriptor {
+	/** Size of this descriptor (in bytes) */
+	uint8_t bLength;
+
+	/** Descriptor type. Will have value
+	 * \ref libusb_descriptor_type::LIBUSB_DT_DEVICE_CAPABILITY
+	 * LIBUSB_DT_DEVICE_CAPABILITY in this context. */
+	uint8_t bDescriptorType;
+
+	/** Capability type. Will have value
+	 * \ref libusb_capability_type::LIBUSB_SS_USB_CAP_TYPE
+	 * LIBUSB_SS_USB_CAP_TYPE in this context. */
+	uint8_t bDevCapabilityType;
+
+	/** Bitmap encoding of supported device level features.
+	 * A value of one in a bit location indicates a feature is
+	 * supported; a value of zero indicates it is not supported.
+	 * See \ref libusb_capability_attributes. */
+	uint8_t bmAttributes;
+
+	/** Bitmap encoding of the speed supported by this device when
+	 * operating in SuperSpeed mode. See \ref libusb_supported_speed. */
+	uint16_t wSpeedSupported;
+
+	/** The lowest speed at which all the functionality supported
+	 * by the device is available to the user. For example if the
+	 * device supports all its functionality when connected at
+	 * full speed and above then it sets this value to 1. */
+	uint8_t bFunctionalitySupport;
+
+	/** U1 Device Exit Latency. */
+	uint8_t bU1DevExitLat;
+
+	/** U2 Device Exit Latency. */
+	uint16_t bU2DevExitLat;
+};
+
+
+/** \ingroup asyncio
+ * Setup packet for control transfers. */
+struct libusb_control_setup {
+	/** Request type. Bits 0:4 determine recipient, see
+	 * \ref libusb_request_recipient. Bits 5:6 determine type, see
+	 * \ref libusb_request_type. Bit 7 determines data transfer direction, see
+	 * \ref libusb_endpoint_direction.
+	 */
+	uint8_t bmRequestType;
+
+	/** Request.
If the type bits of bmRequestType are equal to + * \ref libusb_request_type::LIBUSB_REQUEST_TYPE_STANDARD + * "LIBUSB_REQUEST_TYPE_STANDARD" then this field refers to + * \ref libusb_standard_request. For other cases, use of this field is + * application-specific. */ + uint8_t bRequest; + + /** Value. Varies according to request */ + uint16_t wValue; + + /** Index. Varies according to request, typically used to pass an index + * or offset */ + uint16_t wIndex; + + /** Number of bytes to transfer */ + uint16_t wLength; +}; + +#define LIBUSB_CONTROL_SETUP_SIZE (sizeof(struct libusb_control_setup)) + +/* libusb */ + +struct libusb_context; +struct libusb_device; +struct libusb_device_handle; +struct libusb_hotplug_callback; + +/** \ingroup lib + * Structure representing the libusb version. + */ +struct libusb_version { + /** Library major version. */ + const uint16_t major; + + /** Library minor version. */ + const uint16_t minor; + + /** Library micro version. */ + const uint16_t micro; + + /** Library nano version. This field is only nonzero on Windows. */ + const uint16_t nano; + + /** Library release candidate suffix string, e.g. "-rc4". */ + const char *rc; + + /** Output of `git describe --tags` at library build time. */ + const char *describe; +}; + +/** \ingroup lib + * Structure representing a libusb session. The concept of individual libusb + * sessions allows for your program to use two libraries (or dynamically + * load two modules) which both independently use libusb. This will prevent + * interference between the individual libusb users - for example + * libusb_set_debug() will not affect the other user of the library, and + * libusb_exit() will not destroy resources that the other user is still + * using. + * + * Sessions are created by libusb_init() and destroyed through libusb_exit(). + * If your application is guaranteed to only ever include a single libusb + * user (i.e. 
you), you do not have to worry about contexts: pass NULL in + * every function call where a context is required. The default context + * will be used. + * + * For more information, see \ref contexts. + */ +typedef struct libusb_context libusb_context; + +/** \ingroup dev + * Structure representing a USB device detected on the system. This is an + * opaque type for which you are only ever provided with a pointer, usually + * originating from libusb_get_device_list(). + * + * Certain operations can be performed on a device, but in order to do any + * I/O you will have to first obtain a device handle using libusb_open(). + * + * Devices are reference counted with libusb_ref_device() and + * libusb_unref_device(), and are freed when the reference count reaches 0. + * New devices presented by libusb_get_device_list() have a reference count of + * 1, and libusb_free_device_list() can optionally decrease the reference count + * on all devices in the list. libusb_open() adds another reference which is + * later destroyed by libusb_close(). + */ +typedef struct libusb_device libusb_device; + + +/** \ingroup dev + * Structure representing a handle on a USB device. This is an opaque type for + * which you are only ever provided with a pointer, usually originating from + * libusb_open(). + * + * A device handle is used to perform I/O and other operations. When finished + * with a device handle, you should call libusb_close(). + */ +typedef struct libusb_device_handle libusb_device_handle; + +/** \ingroup dev + * Speed codes. Indicates the speed at which the device is operating. + */ +enum libusb_speed { + /** The OS doesn't report or know the device speed. */ + LIBUSB_SPEED_UNKNOWN = 0, + + /** The device is operating at low speed (1.5MBit/s). */ + LIBUSB_SPEED_LOW = 1, + + /** The device is operating at full speed (12MBit/s). */ + LIBUSB_SPEED_FULL = 2, + + /** The device is operating at high speed (480MBit/s). 
 */
+	LIBUSB_SPEED_HIGH = 3,
+
+	/** The device is operating at super speed (5000MBit/s). */
+	LIBUSB_SPEED_SUPER = 4,
+};
+
+/** \ingroup dev
+ * Supported speeds (wSpeedSupported) bitfield. Indicates what
+ * speeds the device supports.
+ */
+enum libusb_supported_speed {
+	/** Low speed operation supported (1.5MBit/s). */
+	LIBUSB_LOW_SPEED_OPERATION = 1,
+
+	/** Full speed operation supported (12MBit/s). */
+	LIBUSB_FULL_SPEED_OPERATION = 2,
+
+	/** High speed operation supported (480MBit/s). */
+	LIBUSB_HIGH_SPEED_OPERATION = 4,
+
+	/** Superspeed operation supported (5000MBit/s). */
+	LIBUSB_5GBPS_OPERATION = 8,
+};
+
+/** \ingroup dev
+ * Capability attributes
+ */
+enum libusb_capability_attributes {
+	/** Supports Link Power Management (LPM) */
+	LIBUSB_LPM_SUPPORT = 2,
+};
+
+/** \ingroup dev
+ * USB capability types
+ */
+enum libusb_capability_type {
+	/** USB 2.0 extension capability type */
+	LIBUSB_USB_CAP_TYPE_EXT = 2,
+
+	/** SuperSpeed capability type */
+	LIBUSB_SS_USB_CAP_TYPE = 3,
+};
+
+/** \ingroup misc
+ * Error codes. Most libusb functions return 0 on success or one of these
+ * codes on failure.
+ * You can call \ref libusb_error_name() to retrieve a string representation
+ * of an error code or \ref libusb_strerror() to get an English description
+ * of an error code.
+ */ +enum libusb_error { + /** Success (no error) */ + LIBUSB_SUCCESS = 0, + + /** Input/output error */ + LIBUSB_ERROR_IO = -1, + + /** Invalid parameter */ + LIBUSB_ERROR_INVALID_PARAM = -2, + + /** Access denied (insufficient permissions) */ + LIBUSB_ERROR_ACCESS = -3, + + /** No such device (it may have been disconnected) */ + LIBUSB_ERROR_NO_DEVICE = -4, + + /** Entity not found */ + LIBUSB_ERROR_NOT_FOUND = -5, + + /** Resource busy */ + LIBUSB_ERROR_BUSY = -6, + + /** Operation timed out */ + LIBUSB_ERROR_TIMEOUT = -7, + + /** Overflow */ + LIBUSB_ERROR_OVERFLOW = -8, + + /** Pipe error */ + LIBUSB_ERROR_PIPE = -9, + + /** System call interrupted (perhaps due to signal) */ + LIBUSB_ERROR_INTERRUPTED = -10, + + /** Insufficient memory */ + LIBUSB_ERROR_NO_MEM = -11, + + /** Operation not supported or unimplemented on this platform */ + LIBUSB_ERROR_NOT_SUPPORTED = -12, + + /* NB! Remember to update libusb_error_name() and + libusb_strerror() when adding new error codes here. */ + + /** Other error */ + LIBUSB_ERROR_OTHER = -99, +}; + +/** \ingroup asyncio + * Transfer status codes */ +enum libusb_transfer_status { + /** Transfer completed without error. Note that this does not indicate + * that the entire amount of requested data was transferred. */ + LIBUSB_TRANSFER_COMPLETED, + + /** Transfer failed */ + LIBUSB_TRANSFER_ERROR, + + /** Transfer timed out */ + LIBUSB_TRANSFER_TIMED_OUT, + + /** Transfer was cancelled */ + LIBUSB_TRANSFER_CANCELLED, + + /** For bulk/interrupt endpoints: halt condition detected (endpoint + * stalled). For control endpoints: control request not supported. 
*/ + LIBUSB_TRANSFER_STALL, + + /** Device was disconnected */ + LIBUSB_TRANSFER_NO_DEVICE, + + /** Device sent more data than requested */ + LIBUSB_TRANSFER_OVERFLOW, +}; + +/** \ingroup asyncio + * libusb_transfer.flags values */ +enum libusb_transfer_flags { + /** Report short frames as errors */ + LIBUSB_TRANSFER_SHORT_NOT_OK = 1<<0, + + /** Automatically free() transfer buffer during libusb_free_transfer() */ + LIBUSB_TRANSFER_FREE_BUFFER = 1<<1, + + /** Automatically call libusb_free_transfer() after callback returns. + * If this flag is set, it is illegal to call libusb_free_transfer() + * from your transfer callback, as this will result in a double-free + * when this flag is acted upon. */ + LIBUSB_TRANSFER_FREE_TRANSFER = 1<<2, + + /** Terminate transfers that are a multiple of the endpoint's + * wMaxPacketSize with an extra zero length packet. This is useful + * when a device protocol mandates that each logical request is + * terminated by an incomplete packet (i.e. the logical requests are + * not separated by other means). + * + * This flag only affects host-to-device transfers to bulk and interrupt + * endpoints. In other situations, it is ignored. + * + * This flag only affects transfers with a length that is a multiple of + * the endpoint's wMaxPacketSize. On transfers of other lengths, this + * flag has no effect. Therefore, if you are working with a device that + * needs a ZLP whenever the end of the logical request falls on a packet + * boundary, then it is sensible to set this flag on every + * transfer (you do not have to worry about only setting it on transfers + * that end on the boundary). + * + * This flag is currently only supported on Linux. + * On other systems, libusb_submit_transfer() will return + * LIBUSB_ERROR_NOT_SUPPORTED for every transfer where this flag is set. + * + * Available since libusb-1.0.9. + */ + LIBUSB_TRANSFER_ADD_ZERO_PACKET = 1 << 3, +}; + +/** \ingroup asyncio + * Isochronous packet descriptor. 
 */ +struct libusb_iso_packet_descriptor { + /** Length of data to request in this packet */ + unsigned int length; + + /** Amount of data that was actually transferred */ + unsigned int actual_length; + + /** Status code for this packet */ + enum libusb_transfer_status status; +}; + +struct libusb_transfer; + +/** \ingroup asyncio + * Asynchronous transfer callback function type. When submitting asynchronous + * transfers, you pass a pointer to a callback function of this type via the + * \ref libusb_transfer::callback "callback" member of the libusb_transfer + * structure. libusb will call this function later, when the transfer has + * completed or failed. See \ref asyncio for more information. + * \param transfer The libusb_transfer struct the callback function is being + * notified about. + */ +typedef void (LIBUSB_CALL *libusb_transfer_cb_fn)(struct libusb_transfer *transfer); + +/** \ingroup asyncio + * The generic USB transfer structure. The user populates this structure and + * then submits it in order to request a transfer. After the transfer has + * completed, the library populates the transfer with the results and passes + * it back to the user. + */ +struct libusb_transfer { + /** Handle of the device that this transfer will be submitted to */ + libusb_device_handle *dev_handle; + + /** A bitwise OR combination of \ref libusb_transfer_flags. */ + uint8_t flags; + + /** Address of the endpoint where this transfer will be sent. */ + unsigned char endpoint; + + /** Type of the endpoint from \ref libusb_transfer_type */ + unsigned char type; + + /** Timeout for this transfer in milliseconds. A value of 0 indicates no + * timeout. */ + unsigned int timeout; + + /** The status of the transfer. Read-only, and only for use within + * transfer callback function. + * + * If this is an isochronous transfer, this field may read COMPLETED even + * if there were errors in the frames. 
Use the + * \ref libusb_iso_packet_descriptor::status "status" field in each packet + * to determine if errors occurred. */ + enum libusb_transfer_status status; + + /** Length of the data buffer */ + int length; + + /** Actual length of data that was transferred. Read-only, and only for + * use within transfer callback function. Not valid for isochronous + * endpoint transfers. */ + int actual_length; + + /** Callback function. This will be invoked when the transfer completes, + * fails, or is cancelled. */ + libusb_transfer_cb_fn callback; + + /** User context data to pass to the callback function. */ + void *user_data; + + /** Data buffer */ + unsigned char *buffer; + + /** Number of isochronous packets. Only used for I/O with isochronous + * endpoints. */ + int num_iso_packets; + + /** Isochronous packet descriptors, for isochronous transfers only. */ + struct libusb_iso_packet_descriptor iso_packet_desc +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + [] /* valid C99 code */ +#else + [0] /* non-standard, but usually working code */ +#endif + ; +}; + +/** \ingroup misc + * Capabilities supported by this instance of libusb. Test if the loaded + * library supports a given capability by calling + * \ref libusb_has_capability(). + */ +enum libusb_capability { + /** The libusb_has_capability() API is available. */ + LIBUSB_CAP_HAS_CAPABILITY = 0, + /** The libusb hotplug API is available. 
*/ + LIBUSB_CAP_HAS_HOTPLUG = 1, +}; + +int LIBUSB_CALL libusb_init(libusb_context **ctx); +void LIBUSB_CALL libusb_exit(libusb_context *ctx); +void LIBUSB_CALL libusb_set_debug(libusb_context *ctx, int level); +const struct libusb_version * LIBUSB_CALL libusb_get_version(void); +int LIBUSB_CALL libusb_has_capability(uint32_t capability); +const char * LIBUSB_CALL libusb_error_name(int errcode); +const char * LIBUSB_CALL libusb_strerror(enum libusb_error errcode); + +ssize_t LIBUSB_CALL libusb_get_device_list(libusb_context *ctx, + libusb_device ***list); +void LIBUSB_CALL libusb_free_device_list(libusb_device **list, + int unref_devices); +libusb_device * LIBUSB_CALL libusb_ref_device(libusb_device *dev); +void LIBUSB_CALL libusb_unref_device(libusb_device *dev); + +int LIBUSB_CALL libusb_get_configuration(libusb_device_handle *dev, + int *config); +int LIBUSB_CALL libusb_get_device_descriptor(libusb_device *dev, + struct libusb_device_descriptor *desc); +int LIBUSB_CALL libusb_get_active_config_descriptor(libusb_device *dev, + struct libusb_config_descriptor **config); +int LIBUSB_CALL libusb_get_config_descriptor(libusb_device *dev, + uint8_t config_index, struct libusb_config_descriptor **config); +int LIBUSB_CALL libusb_get_config_descriptor_by_value(libusb_device *dev, + uint8_t bConfigurationValue, struct libusb_config_descriptor **config); +void LIBUSB_CALL libusb_free_config_descriptor( + struct libusb_config_descriptor *config); +uint8_t LIBUSB_CALL libusb_get_bus_number(libusb_device *dev); +uint8_t LIBUSB_CALL libusb_get_device_address(libusb_device *dev); +int LIBUSB_CALL libusb_get_device_speed(libusb_device *dev); +int LIBUSB_CALL libusb_get_max_packet_size(libusb_device *dev, + unsigned char endpoint); +int LIBUSB_CALL libusb_get_max_iso_packet_size(libusb_device *dev, + unsigned char endpoint); + +int LIBUSB_CALL libusb_open(libusb_device *dev, libusb_device_handle **handle); +void LIBUSB_CALL libusb_close(libusb_device_handle *dev_handle); 
+libusb_device * LIBUSB_CALL libusb_get_device(libusb_device_handle *dev_handle); + +int LIBUSB_CALL libusb_set_configuration(libusb_device_handle *dev, + int configuration); +int LIBUSB_CALL libusb_claim_interface(libusb_device_handle *dev, + int interface_number); +int LIBUSB_CALL libusb_release_interface(libusb_device_handle *dev, + int interface_number); + +libusb_device_handle * LIBUSB_CALL libusb_open_device_with_vid_pid( + libusb_context *ctx, uint16_t vendor_id, uint16_t product_id); + +int LIBUSB_CALL libusb_set_interface_alt_setting(libusb_device_handle *dev, + int interface_number, int alternate_setting); +int LIBUSB_CALL libusb_clear_halt(libusb_device_handle *dev, + unsigned char endpoint); +int LIBUSB_CALL libusb_reset_device(libusb_device_handle *dev); + +int LIBUSB_CALL libusb_kernel_driver_active(libusb_device_handle *dev, + int interface_number); +int LIBUSB_CALL libusb_detach_kernel_driver(libusb_device_handle *dev, + int interface_number); +int LIBUSB_CALL libusb_attach_kernel_driver(libusb_device_handle *dev, + int interface_number); + +/* async I/O */ + +/** \ingroup asyncio + * Get the data section of a control transfer. This convenience function is here + * to remind you that the data does not start until 8 bytes into the actual + * buffer, as the setup packet comes first. + * + * Calling this function only makes sense from a transfer callback function, + * or situations where you have already allocated a suitably sized buffer at + * transfer->buffer. + * + * \param transfer a transfer + * \returns pointer to the first byte of the data section + */ +static inline unsigned char *libusb_control_transfer_get_data( + struct libusb_transfer *transfer) +{ + return transfer->buffer + LIBUSB_CONTROL_SETUP_SIZE; +} + +/** \ingroup asyncio + * Get the control setup packet of a control transfer. This convenience + * function is here to remind you that the control setup occupies the first + * 8 bytes of the transfer data buffer. 
+ * + * Calling this function only makes sense from a transfer callback function, + * or situations where you have already allocated a suitably sized buffer at + * transfer->buffer. + * + * \param transfer a transfer + * \returns a casted pointer to the start of the transfer data buffer + */ +static inline struct libusb_control_setup *libusb_control_transfer_get_setup( + struct libusb_transfer *transfer) +{ + return (struct libusb_control_setup *) transfer->buffer; +} + +/** \ingroup asyncio + * Helper function to populate the setup packet (first 8 bytes of the data + * buffer) for a control transfer. The wIndex, wValue and wLength values should + * be given in host-endian byte order. + * + * \param buffer buffer to output the setup packet into + * \param bmRequestType see the + * \ref libusb_control_setup::bmRequestType "bmRequestType" field of + * \ref libusb_control_setup + * \param bRequest see the + * \ref libusb_control_setup::bRequest "bRequest" field of + * \ref libusb_control_setup + * \param wValue see the + * \ref libusb_control_setup::wValue "wValue" field of + * \ref libusb_control_setup + * \param wIndex see the + * \ref libusb_control_setup::wIndex "wIndex" field of + * \ref libusb_control_setup + * \param wLength see the + * \ref libusb_control_setup::wLength "wLength" field of + * \ref libusb_control_setup + */ +static inline void libusb_fill_control_setup(unsigned char *buffer, + uint8_t bmRequestType, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, + uint16_t wLength) +{ + struct libusb_control_setup *setup = (struct libusb_control_setup *) buffer; + setup->bmRequestType = bmRequestType; + setup->bRequest = bRequest; + setup->wValue = libusb_cpu_to_le16(wValue); + setup->wIndex = libusb_cpu_to_le16(wIndex); + setup->wLength = libusb_cpu_to_le16(wLength); +} + +struct libusb_transfer * LIBUSB_CALL libusb_alloc_transfer(int iso_packets); +int LIBUSB_CALL libusb_submit_transfer(struct libusb_transfer *transfer); +int LIBUSB_CALL 
libusb_cancel_transfer(struct libusb_transfer *transfer); +void LIBUSB_CALL libusb_free_transfer(struct libusb_transfer *transfer); + +/** \ingroup asyncio + * Helper function to populate the required \ref libusb_transfer fields + * for a control transfer. + * + * If you pass a transfer buffer to this function, the first 8 bytes will + * be interpreted as a control setup packet, and the wLength field will be + * used to automatically populate the \ref libusb_transfer::length "length" + * field of the transfer. Therefore the recommended approach is: + * -# Allocate a suitably sized data buffer (including space for control setup) + * -# Call libusb_fill_control_setup() + * -# If this is a host-to-device transfer with a data stage, put the data + * in place after the setup packet + * -# Call this function + * -# Call libusb_submit_transfer() + * + * It is also legal to pass a NULL buffer to this function, in which case this + * function will not attempt to populate the length field. Remember that you + * must then populate the buffer and length fields later. + * + * \param transfer the transfer to populate + * \param dev_handle handle of the device that will handle the transfer + * \param buffer data buffer. If provided, this function will interpret the + * first 8 bytes as a setup packet and infer the transfer length from that. 
+ * \param callback callback function to be invoked on transfer completion + * \param user_data user data to pass to callback function + * \param timeout timeout for the transfer in milliseconds + */ +static inline void libusb_fill_control_transfer( + struct libusb_transfer *transfer, libusb_device_handle *dev_handle, + unsigned char *buffer, libusb_transfer_cb_fn callback, void *user_data, + unsigned int timeout) +{ + struct libusb_control_setup *setup = (struct libusb_control_setup *) buffer; + transfer->dev_handle = dev_handle; + transfer->endpoint = 0; + transfer->type = LIBUSB_TRANSFER_TYPE_CONTROL; + transfer->timeout = timeout; + transfer->buffer = buffer; + if (setup) + transfer->length = LIBUSB_CONTROL_SETUP_SIZE + + libusb_le16_to_cpu(setup->wLength); + transfer->user_data = user_data; + transfer->callback = callback; +} + +/** \ingroup asyncio + * Helper function to populate the required \ref libusb_transfer fields + * for a bulk transfer. + * + * \param transfer the transfer to populate + * \param dev_handle handle of the device that will handle the transfer + * \param endpoint address of the endpoint where this transfer will be sent + * \param buffer data buffer + * \param length length of data buffer + * \param callback callback function to be invoked on transfer completion + * \param user_data user data to pass to callback function + * \param timeout timeout for the transfer in milliseconds + */ +static inline void libusb_fill_bulk_transfer(struct libusb_transfer *transfer, + libusb_device_handle *dev_handle, unsigned char endpoint, + unsigned char *buffer, int length, libusb_transfer_cb_fn callback, + void *user_data, unsigned int timeout) +{ + transfer->dev_handle = dev_handle; + transfer->endpoint = endpoint; + transfer->type = LIBUSB_TRANSFER_TYPE_BULK; + transfer->timeout = timeout; + transfer->buffer = buffer; + transfer->length = length; + transfer->user_data = user_data; + transfer->callback = callback; +} + +/** \ingroup asyncio + * Helper 
function to populate the required \ref libusb_transfer fields + * for an interrupt transfer. + * + * \param transfer the transfer to populate + * \param dev_handle handle of the device that will handle the transfer + * \param endpoint address of the endpoint where this transfer will be sent + * \param buffer data buffer + * \param length length of data buffer + * \param callback callback function to be invoked on transfer completion + * \param user_data user data to pass to callback function + * \param timeout timeout for the transfer in milliseconds + */ +static inline void libusb_fill_interrupt_transfer( + struct libusb_transfer *transfer, libusb_device_handle *dev_handle, + unsigned char endpoint, unsigned char *buffer, int length, + libusb_transfer_cb_fn callback, void *user_data, unsigned int timeout) +{ + transfer->dev_handle = dev_handle; + transfer->endpoint = endpoint; + transfer->type = LIBUSB_TRANSFER_TYPE_INTERRUPT; + transfer->timeout = timeout; + transfer->buffer = buffer; + transfer->length = length; + transfer->user_data = user_data; + transfer->callback = callback; +} + +/** \ingroup asyncio + * Helper function to populate the required \ref libusb_transfer fields + * for an isochronous transfer. 
+ * + * \param transfer the transfer to populate + * \param dev_handle handle of the device that will handle the transfer + * \param endpoint address of the endpoint where this transfer will be sent + * \param buffer data buffer + * \param length length of data buffer + * \param num_iso_packets the number of isochronous packets + * \param callback callback function to be invoked on transfer completion + * \param user_data user data to pass to callback function + * \param timeout timeout for the transfer in milliseconds + */ +static inline void libusb_fill_iso_transfer(struct libusb_transfer *transfer, + libusb_device_handle *dev_handle, unsigned char endpoint, + unsigned char *buffer, int length, int num_iso_packets, + libusb_transfer_cb_fn callback, void *user_data, unsigned int timeout) +{ + transfer->dev_handle = dev_handle; + transfer->endpoint = endpoint; + transfer->type = LIBUSB_TRANSFER_TYPE_ISOCHRONOUS; + transfer->timeout = timeout; + transfer->buffer = buffer; + transfer->length = length; + transfer->num_iso_packets = num_iso_packets; + transfer->user_data = user_data; + transfer->callback = callback; +} + +/** \ingroup asyncio + * Convenience function to set the length of all packets in an isochronous + * transfer, based on the num_iso_packets field in the transfer structure. + * + * \param transfer a transfer + * \param length the length to set in each isochronous packet descriptor + * \see libusb_get_max_packet_size() + */ +static inline void libusb_set_iso_packet_lengths( + struct libusb_transfer *transfer, unsigned int length) +{ + int i; + for (i = 0; i < transfer->num_iso_packets; i++) + transfer->iso_packet_desc[i].length = length; +} + +/** \ingroup asyncio + * Convenience function to locate the position of an isochronous packet + * within the buffer of an isochronous transfer. + * + * This is a thorough function which loops through all preceding packets, + * accumulating their lengths to find the position of the specified packet. 
+ * Typically you will assign equal lengths to each packet in the transfer, + * and hence the above method is sub-optimal. You may wish to use + * libusb_get_iso_packet_buffer_simple() instead. + * + * \param transfer a transfer + * \param packet the packet to return the address of + * \returns the base address of the packet buffer inside the transfer buffer, + * or NULL if the packet does not exist. + * \see libusb_get_iso_packet_buffer_simple() + */ +static inline unsigned char *libusb_get_iso_packet_buffer( + struct libusb_transfer *transfer, unsigned int packet) +{ + int i; + size_t offset = 0; + int _packet; + + /* oops..slight bug in the API. packet is an unsigned int, but we use + * signed integers almost everywhere else. range-check and convert to + * signed to avoid compiler warnings. FIXME for libusb-2. */ + if (packet > INT_MAX) + return NULL; + _packet = packet; + + if (_packet >= transfer->num_iso_packets) + return NULL; + + for (i = 0; i < _packet; i++) + offset += transfer->iso_packet_desc[i].length; + + return transfer->buffer + offset; +} + +/** \ingroup asyncio + * Convenience function to locate the position of an isochronous packet + * within the buffer of an isochronous transfer, for transfers where each + * packet is of identical size. + * + * This function relies on the assumption that every packet within the transfer + * is of identical size to the first packet. Calculating the location of + * the packet buffer is then just a simple calculation: + * buffer + (packet_size * packet) + * + * Do not use this function on transfers other than those that have identical + * packet lengths for each packet. + * + * \param transfer a transfer + * \param packet the packet to return the address of + * \returns the base address of the packet buffer inside the transfer buffer, + * or NULL if the packet does not exist. 
+ * \see libusb_get_iso_packet_buffer() + */ +static inline unsigned char *libusb_get_iso_packet_buffer_simple( + struct libusb_transfer *transfer, unsigned int packet) +{ + int _packet; + + /* oops..slight bug in the API. packet is an unsigned int, but we use + * signed integers almost everywhere else. range-check and convert to + * signed to avoid compiler warnings. FIXME for libusb-2. */ + if (packet > INT_MAX) + return NULL; + _packet = packet; + + if (_packet >= transfer->num_iso_packets) + return NULL; + + return transfer->buffer + (transfer->iso_packet_desc[0].length * _packet); +} + +/* sync I/O */ + +int LIBUSB_CALL libusb_control_transfer(libusb_device_handle *dev_handle, + uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, + unsigned char *data, uint16_t wLength, unsigned int timeout); + +int LIBUSB_CALL libusb_bulk_transfer(libusb_device_handle *dev_handle, + unsigned char endpoint, unsigned char *data, int length, + int *actual_length, unsigned int timeout); + +int LIBUSB_CALL libusb_interrupt_transfer(libusb_device_handle *dev_handle, + unsigned char endpoint, unsigned char *data, int length, + int *actual_length, unsigned int timeout); + +/** \ingroup desc + * Retrieve a descriptor from the default control pipe. + * This is a convenience function which formulates the appropriate control + * message to retrieve the descriptor. 
+ * + * \param dev a device handle + * \param desc_type the descriptor type, see \ref libusb_descriptor_type + * \param desc_index the index of the descriptor to retrieve + * \param data output buffer for descriptor + * \param length size of data buffer + * \returns number of bytes returned in data, or LIBUSB_ERROR code on failure + */ +static inline int libusb_get_descriptor(libusb_device_handle *dev, + uint8_t desc_type, uint8_t desc_index, unsigned char *data, int length) +{ + return libusb_control_transfer(dev, LIBUSB_ENDPOINT_IN, + LIBUSB_REQUEST_GET_DESCRIPTOR, (desc_type << 8) | desc_index, 0, data, + (uint16_t) length, 1000); +} + +/** \ingroup desc + * Retrieve a descriptor from a device. + * This is a convenience function which formulates the appropriate control + * message to retrieve the descriptor. The string returned is Unicode, as + * detailed in the USB specifications. + * + * \param dev a device handle + * \param desc_index the index of the descriptor to retrieve + * \param langid the language ID for the string descriptor + * \param data output buffer for descriptor + * \param length size of data buffer + * \returns number of bytes returned in data, or LIBUSB_ERROR code on failure + * \see libusb_get_string_descriptor_ascii() + */ +static inline int libusb_get_string_descriptor(libusb_device_handle *dev, + uint8_t desc_index, uint16_t langid, unsigned char *data, int length) +{ + return libusb_control_transfer(dev, LIBUSB_ENDPOINT_IN, + LIBUSB_REQUEST_GET_DESCRIPTOR, (uint16_t)((LIBUSB_DT_STRING << 8) | desc_index), + langid, data, (uint16_t) length, 1000); +} + +int LIBUSB_CALL libusb_get_string_descriptor_ascii(libusb_device_handle *dev, + uint8_t desc_index, unsigned char *data, int length); + +/* polling and timeouts */ + +int LIBUSB_CALL libusb_try_lock_events(libusb_context *ctx); +void LIBUSB_CALL libusb_lock_events(libusb_context *ctx); +void LIBUSB_CALL libusb_unlock_events(libusb_context *ctx); +int LIBUSB_CALL 
libusb_event_handling_ok(libusb_context *ctx); +int LIBUSB_CALL libusb_event_handler_active(libusb_context *ctx); +void LIBUSB_CALL libusb_lock_event_waiters(libusb_context *ctx); +void LIBUSB_CALL libusb_unlock_event_waiters(libusb_context *ctx); +int LIBUSB_CALL libusb_wait_for_event(libusb_context *ctx, struct timeval *tv); + +int LIBUSB_CALL libusb_handle_events_timeout(libusb_context *ctx, + struct timeval *tv); +int LIBUSB_CALL libusb_handle_events_timeout_completed(libusb_context *ctx, + struct timeval *tv, int *completed); +int LIBUSB_CALL libusb_handle_events(libusb_context *ctx); +int LIBUSB_CALL libusb_handle_events_completed(libusb_context *ctx, int *completed); +int LIBUSB_CALL libusb_handle_events_locked(libusb_context *ctx, + struct timeval *tv); +int LIBUSB_CALL libusb_pollfds_handle_timeouts(libusb_context *ctx); +int LIBUSB_CALL libusb_get_next_timeout(libusb_context *ctx, + struct timeval *tv); + +/** \ingroup poll + * File descriptor for polling + */ +struct libusb_pollfd { + /** Numeric file descriptor */ + int fd; + + /** Event flags to poll for from <poll.h>. POLLIN indicates that you + * should monitor this file descriptor for becoming ready to read from, + * and POLLOUT indicates that you should monitor this file descriptor for + * nonblocking write readiness. */ + short events; +}; + +/** \ingroup poll + * Callback function, invoked when a new file descriptor should be added + * to the set of file descriptors monitored for events. + * \param fd the new file descriptor + * \param events events to monitor for, see \ref libusb_pollfd for a + * description + * \param user_data User data pointer specified in + * libusb_set_pollfd_notifiers() call + * \see libusb_set_pollfd_notifiers() + */ +typedef void (LIBUSB_CALL *libusb_pollfd_added_cb)(int fd, short events, + void *user_data); + +/** \ingroup poll + * Callback function, invoked when a file descriptor should be removed from + * the set of file descriptors being monitored for events. 
After returning + * from this callback, do not use that file descriptor again. + * \param fd the file descriptor to stop monitoring + * \param user_data User data pointer specified in + * libusb_set_pollfd_notifiers() call + * \see libusb_set_pollfd_notifiers() + */ +typedef void (LIBUSB_CALL *libusb_pollfd_removed_cb)(int fd, void *user_data); + +const struct libusb_pollfd ** LIBUSB_CALL libusb_get_pollfds( + libusb_context *ctx); +void LIBUSB_CALL libusb_set_pollfd_notifiers(libusb_context *ctx, + libusb_pollfd_added_cb added_cb, libusb_pollfd_removed_cb removed_cb, + void *user_data); + +/** \ingroup desc + * Parse a USB 3.0 endpoint companion descriptor. + * + * \param[in] buf the buffer containing the endpoint companion descriptor + * \param[in] len the length of the buffer + * \param[out] ep_comp a parsed endpoint companion descriptor. must be freed by + * libusb_free_ss_endpoint_comp() + * + * \returns LIBUSB_SUCCESS on success + * \returns LIBUSB_ERROR code on error + */ +int LIBUSB_CALL libusb_parse_ss_endpoint_comp(const void *buf, int len, + struct libusb_ss_endpoint_companion_descriptor **ep_comp); + +/** \ingroup desc + * Free a USB 3.0 endpoint companion descriptor. + * + * \param[in] ep_comp the descriptor to free + */ +void LIBUSB_CALL libusb_free_ss_endpoint_comp(struct libusb_ss_endpoint_companion_descriptor *ep_comp); + +/** \ingroup desc + * Parse a Binary Object Store (BOS) descriptor. + * + * \param[in] buf the buffer containing the BOS descriptor + * \param[in] len the length of the buffer + * \param[out] bos a parsed BOS descriptor. must be freed by + * libusb_free_bos_descriptor() + * + * \returns LIBUSB_SUCCESS on success + * \returns LIBUSB_ERROR code on error + */ +int LIBUSB_CALL libusb_parse_bos_descriptor(const void *buf, int len, + struct libusb_bos_descriptor **bos); + +/** \ingroup desc + * Free a Binary Object Store (BOS) descriptor. 
 + * \param[in] bos the descriptor to free + */ +void LIBUSB_CALL libusb_free_bos_descriptor(struct libusb_bos_descriptor *bos); + +/** \ingroup hotplug + * Callback handle. + * + * Callback handles are generated by libusb_hotplug_register_callback() + * and can be used to deregister callbacks. Callback handles are unique + * per libusb_context and it is safe to call libusb_hotplug_deregister_callback() + * on an already deregistered callback. + * + * For more information, see \ref hotplug. + */ +typedef int libusb_hotplug_callback_handle; + +/** \ingroup hotplug + * Flags for hotplug events */ +typedef enum { + /** Arm the callback and fire it for all matching currently attached devices. */ + LIBUSB_HOTPLUG_ENUMERATE = 1, +} libusb_hotplug_flag; + +/** \ingroup hotplug + * Hotplug events */ +typedef enum { + /** A device has been plugged in and is ready to use */ + LIBUSB_HOTPLUG_EVENT_DEVICE_ARRIVED = 0x01, + + /** A device has left and is no longer available. + * It is the user's responsibility to call libusb_close on any handle associated with a disconnected device. + * It is safe to call libusb_get_device_descriptor on a device that has left */ + LIBUSB_HOTPLUG_EVENT_DEVICE_LEFT = 0x02, +} libusb_hotplug_event; + +/** \ingroup hotplug + * Wildcard matching for hotplug events */ +#define LIBUSB_HOTPLUG_MATCH_ANY -1 + +/** \ingroup hotplug + * Hotplug callback function type. When requesting hotplug event notifications, + * you pass a pointer to a callback function of this type. + * + * This callback may be called by an internal event thread and as such it is + * recommended the callback do minimal processing before returning. + * + * libusb will call this function later, when a matching event had happened on + * a matching device. See \ref hotplug for more information. + * + * It is safe to call either libusb_hotplug_register_callback() or + * libusb_hotplug_deregister_callback() from within a callback function. 
+ * + * \param libusb_context context of this notification + * \param device libusb_device this event occurred on + * \param event event that occurred + * \param user_data user data provided when this callback was registered + * \returns bool whether this callback is finished processing events. + * returning 1 will cause this callback to be deregistered + */ +typedef int (LIBUSB_CALL *libusb_hotplug_callback_fn)(libusb_context *ctx, + libusb_device *device, + libusb_hotplug_event event, + void *user_data); + +/** \ingroup hotplug + * Register a hotplug callback function + * + * Register a callback with the libusb_context. The callback will fire + * when a matching event occurs on a matching device. The callback is + * armed until either it is deregistered with libusb_hotplug_deregister_callback() + * or the supplied callback returns 1 to indicate it is finished processing events. + * + * \param[in] ctx context to register this callback with + * \param[in] events bitwise or of events that will trigger this callback. See \ref + * libusb_hotplug_event + * \param[in] flags hotplug callback flags. 
See \ref libusb_hotplug_flag + * \param[in] vendor_id the vendor id to match or \ref LIBUSB_HOTPLUG_MATCH_ANY + * \param[in] product_id the product id to match or \ref LIBUSB_HOTPLUG_MATCH_ANY + * \param[in] dev_class the device class to match or \ref LIBUSB_HOTPLUG_MATCH_ANY + * \param[in] cb_fn the function to be invoked on a matching event/device + * \param[in] user_data user data to pass to the callback function + * \param[out] handle pointer to store the handle of the allocated callback (can be NULL) + * \returns LIBUSB_SUCCESS on success LIBUSB_ERROR code on failure + */ +int LIBUSB_CALL libusb_hotplug_register_callback(libusb_context *ctx, + libusb_hotplug_event events, + libusb_hotplug_flag flags, + int vendor_id, int product_id, + int dev_class, + libusb_hotplug_callback_fn cb_fn, + void *user_data, + libusb_hotplug_callback_handle *handle); + +/** \ingroup hotplug + * Deregisters a hotplug callback. + * + * Deregister a callback from a libusb_context. This function is safe to call from within + * a hotplug callback. + * + * \param[in] ctx context this callback is registered with + * \param[in] handle the handle of the callback to deregister + */ +void LIBUSB_CALL libusb_hotplug_deregister_callback(libusb_context *ctx, + libusb_hotplug_callback_handle handle); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/compat/libusb-1.0/libusb/libusbi.h b/compat/libusb-1.0/libusb/libusbi.h new file mode 100644 index 0000000..198978e --- /dev/null +++ b/compat/libusb-1.0/libusb/libusbi.h @@ -0,0 +1,974 @@ +/* + * Internal header for libusb + * Copyright (C) 2007-2009 Daniel Drake + * Copyright (c) 2001 Johannes Erdfelt + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef LIBUSBI_H +#define LIBUSBI_H + +#include + +#include +#include +#include +#include +#ifdef HAVE_POLL_H +#include +#endif + +#include +#include + +/* Inside the libusb code, mark all public functions as follows: + * return_type API_EXPORTED function_name(params) { ... } + * But if the function returns a pointer, mark it as follows: + * DEFAULT_VISIBILITY return_type * LIBUSB_CALL function_name(params) { ... } + * In the libusb public header, mark all declarations as: + * return_type LIBUSB_CALL function_name(params); + */ +#define API_EXPORTED LIBUSB_CALL DEFAULT_VISIBILITY + +#define DEVICE_DESC_LENGTH 18 + +#define USB_MAXENDPOINTS 32 +#define USB_MAXINTERFACES 32 +#define USB_MAXCONFIG 8 + +struct list_head { + struct list_head *prev, *next; +}; + +/* Get an entry from the list + * ptr - the address of this list_head element in "type" + * type - the data type that contains "member" + * member - the list_head element in "type" + */ +#define list_entry(ptr, type, member) \ + ((type *)((uintptr_t)(ptr) - (uintptr_t)(&((type *)0L)->member))) + +/* Get each entry from a list + * pos - A structure pointer has a "member" element + * head - list head + * member - the list_head element in "pos" + * type - the type of the first parameter + */ +#define list_for_each_entry(pos, head, member, type) \ + for (pos = list_entry((head)->next, type, member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, type, member)) + +#define list_for_each_entry_safe(pos, n, head, 
member, type) \ + for (pos = list_entry((head)->next, type, member), \ + n = list_entry(pos->member.next, type, member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, type, member)) + +#define list_empty(entry) ((entry)->next == (entry)) + +static inline void list_init(struct list_head *entry) +{ + entry->prev = entry->next = entry; +} + +static inline void list_add(struct list_head *entry, struct list_head *head) +{ + entry->next = head->next; + entry->prev = head; + + head->next->prev = entry; + head->next = entry; +} + +static inline void list_add_tail(struct list_head *entry, + struct list_head *head) +{ + entry->next = head; + entry->prev = head->prev; + + head->prev->next = entry; + head->prev = entry; +} + +static inline void list_del(struct list_head *entry) +{ + entry->next->prev = entry->prev; + entry->prev->next = entry->next; +} + +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *mptr = (ptr); \ + (type *)( (char *)mptr - offsetof(type,member) );}) + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define TIMESPEC_IS_SET(ts) ((ts)->tv_sec != 0 || (ts)->tv_nsec != 0) + +enum usbi_log_level { + LOG_LEVEL_DEBUG, + LOG_LEVEL_INFO, + LOG_LEVEL_WARNING, + LOG_LEVEL_ERROR, +}; + +void usbi_log(struct libusb_context *ctx, enum usbi_log_level level, + const char *function, const char *format, ...); + +void usbi_log_v(struct libusb_context *ctx, enum usbi_log_level level, + const char *function, const char *format, va_list args); + +#if !defined(_MSC_VER) || _MSC_VER >= 1400 + +#ifdef ENABLE_LOGGING +#define _usbi_log(ctx, level, ...) usbi_log(ctx, level, __FUNCTION__, __VA_ARGS__) +#else +#define _usbi_log(ctx, level, ...) do { (void)(ctx); } while(0) +#endif + +#ifdef ENABLE_DEBUG_LOGGING +#define usbi_dbg(...) _usbi_log(NULL, LOG_LEVEL_DEBUG, __VA_ARGS__) +#else +#define usbi_dbg(...) do {} while(0) +#endif + +#define usbi_info(ctx, ...) 
_usbi_log(ctx, LOG_LEVEL_INFO, __VA_ARGS__) +#define usbi_warn(ctx, ...) _usbi_log(ctx, LOG_LEVEL_WARNING, __VA_ARGS__) +#define usbi_err(ctx, ...) _usbi_log(ctx, LOG_LEVEL_ERROR, __VA_ARGS__) + +#else /* !defined(_MSC_VER) || _MSC_VER >= 1400 */ + +/* Old MS compilers don't support variadic macros. The code is simple, so we + * repeat it for each loglevel. Note that the debug case is special. + * + * Support for variadic macros was introduced in Visual C++ 2005. + * http://msdn.microsoft.com/en-us/library/ms177415%28v=VS.80%29.aspx + */ + +static inline void usbi_info(struct libusb_context *ctx, const char *fmt, ...) +{ +#ifdef ENABLE_LOGGING + va_list args; + va_start(args, fmt); + usbi_log_v(ctx, LOG_LEVEL_INFO, "", fmt, args); + va_end(args); +#else + (void)ctx; +#endif +} + +static inline void usbi_warn(struct libusb_context *ctx, const char *fmt, ...) +{ +#ifdef ENABLE_LOGGING + va_list args; + va_start(args, fmt); + usbi_log_v(ctx, LOG_LEVEL_WARNING, "", fmt, args); + va_end(args); +#else + (void)ctx; +#endif +} + +static inline void usbi_err(struct libusb_context *ctx, const char *fmt, ...) +{ +#ifdef ENABLE_LOGGING + va_list args; + va_start(args, fmt); + usbi_log_v(ctx, LOG_LEVEL_ERROR, "", fmt, args); + va_end(args); +#else + (void)ctx; +#endif +} + +static inline void usbi_dbg(const char *fmt, ...) 
+{ +#ifdef ENABLE_DEBUG_LOGGING + va_list args; + va_start(args, fmt); + usbi_log_v(NULL, LOG_LEVEL_DEBUG, "", fmt, args); + va_end(args); +#else + (void)fmt; +#endif +} + +#endif /* !defined(_MSC_VER) || _MSC_VER >= 1400 */ + +#define USBI_GET_CONTEXT(ctx) if (!(ctx)) (ctx) = usbi_default_context +#define DEVICE_CTX(dev) ((dev)->ctx) +#define HANDLE_CTX(handle) (DEVICE_CTX((handle)->dev)) +#define TRANSFER_CTX(transfer) (HANDLE_CTX((transfer)->dev_handle)) +#define ITRANSFER_CTX(transfer) \ + (TRANSFER_CTX(USBI_TRANSFER_TO_LIBUSB_TRANSFER(transfer))) + +#define IS_EPIN(ep) (0 != ((ep) & LIBUSB_ENDPOINT_IN)) +#define IS_EPOUT(ep) (!IS_EPIN(ep)) +#define IS_XFERIN(xfer) (0 != ((xfer)->endpoint & LIBUSB_ENDPOINT_IN)) +#define IS_XFEROUT(xfer) (!IS_XFERIN(xfer)) + +/* Internal abstractions for thread synchronization and poll */ +#if defined(THREADS_POSIX) +#include +#elif defined(OS_WINDOWS) +#include +#endif + +#if defined(OS_LINUX) || defined(OS_DARWIN) || defined(OS_OPENBSD) +#include +#include +#elif defined(OS_WINDOWS) +#include +#endif + +#if defined(OS_WINDOWS) && !defined(__GCC__) +#undef HAVE_GETTIMEOFDAY +int usbi_gettimeofday(struct timeval *tp, void *tzp); +#define LIBUSB_GETTIMEOFDAY_WIN32 +#define HAVE_USBI_GETTIMEOFDAY +#else +#ifdef HAVE_GETTIMEOFDAY +#define usbi_gettimeofday(tv, tz) gettimeofday((tv), (tz)) +#define HAVE_USBI_GETTIMEOFDAY +#endif +#endif + +extern struct libusb_context *usbi_default_context; + +struct libusb_context { + int debug; + int debug_fixed; + + /* internal control pipe, used for interrupting event handling when + * something needs to modify poll fds. */ + int ctrl_pipe[2]; + + struct list_head usb_devs; + usbi_mutex_t usb_devs_lock; + + /* A list of open handles. Backends are free to traverse this if required. 
+ */ + struct list_head open_devs; + usbi_mutex_t open_devs_lock; + + /* A list of registered hotplug callbacks */ + struct list_head hotplug_cbs; + usbi_mutex_t hotplug_cbs_lock; + int hotplug_pipe[2]; + + /* this is a list of in-flight transfer handles, sorted by timeout + * expiration. URBs to timeout the soonest are placed at the beginning of + * the list, URBs that will time out later are placed after, and urbs with + * infinite timeout are always placed at the very end. */ + struct list_head flying_transfers; + usbi_mutex_t flying_transfers_lock; + + /* list of poll fds */ + struct list_head pollfds; + usbi_mutex_t pollfds_lock; + + /* a counter that is set when we want to interrupt event handling, in order + * to modify the poll fd set. and a lock to protect it. */ + unsigned int pollfd_modify; + usbi_mutex_t pollfd_modify_lock; + + /* user callbacks for pollfd changes */ + libusb_pollfd_added_cb fd_added_cb; + libusb_pollfd_removed_cb fd_removed_cb; + void *fd_cb_user_data; + + /* ensures that only one thread is handling events at any one time */ + usbi_mutex_t events_lock; + + /* used to see if there is an active thread doing event handling */ + int event_handler_active; + + /* used to wait for event completion in threads other than the one that is + * event handling */ + usbi_mutex_t event_waiters_lock; + usbi_cond_t event_waiters_cond; + +#ifdef USBI_TIMERFD_AVAILABLE + /* used for timeout handling, if supported by OS. 
+ * this timerfd is maintained to trigger on the next pending timeout */ + int timerfd; +#endif + + struct list_head list; +}; + +#ifdef USBI_TIMERFD_AVAILABLE +#define usbi_using_timerfd(ctx) ((ctx)->timerfd >= 0) +#else +#define usbi_using_timerfd(ctx) (0) +#endif + +struct libusb_device { + /* lock protects refcnt, everything else is finalized at initialization + * time */ + usbi_mutex_t lock; + int refcnt; + + struct libusb_context *ctx; + + uint8_t bus_number; + uint8_t device_address; + uint8_t num_configurations; + enum libusb_speed speed; + + struct list_head list; + unsigned long session_data; + + struct libusb_device_descriptor device_descriptor; + int attached; + + unsigned char os_priv +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + [] /* valid C99 code */ +#else + [0] /* non-standard, but usually working code */ +#endif + ; +}; + +struct libusb_device_handle { + /* lock protects claimed_interfaces */ + usbi_mutex_t lock; + unsigned long claimed_interfaces; + + struct list_head list; + struct libusb_device *dev; + unsigned char os_priv +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + [] /* valid C99 code */ +#else + [0] /* non-standard, but usually working code */ +#endif + ; +}; + +enum { + USBI_CLOCK_MONOTONIC, + USBI_CLOCK_REALTIME +}; + +/* in-memory transfer layout: + * + * 1. struct usbi_transfer + * 2. struct libusb_transfer (which includes iso packets) [variable size] + * 3. os private data [variable size] + * + * from a libusb_transfer, you can get the usbi_transfer by rewinding the + * appropriate number of bytes. + * the usbi_transfer includes the number of allocated packets, so you can + * determine the size of the transfer and hence the start and length of the + * OS-private data. 
+ */ + +struct usbi_transfer { + int num_iso_packets; + struct list_head list; + struct timeval timeout; + int transferred; + uint8_t flags; + + /* this lock is held during libusb_submit_transfer() and + * libusb_cancel_transfer() (allowing the OS backend to prevent duplicate + * cancellation, submission-during-cancellation, etc). the OS backend + * should also take this lock in the handle_events path, to prevent the user + * cancelling the transfer from another thread while you are processing + * its completion (presumably there would be races within your OS backend + * if this were possible). */ + usbi_mutex_t lock; +}; + +enum usbi_transfer_flags { + /* The transfer has timed out */ + USBI_TRANSFER_TIMED_OUT = 1 << 0, + + /* Set by backend submit_transfer() if the OS handles timeout */ + USBI_TRANSFER_OS_HANDLES_TIMEOUT = 1 << 1, + + /* Cancellation was requested via libusb_cancel_transfer() */ + USBI_TRANSFER_CANCELLING = 1 << 2, + + /* Operation on the transfer failed because the device disappeared */ + USBI_TRANSFER_DEVICE_DISAPPEARED = 1 << 3, + + /* Set by backend submit_transfer() if the fds in use were updated */ + USBI_TRANSFER_UPDATED_FDS = 1 << 4, +}; + +#define USBI_TRANSFER_TO_LIBUSB_TRANSFER(transfer) \ + ((struct libusb_transfer *)(((unsigned char *)(transfer)) \ + + sizeof(struct usbi_transfer))) +#define LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer) \ + ((struct usbi_transfer *)(((unsigned char *)(transfer)) \ + - sizeof(struct usbi_transfer))) + +static inline void *usbi_transfer_get_os_priv(struct usbi_transfer *transfer) +{ + return ((unsigned char *)transfer) + sizeof(struct usbi_transfer) + + sizeof(struct libusb_transfer) + + (transfer->num_iso_packets + * sizeof(struct libusb_iso_packet_descriptor)); +} + +/* bus structures */ + +/* All standard descriptors have these 2 fields in common */ +struct usb_descriptor_header { + uint8_t bLength; + uint8_t bDescriptorType; +}; + +/* shared data and functions */ + +int usbi_io_init(struct 
libusb_context *ctx); +void usbi_io_exit(struct libusb_context *ctx); + +struct libusb_device *usbi_alloc_device(struct libusb_context *ctx, + unsigned long session_id); +struct libusb_device *usbi_get_device_by_session_id(struct libusb_context *ctx, + unsigned long session_id); +int usbi_sanitize_device(struct libusb_device *dev); +void usbi_handle_disconnect(struct libusb_device_handle *handle); + +int usbi_handle_transfer_completion(struct usbi_transfer *itransfer, + enum libusb_transfer_status status); +int usbi_handle_transfer_cancellation(struct usbi_transfer *transfer); + +int usbi_parse_descriptor(const unsigned char *source, const char *descriptor, + void *dest, int host_endian); +int usbi_device_cache_descriptor(libusb_device *dev); +int usbi_get_config_index_by_value(struct libusb_device *dev, + uint8_t bConfigurationValue, int *idx); + +void usbi_connect_device (struct libusb_device *dev); +void usbi_disconnect_device (struct libusb_device *dev); + +/* polling */ + +struct usbi_pollfd { + /* must come first */ + struct libusb_pollfd pollfd; + + struct list_head list; +}; + +int usbi_add_pollfd(struct libusb_context *ctx, int fd, short events); +void usbi_remove_pollfd(struct libusb_context *ctx, int fd); +void usbi_fd_notification(struct libusb_context *ctx); + +/* device discovery */ + +/* we traverse usbfs without knowing how many devices we are going to find. + * so we create this discovered_devs model which is similar to a linked-list + * which grows when required. it can be freed once discovery has completed, + * eliminating the need for a list node in the libusb_device structure + * itself. 
*/ +struct discovered_devs { + size_t len; + size_t capacity; + struct libusb_device *devices +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) + [] /* valid C99 code */ +#else + [0] /* non-standard, but usually working code */ +#endif + ; +}; + +struct discovered_devs *discovered_devs_append( + struct discovered_devs *discdevs, struct libusb_device *dev); + +/* OS abstraction */ + +/* This is the interface that OS backends need to implement. + * All fields are mandatory, except ones explicitly noted as optional. */ +struct usbi_os_backend { + /* A human-readable name for your backend, e.g. "Linux usbfs" */ + const char *name; + + /* Perform initialization of your backend. You might use this function + * to determine specific capabilities of the system, allocate required + * data structures for later, etc. + * + * This function is called when a libusb user initializes the library + * prior to use. + * + * Return 0 on success, or a LIBUSB_ERROR code on failure. + */ + int (*init)(struct libusb_context *ctx); + + /* Deinitialization. Optional. This function should destroy anything + * that was set up by init. + * + * This function is called when the user deinitializes the library. + */ + void (*exit)(void); + + /* Enumerate all the USB devices on the system, returning them in a list + * of discovered devices. + * + * Your implementation should enumerate all devices on the system, + * regardless of whether they have been seen before or not. + * + * When you have found a device, compute a session ID for it. The session + * ID should uniquely represent that particular device for that particular + * connection session since boot (i.e. if you disconnect and reconnect a + * device immediately after, it should be assigned a different session ID). + * If your OS cannot provide a unique session ID as described above, + * presenting a session ID of (bus_number << 8 | device_address) should + * be sufficient. 
Bus numbers and device addresses wrap and get reused, + * but that is an unlikely case. + * + * After computing a session ID for a device, call + * usbi_get_device_by_session_id(). This function checks if libusb already + * knows about the device, and if so, it provides you with a libusb_device + * structure for it. + * + * If usbi_get_device_by_session_id() returns NULL, it is time to allocate + * a new device structure for the device. Call usbi_alloc_device() to + * obtain a new libusb_device structure with reference count 1. Populate + * the bus_number and device_address attributes of the new device, and + * perform any other internal backend initialization you need to do. At + * this point, you should be ready to provide device descriptors and so + * on through the get_*_descriptor functions. Finally, call + * usbi_sanitize_device() to perform some final sanity checks on the + * device. Assuming all of the above succeeded, we can now continue. + * If any of the above failed, remember to unreference the device that + * was returned by usbi_alloc_device(). + * + * At this stage we have a populated libusb_device structure (either one + * that was found earlier, or one that we have just allocated and + * populated). This can now be added to the discovered devices list + * using discovered_devs_append(). Note that discovered_devs_append() + * may reallocate the list, returning a new location for it, and also + * note that reallocation can fail. Your backend should handle these + * error conditions appropriately. + * + * This function should not generate any bus I/O and should not block. + * If I/O is required (e.g. reading the active configuration value), it is + * OK to ignore these suggestions :) + * + * This function is executed when the user wishes to retrieve a list + * of USB devices connected to the system. + * + * Return 0 on success, or a LIBUSB_ERROR code on failure. 
+ */ + int (*get_device_list)(struct libusb_context *ctx, + struct discovered_devs **discdevs); + + /* Open a device for I/O and other USB operations. The device handle + * is preallocated for you, you can retrieve the device in question + * through handle->dev. + * + * Your backend should allocate any internal resources required for I/O + * and other operations so that those operations can happen (hopefully) + * without hiccup. This is also a good place to inform libusb that it + * should monitor certain file descriptors related to this device - + * see the usbi_add_pollfd() function. + * + * This function should not generate any bus I/O and should not block. + * + * This function is called when the user attempts to obtain a device + * handle for a device. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_ACCESS if the user has insufficient permissions + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since + * discovery + * - another LIBUSB_ERROR code on other failure + * + * Do not worry about freeing the handle on failed open, the upper layers + * do this for you. + */ + int (*open)(struct libusb_device_handle *handle); + + /* Close a device such that the handle cannot be used again. Your backend + * should destroy any resources that were allocated in the open path. + * This may also be a good place to call usbi_remove_pollfd() to inform + * libusb of any file descriptors associated with this device that should + * no longer be monitored. + * + * This function is called when the user closes a device handle. + */ + void (*close)(struct libusb_device_handle *handle); + + /* Retrieve the device descriptor from a device. + * + * The descriptor should be retrieved from memory, NOT via bus I/O to the + * device. This means that you may have to cache it in a private structure + * during get_device_list enumeration. 
Alternatively, you may be able + * to retrieve it from a kernel interface (some Linux setups can do this) + * still without generating bus I/O. + * + * This function is expected to write DEVICE_DESC_LENGTH (18) bytes into + * buffer, which is guaranteed to be big enough. + * + * This function is called when sanity-checking a device before adding + * it to the list of discovered devices, and also when the user requests + * to read the device descriptor. + * + * This function is expected to return the descriptor in bus-endian format + * (LE). If it returns the multi-byte values in host-endian format, + * set the host_endian output parameter to "1". + * + * Return 0 on success or a LIBUSB_ERROR code on failure. + */ + int (*get_device_descriptor)(struct libusb_device *device, + unsigned char *buffer, int *host_endian); + + /* Get the ACTIVE configuration descriptor for a device. + * + * The descriptor should be retrieved from memory, NOT via bus I/O to the + * device. This means that you may have to cache it in a private structure + * during get_device_list enumeration. You may also have to keep track + * of which configuration is active when the user changes it. + * + * This function is expected to write len bytes of data into buffer, which + * is guaranteed to be big enough. If you can only do a partial write, + * return an error code. + * + * This function is expected to return the descriptor in bus-endian format + * (LE). If it returns the multi-byte values in host-endian format, + * set the host_endian output parameter to "1". + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if the device is in unconfigured state + * - another LIBUSB_ERROR code on other failure + */ + int (*get_active_config_descriptor)(struct libusb_device *device, + unsigned char *buffer, size_t len, int *host_endian); + + /* Get a specific configuration descriptor for a device. + * + * The descriptor should be retrieved from memory, NOT via bus I/O to the + * device. 
This means that you may have to cache it in a private structure + * during get_device_list enumeration. + * + * The requested descriptor is expressed as a zero-based index (i.e. 0 + * indicates that we are requesting the first descriptor). The index does + * not (necessarily) equal the bConfigurationValue of the configuration + * being requested. + * + * This function is expected to write len bytes of data into buffer, which + * is guaranteed to be big enough. If you can only do a partial write, + * return an error code. + * + * This function is expected to return the descriptor in bus-endian format + * (LE). If it returns the multi-byte values in host-endian format, + * set the host_endian output parameter to "1". + * + * Return 0 on success or a LIBUSB_ERROR code on failure. + */ + int (*get_config_descriptor)(struct libusb_device *device, + uint8_t config_index, unsigned char *buffer, size_t len, + int *host_endian); + + /* Get the bConfigurationValue for the active configuration for a device. + * Optional. This should only be implemented if you can retrieve it from + * cache (don't generate I/O). + * + * If you cannot retrieve this from cache, either do not implement this + * function, or return LIBUSB_ERROR_NOT_SUPPORTED. This will cause + * libusb to retrieve the information through a standard control transfer. + * + * This function must be non-blocking. + * Return: + * - 0 on success + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - LIBUSB_ERROR_NOT_SUPPORTED if the value cannot be retrieved without + * blocking + * - another LIBUSB_ERROR code on other failure. + */ + int (*get_configuration)(struct libusb_device_handle *handle, int *config); + + /* Set the active configuration for a device. + * + * A configuration value of -1 should put the device in unconfigured state. + * + * This function can block. 
+ * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if the configuration does not exist + * - LIBUSB_ERROR_BUSY if interfaces are currently claimed (and hence + * configuration cannot be changed) + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - another LIBUSB_ERROR code on other failure. + */ + int (*set_configuration)(struct libusb_device_handle *handle, int config); + + /* Claim an interface. When claimed, the application can then perform + * I/O to an interface's endpoints. + * + * This function should not generate any bus I/O and should not block. + * Interface claiming is a logical operation that simply ensures that + * no other drivers/applications are using the interface, and after + * claiming, no other drivers/applications can use the interface because + * we now "own" it. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if the interface does not exist + * - LIBUSB_ERROR_BUSY if the interface is in use by another driver/app + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - another LIBUSB_ERROR code on other failure + */ + int (*claim_interface)(struct libusb_device_handle *handle, int interface_number); + + /* Release a previously claimed interface. + * + * This function should also generate a SET_INTERFACE control request, + * resetting the alternate setting of that interface to 0. It's OK for + * this function to block as a result. + * + * You will only ever be asked to release an interface which was + * successfully claimed earlier. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - another LIBUSB_ERROR code on other failure + */ + int (*release_interface)(struct libusb_device_handle *handle, int interface_number); + + /* Set the alternate setting for an interface. 
+ * + * You will only ever be asked to set the alternate setting for an + * interface which was successfully claimed earlier. + * + * It's OK for this function to block. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if the alternate setting does not exist + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - another LIBUSB_ERROR code on other failure + */ + int (*set_interface_altsetting)(struct libusb_device_handle *handle, + int interface_number, int altsetting); + + /* Clear a halt/stall condition on an endpoint. + * + * It's OK for this function to block. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if the endpoint does not exist + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - another LIBUSB_ERROR code on other failure + */ + int (*clear_halt)(struct libusb_device_handle *handle, + unsigned char endpoint); + + /* Perform a USB port reset to reinitialize a device. + * + * If possible, the handle should still be usable after the reset + * completes, assuming that the device descriptors did not change during + * reset and all previous interface state can be restored. + * + * If something changes, or you cannot easily locate/verify the reset + * device, return LIBUSB_ERROR_NOT_FOUND. This prompts the application + * to close the old handle and re-enumerate the device. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if re-enumeration is required, or if the device + * has been disconnected since it was opened + * - another LIBUSB_ERROR code on other failure + */ + int (*reset_device)(struct libusb_device_handle *handle); + + /* Determine if a kernel driver is active on an interface. Optional. + * + * The presence of a kernel driver on an interface indicates that any + * calls to claim_interface would fail with the LIBUSB_ERROR_BUSY code. 
+ * + * Return: + * - 0 if no driver is active + * - 1 if a driver is active + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - another LIBUSB_ERROR code on other failure + */ + int (*kernel_driver_active)(struct libusb_device_handle *handle, + int interface_number); + + /* Detach a kernel driver from an interface. Optional. + * + * After detaching a kernel driver, the interface should be available + * for claim. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if no kernel driver was active + * - LIBUSB_ERROR_INVALID_PARAM if the interface does not exist + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - another LIBUSB_ERROR code on other failure + */ + int (*detach_kernel_driver)(struct libusb_device_handle *handle, + int interface_number); + + /* Attach a kernel driver to an interface. Optional. + * + * Reattach a kernel driver to the device. + * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NOT_FOUND if no kernel driver was active + * - LIBUSB_ERROR_INVALID_PARAM if the interface does not exist + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected since it + * was opened + * - LIBUSB_ERROR_BUSY if a program or driver has claimed the interface, + * preventing reattachment + * - another LIBUSB_ERROR code on other failure + */ + int (*attach_kernel_driver)(struct libusb_device_handle *handle, + int interface_number); + + /* Destroy a device. Optional. + * + * This function is called when the last reference to a device is + * destroyed. It should free any resources allocated in the get_device_list + * path. + */ + void (*destroy_device)(struct libusb_device *dev); + + /* Submit a transfer. Your implementation should take the transfer, + * morph it into whatever form your platform requires, and submit it + * asynchronously. + * + * This function must not block. 
+ * + * Return: + * - 0 on success + * - LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * - another LIBUSB_ERROR code on other failure + */ + int (*submit_transfer)(struct usbi_transfer *itransfer); + + /* Cancel a previously submitted transfer. + * + * This function must not block. The transfer cancellation must complete + * later, resulting in a call to usbi_handle_transfer_cancellation() + * from the context of handle_events. + */ + int (*cancel_transfer)(struct usbi_transfer *itransfer); + + /* Clear a transfer as if it has completed or cancelled, but do not + * report any completion/cancellation to the library. You should free + * all private data from the transfer as if you were just about to report + * completion or cancellation. + * + * This function might seem a bit out of place. It is used when libusb + * detects a disconnected device - it calls this function for all pending + * transfers before reporting completion (with the disconnect code) to + * the user. Maybe we can improve upon this internal interface in future. + */ + void (*clear_transfer_priv)(struct usbi_transfer *itransfer); + + /* Handle any pending events. This involves monitoring any active + * transfers and processing their completion or cancellation. + * + * The function is passed an array of pollfd structures (size nfds) + * as a result of the poll() system call. The num_ready parameter + * indicates the number of file descriptors that have reported events + * (i.e. the poll() return value). This should be enough information + * for you to determine which actions need to be taken on the currently + * active transfers. + * + * For any cancelled transfers, call usbi_handle_transfer_cancellation(). + * For completed transfers, call usbi_handle_transfer_completion(). + * For control/bulk/interrupt transfers, populate the "transferred" + * element of the appropriate usbi_transfer structure before calling the + * above functions. 
For isochronous transfers, populate the status and + * transferred fields of the iso packet descriptors of the transfer. + * + * This function should also be able to detect disconnection of the + * device, reporting that situation with usbi_handle_disconnect(). + * + * When processing an event related to a transfer, you probably want to + * take usbi_transfer.lock to prevent races. See the documentation for + * the usbi_transfer structure. + * + * Return 0 on success, or a LIBUSB_ERROR code on failure. + */ + int (*handle_events)(struct libusb_context *ctx, + struct pollfd *fds, POLL_NFDS_TYPE nfds, int num_ready); + + /* Get time from specified clock. At least two clocks must be implemented + by the backend: USBI_CLOCK_REALTIME, and USBI_CLOCK_MONOTONIC. + + Description of clocks: + USBI_CLOCK_REALTIME : clock returns time since system epoch. + USBI_CLOCK_MONOTONIC: clock returns time since unspecified start + time (usually boot). + */ + int (*clock_gettime)(int clkid, struct timespec *tp); + +#ifdef USBI_TIMERFD_AVAILABLE + /* clock ID of the clock that should be used for timerfd */ + clockid_t (*get_timerfd_clockid)(void); +#endif + + /* Number of bytes to reserve for per-device private backend data. + * This private data area is accessible through the "os_priv" field of + * struct libusb_device. */ + size_t device_priv_size; + + /* Number of bytes to reserve for per-handle private backend data. + * This private data area is accessible through the "os_priv" field of + * struct libusb_device_handle. */ + size_t device_handle_priv_size; + + /* Number of bytes to reserve for per-transfer private backend data. + * This private data area is accessible by calling + * usbi_transfer_get_os_priv() on the appropriate usbi_transfer instance. + */ + size_t transfer_priv_size; + + /* Number of additional bytes for os_priv for each iso packet. + * Can your backend use this? */ + /* FIXME: linux can't use this any more. 
if other OS's cannot either, + * then remove this */ + size_t add_iso_packet_size; +}; + +extern const struct usbi_os_backend * const usbi_backend; + +extern const struct usbi_os_backend linux_usbfs_backend; +extern const struct usbi_os_backend darwin_backend; +extern const struct usbi_os_backend openbsd_backend; +extern const struct usbi_os_backend windows_backend; + +extern struct list_head active_contexts_list; +extern usbi_mutex_static_t active_contexts_lock; + +#endif + diff --git a/compat/libusb-1.0/libusb/os/darwin_usb.c b/compat/libusb-1.0/libusb/os/darwin_usb.c new file mode 100644 index 0000000..b31e818 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/darwin_usb.c @@ -0,0 +1,1788 @@ +/* -*- Mode: C; indent-tabs-mode:nil -*- */ +/* + * darwin backend for libusb 1.0 + * Copyright (C) 2008-2013 Nathan Hjelm + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1060 + #include +#endif + +#include "darwin_usb.h" + +/* async event thread */ +static pthread_mutex_t libusb_darwin_at_mutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t libusb_darwin_at_cond = PTHREAD_COND_INITIALIZER; + +static clock_serv_t clock_realtime; +static clock_serv_t clock_monotonic; + +static CFRunLoopRef libusb_darwin_acfl = NULL; /* event cf loop */ +static volatile int32_t initCount = 0; + +/* async event thread */ +static pthread_t libusb_darwin_at; + +static int darwin_get_config_descriptor(struct libusb_device *dev, uint8_t config_index, unsigned char *buffer, size_t len, int *host_endian); +static int darwin_claim_interface(struct libusb_device_handle *dev_handle, int iface); +static int darwin_release_interface(struct libusb_device_handle *dev_handle, int iface); +static int darwin_reset_device(struct libusb_device_handle *dev_handle); +static void darwin_async_io_callback (void *refcon, IOReturn result, void *arg0); + +static int darwin_scan_devices(struct libusb_context *ctx); +static int process_new_device (struct libusb_context *ctx, usb_device_t **device, UInt32 locationID); + +#if defined(ENABLE_LOGGING) +static const char *darwin_error_str (int result) { + switch (result) { + case kIOReturnSuccess: + return "no error"; + case kIOReturnNotOpen: + return "device not opened for exclusive access"; + case kIOReturnNoDevice: + return "no connection to an IOService"; + case kIOUSBNoAsyncPortErr: + return "no async port has been opened for interface"; + case kIOReturnExclusiveAccess: + return "another process 
has device opened for exclusive access"; + case kIOUSBPipeStalled: + return "pipe is stalled"; + case kIOReturnError: + return "could not establish a connection to the Darwin kernel"; + case kIOUSBTransactionTimeout: + return "transaction timed out"; + case kIOReturnBadArgument: + return "invalid argument"; + case kIOReturnAborted: + return "transaction aborted"; + case kIOReturnNotResponding: + return "device not responding"; + case kIOReturnOverrun: + return "data overrun"; + case kIOReturnCannotWire: + return "physical memory can not be wired down"; + case kIOReturnNoResources: + return "out of resources"; + default: + return "unknown error"; + } +} +#endif + +static int darwin_to_libusb (int result) { + switch (result) { + case kIOReturnUnderrun: + case kIOReturnSuccess: + return LIBUSB_SUCCESS; + case kIOReturnNotOpen: + case kIOReturnNoDevice: + return LIBUSB_ERROR_NO_DEVICE; + case kIOReturnExclusiveAccess: + return LIBUSB_ERROR_ACCESS; + case kIOUSBPipeStalled: + return LIBUSB_ERROR_PIPE; + case kIOReturnBadArgument: + return LIBUSB_ERROR_INVALID_PARAM; + case kIOUSBTransactionTimeout: + return LIBUSB_ERROR_TIMEOUT; + case kIOReturnNotResponding: + case kIOReturnAborted: + case kIOReturnError: + case kIOUSBNoAsyncPortErr: + default: + return LIBUSB_ERROR_OTHER; + } +} + + +static int ep_to_pipeRef(struct libusb_device_handle *dev_handle, uint8_t ep, uint8_t *pipep, uint8_t *ifcp) { + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + + /* current interface */ + struct darwin_interface *cInterface; + + int8_t i, iface; + + usbi_info (HANDLE_CTX(dev_handle), "converting ep address 0x%02x to pipeRef and interface", ep); + + for (iface = 0 ; iface < USB_MAXINTERFACES ; iface++) { + cInterface = &priv->interfaces[iface]; + + if (dev_handle->claimed_interfaces & (1 << iface)) { + for (i = 0 ; i < cInterface->num_endpoints ; i++) { + if (cInterface->endpoint_addrs[i] == ep) { + *pipep = i + 1; + *ifcp = iface; + 
usbi_info (HANDLE_CTX(dev_handle), "pipe %d on interface %d matches", *pipep, *ifcp); + return 0; + } + } + } + } + + /* No pipe found with the correct endpoint address */ + usbi_warn (HANDLE_CTX(dev_handle), "no pipeRef found with endpoint address 0x%02x.", ep); + + return -1; +} + +static int usb_setup_device_iterator (io_iterator_t *deviceIterator, UInt32 location) { + CFMutableDictionaryRef matchingDict = IOServiceMatching(kIOUSBDeviceClassName); + + if (!matchingDict) + return kIOReturnError; + + if (location) { + CFMutableDictionaryRef propertyMatchDict = CFDictionaryCreateMutable(kCFAllocatorDefault, 0, + &kCFTypeDictionaryKeyCallBacks, + &kCFTypeDictionaryValueCallBacks); + + if (propertyMatchDict) { + /* there are no unsigned CFNumber types so treat the value as signed. the os seems to do this + internally (CFNumberType of locationID is 3) */ + CFTypeRef locationCF = CFNumberCreate (NULL, kCFNumberSInt32Type, &location); + + CFDictionarySetValue (propertyMatchDict, CFSTR(kUSBDevicePropertyLocationID), locationCF); + /* release our reference to the CFNumber (CFDictionarySetValue retains it) */ + CFRelease (locationCF); + + CFDictionarySetValue (matchingDict, CFSTR(kIOPropertyMatchKey), propertyMatchDict); + /* release our reference to the CFMutableDictionaryRef (CFDictionarySetValue retains it) */ + CFRelease (propertyMatchDict); + } + /* else we can still proceed as long as the caller accounts for the possibility of other devices in the iterator */ + } + + return IOServiceGetMatchingServices(kIOMasterPortDefault, matchingDict, deviceIterator); +} + +static usb_device_t **usb_get_next_device (io_iterator_t deviceIterator, UInt32 *locationp) { + io_cf_plugin_ref_t *plugInInterface = NULL; + usb_device_t **device; + io_service_t usbDevice; + long result; + SInt32 score; + + if (!IOIteratorIsValid (deviceIterator)) + return NULL; + + + while ((usbDevice = IOIteratorNext(deviceIterator))) { + result = IOCreatePlugInInterfaceForService(usbDevice, 
kIOUSBDeviceUserClientTypeID, + kIOCFPlugInInterfaceID, &plugInInterface, + &score); + + /* we are done with the usb_device_t */ + (void)IOObjectRelease(usbDevice); + if (kIOReturnSuccess == result && plugInInterface) + break; + + usbi_dbg ("libusb/darwin.c usb_get_next_device: could not set up plugin for service: %s\n", darwin_error_str (result)); + } + + if (!usbDevice) + return NULL; + + (void)(*plugInInterface)->QueryInterface(plugInInterface, CFUUIDGetUUIDBytes(DeviceInterfaceID), + (LPVOID)&device); + /* Use release instead of IODestroyPlugInInterface to avoid stopping IOServices associated with this device */ + (*plugInInterface)->Release (plugInInterface); + + /* get the location from the device */ + if (locationp) + (*(device))->GetLocationID(device, locationp); + + return device; +} + +static void darwin_devices_attached (void *ptr, io_iterator_t add_devices) { + struct libusb_context *ctx; + usb_device_t **device; + UInt32 location; + + usbi_mutex_lock(&active_contexts_lock); + + while ((device = usb_get_next_device (add_devices, &location))) { + /* add this device to each active context's device list */ + list_for_each_entry(ctx, &active_contexts_list, list, struct libusb_context) { + process_new_device (ctx, device, location); + } + + /* release extra reference */ + (*device)->Release (device); + } + + usbi_mutex_unlock(&active_contexts_lock); +} + +static void darwin_devices_detached (void *ptr, io_iterator_t rem_devices) { + struct libusb_device *dev = NULL; + struct libusb_context *ctx; + + io_service_t device; + bool locationValid; + UInt32 location; + CFTypeRef locationCF; + + while ((device = IOIteratorNext (rem_devices)) != 0) { + /* get the location from the i/o registry */ + locationCF = IORegistryEntryCreateCFProperty (device, CFSTR(kUSBDevicePropertyLocationID), kCFAllocatorDefault, 0); + + IOObjectRelease (device); + + if (!locationCF) + continue; + + locationValid = CFGetTypeID(locationCF) == CFNumberGetTypeID() && + 
CFNumberGetValue(locationCF, kCFNumberSInt32Type, &location); + + CFRelease (locationCF); + + if (!locationValid) + continue; + + usbi_mutex_lock(&active_contexts_lock); + + list_for_each_entry(ctx, &active_contexts_list, list, struct libusb_context) { + usbi_dbg ("libusb/darwin.c darwin_devices_detached: notifying context %p of device disconnect", ctx); + + dev = usbi_get_device_by_session_id (ctx, location); + if (!dev) { + continue; + } + + /* signal the core that this device has been disconnected. the core will tear down this device + when the reference count reaches 0 */ + usbi_disconnect_device (dev); + } + + usbi_mutex_unlock(&active_contexts_lock); + } +} + +static void darwin_clear_iterator (io_iterator_t iter) { + io_service_t device; + + while ((device = IOIteratorNext (iter)) != 0) + IOObjectRelease (device); +} + +static void *darwin_event_thread_main (void *arg0) { + IOReturn kresult; + struct libusb_context *ctx = (struct libusb_context *)arg0; + CFRunLoopRef runloop; + + /* Set this thread's name, so it can be seen in the debugger + and crash reports. */ +#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1060 + pthread_setname_np ("org.libusb.device-hotplug"); +#endif + + /* Tell the Objective-C garbage collector about this thread. + This is required because, unlike NSThreads, pthreads are + not automatically registered. Although we don't use + Objective-C, we use CoreFoundation, which does. 
*/ +#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1060 + objc_registerThreadWithCollector(); +#endif + + /* hotplug (device arrival/removal) sources */ + CFRunLoopSourceRef libusb_notification_cfsource; + io_notification_port_t libusb_notification_port; + io_iterator_t libusb_rem_device_iterator; + io_iterator_t libusb_add_device_iterator; + + usbi_info (ctx, "creating hotplug event source"); + + runloop = CFRunLoopGetCurrent (); + CFRetain (runloop); + + /* add the notification port to the run loop */ + libusb_notification_port = IONotificationPortCreate (kIOMasterPortDefault); + libusb_notification_cfsource = IONotificationPortGetRunLoopSource (libusb_notification_port); + CFRunLoopAddSource(runloop, libusb_notification_cfsource, kCFRunLoopDefaultMode); + + /* create notifications for removed devices */ + kresult = IOServiceAddMatchingNotification (libusb_notification_port, kIOTerminatedNotification, + IOServiceMatching(kIOUSBDeviceClassName), + (IOServiceMatchingCallback)darwin_devices_detached, + (void *)ctx, &libusb_rem_device_iterator); + + if (kresult != kIOReturnSuccess) { + usbi_err (ctx, "could not add hotplug event source: %s", darwin_error_str (kresult)); + + pthread_exit (NULL); + } + + /* create notifications for attached devices */ + kresult = IOServiceAddMatchingNotification (libusb_notification_port, kIOFirstMatchNotification, + IOServiceMatching(kIOUSBDeviceClassName), + (IOServiceMatchingCallback)darwin_devices_attached, + (void *)ctx, &libusb_add_device_iterator); + + if (kresult != kIOReturnSuccess) { + usbi_err (ctx, "could not add hotplug event source: %s", darwin_error_str (kresult)); + + pthread_exit (NULL); + } + + /* arm notifiers */ + darwin_clear_iterator (libusb_rem_device_iterator); + darwin_clear_iterator (libusb_add_device_iterator); + + usbi_info (ctx, "darwin event thread ready to receive events"); + + /* signal the main thread that the hotplug runloop has been created. 
*/ + pthread_mutex_lock (&libusb_darwin_at_mutex); + libusb_darwin_acfl = runloop; + pthread_cond_signal (&libusb_darwin_at_cond); + pthread_mutex_unlock (&libusb_darwin_at_mutex); + + /* run the runloop */ + CFRunLoopRun(); + + usbi_info (ctx, "darwin event thread exiting"); + + /* remove the notification cfsource */ + CFRunLoopRemoveSource(runloop, libusb_notification_cfsource, kCFRunLoopDefaultMode); + + /* delete notification port */ + IONotificationPortDestroy (libusb_notification_port); + + /* delete iterators */ + IOObjectRelease (libusb_rem_device_iterator); + IOObjectRelease (libusb_add_device_iterator); + + CFRelease (runloop); + + libusb_darwin_acfl = NULL; + + pthread_exit (NULL); +} + +static int darwin_init(struct libusb_context *ctx) { + host_name_port_t host_self; + int rc; + + rc = darwin_scan_devices (ctx); + if (LIBUSB_SUCCESS != rc) { + return rc; + } + + if (OSAtomicIncrement32Barrier(&initCount) == 1) { + /* create the clocks that will be used */ + + host_self = mach_host_self(); + host_get_clock_service(host_self, CALENDAR_CLOCK, &clock_realtime); + host_get_clock_service(host_self, SYSTEM_CLOCK, &clock_monotonic); + mach_port_deallocate(mach_task_self(), host_self); + + pthread_create (&libusb_darwin_at, NULL, darwin_event_thread_main, (void *)ctx); + + pthread_mutex_lock (&libusb_darwin_at_mutex); + while (!libusb_darwin_acfl) + pthread_cond_wait (&libusb_darwin_at_cond, &libusb_darwin_at_mutex); + pthread_mutex_unlock (&libusb_darwin_at_mutex); + } + + return 0; +} + +static void darwin_exit (void) { + if (OSAtomicDecrement32Barrier(&initCount) == 0) { + mach_port_deallocate(mach_task_self(), clock_realtime); + mach_port_deallocate(mach_task_self(), clock_monotonic); + + /* stop the event runloop and wait for the thread to terminate. 
*/ + CFRunLoopStop (libusb_darwin_acfl); + pthread_join (libusb_darwin_at, NULL); + } +} + +static int darwin_get_device_descriptor(struct libusb_device *dev, unsigned char *buffer, int *host_endian) { + struct darwin_device_priv *priv = (struct darwin_device_priv *)dev->os_priv; + + /* return cached copy */ + memmove (buffer, &(priv->dev_descriptor), DEVICE_DESC_LENGTH); + + *host_endian = 0; + + return 0; +} + +static int get_configuration_index (struct libusb_device *dev, int config_value) { + struct darwin_device_priv *priv = (struct darwin_device_priv *)dev->os_priv; + UInt8 i, numConfig; + IOUSBConfigurationDescriptorPtr desc; + IOReturn kresult; + + /* is there a simpler way to determine the index? */ + kresult = (*(priv->device))->GetNumberOfConfigurations (priv->device, &numConfig); + if (kresult != kIOReturnSuccess) + return darwin_to_libusb (kresult); + + for (i = 0 ; i < numConfig ; i++) { + (*(priv->device))->GetConfigurationDescriptorPtr (priv->device, i, &desc); + + if (desc->bConfigurationValue == config_value) + return i; + } + + /* configuration not found */ + return LIBUSB_ERROR_OTHER; +} + +static int darwin_get_active_config_descriptor(struct libusb_device *dev, unsigned char *buffer, size_t len, int *host_endian) { + struct darwin_device_priv *priv = (struct darwin_device_priv *)dev->os_priv; + int config_index; + + if (0 == priv->active_config) + return LIBUSB_ERROR_NOT_FOUND; + + config_index = get_configuration_index (dev, priv->active_config); + if (config_index < 0) + return config_index; + + return darwin_get_config_descriptor (dev, config_index, buffer, len, host_endian); +} + +static int darwin_get_config_descriptor(struct libusb_device *dev, uint8_t config_index, unsigned char *buffer, size_t len, int *host_endian) { + struct darwin_device_priv *priv = (struct darwin_device_priv *)dev->os_priv; + IOUSBConfigurationDescriptorPtr desc; + IOReturn kresult; + + if (!priv || !priv->device) + return LIBUSB_ERROR_OTHER; + + kresult = 
(*priv->device)->GetConfigurationDescriptorPtr (priv->device, config_index, &desc); + if (kresult == kIOReturnSuccess) { + /* copy descriptor */ + if (libusb_le16_to_cpu(desc->wTotalLength) < len) + len = libusb_le16_to_cpu(desc->wTotalLength); + + memmove (buffer, desc, len); + + /* GetConfigurationDescriptorPtr returns the descriptor in USB bus order */ + *host_endian = 0; + } + + return darwin_to_libusb (kresult); +} + +/* check whether the os has configured the device */ +static int darwin_check_configuration (struct libusb_context *ctx, struct libusb_device *dev, usb_device_t **darwin_device) { + struct darwin_device_priv *priv = (struct darwin_device_priv *)dev->os_priv; + + IOUSBConfigurationDescriptorPtr configDesc; + IOUSBFindInterfaceRequest request; + kern_return_t kresult; + io_iterator_t interface_iterator; + io_service_t firstInterface; + + if (priv->dev_descriptor.bNumConfigurations < 1) { + usbi_err (ctx, "device has no configurations"); + return LIBUSB_ERROR_OTHER; /* no configurations at this speed so we can't use it */ + } + + /* find the first configuration */ + kresult = (*darwin_device)->GetConfigurationDescriptorPtr (darwin_device, 0, &configDesc); + priv->first_config = (kIOReturnSuccess == kresult) ? configDesc->bConfigurationValue : 1; + + /* check if the device is already configured. 
there is probably a better way than iterating over the interfaces + to accomplish this (the trick is we need to avoid a call to GetConfigurations since buggy devices + might lock up on the device request) */ + + /* Setup the Interface Request */ + request.bInterfaceClass = kIOUSBFindInterfaceDontCare; + request.bInterfaceSubClass = kIOUSBFindInterfaceDontCare; + request.bInterfaceProtocol = kIOUSBFindInterfaceDontCare; + request.bAlternateSetting = kIOUSBFindInterfaceDontCare; + + kresult = (*(darwin_device))->CreateInterfaceIterator(darwin_device, &request, &interface_iterator); + if (kresult) + return darwin_to_libusb (kresult); + + /* iterate once */ + firstInterface = IOIteratorNext(interface_iterator); + + /* done with the interface iterator */ + IOObjectRelease(interface_iterator); + + if (firstInterface) { + IOObjectRelease (firstInterface); + + /* device is configured */ + if (priv->dev_descriptor.bNumConfigurations == 1) + /* to avoid problems with some devices get the configurations value from the configuration descriptor */ + priv->active_config = priv->first_config; + else + /* devices with more than one configuration should work with GetConfiguration */ + (*darwin_device)->GetConfiguration (darwin_device, &priv->active_config); + } else + /* not configured */ + priv->active_config = 0; + + usbi_info (ctx, "active config: %u, first config: %u", priv->active_config, priv->first_config); + + return 0; +} + +static int darwin_request_descriptor (usb_device_t **device, UInt8 desc, UInt8 desc_index, void *buffer, size_t buffer_size) { + IOUSBDevRequest req; + + memset (buffer, 0, buffer_size); + + /* Set up request for descriptor. */ + req.bmRequestType = USBmakebmRequestType(kUSBIn, kUSBStandard, kUSBDevice); + req.bRequest = kUSBRqGetDescriptor; + req.wValue = desc << 8; + req.wIndex = desc_index; + req.wLength = buffer_size; + req.pData = buffer; + + return (*device)->DeviceRequest (device, &req); +} + +static int darwin_cache_device_descriptor (struct 
libusb_context *ctx, struct libusb_device *dev, usb_device_t **device) { + struct darwin_device_priv *priv; + int retries = 2, delay = 30000; + int unsuspended = 0, try_unsuspend = 1, try_reconfigure = 1; + int is_open = 0; + int ret = 0, ret2; + UInt8 bDeviceClass; + UInt16 idProduct, idVendor; + + (*device)->GetDeviceClass (device, &bDeviceClass); + (*device)->GetDeviceProduct (device, &idProduct); + (*device)->GetDeviceVendor (device, &idVendor); + + priv = (struct darwin_device_priv *)dev->os_priv; + + /* try to open the device (we can usually continue even if this fails) */ + is_open = ((*device)->USBDeviceOpenSeize(device) == kIOReturnSuccess); + + /**** retrieve device descriptor ****/ + do { + /* according to Apple's documentation the device must be open for DeviceRequest but we may not be able to open some + * devices and Apple's USB Prober doesn't bother to open the device before issuing a descriptor request. Still, + * to follow the spec as closely as possible, try opening the device */ + ret = darwin_request_descriptor (device, kUSBDeviceDesc, 0, &priv->dev_descriptor, sizeof(priv->dev_descriptor)); + + + if (kIOReturnOverrun == ret && kUSBDeviceDesc == priv->dev_descriptor.bDescriptorType) + /* received an overrun error but we still received a device descriptor */ + ret = kIOReturnSuccess; + + if (kIOUSBVendorIDAppleComputer == idVendor) { + /* NTH: don't bother retrying or unsuspending Apple devices */ + break; + } + + if (kIOReturnSuccess == ret && (0 == priv->dev_descriptor.bNumConfigurations || + 0 == priv->dev_descriptor.bcdUSB)) { + /* work around for incorrectly configured devices */ + if (try_reconfigure && is_open) { + usbi_dbg("descriptor appears to be invalid. 
resetting configuration before trying again..."); + + /* set the first configuration */ + (*device)->SetConfiguration(device, 1); + + /* don't try to reconfigure again */ + try_reconfigure = 0; + } + + ret = kIOUSBPipeStalled; + } + + if (kIOReturnSuccess != ret && is_open && try_unsuspend) { + /* device may be suspended. unsuspend it and try again */ +#if DeviceVersion >= 320 + UInt32 info; + + /* IOUSBFamily 320+ provides a way to detect device suspension but earlier versions do not */ + (void)(*device)->GetUSBDeviceInformation (device, &info); + + try_unsuspend = info & (1 << kUSBInformationDeviceIsSuspendedBit); +#endif + + if (try_unsuspend) { + /* resume the device */ + ret2 = (*device)->USBDeviceSuspend (device, 0); + if (kIOReturnSuccess != ret2) { + /* prevent log spew from poorly behaving devices. this indicates the + os actually had trouble communicating with the device */ + usbi_dbg("could not retrieve device descriptor. failed to unsuspend: %s",darwin_error_str(ret2)); + } else + unsuspended = 1; + + try_unsuspend = 0; + } + } + + if (kIOReturnSuccess != ret) { + usbi_dbg("kernel responded with code: 0x%08x. sleeping for %d ms before trying again", ret, delay/1000); + /* sleep for a little while before trying again */ + usleep (delay); + } + } while (kIOReturnSuccess != ret && retries--); + + if (unsuspended) + /* resuspend the device */ + (void)(*device)->USBDeviceSuspend (device, 1); + + if (is_open) + (void) (*device)->USBDeviceClose (device); + + if (ret != kIOReturnSuccess) { + /* a debug message was already printed out for this error */ + if (LIBUSB_CLASS_HUB == bDeviceClass) + usbi_dbg ("could not retrieve device descriptor %.4x:%.4x: %s. skipping device", idVendor, idProduct, darwin_error_str (ret)); + else + usbi_warn (ctx, "could not retrieve device descriptor %.4x:%.4x: %s. 
skipping device", idVendor, idProduct, darwin_error_str (ret)); + + return -1; + } + + usbi_dbg ("device descriptor:"); + usbi_dbg (" bDescriptorType: 0x%02x", priv->dev_descriptor.bDescriptorType); + usbi_dbg (" bcdUSB: 0x%04x", priv->dev_descriptor.bcdUSB); + usbi_dbg (" bDeviceClass: 0x%02x", priv->dev_descriptor.bDeviceClass); + usbi_dbg (" bDeviceSubClass: 0x%02x", priv->dev_descriptor.bDeviceSubClass); + usbi_dbg (" bDeviceProtocol: 0x%02x", priv->dev_descriptor.bDeviceProtocol); + usbi_dbg (" bMaxPacketSize0: 0x%02x", priv->dev_descriptor.bMaxPacketSize0); + usbi_dbg (" idVendor: 0x%04x", priv->dev_descriptor.idVendor); + usbi_dbg (" idProduct: 0x%04x", priv->dev_descriptor.idProduct); + usbi_dbg (" bcdDevice: 0x%04x", priv->dev_descriptor.bcdDevice); + usbi_dbg (" iManufacturer: 0x%02x", priv->dev_descriptor.iManufacturer); + usbi_dbg (" iProduct: 0x%02x", priv->dev_descriptor.iProduct); + usbi_dbg (" iSerialNumber: 0x%02x", priv->dev_descriptor.iSerialNumber); + usbi_dbg (" bNumConfigurations: 0x%02x", priv->dev_descriptor.bNumConfigurations); + + /* catch buggy hubs (which appear to be virtual). Apple's own USB prober has problems with these devices. */ + if (libusb_le16_to_cpu (priv->dev_descriptor.idProduct) != idProduct) { + /* not a valid device */ + usbi_warn (ctx, "idProduct from iokit (%04x) does not match idProduct in descriptor (%04x). 
skipping device", + idProduct, libusb_le16_to_cpu (priv->dev_descriptor.idProduct)); + return -1; + } + + return 0; +} + +static int process_new_device (struct libusb_context *ctx, usb_device_t **device, UInt32 locationID) { + struct libusb_device *dev = NULL; + struct darwin_device_priv *priv; + UInt8 devSpeed; + UInt16 address; + int ret = 0; + + do { + usbi_info (ctx, "allocating new device for location 0x%08x", locationID); + + dev = usbi_alloc_device(ctx, locationID); + if (!dev) { + return LIBUSB_ERROR_NO_MEM; + } + + priv = (struct darwin_device_priv *)dev->os_priv; + priv->device = device; + + /* increment the device's reference count (it is decremented in darwin_destroy_device) */ + (*device)->AddRef (device); + + (*device)->GetDeviceAddress (device, (USBDeviceAddress *)&address); + + ret = darwin_cache_device_descriptor (ctx, dev, device); + if (ret < 0) + break; + + /* check current active configuration (and cache the first configuration value-- which may be used by claim_interface) */ + ret = darwin_check_configuration (ctx, dev, device); + if (ret < 0) + break; + + dev->bus_number = locationID >> 24; + dev->device_address = address; + + (*device)->GetDeviceSpeed (device, &devSpeed); + + switch (devSpeed) { + case kUSBDeviceSpeedLow: dev->speed = LIBUSB_SPEED_LOW; break; + case kUSBDeviceSpeedFull: dev->speed = LIBUSB_SPEED_FULL; break; + case kUSBDeviceSpeedHigh: dev->speed = LIBUSB_SPEED_HIGH; break; +#if DeviceVersion >= 500 + case kUSBDeviceSpeedSuper: dev->speed = LIBUSB_SPEED_SUPER; break; +#endif + default: + usbi_warn (ctx, "Got unknown device speed %d", devSpeed); + } + + /* save our location, we'll need this later */ + priv->location = locationID; + snprintf(priv->sys_path, 20, "%03i-%04x-%04x-%02x-%02x", address, priv->dev_descriptor.idVendor, priv->dev_descriptor.idProduct, + priv->dev_descriptor.bDeviceClass, priv->dev_descriptor.bDeviceSubClass); + + ret = usbi_sanitize_device (dev); + if (ret < 0) + break; + + usbi_info (ctx, "found 
device with address %d at %s", dev->device_address, priv->sys_path); + } while (0); + + if (0 == ret) { + usbi_connect_device (dev); + } else { + libusb_unref_device (dev); + } + + return ret; +} + +static int darwin_scan_devices(struct libusb_context *ctx) { + io_iterator_t deviceIterator; + usb_device_t **device; + kern_return_t kresult; + UInt32 location; + + kresult = usb_setup_device_iterator (&deviceIterator, 0); + if (kresult != kIOReturnSuccess) + return darwin_to_libusb (kresult); + + while ((device = usb_get_next_device (deviceIterator, &location)) != NULL) { + (void) process_new_device (ctx, device, location); + + /* process_new_device added a reference so we need to release the one + from QueryInterface */ + (*device)->Release (device); + } + + IOObjectRelease(deviceIterator); + + return 0; +} + +static int darwin_open (struct libusb_device_handle *dev_handle) { + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)dev_handle->dev->os_priv; + IOReturn kresult; + + if (0 == dpriv->open_count) { + /* try to open the device */ + kresult = (*(dpriv->device))->USBDeviceOpenSeize (dpriv->device); + if (kresult != kIOReturnSuccess) { + usbi_err (HANDLE_CTX (dev_handle), "USBDeviceOpen: %s", darwin_error_str(kresult)); + + if (kIOReturnExclusiveAccess != kresult) { + return darwin_to_libusb (kresult); + } + + /* it is possible to perform some actions on a device that is not open so do not return an error */ + priv->is_open = 0; + } else { + priv->is_open = 1; + } + + /* create async event source */ + kresult = (*(dpriv->device))->CreateDeviceAsyncEventSource (dpriv->device, &priv->cfSource); + if (kresult != kIOReturnSuccess) { + usbi_err (HANDLE_CTX (dev_handle), "CreateDeviceAsyncEventSource: %s", darwin_error_str(kresult)); + + if (priv->is_open) { + (*(dpriv->device))->USBDeviceClose (dpriv->device); + } + + priv->is_open = 0; + + return 
darwin_to_libusb (kresult); + } + + CFRetain (libusb_darwin_acfl); + + /* add the cfSource to the async run loop */ + CFRunLoopAddSource(libusb_darwin_acfl, priv->cfSource, kCFRunLoopCommonModes); + } + + /* device opened successfully */ + dpriv->open_count++; + + /* create a file descriptor for notifications */ + pipe (priv->fds); + + /* set the pipe to be non-blocking */ + fcntl (priv->fds[1], F_SETFD, O_NONBLOCK); + + usbi_add_pollfd(HANDLE_CTX(dev_handle), priv->fds[0], POLLIN); + + usbi_info (HANDLE_CTX (dev_handle), "device open for access"); + + return 0; +} + +static void darwin_close (struct libusb_device_handle *dev_handle) { + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)dev_handle->dev->os_priv; + IOReturn kresult; + int i; + + if (dpriv->open_count == 0) { + /* something is probably very wrong if this is the case */ + usbi_err (HANDLE_CTX (dev_handle), "Close called on a device that was not open!\n"); + return; + } + + dpriv->open_count--; + + /* make sure all interfaces are released */ + for (i = 0 ; i < USB_MAXINTERFACES ; i++) + if (dev_handle->claimed_interfaces & (1 << i)) + libusb_release_interface (dev_handle, i); + + if (0 == dpriv->open_count) { + /* delete the device's async event source */ + if (priv->cfSource) { + CFRunLoopRemoveSource (libusb_darwin_acfl, priv->cfSource, kCFRunLoopDefaultMode); + CFRelease (priv->cfSource); + priv->cfSource = NULL; + CFRelease (libusb_darwin_acfl); + } + + if (priv->is_open) { + /* close the device */ + kresult = (*(dpriv->device))->USBDeviceClose(dpriv->device); + if (kresult) { + /* Log the fact that we had a problem closing the file, however failing a + * close isn't really an error, so return success anyway */ + usbi_err (HANDLE_CTX (dev_handle), "USBDeviceClose: %s", darwin_error_str(kresult)); + } + } + } + + /* file descriptors are maintained per-instance */ + usbi_remove_pollfd 
(HANDLE_CTX (dev_handle), priv->fds[0]); + close (priv->fds[1]); + close (priv->fds[0]); + + priv->fds[0] = priv->fds[1] = -1; +} + +static int darwin_get_configuration(struct libusb_device_handle *dev_handle, int *config) { + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)dev_handle->dev->os_priv; + + *config = (int) dpriv->active_config; + + return 0; +} + +static int darwin_set_configuration(struct libusb_device_handle *dev_handle, int config) { + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)dev_handle->dev->os_priv; + IOReturn kresult; + int i; + + /* Setting configuration will invalidate the interface, so we need + to reclaim it. First, dispose of existing interfaces, if any. */ + for (i = 0 ; i < USB_MAXINTERFACES ; i++) + if (dev_handle->claimed_interfaces & (1 << i)) + darwin_release_interface (dev_handle, i); + + kresult = (*(dpriv->device))->SetConfiguration (dpriv->device, config); + if (kresult != kIOReturnSuccess) + return darwin_to_libusb (kresult); + + /* Reclaim any interfaces. 
*/ + for (i = 0 ; i < USB_MAXINTERFACES ; i++) + if (dev_handle->claimed_interfaces & (1 << i)) + darwin_claim_interface (dev_handle, i); + + dpriv->active_config = config; + + return 0; +} + +static int darwin_get_interface (usb_device_t **darwin_device, uint8_t ifc, io_service_t *usbInterfacep) { + IOUSBFindInterfaceRequest request; + kern_return_t kresult; + io_iterator_t interface_iterator; + CFTypeRef bInterfaceNumberCF; + int bInterfaceNumber; + + *usbInterfacep = IO_OBJECT_NULL; + + /* Setup the Interface Request */ + request.bInterfaceClass = kIOUSBFindInterfaceDontCare; + request.bInterfaceSubClass = kIOUSBFindInterfaceDontCare; + request.bInterfaceProtocol = kIOUSBFindInterfaceDontCare; + request.bAlternateSetting = kIOUSBFindInterfaceDontCare; + + kresult = (*(darwin_device))->CreateInterfaceIterator(darwin_device, &request, &interface_iterator); + if (kresult) + return kresult; + + while ((*usbInterfacep = IOIteratorNext(interface_iterator))) { + /* find the interface number */ + bInterfaceNumberCF = IORegistryEntryCreateCFProperty (*usbInterfacep, CFSTR("bInterfaceNumber"), + kCFAllocatorDefault, 0); + if (!bInterfaceNumberCF) { + continue; + } + + CFNumberGetValue(bInterfaceNumberCF, kCFNumberIntType, &bInterfaceNumber); + + CFRelease(bInterfaceNumberCF); + + if ((uint8_t) bInterfaceNumber == ifc) { + break; + } + + (void) IOObjectRelease (*usbInterfacep); + } + + /* done with the interface iterator */ + IOObjectRelease(interface_iterator); + + return 0; +} + +static int get_endpoints (struct libusb_device_handle *dev_handle, int iface) { + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + + /* current interface */ + struct darwin_interface *cInterface = &priv->interfaces[iface]; + + kern_return_t kresult; + + u_int8_t numep, direction, number; + u_int8_t dont_care1, dont_care3; + u_int16_t dont_care2; + int i; + + usbi_info (HANDLE_CTX (dev_handle), "building table of endpoints."); + + /* retrieve the 
total number of endpoints on this interface */ + kresult = (*(cInterface->interface))->GetNumEndpoints(cInterface->interface, &numep); + if (kresult) { + usbi_err (HANDLE_CTX (dev_handle), "can't get number of endpoints for interface: %s", darwin_error_str(kresult)); + return darwin_to_libusb (kresult); + } + + /* iterate through pipe references */ + for (i = 1 ; i <= numep ; i++) { + kresult = (*(cInterface->interface))->GetPipeProperties(cInterface->interface, i, &direction, &number, &dont_care1, + &dont_care2, &dont_care3); + + if (kresult != kIOReturnSuccess) { + usbi_err (HANDLE_CTX (dev_handle), "error getting pipe information for pipe %d: %s", i, darwin_error_str(kresult)); + + return darwin_to_libusb (kresult); + } + + usbi_info (HANDLE_CTX (dev_handle), "interface: %i pipe %i: dir: %i number: %i", iface, i, direction, number); + + cInterface->endpoint_addrs[i - 1] = ((direction << 7 & LIBUSB_ENDPOINT_DIR_MASK) | (number & LIBUSB_ENDPOINT_ADDRESS_MASK)); + } + + cInterface->num_endpoints = numep; + + return 0; +} + +static int darwin_claim_interface(struct libusb_device_handle *dev_handle, int iface) { + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)dev_handle->dev->os_priv; + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + io_service_t usbInterface = IO_OBJECT_NULL; + IOReturn kresult; + IOCFPlugInInterface **plugInInterface = NULL; + SInt32 score; + + /* current interface */ + struct darwin_interface *cInterface = &priv->interfaces[iface]; + + kresult = darwin_get_interface (dpriv->device, iface, &usbInterface); + if (kresult != kIOReturnSuccess) + return darwin_to_libusb (kresult); + + /* make sure we have an interface */ + if (!usbInterface && dpriv->first_config != 0) { + usbi_info (HANDLE_CTX (dev_handle), "no interface found; setting configuration: %d", dpriv->first_config); + + /* set the configuration */ + kresult = darwin_set_configuration (dev_handle, dpriv->first_config); + if 
(kresult != LIBUSB_SUCCESS) { + usbi_err (HANDLE_CTX (dev_handle), "could not set configuration"); + return kresult; + } + + kresult = darwin_get_interface (dpriv->device, iface, &usbInterface); + if (kresult) { + usbi_err (HANDLE_CTX (dev_handle), "darwin_get_interface: %s", darwin_error_str(kresult)); + return darwin_to_libusb (kresult); + } + } + + if (!usbInterface) { + usbi_err (HANDLE_CTX (dev_handle), "interface not found"); + return LIBUSB_ERROR_NOT_FOUND; + } + + /* get an interface to the device's interface */ + kresult = IOCreatePlugInInterfaceForService (usbInterface, kIOUSBInterfaceUserClientTypeID, + kIOCFPlugInInterfaceID, &plugInInterface, &score); + + /* ignore release error */ + (void)IOObjectRelease (usbInterface); + + if (kresult) { + usbi_err (HANDLE_CTX (dev_handle), "IOCreatePlugInInterfaceForService: %s", darwin_error_str(kresult)); + return darwin_to_libusb (kresult); + } + + if (!plugInInterface) { + usbi_err (HANDLE_CTX (dev_handle), "plugin interface not found"); + return LIBUSB_ERROR_NOT_FOUND; + } + + /* Do the actual claim */ + kresult = (*plugInInterface)->QueryInterface(plugInInterface, + CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID), + (LPVOID)&cInterface->interface); + /* We no longer need the intermediate plug-in */ + /* Use release instead of IODestroyPlugInInterface to avoid stopping IOServices associated with this device */ + (*plugInInterface)->Release (plugInInterface); + if (kresult || !cInterface->interface) { + usbi_err (HANDLE_CTX (dev_handle), "QueryInterface: %s", darwin_error_str(kresult)); + return darwin_to_libusb (kresult); + } + + /* claim the interface */ + kresult = (*(cInterface->interface))->USBInterfaceOpen(cInterface->interface); + if (kresult) { + usbi_err (HANDLE_CTX (dev_handle), "USBInterfaceOpen: %s", darwin_error_str(kresult)); + return darwin_to_libusb (kresult); + } + + /* update list of endpoints */ + kresult = get_endpoints (dev_handle, iface); + if (kresult) { + /* this should not happen */ + 
darwin_release_interface (dev_handle, iface); + usbi_err (HANDLE_CTX (dev_handle), "could not build endpoint table"); + return kresult; + } + + cInterface->cfSource = NULL; + + /* create async event source */ + kresult = (*(cInterface->interface))->CreateInterfaceAsyncEventSource (cInterface->interface, &cInterface->cfSource); + if (kresult != kIOReturnSuccess) { + usbi_err (HANDLE_CTX (dev_handle), "could not create async event source"); + + /* can't continue without an async event source */ + (void)darwin_release_interface (dev_handle, iface); + + return darwin_to_libusb (kresult); + } + + /* add the cfSource to the async thread's run loop */ + CFRunLoopAddSource(libusb_darwin_acfl, cInterface->cfSource, kCFRunLoopDefaultMode); + + usbi_info (HANDLE_CTX (dev_handle), "interface opened"); + + return 0; +} + +static int darwin_release_interface(struct libusb_device_handle *dev_handle, int iface) { + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + IOReturn kresult; + + /* current interface */ + struct darwin_interface *cInterface = &priv->interfaces[iface]; + + /* Check to see if an interface is open */ + if (!cInterface->interface) + return LIBUSB_SUCCESS; + + /* clean up endpoint data */ + cInterface->num_endpoints = 0; + + /* delete the interface's async event source */ + if (cInterface->cfSource) { + CFRunLoopRemoveSource (libusb_darwin_acfl, cInterface->cfSource, kCFRunLoopDefaultMode); + CFRelease (cInterface->cfSource); + } + + kresult = (*(cInterface->interface))->USBInterfaceClose(cInterface->interface); + if (kresult) + usbi_err (HANDLE_CTX (dev_handle), "USBInterfaceClose: %s", darwin_error_str(kresult)); + + kresult = (*(cInterface->interface))->Release(cInterface->interface); + if (kresult != kIOReturnSuccess) + usbi_err (HANDLE_CTX (dev_handle), "Release: %s", darwin_error_str(kresult)); + + cInterface->interface = IO_OBJECT_NULL; + + return darwin_to_libusb (kresult); +} + +static int 
darwin_set_interface_altsetting(struct libusb_device_handle *dev_handle, int iface, int altsetting) { + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + IOReturn kresult; + + /* current interface */ + struct darwin_interface *cInterface = &priv->interfaces[iface]; + + if (!cInterface->interface) + return LIBUSB_ERROR_NO_DEVICE; + + kresult = (*(cInterface->interface))->SetAlternateInterface (cInterface->interface, altsetting); + if (kresult != kIOReturnSuccess) + darwin_reset_device (dev_handle); + + /* update list of endpoints */ + kresult = get_endpoints (dev_handle, iface); + if (kresult) { + /* this should not happen */ + darwin_release_interface (dev_handle, iface); + usbi_err (HANDLE_CTX (dev_handle), "could not build endpoint table"); + return kresult; + } + + return darwin_to_libusb (kresult); +} + +static int darwin_clear_halt(struct libusb_device_handle *dev_handle, unsigned char endpoint) { + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)dev_handle->os_priv; + + /* current interface */ + struct darwin_interface *cInterface; + uint8_t pipeRef, iface; + IOReturn kresult; + + /* determine the interface/endpoint to use */ + if (ep_to_pipeRef (dev_handle, endpoint, &pipeRef, &iface) != 0) { + usbi_err (HANDLE_CTX (dev_handle), "endpoint not found on any open interface"); + + return LIBUSB_ERROR_NOT_FOUND; + } + + cInterface = &priv->interfaces[iface]; + +#if (InterfaceVersion < 190) + kresult = (*(cInterface->interface))->ClearPipeStall(cInterface->interface, pipeRef); +#else + /* newer versions of darwin support clearing additional bits on the device's endpoint */ + kresult = (*(cInterface->interface))->ClearPipeStallBothEnds(cInterface->interface, pipeRef); +#endif + if (kresult) + usbi_err (HANDLE_CTX (dev_handle), "ClearPipeStall: %s", darwin_error_str (kresult)); + + return darwin_to_libusb (kresult); +} + +static int darwin_reset_device(struct libusb_device_handle 
*dev_handle) { + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)dev_handle->dev->os_priv; + IOUSBDeviceDescriptor descriptor; + IOUSBConfigurationDescriptorPtr cached_configuration; + IOUSBConfigurationDescriptor configuration; + bool reenumerate = false; + IOReturn kresult; + int i; + + kresult = (*(dpriv->device))->ResetDevice (dpriv->device); + if (kresult) { + usbi_err (HANDLE_CTX (dev_handle), "ResetDevice: %s", darwin_error_str (kresult)); + return darwin_to_libusb (kresult); + } + + do { + usbi_dbg ("darwin/reset_device: checking if device descriptor changed"); + + /* ignore return code. if we can't get a descriptor it might be worthwhile re-enumerating anway */ + (void) darwin_request_descriptor (dpriv->device, kUSBDeviceDesc, 0, &descriptor, sizeof (descriptor)); + + /* check if the device descriptor has changed */ + if (0 != memcmp (&dpriv->dev_descriptor, &descriptor, sizeof (descriptor))) { + reenumerate = true; + break; + } + + /* check if any configuration descriptor has changed */ + for (i = 0 ; i < descriptor.bNumConfigurations ; ++i) { + usbi_dbg ("darwin/reset_device: checking if configuration descriptor %d changed", i); + + (void) darwin_request_descriptor (dpriv->device, kUSBConfDesc, i, &configuration, sizeof (configuration)); + (*(dpriv->device))->GetConfigurationDescriptorPtr (dpriv->device, i, &cached_configuration); + + if (!cached_configuration || 0 != memcmp (cached_configuration, &configuration, sizeof (configuration))) { + reenumerate = true; + break; + } + } + } while (0); + + if (reenumerate) { + usbi_dbg ("darwin/reset_device: device requires reenumeration"); + (void) (*(dpriv->device))->USBDeviceReEnumerate (dpriv->device, 0); + return LIBUSB_ERROR_NOT_FOUND; + } + + usbi_dbg ("darwin/reset_device: device reset complete"); + + return LIBUSB_SUCCESS; +} + +static int darwin_kernel_driver_active(struct libusb_device_handle *dev_handle, int interface) { + struct darwin_device_priv *dpriv = (struct darwin_device_priv 
*)dev_handle->dev->os_priv; + io_service_t usbInterface; + CFTypeRef driver; + IOReturn kresult; + + kresult = darwin_get_interface (dpriv->device, interface, &usbInterface); + if (kresult) { + usbi_err (HANDLE_CTX (dev_handle), "darwin_get_interface: %s", darwin_error_str(kresult)); + + return darwin_to_libusb (kresult); + } + + driver = IORegistryEntryCreateCFProperty (usbInterface, kIOBundleIdentifierKey, kCFAllocatorDefault, 0); + IOObjectRelease (usbInterface); + + if (driver) { + CFRelease (driver); + + return 1; + } + + /* no driver */ + return 0; +} + +/* attaching/detaching kernel drivers is not currently supported (maybe in the future?) */ +static int darwin_attach_kernel_driver (struct libusb_device_handle *dev_handle, int interface) { + (void)dev_handle; + (void)interface; + return LIBUSB_ERROR_NOT_SUPPORTED; +} + +static int darwin_detach_kernel_driver (struct libusb_device_handle *dev_handle, int interface) { + (void)dev_handle; + (void)interface; + return LIBUSB_ERROR_NOT_SUPPORTED; +} + +static void darwin_destroy_device(struct libusb_device *dev) { + struct darwin_device_priv *dpriv = (struct darwin_device_priv *) dev->os_priv; + + if (dpriv->device) { + /* it is an internal error if the reference count of a device is < 0 after release */ + assert(0 <= (*(dpriv->device))->Release(dpriv->device)); + + dpriv->device = NULL; + } +} + +static int submit_bulk_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)transfer->dev_handle->os_priv; + + IOReturn ret; + uint8_t transferType; + /* None of the values below are used in libusb for bulk transfers */ + uint8_t direction, number, interval, pipeRef, iface; + uint16_t maxPacketSize; + + struct darwin_interface *cInterface; + + if (ep_to_pipeRef (transfer->dev_handle, transfer->endpoint, &pipeRef, &iface) != 0) { + usbi_err (TRANSFER_CTX (transfer), 
"endpoint not found on any open interface"); + + return LIBUSB_ERROR_NOT_FOUND; + } + + cInterface = &priv->interfaces[iface]; + + (*(cInterface->interface))->GetPipeProperties (cInterface->interface, pipeRef, &direction, &number, + &transferType, &maxPacketSize, &interval); + + if (0 != (transfer->length % maxPacketSize)) { + /* do not need a zero packet */ + transfer->flags &= ~LIBUSB_TRANSFER_ADD_ZERO_PACKET; + } + + /* submit the request */ + /* timeouts are unavailable on interrupt endpoints */ + if (transferType == kUSBInterrupt) { + if (IS_XFERIN(transfer)) + ret = (*(cInterface->interface))->ReadPipeAsync(cInterface->interface, pipeRef, transfer->buffer, + transfer->length, darwin_async_io_callback, itransfer); + else + ret = (*(cInterface->interface))->WritePipeAsync(cInterface->interface, pipeRef, transfer->buffer, + transfer->length, darwin_async_io_callback, itransfer); + } else { + itransfer->flags |= USBI_TRANSFER_OS_HANDLES_TIMEOUT; + + if (IS_XFERIN(transfer)) + ret = (*(cInterface->interface))->ReadPipeAsyncTO(cInterface->interface, pipeRef, transfer->buffer, + transfer->length, transfer->timeout, transfer->timeout, + darwin_async_io_callback, (void *)itransfer); + else + ret = (*(cInterface->interface))->WritePipeAsyncTO(cInterface->interface, pipeRef, transfer->buffer, + transfer->length, transfer->timeout, transfer->timeout, + darwin_async_io_callback, (void *)itransfer); + } + + if (ret) + usbi_err (TRANSFER_CTX (transfer), "bulk transfer failed (dir = %s): %s (code = 0x%08x)", IS_XFERIN(transfer) ? 
"In" : "Out", + darwin_error_str(ret), ret); + + return darwin_to_libusb (ret); +} + +static int submit_iso_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct darwin_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)transfer->dev_handle->os_priv; + + IOReturn kresult; + uint8_t direction, number, interval, pipeRef, iface, transferType; + uint16_t maxPacketSize; + UInt64 frame; + AbsoluteTime atTime; + int i; + + struct darwin_interface *cInterface; + + /* construct an array of IOUSBIsocFrames, reuse the old one if possible */ + if (tpriv->isoc_framelist && tpriv->num_iso_packets != transfer->num_iso_packets) { + free(tpriv->isoc_framelist); + tpriv->isoc_framelist = NULL; + } + + if (!tpriv->isoc_framelist) { + tpriv->num_iso_packets = transfer->num_iso_packets; + tpriv->isoc_framelist = (IOUSBIsocFrame*) calloc (transfer->num_iso_packets, sizeof(IOUSBIsocFrame)); + if (!tpriv->isoc_framelist) + return LIBUSB_ERROR_NO_MEM; + } + + /* copy the frame list from the libusb descriptor (the structures differ only is member order) */ + for (i = 0 ; i < transfer->num_iso_packets ; i++) + tpriv->isoc_framelist[i].frReqCount = transfer->iso_packet_desc[i].length; + + /* determine the interface/endpoint to use */ + if (ep_to_pipeRef (transfer->dev_handle, transfer->endpoint, &pipeRef, &iface) != 0) { + usbi_err (TRANSFER_CTX (transfer), "endpoint not found on any open interface"); + + return LIBUSB_ERROR_NOT_FOUND; + } + + cInterface = &priv->interfaces[iface]; + + /* determine the properties of this endpoint and the speed of the device */ + (*(cInterface->interface))->GetPipeProperties (cInterface->interface, pipeRef, &direction, &number, + &transferType, &maxPacketSize, &interval); + + /* Last but not least we need the bus frame number */ + kresult = 
(*(cInterface->interface))->GetBusFrameNumber(cInterface->interface, &frame, &atTime); + if (kresult) { + usbi_err (TRANSFER_CTX (transfer), "failed to get bus frame number: %d", kresult); + free(tpriv->isoc_framelist); + tpriv->isoc_framelist = NULL; + + return darwin_to_libusb (kresult); + } + + /* schedule for a frame a little in the future */ + frame += 4; + + if (cInterface->frames[transfer->endpoint] && frame < cInterface->frames[transfer->endpoint]) + frame = cInterface->frames[transfer->endpoint]; + + /* submit the request */ + if (IS_XFERIN(transfer)) + kresult = (*(cInterface->interface))->ReadIsochPipeAsync(cInterface->interface, pipeRef, transfer->buffer, frame, + transfer->num_iso_packets, tpriv->isoc_framelist, darwin_async_io_callback, + itransfer); + else + kresult = (*(cInterface->interface))->WriteIsochPipeAsync(cInterface->interface, pipeRef, transfer->buffer, frame, + transfer->num_iso_packets, tpriv->isoc_framelist, darwin_async_io_callback, + itransfer); + + if (LIBUSB_SPEED_FULL == transfer->dev_handle->dev->speed) + /* Full speed */ + cInterface->frames[transfer->endpoint] = frame + transfer->num_iso_packets * (1 << (interval - 1)); + else + /* High/super speed */ + cInterface->frames[transfer->endpoint] = frame + transfer->num_iso_packets * (1 << (interval - 1)) / 8; + + if (kresult != kIOReturnSuccess) { + usbi_err (TRANSFER_CTX (transfer), "isochronous transfer failed (dir: %s): %s", IS_XFERIN(transfer) ? 
"In" : "Out", + darwin_error_str(kresult)); + free (tpriv->isoc_framelist); + tpriv->isoc_framelist = NULL; + } + + return darwin_to_libusb (kresult); +} + +static int submit_control_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_control_setup *setup = (struct libusb_control_setup *) transfer->buffer; + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)transfer->dev_handle->dev->os_priv; + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)transfer->dev_handle->os_priv; + struct darwin_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + + IOReturn kresult; + + bzero(&tpriv->req, sizeof(tpriv->req)); + + /* IOUSBDeviceInterface expects the request in cpu endianess */ + tpriv->req.bmRequestType = setup->bmRequestType; + tpriv->req.bRequest = setup->bRequest; + /* these values should be in bus order from libusb_fill_control_setup */ + tpriv->req.wValue = OSSwapLittleToHostInt16 (setup->wValue); + tpriv->req.wIndex = OSSwapLittleToHostInt16 (setup->wIndex); + tpriv->req.wLength = OSSwapLittleToHostInt16 (setup->wLength); + /* data is stored after the libusb control block */ + tpriv->req.pData = transfer->buffer + LIBUSB_CONTROL_SETUP_SIZE; + tpriv->req.completionTimeout = transfer->timeout; + tpriv->req.noDataTimeout = transfer->timeout; + + itransfer->flags |= USBI_TRANSFER_OS_HANDLES_TIMEOUT; + + /* all transfers in libusb-1.0 are async */ + + if (transfer->endpoint) { + struct darwin_interface *cInterface; + uint8_t pipeRef, iface; + + if (ep_to_pipeRef (transfer->dev_handle, transfer->endpoint, &pipeRef, &iface) != 0) { + usbi_err (TRANSFER_CTX (transfer), "endpoint not found on any open interface"); + + return LIBUSB_ERROR_NOT_FOUND; + } + + cInterface = &priv->interfaces[iface]; + + kresult = (*(cInterface->interface))->ControlRequestAsyncTO (cInterface->interface, pipeRef, &(tpriv->req), darwin_async_io_callback, 
itransfer); + } else + /* control request on endpoint 0 */ + kresult = (*(dpriv->device))->DeviceRequestAsyncTO(dpriv->device, &(tpriv->req), darwin_async_io_callback, itransfer); + + if (kresult != kIOReturnSuccess) + usbi_err (TRANSFER_CTX (transfer), "control request failed: %s", darwin_error_str(kresult)); + + return darwin_to_libusb (kresult); +} + +static int darwin_submit_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + return submit_control_transfer(itransfer); + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + return submit_bulk_transfer(itransfer); + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + return submit_iso_transfer(itransfer); + default: + usbi_err (TRANSFER_CTX(transfer), "unknown endpoint type %d", transfer->type); + return LIBUSB_ERROR_INVALID_PARAM; + } +} + +static int cancel_control_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)transfer->dev_handle->dev->os_priv; + IOReturn kresult; + + usbi_info (ITRANSFER_CTX (itransfer), "WARNING: aborting all transactions control pipe"); + + if (!dpriv->device) + return LIBUSB_ERROR_NO_DEVICE; + + kresult = (*(dpriv->device))->USBDeviceAbortPipeZero (dpriv->device); + + return darwin_to_libusb (kresult); +} + +static int darwin_abort_transfers (struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct darwin_device_priv *dpriv = (struct darwin_device_priv *)transfer->dev_handle->dev->os_priv; + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)transfer->dev_handle->os_priv; + struct darwin_interface *cInterface; + uint8_t pipeRef, iface; + IOReturn kresult; + + if (ep_to_pipeRef (transfer->dev_handle, 
transfer->endpoint, &pipeRef, &iface) != 0) { + usbi_err (TRANSFER_CTX (transfer), "endpoint not found on any open interface"); + + return LIBUSB_ERROR_NOT_FOUND; + } + + cInterface = &priv->interfaces[iface]; + + if (!dpriv->device) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_info (ITRANSFER_CTX (itransfer), "WARNING: aborting all transactions on interface %d pipe %d", iface, pipeRef); + + /* abort transactions */ + (*(cInterface->interface))->AbortPipe (cInterface->interface, pipeRef); + + usbi_info (ITRANSFER_CTX (itransfer), "calling clear pipe stall to clear the data toggle bit"); + + /* clear the data toggle bit */ +#if (InterfaceVersion < 190) + kresult = (*(cInterface->interface))->ClearPipeStall(cInterface->interface, pipeRef); +#else + /* newer versions of darwin support clearing additional bits on the device's endpoint */ + kresult = (*(cInterface->interface))->ClearPipeStallBothEnds(cInterface->interface, pipeRef); +#endif + + return darwin_to_libusb (kresult); +} + +static int darwin_cancel_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + return cancel_control_transfer(itransfer); + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + return darwin_abort_transfers (itransfer); + default: + usbi_err (TRANSFER_CTX(transfer), "unknown endpoint type %d", transfer->type); + return LIBUSB_ERROR_INVALID_PARAM; + } +} + +static void darwin_clear_transfer_priv (struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct darwin_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + + if (transfer->type == LIBUSB_TRANSFER_TYPE_ISOCHRONOUS && tpriv->isoc_framelist) { + free (tpriv->isoc_framelist); + tpriv->isoc_framelist = NULL; + } +} + +static void darwin_async_io_callback (void *refcon, 
IOReturn result, void *arg0) { + struct usbi_transfer *itransfer = (struct usbi_transfer *)refcon; + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct darwin_device_handle_priv *priv = (struct darwin_device_handle_priv *)transfer->dev_handle->os_priv; + struct darwin_msg_async_io_complete message = {.itransfer = itransfer, .result = result, + .size = (UInt32) (uintptr_t) arg0}; + + usbi_info (ITRANSFER_CTX (itransfer), "an async io operation has completed"); + + /* if requested write a zero packet */ + if (kIOReturnSuccess == result && IS_XFEROUT(transfer) && transfer->flags & LIBUSB_TRANSFER_ADD_ZERO_PACKET) { + struct darwin_interface *cInterface; + uint8_t iface, pipeRef; + + (void) ep_to_pipeRef (transfer->dev_handle, transfer->endpoint, &pipeRef, &iface); + cInterface = &priv->interfaces[iface]; + + (*(cInterface->interface))->WritePipe (cInterface->interface, pipeRef, transfer->buffer, 0); + } + + /* send a completion message to the device's file descriptor */ + write (priv->fds[1], &message, sizeof (message)); +} + +static int darwin_transfer_status (struct usbi_transfer *itransfer, kern_return_t result) { + if (itransfer->flags & USBI_TRANSFER_TIMED_OUT) + result = kIOUSBTransactionTimeout; + + switch (result) { + case kIOReturnUnderrun: + case kIOReturnSuccess: + return LIBUSB_TRANSFER_COMPLETED; + case kIOReturnAborted: + return LIBUSB_TRANSFER_CANCELLED; + case kIOUSBPipeStalled: + usbi_warn (ITRANSFER_CTX (itransfer), "transfer error: pipe is stalled"); + return LIBUSB_TRANSFER_STALL; + case kIOReturnOverrun: + usbi_err (ITRANSFER_CTX (itransfer), "transfer error: data overrun"); + return LIBUSB_TRANSFER_OVERFLOW; + case kIOUSBTransactionTimeout: + usbi_err (ITRANSFER_CTX (itransfer), "transfer error: timed out"); + itransfer->flags |= USBI_TRANSFER_TIMED_OUT; + return LIBUSB_TRANSFER_TIMED_OUT; + default: + usbi_err (ITRANSFER_CTX (itransfer), "transfer error: %s (value = 0x%08x)", darwin_error_str (result), 
result); + return LIBUSB_TRANSFER_ERROR; + } +} + +static void darwin_handle_callback (struct usbi_transfer *itransfer, kern_return_t result, UInt32 io_size) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct darwin_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + int isIsoc = LIBUSB_TRANSFER_TYPE_ISOCHRONOUS == transfer->type; + int isBulk = LIBUSB_TRANSFER_TYPE_BULK == transfer->type; + int isControl = LIBUSB_TRANSFER_TYPE_CONTROL == transfer->type; + int isInterrupt = LIBUSB_TRANSFER_TYPE_INTERRUPT == transfer->type; + int i; + + if (!isIsoc && !isBulk && !isControl && !isInterrupt) { + usbi_err (TRANSFER_CTX(transfer), "unknown endpoint type %d", transfer->type); + return; + } + + usbi_info (ITRANSFER_CTX (itransfer), "handling %s completion with kernel status %d", + isControl ? "control" : isBulk ? "bulk" : isIsoc ? "isoc" : "interrupt", result); + + if (kIOReturnSuccess == result || kIOReturnUnderrun == result) { + if (isIsoc && tpriv->isoc_framelist) { + /* copy isochronous results back */ + + for (i = 0; i < transfer->num_iso_packets ; i++) { + struct libusb_iso_packet_descriptor *lib_desc = &transfer->iso_packet_desc[i]; + lib_desc->status = darwin_to_libusb (tpriv->isoc_framelist[i].frStatus); + lib_desc->actual_length = tpriv->isoc_framelist[i].frActCount; + } + } else if (!isIsoc) + itransfer->transferred += io_size; + } + + /* it is ok to handle cancelled transfers without calling usbi_handle_transfer_cancellation (we catch timeout transfers) */ + usbi_handle_transfer_completion (itransfer, darwin_transfer_status (itransfer, result)); +} + +static int op_handle_events(struct libusb_context *ctx, struct pollfd *fds, POLL_NFDS_TYPE nfds, int num_ready) { + struct darwin_msg_async_io_complete message; + POLL_NFDS_TYPE i = 0; + ssize_t ret; + + usbi_mutex_lock(&ctx->open_devs_lock); + + for (i = 0; i < nfds && num_ready > 0; i++) { + struct pollfd *pollfd = &fds[i]; + + usbi_info (ctx, "checking fd %i 
with revents = %x", pollfd->fd, pollfd->revents); + + if (!pollfd->revents) + continue; + + num_ready--; + + if (pollfd->revents & POLLERR) { + /* this probably will never happen so ignore the error an move on. */ + continue; + } + + /* there is only one type of message */ + ret = read (pollfd->fd, &message, sizeof (message)); + if (ret < (ssize_t) sizeof (message)) { + usbi_dbg ("WARNING: short read on async io completion pipe\n"); + continue; + } + + darwin_handle_callback (message.itransfer, message.result, message.size); + } + + usbi_mutex_unlock(&ctx->open_devs_lock); + + return 0; +} + +static int darwin_clock_gettime(int clk_id, struct timespec *tp) { + mach_timespec_t sys_time; + clock_serv_t clock_ref; + + switch (clk_id) { + case USBI_CLOCK_REALTIME: + /* CLOCK_REALTIME represents time since the epoch */ + clock_ref = clock_realtime; + break; + case USBI_CLOCK_MONOTONIC: + /* use system boot time as reference for the monotonic clock */ + clock_ref = clock_monotonic; + break; + default: + return LIBUSB_ERROR_INVALID_PARAM; + } + + clock_get_time (clock_ref, &sys_time); + + tp->tv_sec = sys_time.tv_sec; + tp->tv_nsec = sys_time.tv_nsec; + + return 0; +} + +const struct usbi_os_backend darwin_backend = { + .name = "Darwin", + .init = darwin_init, + .exit = darwin_exit, + .get_device_list = NULL, /* not needed */ + .get_device_descriptor = darwin_get_device_descriptor, + .get_active_config_descriptor = darwin_get_active_config_descriptor, + .get_config_descriptor = darwin_get_config_descriptor, + + .open = darwin_open, + .close = darwin_close, + .get_configuration = darwin_get_configuration, + .set_configuration = darwin_set_configuration, + .claim_interface = darwin_claim_interface, + .release_interface = darwin_release_interface, + + .set_interface_altsetting = darwin_set_interface_altsetting, + .clear_halt = darwin_clear_halt, + .reset_device = darwin_reset_device, + + .kernel_driver_active = darwin_kernel_driver_active, + .detach_kernel_driver = 
darwin_detach_kernel_driver, + .attach_kernel_driver = darwin_attach_kernel_driver, + + .destroy_device = darwin_destroy_device, + + .submit_transfer = darwin_submit_transfer, + .cancel_transfer = darwin_cancel_transfer, + .clear_transfer_priv = darwin_clear_transfer_priv, + + .handle_events = op_handle_events, + + .clock_gettime = darwin_clock_gettime, + + .device_priv_size = sizeof(struct darwin_device_priv), + .device_handle_priv_size = sizeof(struct darwin_device_handle_priv), + .transfer_priv_size = sizeof(struct darwin_transfer_priv), + .add_iso_packet_size = 0, +}; + diff --git a/compat/libusb-1.0/libusb/os/darwin_usb.h b/compat/libusb-1.0/libusb/os/darwin_usb.h new file mode 100644 index 0000000..368dec2 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/darwin_usb.h @@ -0,0 +1,175 @@ +/* + * darwin backend for libusb 1.0 + * Copyright (C) 2008-2013 Nathan Hjelm + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if !defined(LIBUSB_DARWIN_H) +#define LIBUSB_DARWIN_H + +#include "libusbi.h" + +#include +#include +#include +#include + +/* IOUSBInterfaceInferface */ +#if defined (kIOUSBInterfaceInterfaceID300) + +#define usb_interface_t IOUSBInterfaceInterface300 +#define InterfaceInterfaceID kIOUSBInterfaceInterfaceID300 +#define InterfaceVersion 300 + +#elif defined (kIOUSBInterfaceInterfaceID245) + +#define usb_interface_t IOUSBInterfaceInterface245 +#define InterfaceInterfaceID kIOUSBInterfaceInterfaceID245 +#define InterfaceVersion 245 + +#elif defined (kIOUSBInterfaceInterfaceID220) + +#define usb_interface_t IOUSBInterfaceInterface220 +#define InterfaceInterfaceID kIOUSBInterfaceInterfaceID220 +#define InterfaceVersion 220 + +#elif defined (kIOUSBInterfaceInterfaceID197) + +#define usb_interface_t IOUSBInterfaceInterface197 +#define InterfaceInterfaceID kIOUSBInterfaceInterfaceID197 +#define InterfaceVersion 197 + +#elif defined (kIOUSBInterfaceInterfaceID190) + +#define usb_interface_t IOUSBInterfaceInterface190 +#define InterfaceInterfaceID kIOUSBInterfaceInterfaceID190 +#define InterfaceVersion 190 + +#elif defined (kIOUSBInterfaceInterfaceID182) + +#define usb_interface_t IOUSBInterfaceInterface182 +#define InterfaceInterfaceID kIOUSBInterfaceInterfaceID182 +#define InterfaceVersion 182 + +#else + +#error "IOUSBFamily is too old. 
Please upgrade your OS" + +#endif + +/* IOUSBDeviceInterface */ +#if defined (kIOUSBDeviceInterfaceID500) + +#define usb_device_t IOUSBDeviceInterface500 +#define DeviceInterfaceID kIOUSBDeviceInterfaceID500 +#define DeviceVersion 500 + +#elif defined (kIOUSBDeviceInterfaceID320) + +#define usb_device_t IOUSBDeviceInterface320 +#define DeviceInterfaceID kIOUSBDeviceInterfaceID320 +#define DeviceVersion 320 + +#elif defined (kIOUSBDeviceInterfaceID300) + +#define usb_device_t IOUSBDeviceInterface300 +#define DeviceInterfaceID kIOUSBDeviceInterfaceID300 +#define DeviceVersion 300 + +#elif defined (kIOUSBDeviceInterfaceID245) + +#define usb_device_t IOUSBDeviceInterface245 +#define DeviceInterfaceID kIOUSBDeviceInterfaceID245 +#define DeviceVersion 245 + +#elif defined (kIOUSBDeviceInterfaceID197) + +#define usb_device_t IOUSBDeviceInterface197 +#define DeviceInterfaceID kIOUSBDeviceInterfaceID197 +#define DeviceVersion 197 + +#elif defined (kIOUSBDeviceInterfaceID187) + +#define usb_device_t IOUSBDeviceInterface187 +#define DeviceInterfaceID kIOUSBDeviceInterfaceID187 +#define DeviceVersion 187 + +#elif defined (kIOUSBDeviceInterfaceID182) + +#define usb_device_t IOUSBDeviceInterface182 +#define DeviceInterfaceID kIOUSBDeviceInterfaceID182 +#define DeviceVersion 182 + +#else + +#error "IOUSBFamily is too old. 
Please upgrade your OS" + +#endif + +#if !defined(IO_OBJECT_NULL) +#define IO_OBJECT_NULL ((io_object_t) 0) +#endif + +typedef IOCFPlugInInterface *io_cf_plugin_ref_t; +typedef IONotificationPortRef io_notification_port_t; + +/* private structures */ +struct darwin_device_priv { + IOUSBDeviceDescriptor dev_descriptor; + UInt32 location; + char sys_path[21]; + usb_device_t **device; + int open_count; + UInt8 first_config, active_config; +}; + +struct darwin_device_handle_priv { + int is_open; + CFRunLoopSourceRef cfSource; + int fds[2]; + + struct darwin_interface { + usb_interface_t **interface; + uint8_t num_endpoints; + CFRunLoopSourceRef cfSource; + uint64_t frames[256]; + uint8_t endpoint_addrs[USB_MAXENDPOINTS]; + } interfaces[USB_MAXINTERFACES]; +}; + +struct darwin_transfer_priv { + /* Isoc */ + IOUSBIsocFrame *isoc_framelist; + int num_iso_packets; + + /* Control */ +#if !defined (LIBUSB_NO_TIMEOUT_DEVICE) + IOUSBDevRequestTO req; +#else + IOUSBDevRequest req; +#endif + + /* Bulk */ +}; + +/* structure for signaling io completion */ +struct darwin_msg_async_io_complete { + struct usbi_transfer *itransfer; + IOReturn result; + UInt32 size; +}; + +#endif diff --git a/compat/libusb-1.0/libusb/os/linux_netlink.c b/compat/libusb-1.0/libusb/os/linux_netlink.c new file mode 100644 index 0000000..20c652f --- /dev/null +++ b/compat/libusb-1.0/libusb/os/linux_netlink.c @@ -0,0 +1,231 @@ +/* -*- Mode: C; c-basic-offset:8 ; indent-tabs-mode:t -*- */ +/* + * Linux usbfs backend for libusb + * Copyright (C) 2007-2009 Daniel Drake + * Copyright (c) 2001 Johannes Erdfelt + * Copyright (c) 2013 Nathan Hjelm + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libusb.h" +#include "libusbi.h" +#include "linux_usbfs.h" + +#include +#include + +#define KERNEL 1 + +static int linux_netlink_socket = -1; +static pthread_t libusb_linux_event_thread; + +static void *linux_netlink_event_thread_main(void *arg); + +struct sockaddr_nl snl = { .nl_family=AF_NETLINK, .nl_groups=KERNEL }; + +int linux_netlink_start_event_monitor(void) +{ + int ret; + + snl.nl_groups = KERNEL; + + linux_netlink_socket = socket(PF_NETLINK, SOCK_RAW|SOCK_CLOEXEC|SOCK_NONBLOCK, NETLINK_KOBJECT_UEVENT); + if (-1 == linux_netlink_socket) { + return LIBUSB_ERROR_OTHER; + } + + ret = bind(linux_netlink_socket, (struct sockaddr *) &snl, sizeof(snl)); + if (0 != ret) { + return LIBUSB_ERROR_OTHER; + } + + /* TODO -- add authentication */ + /* setsockopt(linux_netlink_socket, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one)); */ + + ret = pthread_create(&libusb_linux_event_thread, NULL, linux_netlink_event_thread_main, NULL); + if (0 != ret) { + return LIBUSB_ERROR_OTHER; + } + + return LIBUSB_SUCCESS; +} + +int linux_netlink_stop_event_monitor(void) +{ + int r; + + if (-1 == linux_netlink_socket) { + /* already closed. nothing to do */ + return LIBUSB_SUCCESS; + } + + r = close(linux_netlink_socket); + if (0 > r) { + usbi_err(NULL, "error closing netlink socket. 
%s", strerror(errno)); + return LIBUSB_ERROR_OTHER; + } + + pthread_cancel(libusb_linux_event_thread); + + linux_netlink_socket = -1; + + return LIBUSB_SUCCESS; +} + +static const char *netlink_message_parse (const char *buffer, size_t len, const char *key) +{ + size_t keylen = strlen(key); + size_t offset; + + for (offset = 0 ; offset < len && '\0' != buffer[offset] ; offset += strlen(buffer + offset) + 1) { + if (0 == strncmp(buffer + offset, key, keylen) && + '=' == buffer[offset + keylen]) { + return buffer + offset + keylen + 1; + } + } + + return NULL; +} + +/* parse parts of netlink message common to both libudev and the kernel */ +static int linux_netlink_parse(char *buffer, size_t len, int *detached, const char **sys_name, + uint8_t *busnum, uint8_t *devaddr) { + const char *tmp; + int i; + + errno = 0; + + *sys_name = NULL; + *detached = 0; + *busnum = 0; + *devaddr = 0; + + tmp = netlink_message_parse((const char *) buffer, len, "ACTION"); + if (0 == strcmp(tmp, "remove")) { + *detached = 1; + } else if (0 != strcmp(tmp, "add")) { + usbi_dbg("unknown device action"); + return -1; + } + + /* check that this is a usb message */ + tmp = netlink_message_parse(buffer, len, "SUBSYSTEM"); + if (NULL == tmp || 0 != strcmp(tmp, "usb")) { + /* not usb. ignore */ + return -1; + } + + tmp = netlink_message_parse(buffer, len, "BUSNUM"); + if (NULL == tmp) { + /* no bus number (likely a usb interface). 
ignore*/ + return -1; + } + + *busnum = (uint8_t)(strtoul(tmp, NULL, 10) & 0xff); + if (errno) { + errno = 0; + return -1; + } + + tmp = netlink_message_parse(buffer, len, "DEVNUM"); + if (NULL == tmp) { + return -1; + } + + *devaddr = (uint8_t)(strtoul(tmp, NULL, 10) & 0xff); + if (errno) { + errno = 0; + return -1; + } + + tmp = netlink_message_parse(buffer, len, "DEVPATH"); + if (NULL == tmp) { + return -1; + } + + for (i = strlen(tmp) - 1 ; i ; --i) { + if ('/' ==tmp[i]) { + *sys_name = tmp + i + 1; + break; + } + } + + /* found a usb device */ + return 0; +} + +static void *linux_netlink_event_thread_main(void *arg) +{ + struct pollfd fds = {.fd = linux_netlink_socket, + .events = POLLIN}; + unsigned char buffer[1024]; + struct iovec iov = {.iov_base = buffer, .iov_len = sizeof(buffer)}; + struct msghdr meh = { .msg_iov=&iov, .msg_iovlen=1, + .msg_name=&snl, .msg_namelen=sizeof(snl) }; + uint8_t busnum, devaddr; + int detached, r; + size_t len; + + /* silence compiler warning */ + (void) arg; + + while (1 == poll(&fds, 1, -1)) { + const char *sys_name = NULL; + + if (POLLIN != fds.revents) { + break; + } + + /* read netlink message */ + memset(buffer, 0, sizeof(buffer)); + len = recvmsg(linux_netlink_socket, &meh, 0); + if (len < 32) { + usbi_dbg("error recieving message from netlink"); + continue; + } + + /* TODO -- authenticate this message is from the kernel or udevd */ + + r = linux_netlink_parse(buffer, len, &detached, &sys_name, + &busnum, &devaddr); + if (r) + continue; + + usbi_dbg("netlink hotplug found device busnum: %hhu, devaddr: %hhu, sys_name: %s, removed: %s", + busnum, devaddr, sys_name, detached ? 
"yes" : "no"); + + /* signal device is available (or not) to all contexts */ + if (detached) + linux_hotplug_disconnected(busnum, devaddr, sys_name); + else + linux_hotplug_enumerate(busnum, devaddr, sys_name); + } + + return NULL; +} diff --git a/compat/libusb-1.0/libusb/os/linux_udev.c b/compat/libusb-1.0/libusb/os/linux_udev.c new file mode 100644 index 0000000..abe1e66 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/linux_udev.c @@ -0,0 +1,242 @@ +/* -*- Mode: C; c-basic-offset:8 ; indent-tabs-mode:t -*- */ +/* + * Linux usbfs backend for libusb + * Copyright (C) 2007-2009 Daniel Drake + * Copyright (c) 2001 Johannes Erdfelt + * Copyright (c) 2012-2013 Nathan Hjelm + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libusb.h" +#include "libusbi.h" +#include "linux_usbfs.h" + +/* udev context */ +static struct udev *udev_ctx = NULL; +static int udev_monitor_fd = -1; +static struct udev_monitor *udev_monitor = NULL; +static pthread_t linux_event_thread; + +static void udev_hotplug_event(void); +static void *linux_udev_event_thread_main(void *arg); + +int linux_udev_start_event_monitor(void) +{ + int r; + + if (NULL == udev_ctx) { + udev_ctx = udev_new(); + if (!udev_ctx) { + return LIBUSB_ERROR_OTHER; + } + } + + udev_monitor = udev_monitor_new_from_netlink(udev_ctx, "udev"); + if (!udev_monitor) { + usbi_err(NULL, "could not initialize udev monitor"); + return LIBUSB_ERROR_OTHER; + } + + r = udev_monitor_filter_add_match_subsystem_devtype(udev_monitor, "usb", 0); + if (r) { + usbi_err(NULL, "could not initialize udev monitor filter for \"usb\" subsystem"); + return LIBUSB_ERROR_OTHER; + } + + if (udev_monitor_enable_receiving(udev_monitor)) { + usbi_err(NULL, "failed to enable the udev monitor"); + return LIBUSB_ERROR_OTHER; + } + + udev_monitor_fd = udev_monitor_get_fd(udev_monitor); + + pthread_create(&linux_event_thread, NULL, linux_udev_event_thread_main, NULL); + + return LIBUSB_SUCCESS; +} + +int linux_udev_stop_event_monitor(void) +{ + if (-1 == udev_monitor_fd) { + /* this should never happen */ + return LIBUSB_ERROR_OTHER; + } + + /* Cancel the event thread. This is the only way to garauntee the thread + exits since closing the monitor fd won't necessarily cause poll + to return. 
*/ + pthread_cancel(linux_event_thread); + + /* Release the udev monitor */ + udev_monitor_unref(udev_monitor); + udev_monitor = NULL; + udev_monitor_fd = -1; + + /* Clean up the udev context */ + udev_unref(udev_ctx); + udev_ctx = NULL; + + return LIBUSB_SUCCESS; +} + +static void *linux_udev_event_thread_main(void __attribute__((unused)) *arg) +{ + struct pollfd fds = {.fd = udev_monitor_fd, + .events = POLLIN}; + + usbi_dbg("udev event thread entering."); + + while (1 == poll(&fds, 1, -1)) { + if (NULL == udev_monitor || POLLIN != fds.revents) { + break; + } + + udev_hotplug_event(); + } + + usbi_dbg("udev event thread exiting"); + + return NULL; +} + +static int udev_device_info(struct libusb_context *ctx, int detached, + struct udev_device *udev_dev, uint8_t *busnum, + uint8_t *devaddr, const char **sys_name) { + const char *dev_node; + + dev_node = udev_device_get_devnode(udev_dev); + if (!dev_node) { + return LIBUSB_ERROR_OTHER; + } + + *sys_name = udev_device_get_sysname(udev_dev); + if (!*sys_name) { + return LIBUSB_ERROR_OTHER; + } + + return linux_get_device_address(ctx, detached, busnum, devaddr, + dev_node, *sys_name); +} + +static void udev_hotplug_event(void) +{ + struct udev_device* udev_dev; + const char* udev_action; + const char* sys_name = NULL; + uint8_t busnum = 0, devaddr = 0; + int detached; + int r; + + if (NULL == udev_monitor) { + return; + } + + do { + udev_dev = udev_monitor_receive_device(udev_monitor); + if (!udev_dev) { + usbi_err(NULL, "failed to read data from udev monitor socket."); + return; + } + + udev_action = udev_device_get_action(udev_dev); + if (!udev_action) { + break; + } + + detached = !strncmp(udev_action, "remove", 6); + + r = udev_device_info(NULL, detached, udev_dev, &busnum, &devaddr, &sys_name); + if (LIBUSB_SUCCESS != r) { + break; + } + + usbi_dbg("udev hotplug event. 
action: %s.", udev_action); + + if (strncmp(udev_action, "add", 3) == 0) { + linux_hotplug_enumerate(busnum, devaddr, sys_name); + } else if (detached) { + linux_hotplug_disconnected(busnum, devaddr, sys_name); + } else { + usbi_err(NULL, "ignoring udev action %s", udev_action); + } + } while (0); + + udev_device_unref(udev_dev); +} + +int linux_udev_scan_devices(struct libusb_context *ctx) +{ + struct udev_enumerate *enumerator; + struct udev_list_entry *devices, *entry; + struct udev_device *udev_dev; + const char *sys_name; + int r; + + if (NULL == udev_ctx) { + udev_ctx = udev_new(); + if (!udev_ctx) { + return LIBUSB_ERROR_OTHER; + } + } + + enumerator = udev_enumerate_new(udev_ctx); + if (NULL == enumerator) { + usbi_err(ctx, "error creating udev enumerator"); + return LIBUSB_ERROR_OTHER; + } + + udev_enumerate_add_match_subsystem(enumerator, "usb"); + udev_enumerate_scan_devices(enumerator); + devices = udev_enumerate_get_list_entry(enumerator); + + udev_list_entry_foreach(entry, devices) { + const char *path = udev_list_entry_get_name(entry); + uint8_t busnum = 0, devaddr = 0; + + udev_dev = udev_device_new_from_syspath(udev_ctx, path); + + r = udev_device_info(ctx, 0, udev_dev, &busnum, &devaddr, &sys_name); + if (r) { + udev_device_unref(udev_dev); + continue; + } + + linux_enumerate_device(ctx, busnum, devaddr, sys_name); + udev_device_unref(udev_dev); + } + + udev_enumerate_unref(enumerator); + + return LIBUSB_SUCCESS; +} + diff --git a/compat/libusb-1.0/libusb/os/linux_usbfs.c b/compat/libusb-1.0/libusb/os/linux_usbfs.c new file mode 100644 index 0000000..bd847c2 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/linux_usbfs.c @@ -0,0 +1,2613 @@ +/* -*- Mode: C; c-basic-offset:8 ; indent-tabs-mode:t -*- */ +/* + * Linux usbfs backend for libusb + * Copyright (C) 2007-2009 Daniel Drake + * Copyright (c) 2001 Johannes Erdfelt + * Copyright (c) 2013 Nathan Hjelm + * + * This library is free software; you can redistribute it and/or + * modify it under the 
terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libusb.h" +#include "libusbi.h" +#include "linux_usbfs.h" + +/* sysfs vs usbfs: + * opening a usbfs node causes the device to be resumed, so we attempt to + * avoid this during enumeration. + * + * sysfs allows us to read the kernel's in-memory copies of device descriptors + * and so forth, avoiding the need to open the device: + * - The binary "descriptors" file was added in 2.6.23. + * - The "busnum" file was added in 2.6.22 + * - The "devnum" file has been present since pre-2.6.18 + * - the "bConfigurationValue" file has been present since pre-2.6.18 + * + * If we have bConfigurationValue, busnum, and devnum, then we can determine + * the active configuration without having to open the usbfs node in RDWR mode. + * We assume this is the case if we see the busnum file (indicates 2.6.22+). + * The busnum file is important as that is the only way we can relate sysfs + * devices to usbfs nodes. + * + * If we also have descriptors, we can obtain the device descriptor and active + * configuration without touching usbfs at all. 
+ * + * The descriptors file originally only contained the active configuration + * descriptor alongside the device descriptor, but all configurations are + * included as of Linux 2.6.26. + */ + +/* endianness for multi-byte fields: + * + * Descriptors exposed by usbfs have the multi-byte fields in the device + * descriptor as host endian. Multi-byte fields in the other descriptors are + * bus-endian. The kernel documentation says otherwise, but it is wrong. + */ + +const char *usbfs_path = NULL; + +/* use usbdev*.* device names in /dev instead of the usbfs bus directories */ +static int usbdev_names = 0; + +/* Linux 2.6.32 adds support for a bulk continuation URB flag. this basically + * allows us to mark URBs as being part of a specific logical transfer when + * we submit them to the kernel. then, on any error except a cancellation, all + * URBs within that transfer will be cancelled and no more URBs will be + * accepted for the transfer, meaning that no more data can creep in. + * + * The BULK_CONTINUATION flag must be set on all URBs within a bulk transfer + * (in either direction) except the first. + * For IN transfers, we must also set SHORT_NOT_OK on all URBs except the + * last; it means that the kernel should treat a short reply as an error. + * For OUT transfers, SHORT_NOT_OK must not be set. it isn't needed (OUT + * transfers can't be short unless there's already some sort of error), and + * setting this flag is disallowed (a kernel with USB debugging enabled will + * reject such URBs). + */ +static int supports_flag_bulk_continuation = -1; + +/* Linux 2.6.31 fixes support for the zero length packet URB flag. This + * allows us to mark URBs that should be followed by a zero length data + * packet, which can be required by device- or class-specific protocols. + */ +static int supports_flag_zero_packet = -1; + +/* clock ID for monotonic clock, as not all clock sources are available on all + * systems. appropriate choice made at initialization time. 
*/ +static clockid_t monotonic_clkid = -1; + +/* do we have a busnum to relate devices? this also implies that we can read + * the active configuration through bConfigurationValue */ +static int sysfs_can_relate_devices = 0; + +/* do we have a descriptors file? */ +static int sysfs_has_descriptors = 0; + +/* how many times have we initted (and not exited) ? */ +static volatile int init_count = 0; + +/* lock for init_count */ +static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER; + +static int linux_start_event_monitor(void); +static int linux_stop_event_monitor(void); +static int linux_scan_devices(struct libusb_context *ctx); + +#if !defined(USE_UDEV) +static int linux_default_scan_devices (struct libusb_context *ctx); +#endif + +struct linux_device_priv { + char *sysfs_dir; + unsigned char *dev_descriptor; + unsigned char *config_descriptor; +}; + +struct linux_device_handle_priv { + int fd; + uint32_t caps; +}; + +enum reap_action { + NORMAL = 0, + /* submission failed after the first URB, so await cancellation/completion + * of all the others */ + SUBMIT_FAILED, + + /* cancelled by user or timeout */ + CANCELLED, + + /* completed multi-URB transfer in non-final URB */ + COMPLETED_EARLY, + + /* one or more urbs encountered a low-level error */ + ERROR, +}; + +struct linux_transfer_priv { + union { + struct usbfs_urb *urbs; + struct usbfs_urb **iso_urbs; + }; + + enum reap_action reap_action; + int num_urbs; + int num_retired; + enum libusb_transfer_status reap_status; + + /* next iso packet in user-supplied transfer to be populated */ + int iso_packet_offset; +}; + +static void _get_usbfs_path(struct libusb_device *dev, char *path) +{ + if (usbdev_names) + snprintf(path, PATH_MAX, "%s/usbdev%d.%d", + usbfs_path, dev->bus_number, dev->device_address); + else + snprintf(path, PATH_MAX, "%s/%03d/%03d", + usbfs_path, dev->bus_number, dev->device_address); +} + +static struct linux_device_priv *_device_priv(struct libusb_device *dev) +{ + return (struct 
linux_device_priv *) dev->os_priv; +} + +static struct linux_device_handle_priv *_device_handle_priv( + struct libusb_device_handle *handle) +{ + return (struct linux_device_handle_priv *) handle->os_priv; +} + +/* check dirent for a /dev/usbdev%d.%d name + * optionally return bus/device on success */ +static int _is_usbdev_entry(struct dirent *entry, int *bus_p, int *dev_p) +{ + int busnum, devnum; + + if (sscanf(entry->d_name, "usbdev%d.%d", &busnum, &devnum) != 2) + return 0; + + usbi_dbg("found: %s", entry->d_name); + if (bus_p != NULL) + *bus_p = busnum; + if (dev_p != NULL) + *dev_p = devnum; + return 1; +} + +static int check_usb_vfs(const char *dirname) +{ + DIR *dir; + struct dirent *entry; + int found = 0; + + dir = opendir(dirname); + if (!dir) + return 0; + + while ((entry = readdir(dir)) != NULL) { + if (entry->d_name[0] == '.') + continue; + + /* We assume if we find any files that it must be the right place */ + found = 1; + break; + } + + closedir(dir); + return found; +} + +static const char *find_usbfs_path(void) +{ + const char *path = "/dev/bus/usb"; + const char *ret = NULL; + + if (check_usb_vfs(path)) { + ret = path; + } else { + path = "/proc/bus/usb"; + if (check_usb_vfs(path)) + ret = path; + } + + /* look for /dev/usbdev*.* if the normal places fail */ + if (ret == NULL) { + struct dirent *entry; + DIR *dir; + + path = "/dev"; + dir = opendir(path); + if (dir != NULL) { + while ((entry = readdir(dir)) != NULL) { + if (_is_usbdev_entry(entry, NULL, NULL)) { + /* found one; that's enough */ + ret = path; + usbdev_names = 1; + break; + } + } + closedir(dir); + } + } + + if (ret != NULL) + usbi_dbg("found usbfs at %s", ret); + + return ret; +} + +/* the monotonic clock is not usable on all systems (e.g. embedded ones often + * seem to lack it). fall back to REALTIME if we have to. 
*/ +static clockid_t find_monotonic_clock(void) +{ +#ifdef CLOCK_MONOTONIC + struct timespec ts; + int r; + + /* Linux 2.6.28 adds CLOCK_MONOTONIC_RAW but we don't use it + * because it's not available through timerfd */ + r = clock_gettime(CLOCK_MONOTONIC, &ts); + if (r == 0) + return CLOCK_MONOTONIC; + usbi_dbg("monotonic clock doesn't work, errno %d", errno); +#endif + + return CLOCK_REALTIME; +} + +static int kernel_version_ge(int major, int minor, int sublevel) +{ + struct utsname uts; + int atoms, kmajor, kminor, ksublevel; + + if (uname(&uts) < 0) + return -1; + atoms = sscanf(uts.release, "%d.%d.%d", &kmajor, &kminor, &ksublevel); + if (atoms < 1) + return -1; + + if (kmajor > major) + return 1; + if (kmajor < major) + return 0; + + /* kmajor == major */ + if (atoms < 2) + return 0 == minor && 0 == sublevel; + if (kminor > minor) + return 1; + if (kminor < minor) + return 0; + + /* kminor == minor */ + if (atoms < 3) + return 0 == sublevel; + + return ksublevel >= sublevel; +} + +/* Return 1 if filename exists inside dirname in sysfs. + SYSFS_DEVICE_PATH is assumed to be the beginning of the path. 
*/ +static int sysfs_has_file(const char *dirname, const char *filename) +{ + struct stat statbuf; + char path[PATH_MAX]; + int r; + + snprintf(path, PATH_MAX, "%s/%s/%s", SYSFS_DEVICE_PATH, dirname, filename); + r = stat(path, &statbuf); + if (r == 0 && S_ISREG(statbuf.st_mode)) + return 1; + + return 0; +} + +static int op_init(struct libusb_context *ctx) +{ + struct stat statbuf; + int r; + + usbfs_path = find_usbfs_path(); + if (!usbfs_path) { + usbi_err(ctx, "could not find usbfs"); + return LIBUSB_ERROR_OTHER; + } + + if (monotonic_clkid == -1) + monotonic_clkid = find_monotonic_clock(); + + if (supports_flag_bulk_continuation == -1) { + /* bulk continuation URB flag available from Linux 2.6.32 */ + supports_flag_bulk_continuation = kernel_version_ge(2,6,32); + if (supports_flag_bulk_continuation == -1) { + usbi_err(ctx, "error checking for bulk continuation support"); + return LIBUSB_ERROR_OTHER; + } + } + + if (supports_flag_bulk_continuation) + usbi_dbg("bulk continuation flag supported"); + + if (-1 == supports_flag_zero_packet) { + /* zero length packet URB flag fixed since Linux 2.6.31 */ + supports_flag_zero_packet = kernel_version_ge(2,6,31); + if (-1 == supports_flag_zero_packet) { + usbi_err(ctx, "error checking for zero length packet support"); + return LIBUSB_ERROR_OTHER; + } + } + + if (supports_flag_zero_packet) + usbi_dbg("zero length packet flag supported"); + + r = stat(SYSFS_DEVICE_PATH, &statbuf); + if (r == 0 && S_ISDIR(statbuf.st_mode)) { + DIR *devices = opendir(SYSFS_DEVICE_PATH); + struct dirent *entry; + + usbi_dbg("found usb devices in sysfs"); + + if (!devices) { + usbi_err(ctx, "opendir devices failed errno=%d", errno); + return LIBUSB_ERROR_IO; + } + + /* Make sure sysfs supports all the required files. If it + * does not, then usbfs will be used instead. Determine + * this by looping through the directories in + * SYSFS_DEVICE_PATH. 
With the assumption that there will + * always be subdirectories of the name usbN (usb1, usb2, + * etc) representing the root hubs, check the usbN + * subdirectories to see if they have all the needed files. + * This algorithm uses the usbN subdirectories (root hubs) + * because a device disconnection will cause a race + * condition regarding which files are available, sometimes + * causing an incorrect result. The root hubs are used + * because it is assumed that they will always be present. + * See the "sysfs vs usbfs" comment at the top of this file + * for more details. */ + while ((entry = readdir(devices))) { + int has_busnum=0, has_devnum=0, has_descriptors=0; + int has_configuration_value=0; + + /* Only check the usbN directories. */ + if (strncmp(entry->d_name, "usb", 3) != 0) + continue; + + /* Check for the files libusb needs from sysfs. */ + has_busnum = sysfs_has_file(entry->d_name, "busnum"); + has_devnum = sysfs_has_file(entry->d_name, "devnum"); + has_descriptors = sysfs_has_file(entry->d_name, "descriptors"); + has_configuration_value = sysfs_has_file(entry->d_name, "bConfigurationValue"); + + if (has_busnum && has_devnum && has_configuration_value) + sysfs_can_relate_devices = 1; + if (has_descriptors) + sysfs_has_descriptors = 1; + + /* Only need to check until we've found ONE device which + has all the attributes. */ + if (sysfs_has_descriptors && sysfs_can_relate_devices) + break; + } + closedir(devices); + + /* Only use sysfs descriptors if the rest of + sysfs will work for libusb. 
*/ + if (!sysfs_can_relate_devices) + sysfs_has_descriptors = 0; + } else { + usbi_dbg("sysfs usb info not available"); + sysfs_has_descriptors = 0; + sysfs_can_relate_devices = 0; + } + + pthread_mutex_lock(&hotplug_lock); + if (!init_count++) { + /* start up hotplug event handler */ + r = linux_start_event_monitor(); + if (LIBUSB_SUCCESS != r) { + usbi_err(ctx, "error starting hotplug event monitor"); + return r; + } + } + pthread_mutex_unlock(&hotplug_lock); + + r = linux_scan_devices(ctx); + if (LIBUSB_SUCCESS != r) { + return r; + } + + return r; +} + +static void op_exit(void) +{ + if (!init_count) { + /* should not happen */ + return; + } + + pthread_mutex_lock(&hotplug_lock); + if (!--init_count) { + /* tear down event handler */ + (void)linux_stop_event_monitor(); + } + pthread_mutex_unlock(&hotplug_lock); +} + +static int linux_start_event_monitor(void) +{ +#if defined(USE_UDEV) + return linux_udev_start_event_monitor(); +#else + return linux_netlink_start_event_monitor(); +#endif +} + +static int linux_stop_event_monitor(void) +{ +#if defined(USE_UDEV) + return linux_udev_stop_event_monitor(); +#else + return linux_netlink_stop_event_monitor(); +#endif +} + +static int linux_scan_devices(struct libusb_context *ctx) +{ +#if defined(USE_UDEV) + return linux_udev_scan_devices(ctx); +#else + return linux_default_scan_devices(ctx); +#endif +} + +static int usbfs_get_device_descriptor(struct libusb_device *dev, + unsigned char *buffer) +{ + struct linux_device_priv *priv = _device_priv(dev); + + /* return cached copy */ + memcpy(buffer, priv->dev_descriptor, DEVICE_DESC_LENGTH); + return 0; +} + +static int _open_sysfs_attr(struct libusb_device *dev, const char *attr) +{ + struct linux_device_priv *priv = _device_priv(dev); + char filename[PATH_MAX]; + int fd; + + snprintf(filename, PATH_MAX, "%s/%s/%s", + SYSFS_DEVICE_PATH, priv->sysfs_dir, attr); + fd = open(filename, O_RDONLY); + if (fd < 0) { + usbi_err(DEVICE_CTX(dev), + "open %s failed ret=%d errno=%d", 
filename, fd, errno); + return LIBUSB_ERROR_IO; + } + + return fd; +} + +/* Note only suitable for attributes which always read >= 0, < 0 is error */ +static int __read_sysfs_attr(struct libusb_context *ctx, + const char *devname, const char *attr) +{ + char filename[PATH_MAX]; + FILE *f; + int r, value; + + snprintf(filename, PATH_MAX, "%s/%s/%s", SYSFS_DEVICE_PATH, + devname, attr); + f = fopen(filename, "r"); + if (f == NULL) { + if (errno == ENOENT) { + /* File doesn't exist. Assume the device has been + disconnected (see trac ticket #70). */ + return LIBUSB_ERROR_NO_DEVICE; + } + usbi_err(ctx, "open %s failed errno=%d", filename, errno); + return LIBUSB_ERROR_IO; + } + + r = fscanf(f, "%d", &value); + fclose(f); + if (r != 1) { + usbi_err(ctx, "fscanf %s returned %d, errno=%d", attr, r, errno); + return LIBUSB_ERROR_NO_DEVICE; /* For unplug race (trac #70) */ + } + if (value < 0) { + usbi_err(ctx, "%s contains a negative value", filename); + return LIBUSB_ERROR_IO; + } + + return value; +} + +static int sysfs_get_device_descriptor(struct libusb_device *dev, + unsigned char *buffer) +{ + int fd; + ssize_t r; + + /* sysfs provides access to an in-memory copy of the device descriptor, + * so we use that rather than keeping our own copy */ + + fd = _open_sysfs_attr(dev, "descriptors"); + if (fd < 0) + return fd; + + r = read(fd, buffer, DEVICE_DESC_LENGTH);; + close(fd); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), "read failed, ret=%d errno=%d", fd, errno); + return LIBUSB_ERROR_IO; + } else if (r < DEVICE_DESC_LENGTH) { + usbi_err(DEVICE_CTX(dev), "short read %d/%d", r, DEVICE_DESC_LENGTH); + return LIBUSB_ERROR_IO; + } + + return 0; +} + +static int op_get_device_descriptor(struct libusb_device *dev, + unsigned char *buffer, int *host_endian) +{ + if (sysfs_has_descriptors) { + *host_endian = 0; + return sysfs_get_device_descriptor(dev, buffer); + } else { + *host_endian = 1; + return usbfs_get_device_descriptor(dev, buffer); + } +} + +static int 
usbfs_get_active_config_descriptor(struct libusb_device *dev, + unsigned char *buffer, size_t len) +{ + struct linux_device_priv *priv = _device_priv(dev); + if (!priv->config_descriptor) + return LIBUSB_ERROR_NOT_FOUND; /* device is unconfigured */ + + /* retrieve cached copy */ + memcpy(buffer, priv->config_descriptor, len); + return 0; +} + +/* read the bConfigurationValue for a device */ +static int sysfs_get_active_config(struct libusb_device *dev, int *config) +{ + char *endptr; + char tmp[4] = {0, 0, 0, 0}; + long num; + int fd; + ssize_t r; + + fd = _open_sysfs_attr(dev, "bConfigurationValue"); + if (fd < 0) + return fd; + + r = read(fd, tmp, sizeof(tmp)); + close(fd); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), + "read bConfigurationValue failed ret=%d errno=%d", r, errno); + return LIBUSB_ERROR_IO; + } else if (r == 0) { + usbi_dbg("device unconfigured"); + *config = -1; + return 0; + } + + if (tmp[sizeof(tmp) - 1] != 0) { + usbi_err(DEVICE_CTX(dev), "not null-terminated?"); + return LIBUSB_ERROR_IO; + } else if (tmp[0] == 0) { + usbi_err(DEVICE_CTX(dev), "no configuration value?"); + return LIBUSB_ERROR_IO; + } + + num = strtol(tmp, &endptr, 10); + if (endptr == tmp) { + usbi_err(DEVICE_CTX(dev), "error converting '%s' to integer", tmp); + return LIBUSB_ERROR_IO; + } + + *config = (int) num; + return 0; +} + +/* takes a usbfs/descriptors fd seeked to the start of a configuration, and + * seeks to the next one. 
*/ +static int seek_to_next_config(struct libusb_context *ctx, int fd, + int host_endian) +{ + struct libusb_config_descriptor config; + unsigned char tmp[6]; + off_t off; + ssize_t r; + + /* read first 6 bytes of descriptor */ + r = read(fd, tmp, sizeof(tmp)); + if (r < 0) { + usbi_err(ctx, "read failed ret=%d errno=%d", r, errno); + return LIBUSB_ERROR_IO; + } else if (r < (ssize_t)sizeof(tmp)) { + usbi_err(ctx, "short descriptor read %d/%d", r, sizeof(tmp)); + return LIBUSB_ERROR_IO; + } + + /* seek forward to end of config */ + usbi_parse_descriptor(tmp, "bbwbb", &config, host_endian); + off = lseek(fd, config.wTotalLength - sizeof(tmp), SEEK_CUR); + if (off < 0) { + usbi_err(ctx, "seek failed ret=%d errno=%d", off, errno); + return LIBUSB_ERROR_IO; + } + + return 0; +} + +static int sysfs_get_active_config_descriptor(struct libusb_device *dev, + unsigned char *buffer, size_t len) +{ + int fd; + ssize_t r; + off_t off; + int to_copy; + int config; + unsigned char tmp[6]; + + r = sysfs_get_active_config(dev, &config); + if (r < 0) + return r; + if (config == -1) + return LIBUSB_ERROR_NOT_FOUND; + + usbi_dbg("active configuration %d", config); + + /* sysfs provides access to an in-memory copy of the device descriptor, + * so we use that rather than keeping our own copy */ + + fd = _open_sysfs_attr(dev, "descriptors"); + if (fd < 0) + return fd; + + /* device might have been unconfigured since we read bConfigurationValue, + * so first check that there is any config descriptor data at all... 
*/ + off = lseek(fd, 0, SEEK_END); + if (off < 1) { + usbi_err(DEVICE_CTX(dev), "end seek failed, ret=%d errno=%d", + off, errno); + close(fd); + return LIBUSB_ERROR_IO; + } else if (off == DEVICE_DESC_LENGTH) { + close(fd); + return LIBUSB_ERROR_NOT_FOUND; + } + + off = lseek(fd, DEVICE_DESC_LENGTH, SEEK_SET); + if (off < 0) { + usbi_err(DEVICE_CTX(dev), "seek failed, ret=%d errno=%d", off, errno); + close(fd); + return LIBUSB_ERROR_IO; + } + + /* unbounded loop: we expect the descriptor to be present under all + * circumstances */ + while (1) { + r = read(fd, tmp, sizeof(tmp)); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), "read failed, ret=%d errno=%d", + fd, errno); + return LIBUSB_ERROR_IO; + } else if (r < (ssize_t)sizeof(tmp)) { + usbi_err(DEVICE_CTX(dev), "short read %d/%d", r, sizeof(tmp)); + return LIBUSB_ERROR_IO; + } + + /* check bConfigurationValue */ + if (tmp[5] == config) + break; + + /* try the next descriptor */ + off = lseek(fd, 0 - sizeof(tmp), SEEK_CUR); + if (off < 0) + return LIBUSB_ERROR_IO; + + r = seek_to_next_config(DEVICE_CTX(dev), fd, 0); + if (r < 0) + return r; + } + + to_copy = (len < sizeof(tmp)) ? 
len : sizeof(tmp); + memcpy(buffer, tmp, to_copy); + if (len > sizeof(tmp)) { + r = read(fd, buffer + sizeof(tmp), len - sizeof(tmp)); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), "read failed, ret=%d errno=%d", + fd, errno); + r = LIBUSB_ERROR_IO; + } else if (r == 0) { + usbi_dbg("device is unconfigured"); + r = LIBUSB_ERROR_NOT_FOUND; + } else if ((size_t)r < len - sizeof(tmp)) { + usbi_err(DEVICE_CTX(dev), "short read %d/%d", r, len); + r = LIBUSB_ERROR_IO; + } + } else { + r = 0; + } + + close(fd); + return r; +} + +int linux_get_device_address (struct libusb_context *ctx, int detached, + uint8_t *busnum, uint8_t *devaddr, + const char *dev_node, const char *sys_name) +{ + int retbus, retdev; + + usbi_dbg("getting address for device: %s detached: %d", + sys_name, detached); + /* can't use sysfs to read the bus and device number if the + device has been detached */ + if (!sysfs_can_relate_devices || detached || NULL == sys_name) { + if (NULL == dev_node) { + return LIBUSB_ERROR_OTHER; + } + + /* will this work with all supported kernel versions? 
*/ + if (!strncmp(dev_node, "/dev/bus/usb", 12)) { + sscanf (dev_node, "/dev/bus/usb/%hhd/%hhd", busnum, devaddr); + } else if (!strncmp(dev_node, "/proc/bus/usb", 13)) { + sscanf (dev_node, "/proc/bus/usb/%hhd/%hhd", busnum, devaddr); + } + + return LIBUSB_SUCCESS; + } + + usbi_dbg("scan %s", sys_name); + + *busnum = retbus = __read_sysfs_attr(ctx, sys_name, "busnum"); + if (retbus < 0) + return retbus; + + *devaddr = retdev = __read_sysfs_attr(ctx, sys_name, "devnum"); + if (retdev < 0) + return retdev; + + usbi_dbg("bus=%d dev=%d", *busnum, *devaddr); + if (retbus > 255 || retdev > 255) + return LIBUSB_ERROR_INVALID_PARAM; + + return LIBUSB_SUCCESS; +} + +static int op_get_active_config_descriptor(struct libusb_device *dev, + unsigned char *buffer, size_t len, int *host_endian) +{ + *host_endian = *host_endian; + if (sysfs_has_descriptors) { + return sysfs_get_active_config_descriptor(dev, buffer, len); + } else { + return usbfs_get_active_config_descriptor(dev, buffer, len); + } +} + +/* takes a usbfs fd, attempts to find the requested config and copy a certain + * amount of it into an output buffer. 
*/ +static int get_config_descriptor(struct libusb_context *ctx, int fd, + uint8_t config_index, unsigned char *buffer, size_t len) +{ + off_t off; + ssize_t r; + + off = lseek(fd, DEVICE_DESC_LENGTH, SEEK_SET); + if (off < 0) { + usbi_err(ctx, "seek failed ret=%d errno=%d", off, errno); + return LIBUSB_ERROR_IO; + } + + /* might need to skip some configuration descriptors to reach the + * requested configuration */ + while (config_index > 0) { + r = seek_to_next_config(ctx, fd, 1); + if (r < 0) + return r; + config_index--; + } + + /* read the rest of the descriptor */ + r = read(fd, buffer, len); + if (r < 0) { + usbi_err(ctx, "read failed ret=%d errno=%d", r, errno); + return LIBUSB_ERROR_IO; + } else if ((size_t)r < len) { + usbi_err(ctx, "short output read %d/%d", r, len); + return LIBUSB_ERROR_IO; + } + + return 0; +} + +static int op_get_config_descriptor(struct libusb_device *dev, + uint8_t config_index, unsigned char *buffer, size_t len, int *host_endian) +{ + char filename[PATH_MAX]; + int fd; + int r; + + *host_endian = *host_endian; + /* always read from usbfs: sysfs only has the active descriptor + * this will involve waking the device up, but oh well! */ + + /* FIXME: the above is no longer true, new kernels have all descriptors + * in the descriptors file. but its kinda hard to detect if the kernel + * is sufficiently new. */ + + _get_usbfs_path(dev, filename); + fd = open(filename, O_RDONLY); + if (fd < 0) { + usbi_err(DEVICE_CTX(dev), + "open '%s' failed, ret=%d errno=%d", filename, fd, errno); + return LIBUSB_ERROR_IO; + } + + r = get_config_descriptor(DEVICE_CTX(dev), fd, config_index, buffer, len); + close(fd); + return r; +} + +/* cache the active config descriptor in memory. a value of -1 means that + * we aren't sure which one is active, so just assume the first one. + * only for usbfs. 
*/ +static int cache_active_config(struct libusb_device *dev, int fd, + int active_config) +{ + struct linux_device_priv *priv = _device_priv(dev); + struct libusb_config_descriptor config; + unsigned char tmp[8]; + unsigned char *buf; + int idx; + int r; + + if (active_config == -1) { + idx = 0; + } else { + r = usbi_get_config_index_by_value(dev, active_config, &idx); + if (r < 0) + return r; + if (idx == -1) + return LIBUSB_ERROR_NOT_FOUND; + } + + r = get_config_descriptor(DEVICE_CTX(dev), fd, idx, tmp, sizeof(tmp)); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), "first read error %d", r); + return r; + } + + usbi_parse_descriptor(tmp, "bbw", &config, 0); + buf = malloc(config.wTotalLength); + if (!buf) + return LIBUSB_ERROR_NO_MEM; + + r = get_config_descriptor(DEVICE_CTX(dev), fd, idx, buf, + config.wTotalLength); + if (r < 0) { + free(buf); + return r; + } + + if (priv->config_descriptor) + free(priv->config_descriptor); + priv->config_descriptor = buf; + return 0; +} + +/* send a control message to retrieve active configuration */ +static int usbfs_get_active_config(struct libusb_device *dev, int fd) +{ + unsigned char active_config = 0; + int r; + + struct usbfs_ctrltransfer ctrl = { + .bmRequestType = LIBUSB_ENDPOINT_IN, + .bRequest = LIBUSB_REQUEST_GET_CONFIGURATION, + .wValue = 0, + .wIndex = 0, + .wLength = 1, + .timeout = 1000, + .data = &active_config + }; + + r = ioctl(fd, IOCTL_USBFS_CONTROL, &ctrl); + if (r < 0) { + if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + /* we hit this error path frequently with buggy devices :( */ + usbi_warn(DEVICE_CTX(dev), + "get_configuration failed ret=%d errno=%d", r, errno); + return LIBUSB_ERROR_IO; + } + + return active_config; +} + +static int initialize_device(struct libusb_device *dev, uint8_t busnum, + uint8_t devaddr, const char *sysfs_dir) +{ + struct linux_device_priv *priv = _device_priv(dev); + unsigned char *dev_buf; + char path[PATH_MAX]; + int fd, speed; + int active_config = 0; + int 
device_configured = 1; + ssize_t r; + + dev->bus_number = busnum; + dev->device_address = devaddr; + + if (sysfs_dir) { + priv->sysfs_dir = malloc(strlen(sysfs_dir) + 1); + if (!priv->sysfs_dir) + return LIBUSB_ERROR_NO_MEM; + strcpy(priv->sysfs_dir, sysfs_dir); + + /* Note speed can contain 1.5, in this case __read_sysfs_attr + will stop parsing at the '.' and return 1 */ + speed = __read_sysfs_attr(DEVICE_CTX(dev), sysfs_dir, "speed"); + if (speed >= 0) { + switch (speed) { + case 1: dev->speed = LIBUSB_SPEED_LOW; break; + case 12: dev->speed = LIBUSB_SPEED_FULL; break; + case 480: dev->speed = LIBUSB_SPEED_HIGH; break; + case 5000: dev->speed = LIBUSB_SPEED_SUPER; break; + default: + usbi_warn(DEVICE_CTX(dev), "Unknown device speed: %d Mbps", speed); + } + } + } + + if (sysfs_has_descriptors) + return 0; + + /* cache device descriptor in memory so that we can retrieve it later + * without waking the device up (op_get_device_descriptor) */ + + priv->dev_descriptor = NULL; + priv->config_descriptor = NULL; + + if (sysfs_can_relate_devices) { + int tmp = sysfs_get_active_config(dev, &active_config); + if (tmp < 0) + return tmp; + if (active_config == -1) + device_configured = 0; + } + + _get_usbfs_path(dev, path); + fd = open(path, O_RDWR); + if (fd < 0 && errno == EACCES) { + fd = open(path, O_RDONLY); + /* if we only have read-only access to the device, we cannot + * send a control message to determine the active config. just + * assume the first one is active. */ + active_config = -1; + } + + if (fd < 0) { + usbi_err(DEVICE_CTX(dev), "open failed, ret=%d errno=%d", fd, errno); + return LIBUSB_ERROR_IO; + } + + if (!sysfs_can_relate_devices) { + if (active_config == -1) { + /* if we only have read-only access to the device, we cannot + * send a control message to determine the active config. just + * assume the first one is active. 
*/ + usbi_warn(DEVICE_CTX(dev), "access to %s is read-only; cannot " + "determine active configuration descriptor", path); + } else { + active_config = usbfs_get_active_config(dev, fd); + if (active_config == LIBUSB_ERROR_IO) { + /* buggy devices sometimes fail to report their active config. + * assume unconfigured and continue the probing */ + usbi_warn(DEVICE_CTX(dev), "couldn't query active " + "configuration, assumung unconfigured"); + device_configured = 0; + } else if (active_config < 0) { + close(fd); + return active_config; + } else if (active_config == 0) { + /* some buggy devices have a configuration 0, but we're + * reaching into the corner of a corner case here, so let's + * not support buggy devices in these circumstances. + * stick to the specs: a configuration value of 0 means + * unconfigured. */ + usbi_dbg("active cfg 0? assuming unconfigured device"); + device_configured = 0; + } + } + } + + dev_buf = malloc(DEVICE_DESC_LENGTH); + if (!dev_buf) { + close(fd); + return LIBUSB_ERROR_NO_MEM; + } + + r = read(fd, dev_buf, DEVICE_DESC_LENGTH); + if (r < 0) { + usbi_err(DEVICE_CTX(dev), + "read descriptor failed ret=%d errno=%d", fd, errno); + free(dev_buf); + close(fd); + return LIBUSB_ERROR_IO; + } else if (r < DEVICE_DESC_LENGTH) { + usbi_err(DEVICE_CTX(dev), "short descriptor read (%d)", r); + free(dev_buf); + close(fd); + return LIBUSB_ERROR_IO; + } + + /* bit of a hack: set num_configurations now because cache_active_config() + * calls usbi_get_config_index_by_value() which uses it */ + dev->num_configurations = dev_buf[DEVICE_DESC_LENGTH - 1]; + + if (device_configured) { + r = cache_active_config(dev, fd, active_config); + if (r < 0) { + close(fd); + free(dev_buf); + return r; + } + } + + close(fd); + priv->dev_descriptor = dev_buf; + return 0; +} + +int linux_enumerate_device(struct libusb_context *ctx, + uint8_t busnum, uint8_t devaddr, + const char *sysfs_dir) +{ + unsigned long session_id; + struct libusb_device *dev; + int r = 0; + + /* 
FIXME: session ID is not guaranteed unique as addresses can wrap and + * will be reused. instead we should add a simple sysfs attribute with + * a session ID. */ + session_id = busnum << 8 | devaddr; + usbi_dbg("busnum %d devaddr %d session_id %ld", busnum, devaddr, + session_id); + + usbi_dbg("allocating new device for %d/%d (session %ld)", + busnum, devaddr, session_id); + dev = usbi_alloc_device(ctx, session_id); + if (!dev) + return LIBUSB_ERROR_NO_MEM; + + r = initialize_device(dev, busnum, devaddr, sysfs_dir); + if (r < 0) + goto out; + r = usbi_sanitize_device(dev); + if (r < 0) + goto out; +out: + if (r < 0) + libusb_unref_device(dev); + else + usbi_connect_device(dev); + + return r; +} + +void linux_hotplug_enumerate(uint8_t busnum, uint8_t devaddr, const char *sys_name) +{ + struct libusb_context *ctx; + + usbi_mutex_lock(&active_contexts_lock); + list_for_each_entry(ctx, &active_contexts_list, list, struct libusb_context) { + if (usbi_get_device_by_session_id(ctx, busnum << 8 | devaddr)) { + /* device already exists in the context */ + usbi_dbg("device already exists in context"); + continue; + } + + linux_enumerate_device(ctx, busnum, devaddr, sys_name); + } + usbi_mutex_unlock(&active_contexts_lock); +} + +void linux_hotplug_disconnected(uint8_t busnum, uint8_t devaddr, const char *sys_name) +{ + struct libusb_context *ctx, *tmp; + struct libusb_device *dev; + + usbi_mutex_lock(&active_contexts_lock); + list_for_each_entry_safe(ctx, tmp, &active_contexts_list, list, struct libusb_context) { + dev = usbi_get_device_by_session_id (ctx, busnum << 8 | devaddr); + if (NULL != dev) { + usbi_disconnect_device (dev); + } else { + usbi_err(ctx, "device not found for session %x %s", busnum << 8 | devaddr, sys_name); + } + } + usbi_mutex_unlock(&active_contexts_lock); +} + +#if !defined(USE_UDEV) +/* open a bus directory and adds all discovered devices to the context */ +static int usbfs_scan_busdir(struct libusb_context *ctx, uint8_t busnum) +{ + DIR *dir; + 
char dirpath[PATH_MAX]; + struct dirent *entry; + int r = LIBUSB_ERROR_IO; + + snprintf(dirpath, PATH_MAX, "%s/%03d", usbfs_path, busnum); + usbi_dbg("%s", dirpath); + dir = opendir(dirpath); + if (!dir) { + usbi_err(ctx, "opendir '%s' failed, errno=%d", dirpath, errno); + /* FIXME: should handle valid race conditions like hub unplugged + * during directory iteration - this is not an error */ + return r; + } + + while ((entry = readdir(dir))) { + int devaddr; + + if (entry->d_name[0] == '.') + continue; + + devaddr = atoi(entry->d_name); + if (devaddr == 0) { + usbi_dbg("unknown dir entry %s", entry->d_name); + continue; + } + + if (linux_enumerate_device(ctx, busnum, (uint8_t) devaddr, NULL)) { + usbi_dbg("failed to enumerate dir entry %s", entry->d_name); + continue; + } + + r = 0; + } + + closedir(dir); + return r; +} + +static int usbfs_get_device_list(struct libusb_context *ctx) +{ + struct dirent *entry; + DIR *buses = opendir(usbfs_path); + int r = 0; + + if (!buses) { + usbi_err(ctx, "opendir buses failed errno=%d", errno); + return LIBUSB_ERROR_IO; + } + + while ((entry = readdir(buses))) { + int busnum; + + if (entry->d_name[0] == '.') + continue; + + if (usbdev_names) { + int devaddr; + if (!_is_usbdev_entry(entry, &busnum, &devaddr)) + continue; + + r = linux_enumerate_device(ctx, busnum, (uint8_t) devaddr, NULL); + if (r < 0) { + usbi_dbg("failed to enumerate dir entry %s", entry->d_name); + continue; + } + } else { + busnum = atoi(entry->d_name); + if (busnum == 0) { + usbi_dbg("unknown dir entry %s", entry->d_name); + continue; + } + + r = usbfs_scan_busdir(ctx, busnum); + if (r < 0) + break; + } + } + + closedir(buses); + return r; + +} + +static int sysfs_scan_device(struct libusb_context *ctx, const char *devname) +{ + uint8_t busnum, devaddr; + int ret; + + ret = linux_get_device_address (ctx, 0, &busnum, &devaddr, NULL, devname); + if (LIBUSB_SUCCESS != ret) { + return ret; + } + + return linux_enumerate_device(ctx, busnum & 0xff, devaddr & 
0xff, + devname); +} + +static int sysfs_get_device_list(struct libusb_context *ctx) +{ + DIR *devices = opendir(SYSFS_DEVICE_PATH); + struct dirent *entry; + int r = LIBUSB_ERROR_IO; + + if (!devices) { + usbi_err(ctx, "opendir devices failed errno=%d", errno); + return r; + } + + while ((entry = readdir(devices))) { + if ((!isdigit(entry->d_name[0]) && strncmp(entry->d_name, "usb", 3)) + || strchr(entry->d_name, ':')) + continue; + + if (sysfs_scan_device(ctx, entry->d_name)) { + usbi_dbg("failed to enumerate dir entry %s", entry->d_name); + continue; + } + + r = 0; + } + + closedir(devices); + return r; +} + +static int linux_default_scan_devices (struct libusb_context *ctx) +{ + /* we can retrieve device list and descriptors from sysfs or usbfs. + * sysfs is preferable, because if we use usbfs we end up resuming + * any autosuspended USB devices. however, sysfs is not available + * everywhere, so we need a usbfs fallback too. + * + * as described in the "sysfs vs usbfs" comment at the top of this + * file, sometimes we have sysfs but not enough information to + * relate sysfs devices to usbfs nodes. op_init() determines the + * adequacy of sysfs and sets sysfs_can_relate_devices. 
+ */ + if (sysfs_can_relate_devices != 0) + return sysfs_get_device_list(ctx); + else + return usbfs_get_device_list(ctx); +} +#endif + +static int op_open(struct libusb_device_handle *handle) +{ + struct linux_device_handle_priv *hpriv = _device_handle_priv(handle); + char filename[PATH_MAX]; + int r; + + _get_usbfs_path(handle->dev, filename); + usbi_dbg("opening %s", filename); + hpriv->fd = open(filename, O_RDWR); + if (hpriv->fd < 0) { + if (errno == EACCES) { + usbi_err(HANDLE_CTX(handle), "libusb couldn't open USB device %s: " + "Permission denied.", filename); + usbi_err(HANDLE_CTX(handle), + "libusb requires write access to USB device nodes."); + return LIBUSB_ERROR_ACCESS; + } else if (errno == ENOENT) { + usbi_err(HANDLE_CTX(handle), "libusb couldn't open USB device %s: " + "No such file or directory.", filename); + return LIBUSB_ERROR_NO_DEVICE; + } else { + usbi_err(HANDLE_CTX(handle), + "open failed, code %d errno %d", hpriv->fd, errno); + return LIBUSB_ERROR_IO; + } + } + + r = ioctl(hpriv->fd, IOCTL_USBFS_GET_CAPABILITIES, &hpriv->caps); + if (r < 0) { + if (errno == ENOTTY) + usbi_dbg("%s: getcap not available", filename); + else + usbi_err(HANDLE_CTX(handle), + "%s: getcap failed (%d)", filename, errno); + hpriv->caps = 0; + if (supports_flag_zero_packet) + hpriv->caps |= USBFS_CAP_ZERO_PACKET; + if (supports_flag_bulk_continuation) + hpriv->caps |= USBFS_CAP_BULK_CONTINUATION; + } + + return usbi_add_pollfd(HANDLE_CTX(handle), hpriv->fd, POLLOUT); +} + +static void op_close(struct libusb_device_handle *dev_handle) +{ + int fd = _device_handle_priv(dev_handle)->fd; + usbi_remove_pollfd(HANDLE_CTX(dev_handle), fd); + close(fd); +} + +static int op_get_configuration(struct libusb_device_handle *handle, + int *config) +{ + int r; + if (sysfs_can_relate_devices != 1) + return LIBUSB_ERROR_NOT_SUPPORTED; + + r = sysfs_get_active_config(handle->dev, config); + if (r < 0) + return r; + + if (*config == -1) { + usbi_err(HANDLE_CTX(handle), "device 
unconfigured"); + *config = 0; + } + + return 0; +} + +static int op_set_configuration(struct libusb_device_handle *handle, int config) +{ + struct linux_device_priv *priv = _device_priv(handle->dev); + int fd = _device_handle_priv(handle)->fd; + int r = ioctl(fd, IOCTL_USBFS_SETCONFIG, &config); + if (r) { + if (errno == EINVAL) + return LIBUSB_ERROR_NOT_FOUND; + else if (errno == EBUSY) + return LIBUSB_ERROR_BUSY; + else if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), "failed, error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } + + if (!sysfs_has_descriptors) { + /* update our cached active config descriptor */ + if (config == -1) { + if (priv->config_descriptor) { + free(priv->config_descriptor); + priv->config_descriptor = NULL; + } + } else { + r = cache_active_config(handle->dev, fd, config); + if (r < 0) + usbi_warn(HANDLE_CTX(handle), + "failed to update cached config descriptor, error %d", r); + } + } + + return 0; +} + +static int op_claim_interface(struct libusb_device_handle *handle, int iface) +{ + int fd = _device_handle_priv(handle)->fd; + int r = ioctl(fd, IOCTL_USBFS_CLAIMINTF, &iface); + if (r) { + if (errno == ENOENT) + return LIBUSB_ERROR_NOT_FOUND; + else if (errno == EBUSY) + return LIBUSB_ERROR_BUSY; + else if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), + "claim interface failed, error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } + return 0; +} + +static int op_release_interface(struct libusb_device_handle *handle, int iface) +{ + int fd = _device_handle_priv(handle)->fd; + int r = ioctl(fd, IOCTL_USBFS_RELEASEINTF, &iface); + if (r) { + if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), + "release interface failed, error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } + return 0; +} + +static int op_set_interface(struct libusb_device_handle *handle, int iface, + int altsetting) +{ + int fd = 
_device_handle_priv(handle)->fd; + struct usbfs_setinterface setintf; + int r; + + setintf.interface = iface; + setintf.altsetting = altsetting; + r = ioctl(fd, IOCTL_USBFS_SETINTF, &setintf); + if (r) { + if (errno == EINVAL) + return LIBUSB_ERROR_NOT_FOUND; + else if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), + "setintf failed error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } + + return 0; +} + +static int op_clear_halt(struct libusb_device_handle *handle, + unsigned char endpoint) +{ + int fd = _device_handle_priv(handle)->fd; + unsigned int _endpoint = endpoint; + int r = ioctl(fd, IOCTL_USBFS_CLEAR_HALT, &_endpoint); + if (r) { + if (errno == ENOENT) + return LIBUSB_ERROR_NOT_FOUND; + else if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), + "clear_halt failed error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } + + return 0; +} + +static int op_reset_device(struct libusb_device_handle *handle) +{ + int fd = _device_handle_priv(handle)->fd; + int i, r, ret = 0; + + /* Doing a device reset will cause the usbfs driver to get unbound + from any interfaces it is bound to. By voluntarily unbinding + the usbfs driver ourself, we stop the kernel from rebinding + the interface after reset (which would end up with the interface + getting bound to the in kernel driver if any). 
*/ + for (i = 0; i < USB_MAXINTERFACES; i++) { + if (handle->claimed_interfaces & (1L << i)) { + op_release_interface(handle, i); + } + } + + usbi_mutex_lock(&handle->lock); + r = ioctl(fd, IOCTL_USBFS_RESET, NULL); + if (r) { + if (errno == ENODEV) { + ret = LIBUSB_ERROR_NOT_FOUND; + goto out; + } + + usbi_err(HANDLE_CTX(handle), + "reset failed error %d errno %d", r, errno); + ret = LIBUSB_ERROR_OTHER; + goto out; + } + + /* And re-claim any interfaces which were claimed before the reset */ + for (i = 0; i < USB_MAXINTERFACES; i++) { + if (handle->claimed_interfaces & (1L << i)) { + r = op_claim_interface(handle, i); + if (r) { + usbi_warn(HANDLE_CTX(handle), + "failed to re-claim interface %d after reset", i); + handle->claimed_interfaces &= ~(1L << i); + } + } + } +out: + usbi_mutex_unlock(&handle->lock); + return ret; +} + +static int op_kernel_driver_active(struct libusb_device_handle *handle, + int interface) +{ + int fd = _device_handle_priv(handle)->fd; + struct usbfs_getdriver getdrv; + int r; + + getdrv.interface = interface; + r = ioctl(fd, IOCTL_USBFS_GETDRIVER, &getdrv); + if (r) { + if (errno == ENODATA) + return 0; + else if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), + "get driver failed error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } + + return 1; +} + +static int op_detach_kernel_driver(struct libusb_device_handle *handle, + int interface) +{ + int fd = _device_handle_priv(handle)->fd; + struct usbfs_ioctl command; + int r; + + command.ifno = interface; + command.ioctl_code = IOCTL_USBFS_DISCONNECT; + command.data = NULL; + + r = ioctl(fd, IOCTL_USBFS_IOCTL, &command); + if (r) { + if (errno == ENODATA) + return LIBUSB_ERROR_NOT_FOUND; + else if (errno == EINVAL) + return LIBUSB_ERROR_INVALID_PARAM; + else if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), + "detach failed error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } + + return 0; +} + 
+static int op_attach_kernel_driver(struct libusb_device_handle *handle, + int interface) +{ + int fd = _device_handle_priv(handle)->fd; + struct usbfs_ioctl command; + int r; + + command.ifno = interface; + command.ioctl_code = IOCTL_USBFS_CONNECT; + command.data = NULL; + + r = ioctl(fd, IOCTL_USBFS_IOCTL, &command); + if (r < 0) { + if (errno == ENODATA) + return LIBUSB_ERROR_NOT_FOUND; + else if (errno == EINVAL) + return LIBUSB_ERROR_INVALID_PARAM; + else if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + else if (errno == EBUSY) + return LIBUSB_ERROR_BUSY; + + usbi_err(HANDLE_CTX(handle), + "attach failed error %d errno %d", r, errno); + return LIBUSB_ERROR_OTHER; + } else if (r == 0) { + return LIBUSB_ERROR_NOT_FOUND; + } + + return 0; +} + +static void op_destroy_device(struct libusb_device *dev) +{ + struct linux_device_priv *priv = _device_priv(dev); + if (!sysfs_has_descriptors) { + if (priv->dev_descriptor) + free(priv->dev_descriptor); + if (priv->config_descriptor) + free(priv->config_descriptor); + } + if (priv->sysfs_dir) + free(priv->sysfs_dir); +} + +/* URBs are discarded in reverse order of submission to avoid races. 
*/ +static int discard_urbs(struct usbi_transfer *itransfer, int first, int last_plus_one) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct linux_transfer_priv *tpriv = + usbi_transfer_get_os_priv(itransfer); + struct linux_device_handle_priv *dpriv = + _device_handle_priv(transfer->dev_handle); + int i, ret = 0; + struct usbfs_urb *urb; + + for (i = last_plus_one - 1; i >= first; i--) { + if (LIBUSB_TRANSFER_TYPE_ISOCHRONOUS == transfer->type) + urb = tpriv->iso_urbs[i]; + else + urb = &tpriv->urbs[i]; + + if (0 == ioctl(dpriv->fd, IOCTL_USBFS_DISCARDURB, urb)) + continue; + + if (EINVAL == errno) { + usbi_dbg("URB not found --> assuming ready to be reaped"); + if (i == (last_plus_one - 1)) + ret = LIBUSB_ERROR_NOT_FOUND; + } else if (ENODEV == errno) { + usbi_dbg("Device not found for URB --> assuming ready to be reaped"); + ret = LIBUSB_ERROR_NO_DEVICE; + } else { + usbi_warn(TRANSFER_CTX(transfer), + "unrecognised discard errno %d", errno); + ret = LIBUSB_ERROR_OTHER; + } + } + return ret; +} + +static void free_iso_urbs(struct linux_transfer_priv *tpriv) +{ + int i; + for (i = 0; i < tpriv->num_urbs; i++) { + struct usbfs_urb *urb = tpriv->iso_urbs[i]; + if (!urb) + break; + free(urb); + } + + free(tpriv->iso_urbs); + tpriv->iso_urbs = NULL; +} + +static int submit_bulk_transfer(struct usbi_transfer *itransfer, + unsigned char urb_type) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + struct linux_device_handle_priv *dpriv = + _device_handle_priv(transfer->dev_handle); + struct usbfs_urb *urbs; + int is_out = (transfer->endpoint & LIBUSB_ENDPOINT_DIR_MASK) + == LIBUSB_ENDPOINT_OUT; + int bulk_buffer_len, use_bulk_continuation; + int r; + int i; + size_t alloc_size; + + if (tpriv->urbs) + return LIBUSB_ERROR_BUSY; + + if (is_out && (transfer->flags & LIBUSB_TRANSFER_ADD_ZERO_PACKET) && + !(dpriv->caps 
& USBFS_CAP_ZERO_PACKET)) + return LIBUSB_ERROR_NOT_SUPPORTED; + + /* + * Older versions of usbfs place a 16kb limit on bulk URBs. We work + * around this by splitting large transfers into 16k blocks, and then + * submit all urbs at once. it would be simpler to submit one urb at + * a time, but there is a big performance gain doing it this way. + * + * Newer versions lift the 16k limit (USBFS_CAP_NO_PACKET_SIZE_LIM), + * using arbitrary large transfers can still be a bad idea though, as + * the kernel needs to allocate physical contiguous memory for this, + * which may fail for large buffers. + * + * The kernel solves this problem by splitting the transfer into + * blocks itself when the host-controller is scatter-gather capable + * (USBFS_CAP_BULK_SCATTER_GATHER), which most controllers are. + * + * Last, there is the issue of short-transfers when splitting, for + * short split-transfers to work reliably USBFS_CAP_BULK_CONTINUATION + * is needed, but this is not always available. + */ + if (dpriv->caps & USBFS_CAP_BULK_SCATTER_GATHER) { + /* Good! Just submit everything in one go */ + bulk_buffer_len = transfer->length ? transfer->length : 1; + use_bulk_continuation = 0; + } else if (dpriv->caps & USBFS_CAP_BULK_CONTINUATION) { + /* Split the transfers and use bulk-continuation to + avoid issues with short-transfers */ + bulk_buffer_len = MAX_BULK_BUFFER_LENGTH; + use_bulk_continuation = 1; + } else if (dpriv->caps & USBFS_CAP_NO_PACKET_SIZE_LIM) { + /* Don't split, assume the kernel can alloc the buffer + (otherwise the submit will fail with -ENOMEM) */ + bulk_buffer_len = transfer->length ? transfer->length : 1; + use_bulk_continuation = 0; + } else { + /* Bad, splitting without bulk-continuation, short transfers + which end before the last urb will not work reliably! 
*/ + /* Note we don't warn here as this is "normal" on kernels < + 2.6.32 and not a problem for most applications */ + bulk_buffer_len = MAX_BULK_BUFFER_LENGTH; + use_bulk_continuation = 0; + } + + int num_urbs = transfer->length / bulk_buffer_len; + int last_urb_partial = 0; + + if (transfer->length == 0) { + num_urbs = 1; + } else if ((transfer->length % bulk_buffer_len) > 0) { + last_urb_partial = 1; + num_urbs++; + } + usbi_dbg("need %d urbs for new transfer with length %d", num_urbs, + transfer->length); + alloc_size = num_urbs * sizeof(struct usbfs_urb); + urbs = malloc(alloc_size); + if (!urbs) + return LIBUSB_ERROR_NO_MEM; + memset(urbs, 0, alloc_size); + tpriv->urbs = urbs; + tpriv->num_urbs = num_urbs; + tpriv->num_retired = 0; + tpriv->reap_action = NORMAL; + tpriv->reap_status = LIBUSB_TRANSFER_COMPLETED; + + for (i = 0; i < num_urbs; i++) { + struct usbfs_urb *urb = &urbs[i]; + urb->usercontext = itransfer; + urb->type = urb_type; + urb->endpoint = transfer->endpoint; + urb->buffer = transfer->buffer + (i * bulk_buffer_len); + /* don't set the short not ok flag for the last URB */ + if (use_bulk_continuation && !is_out && i < num_urbs - 1) + urb->flags = USBFS_URB_SHORT_NOT_OK; + if (i == num_urbs - 1 && last_urb_partial) + urb->buffer_length = transfer->length % bulk_buffer_len; + else if (transfer->length == 0) + urb->buffer_length = 0; + else + urb->buffer_length = bulk_buffer_len; + + if (i > 0 && use_bulk_continuation) + urb->flags |= USBFS_URB_BULK_CONTINUATION; + + /* we have already checked that the flag is supported */ + if (is_out && i == num_urbs - 1 && + transfer->flags & LIBUSB_TRANSFER_ADD_ZERO_PACKET) + urb->flags |= USBFS_URB_ZERO_PACKET; + + r = ioctl(dpriv->fd, IOCTL_USBFS_SUBMITURB, urb); + if (r < 0) { + if (errno == ENODEV) { + r = LIBUSB_ERROR_NO_DEVICE; + } else { + usbi_err(TRANSFER_CTX(transfer), + "submiturb failed error %d errno=%d", r, errno); + r = LIBUSB_ERROR_IO; + } + + /* if the first URB submission fails, we can simply 
free up and + * return failure immediately. */ + if (i == 0) { + usbi_dbg("first URB failed, easy peasy"); + free(urbs); + tpriv->urbs = NULL; + return r; + } + + /* if it's not the first URB that failed, the situation is a bit + * tricky. we may need to discard all previous URBs. there are + * complications: + * - discarding is asynchronous - discarded urbs will be reaped + * later. the user must not have freed the transfer when the + * discarded URBs are reaped, otherwise libusb will be using + * freed memory. + * - the earlier URBs may have completed successfully and we do + * not want to throw away any data. + * - this URB failing may be no error; EREMOTEIO means that + * this transfer simply didn't need all the URBs we submitted + * so, we report that the transfer was submitted successfully and + * in case of error we discard all previous URBs. later when + * the final reap completes we can report error to the user, + * or success if an earlier URB was completed successfully. + */ + tpriv->reap_action = EREMOTEIO == errno ? COMPLETED_EARLY : SUBMIT_FAILED; + + /* The URBs we haven't submitted yet we count as already + * retired. */ + tpriv->num_retired += num_urbs - i; + + /* If we completed short then don't try to discard. 
*/ + if (COMPLETED_EARLY == tpriv->reap_action) + return 0; + + discard_urbs(itransfer, 0, i); + + usbi_dbg("reporting successful submission but waiting for %d " + "discards before reporting error", i); + return 0; + } + } + + return 0; +} + +static int submit_iso_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + struct linux_device_handle_priv *dpriv = + _device_handle_priv(transfer->dev_handle); + struct usbfs_urb **urbs; + size_t alloc_size; + int num_packets = transfer->num_iso_packets; + int i; + int this_urb_len = 0; + int num_urbs = 1; + int packet_offset = 0; + unsigned int packet_len; + unsigned char *urb_buffer = transfer->buffer; + + if (tpriv->iso_urbs) + return LIBUSB_ERROR_BUSY; + + /* usbfs places a 32kb limit on iso URBs. we divide up larger requests + * into smaller units to meet such restriction, then fire off all the + * units at once. it would be simpler if we just fired one unit at a time, + * but there is a big performance gain through doing it this way. + * + * Newer kernels lift the 32k limit (USBFS_CAP_NO_PACKET_SIZE_LIM), + * using arbitrary large transfers is still a bad idea though, as + * the kernel needs to allocate physical contiguous memory for this, + * which may fail for large buffers. 
+ */ + + /* calculate how many URBs we need */ + for (i = 0; i < num_packets; i++) { + unsigned int space_remaining = MAX_ISO_BUFFER_LENGTH - this_urb_len; + packet_len = transfer->iso_packet_desc[i].length; + + if (packet_len > space_remaining) { + num_urbs++; + this_urb_len = packet_len; + } else { + this_urb_len += packet_len; + } + } + usbi_dbg("need %d 32k URBs for transfer", num_urbs); + + alloc_size = num_urbs * sizeof(*urbs); + urbs = malloc(alloc_size); + if (!urbs) + return LIBUSB_ERROR_NO_MEM; + memset(urbs, 0, alloc_size); + + tpriv->iso_urbs = urbs; + tpriv->num_urbs = num_urbs; + tpriv->num_retired = 0; + tpriv->reap_action = NORMAL; + tpriv->iso_packet_offset = 0; + + /* allocate + initialize each URB with the correct number of packets */ + for (i = 0; i < num_urbs; i++) { + struct usbfs_urb *urb; + unsigned int space_remaining_in_urb = MAX_ISO_BUFFER_LENGTH; + int urb_packet_offset = 0; + unsigned char *urb_buffer_orig = urb_buffer; + int j; + int k; + + /* swallow up all the packets we can fit into this URB */ + while (packet_offset < transfer->num_iso_packets) { + packet_len = transfer->iso_packet_desc[packet_offset].length; + if (packet_len <= space_remaining_in_urb) { + /* throw it in */ + urb_packet_offset++; + packet_offset++; + space_remaining_in_urb -= packet_len; + urb_buffer += packet_len; + } else { + /* it can't fit, save it for the next URB */ + break; + } + } + + alloc_size = sizeof(*urb) + + (urb_packet_offset * sizeof(struct usbfs_iso_packet_desc)); + urb = malloc(alloc_size); + if (!urb) { + free_iso_urbs(tpriv); + return LIBUSB_ERROR_NO_MEM; + } + memset(urb, 0, alloc_size); + urbs[i] = urb; + + /* populate packet lengths */ + for (j = 0, k = packet_offset - urb_packet_offset; + k < packet_offset; k++, j++) { + packet_len = transfer->iso_packet_desc[k].length; + urb->iso_frame_desc[j].length = packet_len; + } + + urb->usercontext = itransfer; + urb->type = USBFS_URB_TYPE_ISO; + /* FIXME: interface for non-ASAP data? 
*/ + urb->flags = USBFS_URB_ISO_ASAP; + urb->endpoint = transfer->endpoint; + urb->number_of_packets = urb_packet_offset; + urb->buffer = urb_buffer_orig; + } + + /* submit URBs */ + for (i = 0; i < num_urbs; i++) { + int r = ioctl(dpriv->fd, IOCTL_USBFS_SUBMITURB, urbs[i]); + if (r < 0) { + if (errno == ENODEV) { + r = LIBUSB_ERROR_NO_DEVICE; + } else { + usbi_err(TRANSFER_CTX(transfer), + "submiturb failed error %d errno=%d", r, errno); + r = LIBUSB_ERROR_IO; + } + + /* if the first URB submission fails, we can simply free up and + * return failure immediately. */ + if (i == 0) { + usbi_dbg("first URB failed, easy peasy"); + free_iso_urbs(tpriv); + return r; + } + + /* if it's not the first URB that failed, the situation is a bit + * tricky. we must discard all previous URBs. there are + * complications: + * - discarding is asynchronous - discarded urbs will be reaped + * later. the user must not have freed the transfer when the + * discarded URBs are reaped, otherwise libusb will be using + * freed memory. + * - the earlier URBs may have completed successfully and we do + * not want to throw away any data. + * so, in this case we discard all the previous URBs BUT we report + * that the transfer was submitted successfully. then later when + * the final discard completes we can report error to the user. + */ + tpriv->reap_action = SUBMIT_FAILED; + + /* The URBs we haven't submitted yet we count as already + * retired. 
*/ + tpriv->num_retired = num_urbs - i; + discard_urbs(itransfer, 0, i); + + usbi_dbg("reporting successful submission but waiting for %d " + "discards before reporting error", i); + return 0; + } + } + + return 0; +} + +static int submit_control_transfer(struct usbi_transfer *itransfer) +{ + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct linux_device_handle_priv *dpriv = + _device_handle_priv(transfer->dev_handle); + struct usbfs_urb *urb; + int r; + + if (tpriv->urbs) + return LIBUSB_ERROR_BUSY; + + if (transfer->length - LIBUSB_CONTROL_SETUP_SIZE > MAX_CTRL_BUFFER_LENGTH) + return LIBUSB_ERROR_INVALID_PARAM; + + urb = malloc(sizeof(struct usbfs_urb)); + if (!urb) + return LIBUSB_ERROR_NO_MEM; + memset(urb, 0, sizeof(struct usbfs_urb)); + tpriv->urbs = urb; + tpriv->num_urbs = 1; + tpriv->reap_action = NORMAL; + + urb->usercontext = itransfer; + urb->type = USBFS_URB_TYPE_CONTROL; + urb->endpoint = transfer->endpoint; + urb->buffer = transfer->buffer; + urb->buffer_length = transfer->length; + + r = ioctl(dpriv->fd, IOCTL_USBFS_SUBMITURB, urb); + if (r < 0) { + free(urb); + tpriv->urbs = NULL; + if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(TRANSFER_CTX(transfer), + "submiturb failed error %d errno=%d", r, errno); + return LIBUSB_ERROR_IO; + } + return 0; +} + +static int op_submit_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + return submit_control_transfer(itransfer); + case LIBUSB_TRANSFER_TYPE_BULK: + return submit_bulk_transfer(itransfer, USBFS_URB_TYPE_BULK); + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + return submit_bulk_transfer(itransfer, USBFS_URB_TYPE_INTERRUPT); + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + return submit_iso_transfer(itransfer); + default: + 
usbi_err(TRANSFER_CTX(transfer), + "unknown endpoint type %d", transfer->type); + return LIBUSB_ERROR_INVALID_PARAM; + } +} + +static int op_cancel_transfer(struct usbi_transfer *itransfer) +{ + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_BULK: + if (tpriv->reap_action == ERROR) + break; + /* else, fall through */ + case LIBUSB_TRANSFER_TYPE_CONTROL: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + tpriv->reap_action = CANCELLED; + break; + default: + usbi_err(TRANSFER_CTX(transfer), + "unknown endpoint type %d", transfer->type); + return LIBUSB_ERROR_INVALID_PARAM; + } + + if (!tpriv->urbs) + return LIBUSB_ERROR_NOT_FOUND; + + return discard_urbs(itransfer, 0, tpriv->num_urbs); +} + +static void op_clear_transfer_priv(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + + /* urbs can be freed also in submit_transfer so lock mutex first */ + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + usbi_mutex_lock(&itransfer->lock); + if (tpriv->urbs) + free(tpriv->urbs); + tpriv->urbs = NULL; + usbi_mutex_unlock(&itransfer->lock); + break; + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + usbi_mutex_lock(&itransfer->lock); + if (tpriv->iso_urbs) + free_iso_urbs(tpriv); + usbi_mutex_unlock(&itransfer->lock); + break; + default: + usbi_err(TRANSFER_CTX(transfer), + "unknown endpoint type %d", transfer->type); + } +} + +static int handle_bulk_completion(struct usbi_transfer *itransfer, + struct usbfs_urb *urb) +{ + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + struct libusb_transfer *transfer = 
USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + int urb_idx = urb - tpriv->urbs; + + usbi_mutex_lock(&itransfer->lock); + usbi_dbg("handling completion status %d of bulk urb %d/%d", urb->status, + urb_idx + 1, tpriv->num_urbs); + + tpriv->num_retired++; + + if (tpriv->reap_action != NORMAL) { + /* cancelled, submit_fail, or completed early */ + usbi_dbg("abnormal reap: urb status %d", urb->status); + + /* even though we're in the process of cancelling, it's possible that + * we may receive some data in these URBs that we don't want to lose. + * examples: + * 1. while the kernel is cancelling all the packets that make up an + * URB, a few of them might complete. so we get back a successful + * cancellation *and* some data. + * 2. we receive a short URB which marks the early completion condition, + * so we start cancelling the remaining URBs. however, we're too + * slow and another URB completes (or at least completes partially). + * (this can't happen since we always use BULK_CONTINUATION.) + * + * When this happens, our objectives are not to lose any "surplus" data, + * and also to stick it at the end of the previously-received data + * (closing any holes), so that libusb reports the total amount of + * transferred data and presents it in a contiguous chunk. 
+ */ + if (urb->actual_length > 0) { + unsigned char *target = transfer->buffer + itransfer->transferred; + usbi_dbg("received %d bytes of surplus data", urb->actual_length); + if (urb->buffer != target) { + usbi_dbg("moving surplus data from offset %d to offset %d", + (unsigned char *) urb->buffer - transfer->buffer, + target - transfer->buffer); + memmove(target, urb->buffer, urb->actual_length); + } + itransfer->transferred += urb->actual_length; + } + + if (tpriv->num_retired == tpriv->num_urbs) { + usbi_dbg("abnormal reap: last URB handled, reporting"); + if (tpriv->reap_action != COMPLETED_EARLY && + tpriv->reap_status == LIBUSB_TRANSFER_COMPLETED) + tpriv->reap_status = LIBUSB_TRANSFER_ERROR; + goto completed; + } + goto out_unlock; + } + + itransfer->transferred += urb->actual_length; + + /* Many of these errors can occur on *any* urb of a multi-urb + * transfer. When they do, we tear down the rest of the transfer. + */ + switch (urb->status) { + case 0: + break; + case -EREMOTEIO: /* short transfer */ + break; + case -ENOENT: /* cancelled */ + case -ECONNRESET: + break; + case -ENODEV: + case -ESHUTDOWN: + usbi_dbg("device removed"); + tpriv->reap_status = LIBUSB_TRANSFER_NO_DEVICE; + goto cancel_remaining; + case -EPIPE: + usbi_dbg("detected endpoint stall"); + if (tpriv->reap_status == LIBUSB_TRANSFER_COMPLETED) + tpriv->reap_status = LIBUSB_TRANSFER_STALL; + goto cancel_remaining; + case -EOVERFLOW: + /* overflow can only ever occur in the last urb */ + usbi_dbg("overflow, actual_length=%d", urb->actual_length); + if (tpriv->reap_status == LIBUSB_TRANSFER_COMPLETED) + tpriv->reap_status = LIBUSB_TRANSFER_OVERFLOW; + goto completed; + case -ETIME: + case -EPROTO: + case -EILSEQ: + case -ECOMM: + case -ENOSR: + usbi_dbg("low level error %d", urb->status); + tpriv->reap_action = ERROR; + goto cancel_remaining; + default: + usbi_warn(ITRANSFER_CTX(itransfer), + "unrecognised urb status %d", urb->status); + tpriv->reap_action = ERROR; + goto 
cancel_remaining; + } + + /* if we're the last urb or we got less data than requested then we're + * done */ + if (urb_idx == tpriv->num_urbs - 1) { + usbi_dbg("last URB in transfer --> complete!"); + goto completed; + } else if (urb->actual_length < urb->buffer_length) { + usbi_dbg("short transfer %d/%d --> complete!", + urb->actual_length, urb->buffer_length); + if (tpriv->reap_action == NORMAL) + tpriv->reap_action = COMPLETED_EARLY; + } else + goto out_unlock; + +cancel_remaining: + if (ERROR == tpriv->reap_action && LIBUSB_TRANSFER_COMPLETED == tpriv->reap_status) + tpriv->reap_status = LIBUSB_TRANSFER_ERROR; + + if (tpriv->num_retired == tpriv->num_urbs) /* nothing to cancel */ + goto completed; + + /* cancel remaining urbs and wait for their completion before + * reporting results */ + discard_urbs(itransfer, urb_idx + 1, tpriv->num_urbs); + +out_unlock: + usbi_mutex_unlock(&itransfer->lock); + return 0; + +completed: + free(tpriv->urbs); + tpriv->urbs = NULL; + usbi_mutex_unlock(&itransfer->lock); + return CANCELLED == tpriv->reap_action ? 
+ usbi_handle_transfer_cancellation(itransfer) : + usbi_handle_transfer_completion(itransfer, tpriv->reap_status); +} + +static int handle_iso_completion(struct usbi_transfer *itransfer, + struct usbfs_urb *urb) +{ + struct libusb_transfer *transfer = + USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + int num_urbs = tpriv->num_urbs; + int urb_idx = 0; + int i; + enum libusb_transfer_status status = LIBUSB_TRANSFER_COMPLETED; + + usbi_mutex_lock(&itransfer->lock); + for (i = 0; i < num_urbs; i++) { + if (urb == tpriv->iso_urbs[i]) { + urb_idx = i + 1; + break; + } + } + if (urb_idx == 0) { + usbi_err(TRANSFER_CTX(transfer), "could not locate urb!"); + usbi_mutex_unlock(&itransfer->lock); + return LIBUSB_ERROR_NOT_FOUND; + } + + usbi_dbg("handling completion status %d of iso urb %d/%d", urb->status, + urb_idx, num_urbs); + + /* copy isochronous results back in */ + + for (i = 0; i < urb->number_of_packets; i++) { + struct usbfs_iso_packet_desc *urb_desc = &urb->iso_frame_desc[i]; + struct libusb_iso_packet_descriptor *lib_desc = + &transfer->iso_packet_desc[tpriv->iso_packet_offset++]; + lib_desc->status = LIBUSB_TRANSFER_COMPLETED; + switch (urb_desc->status) { + case 0: + break; + case -ENOENT: /* cancelled */ + case -ECONNRESET: + break; + case -ENODEV: + case -ESHUTDOWN: + usbi_dbg("device removed"); + lib_desc->status = LIBUSB_TRANSFER_NO_DEVICE; + break; + case -EPIPE: + usbi_dbg("detected endpoint stall"); + lib_desc->status = LIBUSB_TRANSFER_STALL; + break; + case -EOVERFLOW: + usbi_dbg("overflow error"); + lib_desc->status = LIBUSB_TRANSFER_OVERFLOW; + break; + case -ETIME: + case -EPROTO: + case -EILSEQ: + case -ECOMM: + case -ENOSR: + case -EXDEV: + usbi_dbg("low-level USB error %d", urb_desc->status); + lib_desc->status = LIBUSB_TRANSFER_ERROR; + break; + default: + usbi_warn(TRANSFER_CTX(transfer), + "unrecognised urb status %d", urb_desc->status); + lib_desc->status = 
LIBUSB_TRANSFER_ERROR; + break; + } + lib_desc->actual_length = urb_desc->actual_length; + } + + tpriv->num_retired++; + + if (tpriv->reap_action != NORMAL) { /* cancelled or submit_fail */ + usbi_dbg("CANCEL: urb status %d", urb->status); + + if (tpriv->num_retired == num_urbs) { + usbi_dbg("CANCEL: last URB handled, reporting"); + free_iso_urbs(tpriv); + if (tpriv->reap_action == CANCELLED) { + usbi_mutex_unlock(&itransfer->lock); + return usbi_handle_transfer_cancellation(itransfer); + } else { + usbi_mutex_unlock(&itransfer->lock); + return usbi_handle_transfer_completion(itransfer, + LIBUSB_TRANSFER_ERROR); + } + } + goto out; + } + + switch (urb->status) { + case 0: + break; + case -ENOENT: /* cancelled */ + case -ECONNRESET: + break; + case -ESHUTDOWN: + usbi_dbg("device removed"); + status = LIBUSB_TRANSFER_NO_DEVICE; + break; + default: + usbi_warn(TRANSFER_CTX(transfer), + "unrecognised urb status %d", urb->status); + status = LIBUSB_TRANSFER_ERROR; + break; + } + + /* if we're the last urb then we're done */ + if (urb_idx == num_urbs) { + usbi_dbg("last URB in transfer --> complete!"); + free_iso_urbs(tpriv); + usbi_mutex_unlock(&itransfer->lock); + return usbi_handle_transfer_completion(itransfer, status); + } + +out: + usbi_mutex_unlock(&itransfer->lock); + return 0; +} + +static int handle_control_completion(struct usbi_transfer *itransfer, + struct usbfs_urb *urb) +{ + struct linux_transfer_priv *tpriv = usbi_transfer_get_os_priv(itransfer); + int status; + + usbi_mutex_lock(&itransfer->lock); + usbi_dbg("handling completion status %d", urb->status); + + itransfer->transferred += urb->actual_length; + + if (tpriv->reap_action == CANCELLED) { + if (urb->status != 0 && urb->status != -ENOENT) + usbi_warn(ITRANSFER_CTX(itransfer), + "cancel: unrecognised urb status %d", urb->status); + free(tpriv->urbs); + tpriv->urbs = NULL; + usbi_mutex_unlock(&itransfer->lock); + return usbi_handle_transfer_cancellation(itransfer); + } + + switch (urb->status) { + 
case 0: + status = LIBUSB_TRANSFER_COMPLETED; + break; + case -ENOENT: /* cancelled */ + status = LIBUSB_TRANSFER_CANCELLED; + break; + case -ENODEV: + case -ESHUTDOWN: + usbi_dbg("device removed"); + status = LIBUSB_TRANSFER_NO_DEVICE; + break; + case -EPIPE: + usbi_dbg("unsupported control request"); + status = LIBUSB_TRANSFER_STALL; + break; + case -EOVERFLOW: + usbi_dbg("control overflow error"); + status = LIBUSB_TRANSFER_OVERFLOW; + break; + case -ETIME: + case -EPROTO: + case -EILSEQ: + case -ECOMM: + case -ENOSR: + usbi_dbg("low-level bus error occurred"); + status = LIBUSB_TRANSFER_ERROR; + break; + default: + usbi_warn(ITRANSFER_CTX(itransfer), + "unrecognised urb status %d", urb->status); + status = LIBUSB_TRANSFER_ERROR; + break; + } + + free(tpriv->urbs); + tpriv->urbs = NULL; + usbi_mutex_unlock(&itransfer->lock); + return usbi_handle_transfer_completion(itransfer, status); +} + +static int reap_for_handle(struct libusb_device_handle *handle) +{ + struct linux_device_handle_priv *hpriv = _device_handle_priv(handle); + int r; + struct usbfs_urb *urb; + struct usbi_transfer *itransfer; + struct libusb_transfer *transfer; + + r = ioctl(hpriv->fd, IOCTL_USBFS_REAPURBNDELAY, &urb); + if (r == -1 && errno == EAGAIN) + return 1; + if (r < 0) { + if (errno == ENODEV) + return LIBUSB_ERROR_NO_DEVICE; + + usbi_err(HANDLE_CTX(handle), "reap failed error %d errno=%d", + r, errno); + return LIBUSB_ERROR_IO; + } + + itransfer = urb->usercontext; + transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + usbi_dbg("urb type=%d status=%d transferred=%d", urb->type, urb->status, + urb->actual_length); + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + return handle_iso_completion(itransfer, urb); + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + return handle_bulk_completion(itransfer, urb); + case LIBUSB_TRANSFER_TYPE_CONTROL: + return handle_control_completion(itransfer, urb); + default: + usbi_err(HANDLE_CTX(handle), 
"unrecognised endpoint type %x", + transfer->type); + return LIBUSB_ERROR_OTHER; + } +} + +static int op_handle_events(struct libusb_context *ctx, + struct pollfd *fds, POLL_NFDS_TYPE nfds, int num_ready) +{ + int r; + unsigned int i = 0; + + usbi_mutex_lock(&ctx->open_devs_lock); + for (i = 0; i < nfds && num_ready > 0; i++) { + struct pollfd *pollfd = &fds[i]; + struct libusb_device_handle *handle; + struct linux_device_handle_priv *hpriv = NULL; + + if (!pollfd->revents) + continue; + + num_ready--; + list_for_each_entry(handle, &ctx->open_devs, list, struct libusb_device_handle) { + hpriv = _device_handle_priv(handle); + if (hpriv->fd == pollfd->fd) + break; + } + + if (pollfd->revents & POLLERR) { + usbi_remove_pollfd(HANDLE_CTX(handle), hpriv->fd); + usbi_handle_disconnect(handle); + continue; + } + + do { + r = reap_for_handle(handle); + } while (r == 0); + if (r == 1 || r == LIBUSB_ERROR_NO_DEVICE) + continue; + else if (r < 0) + goto out; + } + + r = 0; +out: + usbi_mutex_unlock(&ctx->open_devs_lock); + return r; +} + +static int op_clock_gettime(int clk_id, struct timespec *tp) +{ + switch (clk_id) { + case USBI_CLOCK_MONOTONIC: + return clock_gettime(monotonic_clkid, tp); + case USBI_CLOCK_REALTIME: + return clock_gettime(CLOCK_REALTIME, tp); + default: + return LIBUSB_ERROR_INVALID_PARAM; + } +} + +#ifdef USBI_TIMERFD_AVAILABLE +static clockid_t op_get_timerfd_clockid(void) +{ + return monotonic_clkid; + +} +#endif + +const struct usbi_os_backend linux_usbfs_backend = { + .name = "Linux usbfs", + .init = op_init, + .exit = op_exit, + .get_device_list = NULL, + .get_device_descriptor = op_get_device_descriptor, + .get_active_config_descriptor = op_get_active_config_descriptor, + .get_config_descriptor = op_get_config_descriptor, + + .open = op_open, + .close = op_close, + .get_configuration = op_get_configuration, + .set_configuration = op_set_configuration, + .claim_interface = op_claim_interface, + .release_interface = op_release_interface, + + 
.set_interface_altsetting = op_set_interface, + .clear_halt = op_clear_halt, + .reset_device = op_reset_device, + + .kernel_driver_active = op_kernel_driver_active, + .detach_kernel_driver = op_detach_kernel_driver, + .attach_kernel_driver = op_attach_kernel_driver, + + .destroy_device = op_destroy_device, + + .submit_transfer = op_submit_transfer, + .cancel_transfer = op_cancel_transfer, + .clear_transfer_priv = op_clear_transfer_priv, + + .handle_events = op_handle_events, + + .clock_gettime = op_clock_gettime, + +#ifdef USBI_TIMERFD_AVAILABLE + .get_timerfd_clockid = op_get_timerfd_clockid, +#endif + + .device_priv_size = sizeof(struct linux_device_priv), + .device_handle_priv_size = sizeof(struct linux_device_handle_priv), + .transfer_priv_size = sizeof(struct linux_transfer_priv), + .add_iso_packet_size = 0, +}; + diff --git a/compat/libusb-1.0/libusb/os/linux_usbfs.h b/compat/libusb-1.0/libusb/os/linux_usbfs.h new file mode 100644 index 0000000..661a9c3 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/linux_usbfs.h @@ -0,0 +1,168 @@ +/* + * usbfs header structures + * Copyright (C) 2007 Daniel Drake + * Copyright (c) 2001 Johannes Erdfelt + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef LIBUSB_USBFS_H +#define LIBUSB_USBFS_H + +#include + +#define SYSFS_DEVICE_PATH "/sys/bus/usb/devices" + +struct usbfs_ctrltransfer { + /* keep in sync with usbdevice_fs.h:usbdevfs_ctrltransfer */ + uint8_t bmRequestType; + uint8_t bRequest; + uint16_t wValue; + uint16_t wIndex; + uint16_t wLength; + + uint32_t timeout; /* in milliseconds */ + + /* pointer to data */ + void *data; +}; + +struct usbfs_bulktransfer { + /* keep in sync with usbdevice_fs.h:usbdevfs_bulktransfer */ + unsigned int ep; + unsigned int len; + unsigned int timeout; /* in milliseconds */ + + /* pointer to data */ + void *data; +}; + +struct usbfs_setinterface { + /* keep in sync with usbdevice_fs.h:usbdevfs_setinterface */ + unsigned int interface; + unsigned int altsetting; +}; + +#define USBFS_MAXDRIVERNAME 255 + +struct usbfs_getdriver { + unsigned int interface; + char driver[USBFS_MAXDRIVERNAME + 1]; +}; + +#define USBFS_URB_SHORT_NOT_OK 0x01 +#define USBFS_URB_ISO_ASAP 0x02 +#define USBFS_URB_BULK_CONTINUATION 0x04 +#define USBFS_URB_QUEUE_BULK 0x10 +#define USBFS_URB_ZERO_PACKET 0x40 + +enum usbfs_urb_type { + USBFS_URB_TYPE_ISO = 0, + USBFS_URB_TYPE_INTERRUPT = 1, + USBFS_URB_TYPE_CONTROL = 2, + USBFS_URB_TYPE_BULK = 3, +}; + +struct usbfs_iso_packet_desc { + unsigned int length; + unsigned int actual_length; + unsigned int status; +}; + +#define MAX_ISO_BUFFER_LENGTH 32768 +#define MAX_BULK_BUFFER_LENGTH 16384 +#define MAX_CTRL_BUFFER_LENGTH 4096 + +struct usbfs_urb { + unsigned char type; + unsigned char endpoint; + int status; + unsigned int flags; + void *buffer; + int buffer_length; + int actual_length; + int start_frame; + int number_of_packets; + int error_count; + unsigned int signr; + void *usercontext; + struct usbfs_iso_packet_desc 
iso_frame_desc[0]; +}; + +struct usbfs_connectinfo { + unsigned int devnum; + unsigned char slow; +}; + +struct usbfs_ioctl { + int ifno; /* interface 0..N ; negative numbers reserved */ + int ioctl_code; /* MUST encode size + direction of data so the + * macros in give correct values */ + void *data; /* param buffer (in, or out) */ +}; + +struct usbfs_hub_portinfo { + unsigned char numports; + unsigned char port[127]; /* port to device num mapping */ +}; + +#define USBFS_CAP_ZERO_PACKET 0x01 +#define USBFS_CAP_BULK_CONTINUATION 0x02 +#define USBFS_CAP_NO_PACKET_SIZE_LIM 0x04 +#define USBFS_CAP_BULK_SCATTER_GATHER 0x08 + +#define IOCTL_USBFS_CONTROL _IOWR('U', 0, struct usbfs_ctrltransfer) +#define IOCTL_USBFS_BULK _IOWR('U', 2, struct usbfs_bulktransfer) +#define IOCTL_USBFS_RESETEP _IOR('U', 3, unsigned int) +#define IOCTL_USBFS_SETINTF _IOR('U', 4, struct usbfs_setinterface) +#define IOCTL_USBFS_SETCONFIG _IOR('U', 5, unsigned int) +#define IOCTL_USBFS_GETDRIVER _IOW('U', 8, struct usbfs_getdriver) +#define IOCTL_USBFS_SUBMITURB _IOR('U', 10, struct usbfs_urb) +#define IOCTL_USBFS_DISCARDURB _IO('U', 11) +#define IOCTL_USBFS_REAPURB _IOW('U', 12, void *) +#define IOCTL_USBFS_REAPURBNDELAY _IOW('U', 13, void *) +#define IOCTL_USBFS_CLAIMINTF _IOR('U', 15, unsigned int) +#define IOCTL_USBFS_RELEASEINTF _IOR('U', 16, unsigned int) +#define IOCTL_USBFS_CONNECTINFO _IOW('U', 17, struct usbfs_connectinfo) +#define IOCTL_USBFS_IOCTL _IOWR('U', 18, struct usbfs_ioctl) +#define IOCTL_USBFS_HUB_PORTINFO _IOR('U', 19, struct usbfs_hub_portinfo) +#define IOCTL_USBFS_RESET _IO('U', 20) +#define IOCTL_USBFS_CLEAR_HALT _IOR('U', 21, unsigned int) +#define IOCTL_USBFS_DISCONNECT _IO('U', 22) +#define IOCTL_USBFS_CONNECT _IO('U', 23) +#define IOCTL_USBFS_CLAIM_PORT _IOR('U', 24, unsigned int) +#define IOCTL_USBFS_RELEASE_PORT _IOR('U', 25, unsigned int) +#define IOCTL_USBFS_GET_CAPABILITIES _IOR('U', 26, __u32) + +#if defined(HAVE_LIBUDEV) +int 
linux_udev_start_event_monitor(void); +int linux_udev_stop_event_monitor(void); +int linux_udev_scan_devices(struct libusb_context *ctx); +#else +int linux_netlink_start_event_monitor(void); +int linux_netlink_stop_event_monitor(void); +#endif + +void linux_hotplug_enumerate(uint8_t busnum, uint8_t devaddr, const char *sys_name); +void linux_hotplug_disconnected(uint8_t busnum, uint8_t devaddr, const char *sys_name); + +int linux_get_device_address (struct libusb_context *ctx, int detached, + uint8_t *busnum, uint8_t *devaddr, + const char *dev_node, const char *sys_name); +int linux_enumerate_device(struct libusb_context *ctx, + uint8_t busnum, uint8_t devaddr, + const char *sysfs_dir); + +#endif diff --git a/compat/libusb-1.0/libusb/os/openbsd_usb.c b/compat/libusb-1.0/libusb/os/openbsd_usb.c new file mode 100644 index 0000000..e31941b --- /dev/null +++ b/compat/libusb-1.0/libusb/os/openbsd_usb.c @@ -0,0 +1,727 @@ +/* + * Copyright (c) 2011 Martin Pieuchot + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "libusb.h" +#include "libusbi.h" + +struct device_priv { + char devnode[16]; + int fd; + + unsigned char *cdesc; /* active config descriptor */ + usb_device_descriptor_t ddesc; /* usb device descriptor */ +}; + +struct handle_priv { + int pipe[2]; /* for event notification */ + int endpoints[USB_MAX_ENDPOINTS]; +}; + +/* + * Backend functions + */ +static int obsd_get_device_list(struct libusb_context *, + struct discovered_devs **); +static int obsd_open(struct libusb_device_handle *); +static void obsd_close(struct libusb_device_handle *); + +static int obsd_get_device_descriptor(struct libusb_device *, unsigned char *, + int *); +static int obsd_get_active_config_descriptor(struct libusb_device *, + unsigned char *, size_t, int *); +static int obsd_get_config_descriptor(struct libusb_device *, uint8_t, + unsigned char *, size_t, int *); + +static int obsd_get_configuration(struct libusb_device_handle *, int *); +static int obsd_set_configuration(struct libusb_device_handle *, int); + +static int obsd_claim_interface(struct libusb_device_handle *, int); +static int obsd_release_interface(struct libusb_device_handle *, int); + +static int obsd_set_interface_altsetting(struct libusb_device_handle *, int, + int); +static int obsd_clear_halt(struct libusb_device_handle *, unsigned char); +static int obsd_reset_device(struct libusb_device_handle *); +static void obsd_destroy_device(struct libusb_device *); + +static int obsd_submit_transfer(struct usbi_transfer *); +static int obsd_cancel_transfer(struct usbi_transfer *); +static void obsd_clear_transfer_priv(struct usbi_transfer *); +static int obsd_handle_events(struct libusb_context 
*ctx, struct pollfd *, + nfds_t, int); +static int obsd_clock_gettime(int, struct timespec *); + +/* + * Private functions + */ +static int _errno_to_libusb(int); +static int _cache_active_config_descriptor(struct libusb_device *, int); +static int _sync_control_transfer(struct usbi_transfer *); +static int _sync_gen_transfer(struct usbi_transfer *); +static int _access_endpoint(struct libusb_transfer *); + +const struct usbi_os_backend openbsd_backend = { + "Synchronous OpenBSD backend", + NULL, /* init() */ + NULL, /* exit() */ + obsd_get_device_list, + obsd_open, + obsd_close, + + obsd_get_device_descriptor, + obsd_get_active_config_descriptor, + obsd_get_config_descriptor, + + obsd_get_configuration, + obsd_set_configuration, + + obsd_claim_interface, + obsd_release_interface, + + obsd_set_interface_altsetting, + obsd_clear_halt, + obsd_reset_device, + + NULL, /* kernel_driver_active() */ + NULL, /* detach_kernel_driver() */ + NULL, /* attach_kernel_driver() */ + + obsd_destroy_device, + + obsd_submit_transfer, + obsd_cancel_transfer, + obsd_clear_transfer_priv, + + obsd_handle_events, + + obsd_clock_gettime, + sizeof(struct device_priv), + sizeof(struct handle_priv), + 0, /* transfer_priv_size */ + 0, /* add_iso_packet_size */ +}; + +int +obsd_get_device_list(struct libusb_context * ctx, + struct discovered_devs **discdevs) +{ + struct libusb_device *dev; + struct device_priv *dpriv; + struct usb_device_info di; + unsigned long session_id; + char devnode[16]; + int fd, err, i; + + usbi_dbg(""); + + /* Only ugen(4) is supported */ + for (i = 0; i < USB_MAX_DEVICES; i++) { + /* Control endpoint is always .00 */ + snprintf(devnode, sizeof(devnode), "/dev/ugen%d.00", i); + + if ((fd = open(devnode, O_RDONLY)) < 0) { + if (errno != ENOENT && errno != ENXIO) + usbi_err(ctx, "could not open %s", devnode); + continue; + } + + if (ioctl(fd, USB_GET_DEVICEINFO, &di) < 0) + continue; + + session_id = (di.udi_bus << 8 | di.udi_addr); + dev = 
usbi_get_device_by_session_id(ctx, session_id); + + if (dev == NULL) { + dev = usbi_alloc_device(ctx, session_id); + if (dev == NULL) + return (LIBUSB_ERROR_NO_MEM); + + dev->bus_number = di.udi_bus; + dev->device_address = di.udi_addr; + dev->speed = di.udi_speed; + + dpriv = (struct device_priv *)dev->os_priv; + strlcpy(dpriv->devnode, devnode, sizeof(devnode)); + dpriv->fd = -1; + + if (ioctl(fd, USB_GET_DEVICE_DESC, &dpriv->ddesc) < 0) { + err = errno; + goto error; + } + + dpriv->cdesc = NULL; + if (_cache_active_config_descriptor(dev, fd)) { + err = errno; + goto error; + } + + if ((err = usbi_sanitize_device(dev))) + goto error; + } + close(fd); + + if (discovered_devs_append(*discdevs, dev) == NULL) + return (LIBUSB_ERROR_NO_MEM); + } + + return (LIBUSB_SUCCESS); + +error: + close(fd); + libusb_unref_device(dev); + return _errno_to_libusb(err); +} + +int +obsd_open(struct libusb_device_handle *handle) +{ + struct handle_priv *hpriv = (struct handle_priv *)handle->os_priv; + struct device_priv *dpriv = (struct device_priv *)handle->dev->os_priv; + + dpriv->fd = open(dpriv->devnode, O_RDWR); + if (dpriv->fd < 0) { + dpriv->fd = open(dpriv->devnode, O_RDONLY); + if (dpriv->fd < 0) + return _errno_to_libusb(errno); + } + + usbi_dbg("open %s: fd %d", dpriv->devnode, dpriv->fd); + + if (pipe(hpriv->pipe) < 0) + return _errno_to_libusb(errno); + + return usbi_add_pollfd(HANDLE_CTX(handle), hpriv->pipe[0], POLLIN); +} + +void +obsd_close(struct libusb_device_handle *handle) +{ + struct handle_priv *hpriv = (struct handle_priv *)handle->os_priv; + struct device_priv *dpriv = (struct device_priv *)handle->dev->os_priv; + + usbi_dbg("close: fd %d", dpriv->fd); + + close(dpriv->fd); + dpriv->fd = -1; + + usbi_remove_pollfd(HANDLE_CTX(handle), hpriv->pipe[0]); + + close(hpriv->pipe[0]); + close(hpriv->pipe[1]); +} + +int +obsd_get_device_descriptor(struct libusb_device *dev, unsigned char *buf, + int *host_endian) +{ + struct device_priv *dpriv = (struct device_priv 
*)dev->os_priv; + + usbi_dbg(""); + + memcpy(buf, &dpriv->ddesc, DEVICE_DESC_LENGTH); + + *host_endian = 0; + + return (LIBUSB_SUCCESS); +} + +int +obsd_get_active_config_descriptor(struct libusb_device *dev, + unsigned char *buf, size_t len, int *host_endian) +{ + struct device_priv *dpriv = (struct device_priv *)dev->os_priv; + usb_config_descriptor_t *ucd; + + ucd = (usb_config_descriptor_t *) dpriv->cdesc; + len = MIN(len, UGETW(ucd->wTotalLength)); + + usbi_dbg("len %d", len); + + memcpy(buf, dpriv->cdesc, len); + + *host_endian = 0; + + return (LIBUSB_SUCCESS); +} + +int +obsd_get_config_descriptor(struct libusb_device *dev, uint8_t idx, + unsigned char *buf, size_t len, int *host_endian) +{ + struct device_priv *dpriv = (struct device_priv *)dev->os_priv; + struct usb_full_desc ufd; + int fd, err; + + usbi_dbg("index %d, len %d", idx, len); + + /* A config descriptor may be requested before opening the device */ + if (dpriv->fd >= 0) { + fd = dpriv->fd; + } else { + fd = open(dpriv->devnode, O_RDONLY); + if (fd < 0) + return _errno_to_libusb(errno); + } + + ufd.ufd_config_index = idx; + ufd.ufd_size = len; + ufd.ufd_data = buf; + + if ((ioctl(fd, USB_GET_FULL_DESC, &ufd)) < 0) { + err = errno; + if (dpriv->fd < 0) + close(fd); + return _errno_to_libusb(err); + } + + if (dpriv->fd < 0) + close(fd); + + *host_endian = 0; + + return (LIBUSB_SUCCESS); +} + +int +obsd_get_configuration(struct libusb_device_handle *handle, int *config) +{ + struct device_priv *dpriv = (struct device_priv *)handle->dev->os_priv; + + usbi_dbg(""); + + if (ioctl(dpriv->fd, USB_GET_CONFIG, config) < 0) + return _errno_to_libusb(errno); + + usbi_dbg("configuration %d", *config); + + return (LIBUSB_SUCCESS); +} + +int +obsd_set_configuration(struct libusb_device_handle *handle, int config) +{ + struct device_priv *dpriv = (struct device_priv *)handle->dev->os_priv; + + usbi_dbg("configuration %d", config); + + if (ioctl(dpriv->fd, USB_SET_CONFIG, &config) < 0) + return 
_errno_to_libusb(errno); + + return _cache_active_config_descriptor(handle->dev, dpriv->fd); +} + +int +obsd_claim_interface(struct libusb_device_handle *handle, int iface) +{ + struct handle_priv *hpriv = (struct handle_priv *)handle->os_priv; + int i; + + for (i = 0; i < USB_MAX_ENDPOINTS; i++) + hpriv->endpoints[i] = -1; + + return (LIBUSB_SUCCESS); +} + +int +obsd_release_interface(struct libusb_device_handle *handle, int iface) +{ + struct handle_priv *hpriv = (struct handle_priv *)handle->os_priv; + int i; + + for (i = 0; i < USB_MAX_ENDPOINTS; i++) + if (hpriv->endpoints[i] >= 0) + close(hpriv->endpoints[i]); + + return (LIBUSB_SUCCESS); +} + +int +obsd_set_interface_altsetting(struct libusb_device_handle *handle, int iface, + int altsetting) +{ + struct device_priv *dpriv = (struct device_priv *)handle->dev->os_priv; + struct usb_alt_interface intf; + + usbi_dbg("iface %d, setting %d", iface, altsetting); + + memset(&intf, 0, sizeof(intf)); + + intf.uai_interface_index = iface; + intf.uai_alt_no = altsetting; + + if (ioctl(dpriv->fd, USB_SET_ALTINTERFACE, &intf) < 0) + return _errno_to_libusb(errno); + + return (LIBUSB_SUCCESS); +} + +int +obsd_clear_halt(struct libusb_device_handle *handle, unsigned char endpoint) +{ + struct device_priv *dpriv = (struct device_priv *)handle->dev->os_priv; + struct usb_ctl_request req; + + usbi_dbg(""); + + req.ucr_request.bmRequestType = UT_WRITE_ENDPOINT; + req.ucr_request.bRequest = UR_CLEAR_FEATURE; + USETW(req.ucr_request.wValue, UF_ENDPOINT_HALT); + USETW(req.ucr_request.wIndex, endpoint); + USETW(req.ucr_request.wLength, 0); + + if (ioctl(dpriv->fd, USB_DO_REQUEST, &req) < 0) + return _errno_to_libusb(errno); + + return (LIBUSB_SUCCESS); +} + +int +obsd_reset_device(struct libusb_device_handle *handle) +{ + usbi_dbg(""); + + return (LIBUSB_ERROR_NOT_SUPPORTED); +} + +void +obsd_destroy_device(struct libusb_device *dev) +{ + struct device_priv *dpriv = (struct device_priv *)dev->os_priv; + + usbi_dbg(""); + + 
free(dpriv->cdesc); +} + +int +obsd_submit_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer; + struct handle_priv *hpriv; + int err = 0; + + usbi_dbg(""); + + transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + hpriv = (struct handle_priv *)transfer->dev_handle->os_priv; + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + err = _sync_control_transfer(itransfer); + break; + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + if (IS_XFEROUT(transfer)) { + /* Isochronous write is not supported */ + err = LIBUSB_ERROR_NOT_SUPPORTED; + break; + } + err = _sync_gen_transfer(itransfer); + break; + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + if (IS_XFEROUT(transfer) && + transfer->flags & LIBUSB_TRANSFER_ADD_ZERO_PACKET) { + err = LIBUSB_ERROR_NOT_SUPPORTED; + break; + } + err = _sync_gen_transfer(itransfer); + break; + } + + if (err) + return (err); + + if (write(hpriv->pipe[1], &itransfer, sizeof(itransfer)) < 0) + return _errno_to_libusb(errno); + + return (LIBUSB_SUCCESS); +} + +int +obsd_cancel_transfer(struct usbi_transfer *itransfer) +{ + usbi_dbg(""); + + return (LIBUSB_ERROR_NOT_SUPPORTED); +} + +void +obsd_clear_transfer_priv(struct usbi_transfer *itransfer) +{ + usbi_dbg(""); + + /* Nothing to do */ +} + +int +obsd_handle_events(struct libusb_context *ctx, struct pollfd *fds, nfds_t nfds, + int num_ready) +{ + struct libusb_device_handle *handle; + struct handle_priv *hpriv = NULL; + struct usbi_transfer *itransfer; + struct pollfd *pollfd; + int i, err = 0; + + usbi_dbg(""); + + pthread_mutex_lock(&ctx->open_devs_lock); + for (i = 0; i < nfds && num_ready > 0; i++) { + pollfd = &fds[i]; + + if (!pollfd->revents) + continue; + + hpriv = NULL; + num_ready--; + list_for_each_entry(handle, &ctx->open_devs, list, + struct libusb_device_handle) { + hpriv = (struct handle_priv *)handle->os_priv; + + if (hpriv->pipe[0] == pollfd->fd) + break; + + hpriv = NULL; + } + + if (NULL == hpriv) { + 
usbi_dbg("fd %d is not an event pipe!", pollfd->fd); + err = ENOENT; + break; + } + + if (pollfd->revents & POLLERR) { + usbi_remove_pollfd(HANDLE_CTX(handle), hpriv->pipe[0]); + usbi_handle_disconnect(handle); + continue; + } + + if (read(hpriv->pipe[0], &itransfer, sizeof(itransfer)) < 0) { + err = errno; + break; + } + + if ((err = usbi_handle_transfer_completion(itransfer, + LIBUSB_TRANSFER_COMPLETED))) + break; + } + pthread_mutex_unlock(&ctx->open_devs_lock); + + if (err) + return _errno_to_libusb(err); + + return (LIBUSB_SUCCESS); +} + +int +obsd_clock_gettime(int clkid, struct timespec *tp) +{ + usbi_dbg("clock %d", clkid); + + if (clkid == USBI_CLOCK_REALTIME) + return clock_gettime(CLOCK_REALTIME, tp); + + if (clkid == USBI_CLOCK_MONOTONIC) + return clock_gettime(CLOCK_MONOTONIC, tp); + + return (LIBUSB_ERROR_INVALID_PARAM); +} + +int +_errno_to_libusb(int err) +{ + switch (err) { + case EIO: + return (LIBUSB_ERROR_IO); + case EACCES: + return (LIBUSB_ERROR_ACCESS); + case ENOENT: + return (LIBUSB_ERROR_NO_DEVICE); + case ENOMEM: + return (LIBUSB_ERROR_NO_MEM); + } + + usbi_dbg("error: %s", strerror(err)); + + return (LIBUSB_ERROR_OTHER); +} + +int +_cache_active_config_descriptor(struct libusb_device *dev, int fd) +{ + struct device_priv *dpriv = (struct device_priv *)dev->os_priv; + struct usb_config_desc ucd; + struct usb_full_desc ufd; + unsigned char* buf; + int len; + + usbi_dbg("fd %d", fd); + + ucd.ucd_config_index = USB_CURRENT_CONFIG_INDEX; + + if ((ioctl(fd, USB_GET_CONFIG_DESC, &ucd)) < 0) + return _errno_to_libusb(errno); + + usbi_dbg("active bLength %d", ucd.ucd_desc.bLength); + + len = UGETW(ucd.ucd_desc.wTotalLength); + buf = malloc(len); + if (buf == NULL) + return (LIBUSB_ERROR_NO_MEM); + + ufd.ufd_config_index = ucd.ucd_config_index; + ufd.ufd_size = len; + ufd.ufd_data = buf; + + usbi_dbg("index %d, len %d", ufd.ufd_config_index, len); + + if ((ioctl(fd, USB_GET_FULL_DESC, &ufd)) < 0) { + free(buf); + return _errno_to_libusb(errno); + 
} + + if (dpriv->cdesc) + free(dpriv->cdesc); + dpriv->cdesc = buf; + + return (0); +} + +int +_sync_control_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer; + struct libusb_control_setup *setup; + struct device_priv *dpriv; + struct usb_ctl_request req; + + transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + dpriv = (struct device_priv *)transfer->dev_handle->dev->os_priv; + setup = (struct libusb_control_setup *)transfer->buffer; + + usbi_dbg("type %d request %d value %d index %d length %d timeout %d", + setup->bmRequestType, setup->bRequest, + libusb_le16_to_cpu(setup->wValue), + libusb_le16_to_cpu(setup->wIndex), + libusb_le16_to_cpu(setup->wLength), transfer->timeout); + + req.ucr_request.bmRequestType = setup->bmRequestType; + req.ucr_request.bRequest = setup->bRequest; + /* Don't use USETW, libusb already deals with the endianness */ + (*(uint16_t *)req.ucr_request.wValue) = setup->wValue; + (*(uint16_t *)req.ucr_request.wIndex) = setup->wIndex; + (*(uint16_t *)req.ucr_request.wLength) = setup->wLength; + req.ucr_data = transfer->buffer + LIBUSB_CONTROL_SETUP_SIZE; + + if ((transfer->flags & LIBUSB_TRANSFER_SHORT_NOT_OK) == 0) + req.ucr_flags = USBD_SHORT_XFER_OK; + + if ((ioctl(dpriv->fd, USB_SET_TIMEOUT, &transfer->timeout)) < 0) + return _errno_to_libusb(errno); + + if ((ioctl(dpriv->fd, USB_DO_REQUEST, &req)) < 0) + return _errno_to_libusb(errno); + + itransfer->transferred = req.ucr_actlen; + + usbi_dbg("transferred %d", itransfer->transferred); + + return (0); +} + +int +_access_endpoint(struct libusb_transfer *transfer) +{ + struct handle_priv *hpriv; + struct device_priv *dpriv; + char *s, devnode[16]; + int fd, endpt; + mode_t mode; + + hpriv = (struct handle_priv *)transfer->dev_handle->os_priv; + dpriv = (struct device_priv *)transfer->dev_handle->dev->os_priv; + + endpt = UE_GET_ADDR(transfer->endpoint); + mode = IS_XFERIN(transfer) ? 
O_RDONLY : O_WRONLY; + + usbi_dbg("endpoint %d mode %d", endpt, mode); + + if (hpriv->endpoints[endpt] < 0) { + /* Pick the right node given the control one */ + strlcpy(devnode, dpriv->devnode, sizeof(devnode)); + s = strchr(devnode, '.'); + snprintf(s, 4, ".%02d", endpt); + + /* We may need to read/write to the same endpoint later. */ + if (((fd = open(devnode, O_RDWR)) < 0) && (errno == ENXIO)) + if ((fd = open(devnode, mode)) < 0) + return (-1); + + hpriv->endpoints[endpt] = fd; + } + + return (hpriv->endpoints[endpt]); +} + +int +_sync_gen_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer; + int fd, nr = 1; + + transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + /* + * Bulk, Interrupt or Isochronous transfer depends on the + * endpoint and thus the node to open. + */ + if ((fd = _access_endpoint(transfer)) < 0) + return _errno_to_libusb(errno); + + if ((ioctl(fd, USB_SET_TIMEOUT, &transfer->timeout)) < 0) + return _errno_to_libusb(errno); + + if (IS_XFERIN(transfer)) { + if ((transfer->flags & LIBUSB_TRANSFER_SHORT_NOT_OK) == 0) + if ((ioctl(fd, USB_SET_SHORT_XFER, &nr)) < 0) + return _errno_to_libusb(errno); + + nr = read(fd, transfer->buffer, transfer->length); + } else { + nr = write(fd, transfer->buffer, transfer->length); + } + + if (nr < 0) + return _errno_to_libusb(errno); + + itransfer->transferred = nr; + + return (0); +} diff --git a/compat/libusb-1.0/libusb/os/poll_posix.h b/compat/libusb-1.0/libusb/os/poll_posix.h new file mode 100644 index 0000000..0e5e7f5 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/poll_posix.h @@ -0,0 +1,10 @@ +#ifndef LIBUSB_POLL_POSIX_H +#define LIBUSB_POLL_POSIX_H + +#define usbi_write write +#define usbi_read read +#define usbi_close close +#define usbi_pipe pipe +#define usbi_poll poll + +#endif /* LIBUSB_POLL_POSIX_H */ diff --git a/compat/libusb-1.0/libusb/os/poll_windows.c b/compat/libusb-1.0/libusb/os/poll_windows.c new file mode 100644 index 0000000..7f4d9c4 --- /dev/null +++ 
b/compat/libusb-1.0/libusb/os/poll_windows.c @@ -0,0 +1,745 @@ +/* + * poll_windows: poll compatibility wrapper for Windows + * Copyright (C) 2009-2010 Pete Batard + * With contributions from Michael Plante, Orin Eman et al. + * Parts of poll implementation from libusb-win32, by Stephan Meyer et al. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +/* + * poll() and pipe() Windows compatibility layer for libusb 1.0 + * + * The way this layer works is by using OVERLAPPED with async I/O transfers, as + * OVERLAPPED have an associated event which is flagged for I/O completion. + * + * For USB pollable async I/O, you would typically: + * - obtain a Windows HANDLE to a file or device that has been opened in + * OVERLAPPED mode + * - call usbi_create_fd with this handle to obtain a custom fd. + * Note that if you need simultaneous R/W access, you need to call create_fd + * twice, once in _O_RDONLY and once in _O_WRONLY mode to obtain 2 separate + * pollable fds + * - leave the core functions call the poll routine and flag POLLIN/POLLOUT + * + * The pipe pollable synchronous I/O works using the overlapped event associated + * with a fake pipe. The read/write functions are only meant to be used in that + * context. 
+ */ +#include +#include +#include +#include +#include + +#include + +// Uncomment to debug the polling layer +//#define DEBUG_POLL_WINDOWS +#if defined(DEBUG_POLL_WINDOWS) +#define poll_dbg usbi_dbg +#else +// MSVC++ < 2005 cannot use a variadic argument and non MSVC +// compilers produce warnings if parenthesis are omitted. +#if defined(_MSC_VER) && _MSC_VER < 1400 +#define poll_dbg +#else +#define poll_dbg(...) +#endif +#endif + +#if defined(_PREFAST_) +#pragma warning(disable:28719) +#endif + +#if defined(__CYGWIN__) +// cygwin produces a warning unless these prototypes are defined +extern int _open(char* name, int flags); +extern int _close(int fd); +extern int _snprintf(char *buffer, size_t count, const char *format, ...); +#define NUL_DEVICE "/dev/null" +#else +#define NUL_DEVICE "NUL" +#endif + +#define CHECK_INIT_POLLING do {if(!is_polling_set) init_polling();} while(0) + +// public fd data +const struct winfd INVALID_WINFD = {-1, INVALID_HANDLE_VALUE, NULL, RW_NONE}; +struct winfd poll_fd[MAX_FDS]; +// internal fd data +struct { + CRITICAL_SECTION mutex; // lock for fds + // Additional variables for XP CancelIoEx partial emulation + HANDLE original_handle; + DWORD thread_id; +} _poll_fd[MAX_FDS]; + +// globals +BOOLEAN is_polling_set = FALSE; +LONG pipe_number = 0; +static volatile LONG compat_spinlock = 0; + +// CancelIoEx, available on Vista and later only, provides the ability to cancel +// a single transfer (OVERLAPPED) when used. As it may not be part of any of the +// platform headers, we hook into the Kernel32 system DLL directly to seek it. 
+static BOOL (__stdcall *pCancelIoEx)(HANDLE, LPOVERLAPPED) = NULL; +#define CancelIoEx_Available (pCancelIoEx != NULL) +static __inline BOOL cancel_io(int _index) +{ + if ((_index < 0) || (_index >= MAX_FDS)) { + return FALSE; + } + + if ( (poll_fd[_index].fd < 0) || (poll_fd[_index].handle == INVALID_HANDLE_VALUE) + || (poll_fd[_index].handle == 0) || (poll_fd[_index].overlapped == NULL) ) { + return TRUE; + } + if (CancelIoEx_Available) { + return (*pCancelIoEx)(poll_fd[_index].handle, poll_fd[_index].overlapped); + } + if (_poll_fd[_index].thread_id == GetCurrentThreadId()) { + return CancelIo(poll_fd[_index].handle); + } + usbi_warn(NULL, "Unable to cancel I/O that was started from another thread"); + return FALSE; +} + +// Init +void init_polling(void) +{ + int i; + + while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) { + SleepEx(0, TRUE); + } + if (!is_polling_set) { + pCancelIoEx = (BOOL (__stdcall *)(HANDLE,LPOVERLAPPED)) + GetProcAddress(GetModuleHandleA("KERNEL32"), "CancelIoEx"); + usbi_dbg("Will use CancelIo%s for I/O cancellation", + CancelIoEx_Available?"Ex":""); + for (i=0; ihEvent = CreateEvent(NULL, TRUE, FALSE, NULL); + if(overlapped->hEvent == NULL) { + free (overlapped); + return NULL; + } + return overlapped; +} + +void free_overlapped(OVERLAPPED *overlapped) +{ + if (overlapped == NULL) + return; + + if ( (overlapped->hEvent != 0) + && (overlapped->hEvent != INVALID_HANDLE_VALUE) ) { + CloseHandle(overlapped->hEvent); + } + free(overlapped); +} + +void reset_overlapped(OVERLAPPED *overlapped) +{ + HANDLE event_handle; + if (overlapped == NULL) + return; + + event_handle = overlapped->hEvent; + if (event_handle != NULL) { + ResetEvent(event_handle); + } + memset(overlapped, 0, sizeof(OVERLAPPED)); + overlapped->hEvent = event_handle; +} + +void exit_polling(void) +{ + int i; + + while (InterlockedExchange((LONG *)&compat_spinlock, 1) == 1) { + SleepEx(0, TRUE); + } + if (is_polling_set) { + is_polling_set = FALSE; + + for (i=0; i 0) 
&& (poll_fd[i].handle != INVALID_HANDLE_VALUE) && (poll_fd[i].handle != 0) + && (GetFileType(poll_fd[i].handle) == FILE_TYPE_UNKNOWN) ) { + _close(poll_fd[i].fd); + } + free_overlapped(poll_fd[i].overlapped); + if (!CancelIoEx_Available) { + // Close duplicate handle + if (_poll_fd[i].original_handle != INVALID_HANDLE_VALUE) { + CloseHandle(poll_fd[i].handle); + } + } + poll_fd[i] = INVALID_WINFD; + LeaveCriticalSection(&_poll_fd[i].mutex); + DeleteCriticalSection(&_poll_fd[i].mutex); + } + } + compat_spinlock = 0; +} + +/* + * Create a fake pipe. + * As libusb only uses pipes for signaling, all we need from a pipe is an + * event. To that extent, we create a single wfd and overlapped as a means + * to access that event. + */ +int usbi_pipe(int filedes[2]) +{ + int i; + OVERLAPPED* overlapped; + + CHECK_INIT_POLLING; + + overlapped = (OVERLAPPED*) calloc(1, sizeof(OVERLAPPED)); + if (overlapped == NULL) { + return -1; + } + // The overlapped must have status pending for signaling to work in poll + overlapped->Internal = STATUS_PENDING; + overlapped->InternalHigh = 0; + + // Read end of the "pipe" + filedes[0] = _open(NUL_DEVICE, _O_WRONLY); + if (filedes[0] < 0) { + usbi_err(NULL, "could not create pipe: errno %d", errno); + goto out1; + } + // We can use the same handle for both ends + filedes[1] = filedes[0]; + poll_dbg("pipe filedes = %d", filedes[0]); + + // Note: manual reset must be true (second param) as the reset occurs in read + overlapped->hEvent = CreateEvent(NULL, TRUE, FALSE, NULL); + if(!overlapped->hEvent) { + goto out2; + } + + for (i=0; i= 0) { + LeaveCriticalSection(&_poll_fd[i].mutex); + continue; + } + + poll_fd[i].fd = filedes[0]; + poll_fd[i].handle = DUMMY_HANDLE; + poll_fd[i].overlapped = overlapped; + // There's no polling on the write end, so we just use READ for our needs + poll_fd[i].rw = RW_READ; + _poll_fd[i].original_handle = INVALID_HANDLE_VALUE; + LeaveCriticalSection(&_poll_fd[i].mutex); + return 0; + } + } + + 
CloseHandle(overlapped->hEvent); +out2: + _close(filedes[0]); +out1: + free(overlapped); + return -1; +} + +/* + * Create both an fd and an OVERLAPPED from an open Windows handle, so that + * it can be used with our polling function + * The handle MUST support overlapped transfers (usually requires CreateFile + * with FILE_FLAG_OVERLAPPED) + * Return a pollable file descriptor struct, or INVALID_WINFD on error + * + * Note that the fd returned by this function is a per-transfer fd, rather + * than a per-session fd and cannot be used for anything else but our + * custom functions (the fd itself points to the NUL: device) + * if you plan to do R/W on the same handle, you MUST create 2 fds: one for + * read and one for write. Using a single R/W fd is unsupported and will + * produce unexpected results + */ +struct winfd usbi_create_fd(HANDLE handle, int access_mode) +{ + int i, fd; + struct winfd wfd = INVALID_WINFD; + OVERLAPPED* overlapped = NULL; + + CHECK_INIT_POLLING; + + if ((handle == 0) || (handle == INVALID_HANDLE_VALUE)) { + return INVALID_WINFD; + } + + if ((access_mode != _O_RDONLY) && (access_mode != _O_WRONLY)) { + usbi_warn(NULL, "only one of _O_RDONLY or _O_WRONLY are supported.\n" + "If you want to poll for R/W simultaneously, create multiple fds from the same handle."); + return INVALID_WINFD; + } + if (access_mode == _O_RDONLY) { + wfd.rw = RW_READ; + } else { + wfd.rw = RW_WRITE; + } + + // Ensure that we get a non system conflicting unique fd, using + // the same fd attribution system as the pipe ends + fd = _open(NUL_DEVICE, _O_WRONLY); + if (fd < 0) { + return INVALID_WINFD; + } + + overlapped = create_overlapped(); + if(overlapped == NULL) { + _close(fd); + return INVALID_WINFD; + } + + for (i=0; i= 0) { + LeaveCriticalSection(&_poll_fd[i].mutex); + continue; + } + wfd.fd = fd; + // Attempt to emulate some of the CancelIoEx behaviour on platforms + // that don't have it + if (!CancelIoEx_Available) { + _poll_fd[i].thread_id = 
GetCurrentThreadId(); + if (!DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(), + &wfd.handle, 0, TRUE, DUPLICATE_SAME_ACCESS)) { + usbi_dbg("could not duplicate handle for CancelIo - using original one"); + wfd.handle = handle; + // Make sure we won't close the original handle on fd deletion then + _poll_fd[i].original_handle = INVALID_HANDLE_VALUE; + } else { + _poll_fd[i].original_handle = handle; + } + } else { + wfd.handle = handle; + } + wfd.overlapped = overlapped; + memcpy(&poll_fd[i], &wfd, sizeof(struct winfd)); + LeaveCriticalSection(&_poll_fd[i].mutex); + return wfd; + } + } + free_overlapped(overlapped); + _close(fd); + return INVALID_WINFD; +} + +void _free_index(int _index) +{ + // Cancel any async IO (Don't care about the validity of our handles for this) + cancel_io(_index); + // close fake handle for devices + if ( (poll_fd[_index].handle != INVALID_HANDLE_VALUE) && (poll_fd[_index].handle != 0) + && (GetFileType(poll_fd[_index].handle) == FILE_TYPE_UNKNOWN) ) { + _close(poll_fd[_index].fd); + } + // close the duplicate handle (if we have an actual duplicate) + if (!CancelIoEx_Available) { + if (_poll_fd[_index].original_handle != INVALID_HANDLE_VALUE) { + CloseHandle(poll_fd[_index].handle); + } + _poll_fd[_index].original_handle = INVALID_HANDLE_VALUE; + _poll_fd[_index].thread_id = 0; + } + free_overlapped(poll_fd[_index].overlapped); + poll_fd[_index] = INVALID_WINFD; +} + +/* + * Release a pollable file descriptor. 
+ * + * Note that the associated Windows handle is not closed by this call + */ +void usbi_free_fd(int fd) +{ + int _index; + + CHECK_INIT_POLLING; + + _index = _fd_to_index_and_lock(fd); + if (_index < 0) { + return; + } + _free_index(_index); + LeaveCriticalSection(&_poll_fd[_index].mutex); +} + +/* + * The functions below perform various conversions between fd, handle and OVERLAPPED + */ +struct winfd fd_to_winfd(int fd) +{ + int i; + struct winfd wfd; + + CHECK_INIT_POLLING; + + if (fd <= 0) + return INVALID_WINFD; + + for (i=0; i= 0) { + LeaveCriticalSection(&_poll_fd[_index].mutex); + } + usbi_warn(NULL, "invalid fd"); + triggered = -1; + goto poll_exit; + } + + // IN or OUT must match our fd direction + if ((fds[i].events & POLLIN) && (poll_fd[_index].rw != RW_READ)) { + fds[i].revents |= POLLNVAL | POLLERR; + errno = EBADF; + usbi_warn(NULL, "attempted POLLIN on fd without READ access"); + LeaveCriticalSection(&_poll_fd[_index].mutex); + triggered = -1; + goto poll_exit; + } + + if ((fds[i].events & POLLOUT) && (poll_fd[_index].rw != RW_WRITE)) { + fds[i].revents |= POLLNVAL | POLLERR; + errno = EBADF; + usbi_warn(NULL, "attempted POLLOUT on fd without WRITE access"); + LeaveCriticalSection(&_poll_fd[_index].mutex); + triggered = -1; + goto poll_exit; + } + + // The following macro only works if overlapped I/O was reported pending + if ( (HasOverlappedIoCompleted(poll_fd[_index].overlapped)) + || (HasOverlappedIoCompletedSync(poll_fd[_index].overlapped)) ) { + poll_dbg(" completed"); + // checks above should ensure this works: + fds[i].revents = fds[i].events; + triggered++; + } else { + handles_to_wait_on[nb_handles_to_wait_on] = poll_fd[_index].overlapped->hEvent; + handle_to_index[nb_handles_to_wait_on] = i; + nb_handles_to_wait_on++; + } + LeaveCriticalSection(&_poll_fd[_index].mutex); + } + + // If nothing was triggered, wait on all fds that require it + if ((timeout != 0) && (triggered == 0) && (nb_handles_to_wait_on != 0)) { + if (timeout < 0) { + 
poll_dbg("starting infinite wait for %d handles...", (int)nb_handles_to_wait_on); + } else { + poll_dbg("starting %d ms wait for %d handles...", timeout, (int)nb_handles_to_wait_on); + } + ret = WaitForMultipleObjects(nb_handles_to_wait_on, handles_to_wait_on, + FALSE, (timeout<0)?INFINITE:(DWORD)timeout); + object_index = ret-WAIT_OBJECT_0; + if ((object_index >= 0) && ((DWORD)object_index < nb_handles_to_wait_on)) { + poll_dbg(" completed after wait"); + i = handle_to_index[object_index]; + _index = _fd_to_index_and_lock(fds[i].fd); + fds[i].revents = fds[i].events; + triggered++; + if (_index >= 0) { + LeaveCriticalSection(&_poll_fd[_index].mutex); + } + } else if (ret == WAIT_TIMEOUT) { + poll_dbg(" timed out"); + triggered = 0; // 0 = timeout + } else { + errno = EIO; + triggered = -1; // error + } + } + +poll_exit: + if (handles_to_wait_on != NULL) { + free(handles_to_wait_on); + } + if (handle_to_index != NULL) { + free(handle_to_index); + } + return triggered; +} + +/* + * close a fake pipe fd + */ +int usbi_close(int fd) +{ + int _index; + int r = -1; + + CHECK_INIT_POLLING; + + _index = _fd_to_index_and_lock(fd); + + if (_index < 0) { + errno = EBADF; + } else { + if (poll_fd[_index].overlapped != NULL) { + // Must be a different event for each end of the pipe + CloseHandle(poll_fd[_index].overlapped->hEvent); + free(poll_fd[_index].overlapped); + } + r = _close(poll_fd[_index].fd); + if (r != 0) { + errno = EIO; + } + poll_fd[_index] = INVALID_WINFD; + LeaveCriticalSection(&_poll_fd[_index].mutex); + } + return r; +} + +/* + * synchronous write for fake "pipe" signaling + */ +ssize_t usbi_write(int fd, const void *buf, size_t count) +{ + int _index; + + CHECK_INIT_POLLING; + + if (count != sizeof(unsigned char)) { + usbi_err(NULL, "this function should only used for signaling"); + return -1; + } + + _index = _fd_to_index_and_lock(fd); + + if ( (_index < 0) || (poll_fd[_index].overlapped == NULL) ) { + errno = EBADF; + if (_index >= 0) { + 
LeaveCriticalSection(&_poll_fd[_index].mutex); + } + return -1; + } + + poll_dbg("set pipe event (fd = %d, thread = %08X)", _index, GetCurrentThreadId()); + SetEvent(poll_fd[_index].overlapped->hEvent); + poll_fd[_index].overlapped->Internal = STATUS_WAIT_0; + // If two threads write on the pipe at the same time, we need to + // process two separate reads => use the overlapped as a counter + poll_fd[_index].overlapped->InternalHigh++; + + LeaveCriticalSection(&_poll_fd[_index].mutex); + return sizeof(unsigned char); +} + +/* + * synchronous read for fake "pipe" signaling + */ +ssize_t usbi_read(int fd, void *buf, size_t count) +{ + int _index; + ssize_t r = -1; + + CHECK_INIT_POLLING; + + if (count != sizeof(unsigned char)) { + usbi_err(NULL, "this function should only used for signaling"); + return -1; + } + + _index = _fd_to_index_and_lock(fd); + + if (_index < 0) { + errno = EBADF; + return -1; + } + + if (WaitForSingleObject(poll_fd[_index].overlapped->hEvent, INFINITE) != WAIT_OBJECT_0) { + usbi_warn(NULL, "waiting for event failed: %d", (int)GetLastError()); + errno = EIO; + goto out; + } + + poll_dbg("clr pipe event (fd = %d, thread = %08X)", _index, GetCurrentThreadId()); + poll_fd[_index].overlapped->InternalHigh--; + // Don't reset unless we don't have any more events to process + if (poll_fd[_index].overlapped->InternalHigh <= 0) { + ResetEvent(poll_fd[_index].overlapped->hEvent); + poll_fd[_index].overlapped->Internal = STATUS_PENDING; + } + + r = sizeof(unsigned char); + +out: + LeaveCriticalSection(&_poll_fd[_index].mutex); + return r; +} diff --git a/compat/libusb-1.0/libusb/os/poll_windows.h b/compat/libusb-1.0/libusb/os/poll_windows.h new file mode 100644 index 0000000..d3bda47 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/poll_windows.h @@ -0,0 +1,117 @@ +/* + * Windows compat: POSIX compatibility wrapper + * Copyright (C) 2009-2010 Pete Batard + * With contributions from Michael Plante, Orin Eman et al. 
+ * Parts of poll implementation from libusb-win32, by Stephan Meyer et al. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ +#pragma once + +#include + +#if defined(_MSC_VER) +// disable /W4 MSVC warnings that are benign +#pragma warning(disable:4127) // conditional expression is constant +#endif + +// Handle synchronous completion through the overlapped structure +#if !defined(STATUS_REPARSE) // reuse the REPARSE status code +#define STATUS_REPARSE ((LONG)0x00000104L) +#endif +#define STATUS_COMPLETED_SYNCHRONOUSLY STATUS_REPARSE +#define HasOverlappedIoCompletedSync(lpOverlapped) (((DWORD)(lpOverlapped)->Internal) == STATUS_COMPLETED_SYNCHRONOUSLY) + +#define DUMMY_HANDLE ((HANDLE)(LONG_PTR)-2) + +enum windows_version { + WINDOWS_UNSUPPORTED, + WINDOWS_XP, + WINDOWS_2003, // also includes XP 64 + WINDOWS_VISTA_AND_LATER, +}; +extern enum windows_version windows_version; + +#define MAX_FDS 256 + +#define POLLIN 0x0001 /* There is data to read */ +#define POLLPRI 0x0002 /* There is urgent data to read */ +#define POLLOUT 0x0004 /* Writing now will not block */ +#define POLLERR 0x0008 /* Error condition */ +#define POLLHUP 0x0010 /* Hung up */ +#define POLLNVAL 0x0020 /* Invalid request: fd not open */ + +struct pollfd { + int fd; /* file descriptor */ + short 
events; /* requested events */ + short revents; /* returned events */ +}; + +// access modes +enum rw_type { + RW_NONE, + RW_READ, + RW_WRITE, +}; + +// fd struct that can be used for polling on Windows +struct winfd { + int fd; // what's exposed to libusb core + HANDLE handle; // what we need to attach overlapped to the I/O op, so we can poll it + OVERLAPPED* overlapped; // what will report our I/O status + enum rw_type rw; // I/O transfer direction: read *XOR* write (NOT BOTH) +}; +extern const struct winfd INVALID_WINFD; + +int usbi_pipe(int pipefd[2]); +int usbi_poll(struct pollfd *fds, unsigned int nfds, int timeout); +ssize_t usbi_write(int fd, const void *buf, size_t count); +ssize_t usbi_read(int fd, void *buf, size_t count); +int usbi_close(int fd); + +void init_polling(void); +void exit_polling(void); +struct winfd usbi_create_fd(HANDLE handle, int access_mode); +void usbi_free_fd(int fd); +struct winfd fd_to_winfd(int fd); +struct winfd handle_to_winfd(HANDLE handle); +struct winfd overlapped_to_winfd(OVERLAPPED* overlapped); + +/* + * Timeval operations + */ +#if defined(DDKBUILD) +#include // defines timeval functions on DDK +#endif + +#if !defined(TIMESPEC_TO_TIMEVAL) +#define TIMESPEC_TO_TIMEVAL(tv, ts) { \ + (tv)->tv_sec = (long)(ts)->tv_sec; \ + (tv)->tv_usec = (long)(ts)->tv_nsec / 1000; \ +} +#endif +#if !defined(timersub) +#define timersub(a, b, result) \ +do { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ +} while (0) +#endif + diff --git a/compat/libusb-1.0/libusb/os/threads_posix.c b/compat/libusb-1.0/libusb/os/threads_posix.c new file mode 100644 index 0000000..60c57cf --- /dev/null +++ b/compat/libusb-1.0/libusb/os/threads_posix.c @@ -0,0 +1,55 @@ +/* + * libusb synchronization using POSIX Threads + * + * Copyright (C) 2011 Vitali Lovich + * Copyright (C) 2011 Peter Stuge + * + * This 
library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifdef _XOPEN_SOURCE +# if _XOPEN_SOURCE < 500 +# undef _XOPEN_SOURCE +# define _XOPEN_SOURCE 500 +# endif +#else +#define _XOPEN_SOURCE 500 +#endif /* _XOPEN_SOURCE */ + +#include "threads_posix.h" + +int usbi_mutex_init_recursive(pthread_mutex_t *mutex, pthread_mutexattr_t *attr) +{ + int err; + pthread_mutexattr_t stack_attr; + if (!attr) { + attr = &stack_attr; + err = pthread_mutexattr_init(&stack_attr); + if (err != 0) + return err; + } + + err = pthread_mutexattr_settype(attr, PTHREAD_MUTEX_RECURSIVE); + if (err != 0) + goto finish; + + err = pthread_mutex_init(mutex, attr); + +finish: + if (attr == &stack_attr) + pthread_mutexattr_destroy(&stack_attr); + + return err; +} diff --git a/compat/libusb-1.0/libusb/os/threads_posix.h b/compat/libusb-1.0/libusb/os/threads_posix.h new file mode 100644 index 0000000..9752208 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/threads_posix.h @@ -0,0 +1,48 @@ +/* + * libusb synchronization using POSIX Threads + * + * Copyright (C) 2010 Peter Stuge + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your 
option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef LIBUSB_THREADS_POSIX_H +#define LIBUSB_THREADS_POSIX_H + +#include <pthread.h> + +#define usbi_mutex_static_t pthread_mutex_t +#define USBI_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER +#define usbi_mutex_static_lock pthread_mutex_lock +#define usbi_mutex_static_unlock pthread_mutex_unlock + +#define usbi_mutex_t pthread_mutex_t +#define usbi_mutex_init pthread_mutex_init +#define usbi_mutex_lock pthread_mutex_lock +#define usbi_mutex_unlock pthread_mutex_unlock +#define usbi_mutex_trylock pthread_mutex_trylock +#define usbi_mutex_destroy pthread_mutex_destroy + +#define usbi_cond_t pthread_cond_t +#define usbi_cond_init pthread_cond_init +#define usbi_cond_wait pthread_cond_wait +#define usbi_cond_timedwait pthread_cond_timedwait +#define usbi_cond_broadcast pthread_cond_broadcast +#define usbi_cond_destroy pthread_cond_destroy +#define usbi_cond_signal pthread_cond_signal + +extern int usbi_mutex_init_recursive(pthread_mutex_t *mutex, pthread_mutexattr_t *attr); + +#endif /* LIBUSB_THREADS_POSIX_H */ diff --git a/compat/libusb-1.0/libusb/os/threads_windows.c b/compat/libusb-1.0/libusb/os/threads_windows.c new file mode 100644 index 0000000..b92b645 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/threads_windows.c @@ -0,0 +1,208 @@ +/* + * libusb synchronization on Microsoft Windows + * + * Copyright (C) 2010 Michael Plante + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * 
License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include + +#include "libusbi.h" + + +int usbi_mutex_init(usbi_mutex_t *mutex, + const usbi_mutexattr_t *attr) { + if(! mutex) return ((errno=EINVAL)); + *mutex = CreateMutex(NULL, FALSE, NULL); + if(!*mutex) return ((errno=ENOMEM)); + return 0; +} +int usbi_mutex_destroy(usbi_mutex_t *mutex) { + // It is not clear if CloseHandle failure is due to failure to unlock. + // If so, this should be errno=EBUSY. 
+ if(!mutex || !CloseHandle(*mutex)) return ((errno=EINVAL)); + *mutex = NULL; + return 0; +} +int usbi_mutex_trylock(usbi_mutex_t *mutex) { + DWORD result; + if(!mutex) return ((errno=EINVAL)); + result = WaitForSingleObject(*mutex, 0); + if(result == WAIT_OBJECT_0 || result == WAIT_ABANDONED) + return 0; // acquired (ToDo: check that abandoned is ok) + if(result == WAIT_TIMEOUT) + return ((errno=EBUSY)); + return ((errno=EINVAL)); // don't know how this would happen + // so don't know proper errno +} +int usbi_mutex_lock(usbi_mutex_t *mutex) { + DWORD result; + if(!mutex) return ((errno=EINVAL)); + result = WaitForSingleObject(*mutex, INFINITE); + if(result == WAIT_OBJECT_0 || result == WAIT_ABANDONED) + return 0; // acquired (ToDo: check that abandoned is ok) + return ((errno=EINVAL)); // don't know how this would happen + // so don't know proper errno +} +int usbi_mutex_unlock(usbi_mutex_t *mutex) { + if(!mutex) return ((errno=EINVAL)); + if(!ReleaseMutex(*mutex)) return ((errno=EPERM )); + return 0; +} + +int usbi_mutex_static_lock(usbi_mutex_static_t *mutex) { + if(!mutex) return ((errno=EINVAL)); + while (InterlockedExchange((LONG *)mutex, 1) == 1) { + SleepEx(0, TRUE); + } + return 0; +} +int usbi_mutex_static_unlock(usbi_mutex_static_t *mutex) { + if(!mutex) return ((errno=EINVAL)); + *mutex = 0; + return 0; +} + + + +int usbi_cond_init(usbi_cond_t *cond, + const usbi_condattr_t *attr) { + if(!cond) return ((errno=EINVAL)); + list_init(&cond->waiters ); + list_init(&cond->not_waiting); + return 0; +} +int usbi_cond_destroy(usbi_cond_t *cond) { + // This assumes no one is using this anymore. The check MAY NOT BE safe. + struct usbi_cond_perthread *pos, *prev_pos = NULL; + if(!cond) return ((errno=EINVAL)); + if(!list_empty(&cond->waiters)) return ((errno=EBUSY )); // (!see above!) 
+ list_for_each_entry(pos, &cond->not_waiting, list, struct usbi_cond_perthread) { + free(prev_pos); + CloseHandle(pos->event); + list_del(&pos->list); + prev_pos = pos; + } + free(prev_pos); + + return 0; +} + +int usbi_cond_broadcast(usbi_cond_t *cond) { + // Assumes mutex is locked; this is not in keeping with POSIX spec, but + // libusb does this anyway, so we simplify by not adding more sync + // primitives to the CV definition! + int fail = 0; + struct usbi_cond_perthread *pos; + if(!cond) return ((errno=EINVAL)); + list_for_each_entry(pos, &cond->waiters, list, struct usbi_cond_perthread) { + if(!SetEvent(pos->event)) + fail = 1; + } + // The wait function will remove its respective item from the list. + return fail ? ((errno=EINVAL)) : 0; +} +int usbi_cond_signal(usbi_cond_t *cond) { + // Assumes mutex is locked; this is not in keeping with POSIX spec, but + // libusb does this anyway, so we simplify by not adding more sync + // primitives to the CV definition! + struct usbi_cond_perthread *pos; + if(!cond) return ((errno=EINVAL)); + if(list_empty(&cond->waiters)) return 0; // no one to wakeup. + pos = list_entry(&cond->waiters.next, struct usbi_cond_perthread, list); + // The wait function will remove its respective item from the list. + return SetEvent(pos->event) ? 0 : ((errno=EINVAL)); +} +static int __inline usbi_cond_intwait(usbi_cond_t *cond, + usbi_mutex_t *mutex, + DWORD timeout_ms) { + struct usbi_cond_perthread *pos; + int found = 0, r; + DWORD r2,tid = GetCurrentThreadId(); + if(!cond || !mutex) return ((errno=EINVAL)); + list_for_each_entry(pos, &cond->not_waiting, list, struct usbi_cond_perthread) { + if(tid == pos->tid) { + found = 1; + break; + } + } + if(!found) { + pos = (struct usbi_cond_perthread*) calloc(1, sizeof(struct usbi_cond_perthread)); + if(!pos) return ((errno=ENOMEM)); // This errno is not POSIX-allowed. + pos->tid = tid; + pos->event = CreateEvent(NULL, FALSE, FALSE, NULL); // auto-reset. 
+ if(!pos->event) { + free(pos); + return ((errno=ENOMEM)); + } + list_add(&pos->list, &cond->not_waiting); + } + + list_del(&pos->list); // remove from not_waiting list. + list_add(&pos->list, &cond->waiters); + + r = usbi_mutex_unlock(mutex); + if(r) return r; + r2 = WaitForSingleObject(pos->event, timeout_ms); + r = usbi_mutex_lock(mutex); + if(r) return r; + + list_del(&pos->list); + list_add(&pos->list, &cond->not_waiting); + + if(r2 == WAIT_TIMEOUT) return ((errno=ETIMEDOUT)); + + return 0; +} +// N.B.: usbi_cond_*wait() can also return ENOMEM, even though pthread_cond_*wait cannot! +int usbi_cond_wait(usbi_cond_t *cond, usbi_mutex_t *mutex) { + return usbi_cond_intwait(cond, mutex, INFINITE); +} +int usbi_cond_timedwait(usbi_cond_t *cond, + usbi_mutex_t *mutex, + const struct timespec *abstime) { + FILETIME filetime; + ULARGE_INTEGER rtime; + struct timeval targ_time, cur_time, delta_time; + struct timespec cur_time_ns; + DWORD millis; + extern const uint64_t epoch_time; + + GetSystemTimeAsFileTime(&filetime); + rtime.LowPart = filetime.dwLowDateTime; + rtime.HighPart = filetime.dwHighDateTime; + rtime.QuadPart -= epoch_time; + cur_time_ns.tv_sec = (long)(rtime.QuadPart / 10000000); + cur_time_ns.tv_nsec = (long)((rtime.QuadPart % 10000000)*100); + TIMESPEC_TO_TIMEVAL(&cur_time, &cur_time_ns); + + TIMESPEC_TO_TIMEVAL(&targ_time, abstime); + timersub(&targ_time, &cur_time, &delta_time); + if(delta_time.tv_sec < 0) // abstime already passed? 
+ millis = 0; + else { + millis = delta_time.tv_usec/1000; + millis += delta_time.tv_sec *1000; + if (delta_time.tv_usec % 1000) // round up to next millisecond + millis++; + } + + return usbi_cond_intwait(cond, mutex, millis); +} + diff --git a/compat/libusb-1.0/libusb/os/threads_windows.h b/compat/libusb-1.0/libusb/os/threads_windows.h new file mode 100644 index 0000000..e486df9 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/threads_windows.h @@ -0,0 +1,88 @@ +/* + * libusb synchronization on Microsoft Windows + * + * Copyright (C) 2010 Michael Plante + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef LIBUSB_THREADS_WINDOWS_H +#define LIBUSB_THREADS_WINDOWS_H + +#include + +#define usbi_mutex_static_t volatile LONG +#define USBI_MUTEX_INITIALIZER 0 + +#define usbi_mutex_t HANDLE + +struct usbi_cond_perthread { + struct list_head list; + DWORD tid; + HANDLE event; +}; +struct usbi_cond_t_ { + // Every time a thread touches the CV, it winds up in one of these lists. + // It stays there until the CV is destroyed, even if the thread + // terminates. 
+ struct list_head waiters; + struct list_head not_waiting; +}; +typedef struct usbi_cond_t_ usbi_cond_t; + +// We *were* getting timespec from pthread.h: +#if (!defined(HAVE_STRUCT_TIMESPEC) && !defined(_TIMESPEC_DEFINED)) +#define HAVE_STRUCT_TIMESPEC 1 +#define _TIMESPEC_DEFINED 1 +struct timespec { + long tv_sec; + long tv_nsec; +}; +#endif /* HAVE_STRUCT_TIMESPEC | _TIMESPEC_DEFINED */ + +// We *were* getting ETIMEDOUT from pthread.h: +#ifndef ETIMEDOUT +# define ETIMEDOUT 10060 /* This is the value in winsock.h. */ +#endif + +#define usbi_mutexattr_t void +#define usbi_condattr_t void + +// all Windows mutexes are recursive +#define usbi_mutex_init_recursive(mutex, attr) usbi_mutex_init((mutex), (attr)) + +int usbi_mutex_static_lock(usbi_mutex_static_t *mutex); +int usbi_mutex_static_unlock(usbi_mutex_static_t *mutex); + + +int usbi_mutex_init(usbi_mutex_t *mutex, + const usbi_mutexattr_t *attr); +int usbi_mutex_lock(usbi_mutex_t *mutex); +int usbi_mutex_unlock(usbi_mutex_t *mutex); +int usbi_mutex_trylock(usbi_mutex_t *mutex); +int usbi_mutex_destroy(usbi_mutex_t *mutex); + +int usbi_cond_init(usbi_cond_t *cond, + const usbi_condattr_t *attr); +int usbi_cond_destroy(usbi_cond_t *cond); +int usbi_cond_wait(usbi_cond_t *cond, usbi_mutex_t *mutex); +int usbi_cond_timedwait(usbi_cond_t *cond, + usbi_mutex_t *mutex, + const struct timespec *abstime); +int usbi_cond_broadcast(usbi_cond_t *cond); +int usbi_cond_signal(usbi_cond_t *cond); + +#endif /* LIBUSB_THREADS_WINDOWS_H */ + diff --git a/compat/libusb-1.0/libusb/os/windows_usb.c b/compat/libusb-1.0/libusb/os/windows_usb.c new file mode 100644 index 0000000..873905c --- /dev/null +++ b/compat/libusb-1.0/libusb/os/windows_usb.c @@ -0,0 +1,2998 @@ +/* + * windows backend for libusb 1.0 + * Copyright (c) 2009-2010 Pete Batard + * With contributions from Michael Plante, Orin Eman et al. 
+ * Parts of this code adapted from libusb-win32-v1 by Stephan Meyer + * Hash table functions adapted from glibc, by Ulrich Drepper et al. + * Major code testing contribution by Xiaofan Chen + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "poll_windows.h" +#include "windows_usb.h" + +// The following prevents "banned API" errors when using the MS's WDK OACR/Prefast +#if defined(_PREFAST_) +#pragma warning(disable:28719) +#endif + +// The 2 macros below are used in conjunction with safe loops. 
+#define LOOP_CHECK(fcall) { r=fcall; if (r != LIBUSB_SUCCESS) continue; } +#define LOOP_BREAK(err) { r=err; continue; } + +extern void usbi_fd_notification(struct libusb_context *ctx); + +// Helper prototypes +static int windows_get_active_config_descriptor(struct libusb_device *dev, unsigned char *buffer, size_t len, int *host_endian); +static int windows_clock_gettime(int clk_id, struct timespec *tp); +unsigned __stdcall windows_clock_gettime_threaded(void* param); +// WinUSB API prototypes +static int winusb_init(struct libusb_context *ctx); +static int winusb_exit(void); +static int winusb_open(struct libusb_device_handle *dev_handle); +static void winusb_close(struct libusb_device_handle *dev_handle); +static int winusb_configure_endpoints(struct libusb_device_handle *dev_handle, int iface); +static int winusb_claim_interface(struct libusb_device_handle *dev_handle, int iface); +static int winusb_release_interface(struct libusb_device_handle *dev_handle, int iface); +static int winusb_submit_control_transfer(struct usbi_transfer *itransfer); +static int winusb_set_interface_altsetting(struct libusb_device_handle *dev_handle, int iface, int altsetting); +static int winusb_submit_bulk_transfer(struct usbi_transfer *itransfer); +static int winusb_clear_halt(struct libusb_device_handle *dev_handle, unsigned char endpoint); +static int winusb_abort_transfers(struct usbi_transfer *itransfer); +static int winusb_abort_control(struct usbi_transfer *itransfer); +static int winusb_reset_device(struct libusb_device_handle *dev_handle); +static int winusb_copy_transfer_data(struct usbi_transfer *itransfer, uint32_t io_size); +// Composite API prototypes +static int composite_init(struct libusb_context *ctx); +static int composite_exit(void); +static int composite_open(struct libusb_device_handle *dev_handle); +static void composite_close(struct libusb_device_handle *dev_handle); +static int composite_claim_interface(struct libusb_device_handle *dev_handle, int iface); 
+static int composite_set_interface_altsetting(struct libusb_device_handle *dev_handle, int iface, int altsetting); +static int composite_release_interface(struct libusb_device_handle *dev_handle, int iface); +static int composite_submit_control_transfer(struct usbi_transfer *itransfer); +static int composite_submit_bulk_transfer(struct usbi_transfer *itransfer); +static int composite_submit_iso_transfer(struct usbi_transfer *itransfer); +static int composite_clear_halt(struct libusb_device_handle *dev_handle, unsigned char endpoint); +static int composite_abort_transfers(struct usbi_transfer *itransfer); +static int composite_abort_control(struct usbi_transfer *itransfer); +static int composite_reset_device(struct libusb_device_handle *dev_handle); +static int composite_copy_transfer_data(struct usbi_transfer *itransfer, uint32_t io_size); + + +// Global variables +uint64_t hires_frequency, hires_ticks_to_ps; +const uint64_t epoch_time = UINT64_C(116444736000000000); // 1970.01.01 00:00:000 in MS Filetime +enum windows_version windows_version = WINDOWS_UNSUPPORTED; +// Concurrency +static int concurrent_usage = -1; +usbi_mutex_t autoclaim_lock; +// Timer thread +// NB: index 0 is for monotonic and 1 is for the thread exit event +HANDLE timer_thread = NULL; +HANDLE timer_mutex = NULL; +struct timespec timer_tp; +volatile LONG request_count[2] = {0, 1}; // last one must be > 0 +HANDLE timer_request[2] = { NULL, NULL }; +HANDLE timer_response = NULL; +// API globals +bool api_winusb_available = false; +#define CHECK_WINUSB_AVAILABLE do { if (!api_winusb_available) return LIBUSB_ERROR_ACCESS; } while (0) + +static inline BOOLEAN guid_eq(const GUID *guid1, const GUID *guid2) { + if ((guid1 != NULL) && (guid2 != NULL)) { + return (memcmp(guid1, guid2, sizeof(GUID)) == 0); + } + return false; +} + +#if defined(ENABLE_DEBUG_LOGGING) || (defined(_MSC_VER) && _MSC_VER < 1400) +static char* guid_to_string(const GUID* guid) +{ + static char 
guid_string[MAX_GUID_STRING_LENGTH]; + + if (guid == NULL) return NULL; + sprintf(guid_string, "{%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X}", + (unsigned int)guid->Data1, guid->Data2, guid->Data3, + guid->Data4[0], guid->Data4[1], guid->Data4[2], guid->Data4[3], + guid->Data4[4], guid->Data4[5], guid->Data4[6], guid->Data4[7]); + return guid_string; +} +#endif + +/* + * Converts a windows error to human readable string + * uses retval as errorcode, or, if 0, use GetLastError() + */ +static char *windows_error_str(uint32_t retval) +{ +static char err_string[ERR_BUFFER_SIZE]; + + DWORD size; + size_t i; + uint32_t error_code, format_error; + + error_code = retval?retval:GetLastError(); + + safe_sprintf(err_string, ERR_BUFFER_SIZE, "[%d] ", error_code); + + size = FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, error_code, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), &err_string[safe_strlen(err_string)], + ERR_BUFFER_SIZE - (DWORD)safe_strlen(err_string), NULL); + if (size == 0) { + format_error = GetLastError(); + if (format_error) + safe_sprintf(err_string, ERR_BUFFER_SIZE, + "Windows error code %u (FormatMessage error code %u)", error_code, format_error); + else + safe_sprintf(err_string, ERR_BUFFER_SIZE, "Unknown error code %u", error_code); + } else { + // Remove CR/LF terminators + for (i=safe_strlen(err_string)-1; ((err_string[i]==0x0A) || (err_string[i]==0x0D)); i--) { + err_string[i] = 0; + } + } + return err_string; +} + +/* + * Sanitize Microsoft's paths: convert to uppercase, add prefix and fix backslashes. + * Return an allocated sanitized string or NULL on error. + */ +static char* sanitize_path(const char* path) +{ + const char root_prefix[] = "\\\\.\\"; + size_t j, size, root_size; + char* ret_path = NULL; + size_t add_root = 0; + + if (path == NULL) + return NULL; + + size = safe_strlen(path)+1; + root_size = sizeof(root_prefix)-1; + + // Microsoft indiscriminatly uses '\\?\', '\\.\', '##?#" or "##.#" for root prefixes. 
+ if (!((size > 3) && (((path[0] == '\\') && (path[1] == '\\') && (path[3] == '\\')) || + ((path[0] == '#') && (path[1] == '#') && (path[3] == '#'))))) { + add_root = root_size; + size += add_root; + } + + if ((ret_path = (char*)calloc(size, 1)) == NULL) + return NULL; + + safe_strcpy(&ret_path[add_root], size-add_root, path); + + // Ensure consistancy with root prefix + for (j=0; jcbSize = sizeof(SP_DEVINFO_DATA); + if (!pSetupDiEnumDeviceInfo(*dev_info, _index, dev_info_data)) { + if (GetLastError() != ERROR_NO_MORE_ITEMS) { + usbi_err(ctx, "Could not obtain device info data for index %u: %s", + _index, windows_error_str(0)); + } + pSetupDiDestroyDeviceInfoList(*dev_info); + *dev_info = INVALID_HANDLE_VALUE; + return false; + } + return true; +} + +/* + * enumerate interfaces for a specific GUID + * + * Parameters: + * dev_info: a pointer to a dev_info list + * dev_info_data: a pointer to an SP_DEVINFO_DATA to be filled (or NULL if not needed) + * guid: the GUID for which to retrieve interface details + * index: zero based index of the interface in the device info list + * + * Note: it is the responsibility of the caller to free the DEVICE_INTERFACE_DETAIL_DATA + * structure returned and call this function repeatedly using the same guid (with an + * incremented index starting at zero) until all interfaces have been returned. 
+ */ +static SP_DEVICE_INTERFACE_DETAIL_DATA_A *get_interface_details(struct libusb_context *ctx, + HDEVINFO *dev_info, SP_DEVINFO_DATA *dev_info_data, const GUID* guid, unsigned _index) +{ + SP_DEVICE_INTERFACE_DATA dev_interface_data; + SP_DEVICE_INTERFACE_DETAIL_DATA_A *dev_interface_details = NULL; + DWORD size; + + if (_index <= 0) { + *dev_info = pSetupDiGetClassDevsA(guid, NULL, NULL, DIGCF_PRESENT|DIGCF_DEVICEINTERFACE); + } + + if (dev_info_data != NULL) { + dev_info_data->cbSize = sizeof(SP_DEVINFO_DATA); + if (!pSetupDiEnumDeviceInfo(*dev_info, _index, dev_info_data)) { + if (GetLastError() != ERROR_NO_MORE_ITEMS) { + usbi_err(ctx, "Could not obtain device info data for index %u: %s", + _index, windows_error_str(0)); + } + pSetupDiDestroyDeviceInfoList(*dev_info); + *dev_info = INVALID_HANDLE_VALUE; + return NULL; + } + } + + dev_interface_data.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA); + if (!pSetupDiEnumDeviceInterfaces(*dev_info, NULL, guid, _index, &dev_interface_data)) { + if (GetLastError() != ERROR_NO_MORE_ITEMS) { + usbi_err(ctx, "Could not obtain interface data for index %u: %s", + _index, windows_error_str(0)); + } + pSetupDiDestroyDeviceInfoList(*dev_info); + *dev_info = INVALID_HANDLE_VALUE; + return NULL; + } + + // Read interface data (dummy + actual) to access the device path + if (!pSetupDiGetDeviceInterfaceDetailA(*dev_info, &dev_interface_data, NULL, 0, &size, NULL)) { + // The dummy call should fail with ERROR_INSUFFICIENT_BUFFER + if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) { + usbi_err(ctx, "could not access interface data (dummy) for index %u: %s", + _index, windows_error_str(0)); + goto err_exit; + } + } else { + usbi_err(ctx, "program assertion failed - http://msdn.microsoft.com/en-us/library/ms792901.aspx is wrong."); + goto err_exit; + } + + if ((dev_interface_details = (SP_DEVICE_INTERFACE_DETAIL_DATA_A*) calloc(size, 1)) == NULL) { + usbi_err(ctx, "could not allocate interface data for index %u.", _index); + goto 
err_exit; + } + + dev_interface_details->cbSize = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA_A); + if (!pSetupDiGetDeviceInterfaceDetailA(*dev_info, &dev_interface_data, + dev_interface_details, size, &size, NULL)) { + usbi_err(ctx, "could not access interface data (actual) for index %u: %s", + _index, windows_error_str(0)); + } + + return dev_interface_details; + +err_exit: + pSetupDiDestroyDeviceInfoList(*dev_info); + *dev_info = INVALID_HANDLE_VALUE; + return NULL; +} + +/* Hash table functions - modified From glibc 2.3.2: + [Aho,Sethi,Ullman] Compilers: Principles, Techniques and Tools, 1986 + [Knuth] The Art of Computer Programming, part 3 (6.4) */ +typedef struct htab_entry { + unsigned long used; + char* str; +} htab_entry; +htab_entry* htab_table = NULL; +usbi_mutex_t htab_write_mutex = NULL; +unsigned long htab_size, htab_filled; + +/* For the used double hash method the table size has to be a prime. To + correct the user given table size we need a prime test. This trivial + algorithm is adequate because the code is called only during init and + the number is likely to be small */ +static int isprime(unsigned long number) +{ + // no even number will be passed + unsigned int divider = 3; + + while((divider * divider < number) && (number % divider != 0)) + divider += 2; + + return (number % divider != 0); +} + +/* Before using the hash table we must allocate memory for it. + We allocate one element more as the found prime number says. + This is done for more effective indexing as explained in the + comment for the hash function. */ +static int htab_create(struct libusb_context *ctx, unsigned long nel) +{ + if (htab_table != NULL) { + usbi_err(ctx, "hash table already allocated"); + } + + // Create a mutex + usbi_mutex_init(&htab_write_mutex, NULL); + + // Change nel to the first prime number not smaller as nel. 
+ nel |= 1; + while(!isprime(nel)) + nel += 2; + + htab_size = nel; + usbi_dbg("using %d entries hash table", nel); + htab_filled = 0; + + // allocate memory and zero out. + htab_table = (htab_entry*)calloc(htab_size + 1, sizeof(htab_entry)); + if (htab_table == NULL) { + usbi_err(ctx, "could not allocate space for hash table"); + return 0; + } + + return 1; +} + +/* After using the hash table it has to be destroyed. */ +static void htab_destroy(void) +{ + size_t i; + if (htab_table == NULL) { + return; + } + + for (i=0; i New entry + + // If the table is full return an error + if (htab_filled >= htab_size) { + usbi_err(NULL, "hash table is full (%d entries)", htab_size); + return 0; + } + + // Concurrent threads might be storing the same entry at the same time + // (eg. "simultaneous" enums from different threads) => use a mutex + usbi_mutex_lock(&htab_write_mutex); + // Just free any previously allocated string (which should be the same as + // new one). The possibility of concurrent threads storing a collision + // string (same hash, different string) at the same time is extremely low + safe_free(htab_table[idx].str); + htab_table[idx].used = hval; + htab_table[idx].str = (char*) calloc(1, safe_strlen(str)+1); + if (htab_table[idx].str == NULL) { + usbi_err(NULL, "could not duplicate string for hash table"); + usbi_mutex_unlock(&htab_write_mutex); + return 0; + } + memcpy(htab_table[idx].str, str, safe_strlen(str)+1); + ++htab_filled; + usbi_mutex_unlock(&htab_write_mutex); + + return idx; +} + +/* + * Returns the session ID of a device's nth level ancestor + * If there's no device at the nth level, return 0 + */ +static unsigned long get_ancestor_session_id(DWORD devinst, unsigned level) +{ + DWORD parent_devinst; + unsigned long session_id = 0; + char* sanitized_path = NULL; + char path[MAX_PATH_LENGTH]; + unsigned i; + + if (level < 1) return 0; + for (i = 0; idev); + struct libusb_config_descriptor *conf_desc; + const struct libusb_interface_descriptor 
*if_desc; + struct libusb_context *ctx = DEVICE_CTX(dev_handle->dev); + + r = libusb_get_config_descriptor(dev_handle->dev, 0, &conf_desc); + if (r != LIBUSB_SUCCESS) { + usbi_warn(ctx, "could not read config descriptor: error %d", r); + return r; + } + + if_desc = &conf_desc->interface[iface].altsetting[altsetting]; + safe_free(priv->usb_interface[iface].endpoint); + + if (if_desc->bNumEndpoints == 0) { + usbi_dbg("no endpoints found for interface %d", iface); + return LIBUSB_SUCCESS; + } + + priv->usb_interface[iface].endpoint = (uint8_t*) calloc(1, if_desc->bNumEndpoints); + if (priv->usb_interface[iface].endpoint == NULL) { + return LIBUSB_ERROR_NO_MEM; + } + + priv->usb_interface[iface].nb_endpoints = if_desc->bNumEndpoints; + for (i=0; ibNumEndpoints; i++) { + priv->usb_interface[iface].endpoint[i] = if_desc->endpoint[i].bEndpointAddress; + usbi_dbg("(re)assigned endpoint %02X to interface %d", priv->usb_interface[iface].endpoint[i], iface); + } + libusb_free_config_descriptor(conf_desc); + + // Extra init is required for WinUSB endpoints + if (priv->apib->id == USB_API_WINUSB) { + return winusb_configure_endpoints(dev_handle, iface); + } + + return LIBUSB_SUCCESS; +} + +// Lookup for a match in the list of API driver names +static bool is_api_driver(char* driver, uint8_t api) +{ + uint8_t i; + const char sep_str[2] = {LIST_SEPARATOR, 0}; + char *tok, *tmp_str; + size_t len = safe_strlen(driver); + + if (len == 0) return false; + tmp_str = (char*) calloc(1, len+1); + if (tmp_str == NULL) return false; + memcpy(tmp_str, driver, len+1); + tok = strtok(tmp_str, sep_str); + while (tok != NULL) { + for (i=0; idev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv( + transfer->dev_handle); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int current_interface = *interface_number; + int r = LIBUSB_SUCCESS; + + usbi_mutex_lock(&autoclaim_lock); + if (current_interface < 0) // No serviceable interface 
was found + { + for (current_interface=0; current_interface<USB_MAXINTERFACES; current_interface++) { + if ( (priv->usb_interface[current_interface].apib->id == api_type) + && (libusb_claim_interface(transfer->dev_handle, current_interface) == LIBUSB_SUCCESS) ) { + usbi_dbg("auto-claimed interface %d for control request", current_interface); + if (handle_priv->autoclaim_count[current_interface] != 0) { + usbi_warn(ctx, "program assertion failed - autoclaim_count was nonzero"); + } + handle_priv->autoclaim_count[current_interface]++; + break; + } + } + if (current_interface == USB_MAXINTERFACES) { + usbi_err(ctx, "could not auto-claim any interface"); + r = LIBUSB_ERROR_NOT_FOUND; + } + } else { + // If we have a valid interface that was autoclaimed, we must increment + // its autoclaim count so that we can prevent an early release. + if (handle_priv->autoclaim_count[current_interface] != 0) { + handle_priv->autoclaim_count[current_interface]++; + } + } + usbi_mutex_unlock(&autoclaim_lock); + + *interface_number = current_interface; + return r; + +} + +static void auto_release(struct usbi_transfer *itransfer) +{ + struct windows_transfer_priv *transfer_priv = (struct windows_transfer_priv*)usbi_transfer_get_os_priv(itransfer); + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + libusb_device_handle *dev_handle = transfer->dev_handle; + struct windows_device_handle_priv* handle_priv = _device_handle_priv(dev_handle); + int r; + + usbi_mutex_lock(&autoclaim_lock); + if (handle_priv->autoclaim_count[transfer_priv->interface_number] > 0) { + handle_priv->autoclaim_count[transfer_priv->interface_number]--; + if (handle_priv->autoclaim_count[transfer_priv->interface_number] == 0) { + r = libusb_release_interface(dev_handle, transfer_priv->interface_number); + if (r == LIBUSB_SUCCESS) { + usbi_dbg("auto-released interface %d", transfer_priv->interface_number); + } else { + usbi_dbg("failed to auto-release interface %d (%s)", + transfer_priv->interface_number, libusb_error_name((enum 
libusb_error)r)); + } + } + } + usbi_mutex_unlock(&autoclaim_lock); +} + +/* + * init: libusb backend init function + * + * This function enumerates the HCDs (Host Controller Drivers) and populates our private HCD list + * In our implementation, we equate Windows' "HCD" to LibUSB's "bus". Note that bus is zero indexed. + * HCDs are not expected to change after init (might not hold true for hot pluggable USB PCI card?) + */ +static int windows_init(struct libusb_context *ctx) +{ + int i, r = LIBUSB_ERROR_OTHER; + OSVERSIONINFO os_version; + HANDLE semaphore; + char sem_name[11+1+8]; // strlen(libusb_init)+'\0'+(32-bit hex PID) + + sprintf(sem_name, "libusb_init%08X", (unsigned int)GetCurrentProcessId()&0xFFFFFFFF); + semaphore = CreateSemaphoreA(NULL, 1, 1, sem_name); + if (semaphore == NULL) { + usbi_err(ctx, "could not create semaphore: %s", windows_error_str(0)); + return LIBUSB_ERROR_NO_MEM; + } + + // A successful wait brings our semaphore count to 0 (unsignaled) + // => any concurent wait stalls until the semaphore's release + if (WaitForSingleObject(semaphore, INFINITE) != WAIT_OBJECT_0) { + usbi_err(ctx, "failure to access semaphore: %s", windows_error_str(0)); + CloseHandle(semaphore); + return LIBUSB_ERROR_NO_MEM; + } + + // NB: concurrent usage supposes that init calls are equally balanced with + // exit calls. If init is called more than exit, we will not exit properly + if ( ++concurrent_usage == 0 ) { // First init? 
+ // Detect OS version + memset(&os_version, 0, sizeof(OSVERSIONINFO)); + os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + windows_version = WINDOWS_UNSUPPORTED; + if ((GetVersionEx(&os_version) != 0) && (os_version.dwPlatformId == VER_PLATFORM_WIN32_NT)) { + if ((os_version.dwMajorVersion == 5) && (os_version.dwMinorVersion == 1)) { + windows_version = WINDOWS_XP; + } else if ((os_version.dwMajorVersion == 5) && (os_version.dwMinorVersion == 2)) { + windows_version = WINDOWS_2003; // also includes XP 64 + } else if (os_version.dwMajorVersion >= 6) { + windows_version = WINDOWS_VISTA_AND_LATER; + } + } + if (windows_version == WINDOWS_UNSUPPORTED) { + usbi_err(ctx, "This version of Windows is NOT supported"); + r = LIBUSB_ERROR_NOT_SUPPORTED; + goto init_exit; + } + + // We need a lock for proper auto-release + usbi_mutex_init(&autoclaim_lock, NULL); + + // Initialize pollable file descriptors + init_polling(); + + // Load DLL imports + if (init_dlls() != LIBUSB_SUCCESS) { + usbi_err(ctx, "could not resolve DLL functions"); + return LIBUSB_ERROR_NOT_FOUND; + } + + // Initialize the low level APIs (we don't care about errors at this stage) + for (i=0; inum_configurations = 1; + priv->dev_descriptor.bLength = sizeof(USB_DEVICE_DESCRIPTOR); + priv->dev_descriptor.bDescriptorType = USB_DEVICE_DESCRIPTOR_TYPE; + priv->dev_descriptor.bNumConfigurations = 1; + priv->active_config = 1; + + if (priv->parent_dev == NULL) { + usbi_err(ctx, "program assertion failed - HCD hub has no parent"); + return LIBUSB_ERROR_NO_DEVICE; + } + parent_priv = _device_priv(priv->parent_dev); + if (sscanf(parent_priv->path, "\\\\.\\PCI#VEN_%04x&DEV_%04x%*s", &vid, &pid) == 2) { + priv->dev_descriptor.idVendor = (uint16_t)vid; + priv->dev_descriptor.idProduct = (uint16_t)pid; + } else { + usbi_warn(ctx, "could not infer VID/PID of HCD hub from '%s'", parent_priv->path); + priv->dev_descriptor.idVendor = 0x1d6b; // Linux Foundation root hub + priv->dev_descriptor.idProduct = 1; + } + 
return LIBUSB_SUCCESS; +} + +/* + * fetch and cache all the config descriptors through I/O + */ +static int cache_config_descriptors(struct libusb_device *dev, HANDLE hub_handle, char* device_id) +{ + DWORD size, ret_size; + struct libusb_context *ctx = DEVICE_CTX(dev); + struct windows_device_priv *priv = _device_priv(dev); + int r; + uint8_t i; + + USB_CONFIGURATION_DESCRIPTOR_SHORT cd_buf_short; // dummy request + PUSB_DESCRIPTOR_REQUEST cd_buf_actual = NULL; // actual request + PUSB_CONFIGURATION_DESCRIPTOR cd_data = NULL; + + if (dev->num_configurations == 0) + return LIBUSB_ERROR_INVALID_PARAM; + + priv->config_descriptor = (unsigned char**) calloc(dev->num_configurations, sizeof(PUSB_CONFIGURATION_DESCRIPTOR)); + if (priv->config_descriptor == NULL) + return LIBUSB_ERROR_NO_MEM; + for (i=0; inum_configurations; i++) + priv->config_descriptor[i] = NULL; + + for (i=0, r=LIBUSB_SUCCESS; ; i++) + { + // safe loop: release all dynamic resources + safe_free(cd_buf_actual); + + // safe loop: end of loop condition + if ((i >= dev->num_configurations) || (r != LIBUSB_SUCCESS)) + break; + + size = sizeof(USB_CONFIGURATION_DESCRIPTOR_SHORT); + memset(&cd_buf_short, 0, size); + + cd_buf_short.req.ConnectionIndex = (ULONG)priv->port; + cd_buf_short.req.SetupPacket.bmRequest = LIBUSB_ENDPOINT_IN; + cd_buf_short.req.SetupPacket.bRequest = USB_REQUEST_GET_DESCRIPTOR; + cd_buf_short.req.SetupPacket.wValue = (USB_CONFIGURATION_DESCRIPTOR_TYPE << 8) | i; + cd_buf_short.req.SetupPacket.wIndex = i; + cd_buf_short.req.SetupPacket.wLength = (USHORT)(size - sizeof(USB_DESCRIPTOR_REQUEST)); + + // Dummy call to get the required data size + if (!DeviceIoControl(hub_handle, IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, &cd_buf_short, size, + &cd_buf_short, size, &ret_size, NULL)) { + usbi_err(ctx, "could not access configuration descriptor (dummy) for '%s': %s", device_id, windows_error_str(0)); + LOOP_BREAK(LIBUSB_ERROR_IO); + } + + if ((ret_size != size) || 
(cd_buf_short.data.wTotalLength < sizeof(USB_CONFIGURATION_DESCRIPTOR))) { + usbi_err(ctx, "unexpected configuration descriptor size (dummy) for '%s'.", device_id); + LOOP_BREAK(LIBUSB_ERROR_IO); + } + + size = sizeof(USB_DESCRIPTOR_REQUEST) + cd_buf_short.data.wTotalLength; + if ((cd_buf_actual = (PUSB_DESCRIPTOR_REQUEST) calloc(1, size)) == NULL) { + usbi_err(ctx, "could not allocate configuration descriptor buffer for '%s'.", device_id); + LOOP_BREAK(LIBUSB_ERROR_NO_MEM); + } + memset(cd_buf_actual, 0, size); + + // Actual call + cd_buf_actual->ConnectionIndex = (ULONG)priv->port; + cd_buf_actual->SetupPacket.bmRequest = LIBUSB_ENDPOINT_IN; + cd_buf_actual->SetupPacket.bRequest = USB_REQUEST_GET_DESCRIPTOR; + cd_buf_actual->SetupPacket.wValue = (USB_CONFIGURATION_DESCRIPTOR_TYPE << 8) | i; + cd_buf_actual->SetupPacket.wIndex = i; + cd_buf_actual->SetupPacket.wLength = (USHORT)(size - sizeof(USB_DESCRIPTOR_REQUEST)); + + if (!DeviceIoControl(hub_handle, IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, cd_buf_actual, size, + cd_buf_actual, size, &ret_size, NULL)) { + usbi_err(ctx, "could not access configuration descriptor (actual) for '%s': %s", device_id, windows_error_str(0)); + LOOP_BREAK(LIBUSB_ERROR_IO); + } + + cd_data = (PUSB_CONFIGURATION_DESCRIPTOR)((UCHAR*)cd_buf_actual+sizeof(USB_DESCRIPTOR_REQUEST)); + + if ((size != ret_size) || (cd_data->wTotalLength != cd_buf_short.data.wTotalLength)) { + usbi_err(ctx, "unexpected configuration descriptor size (actual) for '%s'.", device_id); + LOOP_BREAK(LIBUSB_ERROR_IO); + } + + if (cd_data->bDescriptorType != USB_CONFIGURATION_DESCRIPTOR_TYPE) { + usbi_err(ctx, "not a configuration descriptor for '%s'", device_id); + LOOP_BREAK(LIBUSB_ERROR_IO); + } + + usbi_dbg("cached config descriptor %d (bConfigurationValue=%d, %d bytes)", + i, cd_data->bConfigurationValue, cd_data->wTotalLength); + + // Cache the descriptor + priv->config_descriptor[i] = (unsigned char*) calloc(1, cd_data->wTotalLength); + if 
(priv->config_descriptor[i] == NULL) + return LIBUSB_ERROR_NO_MEM; + memcpy(priv->config_descriptor[i], cd_data, cd_data->wTotalLength); + } + return LIBUSB_SUCCESS; +} + +/* + * Populate a libusb device structure + */ +static int init_device(struct libusb_device* dev, struct libusb_device* parent_dev, + uint8_t port_number, char* device_id, DWORD devinst) +{ + HANDLE handle; + DWORD size; + USB_NODE_CONNECTION_INFORMATION_EX conn_info; + struct windows_device_priv *priv, *parent_priv; + struct libusb_context *ctx = DEVICE_CTX(dev); + struct libusb_device* tmp_dev; + unsigned i; + + if ((dev == NULL) || (parent_dev == NULL)) { + return LIBUSB_ERROR_NOT_FOUND; + } + priv = _device_priv(dev); + parent_priv = _device_priv(parent_dev); + if (parent_priv->apib->id != USB_API_HUB) { + usbi_warn(ctx, "parent for device '%s' is not a hub", device_id); + return LIBUSB_ERROR_NOT_FOUND; + } + + // It is possible for the parent hub not to have been initialized yet + // If that's the case, lookup the ancestors to set the bus number + if (parent_dev->bus_number == 0) { + for (i=2; ; i++) { + tmp_dev = usbi_get_device_by_session_id(ctx, get_ancestor_session_id(devinst, i)); + if (tmp_dev == NULL) break; + if (tmp_dev->bus_number != 0) { + usbi_dbg("got bus number from ancestor #%d", i); + parent_dev->bus_number = tmp_dev->bus_number; + break; + } + } + } + if (parent_dev->bus_number == 0) { + usbi_err(ctx, "program assertion failed: unable to find ancestor bus number for '%s'", device_id); + return LIBUSB_ERROR_NOT_FOUND; + } + dev->bus_number = parent_dev->bus_number; + priv->port = port_number; + priv->depth = parent_priv->depth + 1; + priv->parent_dev = parent_dev; + + // If the device address is already set, we can stop here + if (dev->device_address != 0) { + return LIBUSB_SUCCESS; + } + memset(&conn_info, 0, sizeof(conn_info)); + if (priv->depth != 0) { // Not a HCD hub + handle = CreateFileA(parent_priv->path, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, OPEN_EXISTING, + 
FILE_FLAG_OVERLAPPED, NULL); + if (handle == INVALID_HANDLE_VALUE) { + usbi_warn(ctx, "could not open hub %s: %s", parent_priv->path, windows_error_str(0)); + return LIBUSB_ERROR_ACCESS; + } + size = sizeof(conn_info); + conn_info.ConnectionIndex = (ULONG)port_number; + if (!DeviceIoControl(handle, IOCTL_USB_GET_NODE_CONNECTION_INFORMATION_EX, &conn_info, size, + &conn_info, size, &size, NULL)) { + usbi_warn(ctx, "could not get node connection information for device '%s': %s", + device_id, windows_error_str(0)); + safe_closehandle(handle); + return LIBUSB_ERROR_NO_DEVICE; + } + if (conn_info.ConnectionStatus == NoDeviceConnected) { + usbi_err(ctx, "device '%s' is no longer connected!", device_id); + safe_closehandle(handle); + return LIBUSB_ERROR_NO_DEVICE; + } + memcpy(&priv->dev_descriptor, &(conn_info.DeviceDescriptor), sizeof(USB_DEVICE_DESCRIPTOR)); + dev->num_configurations = priv->dev_descriptor.bNumConfigurations; + priv->active_config = conn_info.CurrentConfigurationValue; + usbi_dbg("found %d configurations (active conf: %d)", dev->num_configurations, priv->active_config); + // If we can't read the config descriptors, just set the number of confs to zero + if (cache_config_descriptors(dev, handle, device_id) != LIBUSB_SUCCESS) { + dev->num_configurations = 0; + priv->dev_descriptor.bNumConfigurations = 0; + } + safe_closehandle(handle); + + if (conn_info.DeviceAddress > UINT8_MAX) { + usbi_err(ctx, "program assertion failed: device address overflow"); + } + dev->device_address = (uint8_t)conn_info.DeviceAddress; + switch (conn_info.Speed) { + case 0: dev->speed = LIBUSB_SPEED_LOW; break; + case 1: dev->speed = LIBUSB_SPEED_FULL; break; + case 2: dev->speed = LIBUSB_SPEED_HIGH; break; + case 3: dev->speed = LIBUSB_SPEED_SUPER; break; + default: + usbi_warn(ctx, "Got unknown device speed %d", conn_info.Speed); + break; + } + } else { + dev->device_address = UINT8_MAX; // Hubs from HCD have a devaddr of 255 + force_hcd_device_descriptor(dev); + } + + 
usbi_sanitize_device(dev); + + usbi_dbg("(bus: %d, addr: %d, depth: %d, port: %d): '%s'", + dev->bus_number, dev->device_address, priv->depth, priv->port, device_id); + + return LIBUSB_SUCCESS; +} + +// Returns the api type, or 0 if not found/unsupported +static uint8_t get_api_type(struct libusb_context *ctx, + HDEVINFO *dev_info, SP_DEVINFO_DATA *dev_info_data) +{ + // Precedence for filter drivers vs driver is in the order of this array + struct driver_lookup lookup[3] = { + {"\0\0", SPDRP_SERVICE, "driver"}, + {"\0\0", SPDRP_UPPERFILTERS, "upper filter driver"}, + {"\0\0", SPDRP_LOWERFILTERS, "lower filter driver"} + }; + DWORD size, reg_type; + unsigned k, l; + uint8_t api; + + // Check the service & filter names to know the API we should use + for (k=0; k<3; k++) { + if (pSetupDiGetDeviceRegistryPropertyA(*dev_info, dev_info_data, lookup[k].reg_prop, + ®_type, (BYTE*)lookup[k].list, MAX_KEY_LENGTH, &size)) { + // Turn the REG_SZ SPDRP_SERVICE into REG_MULTI_SZ + if (lookup[k].reg_prop == SPDRP_SERVICE) { + // our buffers are MAX_KEY_LENGTH+1 so we can overflow if needed + lookup[k].list[safe_strlen(lookup[k].list)+1] = 0; + } + // MULTI_SZ is a pain to work with. 
Turn it into something much more manageable + // NB: none of the driver names we check against contain LIST_SEPARATOR, + // (currently ';'), so even if an unsuported one does, it's not an issue + for (l=0; (lookup[k].list[l] != 0) || (lookup[k].list[l+1] != 0); l++) { + if (lookup[k].list[l] == 0) { + lookup[k].list[l] = LIST_SEPARATOR; + } + } + upperize(lookup[k].list); + usbi_dbg("%s(s): %s", lookup[k].designation, lookup[k].list); + } else { + if (GetLastError() != ERROR_INVALID_DATA) { + usbi_dbg("could not access %s: %s", lookup[k].designation, windows_error_str(0)); + } + lookup[k].list[0] = 0; + } + } + + for (api=1; api= 3) continue; + return api; + } + return 0; +} + +static int set_composite_interface(struct libusb_context* ctx, struct libusb_device* dev, + char* dev_interface_path, char* device_id, uint8_t api) +{ + unsigned i; + struct windows_device_priv *priv = _device_priv(dev); + int interface_number; + + if (priv->apib->id != USB_API_COMPOSITE) { + usbi_err(ctx, "program assertion failed: '%s' is not composite", device_id); + return LIBUSB_ERROR_NO_DEVICE; + } + + // Because MI_## are not necessarily in sequential order (some composite + // devices will have only MI_00 & MI_03 for instance), we retrieve the actual + // interface number from the path's MI value + interface_number = 0; + for (i=0; device_id[i] != 0; ) { + if ( (device_id[i++] == 'M') && (device_id[i++] == 'I') + && (device_id[i++] == '_') ) { + interface_number = (device_id[i++] - '0')*10; + interface_number += device_id[i] - '0'; + break; + } + } + + if (device_id[i] == 0) { + usbi_warn(ctx, "failure to read interface number for %s. 
Using default value %d", + device_id, interface_number); + } + + if (priv->usb_interface[interface_number].path != NULL) { + usbi_warn(ctx, "interface[%d] already set - ignoring: %s", interface_number, device_id); + return LIBUSB_ERROR_ACCESS; + } + + usbi_dbg("interface[%d] = %s", interface_number, dev_interface_path); + priv->usb_interface[interface_number].path = dev_interface_path; + priv->usb_interface[interface_number].apib = &usb_api_backend[api]; + priv->composite_api_flags |= 1<DevicePath); + if (dev_interface_path == NULL) { + usbi_warn(ctx, "could not sanitize device interface path for '%s'", dev_interface_details->DevicePath); + continue; + } + } + } else { + // Workaround for a Nec/Renesas USB 3.0 driver bug where root hubs are + // being listed under the "NUSB3" PnP Symbolic Name rather than "USB" + while ( (class_index < 2) && + (!(b = get_devinfo_data(ctx, &dev_info, &dev_info_data, usb_class[class_index], i))) ) { + class_index++; + i = 0; + } + if (!b) break; + } + + // Read the Device ID path. 
This is what we'll use as UID + // Note that if the device is plugged in a different port or hub, the Device ID changes + if (CM_Get_Device_IDA(dev_info_data.DevInst, path, sizeof(path), 0) != CR_SUCCESS) { + usbi_warn(ctx, "could not read the device id path for devinst %X, skipping", + dev_info_data.DevInst); + continue; + } + dev_id_path = sanitize_path(path); + if (dev_id_path == NULL) { + usbi_warn(ctx, "could not sanitize device id path for devinst %X, skipping", + dev_info_data.DevInst); + continue; + } +#ifdef ENUM_DEBUG + usbi_dbg("PRO: %s", dev_id_path); +#endif + + // The SPDRP_ADDRESS for USB devices is the device port number on the hub + port_nr = 0; + if ((pass >= HUB_PASS) && (pass <= GEN_PASS)) { + if ( (!pSetupDiGetDeviceRegistryPropertyA(dev_info, &dev_info_data, SPDRP_ADDRESS, + ®_type, (BYTE*)&port_nr, 4, &size)) + || (size != 4) ) { + usbi_warn(ctx, "could not retrieve port number for device '%s', skipping: %s", + dev_id_path, windows_error_str(0)); + continue; + } + } + + // Set API to use or get additional data from generic pass + api = USB_API_UNSUPPORTED; + switch (pass) { + case HCD_PASS: + break; + case GEN_PASS: + // We use the GEN pass to detect driverless devices... 
+ size = sizeof(strbuf); + if (!pSetupDiGetDeviceRegistryPropertyA(dev_info, &dev_info_data, SPDRP_DRIVER, + ®_type, (BYTE*)strbuf, size, &size)) { + usbi_info(ctx, "The following device has no driver: '%s'", dev_id_path); + usbi_info(ctx, "libusb will not be able to access it."); + } + // ...and to add the additional device interface GUIDs + key = pSetupDiOpenDevRegKey(dev_info, &dev_info_data, DICS_FLAG_GLOBAL, 0, DIREG_DEV, KEY_READ); + if (key != INVALID_HANDLE_VALUE) { + size = sizeof(guid_string_w); + s = pRegQueryValueExW(key, L"DeviceInterfaceGUIDs", NULL, ®_type, + (BYTE*)guid_string_w, &size); + pRegCloseKey(key); + if (s == ERROR_SUCCESS) { + if (nb_guids >= MAX_ENUM_GUIDS) { + // If this assert is ever reported, grow a GUID table dynamically + usbi_err(ctx, "program assertion failed: too many GUIDs"); + LOOP_BREAK(LIBUSB_ERROR_OVERFLOW); + } + if_guid = (GUID*) calloc(1, sizeof(GUID)); + pCLSIDFromString(guid_string_w, if_guid); + guid[nb_guids++] = if_guid; + usbi_dbg("extra GUID: %s", guid_to_string(if_guid)); + } + } + break; + default: + // Get the API type (after checking that the driver installation is OK) + if ( (!pSetupDiGetDeviceRegistryPropertyA(dev_info, &dev_info_data, SPDRP_INSTALL_STATE, + ®_type, (BYTE*)&install_state, 4, &size)) + || (size != 4) ){ + usbi_warn(ctx, "could not detect installation state of driver for '%s': %s", + dev_id_path, windows_error_str(0)); + } else if (install_state != 0) { + usbi_warn(ctx, "driver for device '%s' is reporting an issue (code: %d) - skipping", + dev_id_path, install_state); + continue; + } + api = get_api_type(ctx, &dev_info, &dev_info_data); + break; + } + + // Find parent device (for the passes that need it) + switch (pass) { + case HCD_PASS: + case DEV_PASS: + case HUB_PASS: + break; + default: + // Go through the ancestors until we see a face we recognize + parent_dev = NULL; + for (ancestor = 1; parent_dev == NULL; ancestor++) { + session_id = get_ancestor_session_id(dev_info_data.DevInst, 
ancestor); + if (session_id == 0) { + break; + } + parent_dev = usbi_get_device_by_session_id(ctx, session_id); + } + if (parent_dev == NULL) { + usbi_dbg("unlisted ancestor for '%s' (newly connected, etc.) - ignoring", dev_id_path); + continue; + } + parent_priv = _device_priv(parent_dev); + // virtual USB devices are also listed during GEN - don't process these yet + if ( (pass == GEN_PASS) && (parent_priv->apib->id != USB_API_HUB) ) { + continue; + } + break; + } + + // Create new or match existing device, using the (hashed) device_id as session id + if (pass <= DEV_PASS) { // For subsequent passes, we'll lookup the parent + // These are the passes that create "new" devices + session_id = htab_hash(dev_id_path); + dev = usbi_get_device_by_session_id(ctx, session_id); + if (dev == NULL) { + if (pass == DEV_PASS) { + // This can occur if the OS only reports a newly plugged device after we started enum + usbi_warn(ctx, "'%s' was only detected in late pass (newly connected device?)" + " - ignoring", dev_id_path); + continue; + } + usbi_dbg("allocating new device for session [%X]", session_id); + if ((dev = usbi_alloc_device(ctx, session_id)) == NULL) { + LOOP_BREAK(LIBUSB_ERROR_NO_MEM); + } + windows_device_priv_init(dev); + // Keep track of devices that need unref + unref_list[unref_cur++] = dev; + if (unref_cur >= unref_size) { + unref_size += 64; + unref_list = realloc(unref_list, unref_size*sizeof(libusb_device*)); + if (unref_list == NULL) { + usbi_err(ctx, "could not realloc list for unref - aborting."); + LOOP_BREAK(LIBUSB_ERROR_NO_MEM); + } + } + } else { + usbi_dbg("found existing device for session [%X] (%d.%d)", + session_id, dev->bus_number, dev->device_address); + } + priv = _device_priv(dev); + } + + // Setup device + switch (pass) { + case HCD_PASS: + dev->bus_number = (uint8_t)(i + 1); // bus 0 is reserved for disconnected + dev->device_address = 0; + dev->num_configurations = 0; + priv->apib = &usb_api_backend[USB_API_HUB]; + priv->depth = 
UINT8_MAX; // Overflow to 0 for HCD Hubs + priv->path = dev_interface_path; dev_interface_path = NULL; + break; + case HUB_PASS: + case DEV_PASS: + // If the device has already been setup, don't do it again + if (priv->path != NULL) + break; + // Take care of API initialization + priv->path = dev_interface_path; dev_interface_path = NULL; + priv->apib = &usb_api_backend[api]; + switch(api) { + case USB_API_COMPOSITE: + case USB_API_HUB: + break; + default: + // For other devices, the first interface is the same as the device + priv->usb_interface[0].path = (char*) calloc(safe_strlen(priv->path)+1, 1); + if (priv->usb_interface[0].path != NULL) { + safe_strcpy(priv->usb_interface[0].path, safe_strlen(priv->path)+1, priv->path); + } else { + usbi_warn(ctx, "could not duplicate interface path '%s'", priv->path); + } + // The following is needed if we want API calls to work for both simple + // and composite devices. + for(j=0; jusb_interface[j].apib = &usb_api_backend[api]; + } + break; + } + break; + case GEN_PASS: + r = init_device(dev, parent_dev, (uint8_t)port_nr, dev_id_path, dev_info_data.DevInst); + if (r == LIBUSB_SUCCESS) { + // Append device to the list of discovered devices + discdevs = discovered_devs_append(*_discdevs, dev); + if (!discdevs) { + LOOP_BREAK(LIBUSB_ERROR_NO_MEM); + } + *_discdevs = discdevs; + } else if (r == LIBUSB_ERROR_NO_DEVICE) { + // This can occur if the device was disconnected but Windows hasn't + // refreshed its enumeration yet - in that case, we ignore the device + r = LIBUSB_SUCCESS; + } + break; + default: // later passes + if (parent_priv->apib->id == USB_API_COMPOSITE) { + usbi_dbg("setting composite interface for [%lX]:", parent_dev->session_data); + switch (set_composite_interface(ctx, parent_dev, dev_interface_path, dev_id_path, api)) { + case LIBUSB_SUCCESS: + dev_interface_path = NULL; + break; + case LIBUSB_ERROR_ACCESS: + // interface has already been set => make sure dev_interface_path is freed then + break; + 
default: + LOOP_BREAK(r); + break; + } + } + break; + } + } + } + + // Free any additional GUIDs + for (pass = DEV_PASS+1; pass < nb_guids; pass++) { + safe_free(guid[pass]); + } + + // Unref newly allocated devs + for (i=0; i any concurent wait stalls until the semaphore release + if (WaitForSingleObject(semaphore, INFINITE) != WAIT_OBJECT_0) { + CloseHandle(semaphore); + return; + } + + // Only works if exits and inits are balanced exactly + if (--concurrent_usage < 0) { // Last exit + for (i=0; idev_descriptor), DEVICE_DESC_LENGTH); + *host_endian = 0; + + return LIBUSB_SUCCESS; +} + +static int windows_get_config_descriptor(struct libusb_device *dev, uint8_t config_index, unsigned char *buffer, size_t len, int *host_endian) +{ + struct windows_device_priv *priv = _device_priv(dev); + PUSB_CONFIGURATION_DESCRIPTOR config_header; + size_t size; + + // config index is zero based + if (config_index >= dev->num_configurations) + return LIBUSB_ERROR_INVALID_PARAM; + + if ((priv->config_descriptor == NULL) || (priv->config_descriptor[config_index] == NULL)) + return LIBUSB_ERROR_NOT_FOUND; + + config_header = (PUSB_CONFIGURATION_DESCRIPTOR)priv->config_descriptor[config_index]; + + size = min(config_header->wTotalLength, len); + memcpy(buffer, priv->config_descriptor[config_index], size); + + return LIBUSB_SUCCESS; +} + +/* + * return the cached copy of the active config descriptor + */ +static int windows_get_active_config_descriptor(struct libusb_device *dev, unsigned char *buffer, size_t len, int *host_endian) +{ + struct windows_device_priv *priv = _device_priv(dev); + + if (priv->active_config == 0) + return LIBUSB_ERROR_NOT_FOUND; + + // config index is zero based + return windows_get_config_descriptor(dev, (uint8_t)(priv->active_config-1), buffer, len, host_endian); +} + +static int windows_open(struct libusb_device_handle *dev_handle) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + struct libusb_context *ctx = 
DEVICE_CTX(dev_handle->dev); + + if (priv->apib == NULL) { + usbi_err(ctx, "program assertion failed - device is not initialized"); + return LIBUSB_ERROR_NO_DEVICE; + } + + return priv->apib->open(dev_handle); +} + +static void windows_close(struct libusb_device_handle *dev_handle) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + + priv->apib->close(dev_handle); +} + +static int windows_get_configuration(struct libusb_device_handle *dev_handle, int *config) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + + if (priv->active_config == 0) { + *config = 0; + return LIBUSB_ERROR_NOT_FOUND; + } + + *config = priv->active_config; + return LIBUSB_SUCCESS; +} + +/* + * from http://msdn.microsoft.com/en-us/library/ms793522.aspx: "The port driver + * does not currently expose a service that allows higher-level drivers to set + * the configuration." + */ +static int windows_set_configuration(struct libusb_device_handle *dev_handle, int config) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + int r = LIBUSB_SUCCESS; + + if (config >= USB_MAXCONFIG) + return LIBUSB_ERROR_INVALID_PARAM; + + r = libusb_control_transfer(dev_handle, LIBUSB_ENDPOINT_OUT | + LIBUSB_REQUEST_TYPE_STANDARD | LIBUSB_RECIPIENT_DEVICE, + LIBUSB_REQUEST_SET_CONFIGURATION, (uint16_t)config, + 0, NULL, 0, 1000); + + if (r == LIBUSB_SUCCESS) { + priv->active_config = (uint8_t)config; + } + return r; +} + +static int windows_claim_interface(struct libusb_device_handle *dev_handle, int iface) +{ + int r = LIBUSB_SUCCESS; + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + + if (iface >= USB_MAXINTERFACES) + return LIBUSB_ERROR_INVALID_PARAM; + + safe_free(priv->usb_interface[iface].endpoint); + priv->usb_interface[iface].nb_endpoints= 0; + + r = priv->apib->claim_interface(dev_handle, iface); + + if (r == LIBUSB_SUCCESS) { + r = windows_assign_endpoints(dev_handle, iface, 0); + } + + return r; +} + +static int 
windows_set_interface_altsetting(struct libusb_device_handle *dev_handle, int iface, int altsetting) +{ + int r = LIBUSB_SUCCESS; + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + + safe_free(priv->usb_interface[iface].endpoint); + priv->usb_interface[iface].nb_endpoints= 0; + + r = priv->apib->set_interface_altsetting(dev_handle, iface, altsetting); + + if (r == LIBUSB_SUCCESS) { + r = windows_assign_endpoints(dev_handle, iface, altsetting); + } + + return r; +} + +static int windows_release_interface(struct libusb_device_handle *dev_handle, int iface) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + + return priv->apib->release_interface(dev_handle, iface); +} + +static int windows_clear_halt(struct libusb_device_handle *dev_handle, unsigned char endpoint) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + return priv->apib->clear_halt(dev_handle, endpoint); +} + +static int windows_reset_device(struct libusb_device_handle *dev_handle) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + return priv->apib->reset_device(dev_handle); +} + +// The 3 functions below are unlikely to ever get supported on Windows +static int windows_kernel_driver_active(struct libusb_device_handle *dev_handle, int iface) +{ + return LIBUSB_ERROR_NOT_SUPPORTED; +} + +static int windows_attach_kernel_driver(struct libusb_device_handle *dev_handle, int iface) +{ + return LIBUSB_ERROR_NOT_SUPPORTED; +} + +static int windows_detach_kernel_driver(struct libusb_device_handle *dev_handle, int iface) +{ + return LIBUSB_ERROR_NOT_SUPPORTED; +} + +static void windows_destroy_device(struct libusb_device *dev) +{ + windows_device_priv_release(dev); +} + +static void windows_clear_transfer_priv(struct usbi_transfer *itransfer) +{ + struct windows_transfer_priv *transfer_priv = (struct windows_transfer_priv*)usbi_transfer_get_os_priv(itransfer); + + usbi_free_fd(transfer_priv->pollable_fd.fd); + // When auto claim 
is in use, attempt to release the auto-claimed interface + auto_release(itransfer); +} + +static int submit_bulk_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_transfer_priv *transfer_priv = (struct windows_transfer_priv*)usbi_transfer_get_os_priv(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int r; + + r = priv->apib->submit_bulk_transfer(itransfer); + if (r != LIBUSB_SUCCESS) { + return r; + } + + usbi_add_pollfd(ctx, transfer_priv->pollable_fd.fd, + (short)(IS_XFERIN(transfer) ? POLLIN : POLLOUT)); + + itransfer->flags |= USBI_TRANSFER_UPDATED_FDS; + return LIBUSB_SUCCESS; +} + +static int submit_iso_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_transfer_priv *transfer_priv = (struct windows_transfer_priv*)usbi_transfer_get_os_priv(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int r; + + r = priv->apib->submit_iso_transfer(itransfer); + if (r != LIBUSB_SUCCESS) { + return r; + } + + usbi_add_pollfd(ctx, transfer_priv->pollable_fd.fd, + (short)(IS_XFERIN(transfer) ? 
POLLIN : POLLOUT)); + + itransfer->flags |= USBI_TRANSFER_UPDATED_FDS; + return LIBUSB_SUCCESS; +} + +static int submit_control_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_transfer_priv *transfer_priv = (struct windows_transfer_priv*)usbi_transfer_get_os_priv(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int r; + + r = priv->apib->submit_control_transfer(itransfer); + if (r != LIBUSB_SUCCESS) { + return r; + } + + usbi_add_pollfd(ctx, transfer_priv->pollable_fd.fd, POLLIN); + + itransfer->flags |= USBI_TRANSFER_UPDATED_FDS; + return LIBUSB_SUCCESS; + +} + +static int windows_submit_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + return submit_control_transfer(itransfer); + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + return submit_bulk_transfer(itransfer); + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + return submit_iso_transfer(itransfer); + default: + usbi_err(TRANSFER_CTX(transfer), "unknown endpoint type %d", transfer->type); + return LIBUSB_ERROR_INVALID_PARAM; + } +} + +static int windows_abort_control(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + + return priv->apib->abort_control(itransfer); +} + +static int windows_abort_transfers(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + + return priv->apib->abort_transfers(itransfer); +} + +static int windows_cancel_transfer(struct 
usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + switch (transfer->type) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + return windows_abort_control(itransfer); + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + return windows_abort_transfers(itransfer); + default: + usbi_err(ITRANSFER_CTX(itransfer), "unknown endpoint type %d", transfer->type); + return LIBUSB_ERROR_INVALID_PARAM; + } +} + +static void windows_transfer_callback(struct usbi_transfer *itransfer, uint32_t io_result, uint32_t io_size) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int status; + + usbi_dbg("handling I/O completion with errcode %d", io_result); + + switch(io_result) { + case NO_ERROR: + status = priv->apib->copy_transfer_data(itransfer, io_size); + break; + case ERROR_GEN_FAILURE: + usbi_dbg("detected endpoint stall"); + status = LIBUSB_TRANSFER_STALL; + break; + case ERROR_SEM_TIMEOUT: + usbi_dbg("detected semaphore timeout"); + status = LIBUSB_TRANSFER_TIMED_OUT; + break; + case ERROR_OPERATION_ABORTED: + if (itransfer->flags & USBI_TRANSFER_TIMED_OUT) { + usbi_dbg("detected timeout"); + status = LIBUSB_TRANSFER_TIMED_OUT; + } else { + usbi_dbg("detected operation aborted"); + status = LIBUSB_TRANSFER_CANCELLED; + } + break; + default: + usbi_err(ITRANSFER_CTX(itransfer), "detected I/O error: %s", windows_error_str(0)); + status = LIBUSB_TRANSFER_ERROR; + break; + } + windows_clear_transfer_priv(itransfer); // Cancel polling + usbi_handle_transfer_completion(itransfer, (enum libusb_transfer_status)status); +} + +static void windows_handle_callback (struct usbi_transfer *itransfer, uint32_t io_result, uint32_t io_size) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + + switch (transfer->type) { + case 
LIBUSB_TRANSFER_TYPE_CONTROL: + case LIBUSB_TRANSFER_TYPE_BULK: + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + windows_transfer_callback (itransfer, io_result, io_size); + break; + default: + usbi_err(ITRANSFER_CTX(itransfer), "unknown endpoint type %d", transfer->type); + } +} + +static int windows_handle_events(struct libusb_context *ctx, struct pollfd *fds, POLL_NFDS_TYPE nfds, int num_ready) +{ + struct windows_transfer_priv* transfer_priv = NULL; + POLL_NFDS_TYPE i = 0; + bool found = false; + struct usbi_transfer *transfer; + DWORD io_size, io_result; + + usbi_mutex_lock(&ctx->open_devs_lock); + for (i = 0; i < nfds && num_ready > 0; i++) { + + usbi_dbg("checking fd %d with revents = %04x", fds[i].fd, fds[i].revents); + + if (!fds[i].revents) { + continue; + } + + num_ready--; + + // Because a Windows OVERLAPPED is used for poll emulation, + // a pollable fd is created and stored with each transfer + usbi_mutex_lock(&ctx->flying_transfers_lock); + list_for_each_entry(transfer, &ctx->flying_transfers, list, struct usbi_transfer) { + transfer_priv = usbi_transfer_get_os_priv(transfer); + if (transfer_priv->pollable_fd.fd == fds[i].fd) { + found = true; + break; + } + } + usbi_mutex_unlock(&ctx->flying_transfers_lock); + + if (found) { + // Handle async requests that completed synchronously first + if (HasOverlappedIoCompletedSync(transfer_priv->pollable_fd.overlapped)) { + io_result = NO_ERROR; + io_size = (DWORD)transfer_priv->pollable_fd.overlapped->InternalHigh; + // Regular async overlapped + } else if (GetOverlappedResult(transfer_priv->pollable_fd.handle, + transfer_priv->pollable_fd.overlapped, &io_size, false)) { + io_result = NO_ERROR; + } else { + io_result = GetLastError(); + } + usbi_remove_pollfd(ctx, transfer_priv->pollable_fd.fd); + // let handle_callback free the event using the transfer wfd + // If you don't use the transfer wfd, you run a risk of trying to free a + // newly allocated wfd that took the place 
of the one from the transfer. + windows_handle_callback(transfer, io_result, io_size); + } else { + usbi_err(ctx, "could not find a matching transfer for fd %x", fds[i]); + return LIBUSB_ERROR_NOT_FOUND; + } + } + + usbi_mutex_unlock(&ctx->open_devs_lock); + return LIBUSB_SUCCESS; +} + +/* + * Monotonic and real time functions + */ +unsigned __stdcall windows_clock_gettime_threaded(void* param) +{ + LARGE_INTEGER hires_counter, li_frequency; + LONG nb_responses; + int timer_index; + + // Init - find out if we have access to a monotonic (hires) timer + if (!QueryPerformanceFrequency(&li_frequency)) { + usbi_dbg("no hires timer available on this platform"); + hires_frequency = 0; + hires_ticks_to_ps = UINT64_C(0); + } else { + hires_frequency = li_frequency.QuadPart; + // The hires frequency can go as high as 4 GHz, so we'll use a conversion + // to picoseconds to compute the tv_nsecs part in clock_gettime + hires_ticks_to_ps = UINT64_C(1000000000000) / hires_frequency; + usbi_dbg("hires timer available (Frequency: %I64u Hz)", hires_frequency); + } + + // Main loop - wait for requests + while (1) { + timer_index = WaitForMultipleObjects(2, timer_request, FALSE, INFINITE) - WAIT_OBJECT_0; + if ( (timer_index != 0) && (timer_index != 1) ) { + usbi_dbg("failure to wait on requests: %s", windows_error_str(0)); + continue; + } + if (request_count[timer_index] == 0) { + // Request already handled + ResetEvent(timer_request[timer_index]); + // There's still a possiblity that a thread sends a request between the + // time we test request_count[] == 0 and we reset the event, in which case + // the request would be ignored. The simple solution to that is to test + // request_count again and process requests if non zero. 
+ if (request_count[timer_index] == 0) + continue; + } + switch (timer_index) { + case 0: + WaitForSingleObject(timer_mutex, INFINITE); + // Requests to this thread are for hires always + if (QueryPerformanceCounter(&hires_counter) != 0) { + timer_tp.tv_sec = (long)(hires_counter.QuadPart / hires_frequency); + timer_tp.tv_nsec = (long)(((hires_counter.QuadPart % hires_frequency)/1000) * hires_ticks_to_ps); + } else { + // Fallback to real-time if we can't get monotonic value + // Note that real-time clock does not wait on the mutex or this thread. + windows_clock_gettime(USBI_CLOCK_REALTIME, &timer_tp); + } + ReleaseMutex(timer_mutex); + + nb_responses = InterlockedExchange((LONG*)&request_count[0], 0); + if ( (nb_responses) + && (ReleaseSemaphore(timer_response, nb_responses, NULL) == 0) ) { + usbi_dbg("unable to release timer semaphore %d: %s", windows_error_str(0)); + } + continue; + case 1: // time to quit + usbi_dbg("timer thread quitting"); + return 0; + } + } + usbi_dbg("ERROR: broken timer thread"); + return 1; +} + +static int windows_clock_gettime(int clk_id, struct timespec *tp) +{ + FILETIME filetime; + ULARGE_INTEGER rtime; + DWORD r; + switch(clk_id) { + case USBI_CLOCK_MONOTONIC: + if (hires_frequency != 0) { + while (1) { + InterlockedIncrement((LONG*)&request_count[0]); + SetEvent(timer_request[0]); + r = WaitForSingleObject(timer_response, TIMER_REQUEST_RETRY_MS); + switch(r) { + case WAIT_OBJECT_0: + WaitForSingleObject(timer_mutex, INFINITE); + *tp = timer_tp; + ReleaseMutex(timer_mutex); + return LIBUSB_SUCCESS; + case WAIT_TIMEOUT: + usbi_dbg("could not obtain a timer value within reasonable timeframe - too much load?"); + break; // Retry until successful + default: + usbi_dbg("WaitForSingleObject failed: %s", windows_error_str(0)); + return LIBUSB_ERROR_OTHER; + } + } + } + // Fall through and return real-time if monotonic was not detected @ timer init + case USBI_CLOCK_REALTIME: + // We follow 
http://msdn.microsoft.com/en-us/library/ms724928%28VS.85%29.aspx + // with a predef epoch_time to have an epoch that starts at 1970.01.01 00:00 + // Note however that our resolution is bounded by the Windows system time + // functions and is at best of the order of 1 ms (or, usually, worse) + GetSystemTimeAsFileTime(&filetime); + rtime.LowPart = filetime.dwLowDateTime; + rtime.HighPart = filetime.dwHighDateTime; + rtime.QuadPart -= epoch_time; + tp->tv_sec = (long)(rtime.QuadPart / 10000000); + tp->tv_nsec = (long)((rtime.QuadPart % 10000000)*100); + return LIBUSB_SUCCESS; + default: + return LIBUSB_ERROR_INVALID_PARAM; + } +} + + +// NB: MSVC6 does not support named initializers. +const struct usbi_os_backend windows_backend = { + "Windows", + windows_init, + windows_exit, + + windows_get_device_list, + windows_open, + windows_close, + + windows_get_device_descriptor, + windows_get_active_config_descriptor, + windows_get_config_descriptor, + + windows_get_configuration, + windows_set_configuration, + windows_claim_interface, + windows_release_interface, + + windows_set_interface_altsetting, + windows_clear_halt, + windows_reset_device, + + windows_kernel_driver_active, + windows_detach_kernel_driver, + windows_attach_kernel_driver, + + windows_destroy_device, + + windows_submit_transfer, + windows_cancel_transfer, + windows_clear_transfer_priv, + + windows_handle_events, + + windows_clock_gettime, +#if defined(USBI_TIMERFD_AVAILABLE) + NULL, +#endif + sizeof(struct windows_device_priv), + sizeof(struct windows_device_handle_priv), + sizeof(struct windows_transfer_priv), + 0, +}; + + +/* + * USB API backends + */ +static int unsupported_init(struct libusb_context *ctx) { + return LIBUSB_SUCCESS; +} +static int unsupported_exit(void) { + return LIBUSB_SUCCESS; +} +static int unsupported_open(struct libusb_device_handle *dev_handle) { + PRINT_UNSUPPORTED_API(open); +} +static void unsupported_close(struct libusb_device_handle *dev_handle) { + usbi_dbg("unsupported 
API call for 'close'"); +} +static int unsupported_claim_interface(struct libusb_device_handle *dev_handle, int iface) { + PRINT_UNSUPPORTED_API(claim_interface); +} +static int unsupported_set_interface_altsetting(struct libusb_device_handle *dev_handle, int iface, int altsetting) { + PRINT_UNSUPPORTED_API(set_interface_altsetting); +} +static int unsupported_release_interface(struct libusb_device_handle *dev_handle, int iface) { + PRINT_UNSUPPORTED_API(release_interface); +} +static int unsupported_clear_halt(struct libusb_device_handle *dev_handle, unsigned char endpoint) { + PRINT_UNSUPPORTED_API(clear_halt); +} +static int unsupported_reset_device(struct libusb_device_handle *dev_handle) { + PRINT_UNSUPPORTED_API(reset_device); +} +static int unsupported_submit_bulk_transfer(struct usbi_transfer *itransfer) { + PRINT_UNSUPPORTED_API(submit_bulk_transfer); +} +static int unsupported_submit_iso_transfer(struct usbi_transfer *itransfer) { + PRINT_UNSUPPORTED_API(submit_iso_transfer); +} +static int unsupported_submit_control_transfer(struct usbi_transfer *itransfer) { + PRINT_UNSUPPORTED_API(submit_control_transfer); +} +static int unsupported_abort_control(struct usbi_transfer *itransfer) { + PRINT_UNSUPPORTED_API(abort_control); +} +static int unsupported_abort_transfers(struct usbi_transfer *itransfer) { + PRINT_UNSUPPORTED_API(abort_transfers); +} +static int unsupported_copy_transfer_data(struct usbi_transfer *itransfer, uint32_t io_size) { + PRINT_UNSUPPORTED_API(copy_transfer_data); +} + +// These names must be uppercase +const char* hub_driver_names[] = {"USBHUB", "USBHUB3", "USB3HUB", "NUSB3HUB", "RUSB3HUB", "FLXHCIH", "TIHUB3", "ETRONHUB3", "VIAHUB3", "ASMTHUB3", "IUSB3HUB", "VUSB3HUB", "AMDHUB30"}; +const char* composite_driver_names[] = {"USBCCGP"}; +const char* winusb_driver_names[] = {"WINUSB"}; +const struct windows_usb_api_backend usb_api_backend[USB_API_MAX] = { + { + USB_API_UNSUPPORTED, + "Unsupported API", + &CLASS_GUID_UNSUPPORTED, + NULL, + 
0, + unsupported_init, + unsupported_exit, + unsupported_open, + unsupported_close, + unsupported_claim_interface, + unsupported_set_interface_altsetting, + unsupported_release_interface, + unsupported_clear_halt, + unsupported_reset_device, + unsupported_submit_bulk_transfer, + unsupported_submit_iso_transfer, + unsupported_submit_control_transfer, + unsupported_abort_control, + unsupported_abort_transfers, + unsupported_copy_transfer_data, + }, { + USB_API_HUB, + "HUB API", + &CLASS_GUID_UNSUPPORTED, + hub_driver_names, + sizeof(hub_driver_names)/sizeof(hub_driver_names[0]), + unsupported_init, + unsupported_exit, + unsupported_open, + unsupported_close, + unsupported_claim_interface, + unsupported_set_interface_altsetting, + unsupported_release_interface, + unsupported_clear_halt, + unsupported_reset_device, + unsupported_submit_bulk_transfer, + unsupported_submit_iso_transfer, + unsupported_submit_control_transfer, + unsupported_abort_control, + unsupported_abort_transfers, + unsupported_copy_transfer_data, + }, { + USB_API_COMPOSITE, + "Composite API", + &CLASS_GUID_COMPOSITE, + composite_driver_names, + sizeof(composite_driver_names)/sizeof(composite_driver_names[0]), + composite_init, + composite_exit, + composite_open, + composite_close, + composite_claim_interface, + composite_set_interface_altsetting, + composite_release_interface, + composite_clear_halt, + composite_reset_device, + composite_submit_bulk_transfer, + composite_submit_iso_transfer, + composite_submit_control_transfer, + composite_abort_control, + composite_abort_transfers, + composite_copy_transfer_data, + }, { + USB_API_WINUSB, + "WinUSB API", + &CLASS_GUID_LIBUSB_WINUSB, + winusb_driver_names, + sizeof(winusb_driver_names)/sizeof(winusb_driver_names[0]), + winusb_init, + winusb_exit, + winusb_open, + winusb_close, + winusb_claim_interface, + winusb_set_interface_altsetting, + winusb_release_interface, + winusb_clear_halt, + winusb_reset_device, + winusb_submit_bulk_transfer, + 
unsupported_submit_iso_transfer, + winusb_submit_control_transfer, + winusb_abort_control, + winusb_abort_transfers, + winusb_copy_transfer_data, + }, +}; + + +/* + * WinUSB API functions + */ +static int winusb_init(struct libusb_context *ctx) +{ + DLL_LOAD(winusb.dll, WinUsb_Initialize, TRUE); + DLL_LOAD(winusb.dll, WinUsb_Free, TRUE); + DLL_LOAD(winusb.dll, WinUsb_GetAssociatedInterface, TRUE); + DLL_LOAD(winusb.dll, WinUsb_GetDescriptor, TRUE); + DLL_LOAD(winusb.dll, WinUsb_QueryInterfaceSettings, TRUE); + DLL_LOAD(winusb.dll, WinUsb_QueryDeviceInformation, TRUE); + DLL_LOAD(winusb.dll, WinUsb_SetCurrentAlternateSetting, TRUE); + DLL_LOAD(winusb.dll, WinUsb_GetCurrentAlternateSetting, TRUE); + DLL_LOAD(winusb.dll, WinUsb_QueryPipe, TRUE); + DLL_LOAD(winusb.dll, WinUsb_SetPipePolicy, TRUE); + DLL_LOAD(winusb.dll, WinUsb_GetPipePolicy, TRUE); + DLL_LOAD(winusb.dll, WinUsb_ReadPipe, TRUE); + DLL_LOAD(winusb.dll, WinUsb_WritePipe, TRUE); + DLL_LOAD(winusb.dll, WinUsb_ControlTransfer, TRUE); + DLL_LOAD(winusb.dll, WinUsb_ResetPipe, TRUE); + DLL_LOAD(winusb.dll, WinUsb_AbortPipe, TRUE); + DLL_LOAD(winusb.dll, WinUsb_FlushPipe, TRUE); + + api_winusb_available = true; + return LIBUSB_SUCCESS; +} + +static int winusb_exit(void) +{ + return LIBUSB_SUCCESS; +} + +// NB: open and close must ensure that they only handle interface of +// the right API type, as these functions can be called wholesale from +// composite_open(), with interfaces belonging to different APIs +static int winusb_open(struct libusb_device_handle *dev_handle) +{ + struct libusb_context *ctx = DEVICE_CTX(dev_handle->dev); + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + + HANDLE file_handle; + int i; + + CHECK_WINUSB_AVAILABLE; + + // WinUSB requires a seperate handle for each interface + for (i = 0; i < USB_MAXINTERFACES; i++) { + if ( (priv->usb_interface[i].path != NULL) + && 
(priv->usb_interface[i].apib->id == USB_API_WINUSB) ) { + file_handle = CreateFileA(priv->usb_interface[i].path, GENERIC_WRITE | GENERIC_READ, FILE_SHARE_WRITE | FILE_SHARE_READ, + NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, NULL); + if (file_handle == INVALID_HANDLE_VALUE) { + usbi_err(ctx, "could not open device %s (interface %d): %s", priv->usb_interface[i].path, i, windows_error_str(0)); + switch(GetLastError()) { + case ERROR_FILE_NOT_FOUND: // The device was disconnected + return LIBUSB_ERROR_NO_DEVICE; + case ERROR_ACCESS_DENIED: + return LIBUSB_ERROR_ACCESS; + default: + return LIBUSB_ERROR_IO; + } + } + handle_priv->interface_handle[i].dev_handle = file_handle; + } + } + + return LIBUSB_SUCCESS; +} + +static void winusb_close(struct libusb_device_handle *dev_handle) +{ + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + HANDLE file_handle; + int i; + + if (!api_winusb_available) + return; + + for (i = 0; i < USB_MAXINTERFACES; i++) { + if (priv->usb_interface[i].apib->id == USB_API_WINUSB) { + file_handle = handle_priv->interface_handle[i].dev_handle; + if ( (file_handle != 0) && (file_handle != INVALID_HANDLE_VALUE)) { + CloseHandle(file_handle); + } + } + } +} + +static int winusb_configure_endpoints(struct libusb_device_handle *dev_handle, int iface) +{ + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + HANDLE winusb_handle = handle_priv->interface_handle[iface].api_handle; + UCHAR policy; + ULONG timeout = 0; + uint8_t endpoint_address; + int i; + + CHECK_WINUSB_AVAILABLE; + + // With handle and enpoints set (in parent), we can setup the default pipe properties + // see http://download.microsoft.com/download/D/1/D/D1DD7745-426B-4CC3-A269-ABBBE427C0EF/DVC-T705_DDC08.pptx + for (i=-1; iusb_interface[iface].nb_endpoints; i++) { + 
endpoint_address =(i==-1)?0:priv->usb_interface[iface].endpoint[i]; + if (!WinUsb_SetPipePolicy(winusb_handle, endpoint_address, + PIPE_TRANSFER_TIMEOUT, sizeof(ULONG), &timeout)) { + usbi_dbg("failed to set PIPE_TRANSFER_TIMEOUT for control endpoint %02X", endpoint_address); + } + if (i == -1) + continue; // Other policies don't apply to control endpoint + policy = true; + if (!WinUsb_SetPipePolicy(winusb_handle, endpoint_address, + AUTO_CLEAR_STALL, sizeof(UCHAR), &policy)) { + usbi_dbg("failed to enable AUTO_CLEAR_STALL for endpoint %02X", endpoint_address); + } + } + + return LIBUSB_SUCCESS; +} + +static int winusb_claim_interface(struct libusb_device_handle *dev_handle, int iface) +{ + struct libusb_context *ctx = DEVICE_CTX(dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + bool is_using_usbccgp = (priv->apib->id == USB_API_COMPOSITE); + HANDLE file_handle, winusb_handle; + + CHECK_WINUSB_AVAILABLE; + + // If the device is composite, but using the default Windows composite parent driver (usbccgp) + // or if it's the first WinUSB interface, we get a handle through WinUsb_Initialize(). 
+ if ((is_using_usbccgp) || (iface == 0)) { + // composite device (independent interfaces) or interface 0 + file_handle = handle_priv->interface_handle[iface].dev_handle; + if ((file_handle == 0) || (file_handle == INVALID_HANDLE_VALUE)) { + return LIBUSB_ERROR_NOT_FOUND; + } + + if (!WinUsb_Initialize(file_handle, &winusb_handle)) { + usbi_err(ctx, "could not access interface %d: %s", iface, windows_error_str(0)); + handle_priv->interface_handle[iface].api_handle = INVALID_HANDLE_VALUE; + + switch(GetLastError()) { + case ERROR_BAD_COMMAND: // The device was disconnected + return LIBUSB_ERROR_NO_DEVICE; + default: + usbi_err(ctx, "could not claim interface %d: %s", iface, windows_error_str(0)); + return LIBUSB_ERROR_ACCESS; + } + } + handle_priv->interface_handle[iface].api_handle = winusb_handle; + } else { + // For all other interfaces, use WinUsb_GetAssociatedInterface() + winusb_handle = handle_priv->interface_handle[0].api_handle; + // It is a requirement for multiple interface devices using WinUSB that you + // must first claim the first interface before you claim any other + if ((winusb_handle == 0) || (winusb_handle == INVALID_HANDLE_VALUE)) { + file_handle = handle_priv->interface_handle[0].dev_handle; + if (WinUsb_Initialize(file_handle, &winusb_handle)) { + handle_priv->interface_handle[0].api_handle = winusb_handle; + usbi_warn(ctx, "auto-claimed interface 0 (required to claim %d with WinUSB)", iface); + } else { + usbi_warn(ctx, "failed to auto-claim interface 0 (required to claim %d with WinUSB)", iface); + return LIBUSB_ERROR_ACCESS; + } + } + if (!WinUsb_GetAssociatedInterface(winusb_handle, (UCHAR)(iface-1), + &handle_priv->interface_handle[iface].api_handle)) { + handle_priv->interface_handle[iface].api_handle = INVALID_HANDLE_VALUE; + switch(GetLastError()) { + case ERROR_NO_MORE_ITEMS: // invalid iface + return LIBUSB_ERROR_NOT_FOUND; + case ERROR_BAD_COMMAND: // The device was disconnected + return LIBUSB_ERROR_NO_DEVICE; + case 
ERROR_ALREADY_EXISTS: // already claimed + return LIBUSB_ERROR_BUSY; + default: + usbi_err(ctx, "could not claim interface %d: %s", iface, windows_error_str(0)); + return LIBUSB_ERROR_ACCESS; + } + } + } + usbi_dbg("claimed interface %d", iface); + handle_priv->active_interface = iface; + + return LIBUSB_SUCCESS; +} + +static int winusb_release_interface(struct libusb_device_handle *dev_handle, int iface) +{ + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + HANDLE winusb_handle; + + CHECK_WINUSB_AVAILABLE; + + winusb_handle = handle_priv->interface_handle[iface].api_handle; + if ((winusb_handle == 0) || (winusb_handle == INVALID_HANDLE_VALUE)) { + return LIBUSB_ERROR_NOT_FOUND; + } + + WinUsb_Free(winusb_handle); + handle_priv->interface_handle[iface].api_handle = INVALID_HANDLE_VALUE; + + return LIBUSB_SUCCESS; +} + +/* + * Return the first valid interface (of the same API type), for control transfers + */ +static int winusb_get_valid_interface(struct libusb_device_handle *dev_handle) +{ + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + int i; + + for (i=0; iinterface_handle[i].dev_handle != 0) + && (handle_priv->interface_handle[i].dev_handle != INVALID_HANDLE_VALUE) + && (handle_priv->interface_handle[i].api_handle != 0) + && (handle_priv->interface_handle[i].api_handle != INVALID_HANDLE_VALUE) ) { + return i; + } + } + return -1; +} + +/* + * Lookup interface by endpoint address. 
-1 if not found + */ +static int interface_by_endpoint(struct windows_device_priv *priv, + struct windows_device_handle_priv *handle_priv, uint8_t endpoint_address) +{ + int i, j; + for (i=0; iinterface_handle[i].api_handle == INVALID_HANDLE_VALUE) + continue; + if (handle_priv->interface_handle[i].api_handle == 0) + continue; + if (priv->usb_interface[i].endpoint == NULL) + continue; + for (j=0; jusb_interface[i].nb_endpoints; j++) { + if (priv->usb_interface[i].endpoint[j] == endpoint_address) { + return i; + } + } + } + return -1; +} + +static int winusb_submit_control_transfer(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + struct windows_transfer_priv *transfer_priv = (struct windows_transfer_priv*)usbi_transfer_get_os_priv(itransfer); + struct windows_device_handle_priv *handle_priv = _device_handle_priv( + transfer->dev_handle); + WINUSB_SETUP_PACKET *setup = (WINUSB_SETUP_PACKET *) transfer->buffer; + ULONG size; + HANDLE winusb_handle; + int current_interface; + struct winfd wfd; + + CHECK_WINUSB_AVAILABLE; + + transfer_priv->pollable_fd = INVALID_WINFD; + size = transfer->length - LIBUSB_CONTROL_SETUP_SIZE; + + if (size > MAX_CTRL_BUFFER_LENGTH) + return LIBUSB_ERROR_INVALID_PARAM; + + current_interface = winusb_get_valid_interface(transfer->dev_handle); + if (current_interface < 0) { + if (auto_claim(transfer, ¤t_interface, USB_API_WINUSB) != LIBUSB_SUCCESS) { + return LIBUSB_ERROR_NOT_FOUND; + } + } + + usbi_dbg("will use interface %d", current_interface); + winusb_handle = handle_priv->interface_handle[current_interface].api_handle; + + wfd = usbi_create_fd(winusb_handle, _O_RDONLY); + // Always use the handle returned from usbi_create_fd (wfd.handle) + if (wfd.fd < 0) { + return LIBUSB_ERROR_NO_MEM; + } + + // Sending of set 
configuration control requests from WinUSB creates issues + if ( ((setup->request_type & (0x03 << 5)) == LIBUSB_REQUEST_TYPE_STANDARD) + && (setup->request == LIBUSB_REQUEST_SET_CONFIGURATION) ) { + if (setup->value != priv->active_config) { + usbi_warn(ctx, "cannot set configuration other than the default one"); + usbi_free_fd(wfd.fd); + return LIBUSB_ERROR_INVALID_PARAM; + } + wfd.overlapped->Internal = STATUS_COMPLETED_SYNCHRONOUSLY; + wfd.overlapped->InternalHigh = 0; + } else { + if (!WinUsb_ControlTransfer(wfd.handle, *setup, transfer->buffer + LIBUSB_CONTROL_SETUP_SIZE, size, NULL, wfd.overlapped)) { + if(GetLastError() != ERROR_IO_PENDING) { + usbi_err(ctx, "WinUsb_ControlTransfer failed: %s", windows_error_str(0)); + usbi_free_fd(wfd.fd); + return LIBUSB_ERROR_IO; + } + } else { + wfd.overlapped->Internal = STATUS_COMPLETED_SYNCHRONOUSLY; + wfd.overlapped->InternalHigh = (DWORD)size; + } + } + + // Use priv_transfer to store data needed for async polling + transfer_priv->pollable_fd = wfd; + transfer_priv->interface_number = (uint8_t)current_interface; + + return LIBUSB_SUCCESS; +} + +static int winusb_set_interface_altsetting(struct libusb_device_handle *dev_handle, int iface, int altsetting) +{ + struct libusb_context *ctx = DEVICE_CTX(dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + HANDLE winusb_handle; + + CHECK_WINUSB_AVAILABLE; + + if (altsetting > 255) { + return LIBUSB_ERROR_INVALID_PARAM; + } + + winusb_handle = handle_priv->interface_handle[iface].api_handle; + if ((winusb_handle == 0) || (winusb_handle == INVALID_HANDLE_VALUE)) { + usbi_err(ctx, "interface must be claimed first"); + return LIBUSB_ERROR_NOT_FOUND; + } + + if (!WinUsb_SetCurrentAlternateSetting(winusb_handle, (UCHAR)altsetting)) { + usbi_err(ctx, "WinUsb_SetCurrentAlternateSetting failed: %s", windows_error_str(0)); + return LIBUSB_ERROR_IO; + } + + return LIBUSB_SUCCESS; +} + +static int winusb_submit_bulk_transfer(struct 
usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_transfer_priv *transfer_priv = (struct windows_transfer_priv*)usbi_transfer_get_os_priv(itransfer); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(transfer->dev_handle); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + HANDLE winusb_handle; + bool ret; + int current_interface; + struct winfd wfd; + ULONG ppolicy = sizeof(UCHAR); + UCHAR policy; + + CHECK_WINUSB_AVAILABLE; + + transfer_priv->pollable_fd = INVALID_WINFD; + + current_interface = interface_by_endpoint(priv, handle_priv, transfer->endpoint); + if (current_interface < 0) { + usbi_err(ctx, "unable to match endpoint to an open interface - cancelling transfer"); + return LIBUSB_ERROR_NOT_FOUND; + } + + usbi_dbg("matched endpoint %02X with interface %d", transfer->endpoint, current_interface); + + winusb_handle = handle_priv->interface_handle[current_interface].api_handle; + + wfd = usbi_create_fd(winusb_handle, IS_XFERIN(transfer) ? 
_O_RDONLY : _O_WRONLY); + // Always use the handle returned from usbi_create_fd (wfd.handle) + if (wfd.fd < 0) { + return LIBUSB_ERROR_NO_MEM; + } + + if (IS_XFERIN(transfer)) { + WinUsb_GetPipePolicy(wfd.handle, transfer->endpoint, AUTO_CLEAR_STALL, &ppolicy, &policy); + if (!policy) { + policy = TRUE; + WinUsb_SetPipePolicy(wfd.handle, transfer->endpoint, AUTO_CLEAR_STALL, ppolicy, &policy); + } + ret = WinUsb_ReadPipe(wfd.handle, transfer->endpoint, transfer->buffer, transfer->length, NULL, wfd.overlapped); + } else { + if (transfer->flags & LIBUSB_TRANSFER_ADD_ZERO_PACKET) { + WinUsb_GetPipePolicy(wfd.handle, transfer->endpoint, SHORT_PACKET_TERMINATE, &ppolicy, &policy); + if (!policy) { + policy = TRUE; + WinUsb_SetPipePolicy(wfd.handle, transfer->endpoint, SHORT_PACKET_TERMINATE, ppolicy, &policy); + } + } + usbi_dbg("writing %d bytes", transfer->length); + ret = WinUsb_WritePipe(wfd.handle, transfer->endpoint, transfer->buffer, transfer->length, NULL, wfd.overlapped); + } + if (!ret) { + if(GetLastError() != ERROR_IO_PENDING) { + usbi_err(ctx, "WinUsb_Pipe Transfer failed: %s", windows_error_str(0)); + usbi_free_fd(wfd.fd); + return LIBUSB_ERROR_IO; + } + } else { + wfd.overlapped->Internal = STATUS_COMPLETED_SYNCHRONOUSLY; + wfd.overlapped->InternalHigh = (DWORD)transfer->length; + } + + transfer_priv->pollable_fd = wfd; + transfer_priv->interface_number = (uint8_t)current_interface; + + return LIBUSB_SUCCESS; +} + +static int winusb_clear_halt(struct libusb_device_handle *dev_handle, unsigned char endpoint) +{ + struct libusb_context *ctx = DEVICE_CTX(dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + HANDLE winusb_handle; + int current_interface; + + CHECK_WINUSB_AVAILABLE; + + current_interface = interface_by_endpoint(priv, handle_priv, endpoint); + if (current_interface < 0) { + usbi_err(ctx, "unable to match endpoint to an open 
interface - cannot clear"); + return LIBUSB_ERROR_NOT_FOUND; + } + + usbi_dbg("matched endpoint %02X with interface %d", endpoint, current_interface); + winusb_handle = handle_priv->interface_handle[current_interface].api_handle; + + if (!WinUsb_ResetPipe(winusb_handle, endpoint)) { + usbi_err(ctx, "WinUsb_ResetPipe failed: %s", windows_error_str(0)); + return LIBUSB_ERROR_NO_DEVICE; + } + + return LIBUSB_SUCCESS; +} + +/* + * from http://www.winvistatips.com/winusb-bugchecks-t335323.html (confirmed + * through testing as well): + * "You can not call WinUsb_AbortPipe on control pipe. You can possibly cancel + * the control transfer using CancelIo" + */ +static int winusb_abort_control(struct usbi_transfer *itransfer) +{ + // Cancelling of the I/O is done in the parent + return LIBUSB_SUCCESS; +} + +static int winusb_abort_transfers(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(transfer->dev_handle); + struct windows_transfer_priv *transfer_priv = usbi_transfer_get_os_priv(itransfer); + HANDLE winusb_handle; + int current_interface; + + CHECK_WINUSB_AVAILABLE; + + current_interface = transfer_priv->interface_number; + if ((current_interface < 0) || (current_interface >= USB_MAXINTERFACES)) { + usbi_err(ctx, "program assertion failed: invalid interface_number"); + return LIBUSB_ERROR_NOT_FOUND; + } + usbi_dbg("will use interface %d", current_interface); + + winusb_handle = handle_priv->interface_handle[current_interface].api_handle; + + if (!WinUsb_AbortPipe(winusb_handle, transfer->endpoint)) { + usbi_err(ctx, "WinUsb_AbortPipe failed: %s", windows_error_str(0)); + return LIBUSB_ERROR_NO_DEVICE; + } + + return LIBUSB_SUCCESS; +} + +/* + * from the "How to Use WinUSB to Communicate with a USB Device" Microsoft white paper + * 
(http://www.microsoft.com/whdc/connect/usb/winusb_howto.mspx): + * "WinUSB does not support host-initiated reset port and cycle port operations" and + * IOCTL_INTERNAL_USB_CYCLE_PORT is only available in kernel mode and the + * IOCTL_USB_HUB_CYCLE_PORT ioctl was removed from Vista => the best we can do is + * cycle the pipes (and even then, the control pipe can not be reset using WinUSB) + */ +// TODO (post hotplug): see if we can force eject the device and redetect it (reuse hotplug?) +static int winusb_reset_device(struct libusb_device_handle *dev_handle) +{ + struct libusb_context *ctx = DEVICE_CTX(dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + struct winfd wfd; + HANDLE winusb_handle; + int i, j; + + CHECK_WINUSB_AVAILABLE; + + // Reset any available pipe (except control) + for (i=0; iinterface_handle[i].api_handle; + for (wfd = handle_to_winfd(winusb_handle); wfd.fd > 0;) + { + // Cancel any pollable I/O + usbi_remove_pollfd(ctx, wfd.fd); + usbi_free_fd(wfd.fd); + wfd = handle_to_winfd(winusb_handle); + } + + if ( (winusb_handle != 0) && (winusb_handle != INVALID_HANDLE_VALUE)) { + for (j=0; jusb_interface[i].nb_endpoints; j++) { + usbi_dbg("resetting ep %02X", priv->usb_interface[i].endpoint[j]); + if (!WinUsb_AbortPipe(winusb_handle, priv->usb_interface[i].endpoint[j])) { + usbi_err(ctx, "WinUsb_AbortPipe (pipe address %02X) failed: %s", + priv->usb_interface[i].endpoint[j], windows_error_str(0)); + } + // FlushPipe seems to fail on OUT pipes + if (IS_EPIN(priv->usb_interface[i].endpoint[j]) + && (!WinUsb_FlushPipe(winusb_handle, priv->usb_interface[i].endpoint[j])) ) { + usbi_err(ctx, "WinUsb_FlushPipe (pipe address %02X) failed: %s", + priv->usb_interface[i].endpoint[j], windows_error_str(0)); + } + if (!WinUsb_ResetPipe(winusb_handle, priv->usb_interface[i].endpoint[j])) { + usbi_err(ctx, "WinUsb_ResetPipe (pipe address %02X) 
failed: %s", + priv->usb_interface[i].endpoint[j], windows_error_str(0)); + } + } + } + } + + return LIBUSB_SUCCESS; +} + +static int winusb_copy_transfer_data(struct usbi_transfer *itransfer, uint32_t io_size) +{ + itransfer->transferred += io_size; + return LIBUSB_TRANSFER_COMPLETED; +} + + +/* + * Composite API functions + */ +static int composite_init(struct libusb_context *ctx) +{ + return LIBUSB_SUCCESS; +} + +static int composite_exit(void) +{ + return LIBUSB_SUCCESS; +} + +static int composite_open(struct libusb_device_handle *dev_handle) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + unsigned api; + int r; + uint8_t flag = 1<composite_api_flags & flag) { + r = usb_api_backend[api].open(dev_handle); + if (r != LIBUSB_SUCCESS) { + return r; + } + } + flag <<= 1; + } + return LIBUSB_SUCCESS; +} + +static void composite_close(struct libusb_device_handle *dev_handle) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + unsigned api; + uint8_t flag = 1<composite_api_flags & flag) { + usb_api_backend[api].close(dev_handle); + } + flag <<= 1; + } +} + +static int composite_claim_interface(struct libusb_device_handle *dev_handle, int iface) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + return priv->usb_interface[iface].apib->claim_interface(dev_handle, iface); +} + +static int composite_set_interface_altsetting(struct libusb_device_handle *dev_handle, int iface, int altsetting) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + return priv->usb_interface[iface].apib->set_interface_altsetting(dev_handle, iface, altsetting); +} + +static int composite_release_interface(struct libusb_device_handle *dev_handle, int iface) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + return priv->usb_interface[iface].apib->release_interface(dev_handle, iface); +} + +static int composite_submit_control_transfer(struct usbi_transfer *itransfer) +{ + struct 
libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int i; + + for (i=0; iusb_interface[i].path != NULL) { + usbi_dbg("using interface %d", i); + return priv->usb_interface[i].apib->submit_control_transfer(itransfer); + } + } + + usbi_err(ctx, "no libusb supported interfaces to complete request"); + return LIBUSB_ERROR_NOT_FOUND; +} + +static int composite_submit_bulk_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(transfer->dev_handle); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int current_interface; + + current_interface = interface_by_endpoint(priv, handle_priv, transfer->endpoint); + if (current_interface < 0) { + usbi_err(ctx, "unable to match endpoint to an open interface - cancelling transfer"); + return LIBUSB_ERROR_NOT_FOUND; + } + + return priv->usb_interface[current_interface].apib->submit_bulk_transfer(itransfer); +} + +static int composite_submit_iso_transfer(struct usbi_transfer *itransfer) { + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct libusb_context *ctx = DEVICE_CTX(transfer->dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(transfer->dev_handle); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + int current_interface; + + current_interface = interface_by_endpoint(priv, handle_priv, transfer->endpoint); + if (current_interface < 0) { + usbi_err(ctx, "unable to match endpoint to an open interface - cancelling transfer"); + return LIBUSB_ERROR_NOT_FOUND; + } + + return 
priv->usb_interface[current_interface].apib->submit_iso_transfer(itransfer); +} + +static int composite_clear_halt(struct libusb_device_handle *dev_handle, unsigned char endpoint) +{ + struct libusb_context *ctx = DEVICE_CTX(dev_handle->dev); + struct windows_device_handle_priv *handle_priv = _device_handle_priv(dev_handle); + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + int current_interface; + + current_interface = interface_by_endpoint(priv, handle_priv, endpoint); + if (current_interface < 0) { + usbi_err(ctx, "unable to match endpoint to an open interface - cannot clear"); + return LIBUSB_ERROR_NOT_FOUND; + } + + return priv->usb_interface[current_interface].apib->clear_halt(dev_handle, endpoint); +} + +static int composite_abort_control(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct windows_transfer_priv *transfer_priv = usbi_transfer_get_os_priv(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + + return priv->usb_interface[transfer_priv->interface_number].apib->abort_control(itransfer); +} + +static int composite_abort_transfers(struct usbi_transfer *itransfer) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct windows_transfer_priv *transfer_priv = usbi_transfer_get_os_priv(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + + return priv->usb_interface[transfer_priv->interface_number].apib->abort_transfers(itransfer); +} + +static int composite_reset_device(struct libusb_device_handle *dev_handle) +{ + struct windows_device_priv *priv = _device_priv(dev_handle->dev); + unsigned api; + int r; + uint8_t flag = 1<composite_api_flags & flag) { + r = usb_api_backend[api].reset_device(dev_handle); + if (r != LIBUSB_SUCCESS) { + return r; + } + } + flag <<= 1; + } + return LIBUSB_SUCCESS; +} + +static int 
composite_copy_transfer_data(struct usbi_transfer *itransfer, uint32_t io_size) +{ + struct libusb_transfer *transfer = USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer); + struct windows_transfer_priv *transfer_priv = usbi_transfer_get_os_priv(itransfer); + struct windows_device_priv *priv = _device_priv(transfer->dev_handle->dev); + + return priv->usb_interface[transfer_priv->interface_number].apib->copy_transfer_data(itransfer, io_size); +} diff --git a/compat/libusb-1.0/libusb/os/windows_usb.h b/compat/libusb-1.0/libusb/os/windows_usb.h new file mode 100644 index 0000000..ddbd680 --- /dev/null +++ b/compat/libusb-1.0/libusb/os/windows_usb.h @@ -0,0 +1,608 @@ +/* + * Windows backend for libusb 1.0 + * Copyright (C) 2009-2010 Pete Batard + * With contributions from Michael Plante, Orin Eman et al. + * Parts of this code adapted from libusb-win32-v1 by Stephan Meyer + * Major code testing contribution by Xiaofan Chen + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#pragma once + +#if defined(_MSC_VER) +// disable /W4 MSVC warnings that are benign +#pragma warning(disable:4127) // conditional expression is constant +#pragma warning(disable:4100) // unreferenced formal parameter +#pragma warning(disable:4214) // bit field types other than int +#pragma warning(disable:4201) // nameless struct/union +#endif + +// Windows API default is uppercase - ugh! +#if !defined(bool) +#define bool BOOL +#endif +#if !defined(true) +#define true TRUE +#endif +#if !defined(false) +#define false FALSE +#endif + +// Missing from MSVC6 setupapi.h +#if !defined(SPDRP_ADDRESS) +#define SPDRP_ADDRESS 28 +#endif +#if !defined(SPDRP_INSTALL_STATE) +#define SPDRP_INSTALL_STATE 34 +#endif + +#if defined(__CYGWIN__ ) +// cygwin produces a warning unless these prototypes are defined +extern int _snprintf(char *buffer, size_t count, const char *format, ...); +extern char *_strdup(const char *strSource); +// _beginthreadex is MSVCRT => unavailable for cygwin. 
Fallback to using CreateThread +#define _beginthreadex(a, b, c, d, e, f) CreateThread(a, b, (LPTHREAD_START_ROUTINE)c, d, e, f) +#endif +#define safe_free(p) do {if (p != NULL) {free((void*)p); p = NULL;}} while(0) +#define safe_closehandle(h) do {if (h != INVALID_HANDLE_VALUE) {CloseHandle(h); h = INVALID_HANDLE_VALUE;}} while(0) +#define safe_min(a, b) min((size_t)(a), (size_t)(b)) +#define safe_strcp(dst, dst_max, src, count) do {memcpy(dst, src, safe_min(count, dst_max)); \ + ((char*)dst)[safe_min(count, dst_max)-1] = 0;} while(0) +#define safe_strcpy(dst, dst_max, src) safe_strcp(dst, dst_max, src, safe_strlen(src)+1) +#define safe_strncat(dst, dst_max, src, count) strncat(dst, src, safe_min(count, dst_max - safe_strlen(dst) - 1)) +#define safe_strcat(dst, dst_max, src) safe_strncat(dst, dst_max, src, safe_strlen(src)+1) +#define safe_strcmp(str1, str2) strcmp(((str1==NULL)?"":str1), ((str2==NULL)?"":str2)) +#define safe_strncmp(str1, str2, count) strncmp(((str1==NULL)?"":str1), ((str2==NULL)?"":str2), count) +#define safe_strlen(str) ((str==NULL)?0:strlen(str)) +#define safe_sprintf _snprintf +#define safe_unref_device(dev) do {if (dev != NULL) {libusb_unref_device(dev); dev = NULL;}} while(0) +#define wchar_to_utf8_ms(wstr, str, strlen) WideCharToMultiByte(CP_UTF8, 0, wstr, -1, str, strlen, NULL, NULL) +static inline void upperize(char* str) { + size_t i; + if (str == NULL) return; + for (i=0; ios_priv; +} + +static inline void windows_device_priv_init(libusb_device* dev) { + struct windows_device_priv* p = _device_priv(dev); + int i; + p->depth = 0; + p->port = 0; + p->parent_dev = NULL; + p->path = NULL; + p->apib = &usb_api_backend[USB_API_UNSUPPORTED]; + p->composite_api_flags = 0; + p->active_config = 0; + p->config_descriptor = NULL; + memset(&(p->dev_descriptor), 0, sizeof(USB_DEVICE_DESCRIPTOR)); + for (i=0; iusb_interface[i].path = NULL; + p->usb_interface[i].apib = &usb_api_backend[USB_API_UNSUPPORTED]; + p->usb_interface[i].nb_endpoints = 0; + 
p->usb_interface[i].endpoint = NULL; + } +} + +static inline void windows_device_priv_release(libusb_device* dev) { + struct windows_device_priv* p = _device_priv(dev); + int i; + safe_free(p->path); + if ((dev->num_configurations > 0) && (p->config_descriptor != NULL)) { + for (i=0; i < dev->num_configurations; i++) + safe_free(p->config_descriptor[i]); + } + safe_free(p->config_descriptor); + for (i=0; iusb_interface[i].path); + safe_free(p->usb_interface[i].endpoint); + } +} + +struct interface_handle_t { + HANDLE dev_handle; // WinUSB needs an extra handle for the file + HANDLE api_handle; // used by the API to communicate with the device +}; + +struct windows_device_handle_priv { + int active_interface; + struct interface_handle_t interface_handle[USB_MAXINTERFACES]; + int autoclaim_count[USB_MAXINTERFACES]; // For auto-release +}; + +static inline struct windows_device_handle_priv *_device_handle_priv( + struct libusb_device_handle *handle) +{ + return (struct windows_device_handle_priv *) handle->os_priv; +} + +// used for async polling functions +struct windows_transfer_priv { + struct winfd pollable_fd; + uint8_t interface_number; +}; + +// used to match a device driver (including filter drivers) against a supported API +struct driver_lookup { + char list[MAX_KEY_LENGTH+1];// REG_MULTI_SZ list of services (driver) names + const DWORD reg_prop; // SPDRP registry key to use to retreive list + const char* designation; // internal designation (for debug output) +}; + +/* + * API macros - from libusb-win32 1.x + */ +#define DLL_DECLARE_PREFIXNAME(api, ret, prefixname, name, args) \ + typedef ret (api * __dll_##name##_t)args; \ + static __dll_##name##_t prefixname = NULL + +#define DLL_LOAD_PREFIXNAME(dll, prefixname, name, ret_on_failure) \ + do { \ + HMODULE h = GetModuleHandleA(#dll); \ + if (!h) \ + h = LoadLibraryA(#dll); \ + if (!h) { \ + if (ret_on_failure) { return LIBUSB_ERROR_NOT_FOUND; }\ + else { break; } \ + } \ + prefixname = 
(__dll_##name##_t)GetProcAddress(h, #name); \ + if (prefixname) break; \ + prefixname = (__dll_##name##_t)GetProcAddress(h, #name "A"); \ + if (prefixname) break; \ + prefixname = (__dll_##name##_t)GetProcAddress(h, #name "W"); \ + if (prefixname) break; \ + if(ret_on_failure) \ + return LIBUSB_ERROR_NOT_FOUND; \ + } while(0) + +#define DLL_DECLARE(api, ret, name, args) DLL_DECLARE_PREFIXNAME(api, ret, name, name, args) +#define DLL_LOAD(dll, name, ret_on_failure) DLL_LOAD_PREFIXNAME(dll, name, name, ret_on_failure) +#define DLL_DECLARE_PREFIXED(api, ret, prefix, name, args) DLL_DECLARE_PREFIXNAME(api, ret, prefix##name, name, args) +#define DLL_LOAD_PREFIXED(dll, prefix, name, ret_on_failure) DLL_LOAD_PREFIXNAME(dll, prefix##name, name, ret_on_failure) + +/* OLE32 dependency */ +DLL_DECLARE_PREFIXED(WINAPI, HRESULT, p, CLSIDFromString, (LPCOLESTR, LPCLSID)); + +/* SetupAPI dependencies */ +DLL_DECLARE_PREFIXED(WINAPI, HDEVINFO, p, SetupDiGetClassDevsA, (const GUID*, PCSTR, HWND, DWORD)); +DLL_DECLARE_PREFIXED(WINAPI, BOOL, p, SetupDiEnumDeviceInfo, (HDEVINFO, DWORD, PSP_DEVINFO_DATA)); +DLL_DECLARE_PREFIXED(WINAPI, BOOL, p, SetupDiEnumDeviceInterfaces, (HDEVINFO, PSP_DEVINFO_DATA, + const GUID*, DWORD, PSP_DEVICE_INTERFACE_DATA)); +DLL_DECLARE_PREFIXED(WINAPI, BOOL, p, SetupDiGetDeviceInterfaceDetailA, (HDEVINFO, PSP_DEVICE_INTERFACE_DATA, + PSP_DEVICE_INTERFACE_DETAIL_DATA_A, DWORD, PDWORD, PSP_DEVINFO_DATA)); +DLL_DECLARE_PREFIXED(WINAPI, BOOL, p, SetupDiDestroyDeviceInfoList, (HDEVINFO)); +DLL_DECLARE_PREFIXED(WINAPI, HKEY, p, SetupDiOpenDevRegKey, (HDEVINFO, PSP_DEVINFO_DATA, DWORD, DWORD, DWORD, REGSAM)); +DLL_DECLARE_PREFIXED(WINAPI, BOOL, p, SetupDiGetDeviceRegistryPropertyA, (HDEVINFO, + PSP_DEVINFO_DATA, DWORD, PDWORD, PBYTE, DWORD, PDWORD)); +DLL_DECLARE_PREFIXED(WINAPI, LONG, p, RegQueryValueExW, (HKEY, LPCWSTR, LPDWORD, LPDWORD, LPBYTE, LPDWORD)); +DLL_DECLARE_PREFIXED(WINAPI, LONG, p, RegCloseKey, (HKEY)); + +/* + * Windows DDK API definitions. 
Most of it copied from MinGW's includes + */ +typedef DWORD DEVNODE, DEVINST; +typedef DEVNODE *PDEVNODE, *PDEVINST; +typedef DWORD RETURN_TYPE; +typedef RETURN_TYPE CONFIGRET; + +#define CR_SUCCESS 0x00000000 +#define CR_NO_SUCH_DEVNODE 0x0000000D + +#define USB_DEVICE_DESCRIPTOR_TYPE LIBUSB_DT_DEVICE +#define USB_CONFIGURATION_DESCRIPTOR_TYPE LIBUSB_DT_CONFIG +#define USB_STRING_DESCRIPTOR_TYPE LIBUSB_DT_STRING +#define USB_INTERFACE_DESCRIPTOR_TYPE LIBUSB_DT_INTERFACE +#define USB_ENDPOINT_DESCRIPTOR_TYPE LIBUSB_DT_ENDPOINT + +#define USB_REQUEST_GET_STATUS LIBUSB_REQUEST_GET_STATUS +#define USB_REQUEST_CLEAR_FEATURE LIBUSB_REQUEST_CLEAR_FEATURE +#define USB_REQUEST_SET_FEATURE LIBUSB_REQUEST_SET_FEATURE +#define USB_REQUEST_SET_ADDRESS LIBUSB_REQUEST_SET_ADDRESS +#define USB_REQUEST_GET_DESCRIPTOR LIBUSB_REQUEST_GET_DESCRIPTOR +#define USB_REQUEST_SET_DESCRIPTOR LIBUSB_REQUEST_SET_DESCRIPTOR +#define USB_REQUEST_GET_CONFIGURATION LIBUSB_REQUEST_GET_CONFIGURATION +#define USB_REQUEST_SET_CONFIGURATION LIBUSB_REQUEST_SET_CONFIGURATION +#define USB_REQUEST_GET_INTERFACE LIBUSB_REQUEST_GET_INTERFACE +#define USB_REQUEST_SET_INTERFACE LIBUSB_REQUEST_SET_INTERFACE +#define USB_REQUEST_SYNC_FRAME LIBUSB_REQUEST_SYNCH_FRAME + +#define USB_GET_NODE_INFORMATION 258 +#define USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION 260 +#define USB_GET_NODE_CONNECTION_NAME 261 +#define USB_GET_HUB_CAPABILITIES 271 +#if !defined(USB_GET_NODE_CONNECTION_INFORMATION_EX) +#define USB_GET_NODE_CONNECTION_INFORMATION_EX 274 +#endif +#if !defined(USB_GET_HUB_CAPABILITIES_EX) +#define USB_GET_HUB_CAPABILITIES_EX 276 +#endif + +#ifndef METHOD_BUFFERED +#define METHOD_BUFFERED 0 +#endif +#ifndef FILE_ANY_ACCESS +#define FILE_ANY_ACCESS 0x00000000 +#endif +#ifndef FILE_DEVICE_UNKNOWN +#define FILE_DEVICE_UNKNOWN 0x00000022 +#endif +#ifndef FILE_DEVICE_USB +#define FILE_DEVICE_USB FILE_DEVICE_UNKNOWN +#endif + +#ifndef CTL_CODE +#define CTL_CODE(DeviceType, Function, Method, Access)( \ + ((DeviceType) 
<< 16) | ((Access) << 14) | ((Function) << 2) | (Method)) +#endif + +typedef enum USB_CONNECTION_STATUS { + NoDeviceConnected, + DeviceConnected, + DeviceFailedEnumeration, + DeviceGeneralFailure, + DeviceCausedOvercurrent, + DeviceNotEnoughPower, + DeviceNotEnoughBandwidth, + DeviceHubNestedTooDeeply, + DeviceInLegacyHub +} USB_CONNECTION_STATUS, *PUSB_CONNECTION_STATUS; + +typedef enum USB_HUB_NODE { + UsbHub, + UsbMIParent +} USB_HUB_NODE; + +/* Cfgmgr32.dll interface */ +DLL_DECLARE(WINAPI, CONFIGRET, CM_Get_Parent, (PDEVINST, DEVINST, ULONG)); +DLL_DECLARE(WINAPI, CONFIGRET, CM_Get_Child, (PDEVINST, DEVINST, ULONG)); +DLL_DECLARE(WINAPI, CONFIGRET, CM_Get_Sibling, (PDEVINST, DEVINST, ULONG)); +DLL_DECLARE(WINAPI, CONFIGRET, CM_Get_Device_IDA, (DEVINST, PCHAR, ULONG, ULONG)); + +#define IOCTL_USB_GET_HUB_CAPABILITIES_EX \ + CTL_CODE( FILE_DEVICE_USB, USB_GET_HUB_CAPABILITIES_EX, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define IOCTL_USB_GET_HUB_CAPABILITIES \ + CTL_CODE(FILE_DEVICE_USB, USB_GET_HUB_CAPABILITIES, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION \ + CTL_CODE(FILE_DEVICE_USB, USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define IOCTL_USB_GET_ROOT_HUB_NAME \ + CTL_CODE(FILE_DEVICE_USB, HCD_GET_ROOT_HUB_NAME, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define IOCTL_USB_GET_NODE_INFORMATION \ + CTL_CODE(FILE_DEVICE_USB, USB_GET_NODE_INFORMATION, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define IOCTL_USB_GET_NODE_CONNECTION_INFORMATION_EX \ + CTL_CODE(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_INFORMATION_EX, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define IOCTL_USB_GET_NODE_CONNECTION_ATTRIBUTES \ + CTL_CODE(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_ATTRIBUTES, METHOD_BUFFERED, FILE_ANY_ACCESS) + +#define IOCTL_USB_GET_NODE_CONNECTION_NAME \ + CTL_CODE(FILE_DEVICE_USB, USB_GET_NODE_CONNECTION_NAME, METHOD_BUFFERED, FILE_ANY_ACCESS) + +// Most of the structures below need to be packed +#pragma 
pack(push, 1) + +typedef struct USB_INTERFACE_DESCRIPTOR { + UCHAR bLength; + UCHAR bDescriptorType; + UCHAR bInterfaceNumber; + UCHAR bAlternateSetting; + UCHAR bNumEndpoints; + UCHAR bInterfaceClass; + UCHAR bInterfaceSubClass; + UCHAR bInterfaceProtocol; + UCHAR iInterface; +} USB_INTERFACE_DESCRIPTOR, *PUSB_INTERFACE_DESCRIPTOR; + +typedef struct USB_CONFIGURATION_DESCRIPTOR { + UCHAR bLength; + UCHAR bDescriptorType; + USHORT wTotalLength; + UCHAR bNumInterfaces; + UCHAR bConfigurationValue; + UCHAR iConfiguration; + UCHAR bmAttributes; + UCHAR MaxPower; +} USB_CONFIGURATION_DESCRIPTOR, *PUSB_CONFIGURATION_DESCRIPTOR; + +typedef struct USB_CONFIGURATION_DESCRIPTOR_SHORT { + struct { + ULONG ConnectionIndex; + struct { + UCHAR bmRequest; + UCHAR bRequest; + USHORT wValue; + USHORT wIndex; + USHORT wLength; + } SetupPacket; + } req; + USB_CONFIGURATION_DESCRIPTOR data; +} USB_CONFIGURATION_DESCRIPTOR_SHORT; + +typedef struct USB_ENDPOINT_DESCRIPTOR { + UCHAR bLength; + UCHAR bDescriptorType; + UCHAR bEndpointAddress; + UCHAR bmAttributes; + USHORT wMaxPacketSize; + UCHAR bInterval; +} USB_ENDPOINT_DESCRIPTOR, *PUSB_ENDPOINT_DESCRIPTOR; + +typedef struct USB_DESCRIPTOR_REQUEST { + ULONG ConnectionIndex; + struct { + UCHAR bmRequest; + UCHAR bRequest; + USHORT wValue; + USHORT wIndex; + USHORT wLength; + } SetupPacket; +// UCHAR Data[0]; +} USB_DESCRIPTOR_REQUEST, *PUSB_DESCRIPTOR_REQUEST; + +typedef struct USB_HUB_DESCRIPTOR { + UCHAR bDescriptorLength; + UCHAR bDescriptorType; + UCHAR bNumberOfPorts; + USHORT wHubCharacteristics; + UCHAR bPowerOnToPowerGood; + UCHAR bHubControlCurrent; + UCHAR bRemoveAndPowerMask[64]; +} USB_HUB_DESCRIPTOR, *PUSB_HUB_DESCRIPTOR; + +typedef struct USB_ROOT_HUB_NAME { + ULONG ActualLength; + WCHAR RootHubName[1]; +} USB_ROOT_HUB_NAME, *PUSB_ROOT_HUB_NAME; + +typedef struct USB_ROOT_HUB_NAME_FIXED { + ULONG ActualLength; + WCHAR RootHubName[MAX_PATH_LENGTH]; +} USB_ROOT_HUB_NAME_FIXED; + +typedef struct USB_NODE_CONNECTION_NAME { + 
ULONG ConnectionIndex; + ULONG ActualLength; + WCHAR NodeName[1]; +} USB_NODE_CONNECTION_NAME, *PUSB_NODE_CONNECTION_NAME; + +typedef struct USB_NODE_CONNECTION_NAME_FIXED { + ULONG ConnectionIndex; + ULONG ActualLength; + WCHAR NodeName[MAX_PATH_LENGTH]; +} USB_NODE_CONNECTION_NAME_FIXED; + +typedef struct USB_HUB_NAME_FIXED { + union { + USB_ROOT_HUB_NAME_FIXED root; + USB_NODE_CONNECTION_NAME_FIXED node; + } u; +} USB_HUB_NAME_FIXED; + +typedef struct USB_HUB_INFORMATION { + USB_HUB_DESCRIPTOR HubDescriptor; + BOOLEAN HubIsBusPowered; +} USB_HUB_INFORMATION, *PUSB_HUB_INFORMATION; + +typedef struct USB_MI_PARENT_INFORMATION { + ULONG NumberOfInterfaces; +} USB_MI_PARENT_INFORMATION, *PUSB_MI_PARENT_INFORMATION; + +typedef struct USB_NODE_INFORMATION { + USB_HUB_NODE NodeType; + union { + USB_HUB_INFORMATION HubInformation; + USB_MI_PARENT_INFORMATION MiParentInformation; + } u; +} USB_NODE_INFORMATION, *PUSB_NODE_INFORMATION; + +typedef struct USB_PIPE_INFO { + USB_ENDPOINT_DESCRIPTOR EndpointDescriptor; + ULONG ScheduleOffset; +} USB_PIPE_INFO, *PUSB_PIPE_INFO; + +typedef struct USB_NODE_CONNECTION_INFORMATION_EX { + ULONG ConnectionIndex; + USB_DEVICE_DESCRIPTOR DeviceDescriptor; + UCHAR CurrentConfigurationValue; + UCHAR Speed; + BOOLEAN DeviceIsHub; + USHORT DeviceAddress; + ULONG NumberOfOpenPipes; + USB_CONNECTION_STATUS ConnectionStatus; +// USB_PIPE_INFO PipeList[0]; +} USB_NODE_CONNECTION_INFORMATION_EX, *PUSB_NODE_CONNECTION_INFORMATION_EX; + +typedef struct USB_HUB_CAP_FLAGS { + ULONG HubIsHighSpeedCapable:1; + ULONG HubIsHighSpeed:1; + ULONG HubIsMultiTtCapable:1; + ULONG HubIsMultiTt:1; + ULONG HubIsRoot:1; + ULONG HubIsArmedWakeOnConnect:1; + ULONG ReservedMBZ:26; +} USB_HUB_CAP_FLAGS, *PUSB_HUB_CAP_FLAGS; + +typedef struct USB_HUB_CAPABILITIES { + ULONG HubIs2xCapable : 1; +} USB_HUB_CAPABILITIES, *PUSB_HUB_CAPABILITIES; + +typedef struct USB_HUB_CAPABILITIES_EX { + USB_HUB_CAP_FLAGS CapabilityFlags; +} USB_HUB_CAPABILITIES_EX, 
*PUSB_HUB_CAPABILITIES_EX; + +#pragma pack(pop) + +/* winusb.dll interface */ + +#define SHORT_PACKET_TERMINATE 0x01 +#define AUTO_CLEAR_STALL 0x02 +#define PIPE_TRANSFER_TIMEOUT 0x03 +#define IGNORE_SHORT_PACKETS 0x04 +#define ALLOW_PARTIAL_READS 0x05 +#define AUTO_FLUSH 0x06 +#define RAW_IO 0x07 +#define MAXIMUM_TRANSFER_SIZE 0x08 +#define AUTO_SUSPEND 0x81 +#define SUSPEND_DELAY 0x83 +#define DEVICE_SPEED 0x01 +#define LowSpeed 0x01 +#define FullSpeed 0x02 +#define HighSpeed 0x03 + +typedef enum USBD_PIPE_TYPE { + UsbdPipeTypeControl, + UsbdPipeTypeIsochronous, + UsbdPipeTypeBulk, + UsbdPipeTypeInterrupt +} USBD_PIPE_TYPE; + +typedef struct { + USBD_PIPE_TYPE PipeType; + UCHAR PipeId; + USHORT MaximumPacketSize; + UCHAR Interval; +} WINUSB_PIPE_INFORMATION, *PWINUSB_PIPE_INFORMATION; + +#pragma pack(1) +typedef struct { + UCHAR request_type; + UCHAR request; + USHORT value; + USHORT index; + USHORT length; +} WINUSB_SETUP_PACKET, *PWINUSB_SETUP_PACKET; +#pragma pack() + +typedef void *WINUSB_INTERFACE_HANDLE, *PWINUSB_INTERFACE_HANDLE; + +DLL_DECLARE(WINAPI, BOOL, WinUsb_Initialize, (HANDLE, PWINUSB_INTERFACE_HANDLE)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_Free, (WINUSB_INTERFACE_HANDLE)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_GetAssociatedInterface, (WINUSB_INTERFACE_HANDLE, UCHAR, PWINUSB_INTERFACE_HANDLE)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_GetDescriptor, (WINUSB_INTERFACE_HANDLE, UCHAR, UCHAR, USHORT, PUCHAR, ULONG, PULONG)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_QueryInterfaceSettings, (WINUSB_INTERFACE_HANDLE, UCHAR, PUSB_INTERFACE_DESCRIPTOR)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_QueryDeviceInformation, (WINUSB_INTERFACE_HANDLE, ULONG, PULONG, PVOID)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_SetCurrentAlternateSetting, (WINUSB_INTERFACE_HANDLE, UCHAR)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_GetCurrentAlternateSetting, (WINUSB_INTERFACE_HANDLE, PUCHAR)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_QueryPipe, (WINUSB_INTERFACE_HANDLE, UCHAR, UCHAR, PWINUSB_PIPE_INFORMATION)); 
+DLL_DECLARE(WINAPI, BOOL, WinUsb_SetPipePolicy, (WINUSB_INTERFACE_HANDLE, UCHAR, ULONG, ULONG, PVOID)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_GetPipePolicy, (WINUSB_INTERFACE_HANDLE, UCHAR, ULONG, PULONG, PVOID)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_ReadPipe, (WINUSB_INTERFACE_HANDLE, UCHAR, PUCHAR, ULONG, PULONG, LPOVERLAPPED)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_WritePipe, (WINUSB_INTERFACE_HANDLE, UCHAR, PUCHAR, ULONG, PULONG, LPOVERLAPPED)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_ControlTransfer, (WINUSB_INTERFACE_HANDLE, WINUSB_SETUP_PACKET, PUCHAR, ULONG, PULONG, LPOVERLAPPED)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_ResetPipe, (WINUSB_INTERFACE_HANDLE, UCHAR)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_AbortPipe, (WINUSB_INTERFACE_HANDLE, UCHAR)); +DLL_DECLARE(WINAPI, BOOL, WinUsb_FlushPipe, (WINUSB_INTERFACE_HANDLE, UCHAR)); diff --git a/compat/libusb-1.0/libusb/sync.c b/compat/libusb-1.0/libusb/sync.c new file mode 100644 index 0000000..ac3ab7e --- /dev/null +++ b/compat/libusb-1.0/libusb/sync.c @@ -0,0 +1,322 @@ +/* + * Synchronous I/O functions for libusb + * Copyright (C) 2007-2008 Daniel Drake + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include + +#include "libusbi.h" + +/** + * @defgroup syncio Synchronous device I/O + * + * This page documents libusb's synchronous (blocking) API for USB device I/O. + * This interface is easy to use but has some limitations. More advanced users + * may wish to consider using the \ref asyncio "asynchronous I/O API" instead. + */ + +static void LIBUSB_CALL ctrl_transfer_cb(struct libusb_transfer *transfer) +{ + int *completed = transfer->user_data; + *completed = 1; + usbi_dbg("actual_length=%d", transfer->actual_length); + /* caller interprets result and frees transfer */ +} + +/** \ingroup syncio + * Perform a USB control transfer. + * + * The direction of the transfer is inferred from the bmRequestType field of + * the setup packet. + * + * The wValue, wIndex and wLength fields values should be given in host-endian + * byte order. + * + * \param dev_handle a handle for the device to communicate with + * \param bmRequestType the request type field for the setup packet + * \param bRequest the request field for the setup packet + * \param wValue the value field for the setup packet + * \param wIndex the index field for the setup packet + * \param data a suitably-sized data buffer for either input or output + * (depending on direction bits within bmRequestType) + * \param wLength the length field for the setup packet. The data buffer should + * be at least this size. + * \param timeout timeout (in millseconds) that this function should wait + * before giving up due to no response being received. For an unlimited + * timeout, use value 0. 
+ * \returns on success, the number of bytes actually transferred + * \returns LIBUSB_ERROR_TIMEOUT if the transfer timed out + * \returns LIBUSB_ERROR_PIPE if the control request was not supported by the + * device + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns another LIBUSB_ERROR code on other failures + */ +int API_EXPORTED libusb_control_transfer(libusb_device_handle *dev_handle, + uint8_t bmRequestType, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, + unsigned char *data, uint16_t wLength, unsigned int timeout) +{ + struct libusb_transfer *transfer = libusb_alloc_transfer(0); + unsigned char *buffer; + int completed = 0; + int r; + + if (!transfer) + return LIBUSB_ERROR_NO_MEM; + + buffer = malloc(LIBUSB_CONTROL_SETUP_SIZE + wLength); + if (!buffer) { + libusb_free_transfer(transfer); + return LIBUSB_ERROR_NO_MEM; + } + + libusb_fill_control_setup(buffer, bmRequestType, bRequest, wValue, wIndex, + wLength); + if ((bmRequestType & LIBUSB_ENDPOINT_DIR_MASK) == LIBUSB_ENDPOINT_OUT) + memcpy(buffer + LIBUSB_CONTROL_SETUP_SIZE, data, wLength); + + libusb_fill_control_transfer(transfer, dev_handle, buffer, + ctrl_transfer_cb, &completed, timeout); + transfer->flags = LIBUSB_TRANSFER_FREE_BUFFER; + r = libusb_submit_transfer(transfer); + if (r < 0) { + libusb_free_transfer(transfer); + return r; + } + + while (!completed) { + r = libusb_handle_events_completed(HANDLE_CTX(dev_handle), &completed); + if (r < 0) { + if (r == LIBUSB_ERROR_INTERRUPTED) + continue; + libusb_cancel_transfer(transfer); + while (!completed) + if (libusb_handle_events_completed(HANDLE_CTX(dev_handle), &completed) < 0) + break; + libusb_free_transfer(transfer); + return r; + } + } + + if ((bmRequestType & LIBUSB_ENDPOINT_DIR_MASK) == LIBUSB_ENDPOINT_IN) + memcpy(data, libusb_control_transfer_get_data(transfer), + transfer->actual_length); + + switch (transfer->status) { + case LIBUSB_TRANSFER_COMPLETED: + r = transfer->actual_length; + break; + case 
LIBUSB_TRANSFER_TIMED_OUT: + r = LIBUSB_ERROR_TIMEOUT; + break; + case LIBUSB_TRANSFER_STALL: + r = LIBUSB_ERROR_PIPE; + break; + case LIBUSB_TRANSFER_NO_DEVICE: + r = LIBUSB_ERROR_NO_DEVICE; + break; + case LIBUSB_TRANSFER_OVERFLOW: + r = LIBUSB_ERROR_OVERFLOW; + break; + case LIBUSB_TRANSFER_ERROR: + case LIBUSB_TRANSFER_CANCELLED: + r = LIBUSB_ERROR_IO; + break; + default: + usbi_warn(HANDLE_CTX(dev_handle), + "unrecognised status code %d", transfer->status); + r = LIBUSB_ERROR_OTHER; + } + + libusb_free_transfer(transfer); + return r; +} + +static void LIBUSB_CALL bulk_transfer_cb(struct libusb_transfer *transfer) +{ + int *completed = transfer->user_data; + *completed = 1; + usbi_dbg("actual_length=%d", transfer->actual_length); + /* caller interprets results and frees transfer */ +} + +static int do_sync_bulk_transfer(struct libusb_device_handle *dev_handle, + unsigned char endpoint, unsigned char *buffer, int length, + int *transferred, unsigned int timeout, unsigned char type) +{ + struct libusb_transfer *transfer = libusb_alloc_transfer(0); + int completed = 0; + int r; + + if (!transfer) + return LIBUSB_ERROR_NO_MEM; + + libusb_fill_bulk_transfer(transfer, dev_handle, endpoint, buffer, length, + bulk_transfer_cb, &completed, timeout); + transfer->type = type; + + r = libusb_submit_transfer(transfer); + if (r < 0) { + libusb_free_transfer(transfer); + return r; + } + + while (!completed) { + r = libusb_handle_events_completed(HANDLE_CTX(dev_handle), &completed); + if (r < 0) { + if (r == LIBUSB_ERROR_INTERRUPTED) + continue; + libusb_cancel_transfer(transfer); + while (!completed) + if (libusb_handle_events_completed(HANDLE_CTX(dev_handle), &completed) < 0) + break; + libusb_free_transfer(transfer); + return r; + } + } + + *transferred = transfer->actual_length; + switch (transfer->status) { + case LIBUSB_TRANSFER_COMPLETED: + r = 0; + break; + case LIBUSB_TRANSFER_TIMED_OUT: + r = LIBUSB_ERROR_TIMEOUT; + break; + case LIBUSB_TRANSFER_STALL: + r = 
LIBUSB_ERROR_PIPE; + break; + case LIBUSB_TRANSFER_OVERFLOW: + r = LIBUSB_ERROR_OVERFLOW; + break; + case LIBUSB_TRANSFER_NO_DEVICE: + r = LIBUSB_ERROR_NO_DEVICE; + break; + case LIBUSB_TRANSFER_ERROR: + case LIBUSB_TRANSFER_CANCELLED: + r = LIBUSB_ERROR_IO; + break; + default: + usbi_warn(HANDLE_CTX(dev_handle), + "unrecognised status code %d", transfer->status); + r = LIBUSB_ERROR_OTHER; + } + + libusb_free_transfer(transfer); + return r; +} + +/** \ingroup syncio + * Perform a USB bulk transfer. The direction of the transfer is inferred from + * the direction bits of the endpoint address. + * + * For bulk reads, the length field indicates the maximum length of + * data you are expecting to receive. If less data arrives than expected, + * this function will return that data, so be sure to check the + * transferred output parameter. + * + * You should also check the transferred parameter for bulk writes. + * Not all of the data may have been written. + * + * Also check transferred when dealing with a timeout error code. + * libusb may have to split your transfer into a number of chunks to satisfy + * underlying O/S requirements, meaning that the timeout may expire after + * the first few chunks have completed. libusb is careful not to lose any data + * that may have been transferred; do not assume that timeout conditions + * indicate a complete lack of I/O. + * + * \param dev_handle a handle for the device to communicate with + * \param endpoint the address of a valid endpoint to communicate with + * \param data a suitably-sized data buffer for either input or output + * (depending on endpoint) + * \param length for bulk writes, the number of bytes from data to be sent. for + * bulk reads, the maximum number of bytes to receive into the data buffer. + * \param transferred output location for the number of bytes actually + * transferred. 
+ * \param timeout timeout (in millseconds) that this function should wait + * before giving up due to no response being received. For an unlimited + * timeout, use value 0. + * + * \returns 0 on success (and populates transferred) + * \returns LIBUSB_ERROR_TIMEOUT if the transfer timed out (and populates + * transferred) + * \returns LIBUSB_ERROR_PIPE if the endpoint halted + * \returns LIBUSB_ERROR_OVERFLOW if the device offered more data, see + * \ref packetoverflow + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns another LIBUSB_ERROR code on other failures + */ +int API_EXPORTED libusb_bulk_transfer(struct libusb_device_handle *dev_handle, + unsigned char endpoint, unsigned char *data, int length, int *transferred, + unsigned int timeout) +{ + return do_sync_bulk_transfer(dev_handle, endpoint, data, length, + transferred, timeout, LIBUSB_TRANSFER_TYPE_BULK); +} + +/** \ingroup syncio + * Perform a USB interrupt transfer. The direction of the transfer is inferred + * from the direction bits of the endpoint address. + * + * For interrupt reads, the length field indicates the maximum length + * of data you are expecting to receive. If less data arrives than expected, + * this function will return that data, so be sure to check the + * transferred output parameter. + * + * You should also check the transferred parameter for interrupt + * writes. Not all of the data may have been written. + * + * Also check transferred when dealing with a timeout error code. + * libusb may have to split your transfer into a number of chunks to satisfy + * underlying O/S requirements, meaning that the timeout may expire after + * the first few chunks have completed. libusb is careful not to lose any data + * that may have been transferred; do not assume that timeout conditions + * indicate a complete lack of I/O. + * + * The default endpoint bInterval value is used as the polling interval. 
+ * + * \param dev_handle a handle for the device to communicate with + * \param endpoint the address of a valid endpoint to communicate with + * \param data a suitably-sized data buffer for either input or output + * (depending on endpoint) + * \param length for bulk writes, the number of bytes from data to be sent. for + * bulk reads, the maximum number of bytes to receive into the data buffer. + * \param transferred output location for the number of bytes actually + * transferred. + * \param timeout timeout (in millseconds) that this function should wait + * before giving up due to no response being received. For an unlimited + * timeout, use value 0. + * + * \returns 0 on success (and populates transferred) + * \returns LIBUSB_ERROR_TIMEOUT if the transfer timed out + * \returns LIBUSB_ERROR_PIPE if the endpoint halted + * \returns LIBUSB_ERROR_OVERFLOW if the device offered more data, see + * \ref packetoverflow + * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected + * \returns another LIBUSB_ERROR code on other error + */ +int API_EXPORTED libusb_interrupt_transfer( + struct libusb_device_handle *dev_handle, unsigned char endpoint, + unsigned char *data, int length, int *transferred, unsigned int timeout) +{ + return do_sync_bulk_transfer(dev_handle, endpoint, data, length, + transferred, timeout, LIBUSB_TRANSFER_TYPE_INTERRUPT); +} + diff --git a/compat/libusb-1.0/libusb/version.h b/compat/libusb-1.0/libusb/version.h new file mode 100644 index 0000000..01ee5ef --- /dev/null +++ b/compat/libusb-1.0/libusb/version.h @@ -0,0 +1,18 @@ +/* This file is parsed by m4 and windres and RC.EXE so please keep it simple. */ +#ifndef LIBUSB_MAJOR +#define LIBUSB_MAJOR 1 +#endif +#ifndef LIBUSB_MINOR +#define LIBUSB_MINOR 0 +#endif +#ifndef LIBUSB_MICRO +#define LIBUSB_MICRO 16 +#endif +/* LIBUSB_NANO may be used for Windows internal versioning. 0 means unused. 
*/ +#ifndef LIBUSB_NANO +#define LIBUSB_NANO 0 +#endif +/* LIBUSB_RC is the release candidate suffix. Should normally be empty. */ +#ifndef LIBUSB_RC +#define LIBUSB_RC "-rc10" +#endif diff --git a/compat/libusb-1.0/m4/.gitignore b/compat/libusb-1.0/m4/.gitignore new file mode 100644 index 0000000..464ba5c --- /dev/null +++ b/compat/libusb-1.0/m4/.gitignore @@ -0,0 +1,5 @@ +libtool.m4 +lt~obsolete.m4 +ltoptions.m4 +ltsugar.m4 +ltversion.m4 diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..c2884df --- /dev/null +++ b/configure.ac @@ -0,0 +1,832 @@ +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +m4_define([v_maj], [1]) +m4_define([v_min], [0]) +m4_define([v_mic], [0]) +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +m4_define([v_ver], [v_maj.v_min.v_mic]) +m4_define([lt_rev], m4_eval(v_maj + v_min)) +m4_define([lt_cur], v_mic) +m4_define([lt_age], v_min) +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## + +AC_INIT([bmminer], [v_ver], [support@bitmain.com]) + +AC_PREREQ(2.59) +AC_CANONICAL_SYSTEM +AC_CONFIG_MACRO_DIR([m4]) +AC_CONFIG_SRCDIR([cgminer.c]) +AC_CONFIG_HEADERS([config.h]) + +AM_INIT_AUTOMAKE([foreign subdir-objects]) +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) +AC_USE_SYSTEM_EXTENSIONS + +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +m4_ifdef([v_rev], , [m4_define([v_rev], [0])]) +m4_ifdef([v_rel], , [m4_define([v_rel], [])]) +AC_DEFINE_UNQUOTED(CGMINER_MAJOR_VERSION, [v_maj], [Major version]) +AC_DEFINE_UNQUOTED(CGMINER_MINOR_VERSION, [v_min], [Minor version]) +AC_DEFINE_UNQUOTED(CGMINER_MINOR_SUBVERSION, [v_mic], [Micro version]) +version_info="lt_rev:lt_cur:lt_age" +release_info="v_rel" +AC_SUBST(version_info) +AC_SUBST(release_info) 
+##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## +VMAJ=v_maj +AC_SUBST(VMAJ) + +AC_CANONICAL_BUILD +AC_CANONICAL_HOST + +dnl Make sure anyone changing configure.ac/Makefile.am has a clue +AM_MAINTAINER_MODE + +dnl Checks for programs +AC_PROG_CC +gl_EARLY +AC_PROG_GCC_TRADITIONAL +AM_PROG_CC_C_O +LT_INIT([disable-shared]) + +gl_INIT + +dnl Checks for header files. +AC_HEADER_STDC +AC_CHECK_HEADERS(syslog.h) + +AC_FUNC_ALLOCA + +have_win32=false +PTHREAD_FLAGS="-lpthread" +DLOPEN_FLAGS="-ldl" +WS2_LIBS="" +MM_LIBS="" +MATH_LIBS="-lm" +RT_LIBS="-lrt" + +case $target in + amd64-*) + have_x86_64=true + ;; + x86_64-*) + have_x86_64=true + ;; + *) + have_x86_64=false + ;; +esac + +case $target in + *-*-linux-gnu*) + have_linux=true + ;; + *-*-mingw*) + have_win32=true + PTHREAD_FLAGS="" + DLOPEN_FLAGS="" + WS2_LIBS="-lws2_32" + MM_LIBS="-lwinmm" + RT_LIBS="" + AC_DEFINE([_WIN32_WINNT], [0x0501], "WinNT version for XP+ support") + ;; + powerpc-*-darwin*) + have_darwin=true + CFLAGS="$CFLAGS -faltivec" + PTHREAD_FLAGS="" + RT_LIBS="" + ;; + *-*-darwin*) + have_darwin=true + PTHREAD_FLAGS="" + RT_LIBS="" + ;; +esac + +has_winpthread=false +if test "x$have_win32" = xtrue; then + has_winpthread=true + AC_CHECK_LIB(winpthread, nanosleep, , has_winpthread=false) + PTHREAD_LIBS=-lwinpthread +fi + +if test "x$has_winpthread" != xtrue; then + AC_CHECK_LIB(pthread, pthread_create, , + AC_MSG_ERROR([Could not find pthread library - please install libpthread])) + PTHREAD_LIBS=-lpthread +fi + +# Drivers that are designed to be run on dedicated hardware should set standalone to yes +# All drivers should prepend an x to the drivercount + +standalone="no" +bmsc="no" +drivercount="" + +AC_ARG_ENABLE([bmsc], + [AC_HELP_STRING([--enable-bmsc],[Compile support for Bitmain Single Chain (default disabled)])], + [bmsc=$enableval] + ) +if test "x$bmsc" = xyes; then + AC_DEFINE([USE_BMSC], [1], [Defined to 1 if 
Bitmain Single Chain support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_BMSC], [test x$bmsc = xyes]) + + +bitmain="no" + +AC_ARG_ENABLE([bitmain], + [AC_HELP_STRING([--enable-bitmain],[Compile support for Bitmain Multi Chain (default disabled)])], + [bitmain=$enableval] + ) +if test "x$bitmain" = xyes; then + AC_DEFINE([USE_BITMAIN], [1], [Defined to 1 if Bitmain Multi Chain support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_BITMAIN], [test x$bitmain = xyes]) + +bitmain_c5="no" + +AC_ARG_ENABLE([bitmain_c5], + [AC_HELP_STRING([--enable-bitmain-c5],[Compile support for Bitmain C5(default disabled)])], + [bitmain_c5=$enableval] + ) +if test "x$bitmain_c5" = xyes; then + AC_DEFINE([USE_BITMAIN_C5], [1], [Defined to 1 if Bitmain C5 support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_BITMAIN_C5], [test x$bitmain_c5 = xyes]) + + +avalon="no" + +AC_ARG_ENABLE([avalon], + [AC_HELP_STRING([--enable-avalon],[Compile support for Avalon (default disabled)])], + [avalon=$enableval] + ) +if test "x$avalon" = xyes; then + AC_DEFINE([USE_AVALON], [1], [Defined to 1 if Avalon support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_AVALON], [test x$avalon = xyes]) + + +avalon2="no" + +AC_ARG_ENABLE([avalon2], + [AC_HELP_STRING([--enable-avalon2],[Compile support for Avalon2 (default disabled)])], + [avalon2=$enableval] + ) +if test "x$avalon2" = xyes; then + AC_DEFINE([USE_AVALON2], [1], [Defined to 1 if Avalon2 support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_AVALON2], [test x$avalon2 = xyes]) + + +avalon4="no" + +AC_ARG_ENABLE([avalon4], + [AC_HELP_STRING([--enable-avalon4],[Compile support for Avalon4 (default disabled)])], + [avalon4=$enableval] + ) +if test "x$avalon4" = xyes; then + AC_DEFINE([USE_AVALON4], [1], [Defined to 1 if Avalon4 support is wanted]) +fi +AM_CONDITIONAL([HAS_AVALON4], [test x$avalon4 = xyes]) + 
+ +bab="no" + +AC_ARG_ENABLE([bab], + [AC_HELP_STRING([--enable-bab],[Compile support for BlackArrow Bitfury STANDALONE(default disabled)])], + [bab=$enableval] + ) +if test "x$bab" = xyes; then + AC_DEFINE([USE_BAB], [1], [Defined to 1 if BlackArrow Bitfury support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_BAB], [test x$bab = xyes]) + + +bflsc="no" + +AC_ARG_ENABLE([bflsc], + [AC_HELP_STRING([--enable-bflsc],[Compile support for BFL ASICs (default disabled)])], + [bflsc=$enableval] + ) +if test "x$bflsc" = xyes; then + AC_DEFINE([USE_BFLSC], [1], [Defined to 1 if BFL ASIC support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_BFLSC], [test x$bflsc = xyes]) + + +bitforce="no" + +AC_ARG_ENABLE([bitforce], + [AC_HELP_STRING([--enable-bitforce],[Compile support for BitForce FPGAs (default disabled)])], + [bitforce=$enableval] + ) +if test "x$bitforce" = xyes; then + AC_DEFINE([USE_BITFORCE], [1], [Defined to 1 if BitForce support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_BITFORCE], [test x$bitforce = xyes]) + + +bitfury="no" + +AC_ARG_ENABLE([bitfury], + [AC_HELP_STRING([--enable-bitfury],[Compile support for BitFury ASICs (default disabled)])], + [bitfury=$enableval] + ) +if test "x$bitfury" = xyes; then + AC_DEFINE([USE_BITFURY], [1], [Defined to 1 if BitFury ASIC support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_BITFURY], [test x$bitfury = xyes]) + + +bitmine_A1="no" + +AC_ARG_ENABLE([bitmine_A1], + [AC_HELP_STRING([--enable-bitmine_A1],[Compile support for Bitmine.ch A1 ASICs STANDALONE(default disabled)])], + [bitmine_A1=$enableval] + ) +if test "x$bitmine_A1" = xyes; then + AC_DEFINE([USE_BITMINE_A1], [1], [Defined to 1 if Bitmine A1 support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_BITMINE_A1], [test x$bitmine_A1 = xyes]) + + +blockerupter="no" + +AC_ARG_ENABLE([blockerupter], + 
[AC_HELP_STRING([--enable-blockerupter],[Compile support for BlockErupter (default disabled)])], + [blockerupter=$enableval] + ) +if test "x$blockerupter" = xyes; then + AC_DEFINE([USE_BLOCKERUPTER], [1], [Defined to 1 if BlockErupter support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_BLOCKERUPTER], [test x$blockerupter = xyes]) + + +cointerra="no" + +AC_ARG_ENABLE([cointerra], + [AC_HELP_STRING([--enable-cointerra],[Compile support for Cointerra ASICs (default disabled)])], + [cointerra=$enableval] + ) +if test "x$cointerra" = xyes; then + AC_DEFINE([USE_COINTERRA], [1], [Defined to 1 if Cointerra support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_COINTERRA], [test x$cointerra = xyes]) + + +drillbit="no" + +AC_ARG_ENABLE([drillbit], + [AC_HELP_STRING([--enable-drillbit],[Compile support for Drillbit BitFury ASICs (default disabled)])], + [drillbit=$enableval] + ) +if test "x$drillbit" = xyes; then + AC_DEFINE([USE_DRILLBIT], [1], [Defined to 1 if Drillbit BitFury support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_DRILLBIT], [test x$drillbit = xyes]) + + +hashfast="no" + +AC_ARG_ENABLE([hashfast], + [AC_HELP_STRING([--enable-hashfast],[Compile support for Hashfast (default disabled)])], + [hashfast=$enableval] + ) +if test "x$hashfast" = xyes; then + AC_DEFINE([USE_HASHFAST], [1], [Defined to 1 if Hashfast support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_HASHFAST], [test x$hashfast = xyes]) + + +hashratio="no" + +AC_ARG_ENABLE([hashratio], + [AC_HELP_STRING([--enable-hashratio],[Compile support for Hashratio (default disabled)])], + [hashratio=$enableval] + ) +if test "x$hashratio" = xyes; then + AC_DEFINE([USE_HASHRATIO], [1], [Defined to 1 if Hashratiosupport is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_HASHRATIO], [test x$hashratio = xyes]) + + +icarus="no" + +AC_ARG_ENABLE([icarus], + [AC_HELP_STRING([--enable-icarus],[Compile support for Icarus 
(default disabled)])], + [icarus=$enableval] + ) +if test "x$icarus" = xyes; then + AC_DEFINE([USE_ICARUS], [1], [Defined to 1 if Icarus support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_ICARUS], [test x$icarus = xyes]) + + +klondike="no" + +AC_ARG_ENABLE([klondike], + [AC_HELP_STRING([--enable-klondike],[Compile support for Klondike (default disabled)])], + [klondike=$enableval] + ) +if test "x$klondike" = xyes; then + AC_DEFINE([USE_KLONDIKE], [1], [Defined to 1 if Klondike support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_KLONDIKE], [test x$klondike = xyes]) + + +knc="no" + +AC_ARG_ENABLE([knc], + [AC_HELP_STRING([--enable-knc],[Compile support for KnC miners STANDALONE(default disabled)])], + [knc=$enableval] + ) +if test "x$knc" = xyes; then + AC_DEFINE([USE_KNC], [1], [Defined to 1 if KnC miner support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_KNC], [test x$knc = xyes]) + + +minion="no" + +AC_ARG_ENABLE([minion], + [AC_HELP_STRING([--enable-minion],[Compile support for Minion BlackArrow ASIC STANDALONE(default disabled)])], + [minion=$enableval] + ) +if test "x$minion" = xyes; then + AC_DEFINE([USE_MINION], [1], [Defined to 1 if Minion BlackArrow ASIC support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_MINION], [test x$minion = xyes]) + + +modminer="no" + +AC_ARG_ENABLE([modminer], + [AC_HELP_STRING([--enable-modminer],[Compile support for ModMiner FPGAs(default disabled)])], + [modminer=$enableval] + ) +if test "x$modminer" = xyes; then + AC_DEFINE([USE_MODMINER], [1], [Defined to 1 if ModMiner support is wanted]) + drivercount=x$drivercount +fi +AM_CONDITIONAL([HAS_MODMINER], [test x$modminer = xyes]) + + +sp10="no" + +AC_ARG_ENABLE([sp10], + [AC_HELP_STRING([--enable-sp10],[Compile support for Spondoolies SP10 STANDALONE(default disabled)])], + [sp10=$enableval] + ) +if test "x$sp10" = xyes; then + AC_DEFINE([USE_SP10], [1], 
[Defined to 1 if Spondoolies SP10 support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_SP10], [test x$sp10 = xyes]) + + + +sp30="no" + +AC_ARG_ENABLE([sp30], + [AC_HELP_STRING([--enable-sp30],[Compile support for Spondoolies SP30 STANDALONE(default disabled)])], + [sp30=$enableval] + ) +if test "x$sp30" = xyes; then + AC_DEFINE([USE_SP30], [1], [Defined to 1 if SP30 support is wanted]) + drivercount=x$drivercount + standalone="yes" +fi +AM_CONDITIONAL([HAS_SP30], [test x$sp30 = xyes]) + + +forcecombo="no" + +AC_ARG_ENABLE([forcecombo], + [AC_HELP_STRING([--enable-forcecombo],[Allow combinations of drivers not intended to be built together(default disabled)])], + [forcecombo=$enableval] + ) +if test "x$forcecombo" = xyes; then + standalone="no" +fi + +curses="auto" + +AC_ARG_WITH([curses], + [AC_HELP_STRING([--without-curses],[Compile support for curses TUI (default enabled)])], + [curses=$withval] + ) +if test "x$curses" = "xno"; then + cursesmsg='User specified --without-curses. TUI support DISABLED' +else + AC_SEARCH_LIBS(addstr, ncurses pdcurses, [ + curses=yes + cursesmsg="FOUND: ${ac_cv_search_addstr}" + AC_DEFINE([HAVE_CURSES], [1], [Defined to 1 if curses TUI support is wanted]) + ], [ + if test "x$curses" = "xyes"; then + AC_MSG_ERROR([Could not find curses library - please install libncurses-dev or pdcurses-dev (or configure --without-curses)]) + else + AC_MSG_WARN([Could not find curses library - if you want a TUI, install libncurses-dev or pdcurses-dev]) + curses=no + cursesmsg='NOT FOUND. TUI support DISABLED' + fi + ]) +fi + + +#Add a new device to this list if it needs libusb, along with a no on the end. 
+if test x$avalon$avalon2$avalon4$bitforce$bitfury$blockerupter$modminer$bflsc$icarus$hashfast$hashratio$klondike$drillbit$cointerra$bmsc$bitmain != xnononononononononononononononono; then + want_usbutils=true +else + want_usbutils=false +fi + +if test x$bitfury != xno; then + want_libbitfury=true +else + want_libbitfury=false +fi + +if test x$avalon2$avalon4$hashratio$bitmain_c5 != xnonono; then + want_crc16=true +else + want_crc16=false +fi + +AM_CONDITIONAL([NEED_FPGAUTILS], [test x$modminer != xno]) +AM_CONDITIONAL([WANT_USBUTILS], [test x$want_usbutils != xfalse]) +AM_CONDITIONAL([WANT_LIBBITFURY], [test x$want_libbitfury != xfalse]) +AM_CONDITIONAL([HAVE_CURSES], [test x$curses = xyes]) +AM_CONDITIONAL([HAVE_WINDOWS], [test x$have_win32 = xtrue]) +AM_CONDITIONAL([HAVE_x86_64], [test x$have_x86_64 = xtrue]) +AM_CONDITIONAL([WANT_CRC16], [test x$want_crc16 != xfalse]) + +if test "x$want_usbutils" != xfalse; then + dlibusb="no" + AC_DEFINE([USE_USBUTILS], [1], [Defined to 1 if usbutils support required]) + AC_ARG_WITH([system-libusb], + [AC_HELP_STRING([--with-system-libusb],[NOT RECOMMENDED! Compile against dynamic system libusb. 
(Default use included static libusb)])], + [dlibusb=$withval] + ) + + if test "x$dlibusb" != xno; then + case $target in + *-*-freebsd*) + LIBUSB_LIBS="-lusb" + LIBUSB_CFLAGS="" + AC_DEFINE(HAVE_LIBUSB, 1, [Define if you have libusb-1.0]) + ;; + *) + PKG_CHECK_MODULES(LIBUSB, libusb-1.0, [AC_DEFINE(HAVE_LIBUSB, 1, [Define if you have libusb-1.0])], [AC_MSG_ERROR([Could not find usb library - please install libusb-1.0])]) + ;; + esac + else + AC_CONFIG_SUBDIRS([compat/libusb-1.0]) + LIBUSB_LIBS="compat/libusb-1.0/libusb/.libs/libusb-1.0.a" + if test "x$have_linux" = "xtrue"; then + AC_ARG_ENABLE([udev], + [AC_HELP_STRING([--disable-udev],[Disable building libusb with udev])], + [udev=$enableval] + ) + + if test "x$udev" != xno; then + LIBUSB_LIBS+=" -ludev" + fi + fi + if test "x$have_darwin" = "xtrue"; then + LIBUSB_LIBS+=" -lobjc" + LDFLAGS+=" -framework CoreFoundation -framework IOKit" + fi + fi +else + LIBUSB_LIBS="" +fi + +AM_CONDITIONAL([WANT_STATIC_LIBUSB], [test x$dlibusb = xno]) + +AC_CONFIG_SUBDIRS([compat/jansson-2.6]) +JANSSON_LIBS="compat/jansson-2.6/src/.libs/libjansson.a" + +PKG_PROG_PKG_CONFIG() + +if test "x$have_cgminer_sdk" = "xtrue"; then + if test "x$have_x86_64" = xtrue; then + ARCH_DIR=x86_64 + else + ARCH_DIR=x86 + fi + PKG_CONFIG="${PKG_CONFIG:-pkg-config} --define-variable=arch=$ARCH_DIR --define-variable=target=$target --define-variable=cgminersdkdir=$CGMINER_SDK" + PKG_CONFIG_PATH="$CGMINER_SDK/lib/pkgconfig${PKG_CONFIG_PATH:+:$PKG_CONFIG_PATH}" +fi + +AC_SUBST(LIBUSB_LIBS) +AC_SUBST(LIBUSB_CFLAGS) + +AC_ARG_ENABLE([libcurl], + [AC_HELP_STRING([--disable-libcurl],[Disable building with libcurl for getwork and GBT support])], + [libcurl=$enableval] + ) + +if test "x$libcurl" != xno; then + if test "x$have_win32" != xtrue; then + PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.25.0], [AC_DEFINE([CURL_HAS_KEEPALIVE], [1], [Defined if version of curl supports keepalive.])], + [PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.18.2], 
,[AC_MSG_ERROR([Missing required libcurl dev >= 7.18.2])])]) + else + PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.25.0], ,[AC_MSG_ERROR([Missing required libcurl dev >= 7.25.0])]) + AC_DEFINE([CURL_HAS_KEEPALIVE], [1]) + fi + AC_DEFINE([HAVE_LIBCURL], [1], [Defined to 1 if libcurl support built in]) +else + LIBCURL_LIBS="" +fi +AC_SUBST(LIBCURL_LIBS) + + +#check execv signature +AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + #include + int execv(const char*, const char*const*); + ])], + AC_DEFINE([EXECV_2ND_ARG_TYPE], [const char* const*], [int execv(const char*, const char*const*);]), + AC_DEFINE([EXECV_2ND_ARG_TYPE], [char* const*], [int execv(const char*, char*const*);])) + +dnl CCAN wants to know a lot of vars. +# All the configuration checks. Regrettably, the __attribute__ checks will +# give false positives on old GCCs, since they just cause warnings. But that's +# fairly harmless. +AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((cold)) cleanup(void) { }])], + AC_DEFINE([HAVE_ATTRIBUTE_COLD], [1], + [Define if __attribute__((cold))])) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((const)) cleanup(void) { }])], + AC_DEFINE([HAVE_ATTRIBUTE_CONST], [1], + [Define if __attribute__((const))])) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((noreturn)) cleanup(void) { exit(1); }])], + AC_DEFINE([HAVE_ATTRIBUTE_NORETURN], [1], + [Define if __attribute__((noreturn))])) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((format(__printf__, 1, 2))) cleanup(const char *fmt, ...) 
{ }])], + AC_DEFINE([HAVE_ATTRIBUTE_PRINTF], [1], + [Define if __attribute__((format(__printf__)))])) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((unused)) cleanup(void) { }])], + AC_DEFINE([HAVE_ATTRIBUTE_UNUSED], [1], + [Define if __attribute__((unused))])) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((used)) cleanup(void) { }])], + AC_DEFINE([HAVE_ATTRIBUTE_USED], [1], + [Define if __attribute__((used))])) +AC_LINK_IFELSE([AC_LANG_SOURCE([int main(void) { return __builtin_constant_p(1) ? 0 : 1; }])], + AC_DEFINE([HAVE_BUILTIN_CONSTANT_P], [1], + [Define if have __builtin_constant_p])) +AC_LINK_IFELSE([AC_LANG_SOURCE([int main(void) { return __builtin_types_compatible_p(char *, int) ? 1 : 0; }])], + AC_DEFINE([HAVE_BUILTIN_TYPES_COMPATIBLE_P], [1], + [Define if have __builtin_types_compatible_p])) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([static int __attribute__((warn_unused_result)) func(int x) { return x; }])], + AC_DEFINE([HAVE_WARN_UNUSED_RESULT], [1], + [Define if __attribute__((warn_unused_result))])) + +if test "x$prefix" = xNONE; then + prefix=/usr/local +fi + +AC_DEFINE_UNQUOTED([CGMINER_PREFIX], ["$prefix/bin"], [Path to cgminer install]) + +AC_SUBST(JANSSON_LIBS) +AC_SUBST(PTHREAD_FLAGS) +AC_SUBST(DLOPEN_FLAGS) +AC_SUBST(PTHREAD_LIBS) +AC_SUBST(NCURSES_LIBS) +AC_SUBST(PDCURSES_LIBS) +AC_SUBST(WS2_LIBS) +AC_SUBST(MM_LIBS) +AC_SUBST(MATH_LIBS) +AC_SUBST(RT_LIBS) + +AC_CONFIG_FILES([ + Makefile + compat/Makefile + ccan/Makefile + lib/Makefile + ]) +AC_OUTPUT + + +echo +echo +echo +echo "------------------------------------------------------------------------" +echo "$PACKAGE $VERSION" +echo "------------------------------------------------------------------------" +echo +echo +echo "Configuration Options Summary:" +echo + +if test "x$libcurl" != xno; then + echo " libcurl(GBT+getwork).: Enabled: $LIBCURL_LIBS" +else + echo " libcurl(GBT+getwork).: Disabled" +fi + +echo " curses.TUI...........: $cursesmsg" + + +echo +if test 
"x$bmsc" = xyes; then + echo " Bitmain.SingleChain..: Enabled" +else + echo " Bitmain.SingleChain..: Disabled" +fi + +if test "x$bitmain" = xyes; then + echo " Bitmain.MultiChain...: Enabled" +else + echo " Bitmain.MultiChain...: Disabled" +fi + +if test "x$bitmain_c5" = xyes; then + echo " Bitmain.C5...: Enabled" +else + echo " Bitmain.C5...: Disabled" +fi + +if test "x$avalon" = xyes; then + echo " Avalon.ASICs.........: Enabled" +else + echo " Avalon.ASICs.........: Disabled" +fi + +if test "x$avalon2" = xyes; then + echo " Avalon2.ASICs........: Enabled" +else + echo " Avalon2.ASICs........: Disabled" +fi + +if test "x$avalon4" = xyes; then + echo " Avalon4.ASICs........: Enabled" +else + echo " Avalon4.ASICs........: Disabled" +fi + +if test "x$minion" = xyes; then + echo " BlackArrowMinion.ASIC: Enabled" +else + echo " BlackArrowMinion.ASIC: Disabled" +fi + +if test "x$bab" = xyes; then + echo " BlackArrow.ASICs.....: Enabled" +else + echo " BlackArrow.ASICs.....: Disabled" +fi + +if test "x$bflsc" = xyes; then + echo " BFL.ASICs............: Enabled" +else + echo " BFL.ASICs............: Disabled" +fi + +if test "x$bitforce" = xyes; then + echo " BitForce.FPGAs.......: Enabled" +else + echo " BitForce.FPGAs.......: Disabled" +fi + +if test "x$bitfury" = xyes; then + echo " BitFury.ASICs........: Enabled" +else + echo " BitFury.ASICs........: Disabled" +fi + +if test "x$blockerupter" = xyes; then + echo " BlockErupter.ASICs...: Enabled" +else + echo " BlockErupter.ASICs...: Disabled" +fi + +if test "x$cointerra" = xyes; then + echo " Cointerra.ASICs......: Enabled" +else + echo " Cointerra.ASICs......: Disabled" +fi + +if test "x$sp10" = xyes; then + echo " Spond-sp10.ASICs.....: Enabled" +else + echo " Spond-sp10.ASICs.....: Disabled" +fi + + +if test "x$sp30" = xyes; then + echo " Spond-sp30.ASICs.....: Enabled" +else + echo " Spond-sp30.ASICs.....: Disabled" +fi + + +if test "x$bitmine_A1" = xyes; then + echo " Bitmine-A1.ASICs.....: Enabled" +else + echo 
" Bitmine-A1.ASICs.....: Disabled" +fi + +if test "x$drillbit" = xyes; then + echo " Drillbit.BitFury.....: Enabled" +else + echo " Drillbit.BitFury.....: Disabled" +fi + +if test "x$hashfast" = xyes; then + echo " Hashfast.ASICs.......: Enabled" +else + echo " Hashfast.ASICs.......: Disabled" +fi + +if test "x$hashratio" = xyes; then + echo " Hashratio.ASICs......: Enabled" +else + echo " Hashratio.ASICs......: Disabled" +fi + +if test "x$icarus" = xyes; then + echo " Icarus.ASICs/FPGAs...: Enabled" +else + echo " Icarus.ASICs/FPGAs...: Disabled" +fi + +if test "x$klondike" = xyes; then + echo " Klondike.ASICs.......: Enabled" +else + echo " Klondike.ASICs.......: Disabled" +fi + +if test "x$knc" = xyes; then + echo " KnC.ASICs............: Enabled" +else + echo " KnC.ASICs............: Disabled" +fi + +if test "x$modminer" = xyes; then + echo " ModMiner.FPGAs.......: Enabled" +else + echo " ModMiner.FPGAs.......: Disabled" +fi + +#Add any new device to this, along with a no on the end of the test +if test "x$avalon$avalon2$bab$bflsc$bitforce$bitfury$blockerupter$hashfast$hashratio$icarus$klondike$knc$modminer$drillbit$minion$cointerra$bitmine_A1$bmsc$bitmain$sp10$sp30$bitmain_c5" = xnonononononononononononononononononononono; then + echo + AC_MSG_ERROR([No mining devices configured in]) + echo +fi + +if test "x$standalone" = xyes; then + if test $drivercount != x; then + echo + AC_MSG_ERROR([You have configured more than one driver in with a driver that is designed to be standalone only (see ./configure --help)]) + echo + fi +fi + +echo +echo "Compilation............: make (or gmake)" +echo " CPPFLAGS.............: $CPPFLAGS" +echo " CFLAGS...............: $CFLAGS" +echo " LDFLAGS..............: $LDFLAGS $PTHREAD_FLAGS" +echo " LDADD................: $DLOPEN_FLAGS $LIBCURL_LIBS $JANSSON_LIBS $PTHREAD_LIBS $NCURSES_LIBS $PDCURSES_LIBS $WS2_LIBS $MATH_LIBS $LIBUSB_LIBS $RT_LIBS" +echo +echo "Installation...........: make install (as root if needed, with 'su' or 
'sudo')" +echo " prefix...............: $prefix" +echo +if test "x$want_usbutils$dlibusb" = xyesyes; then +echo "*** SYSTEM LIBUSB BEING ADDED - NOT RECOMMENDED UNLESS YOU ARE UNABLE TO COMPILE THE INCLUDED LIBUSB ***" +echo +fi diff --git a/crc.h b/crc.h new file mode 100644 index 0000000..820fbe7 --- /dev/null +++ b/crc.h @@ -0,0 +1,23 @@ +/* + * Milkymist SoC (Software) + * Copyright (C) 2007, 2008, 2009 Sebastien Bourdeauducq + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 3 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _CRC_H_ +#define _CRC_H_ + +unsigned short crc16(const unsigned char *buffer, int len); + +#endif /* _CRC_H_ */ diff --git a/crc16.c b/crc16.c new file mode 100644 index 0000000..7d8374e --- /dev/null +++ b/crc16.c @@ -0,0 +1,45 @@ +unsigned int crc16_table[256] = { + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7, + 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6, + 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485, + 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4, + 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC, + 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B, + 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12, + 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A, + 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41, + 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49, + 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70, + 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78, + 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F, + 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E, + 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D, + 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C, + 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3, 
+ 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A, + 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92, + 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9, + 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1, + 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8, + 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0 +}; + +unsigned short crc16(const unsigned char *buffer, int len) +{ + unsigned short crc; + + crc = 0; + while(len-- > 0) + crc = crc16_table[((crc >> 8) ^ (*buffer++)) & 0xFF] ^ (crc << 8); + + return crc; +} diff --git a/driver-SPI-bitmine-A1.c b/driver-SPI-bitmine-A1.c new file mode 100644 index 0000000..21d2545 --- /dev/null +++ b/driver-SPI-bitmine-A1.c @@ -0,0 +1,1123 @@ +/* + * cgminer SPI driver for Bitmine.ch A1 devices + * + * Copyright 2013, 2014 Zefir Kurtisi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "spi-context.h" +#include "logging.h" +#include "miner.h" +#include "util.h" + +#include "A1-common.h" +#include "A1-board-selector.h" +#include "A1-trimpot-mcp4x.h" + +/* one global board_selector and spi context is enough */ +static struct board_selector *board_selector; +static struct spi_ctx *spi; + +/********** work queue */ +static bool wq_enqueue(struct work_queue *wq, struct work *work) +{ + if (work == NULL) + return false; + struct work_ent *we = malloc(sizeof(*we)); + assert(we != NULL); + + we->work = work; + INIT_LIST_HEAD(&we->head); + list_add_tail(&we->head, &wq->head); + wq->num_elems++; + return true; +} + +static struct work *wq_dequeue(struct work_queue *wq) +{ + if (wq == NULL) + return NULL; + if (wq->num_elems == 0) + return NULL; + struct work_ent *we; + we = list_entry(wq->head.next, struct work_ent, head); + struct work *work = we->work; + + list_del(&we->head); + free(we); + wq->num_elems--; + return work; +} + +/* + * if not cooled sufficiently, communication fails and chip is temporary + * disabled. 
we leave it inactive for 30 seconds to cool down
a1->spi_tx[1] = chip_id; + + if (data != NULL) + memcpy(a1->spi_tx + 2, data, len); + + assert(spi_transfer(a1->spi_ctx, a1->spi_tx, a1->spi_rx, tx_len)); + hexdump("send: TX", a1->spi_tx, tx_len); + hexdump("send: RX", a1->spi_rx, tx_len); + + int poll_len = resp_len; + if (chip_id == 0) { + if (a1->num_chips == 0) { + applog(LOG_INFO, "%d: unknown chips in chain, " + "assuming 8", a1->chain_id); + poll_len += 32; + } + poll_len += 4 * a1->num_chips; + } + else { + poll_len += 4 * chip_id - 2; + } + + assert(spi_transfer(a1->spi_ctx, NULL, a1->spi_rx + tx_len, poll_len)); + hexdump("poll: RX", a1->spi_rx + tx_len, poll_len); + int ack_len = tx_len + resp_len; + int ack_pos = tx_len + poll_len - ack_len; + hexdump("poll: ACK", a1->spi_rx + ack_pos, ack_len - 2); + + return (a1->spi_rx + ack_pos); +} + + +/********** A1 SPI commands */ +static uint8_t *cmd_BIST_FIX_BCAST(struct A1_chain *a1) +{ + uint8_t *ret = exec_cmd(a1, A1_BIST_FIX, 0x00, NULL, 0, 0); + if (ret == NULL || ret[0] != A1_BIST_FIX) { + applog(LOG_ERR, "%d: cmd_BIST_FIX_BCAST failed", a1->chain_id); + return NULL; + } + return ret; +} + +static uint8_t *cmd_RESET_BCAST(struct A1_chain *a1, uint8_t strategy) +{ + static uint8_t s[2]; + s[0] = strategy; + s[1] = strategy; + uint8_t *ret = exec_cmd(a1, A1_RESET, 0x00, s, 2, 0); + if (ret == NULL || (ret[0] != A1_RESET && a1->num_chips != 0)) { + applog(LOG_ERR, "%d: cmd_RESET_BCAST failed", a1->chain_id); + return NULL; + } + return ret; +} + +static uint8_t *cmd_READ_RESULT_BCAST(struct A1_chain *a1) +{ + int tx_len = 8; + memset(a1->spi_tx, 0, tx_len); + a1->spi_tx[0] = A1_READ_RESULT; + + assert(spi_transfer(a1->spi_ctx, a1->spi_tx, a1->spi_rx, tx_len)); + hexdump("send: TX", a1->spi_tx, tx_len); + hexdump("send: RX", a1->spi_rx, tx_len); + + int poll_len = tx_len + 4 * a1->num_chips; + assert(spi_transfer(a1->spi_ctx, NULL, a1->spi_rx + tx_len, poll_len)); + hexdump("poll: RX", a1->spi_rx + tx_len, poll_len); + + uint8_t *scan = a1->spi_rx; + int i; 
+ for (i = 0; i < poll_len; i += 2) { + if ((scan[i] & 0x0f) == A1_READ_RESULT) + return scan + i; + } + applog(LOG_ERR, "%d: cmd_READ_RESULT_BCAST failed", a1->chain_id); + return NULL; +} + +static uint8_t *cmd_WRITE_REG(struct A1_chain *a1, uint8_t chip, uint8_t *reg) +{ + uint8_t *ret = exec_cmd(a1, A1_WRITE_REG, chip, reg, 6, 0); + if (ret == NULL || ret[0] != A1_WRITE_REG) { + applog(LOG_ERR, "%d: cmd_WRITE_REG failed", a1->chain_id); + return NULL; + } + return ret; +} + +static uint8_t *cmd_READ_REG(struct A1_chain *a1, uint8_t chip) +{ + uint8_t *ret = exec_cmd(a1, A1_READ_REG, chip, NULL, 0, 6); + if (ret == NULL || ret[0] != A1_READ_REG_RESP || ret[1] != chip) { + applog(LOG_ERR, "%d: cmd_READ_REG chip %d failed", + a1->chain_id, chip); + return NULL; + } + memcpy(a1->spi_rx, ret, 8); + return ret; +} + +static uint8_t *cmd_WRITE_JOB(struct A1_chain *a1, uint8_t chip_id, + uint8_t *job) +{ + /* ensure we push the SPI command to the last chip in chain */ + int tx_len = WRITE_JOB_LENGTH + 2; + memcpy(a1->spi_tx, job, WRITE_JOB_LENGTH); + memset(a1->spi_tx + WRITE_JOB_LENGTH, 0, tx_len - WRITE_JOB_LENGTH); + + assert(spi_transfer(a1->spi_ctx, a1->spi_tx, a1->spi_rx, tx_len)); + hexdump("send: TX", a1->spi_tx, tx_len); + hexdump("send: RX", a1->spi_rx, tx_len); + + int poll_len = 4 * chip_id - 2; + + assert(spi_transfer(a1->spi_ctx, NULL, a1->spi_rx + tx_len, poll_len)); + hexdump("poll: RX", a1->spi_rx + tx_len, poll_len); + + int ack_len = tx_len; + int ack_pos = tx_len + poll_len - ack_len; + hexdump("poll: ACK", a1->spi_rx + ack_pos, tx_len); + + uint8_t *ret = a1->spi_rx + ack_pos; + if (ret[0] != a1->spi_tx[0] || ret[1] != a1->spi_tx[1]){ + applog(LOG_ERR, "%d: cmd_WRITE_JOB failed: " + "0x%02x%02x/0x%02x%02x", a1->chain_id, + ret[0], ret[1], a1->spi_tx[0], a1->spi_tx[1]); + return NULL; + } + return ret; +} + +/********** A1 low level functions */ +#define MAX_PLL_WAIT_CYCLES 25 +#define PLL_CYCLE_WAIT_TIME 40 +static bool check_chip_pll_lock(struct 
A1_chain *a1, int chip_id, uint8_t *wr) +{ + int n; + for (n = 0; n < MAX_PLL_WAIT_CYCLES; n++) { + /* check for PLL lock status */ + if (cmd_READ_REG(a1, chip_id) && (a1->spi_rx[4] & 1) == 1) + /* double check that we read back what we set before */ + return wr[0] == a1->spi_rx[2] && wr[1] == a1->spi_rx[3]; + + cgsleep_ms(PLL_CYCLE_WAIT_TIME); + } + applog(LOG_ERR, "%d: chip %d failed PLL lock", a1->chain_id, chip_id); + return false; +} + +static uint8_t *get_pll_reg(struct A1_chain *a1, int ref_clock_khz, + int sys_clock_khz) +{ + /* + * PLL parameters after: + * sys_clk = (ref_clk * pll_fbdiv) / (pll_prediv * 2^(pll_postdiv - 1)) + * + * with a higher pll_postdiv being desired over a higher pll_prediv + */ + + static uint8_t writereg[6] = { 0x00, 0x00, 0x21, 0x84, }; + uint8_t pre_div = 1; + uint8_t post_div = 1; + uint32_t fb_div; + + int cid = a1->chain_id; + + applog(LOG_WARNING, "%d: Setting PLL: CLK_REF=%dMHz, SYS_CLK=%dMHz", + cid, ref_clock_khz / 1000, sys_clock_khz / 1000); + + /* Euclidean search for GCD */ + int a = ref_clock_khz; + int b = sys_clock_khz; + while (b != 0) { + int h = a % b; + a = b; + b = h; + } + fb_div = sys_clock_khz / a; + int n = ref_clock_khz / a; + /* approximate multiplier if not exactly matchable */ + if (fb_div > 511) { + int f = fb_div / n; + int m = (f < 32) ? 16 : (f < 64) ? 8 : + (f < 128) ? 4 : (256 < 2) ? 
2 : 1; + fb_div = m * fb_div / n; + n = m; + } + /* try to maximize post divider */ + if ((n & 3) == 0) + post_div = 3; + else if ((n & 1) == 0) + post_div = 2; + else + post_div = 1; + /* remainder goes to pre_div */ + pre_div = n / (1 << (post_div - 1)); + /* correct pre_div overflow */ + if (pre_div > 31) { + fb_div = 31 * fb_div / pre_div; + pre_div = 31; + } + writereg[0] = (post_div << 6) | (pre_div << 1) | (fb_div >> 8); + writereg[1] = fb_div & 0xff; + applog(LOG_WARNING, "%d: setting PLL: pre_div=%d, post_div=%d, " + "fb_div=%d: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x", cid, + pre_div, post_div, fb_div, + writereg[0], writereg[1], writereg[2], + writereg[3], writereg[4], writereg[5]); + return writereg; +} + +static bool set_pll_config(struct A1_chain *a1, int chip_id, + int ref_clock_khz, int sys_clock_khz) +{ + uint8_t *writereg = get_pll_reg(a1, ref_clock_khz, sys_clock_khz); + if (writereg == NULL) + return false; + if (!cmd_WRITE_REG(a1, chip_id, writereg)) + return false; + + int from = (chip_id == 0) ? 0 : chip_id - 1; + int to = (chip_id == 0) ? 
a1->num_active_chips : chip_id - 1; + + int i; + for (i = from; i < to; i++) { + int cid = i + 1; + if (!check_chip_pll_lock(a1, chip_id, writereg)) { + applog(LOG_ERR, "%d: chip %d failed PLL lock", + a1->chain_id, cid); + return false; + } + } + return true; +} + +#define WEAK_CHIP_THRESHOLD 30 +#define BROKEN_CHIP_THRESHOLD 26 +#define WEAK_CHIP_SYS_CLK (600 * 1000) +#define BROKEN_CHIP_SYS_CLK (400 * 1000) +static bool check_chip(struct A1_chain *a1, int i) +{ + int chip_id = i + 1; + int cid = a1->chain_id; + if (!cmd_READ_REG(a1, chip_id)) { + applog(LOG_WARNING, "%d: Failed to read register for " + "chip %d -> disabling", cid, chip_id); + a1->chips[i].num_cores = 0; + a1->chips[i].disabled = 1; + return false;; + } + a1->chips[i].num_cores = a1->spi_rx[7]; + a1->num_cores += a1->chips[i].num_cores; + applog(LOG_WARNING, "%d: Found chip %d with %d active cores", + cid, chip_id, a1->chips[i].num_cores); + if (a1->chips[i].num_cores < BROKEN_CHIP_THRESHOLD) { + applog(LOG_WARNING, "%d: broken chip %d with %d active " + "cores (threshold = %d)", cid, chip_id, + a1->chips[i].num_cores, BROKEN_CHIP_THRESHOLD); + set_pll_config(a1, chip_id, A1_config_options.ref_clk_khz, + BROKEN_CHIP_SYS_CLK); + cmd_READ_REG(a1, chip_id); + hexdump_error("new.PLL", a1->spi_rx, 8); + a1->chips[i].disabled = true; + a1->num_cores -= a1->chips[i].num_cores; + return false; + } + + if (a1->chips[i].num_cores < WEAK_CHIP_THRESHOLD) { + applog(LOG_WARNING, "%d: weak chip %d with %d active " + "cores (threshold = %d)", cid, + chip_id, a1->chips[i].num_cores, WEAK_CHIP_THRESHOLD); + set_pll_config(a1, chip_id, A1_config_options.ref_clk_khz, + WEAK_CHIP_SYS_CLK); + cmd_READ_REG(a1, chip_id); + hexdump_error("new.PLL", a1->spi_rx, 8); + return false; + } + return true; +} + +/* + * BIST_START works only once after HW reset, on subsequent calls it + * returns 0 as number of chips. 
+ */ +static int chain_detect(struct A1_chain *a1) +{ + int tx_len = 6; + + memset(a1->spi_tx, 0, tx_len); + a1->spi_tx[0] = A1_BIST_START; + a1->spi_tx[1] = 0; + + if (!spi_transfer(a1->spi_ctx, a1->spi_tx, a1->spi_rx, tx_len)) + return 0; + hexdump("TX", a1->spi_tx, 6); + hexdump("RX", a1->spi_rx, 6); + + int i; + int cid = a1->chain_id; + int max_poll_words = MAX_CHAIN_LENGTH * 2; + for(i = 1; i < max_poll_words; i++) { + if (a1->spi_rx[0] == A1_BIST_START && a1->spi_rx[1] == 0) { + spi_transfer(a1->spi_ctx, NULL, a1->spi_rx, 2); + hexdump("RX", a1->spi_rx, 2); + uint8_t n = a1->spi_rx[1]; + a1->num_chips = (i / 2) + 1; + if (a1->num_chips != n) { + applog(LOG_ERR, "%d: enumeration: %d <-> %d", + cid, a1->num_chips, n); + if (n != 0) + a1->num_chips = n; + } + applog(LOG_WARNING, "%d: detected %d chips", + cid, a1->num_chips); + return a1->num_chips; + } + bool s = spi_transfer(a1->spi_ctx, NULL, a1->spi_rx, 2); + hexdump("RX", a1->spi_rx, 2); + if (!s) + return 0; + } + applog(LOG_WARNING, "%d: no A1 chip-chain detected", cid); + return 0; +} + +/********** disable / re-enable related section (temporary for testing) */ +static int get_current_ms(void) +{ + cgtimer_t ct; + cgtimer_time(&ct); + return cgtimer_to_ms(&ct); +} + +static bool is_chip_disabled(struct A1_chain *a1, uint8_t chip_id) +{ + struct A1_chip *chip = &a1->chips[chip_id - 1]; + return chip->disabled || chip->cooldown_begin != 0; +} + +/* check and disable chip, remember time */ +static void disable_chip(struct A1_chain *a1, uint8_t chip_id) +{ + flush_spi(a1); + struct A1_chip *chip = &a1->chips[chip_id - 1]; + int cid = a1->chain_id; + if (is_chip_disabled(a1, chip_id)) { + applog(LOG_WARNING, "%d: chip %d already disabled", + cid, chip_id); + return; + } + applog(LOG_WARNING, "%d: temporary disabling chip %d", cid, chip_id); + chip->cooldown_begin = get_current_ms(); +} + +/* check if disabled chips can be re-enabled */ +void check_disabled_chips(struct A1_chain *a1) +{ + int i; + int cid = 
a1->chain_id; + for (i = 0; i < a1->num_active_chips; i++) { + int chip_id = i + 1; + struct A1_chip *chip = &a1->chips[i]; + if (!is_chip_disabled(a1, chip_id)) + continue; + /* do not re-enable fully disabled chips */ + if (chip->disabled) + continue; + if (chip->cooldown_begin + COOLDOWN_MS > get_current_ms()) + continue; + if (!cmd_READ_REG(a1, chip_id)) { + chip->fail_count++; + applog(LOG_WARNING, "%d: chip %d not yet working - %d", + cid, chip_id, chip->fail_count); + if (chip->fail_count > DISABLE_CHIP_FAIL_THRESHOLD) { + applog(LOG_WARNING, + "%d: completely disabling chip %d at %d", + cid, chip_id, chip->fail_count); + chip->disabled = true; + a1->num_cores -= chip->num_cores; + continue; + } + /* restart cooldown period */ + chip->cooldown_begin = get_current_ms(); + continue; + } + applog(LOG_WARNING, "%d: chip %d is working again", + cid, chip_id); + chip->cooldown_begin = 0; + chip->fail_count = 0; + } +} + +/********** job creation and result evaluation */ +uint32_t get_diff(double diff) +{ + uint32_t n_bits; + int shift = 29; + double f = (double) 0x0000ffff / diff; + while (f < (double) 0x00008000) { + shift--; + f *= 256.0; + } + while (f >= (double) 0x00800000) { + shift++; + f /= 256.0; + } + n_bits = (int) f + (shift << 24); + return n_bits; +} + +static uint8_t *create_job(uint8_t chip_id, uint8_t job_id, struct work *work) +{ + static uint8_t job[WRITE_JOB_LENGTH] = { + /* command */ + 0x00, 0x00, + /* midstate */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* wdata */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + /* start nonce */ + 0x00, 0x00, 0x00, 0x00, + /* difficulty 1 */ + 0xff, 0xff, 0x00, 0x1d, + /* end nonce */ + 0xff, 0xff, 0xff, 0xff, + }; + uint8_t *midstate = work->midstate; + uint8_t *wdata = work->data + 64; + + uint32_t *p1 = (uint32_t *) 
&job[34]; + uint32_t *p2 = (uint32_t *) wdata; + + job[0] = (job_id << 4) | A1_WRITE_JOB; + job[1] = chip_id; + + swab256(job + 2, midstate); + p1[0] = bswap_32(p2[0]); + p1[1] = bswap_32(p2[1]); + p1[2] = bswap_32(p2[2]); +#ifdef USE_REAL_DIFF + p1[4] = get_diff(work->sdiff); +#endif + return job; +} + +/* set work for given chip, returns true if a nonce range was finished */ +static bool set_work(struct A1_chain *a1, uint8_t chip_id, struct work *work, + uint8_t queue_states) +{ + int cid = a1->chain_id; + struct A1_chip *chip = &a1->chips[chip_id - 1]; + bool retval = false; + + int job_id = chip->last_queued_id + 1; + + applog(LOG_INFO, "%d: queuing chip %d with job_id %d, state=0x%02x", + cid, chip_id, job_id, queue_states); + if (job_id == (queue_states & 0x0f) || job_id == (queue_states >> 4)) + applog(LOG_WARNING, "%d: job overlap: %d, 0x%02x", + cid, job_id, queue_states); + + if (chip->work[chip->last_queued_id] != NULL) { + work_completed(a1->cgpu, chip->work[chip->last_queued_id]); + chip->work[chip->last_queued_id] = NULL; + retval = true; + } + uint8_t *jobdata = create_job(chip_id, job_id, work); + if (!cmd_WRITE_JOB(a1, chip_id, jobdata)) { + /* give back work */ + work_completed(a1->cgpu, work); + + applog(LOG_ERR, "%d: failed to set work for chip %d.%d", + cid, chip_id, job_id); + disable_chip(a1, chip_id); + } else { + chip->work[chip->last_queued_id] = work; + chip->last_queued_id++; + chip->last_queued_id &= 3; + } + return retval; +} + +static bool get_nonce(struct A1_chain *a1, uint8_t *nonce, + uint8_t *chip, uint8_t *job_id) +{ + uint8_t *ret = cmd_READ_RESULT_BCAST(a1); + if (ret == NULL) + return false; + if (ret[1] == 0) { + applog(LOG_DEBUG, "%d: output queue empty", a1->chain_id); + return false; + } + *job_id = ret[0] >> 4; + *chip = ret[1]; + memcpy(nonce, ret + 2, 4); + return true; +} + +/* reset input work queues in chip chain */ +static bool abort_work(struct A1_chain *a1) +{ + /* drop jobs already queued: reset strategy 0xed */ 
+ return cmd_RESET_BCAST(a1, 0xed); +} + +/********** driver interface */ +void exit_A1_chain(struct A1_chain *a1) +{ + if (a1 == NULL) + return; + free(a1->chips); + a1->chips = NULL; + a1->spi_ctx = NULL; + free(a1); +} + +struct A1_chain *init_A1_chain(struct spi_ctx *ctx, int chain_id) +{ + int i; + struct A1_chain *a1 = malloc(sizeof(*a1)); + assert(a1 != NULL); + + applog(LOG_DEBUG, "%d: A1 init chain", chain_id); + memset(a1, 0, sizeof(*a1)); + a1->spi_ctx = ctx; + a1->chain_id = chain_id; + + a1->num_chips = chain_detect(a1); + if (a1->num_chips == 0) + goto failure; + + applog(LOG_WARNING, "spidev%d.%d: %d: Found %d A1 chips", + a1->spi_ctx->config.bus, a1->spi_ctx->config.cs_line, + a1->chain_id, a1->num_chips); + + if (!set_pll_config(a1, 0, A1_config_options.ref_clk_khz, + A1_config_options.sys_clk_khz)) + goto failure; + + /* override max number of active chips if requested */ + a1->num_active_chips = a1->num_chips; + if (A1_config_options.override_chip_num > 0 && + a1->num_chips > A1_config_options.override_chip_num) { + a1->num_active_chips = A1_config_options.override_chip_num; + applog(LOG_WARNING, "%d: limiting chain to %d chips", + a1->chain_id, a1->num_active_chips); + } + + a1->chips = calloc(a1->num_active_chips, sizeof(struct A1_chip)); + assert (a1->chips != NULL); + + if (!cmd_BIST_FIX_BCAST(a1)) + goto failure; + + for (i = 0; i < a1->num_active_chips; i++) + check_chip(a1, i); + + applog(LOG_WARNING, "%d: found %d chips with total %d active cores", + a1->chain_id, a1->num_active_chips, a1->num_cores); + + mutex_init(&a1->lock); + INIT_LIST_HEAD(&a1->active_wq.head); + + return a1; + +failure: + exit_A1_chain(a1); + return NULL; +} + +static bool detect_single_chain(void) +{ + board_selector = (struct board_selector*)&dummy_board_selector; + applog(LOG_WARNING, "A1: checking single chain"); + struct A1_chain *a1 = init_A1_chain(spi, 0); + if (a1 == NULL) + return false; + + struct cgpu_info *cgpu = malloc(sizeof(*cgpu)); + assert(cgpu != 
NULL); + + memset(cgpu, 0, sizeof(*cgpu)); + cgpu->drv = &bitmineA1_drv; + cgpu->name = "BitmineA1.SingleChain"; + cgpu->threads = 1; + + cgpu->device_data = a1; + + a1->cgpu = cgpu; + add_cgpu(cgpu); + applog(LOG_WARNING, "Detected single A1 chain with %d chips / %d cores", + a1->num_active_chips, a1->num_cores); + return true; +} + +bool detect_coincraft_desk(void) +{ + static const uint8_t mcp4x_mapping[] = { 0x2c, 0x2b, 0x2a, 0x29, 0x28 }; + board_selector = ccd_board_selector_init(); + if (board_selector == NULL) { + applog(LOG_INFO, "No CoinCrafd Desk backplane detected."); + return false; + } + board_selector->reset_all(); + + int boards_detected = 0; + int board_id; + for (board_id = 0; board_id < CCD_MAX_CHAINS; board_id++) { + uint8_t mcp_slave = mcp4x_mapping[board_id]; + struct mcp4x *mcp = mcp4x_init(mcp_slave); + if (mcp == NULL) + continue; + + if (A1_config_options.wiper != 0) + mcp->set_wiper(mcp, 0, A1_config_options.wiper); + + applog(LOG_WARNING, "checking board %d...", board_id); + board_selector->select(board_id); + + struct A1_chain *a1 = init_A1_chain(spi, board_id); + board_selector->release(); + if (a1 == NULL) + continue; + + struct cgpu_info *cgpu = malloc(sizeof(*cgpu)); + assert(cgpu != NULL); + + memset(cgpu, 0, sizeof(*cgpu)); + cgpu->drv = &bitmineA1_drv; + cgpu->name = "BitmineA1.CCD"; + cgpu->threads = 1; + + cgpu->device_data = a1; + + a1->cgpu = cgpu; + add_cgpu(cgpu); + boards_detected++; + } + if (boards_detected == 0) + return false; + + applog(LOG_WARNING, "Detected CoinCraft Desk with %d boards", + boards_detected); + return true; +} + +bool detect_coincraft_rig_v3(void) +{ + board_selector = ccr_board_selector_init(); + if (board_selector == NULL) + return false; + + board_selector->reset_all(); + int chains_detected = 0; + int c; + for (c = 0; c < CCR_MAX_CHAINS; c++) { + applog(LOG_WARNING, "checking RIG chain %d...", c); + + if (!board_selector->select(c)) + continue; + + struct A1_chain *a1 = init_A1_chain(spi, c); + 
board_selector->release(); + + if (a1 == NULL) + continue; + + if (A1_config_options.wiper != 0 && (c & 1) == 0) { + struct mcp4x *mcp = mcp4x_init(0x28); + if (mcp == NULL) { + applog(LOG_ERR, "%d: Cant access poti", c); + } else { + mcp->set_wiper(mcp, 0, A1_config_options.wiper); + mcp->set_wiper(mcp, 1, A1_config_options.wiper); + mcp->exit(mcp); + applog(LOG_WARNING, "%d: set wiper to 0x%02x", + c, A1_config_options.wiper); + } + } + + struct cgpu_info *cgpu = malloc(sizeof(*cgpu)); + assert(cgpu != NULL); + + memset(cgpu, 0, sizeof(*cgpu)); + cgpu->drv = &bitmineA1_drv; + cgpu->name = "BitmineA1.CCR"; + cgpu->threads = 1; + + cgpu->device_data = a1; + + a1->cgpu = cgpu; + add_cgpu(cgpu); + chains_detected++; + } + if (chains_detected == 0) + return false; + + applog(LOG_WARNING, "Detected CoinCraft Rig with %d chains", + chains_detected); + return true; +} + +/* Probe SPI channel and register chip chain */ +void A1_detect(bool hotplug) +{ + /* no hotplug support for SPI */ + if (hotplug) + return; + + /* parse bimine-a1-options */ + if (opt_bitmine_a1_options != NULL && parsed_config_options == NULL) { + int ref_clk = 0; + int sys_clk = 0; + int spi_clk = 0; + int override_chip_num = 0; + int wiper = 0; + + sscanf(opt_bitmine_a1_options, "%d:%d:%d:%d:%d", + &ref_clk, &sys_clk, &spi_clk, &override_chip_num, + &wiper); + if (ref_clk != 0) + A1_config_options.ref_clk_khz = ref_clk; + if (sys_clk != 0) { + if (sys_clk < 100000) + quit(1, "system clock must be above 100MHz"); + A1_config_options.sys_clk_khz = sys_clk; + } + if (spi_clk != 0) + A1_config_options.spi_clk_khz = spi_clk; + if (override_chip_num != 0) + A1_config_options.override_chip_num = override_chip_num; + if (wiper != 0) + A1_config_options.wiper = wiper; + + /* config options are global, scan them once */ + parsed_config_options = &A1_config_options; + } + applog(LOG_DEBUG, "A1 detect"); + + /* register global SPI context */ + struct spi_config cfg = default_spi_config; + cfg.mode = SPI_MODE_1; 
+ cfg.speed = A1_config_options.spi_clk_khz * 1000; + spi = spi_init(&cfg); + if (spi == NULL) + return; + + /* detect and register supported products */ + if (detect_coincraft_desk()) + return; + if (detect_coincraft_rig_v3()) + return; + if (detect_single_chain()) + return; + /* release SPI context if no A1 products found */ + spi_exit(spi); +} + +#define TEMP_UPDATE_INT_MS 2000 +static int64_t A1_scanwork(struct thr_info *thr) +{ + int i; + struct cgpu_info *cgpu = thr->cgpu; + struct A1_chain *a1 = cgpu->device_data; + int32_t nonce_ranges_processed = 0; + + if (a1->num_cores == 0) { + cgpu->deven = DEV_DISABLED; + return 0; + } + board_selector->select(a1->chain_id); + + applog(LOG_DEBUG, "A1 running scanwork"); + + uint32_t nonce; + uint8_t chip_id; + uint8_t job_id; + bool work_updated = false; + + mutex_lock(&a1->lock); + + if (a1->last_temp_time + TEMP_UPDATE_INT_MS < get_current_ms()) { + a1->temp = board_selector->get_temp(0); + a1->last_temp_time = get_current_ms(); + } + int cid = a1->chain_id; + /* poll queued results */ + while (true) { + if (!get_nonce(a1, (uint8_t*)&nonce, &chip_id, &job_id)) + break; + nonce = bswap_32(nonce); + work_updated = true; + if (chip_id < 1 || chip_id > a1->num_active_chips) { + applog(LOG_WARNING, "%d: wrong chip_id %d", + cid, chip_id); + continue; + } + if (job_id < 1 && job_id > 4) { + applog(LOG_WARNING, "%d: chip %d: result has wrong " + "job_id %d", cid, chip_id, job_id); + flush_spi(a1); + continue; + } + + struct A1_chip *chip = &a1->chips[chip_id - 1]; + struct work *work = chip->work[job_id - 1]; + if (work == NULL) { + /* already been flushed => stale */ + applog(LOG_WARNING, "%d: chip %d: stale nonce 0x%08x", + cid, chip_id, nonce); + chip->stales++; + continue; + } + if (!submit_nonce(thr, work, nonce)) { + applog(LOG_WARNING, "%d: chip %d: invalid nonce 0x%08x", + cid, chip_id, nonce); + chip->hw_errors++; + /* add a penalty of a full nonce range on HW errors */ + nonce_ranges_processed--; + continue; + } 
+ applog(LOG_DEBUG, "YEAH: %d: chip %d / job_id %d: nonce 0x%08x", + cid, chip_id, job_id, nonce); + chip->nonces_found++; + } + + /* check for completed works */ + for (i = a1->num_active_chips; i > 0; i--) { + uint8_t c = i; + if (is_chip_disabled(a1, c)) + continue; + if (!cmd_READ_REG(a1, c)) { + disable_chip(a1, c); + continue; + } + uint8_t qstate = a1->spi_rx[5] & 3; + uint8_t qbuff = a1->spi_rx[6]; + struct work *work; + struct A1_chip *chip = &a1->chips[i - 1]; + switch(qstate) { + case 3: + continue; + case 2: + applog(LOG_ERR, "%d: chip %d: invalid state = 2", + cid, c); + continue; + case 1: + /* fall through */ + case 0: + work_updated = true; + + work = wq_dequeue(&a1->active_wq); + if (work == NULL) { + applog(LOG_INFO, "%d: chip %d: work underflow", + cid, c); + break; + } + if (set_work(a1, c, work, qbuff)) { + chip->nonce_ranges_done++; + nonce_ranges_processed++; + } + applog(LOG_DEBUG, "%d: chip %d: job done: %d/%d/%d/%d", + cid, c, + chip->nonce_ranges_done, chip->nonces_found, + chip->hw_errors, chip->stales); + break; + } + } + check_disabled_chips(a1); + mutex_unlock(&a1->lock); + + board_selector->release(); + + if (nonce_ranges_processed < 0) + nonce_ranges_processed = 0; + + if (nonce_ranges_processed != 0) { + applog(LOG_DEBUG, "%d, nonces processed %d", + cid, nonce_ranges_processed); + } + /* in case of no progress, prevent busy looping */ + if (!work_updated) + cgsleep_ms(40); + + return (int64_t)nonce_ranges_processed << 32; +} + + +/* queue two work items per chip in chain */ +static bool A1_queue_full(struct cgpu_info *cgpu) +{ + struct A1_chain *a1 = cgpu->device_data; + int queue_full = false; + + mutex_lock(&a1->lock); + applog(LOG_DEBUG, "%d, A1 running queue_full: %d/%d", + a1->chain_id, a1->active_wq.num_elems, a1->num_active_chips); + + if (a1->active_wq.num_elems >= a1->num_active_chips * 2) + queue_full = true; + else + wq_enqueue(&a1->active_wq, get_queued(cgpu)); + + mutex_unlock(&a1->lock); + + return queue_full; +} + 
+static void A1_flush_work(struct cgpu_info *cgpu) +{ + struct A1_chain *a1 = cgpu->device_data; + int cid = a1->chain_id; + board_selector->select(cid); + + applog(LOG_DEBUG, "%d: A1 running flushwork", cid); + + int i; + + mutex_lock(&a1->lock); + /* stop chips hashing current work */ + if (!abort_work(a1)) { + applog(LOG_ERR, "%d: failed to abort work in chip chain!", cid); + } + /* flush the work chips were currently hashing */ + for (i = 0; i < a1->num_active_chips; i++) { + int j; + struct A1_chip *chip = &a1->chips[i]; + for (j = 0; j < 4; j++) { + struct work *work = chip->work[j]; + if (work == NULL) + continue; + applog(LOG_DEBUG, "%d: flushing chip %d, work %d: 0x%p", + cid, i, j + 1, work); + work_completed(cgpu, work); + chip->work[j] = NULL; + } + chip->last_queued_id = 0; + } + /* flush queued work */ + applog(LOG_DEBUG, "%d: flushing queued work...", cid); + while (a1->active_wq.num_elems > 0) { + struct work *work = wq_dequeue(&a1->active_wq); + assert(work != NULL); + work_completed(cgpu, work); + } + mutex_unlock(&a1->lock); + + board_selector->release(); +} + +static void A1_get_statline_before(char *buf, size_t len, + struct cgpu_info *cgpu) +{ + struct A1_chain *a1 = cgpu->device_data; + char temp[10]; + if (a1->temp != 0) + snprintf(temp, 9, "%2dC", a1->temp); + tailsprintf(buf, len, " %2d:%2d/%3d %s", + a1->chain_id, a1->num_active_chips, a1->num_cores, + a1->temp == 0 ? 
" " : temp); +} + +struct device_drv bitmineA1_drv = { + .drv_id = DRIVER_bitmineA1, + .dname = "BitmineA1", + .name = "BA1", + .drv_detect = A1_detect, + + .hash_work = hash_queued_work, + .scanwork = A1_scanwork, + .queue_full = A1_queue_full, + .flush_work = A1_flush_work, + .get_statline_before = A1_get_statline_before, +}; diff --git a/driver-avalon.c b/driver-avalon.c new file mode 100644 index 0000000..1d00a5f --- /dev/null +++ b/driver-avalon.c @@ -0,0 +1,1716 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2012-2013 Xiangfu + * Copyright 2012 Luke Dashjr + * Copyright 2012 Andrew Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef WIN32 + #include + #include + #include + #include + #ifndef O_CLOEXEC + #define O_CLOEXEC 0 + #endif +#else + #include "compat.h" + #include + #include +#endif + +#include "elist.h" +#include "miner.h" +#include "usbutils.h" +#include "driver-avalon.h" +#include "hexdump.c" +#include "util.h" + +int opt_avalon_temp = AVALON_TEMP_TARGET; +int opt_avalon_overheat = AVALON_TEMP_OVERHEAT; +int opt_avalon_fan_min = AVALON_DEFAULT_FAN_MIN_PWM; +int opt_avalon_fan_max = AVALON_DEFAULT_FAN_MAX_PWM; +int opt_avalon_freq_min = AVALON_MIN_FREQUENCY; +int opt_avalon_freq_max = AVALON_MAX_FREQUENCY; +int opt_bitburner_core_voltage = BITBURNER_DEFAULT_CORE_VOLTAGE; +int opt_bitburner_fury_core_voltage = BITBURNER_FURY_DEFAULT_CORE_VOLTAGE; +bool opt_avalon_auto; + +static int option_offset = -1; +static int bbf_option_offset = -1; + +static int avalon_init_task(struct avalon_task *at, + uint8_t reset, uint8_t ff, uint8_t fan, + uint8_t timeout, uint8_t asic_num, + 
uint8_t miner_num, uint8_t nonce_elf, + uint8_t gate_miner, int frequency, int asic) +{ + uint16_t *lefreq16; + uint8_t *buf; + static bool first = true; + + if (unlikely(!at)) + return -1; + + if (unlikely(timeout <= 0 || asic_num <= 0 || miner_num <= 0)) + return -1; + + memset(at, 0, sizeof(struct avalon_task)); + + if (unlikely(reset)) { + at->reset = 1; + at->fan_eft = 1; + at->timer_eft = 1; + first = true; + } + + at->flush_fifo = (ff ? 1 : 0); + at->fan_eft = (fan ? 1 : 0); + + if (unlikely(first && !at->reset)) { + at->fan_eft = 1; + at->timer_eft = 1; + first = false; + } + + at->fan_pwm_data = (fan ? fan : AVALON_DEFAULT_FAN_MAX_PWM); + at->timeout_data = timeout; + at->asic_num = asic_num; + at->miner_num = miner_num; + at->nonce_elf = nonce_elf; + + at->gate_miner_elf = 1; + at->asic_pll = 1; + + if (unlikely(gate_miner)) { + at-> gate_miner = 1; + at->asic_pll = 0; + } + + buf = (uint8_t *)at; + buf[5] = 0x00; + buf[8] = 0x74; + buf[9] = 0x01; + buf[10] = 0x00; + buf[11] = 0x00; + + /* With 55nm, this is the real clock in Mhz, 1Mhz means 2Mhs */ + lefreq16 = (uint16_t *)&buf[6]; + if (asic == AVALON_A3256) + frequency *= 8; + else + frequency = frequency * 32 / 50 + 0x7FE0; + *lefreq16 = htole16(frequency); + + return 0; +} + +static inline void avalon_create_task(struct avalon_task *at, + struct work *work) +{ + memcpy(at->midstate, work->midstate, 32); + memcpy(at->data, work->data + 64, 12); +} + +static int avalon_write(struct cgpu_info *avalon, char *buf, ssize_t len, int ep) +{ + int err, amount; + + err = usb_write(avalon, buf, len, &amount, ep); + applog(LOG_DEBUG, "%s%i: usb_write got err %d", avalon->drv->name, + avalon->device_id, err); + + if (unlikely(err != 0)) { + applog(LOG_WARNING, "usb_write error on avalon_write"); + return AVA_SEND_ERROR; + } + if (amount != len) { + applog(LOG_WARNING, "usb_write length mismatch on avalon_write"); + return AVA_SEND_ERROR; + } + + return AVA_SEND_OK; +} + +static int avalon_send_task(const struct 
avalon_task *at, struct cgpu_info *avalon, + struct avalon_info *info) + +{ + uint8_t buf[AVALON_WRITE_SIZE + 4 * AVALON_DEFAULT_ASIC_NUM]; + int delay, ret, i, ep = C_AVALON_TASK; + uint32_t nonce_range; + size_t nr_len; + + if (at->nonce_elf) + nr_len = AVALON_WRITE_SIZE + 4 * at->asic_num; + else + nr_len = AVALON_WRITE_SIZE; + + memcpy(buf, at, AVALON_WRITE_SIZE); + + if (at->nonce_elf) { + nonce_range = (uint32_t)0xffffffff / at->asic_num; + for (i = 0; i < at->asic_num; i++) { + buf[AVALON_WRITE_SIZE + (i * 4) + 3] = + (i * nonce_range & 0xff000000) >> 24; + buf[AVALON_WRITE_SIZE + (i * 4) + 2] = + (i * nonce_range & 0x00ff0000) >> 16; + buf[AVALON_WRITE_SIZE + (i * 4) + 1] = + (i * nonce_range & 0x0000ff00) >> 8; + buf[AVALON_WRITE_SIZE + (i * 4) + 0] = + (i * nonce_range & 0x000000ff) >> 0; + } + } +#if defined(__BIG_ENDIAN__) || defined(MIPSEB) + uint8_t tt = 0; + + tt = (buf[0] & 0x0f) << 4; + tt |= ((buf[0] & 0x10) ? (1 << 3) : 0); + tt |= ((buf[0] & 0x20) ? (1 << 2) : 0); + tt |= ((buf[0] & 0x40) ? (1 << 1) : 0); + tt |= ((buf[0] & 0x80) ? (1 << 0) : 0); + buf[0] = tt; + + tt = (buf[4] & 0x0f) << 4; + tt |= ((buf[4] & 0x10) ? (1 << 3) : 0); + tt |= ((buf[4] & 0x20) ? (1 << 2) : 0); + tt |= ((buf[4] & 0x40) ? (1 << 1) : 0); + tt |= ((buf[4] & 0x80) ? 
(1 << 0) : 0); + buf[4] = tt; +#endif + delay = nr_len * 10 * 1000000; + delay = delay / info->baud; + delay += 4000; + + if (at->reset) { + ep = C_AVALON_RESET; + nr_len = 1; + } + if (opt_debug) { + applog(LOG_DEBUG, "Avalon: Sent(%u):", (unsigned int)nr_len); + hexdump(buf, nr_len); + } + /* Sleep from the last time we sent data */ + cgsleep_us_r(&info->cgsent, info->send_delay); + + cgsleep_prepare_r(&info->cgsent); + ret = avalon_write(avalon, (char *)buf, nr_len, ep); + + applog(LOG_DEBUG, "Avalon: Sent: Buffer delay: %dus", info->send_delay); + info->send_delay = delay; + + return ret; +} + +static int bitburner_send_task(const struct avalon_task *at, struct cgpu_info *avalon) + +{ + uint8_t buf[AVALON_WRITE_SIZE + 4 * AVALON_DEFAULT_ASIC_NUM]; + int ret, ep = C_AVALON_TASK; + cgtimer_t ts_start; + size_t nr_len; + + if (at->nonce_elf) + nr_len = AVALON_WRITE_SIZE + 4 * at->asic_num; + else + nr_len = AVALON_WRITE_SIZE; + + memset(buf, 0, nr_len); + memcpy(buf, at, AVALON_WRITE_SIZE); + +#if defined(__BIG_ENDIAN__) || defined(MIPSEB) + uint8_t tt = 0; + + tt = (buf[0] & 0x0f) << 4; + tt |= ((buf[0] & 0x10) ? (1 << 3) : 0); + tt |= ((buf[0] & 0x20) ? (1 << 2) : 0); + tt |= ((buf[0] & 0x40) ? (1 << 1) : 0); + tt |= ((buf[0] & 0x80) ? (1 << 0) : 0); + buf[0] = tt; + + tt = (buf[4] & 0x0f) << 4; + tt |= ((buf[4] & 0x10) ? (1 << 3) : 0); + tt |= ((buf[4] & 0x20) ? (1 << 2) : 0); + tt |= ((buf[4] & 0x40) ? (1 << 1) : 0); + tt |= ((buf[4] & 0x80) ? 
(1 << 0) : 0);
+	buf[4] = tt;
+#endif
+
+	if (at->reset) {
+		ep = C_AVALON_RESET;
+		nr_len = 1;
+	}
+	if (opt_debug) {
+		applog(LOG_DEBUG, "Avalon: Sent(%u):", (unsigned int)nr_len);
+		hexdump(buf, nr_len);
+	}
+	cgsleep_prepare_r(&ts_start);
+	ret = avalon_write(avalon, (char *)buf, nr_len, ep);
+	cgsleep_us_r(&ts_start, 3000); // 3 ms = 333 tasks per second, or 1.4 TH/s
+
+	return ret;
+}
+
+static bool avalon_decode_nonce(struct thr_info *thr, struct cgpu_info *avalon,
+				struct avalon_info *info, struct avalon_result *ar,
+				struct work *work)
+{
+	uint32_t nonce;
+
+	info = avalon->device_data;
+	info->matching_work[work->subid]++;
+	nonce = htole32(ar->nonce);
+	if (info->asic == AVALON_A3255)
+		nonce -= 0xc0;
+	applog(LOG_DEBUG, "Avalon: nonce = 0x%08x", nonce);
+	return submit_nonce(thr, work, nonce);
+}
+
+/* Wait until the ftdi chip returns a CTS saying we can send more data. */
+static void wait_avalon_ready(struct cgpu_info *avalon)
+{
+	while (avalon_buffer_full(avalon)) {
+		cgsleep_ms(40);
+	}
+}
+
+#define AVALON_CTS (1 << 4)
+
+static inline bool avalon_cts(char c)
+{
+	return (c & AVALON_CTS);
+}
+
+static int avalon_read(struct cgpu_info *avalon, char *buf, size_t bufsize, int ep)
+{
+	size_t total = 0, readsize = bufsize + 2;
+	char readbuf[AVALON_READBUF_SIZE];
+	int err, amount, ofs = 2, cp;
+
+	err = usb_read_once(avalon, readbuf, readsize, &amount, ep);
+	applog(LOG_DEBUG, "%s%i: Get avalon read got err %d",
+	       avalon->drv->name, avalon->device_id, err);
+	if (err && err != LIBUSB_ERROR_TIMEOUT)
+		return err;
+
+	if (amount < 2)
+		goto out;
+
+	/* The first 2 of every 64 bytes are status on FTDIRL */
+	while (amount > 2) {
+		cp = amount - 2;
+		if (cp > 62)
+			cp = 62;
+		memcpy(&buf[total], &readbuf[ofs], cp);
+		total += cp;
+		amount -= cp + 2;
+		ofs += 64;
+	}
+out:
+	return total;
+}
+
+static int avalon_reset(struct cgpu_info *avalon, bool initial)
+{
+	struct avalon_result ar;
+	int ret, i, spare;
+	struct avalon_task at;
+	uint8_t *buf, *tmp;
+ struct timespec p; + struct avalon_info *info = avalon->device_data; + + /* Send reset, then check for result */ + avalon_init_task(&at, 1, 0, + AVALON_DEFAULT_FAN_MAX_PWM, + AVALON_DEFAULT_TIMEOUT, + AVALON_DEFAULT_ASIC_NUM, + AVALON_DEFAULT_MINER_NUM, + 0, 0, + AVALON_DEFAULT_FREQUENCY, + AVALON_A3256); + + wait_avalon_ready(avalon); + ret = avalon_send_task(&at, avalon, info); + if (unlikely(ret == AVA_SEND_ERROR)) + return -1; + + if (!initial) { + applog(LOG_ERR, "%s%d reset sequence sent", avalon->drv->name, avalon->device_id); + return 0; + } + + ret = avalon_read(avalon, (char *)&ar, AVALON_READ_SIZE, C_GET_AVALON_RESET); + + /* What do these sleeps do?? */ + p.tv_sec = 0; + p.tv_nsec = AVALON_RESET_PITCH; + nanosleep(&p, NULL); + + /* Look for the first occurrence of 0xAA, the reset response should be: + * AA 55 AA 55 00 00 00 00 00 00 */ + spare = ret - 10; + buf = tmp = (uint8_t *)&ar; + if (opt_debug) { + applog(LOG_DEBUG, "%s%d reset: get:", avalon->drv->name, avalon->device_id); + hexdump(tmp, AVALON_READ_SIZE); + } + + for (i = 0; i <= spare; i++) { + buf = &tmp[i]; + if (buf[0] == 0xAA) + break; + } + i = 0; + + if (buf[0] == 0xAA && buf[1] == 0x55 && + buf[2] == 0xAA && buf[3] == 0x55) { + for (i = 4; i < 11; i++) + if (buf[i] != 0) + break; + } + + if (i != 11) { + applog(LOG_ERR, "%s%d: Reset failed! not an Avalon?" 
+ " (%d: %02x %02x %02x %02x)", avalon->drv->name, avalon->device_id, + i, buf[0], buf[1], buf[2], buf[3]); + /* FIXME: return 1; */ + } else { + /* buf[44]: minor + * buf[45]: day + * buf[46]: year,month, d6: 201306 + */ + info->ctlr_ver = ((buf[46] >> 4) + 2000) * 1000000 + + (buf[46] & 0x0f) * 10000 + + buf[45] * 100 + buf[44]; + applog(LOG_WARNING, "%s%d: Reset succeeded (Controller version: %d)", + avalon->drv->name, avalon->device_id, info->ctlr_ver); + } + + return 0; +} + +static int avalon_calc_timeout(int frequency) +{ + return AVALON_TIMEOUT_FACTOR / frequency; +} + +static bool get_options(int this_option_offset, int *baud, int *miner_count, + int *asic_count, int *timeout, int *frequency, int *asic, + char *options) +{ + char buf[BUFSIZ+1]; + char *ptr, *comma, *colon, *colon2, *colon3, *colon4, *colon5; + bool timeout_default; + size_t max; + int i, tmp; + + if (options == NULL) + buf[0] = '\0'; + else { + ptr = options; + for (i = 0; i < this_option_offset; i++) { + comma = strchr(ptr, ','); + if (comma == NULL) + break; + ptr = comma + 1; + } + + comma = strchr(ptr, ','); + if (comma == NULL) + max = strlen(ptr); + else + max = comma - ptr; + + if (max > BUFSIZ) + max = BUFSIZ; + strncpy(buf, ptr, max); + buf[max] = '\0'; + } + + if (!(*buf)) + return false; + + colon = strchr(buf, ':'); + if (colon) + *(colon++) = '\0'; + + tmp = atoi(buf); + switch (tmp) { + case 115200: + *baud = 115200; + break; + case 57600: + *baud = 57600; + break; + case 38400: + *baud = 38400; + break; + case 19200: + *baud = 19200; + break; + default: + quit(1, "Invalid avalon-options for baud (%s) " + "must be 115200, 57600, 38400 or 19200", buf); + } + + if (colon && *colon) { + colon2 = strchr(colon, ':'); + if (colon2) + *(colon2++) = '\0'; + + if (*colon) { + tmp = atoi(colon); + if (tmp > 0 && tmp <= AVALON_MAX_MINER_NUM) { + *miner_count = tmp; + } else { + quit(1, "Invalid avalon-options for " + "miner_count (%s) must be 1 ~ %d", + colon, AVALON_MAX_MINER_NUM); + } 
+ } + + if (colon2 && *colon2) { + colon3 = strchr(colon2, ':'); + if (colon3) + *(colon3++) = '\0'; + + tmp = atoi(colon2); + if (tmp > 0 && tmp <= AVALON_DEFAULT_ASIC_NUM) + *asic_count = tmp; + else { + quit(1, "Invalid avalon-options for " + "asic_count (%s) must be 1 ~ %d", + colon2, AVALON_DEFAULT_ASIC_NUM); + } + + timeout_default = false; + if (colon3 && *colon3) { + colon4 = strchr(colon3, ':'); + if (colon4) + *(colon4++) = '\0'; + + if (tolower(*colon3) == 'd') + timeout_default = true; + else { + tmp = atoi(colon3); + if (tmp > 0 && tmp <= 0xff) + *timeout = tmp; + else { + quit(1, "Invalid avalon-options for " + "timeout (%s) must be 1 ~ %d", + colon3, 0xff); + } + } + if (colon4 && *colon4) { + colon5 = strchr(colon4, ':'); + if (colon5) + *(colon5++) = '\0'; + + tmp = atoi(colon4); + if (tmp < AVALON_MIN_FREQUENCY || tmp > AVALON_MAX_FREQUENCY) { + quit(1, "Invalid avalon-options for frequency, must be %d <= frequency <= %d", + AVALON_MIN_FREQUENCY, AVALON_MAX_FREQUENCY); + } + *frequency = tmp; + if (timeout_default) + *timeout = avalon_calc_timeout(*frequency); + if (colon5 && *colon5) { + tmp = atoi(colon5); + if (tmp != AVALON_A3256 && tmp != AVALON_A3255) + quit(1, "Invalid avalon-options for asic, must be 110 or 55"); + *asic = tmp; + } + } + } + } + } + return true; +} + +char *set_avalon_fan(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to avalon-fan"; + if (ret == 1) + val2 = val1; + + if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100 || val2 < val1) + return "Invalid value passed to avalon-fan"; + + opt_avalon_fan_min = val1 * AVALON_PWM_MAX / 100; + opt_avalon_fan_max = val2 * AVALON_PWM_MAX / 100; + + return NULL; +} + +char *set_avalon_freq(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to avalon-freq"; + if (ret == 1) + val2 = val1; + + if (val1 < AVALON_MIN_FREQUENCY || val1 > 
AVALON_MAX_FREQUENCY || + val2 < AVALON_MIN_FREQUENCY || val2 > AVALON_MAX_FREQUENCY || + val2 < val1) + return "Invalid value passed to avalon-freq"; + + opt_avalon_freq_min = val1; + opt_avalon_freq_max = val2; + + return NULL; +} + +static void avalon_idle(struct cgpu_info *avalon, struct avalon_info *info) +{ + int i; + + wait_avalon_ready(avalon); + /* Send idle to all miners */ + for (i = 0; i < info->miner_count; i++) { + struct avalon_task at; + + if (unlikely(avalon_buffer_full(avalon))) + break; + info->idle++; + avalon_init_task(&at, 0, 0, info->fan_pwm, info->timeout, + info->asic_count, info->miner_count, 1, 1, + info->frequency, info->asic); + if (avalon_send_task(&at, avalon, info) == AVA_SEND_ERROR) + break; + } + applog(LOG_WARNING, "%s%i: Idling %d miners", avalon->drv->name, avalon->device_id, i); + wait_avalon_ready(avalon); +} + +static void avalon_initialise(struct cgpu_info *avalon) +{ + int err, interface; + + if (avalon->usbinfo.nodev) + return; + + interface = usb_interface(avalon); + // Reset + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_RESET, interface, C_RESET); + + applog(LOG_DEBUG, "%s%i: reset got err %d", + avalon->drv->name, avalon->device_id, err); + + if (avalon->usbinfo.nodev) + return; + + // Set latency + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_LATENCY, + AVALON_LATENCY, interface, C_LATENCY); + + applog(LOG_DEBUG, "%s%i: latency got err %d", + avalon->drv->name, avalon->device_id, err); + + if (avalon->usbinfo.nodev) + return; + + // Set data + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, + FTDI_VALUE_DATA_AVA, interface, C_SETDATA); + + applog(LOG_DEBUG, "%s%i: data got err %d", + avalon->drv->name, avalon->device_id, err); + + if (avalon->usbinfo.nodev) + return; + + // Set the baud + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, FTDI_VALUE_BAUD_AVA, + (FTDI_INDEX_BAUD_AVA & 0xff00) | interface, + C_SETBAUD); + + applog(LOG_DEBUG, "%s%i: setbaud 
got err %d", + avalon->drv->name, avalon->device_id, err); + + if (avalon->usbinfo.nodev) + return; + + // Set Modem Control + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); + + applog(LOG_DEBUG, "%s%i: setmodemctrl got err %d", + avalon->drv->name, avalon->device_id, err); + + if (avalon->usbinfo.nodev) + return; + + // Set Flow Control + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + + applog(LOG_DEBUG, "%s%i: setflowctrl got err %d", + avalon->drv->name, avalon->device_id, err); + + if (avalon->usbinfo.nodev) + return; + + /* Avalon repeats the following */ + // Set Modem Control + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); + + applog(LOG_DEBUG, "%s%i: setmodemctrl 2 got err %d", + avalon->drv->name, avalon->device_id, err); + + if (avalon->usbinfo.nodev) + return; + + // Set Flow Control + err = usb_transfer(avalon, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + + applog(LOG_DEBUG, "%s%i: setflowctrl 2 got err %d", + avalon->drv->name, avalon->device_id, err); +} + +static bool is_bitburner(struct cgpu_info *avalon) +{ + enum sub_ident ident; + + ident = usb_ident(avalon); + return ident == IDENT_BTB || ident == IDENT_BBF; +} + +static bool bitburner_set_core_voltage(struct cgpu_info *avalon, int core_voltage) +{ + uint8_t buf[2]; + int err; + + if (is_bitburner(avalon)) { + buf[0] = (uint8_t)core_voltage; + buf[1] = (uint8_t)(core_voltage >> 8); + err = usb_transfer_data(avalon, FTDI_TYPE_OUT, BITBURNER_REQUEST, + BITBURNER_VALUE, BITBURNER_INDEX_SET_VOLTAGE, + (uint32_t *)buf, sizeof(buf), C_BB_SET_VOLTAGE); + if (unlikely(err < 0)) { + applog(LOG_ERR, "%s%i: SetCoreVoltage failed: err = %d", + avalon->drv->name, avalon->device_id, err); + return false; + } else { + applog(LOG_WARNING, "%s%i: Core voltage set to %d millivolts", + avalon->drv->name, 
avalon->device_id, + core_voltage); + } + return true; + } + return false; +} + +static int bitburner_get_core_voltage(struct cgpu_info *avalon) +{ + uint8_t buf[2]; + int err; + int amount; + + if (is_bitburner(avalon)) { + err = usb_transfer_read(avalon, FTDI_TYPE_IN, BITBURNER_REQUEST, + BITBURNER_VALUE, BITBURNER_INDEX_GET_VOLTAGE, + (char *)buf, sizeof(buf), &amount, + C_BB_GET_VOLTAGE); + if (unlikely(err != 0 || amount != 2)) { + applog(LOG_ERR, "%s%i: GetCoreVoltage failed: err = %d, amount = %d", + avalon->drv->name, avalon->device_id, err, amount); + return 0; + } else { + return (int)(buf[0] + ((unsigned int)buf[1] << 8)); + } + } else { + return 0; + } +} + +static void bitburner_get_version(struct cgpu_info *avalon) +{ + struct avalon_info *info = avalon->device_data; + uint8_t buf[3]; + int err; + int amount; + + err = usb_transfer_read(avalon, FTDI_TYPE_IN, BITBURNER_REQUEST, + BITBURNER_VALUE, BITBURNER_INDEX_GET_VERSION, + (char *)buf, sizeof(buf), &amount, + C_GETVERSION); + if (unlikely(err != 0 || amount != sizeof(buf))) { + applog(LOG_DEBUG, "%s%i: GetVersion failed: err=%d, amt=%d assuming %d.%d.%d", + avalon->drv->name, avalon->device_id, err, amount, + BITBURNER_VERSION1, BITBURNER_VERSION2, BITBURNER_VERSION3); + info->version1 = BITBURNER_VERSION1; + info->version2 = BITBURNER_VERSION2; + info->version3 = BITBURNER_VERSION3; + } else { + info->version1 = buf[0]; + info->version2 = buf[1]; + info->version3 = buf[2]; + } +} + +static struct cgpu_info *avalon_detect_one(libusb_device *dev, struct usb_find_devices *found) +{ + int baud, miner_count, asic_count, timeout, frequency, asic; + int this_option_offset; + struct avalon_info *info; + struct cgpu_info *avalon; + bool configured; + int ret; + + avalon = usb_alloc_cgpu(&avalon_drv, AVALON_MINER_THREADS); + + baud = AVALON_IO_SPEED; + miner_count = AVALON_DEFAULT_MINER_NUM; + asic_count = AVALON_DEFAULT_ASIC_NUM; + timeout = AVALON_DEFAULT_TIMEOUT; + frequency = AVALON_DEFAULT_FREQUENCY; + 
asic = AVALON_A3256; + + if (!usb_init(avalon, dev, found)) + goto shin; + + this_option_offset = usb_ident(avalon) == IDENT_BBF ? ++bbf_option_offset : ++option_offset; + configured = get_options(this_option_offset, &baud, &miner_count, + &asic_count, &timeout, &frequency, &asic, + (usb_ident(avalon) == IDENT_BBF && opt_bitburner_fury_options != NULL) ? opt_bitburner_fury_options : opt_avalon_options); + + /* Even though this is an FTDI type chip, we want to do the parsing + * all ourselves so set it to std usb type */ + avalon->usbdev->usb_type = USB_TYPE_STD; + + /* We have a real Avalon! */ + avalon_initialise(avalon); + + avalon->device_data = calloc(sizeof(struct avalon_info), 1); + if (unlikely(!(avalon->device_data))) + quit(1, "Failed to calloc avalon_info data"); + info = avalon->device_data; + + if (configured) { + info->asic = asic; + info->baud = baud; + info->miner_count = miner_count; + info->asic_count = asic_count; + info->timeout = timeout; + info->frequency = frequency; + } else { + info->asic = AVALON_A3256; + info->baud = AVALON_IO_SPEED; + info->asic_count = AVALON_DEFAULT_ASIC_NUM; + switch (usb_ident(avalon)) { + case IDENT_BBF: + info->miner_count = BITBURNER_FURY_DEFAULT_MINER_NUM; + info->timeout = BITBURNER_FURY_DEFAULT_TIMEOUT; + info->frequency = BITBURNER_FURY_DEFAULT_FREQUENCY; + break; + default: + info->miner_count = AVALON_DEFAULT_MINER_NUM; + info->timeout = AVALON_DEFAULT_TIMEOUT; + info->frequency = AVALON_DEFAULT_FREQUENCY; + } + } + if (info->asic == AVALON_A3255) + info->increment = info->decrement = 50; + else { + info->increment = 2; + info->decrement = 1; + } + + info->fan_pwm = AVALON_DEFAULT_FAN_MIN_PWM; + /* This is for check the temp/fan every 3~4s */ + info->temp_history_count = + (4 / (float)((float)info->timeout * (AVALON_A3256 / info->asic) * ((float)1.67/0x32))) + 1; + if (info->temp_history_count <= 0) + info->temp_history_count = 1; + + info->temp_history_index = 0; + info->temp_sum = 0; + info->temp_old = 0; + 
+ if (!add_cgpu(avalon)) + goto unshin; + + ret = avalon_reset(avalon, true); + if (ret && !configured) + goto unshin; + + update_usb_stats(avalon); + + avalon_idle(avalon, info); + + applog(LOG_DEBUG, "Avalon Detected: %s " + "(miner_count=%d asic_count=%d timeout=%d frequency=%d chip=%d)", + avalon->device_path, info->miner_count, info->asic_count, info->timeout, + info->frequency, info->asic); + + if (usb_ident(avalon) == IDENT_BTB) { + if (opt_bitburner_core_voltage < BITBURNER_MIN_COREMV || + opt_bitburner_core_voltage > BITBURNER_MAX_COREMV) { + quit(1, "Invalid bitburner-voltage %d must be %dmv - %dmv", + opt_bitburner_core_voltage, + BITBURNER_MIN_COREMV, + BITBURNER_MAX_COREMV); + } else + bitburner_set_core_voltage(avalon, opt_bitburner_core_voltage); + } else if (usb_ident(avalon) == IDENT_BBF) { + if (opt_bitburner_fury_core_voltage < BITBURNER_FURY_MIN_COREMV || + opt_bitburner_fury_core_voltage > BITBURNER_FURY_MAX_COREMV) { + quit(1, "Invalid bitburner-fury-voltage %d must be %dmv - %dmv", + opt_bitburner_fury_core_voltage, + BITBURNER_FURY_MIN_COREMV, + BITBURNER_FURY_MAX_COREMV); + } else + bitburner_set_core_voltage(avalon, opt_bitburner_fury_core_voltage); + } + + if (is_bitburner(avalon)) { + bitburner_get_version(avalon); + } + + return avalon; + +unshin: + + usb_uninit(avalon); + +shin: + + free(avalon->device_data); + avalon->device_data = NULL; + + avalon = usb_free_cgpu(avalon); + + return NULL; +} + +static void avalon_detect(bool __maybe_unused hotplug) +{ + usb_detect(&avalon_drv, avalon_detect_one); +} + +static void avalon_init(struct cgpu_info *avalon) +{ + applog(LOG_INFO, "Avalon: Opened on %s", avalon->device_path); +} + +static struct work *avalon_valid_result(struct cgpu_info *avalon, struct avalon_result *ar) +{ + return clone_queued_work_bymidstate(avalon, (char *)ar->midstate, 32, + (char *)ar->data, 64, 12); +} + +static void avalon_update_temps(struct cgpu_info *avalon, struct avalon_info *info, + struct avalon_result *ar); 
+ +static void avalon_inc_nvw(struct avalon_info *info, struct thr_info *thr) +{ + applog(LOG_INFO, "%s%d: No matching work - HW error", + thr->cgpu->drv->name, thr->cgpu->device_id); + + inc_hw_errors(thr); + info->no_matching_work++; +} + +static void avalon_parse_results(struct cgpu_info *avalon, struct avalon_info *info, + struct thr_info *thr, char *buf, int *offset) +{ + int i, spare = *offset - AVALON_READ_SIZE; + bool found = false; + + for (i = 0; i <= spare; i++) { + struct avalon_result *ar; + struct work *work; + + ar = (struct avalon_result *)&buf[i]; + work = avalon_valid_result(avalon, ar); + if (work) { + bool gettemp = false; + + found = true; + + if (avalon_decode_nonce(thr, avalon, info, ar, work)) { + mutex_lock(&info->lock); + if (!info->nonces++) + gettemp = true; + info->auto_nonces++; + mutex_unlock(&info->lock); + } else if (opt_avalon_auto) { + mutex_lock(&info->lock); + info->auto_hw++; + mutex_unlock(&info->lock); + } + free_work(work); + + if (gettemp) + avalon_update_temps(avalon, info, ar); + break; + } + } + + if (!found) { + spare = *offset - AVALON_READ_SIZE; + /* We are buffering and haven't accumulated one more corrupt + * work result. 
*/ + if (spare < (int)AVALON_READ_SIZE) + return; + avalon_inc_nvw(info, thr); + } else { + spare = AVALON_READ_SIZE + i; + if (i) { + if (i >= (int)AVALON_READ_SIZE) + avalon_inc_nvw(info, thr); + else + applog(LOG_WARNING, "Avalon: Discarding %d bytes from buffer", i); + } + } + + *offset -= spare; + memmove(buf, buf + spare, *offset); +} + +static void avalon_running_reset(struct cgpu_info *avalon, + struct avalon_info *info) +{ + avalon_reset(avalon, false); + avalon_idle(avalon, info); + avalon->results = 0; + info->reset = false; +} + +static void *avalon_get_results(void *userdata) +{ + struct cgpu_info *avalon = (struct cgpu_info *)userdata; + struct avalon_info *info = avalon->device_data; + const int rsize = AVALON_FTDI_READSIZE; + char readbuf[AVALON_READBUF_SIZE]; + struct thr_info *thr = info->thr; + int offset = 0, ret = 0; + char threadname[16]; + + snprintf(threadname, sizeof(threadname), "%d/AvaRecv", avalon->device_id); + RenameThread(threadname); + + while (likely(!avalon->shutdown)) { + char buf[rsize]; + + if (offset >= (int)AVALON_READ_SIZE) + avalon_parse_results(avalon, info, thr, readbuf, &offset); + + if (unlikely(offset + rsize >= AVALON_READBUF_SIZE)) { + /* This should never happen */ + applog(LOG_ERR, "Avalon readbuf overflow, resetting buffer"); + offset = 0; + } + + if (unlikely(info->reset)) { + avalon_running_reset(avalon, info); + /* Discard anything in the buffer */ + offset = 0; + } + + ret = avalon_read(avalon, buf, rsize, C_AVALON_READ); + + if (unlikely(ret < 0)) + break; + + if (ret < 1) + continue; + + if (opt_debug) { + applog(LOG_DEBUG, "Avalon: get:"); + hexdump((uint8_t *)buf, ret); + } + + memcpy(&readbuf[offset], &buf, ret); + offset += ret; + } + return NULL; +} + +static void avalon_rotate_array(struct cgpu_info *avalon, struct avalon_info *info) +{ + mutex_lock(&info->qlock); + avalon->queued = 0; + if (++avalon->work_array >= AVALON_ARRAY_SIZE) + avalon->work_array = 0; + mutex_unlock(&info->qlock); +} + +static 
/* As avalon_rotate_array() but with the larger BitBurner array size.
 * NOTE(review): unlike avalon_rotate_array() this does not take qlock
 * itself — its caller (bitburner_send_tasks) already holds it. */
void bitburner_rotate_array(struct cgpu_info *avalon)
{
	avalon->queued = 0;
	if (++avalon->work_array >= BITBURNER_ARRAY_SIZE)
		avalon->work_array = 0;
}

/* Recompute the device timeout from the current frequency. */
static void avalon_set_timeout(struct avalon_info *info)
{
	info->timeout = avalon_calc_timeout(info->frequency);
}

/* Set the operating frequency, clamped to the configured min/max range,
 * and update the matching timeout. */
static void avalon_set_freq(struct cgpu_info *avalon, int frequency)
{
	struct avalon_info *info = avalon->device_data;

	info->frequency = frequency;
	if (info->frequency > opt_avalon_freq_max)
		info->frequency = opt_avalon_freq_max;
	if (info->frequency < opt_avalon_freq_min)
		info->frequency = opt_avalon_freq_min;
	avalon_set_timeout(info);
	applog(LOG_WARNING, "%s%i: Set frequency to %d, timeout %d",
	       avalon->drv->name, avalon->device_id,
	       info->frequency, info->timeout);
}

/* Step the frequency up by the configured increment, capped at the max. */
static void avalon_inc_freq(struct avalon_info *info)
{
	info->frequency += info->increment;
	if (info->frequency > opt_avalon_freq_max)
		info->frequency = opt_avalon_freq_max;
	avalon_set_timeout(info);
	applog(LOG_NOTICE, "Avalon increasing frequency to %d, timeout %d",
	       info->frequency, info->timeout);
}

/* Step the frequency down by the configured decrement, floored at the min. */
static void avalon_dec_freq(struct avalon_info *info)
{
	info->frequency -= info->decrement;
	if (info->frequency < opt_avalon_freq_min)
		info->frequency = opt_avalon_freq_min;
	avalon_set_timeout(info);
	applog(LOG_NOTICE, "Avalon decreasing frequency to %d, timeout %d",
	       info->frequency, info->timeout);
}

/* Zero the counters used by the auto-frequency algorithm. */
static void avalon_reset_auto(struct avalon_info *info)
{
	info->auto_queued =
	info->auto_nonces =
	info->auto_hw = 0;
}

/* Auto-tuning: once a full cycle of work has been queued, raise or lower
 * the frequency based on temperature and the observed hw-error rate. */
static void avalon_adjust_freq(struct avalon_info *info, struct cgpu_info *avalon)
{
	if (opt_avalon_auto && info->auto_queued >= AVALON_AUTO_CYCLE) {
		mutex_lock(&info->lock);
		if (!info->optimal) {
			if (info->fan_pwm >= opt_avalon_fan_max) {
				applog(LOG_WARNING,
				       "%s%i: Above optimal temperature, throttling",
				       avalon->drv->name, avalon->device_id);
				avalon_dec_freq(info);
			}
		} else if (info->auto_nonces >= AVALON_AUTO_CYCLE / 2) {
			int total = info->auto_nonces + info->auto_hw;

			/* Try to keep hw errors < 2%
			 * (the actual thresholds below are ~1% to raise and
			 * ~1.5% to lower). */
			if (info->auto_hw * 100 < total)
				avalon_inc_freq(info);
			else if (info->auto_hw * 66 > total)
				avalon_dec_freq(info);
		}
		avalon_reset_auto(info);
		mutex_unlock(&info->lock);
	}
}

/* Send thread (stock Avalon): each pass sends one task per miner from the
 * current work array slot — or an idle task when no work / overheated —
 * then rotates the array, wakes avalon_scanhash via qsem, and sleeps for
 * roughly one full nonce range at the current frequency. */
static void *avalon_send_tasks(void *userdata)
{
	struct cgpu_info *avalon = (struct cgpu_info *)userdata;
	struct avalon_info *info = avalon->device_data;
	const int avalon_get_work_count = info->miner_count;
	char threadname[16];

	snprintf(threadname, sizeof(threadname), "%d/AvaSend", avalon->device_id);
	RenameThread(threadname);

	while (likely(!avalon->shutdown)) {
		int start_count, end_count, i, j, ret;
		cgtimer_t ts_start;
		struct avalon_task at;
		bool idled = false;
		int64_t us_timeout;

		while (avalon_buffer_full(avalon))
			cgsleep_ms(40);

		avalon_adjust_freq(info, avalon);

		/* A full nonce range */
		us_timeout = 0x100000000ll / info->asic_count / info->frequency;
		cgsleep_prepare_r(&ts_start);

		start_count = avalon->work_array * avalon_get_work_count;
		end_count = start_count + avalon_get_work_count;
		for (i = start_count, j = 0; i < end_count; i++, j++) {
			if (avalon_buffer_full(avalon)) {
				applog(LOG_INFO,
				       "%s%i: Buffer full after only %d of %d work queued",
				       avalon->drv->name, avalon->device_id, j, avalon_get_work_count);
				break;
			}

			mutex_lock(&info->qlock);
			if (likely(j < avalon->queued && !info->overheat && avalon->works[i])) {
				avalon_init_task(&at, 0, 0, info->fan_pwm,
						 info->timeout, info->asic_count,
						 info->miner_count, 1, 0, info->frequency, info->asic);
				avalon_create_task(&at, avalon->works[i]);
				info->auto_queued++;
			} else {
				int idle_freq = info->frequency;

				if (!info->idle++)
					idled = true;
				if (unlikely(info->overheat && opt_avalon_auto))
					idle_freq = AVALON_MIN_FREQUENCY;
				avalon_init_task(&at, 0, 0, info->fan_pwm,
						 info->timeout, info->asic_count,
						 info->miner_count, 1, 1, idle_freq, info->asic);
				/* Reset the auto_queued count if we end up
				 * idling any miners. */
				avalon_reset_auto(info);
			}
			mutex_unlock(&info->qlock);

			ret = avalon_send_task(&at, avalon, info);

			if (unlikely(ret == AVA_SEND_ERROR)) {
				/* Send errors are fatal */
				applog(LOG_ERR, "%s%i: Comms error(buffer)",
				       avalon->drv->name, avalon->device_id);
				dev_error(avalon, REASON_DEV_COMMS_ERROR);
				goto out;
			}
		}

		avalon_rotate_array(avalon, info);

		cgsem_post(&info->qsem);

		if (unlikely(idled)) {
			/* NOTE(review): idled is a bool, so this always
			 * prints 1 rather than a miner count — matches
			 * upstream; confirm if a real count is wanted. */
			applog(LOG_WARNING, "%s%i: Idled %d miners",
			       avalon->drv->name, avalon->device_id, idled);
		}

		/* Sleep how long it would take to complete a full nonce range
		 * at the current frequency using the clock_nanosleep function
		 * timed from before we started loading new work so it will
		 * fall short of the full duration. */
		cgsleep_us_r(&ts_start, us_timeout);
	}
out:
	return NULL;
}

/* Send thread (BitBurner variant): same overall shape as avalon_send_tasks
 * but holds qlock across the whole batch, uses bitburner_send_task(), and
 * requests a reset instead of aborting on comms errors. */
static void *bitburner_send_tasks(void *userdata)
{
	struct cgpu_info *avalon = (struct cgpu_info *)userdata;
	struct avalon_info *info = avalon->device_data;
	const int avalon_get_work_count = info->miner_count;
	char threadname[16];

	snprintf(threadname, sizeof(threadname), "%d/AvaSend", avalon->device_id);
	RenameThread(threadname);

	while (likely(!avalon->shutdown)) {
		int start_count, end_count, i, j, ret;
		struct avalon_task at;
		bool idled = false;

		while (avalon_buffer_full(avalon))
			cgsleep_ms(40);

		avalon_adjust_freq(info, avalon);

		/* Give other threads a chance to acquire qlock.
*/
		i = 0;
		/* Wait up to ~600ms for the queue to fill before sending. */
		do {
			cgsleep_ms(40);
		} while (!avalon->shutdown && i++ < 15
			&& avalon->queued < avalon_get_work_count);

		mutex_lock(&info->qlock);
		start_count = avalon->work_array * avalon_get_work_count;
		end_count = start_count + avalon_get_work_count;
		for (i = start_count, j = 0; i < end_count; i++, j++) {
			while (avalon_buffer_full(avalon))
				cgsleep_ms(40);

			if (likely(j < avalon->queued && !info->overheat && avalon->works[i])) {
				avalon_init_task(&at, 0, 0, info->fan_pwm,
						 info->timeout, info->asic_count,
						 info->miner_count, 1, 0, info->frequency, info->asic);
				avalon_create_task(&at, avalon->works[i]);
				info->auto_queued++;
			} else {
				int idle_freq = info->frequency;

				if (!info->idle++)
					idled = true;
				if (unlikely(info->overheat && opt_avalon_auto))
					idle_freq = AVALON_MIN_FREQUENCY;
				avalon_init_task(&at, 0, 0, info->fan_pwm,
						 info->timeout, info->asic_count,
						 info->miner_count, 1, 1, idle_freq, info->asic);
				/* Reset the auto_queued count if we end up
				 * idling any miners. */
				avalon_reset_auto(info);
			}

			ret = bitburner_send_task(&at, avalon);

			if (unlikely(ret == AVA_SEND_ERROR)) {
				/* Unlike avalon_send_tasks, a comms error here
				 * requests a running reset instead of killing
				 * the thread. */
				applog(LOG_ERR, "%s%i: Comms error(buffer)",
				       avalon->drv->name, avalon->device_id);
				dev_error(avalon, REASON_DEV_COMMS_ERROR);
				info->reset = true;
				break;
			}
		}

		bitburner_rotate_array(avalon);
		mutex_unlock(&info->qlock);

		cgsem_post(&info->qsem);

		if (unlikely(idled)) {
			/* NOTE(review): idled is a bool — always prints 1. */
			applog(LOG_WARNING, "%s%i: Idled %d miners",
			       avalon->drv->name, avalon->device_id, idled);
		}
	}
	return NULL;
}

/* Driver thread_prepare hook: allocate the rotating work array, initialise
 * locks/semaphore, spawn the read and write threads, then init the device.
 * quit()s the whole program on allocation or thread-creation failure. */
static bool avalon_prepare(struct thr_info *thr)
{
	struct cgpu_info *avalon = thr->cgpu;
	struct avalon_info *info = avalon->device_data;
	int array_size = AVALON_ARRAY_SIZE;
	void *(*write_thread_fn)(void *) = avalon_send_tasks;

	if (is_bitburner(avalon)) {
		array_size = BITBURNER_ARRAY_SIZE;
		write_thread_fn = bitburner_send_tasks;
	}

	free(avalon->works);
	avalon->works = calloc(info->miner_count * sizeof(struct work *),
			       array_size);
	if (!avalon->works)
		quit(1, "Failed to calloc avalon works in avalon_prepare");

	info->thr = thr;
	mutex_init(&info->lock);
	mutex_init(&info->qlock);
	cgsem_init(&info->qsem);

	if (pthread_create(&info->read_thr, NULL, avalon_get_results, (void *)avalon))
		quit(1, "Failed to create avalon read_thr");

	if (pthread_create(&info->write_thr, NULL, write_thread_fn, (void *)avalon))
		quit(1, "Failed to create avalon write_thr");

	avalon_init(avalon);

	return true;
}

/* Record fan speeds and temperatures from a result packet into info and
 * fold the maximum temperature into the device's smoothed temp reading.
 * Raw temps are 8-bit with the top bit flagging a negative (two's
 * complement style) value. */
static inline void record_temp_fan(struct cgpu_info *avalon, struct avalon_info *info,
				   struct avalon_result *ar)
{
	double temp_max;

	info->fan0 = ar->fan0 * AVALON_FAN_FACTOR;
	info->fan1 = ar->fan1 * AVALON_FAN_FACTOR;
	info->fan2 = ar->fan2 * AVALON_FAN_FACTOR;

	info->temp0 = ar->temp0;
	info->temp1 = ar->temp1;
	info->temp2 = ar->temp2;
	if (ar->temp0 & 0x80) {
		ar->temp0 &= 0x7f;
		info->temp0 = 0 - ((~ar->temp0 & 0x7f) + 1);
	}
	if (ar->temp1 & 0x80) {
		ar->temp1 &= 0x7f;
info->temp1 = 0 - ((~ar->temp1 & 0x7f) + 1);
	}
	if (ar->temp2 & 0x80) {
		ar->temp2 &= 0x7f;
		info->temp2 = 0 - ((~ar->temp2 & 0x7f) + 1);
	}

	temp_max = info->temp0;
	if (info->temp1 > temp_max)
		temp_max = info->temp1;
	if (info->temp2 > temp_max)
		temp_max = info->temp2;
	/* Exponential moving average of the hottest sensor. */
	avalon->temp = avalon->temp * 0.63 + temp_max * 0.37;
}

/* Raise fan PWM in steps sized by how far temp is above target; saturate
 * at full PWM well above target, and clamp to the configured maximum. */
static void temp_rise(struct avalon_info *info, int temp)
{
	if (temp >= opt_avalon_temp + AVALON_TEMP_HYSTERESIS * 3) {
		info->fan_pwm = AVALON_PWM_MAX;
		return;
	}
	if (temp >= opt_avalon_temp + AVALON_TEMP_HYSTERESIS * 2)
		info->fan_pwm += 10;
	else if (temp > opt_avalon_temp)
		info->fan_pwm += 5;
	else if (temp >= opt_avalon_temp - AVALON_TEMP_HYSTERESIS)
		info->fan_pwm += 1;
	else
		return;

	if (info->fan_pwm > opt_avalon_fan_max)
		info->fan_pwm = opt_avalon_fan_max;
}

/* Lower fan PWM in steps sized by how far temp is below target; clamp to
 * the configured minimum. */
static void temp_drop(struct avalon_info *info, int temp)
{
	if (temp <= opt_avalon_temp - AVALON_TEMP_HYSTERESIS * 3) {
		info->fan_pwm = opt_avalon_fan_min;
		return;
	}
	if (temp <= opt_avalon_temp - AVALON_TEMP_HYSTERESIS * 2)
		info->fan_pwm -= 10;
	else if (temp <= opt_avalon_temp - AVALON_TEMP_HYSTERESIS)
		info->fan_pwm -= 5;
	else if (temp < opt_avalon_temp)
		info->fan_pwm -= 1;

	if (info->fan_pwm < opt_avalon_fan_min)
		info->fan_pwm = opt_avalon_fan_min;
}

/* Adjust fan PWM from the averaged temperature over the last history
 * window, and update the "optimal" flag used by the auto-freq logic. */
static inline void adjust_fan(struct avalon_info *info)
{
	int temp_new;

	temp_new = info->temp_sum / info->temp_history_count;

	if (temp_new > info->temp_old)
		temp_rise(info, temp_new);
	else if (temp_new < info->temp_old)
		temp_drop(info, temp_new);
	else {
		/* temp_new == info->temp_old */
		if (temp_new > opt_avalon_temp)
			temp_rise(info, temp_new);
		else if (temp_new < opt_avalon_temp - AVALON_TEMP_HYSTERESIS)
			temp_drop(info, temp_new);
	}
	info->temp_old = temp_new;
	if (info->temp_old <= opt_avalon_temp)
		info->optimal = true;
	else
		info->optimal = false;
}

/* Record temps/fans from a result, run the fan controller once per history
 * window, and flip the overheat flag on/off around the configured limits. */
static void avalon_update_temps(struct cgpu_info *avalon, struct avalon_info *info,
				struct avalon_result *ar)
{
	record_temp_fan(avalon, info, ar);
	applog(LOG_INFO,
	       "Avalon: Fan1: %d/m, Fan2: %d/m, Fan3: %d/m\t"
	       "Temp1: %dC, Temp2: %dC, Temp3: %dC, TempMAX: %.0fC",
	       info->fan0, info->fan1, info->fan2,
	       info->temp0, info->temp1, info->temp2, avalon->temp);
	info->temp_history_index++;
	info->temp_sum += avalon->temp;
	applog(LOG_DEBUG, "Avalon: temp_index: %d, temp_count: %d, temp_old: %d",
	       info->temp_history_index, info->temp_history_count, info->temp_old);
	if (is_bitburner(avalon)) {
		info->core_voltage = bitburner_get_core_voltage(avalon);
	}
	if (info->temp_history_index == info->temp_history_count) {
		adjust_fan(info);
		info->temp_history_index = 0;
		info->temp_sum = 0;
	}
	if (unlikely(info->temp_old >= opt_avalon_overheat)) {
		applog(LOG_WARNING, "%s%d overheat! Idling", avalon->drv->name, avalon->device_id);
		info->overheat = true;
	} else if (info->overheat && info->temp_old <= opt_avalon_temp) {
		applog(LOG_WARNING, "%s%d cooled, restarting", avalon->drv->name, avalon->device_id);
		info->overheat = false;
	}
}

/* Build the per-device status line: temp/frequency/voltage for BitBurner,
 * temps plus the slowest cooling fan for stock Avalon. */
static void get_avalon_statline_before(char *buf, size_t bufsiz, struct cgpu_info *avalon)
{
	struct avalon_info *info = avalon->device_data;
	int lowfan = 10000;

	if (is_bitburner(avalon)) {
		int temp = info->temp0;
		if (info->temp2 > temp)
			temp = info->temp2;
		if (temp > 99)
			temp = 99;
		if (temp < 0)
			temp = 0;
		tailsprintf(buf, bufsiz, "%2dC %3dMHz %4dmV", temp, info->frequency, info->core_voltage);
	} else {
		/* Find the lowest fan speed of the ASIC cooling fans. */
		if (info->fan1 >= 0 && info->fan1 < lowfan)
			lowfan = info->fan1;
		if (info->fan2 >= 0 && info->fan2 < lowfan)
			lowfan = info->fan2;

		tailsprintf(buf, bufsiz, "%2dC/%3dC %04dR", info->temp0, info->temp2, lowfan);
	}
}

/* We use a replacement algorithm to only remove references to work done from
 * the buffer when we need the extra space for new work.
*/
/* Driver queue_full hook: pull one queued work item into the current array
 * slot, replacing (and completing) whatever previously occupied it.
 * Returns true once the slot set for this pass is full. */
static bool avalon_fill(struct cgpu_info *avalon)
{
	struct avalon_info *info = avalon->device_data;
	int subid, slot, mc;
	struct work *work;
	bool ret = true;

	mc = info->miner_count;
	mutex_lock(&info->qlock);
	if (avalon->queued >= mc)
		goto out_unlock;
	work = get_queued(avalon);
	if (unlikely(!work)) {
		ret = false;
		goto out_unlock;
	}
	subid = avalon->queued++;
	work->subid = subid;
	slot = avalon->work_array * mc + subid;
	if (likely(avalon->works[slot]))
		work_completed(avalon, avalon->works[slot]);
	avalon->works[slot] = work;
	if (avalon->queued < mc)
		ret = false;
out_unlock:
	mutex_unlock(&info->qlock);

	return ret;
}

/* Driver scanwork hook: wait for the send thread (or timeout), convert the
 * nonces found since last pass into a hash count, and track a rolling
 * results score that triggers a reset when returns are consistently low. */
static int64_t avalon_scanhash(struct thr_info *thr)
{
	struct cgpu_info *avalon = thr->cgpu;
	struct avalon_info *info = avalon->device_data;
	const int miner_count = info->miner_count;
	int64_t hash_count, ms_timeout;

	/* Half nonce range */
	ms_timeout = 0x80000000ll / info->asic_count / info->frequency / 1000;

	/* Wait until avalon_send_tasks signals us that it has completed
	 * sending its work or a full nonce range timeout has occurred. We use
	 * cgsems to never miss a wakeup. */
	cgsem_mswait(&info->qsem, ms_timeout);

	mutex_lock(&info->lock);
	hash_count = 0xffffffffull * (uint64_t)info->nonces;
	avalon->results += info->nonces;
	if (avalon->results > miner_count || info->idle)
		avalon->results = miner_count;
	if (!info->reset)
		avalon->results--;
	info->nonces = info->idle = 0;
	mutex_unlock(&info->lock);

	/* Check for nothing but consecutive bad results or consistently less
	 * results than we should be getting and reset the FPGA if necessary */
	if (!is_bitburner(avalon)) {
		if (avalon->results < -miner_count && !info->reset) {
			applog(LOG_ERR, "%s%d: Result return rate low, resetting!",
			       avalon->drv->name, avalon->device_id);
			avalon->results = miner_count;
			info->reset = true;
		}
	}

	if (unlikely(avalon->usbinfo.nodev)) {
		applog(LOG_ERR, "%s%d: Device disappeared, shutting down thread",
		       avalon->drv->name, avalon->device_id);
		hash_count = -1;
	}

	/* This hashmeter is just a utility counter based on returned shares */
	return hash_count;
}

/* Driver flush_work hook: drop the queued count and wake the main loop so
 * fresh work is requested immediately. */
static void avalon_flush_work(struct cgpu_info *avalon)
{
	struct avalon_info *info = avalon->device_data;

	/* Will overwrite any work queued. Do this unlocked since it's just
	 * changing a single non-critical value and prevents deadlocks */
	avalon->queued = 0;

	/* Signal main loop we need more work */
	cgsem_post(&info->qsem);
}

/* Driver get_api_stats hook: export configuration, fan/temperature data,
 * hw-error percentage, and per-miner match counts via the API. */
static struct api_data *avalon_api_stats(struct cgpu_info *cgpu)
{
	struct api_data *root = NULL;
	struct avalon_info *info = cgpu->device_data;
	char buf[64];
	int i;
	double hwp = (cgpu->hw_errors + cgpu->diff1) ?
		     (double)(cgpu->hw_errors) / (double)(cgpu->hw_errors + cgpu->diff1) : 0;

	root = api_add_int(root, "baud", &(info->baud), false);
	root = api_add_int(root, "miner_count", &(info->miner_count),false);
	root = api_add_int(root, "asic_count", &(info->asic_count), false);
	root = api_add_int(root, "timeout", &(info->timeout), false);
	root = api_add_int(root, "frequency", &(info->frequency), false);

	root = api_add_int(root, "fan1", &(info->fan0), false);
	root = api_add_int(root, "fan2", &(info->fan1), false);
	root = api_add_int(root, "fan3", &(info->fan2), false);

	root = api_add_int(root, "temp1", &(info->temp0), false);
	root = api_add_int(root, "temp2", &(info->temp1), false);
	root = api_add_int(root, "temp3", &(info->temp2), false);
	root = api_add_double(root, "temp_max", &cgpu->temp, false);

	root = api_add_percent(root, "Device Hardware%", &hwp, true);
	root = api_add_int(root, "no_matching_work", &(info->no_matching_work), false);
	for (i = 0; i < info->miner_count; i++) {
		char mcw[24];

		sprintf(mcw, "match_work_count%d", i + 1);
		root = api_add_int(root, mcw, &(info->matching_work[i]), false);
	}
	if (is_bitburner(cgpu)) {
		root = api_add_int(root, "core_voltage", &(info->core_voltage), false);
		snprintf(buf, sizeof(buf), "%"PRIu8".%"PRIu8".%"PRIu8,
			 info->version1, info->version2, info->version3);
		root = api_add_string(root, "version", buf, true);
	}
	root = api_add_uint32(root, "Controller Version", &(info->ctlr_ver), false);
	root = api_add_uint32(root, "Avalon Chip", &(info->asic), false);

	return root;
}

/* Driver thread_shutdown hook: join the worker threads (avalon->shutdown
 * is presumably set by the caller before this runs — TODO confirm), reset
 * the device, and release all per-device resources. */
static void avalon_shutdown(struct thr_info *thr)
{
	struct cgpu_info *avalon = thr->cgpu;
	struct avalon_info *info = avalon->device_data;

	pthread_join(info->read_thr, NULL);
	pthread_join(info->write_thr, NULL);
	avalon_running_reset(avalon, info);
	cgsem_destroy(&info->qsem);
	mutex_destroy(&info->qlock);
	mutex_destroy(&info->lock);
	free(avalon->works);
	avalon->works = NULL;
}
/* Driver set_device hook: handle the "help", "millivolts"/"mv" (BitBurner
 * only) and "freq" options. Returns NULL on success or a message written
 * into replybuf on failure. */
static char *avalon_set_device(struct cgpu_info *avalon, char *option, char *setting, char *replybuf)
{
	int val;

	if (strcasecmp(option, "help") == 0) {
		sprintf(replybuf, "freq: range %d-%d millivolts: range %d-%d",
			AVALON_MIN_FREQUENCY, AVALON_MAX_FREQUENCY,
			BITBURNER_MIN_COREMV, BITBURNER_MAX_COREMV);
		return replybuf;
	}

	if (strcasecmp(option, "millivolts") == 0 || strcasecmp(option, "mv") == 0) {
		if (!is_bitburner(avalon)) {
			sprintf(replybuf, "%s cannot set millivolts", avalon->drv->name);
			return replybuf;
		}

		if (!setting || !*setting) {
			sprintf(replybuf, "missing millivolts setting");
			return replybuf;
		}

		val = atoi(setting);
		if (val < BITBURNER_MIN_COREMV || val > BITBURNER_MAX_COREMV) {
			sprintf(replybuf, "invalid millivolts: '%s' valid range %d-%d",
				setting, BITBURNER_MIN_COREMV, BITBURNER_MAX_COREMV);
			return replybuf;
		}

		if (bitburner_set_core_voltage(avalon, val))
			return NULL;
		else {
			sprintf(replybuf, "Set millivolts failed");
			return replybuf;
		}
	}

	if (strcasecmp(option, "freq") == 0) {
		if (!setting || !*setting) {
			sprintf(replybuf, "missing freq setting");
			return replybuf;
		}

		val = atoi(setting);
		if (val < AVALON_MIN_FREQUENCY || val > AVALON_MAX_FREQUENCY) {
			sprintf(replybuf, "invalid freq: '%s' valid range %d-%d",
				setting, AVALON_MIN_FREQUENCY, AVALON_MAX_FREQUENCY);
			return replybuf;
		}

		avalon_set_freq(avalon, val);
		return NULL;
	}

	sprintf(replybuf, "Unknown option: %s", option);
	return replybuf;
}

/* Driver descriptor binding the Avalon hooks into cgminer's driver table. */
struct device_drv avalon_drv = {
	.drv_id = DRIVER_avalon,
	.dname = "avalon",
	.name = "AVA",
	.drv_detect = avalon_detect,
	.thread_prepare = avalon_prepare,
	.hash_work = hash_queued_work,
	.queue_full = avalon_fill,
	.scanwork = avalon_scanhash,
	.flush_work = avalon_flush_work,
	.get_api_stats = avalon_api_stats,
	.get_statline_before = get_avalon_statline_before,
	.set_device = avalon_set_device,
	.reinit_device = avalon_init,
	.thread_shutdown = avalon_shutdown,
};
diff --git a/driver-avalon.h b/driver-avalon.h
new file mode 100644
index 0000000..c718366
--- /dev/null
+++ b/driver-avalon.h
@@ -0,0 +1,207 @@
/*
 * Copyright 2013 Avalon project
 * Copyright 2013-2014 Con Kolivas
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#ifndef AVALON_H
#define AVALON_H

#ifdef USE_AVALON

#include "util.h"

#define AVALON_RESET_FAULT_DECISECONDS 1
#define AVALON_MINER_THREADS 1

#define AVALON_IO_SPEED 115200
#define AVALON_HASH_TIME_FACTOR ((float)1.67/0x32)
#define AVALON_RESET_PITCH (300*1000*1000)

/* Chip identifiers used in the asic field. */
#define AVALON_A3256 110
#define AVALON_A3255 55

#define AVALON_FAN_FACTOR 120
#define AVALON_PWM_MAX 0xA0
#define AVALON_DEFAULT_FAN_MIN 20
#define AVALON_DEFAULT_FAN_MAX 100
#define AVALON_DEFAULT_FAN_MAX_PWM 0xA0 /* 100% */
#define AVALON_DEFAULT_FAN_MIN_PWM 0x20 /* 20% */

#define AVALON_TEMP_TARGET 50
#define AVALON_TEMP_HYSTERESIS 3
#define AVALON_TEMP_OVERHEAT 60

/* Avalon-based BitBurner. */
#define BITBURNER_DEFAULT_CORE_VOLTAGE 1200 /* in millivolts */
#define BITBURNER_MIN_COREMV 1000
/* change here if you want to risk killing it :) */
#define BITBURNER_MAX_COREMV 1400

/* BitFury-based BitBurner. */
#define BITBURNER_FURY_DEFAULT_CORE_VOLTAGE 900 /* in millivolts */
#define BITBURNER_FURY_MIN_COREMV 700
/* change here if you want to risk killing it :) */
#define BITBURNER_FURY_MAX_COREMV 1100

#define AVALON_DEFAULT_TIMEOUT 0x2D
#define AVALON_MIN_FREQUENCY 256
#define AVALON_MAX_FREQUENCY 2000
#define AVALON_TIMEOUT_FACTOR 12690
#define AVALON_DEFAULT_FREQUENCY 282
#define AVALON_DEFAULT_MINER_NUM 0x20
#define AVALON_MAX_MINER_NUM 0x100
#define AVALON_DEFAULT_ASIC_NUM 0xA

/* Default number of miners for Bitburner Fury is for a stack of 8 boards,
   but it will work acceptably for smaller stacks, too */
#define BITBURNER_FURY_DEFAULT_MINER_NUM 128
#define BITBURNER_FURY_DEFAULT_FREQUENCY 256
#define BITBURNER_FURY_DEFAULT_TIMEOUT 50

#define AVALON_AUTO_CYCLE 1024

#define AVALON_FTDI_READSIZE 510
#define AVALON_READBUF_SIZE 8192
/* Set latency to just less than full 64 byte packet size at 115200 baud */
#define AVALON_LATENCY 4

/* On-the-wire task sent to the device; packed bitfields must match the
 * device's framing exactly. */
struct avalon_task {
	uint8_t reset		:1;
	uint8_t flush_fifo	:1;
	uint8_t fan_eft		:1;
	uint8_t timer_eft	:1;
	uint8_t asic_num	:4;
	uint8_t fan_pwm_data;
	uint8_t timeout_data;
	uint8_t miner_num;

	uint8_t nonce_elf	:1;
	uint8_t gate_miner_elf	:1;
	uint8_t asic_pll	:1;
	uint8_t gate_miner	:1;
	uint8_t _pad0		:4;
	uint8_t _pad1[3];
	uint32_t _pad2;

	uint8_t midstate[32];
	uint8_t data[12];
} __attribute__((packed, aligned(4)));

/* On-the-wire result received from the device. */
struct avalon_result {
	uint32_t nonce;
	uint8_t data[12];
	uint8_t midstate[32];

	uint8_t fan0;
	uint8_t fan1;
	uint8_t fan2;
	uint8_t temp0;
	uint8_t temp1;
	uint8_t temp2;
	uint8_t _pad0[2];

	uint16_t fifo_wp;
	uint16_t fifo_rp;
	uint8_t chip_num;
	uint8_t pwm_data;
	uint8_t timeout;
	uint8_t miner_num;
} __attribute__((packed, aligned(4)));

/* Per-device driver state. */
struct avalon_info {
	int baud;
	int miner_count;
	int asic_count;
	int timeout;

	int fan0;
	int fan1;
	int fan2;

	int temp0;
	int temp1;
	int temp2;
	int temp_history_count;
	int temp_history_index;
	int temp_sum;
	int temp_old;
	int fan_pwm;

	int core_voltage;

	int no_matching_work;
	int matching_work[AVALON_MAX_MINER_NUM];

	int frequency;
	uint32_t asic;
	uint32_t ctlr_ver;

	struct thr_info *thr;
	pthread_t read_thr;
	pthread_t write_thr;
	pthread_mutex_t lock;
	pthread_mutex_t qlock;
	cgsem_t qsem;
	cgtimer_t cgsent;
	int send_delay;

	int nonces;
	int auto_queued;
	int auto_nonces;
	int auto_hw;
	int increment;
	int decrement;

	int idle;
	bool reset;
	bool overheat;
	bool optimal;

	uint8_t version1;
	uint8_t version2;
	uint8_t version3;
};

#define BITBURNER_VERSION1 1
#define BITBURNER_VERSION2 0
#define BITBURNER_VERSION3 0

#define AVALON_WRITE_SIZE (sizeof(struct avalon_task))
#define AVALON_READ_SIZE (sizeof(struct avalon_result))
#define AVALON_ARRAY_SIZE 3
#define BITBURNER_ARRAY_SIZE 4

#define AVA_GETS_ERROR -1
#define AVA_GETS_OK 0

#define AVA_SEND_ERROR -1
#define AVA_SEND_OK 0

/* The FTDI CTS line doubles as the device's "buffer full" indicator. */
#define avalon_buffer_full(avalon) !usb_ftdi_cts(avalon)

#define AVALON_READ_TIME(baud) ((double)AVALON_READ_SIZE * (double)8.0 / (double)(baud))
/* Compile-time assertion that uint32_t is 4 bytes (pre-C11 idiom). */
#define ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1]
ASSERT1(sizeof(uint32_t) == 4);

extern struct avalon_info **avalon_info;
extern int opt_avalon_temp;
extern int opt_avalon_overheat;
extern int opt_avalon_fan_min;
extern int opt_avalon_fan_max;
extern int opt_avalon_freq_min;
extern int opt_avalon_freq_max;
extern bool opt_avalon_auto;
extern int opt_bitburner_core_voltage;
extern int opt_bitburner_fury_core_voltage;
extern char *set_avalon_fan(char *arg);
extern char *set_avalon_freq(char *arg);

#endif /* USE_AVALON */
#endif /* AVALON_H */
diff --git a/driver-avalon2.c b/driver-avalon2.c
new file mode 100644
index 0000000..2ce216d
--- /dev/null
+++ b/driver-avalon2.c
@@ -0,0 +1,1097 @@
/*
 * Copyright 2013-2014 Con Kolivas
 * Copyright 2012-2014 Xiangfu
 * Copyright
2012 Luke Dashjr
 * Copyright 2012 Andrew Smith
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
 */

#include "config.h"

/* NOTE(review): the system header names below were lost in extraction
 * (the <...> contents were stripped); restore them from the original
 * file before compiling. */
#include
#include
#include
#include
#include
#include
#include
#ifndef WIN32
  #include
  #include
  #include
  #ifndef O_CLOEXEC
    #define O_CLOEXEC 0
  #endif
#else
  #include
  #include
#endif

#include "elist.h"
#include "miner.h"
#include "fpgautils.h"
#include "driver-avalon2.h"
#include "crc.h"
#include "sha2.h"

/* Compile-time assertion that uint32_t is 4 bytes (pre-C11 idiom). */
#define ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1]
ASSERT1(sizeof(uint32_t) == 4);

/* Convert a fan percentage (0-100) into the inverted PWM value the
 * hardware expects. */
#define get_fan_pwm(v)	(AVA2_PWM_MAX - (v) * AVA2_PWM_MAX / 100)

int opt_avalon2_freq_min;
int opt_avalon2_freq_max;

int opt_avalon2_fan_min = AVA2_DEFAULT_FAN_MIN;
int opt_avalon2_fan_max = AVA2_DEFAULT_FAN_MAX;
static int avalon2_fan_min = get_fan_pwm(AVA2_DEFAULT_FAN_MIN);
static int avalon2_fan_max = get_fan_pwm(AVA2_DEFAULT_FAN_MAX);

int opt_avalon2_voltage_min;
int opt_avalon2_voltage_max;

int opt_avalon2_overheat = AVALON2_TEMP_OVERHEAT;
int opt_avalon2_polling_delay = AVALON2_DEFAULT_POLLING_DELAY;

enum avalon2_fan_fixed opt_avalon2_fan_fixed = FAN_AUTO;

/* Store a 32-bit value into str in big-endian byte order. */
#define UNPACK32(x, str)			\
{						\
	*((str) + 3) = (uint8_t) ((x)      );	\
	*((str) + 2) = (uint8_t) ((x) >>  8);	\
	*((str) + 1) = (uint8_t) ((x) >> 16);	\
	*((str) + 0) = (uint8_t) ((x) >> 24);	\
}

/* Hash len bytes of message and emit the intermediate SHA256 state words
 * (big-endian) into digest — used to precompute the block-header midstate. */
static void sha256_prehash(const unsigned char *message, unsigned int len, unsigned char *digest)
{
	sha256_ctx ctx;
	int i;
	sha256_init(&ctx);
	sha256_update(&ctx, message, len);

	for (i = 0; i < 8; i++) {
		UNPACK32(ctx.h[i], &digest[i << 2]);
	}
}

/* Reverse the bit order of a byte. */
static inline uint8_t rev8(uint8_t d)
{
	int i;
	uint8_t out = 0;

	/* (from left to right) */
	for (i = 0; i < 8; i++)
		if (d & (1 << i))
			out |= (1 << (7 - i));

	return out;
}

/* Parse the --avalon2-fan option: either "N" or "MIN-MAX" percentages. */
char *set_avalon2_fan(char *arg)
{
	int val1, val2, ret;

	ret = sscanf(arg, "%d-%d", &val1, &val2);
	if (ret < 1)
		return "No values passed to avalon2-fan";
	if (ret == 1)
		val2 = val1;

	if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100 || val2 < val1)
		return "Invalid value passed to avalon2-fan";

	opt_avalon2_fan_min = val1;
	opt_avalon2_fan_max = val2;
	avalon2_fan_min = get_fan_pwm(val1);
	avalon2_fan_max = get_fan_pwm(val2);

	return NULL;
}

/* Switch fan control from automatic to fixed-speed mode. */
char *set_avalon2_fixed_speed(enum avalon2_fan_fixed *f)
{
	*f = FAN_FIXED;
	return NULL;
}

/* Parse the --avalon2-freq option: either "N" or "MIN-MAX" MHz. */
char *set_avalon2_freq(char *arg)
{
	int val1, val2, ret;

	ret = sscanf(arg, "%d-%d", &val1, &val2);
	if (ret < 1)
		return "No values passed to avalon2-freq";
	if (ret == 1)
		val2 = val1;

	if (val1 < AVA2_DEFAULT_FREQUENCY_MIN || val1 > AVA2_DEFAULT_FREQUENCY_MAX ||
	    val2 < AVA2_DEFAULT_FREQUENCY_MIN || val2 > AVA2_DEFAULT_FREQUENCY_MAX ||
	    val2 < val1)
		return "Invalid value passed to avalon2-freq";

	opt_avalon2_freq_min = val1;
	opt_avalon2_freq_max = val2;

	return NULL;
}

/* Parse the --avalon2-voltage option: either "N" or "MIN-MAX" millivolts. */
char *set_avalon2_voltage(char *arg)
{
	int val1, val2, ret;

	ret = sscanf(arg, "%d-%d", &val1, &val2);
	if (ret < 1)
		return "No values passed to avalon2-voltage";
	if (ret == 1)
		val2 = val1;

	if (val1 < AVA2_DEFAULT_VOLTAGE_MIN || val1 > AVA2_DEFAULT_VOLTAGE_MAX ||
	    val2 < AVA2_DEFAULT_VOLTAGE_MIN || val2 > AVA2_DEFAULT_VOLTAGE_MAX ||
	    val2 < val1)
		return "Invalid value passed to avalon2-voltage";

	opt_avalon2_voltage_min = val1;
	opt_avalon2_voltage_max = val2;

	return NULL;
}

/* Fill in a packet's header bytes, type/index/count, and CRC16 over the
 * payload. Always returns 0. */
static int avalon2_init_pkg(struct avalon2_pkg *pkg, uint8_t type, uint8_t idx, uint8_t cnt)
{
	unsigned short crc;

	pkg->head[0] = AVA2_H1;
	pkg->head[1] = AVA2_H2;

	pkg->type = type;
	pkg->idx = idx;
	pkg->cnt = cnt;

	crc = crc16(pkg->data, AVA2_P_DATA_LEN);

	pkg->crc[0] = (crc & 0xff00) >> 8;
	pkg->crc[1] = crc & 0x00ff;
	return 0;
}

/* Compare a device-reported 16-bit job id CRC against the CRC of a pool's
 * job id string. Returns 0 on match, 1 otherwise (including NULL id). */
static int job_idcmp(uint8_t *job_id, char *pool_job_id)
{
	int job_id_len;
	unsigned short crc, crc_expect;

	if (!pool_job_id)
		return 1;

	job_id_len = strlen(pool_job_id);
	crc_expect = crc16((unsigned char *)pool_job_id, job_id_len);

	crc = job_id[0] << 8 | job_id[1];

	if (crc_expect == crc)
		return 0;

	applog(LOG_DEBUG, "Avalon2: job_id not match! [%04x:%04x (%s)]",
	       crc, crc_expect, pool_job_id);

	return 1;
}

/* Return (and latch) the all-time maximum temperature across all sensors.
 * Note this never decreases: temp_max is sticky by design of the loop. */
static inline int get_temp_max(struct avalon2_info *info)
{
	int i;
	for (i = 0; i < 2 * AVA2_DEFAULT_MODULARS; i++) {
		if (info->temp_max <= info->temp[i])
			info->temp_max = info->temp[i];
	}
	return info->temp_max;
}

/* Return the current (non-latched) maximum temperature across sensors. */
static inline int get_current_temp_max(struct avalon2_info *info)
{
	int i;
	int t = info->temp[0];

	for (i = 1; i < 2 * AVA2_DEFAULT_MODULARS; i++) {
		if (info->temp[i] > t)
			t = info->temp[i];
	}
	return t;
}

/* http://www.onsemi.com/pub_link/Collateral/ADP3208D.PDF */
static inline uint32_t encode_voltage(uint32_t v)
{
	return rev8((0x78 - v / 125) << 1 | 1) << 8;
}

static inline uint32_t decode_voltage(uint32_t v)
{
	return (0x78 - (rev8(v >> 8) >> 1)) * 125;
}

/* Map the current maximum temperature to a fan percentage (linear ramp
 * between 60C and 80C) and derive the PWM value, unless fixed mode is set. */
static void adjust_fan(struct avalon2_info *info)
{
	int t;

	if (opt_avalon2_fan_fixed == FAN_FIXED) {
		info->fan_pct = opt_avalon2_fan_min;
		info->fan_pwm = get_fan_pwm(info->fan_pct);
		return;
	}

	t = get_current_temp_max(info);

	/* TODO: Add options for temperature range and fan adjust function */
	if (t < 60)
		info->fan_pct = opt_avalon2_fan_min;
	else if (t > 80)
		info->fan_pct = opt_avalon2_fan_max;
	else
		info->fan_pct = (t - 60) * (opt_avalon2_fan_max - opt_avalon2_fan_min) / 20 + opt_avalon2_fan_min;

	info->fan_pwm = get_fan_pwm(info->fan_pct);
}

/* Return 1 when the given module's MM firmware version is <= 1404. */
static inline int mm_cmp_1404(struct avalon2_info *info, int modular)
{
	/* <= 1404 return 1 */
	char *mm_1404 = "1404";
	return strncmp(info->mm_version[modular] + 2, mm_1404, 4) > 0 ? 0 : 1;
}

/* Return 1 when ANY enabled module's MM firmware version is <= 1406. */
static inline int mm_cmp_1406(struct avalon2_info *info)
{
	/* <= 1406 return 1 */
	char *mm_1406 = "1406";
	int i;
	for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) {
		if (info->enable[i] &&
		    strncmp(info->mm_version[i] + 2, mm_1406, 4) <= 0)
			return 1;
	}

	return 0;
}

/* Validate and dispatch one received packet: verify header and CRC, then
 * handle nonces (submitting against the current or previous stratum job)
 * and status packets (temps, fans, frequency, voltage, work counters).
 * Returns the packet type, or AVA2_GETS_ERROR on a bad packet. */
static int decode_pkg(struct thr_info *thr, struct avalon2_ret *ar, uint8_t *pkg)
{
	struct cgpu_info *avalon2 = thr->cgpu;
	struct avalon2_info *info = avalon2->device_data;
	struct pool *pool, *real_pool, *pool_stratum = &info->pool;

	unsigned int expected_crc;
	unsigned int actual_crc;
	uint32_t nonce, nonce2, miner, modular_id;
	int pool_no;
	uint8_t job_id[4];
	int tmp;

	int type = AVA2_GETS_ERROR;

	memcpy((uint8_t *)ar, pkg, AVA2_READ_SIZE);

	if (ar->head[0] == AVA2_H1 && ar->head[1] == AVA2_H2) {
		expected_crc = crc16(ar->data, AVA2_P_DATA_LEN);
		/* NOTE(review): crc bytes are read low-first here, the
		 * reverse of avalon2_init_pkg's packing — presumably matches
		 * the device's reply framing; verify against firmware. */
		actual_crc = (ar->crc[0] & 0xff) |
			((ar->crc[1] & 0xff) << 8);

		type = ar->type;
		applog(LOG_DEBUG, "Avalon2: %d: expected crc(%04x), actual_crc(%04x)",
		       type, expected_crc, actual_crc);
		if (expected_crc != actual_crc)
			goto out;

		memcpy(&modular_id, ar->data + 28, 4);
		modular_id = be32toh(modular_id);
		if (modular_id > 3)
			modular_id = 0;

		switch(type) {
		case AVA2_P_NONCE:
			applog(LOG_DEBUG, "Avalon2: AVA2_P_NONCE");
			memcpy(&miner, ar->data + 0, 4);
			memcpy(&pool_no, ar->data + 4, 4);
			memcpy(&nonce2, ar->data + 8, 4);
			/* Calc time ar->data + 12 */
			memcpy(&nonce, ar->data + 16, 4);
			memcpy(job_id, ar->data + 20, 4);

			miner = be32toh(miner);
			pool_no = be32toh(pool_no);
			/* NOTE(review): modular_id is compared against
			 * AVA2_DEFAULT_MINERS (not a modular count) — matches
			 * upstream but looks suspect; confirm. */
			if (miner >= AVA2_DEFAULT_MINERS ||
			    modular_id >= AVA2_DEFAULT_MINERS ||
			    pool_no >= total_pools ||
			    pool_no < 0) {
				applog(LOG_DEBUG, "Avalon2: Wrong miner/pool/id no %d,%d,%d", miner, pool_no, modular_id);
				break;
			} else
				info->matching_work[modular_id * AVA2_DEFAULT_MINERS + miner]++;
			nonce2 = be32toh(nonce2);
			nonce = be32toh(nonce);
			/* Device reports nonces offset by 0x180. */
			nonce -= 0x180;

			applog(LOG_DEBUG, "Avalon2: Found! %d: (%08x) (%08x)",
			       pool_no, nonce2, nonce);

			real_pool = pool = pools[pool_no];
			if (job_idcmp(job_id, pool->swork.job_id)) {
				if (!job_idcmp(job_id, pool_stratum->swork.job_id)) {
					applog(LOG_DEBUG, "Avalon2: Match to previous stratum! (%s)", pool_stratum->swork.job_id);
					pool = pool_stratum;
				} else {
					applog(LOG_ERR, "Avalon2: Cannot match to any stratum! (%s)", pool->swork.job_id);
					break;
				}
			}

			if (submit_nonce2_nonce(thr, pool, real_pool, nonce2, nonce, 0))
				info->failing = false;
			break;
		case AVA2_P_STATUS:
			applog(LOG_DEBUG, "Avalon2: AVA2_P_STATUS");
			/* Two 16-bit temps packed in one 32-bit word. */
			memcpy(&tmp, ar->data, 4);
			tmp = be32toh(tmp);
			info->temp[0 + modular_id * 2] = tmp >> 16;
			info->temp[1 + modular_id * 2] = tmp & 0xffff;

			memcpy(&tmp, ar->data + 4, 4);
			tmp = be32toh(tmp);
			info->fan[0 + modular_id * 2] = tmp >> 16;
			info->fan[1 + modular_id * 2] = tmp & 0xffff;

			memcpy(&(info->get_frequency[modular_id]), ar->data + 8, 4);
			memcpy(&(info->get_voltage[modular_id]), ar->data + 12, 4);
			memcpy(&(info->local_work[modular_id]), ar->data + 16, 4);
			memcpy(&(info->hw_work[modular_id]), ar->data + 20, 4);
			memcpy(&(info->power_good[modular_id]), ar->data + 24, 4);

			info->get_frequency[modular_id] = be32toh(info->get_frequency[modular_id]);
			if (info->dev_type[modular_id] == AVA2_ID_AVA3)
				info->get_frequency[modular_id] = info->get_frequency[modular_id] * 768 / 65;
			info->get_voltage[modular_id] = be32toh(info->get_voltage[modular_id]);
			info->local_work[modular_id] = be32toh(info->local_work[modular_id]);
			info->hw_work[modular_id] = be32toh(info->hw_work[modular_id]);

			info->local_works[modular_id] += info->local_work[modular_id];
			info->hw_works[modular_id] += info->hw_work[modular_id];

			info->get_voltage[modular_id] = decode_voltage(info->get_voltage[modular_id]);
			info->power_good[modular_id] = info->power_good[modular_id] >> 24;

			avalon2->temp = get_temp_max(info);
			break;
		case AVA2_P_ACKDETECT:
			applog(LOG_DEBUG, "Avalon2: AVA2_P_ACKDETECT");
			break;
		case AVA2_P_ACK:
			applog(LOG_DEBUG, "Avalon2: AVA2_P_ACK");
			break;
		case AVA2_P_NAK:
			applog(LOG_DEBUG, "Avalon2: AVA2_P_NAK");
			break;
		default:
			applog(LOG_DEBUG, "Avalon2: Unknown response");
			type = AVA2_GETS_ERROR;
			break;
		}
	}

out:
	return type;
}

/* Read exactly AVA2_READ_SIZE bytes from the device into buf, verifying
 * the packet header. Returns AVA2_GETS_OK, AVA2_GETS_TIMEOUT when reads
 * dry up short of a full packet, or AVA2_GETS_ERROR on USB/framing error.
 * NOTE(review): the outer while (true) never loops — every path returns. */
static inline int avalon2_gets(struct cgpu_info *avalon2, uint8_t *buf)
{
	int read_amount = AVA2_READ_SIZE, ret = 0;
	uint8_t *buf_back = buf;

	while (true) {
		int err;

		do {
			memset(buf, 0, read_amount);
			err = usb_read(avalon2, (char *)buf, read_amount, &ret, C_AVA2_READ);
			if (unlikely(err && err != LIBUSB_ERROR_TIMEOUT)) {
				applog(LOG_ERR, "Avalon2: Error %d on read in avalon_gets got %d", err, ret);
				return AVA2_GETS_ERROR;
			}
			if (likely(ret >= read_amount)) {
				if (unlikely(buf_back[0] != AVA2_H1 || buf_back[1] != AVA2_H2))
					return AVA2_GETS_ERROR;
				return AVA2_GETS_OK;
			}
			buf += ret;
			read_amount -= ret;
		} while (ret > 0);

		return AVA2_GETS_TIMEOUT;
	}
}

/* Write one packet to the device; flags the device as gone (usb_nodev) on
 * a short or failed write. */
static int avalon2_send_pkg(struct cgpu_info *avalon2, const struct avalon2_pkg *pkg)
{
	int err, amount;
	uint8_t buf[AVA2_WRITE_SIZE];
	int nr_len = AVA2_WRITE_SIZE;

	if (unlikely(avalon2->usbinfo.nodev))
		return AVA2_SEND_ERROR;

	memcpy(buf, pkg, AVA2_WRITE_SIZE);
	err = usb_write(avalon2, (char *)buf, nr_len, &amount, C_AVA2_WRITE);
	if (err || amount != nr_len) {
		applog(LOG_DEBUG, "Avalon2: Send(%d)!", amount);
		usb_nodev(avalon2);
		return AVA2_SEND_ERROR;
	}

	return AVA2_SEND_OK;
}

/* (Continues past this view: sends the current stratum job to the device
 * as a series of packets.) */
static void avalon2_stratum_pkgs(struct cgpu_info *avalon2, struct pool *pool)
{
	const int merkle_offset = 36;
	struct avalon2_pkg pkg;
	int i, a, b, tmp;
	unsigned char target[32];
	int job_id_len, n2size;
	unsigned short crc;
	int diff;

	/* Cap maximum diff in order to still get shares */
	diff = pool->swork.diff;
	if (diff > 64)
		diff = 64;
	else if (unlikely(diff < 1))
		diff = 1;

	/* Send out
the first stratum message STATIC */ + applog(LOG_DEBUG, "Avalon2: Pool stratum message STATIC: %d, %d, %d, %d, %d", + pool->coinbase_len, + pool->nonce2_offset, + pool->n2size, + merkle_offset, + pool->merkles); + memset(pkg.data, 0, AVA2_P_DATA_LEN); + tmp = be32toh(pool->coinbase_len); + memcpy(pkg.data, &tmp, 4); + + tmp = be32toh(pool->nonce2_offset); + memcpy(pkg.data + 4, &tmp, 4); + + n2size = pool->n2size >= 4 ? 4 : pool->n2size; + tmp = be32toh(n2size); + memcpy(pkg.data + 8, &tmp, 4); + + tmp = be32toh(merkle_offset); + memcpy(pkg.data + 12, &tmp, 4); + + tmp = be32toh(pool->merkles); + memcpy(pkg.data + 16, &tmp, 4); + + tmp = be32toh(diff); + memcpy(pkg.data + 20, &tmp, 4); + + tmp = be32toh((int)pool->pool_no); + memcpy(pkg.data + 24, &tmp, 4); + + avalon2_init_pkg(&pkg, AVA2_P_STATIC, 1, 1); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + + set_target(target, pool->sdiff); + memcpy(pkg.data, target, 32); + if (opt_debug) { + char *target_str; + target_str = bin2hex(target, 32); + applog(LOG_DEBUG, "Avalon2: Pool stratum target: %s", target_str); + free(target_str); + } + avalon2_init_pkg(&pkg, AVA2_P_TARGET, 1, 1); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + + applog(LOG_DEBUG, "Avalon2: Pool stratum message JOBS_ID: %s", + pool->swork.job_id); + memset(pkg.data, 0, AVA2_P_DATA_LEN); + + job_id_len = strlen(pool->swork.job_id); + crc = crc16((unsigned char *)pool->swork.job_id, job_id_len); + pkg.data[0] = (crc & 0xff00) >> 8; + pkg.data[1] = crc & 0x00ff; + avalon2_init_pkg(&pkg, AVA2_P_JOB_ID, 1, 1); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + + if (pool->coinbase_len > AVA2_P_COINBASE_SIZE) { + int coinbase_len_posthash, coinbase_len_prehash; + uint8_t coinbase_prehash[32]; + coinbase_len_prehash = pool->nonce2_offset - (pool->nonce2_offset % SHA256_BLOCK_SIZE); + coinbase_len_posthash = pool->coinbase_len - coinbase_len_prehash; + sha256_prehash(pool->coinbase, coinbase_len_prehash, coinbase_prehash); + + a = 
(coinbase_len_posthash / AVA2_P_DATA_LEN) + 1; + b = coinbase_len_posthash % AVA2_P_DATA_LEN; + memcpy(pkg.data, coinbase_prehash, 32); + avalon2_init_pkg(&pkg, AVA2_P_COINBASE, 1, a + (b ? 1 : 0)); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + applog(LOG_DEBUG, "Avalon2: Pool stratum message modified COINBASE: %d %d", a, b); + for (i = 1; i < a; i++) { + memcpy(pkg.data, pool->coinbase + coinbase_len_prehash + i * 32 - 32, 32); + avalon2_init_pkg(&pkg, AVA2_P_COINBASE, i + 1, a + (b ? 1 : 0)); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + } + if (b) { + memset(pkg.data, 0, AVA2_P_DATA_LEN); + memcpy(pkg.data, pool->coinbase + coinbase_len_prehash + i * 32 - 32, b); + avalon2_init_pkg(&pkg, AVA2_P_COINBASE, i + 1, i + 1); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + } + } else { + a = pool->coinbase_len / AVA2_P_DATA_LEN; + b = pool->coinbase_len % AVA2_P_DATA_LEN; + applog(LOG_DEBUG, "Avalon2: Pool stratum message COINBASE: %d %d", a, b); + for (i = 0; i < a; i++) { + memcpy(pkg.data, pool->coinbase + i * 32, 32); + avalon2_init_pkg(&pkg, AVA2_P_COINBASE, i + 1, a + (b ? 
1 : 0)); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + } + if (b) { + memset(pkg.data, 0, AVA2_P_DATA_LEN); + memcpy(pkg.data, pool->coinbase + i * 32, b); + avalon2_init_pkg(&pkg, AVA2_P_COINBASE, i + 1, i + 1); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + } + } + + + b = pool->merkles; + applog(LOG_DEBUG, "Avalon2: Pool stratum message MERKLES: %d", b); + for (i = 0; i < b; i++) { + memset(pkg.data, 0, AVA2_P_DATA_LEN); + memcpy(pkg.data, pool->swork.merkle_bin[i], 32); + avalon2_init_pkg(&pkg, AVA2_P_MERKLES, i + 1, b); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + } + + applog(LOG_DEBUG, "Avalon2: Pool stratum message HEADER: 4"); + for (i = 0; i < 4; i++) { + memset(pkg.data, 0, AVA2_P_HEADER); + memcpy(pkg.data, pool->header_bin + i * 32, 32); + avalon2_init_pkg(&pkg, AVA2_P_HEADER, i + 1, 4); + if (avalon2_send_pkg(avalon2, &pkg)) + return; + } +} + +static void avalon2_initialise(struct cgpu_info *avalon2) +{ + uint32_t ava2_data[2] = { PL2303_VALUE_LINE0, PL2303_VALUE_LINE1 }; + int interface; + + if (avalon2->usbinfo.nodev) + return; + + interface = usb_interface(avalon2); + // Set Data Control + usb_transfer(avalon2, PL2303_VENDOR_OUT, PL2303_REQUEST_VENDOR, 8, + interface, C_VENDOR); + if (avalon2->usbinfo.nodev) + return; + + usb_transfer(avalon2, PL2303_VENDOR_OUT, PL2303_REQUEST_VENDOR, 9, + interface, C_VENDOR); + + if (avalon2->usbinfo.nodev) + return; + + // Set Line Control + usb_transfer_data(avalon2, PL2303_CTRL_OUT, PL2303_REQUEST_LINE, PL2303_VALUE_LINE, + interface, ava2_data, PL2303_VALUE_LINE_SIZE, C_SETLINE); + if (avalon2->usbinfo.nodev) + return; + + // Vendor + usb_transfer(avalon2, PL2303_VENDOR_OUT, PL2303_REQUEST_VENDOR, PL2303_VALUE_VENDOR, + interface, C_VENDOR); + + if (avalon2->usbinfo.nodev) + return; + + // Set More Line Control ? 
+ usb_transfer(avalon2, PL2303_CTRL_OUT, PL2303_REQUEST_CTRL, 3, interface, C_SETLINE); +} + +static struct cgpu_info *avalon2_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct avalon2_info *info; + int ackdetect; + int err, amount; + int tmp, i, j, modular[AVA2_DEFAULT_MODULARS] = {}; + char mm_version[AVA2_DEFAULT_MODULARS][16]; + + struct cgpu_info *avalon2 = usb_alloc_cgpu(&avalon2_drv, 1); + struct avalon2_pkg detect_pkg; + struct avalon2_ret ret_pkg; + + if (!usb_init(avalon2, dev, found)) { + applog(LOG_ERR, "Avalon2 failed usb_init"); + avalon2 = usb_free_cgpu(avalon2); + return NULL; + } + avalon2_initialise(avalon2); + + for (j = 0; j < 2; j++) { + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + strcpy(mm_version[i], AVA2_MM_VERNULL); + /* Send out detect pkg */ + memset(detect_pkg.data, 0, AVA2_P_DATA_LEN); + tmp = be32toh(i); + memcpy(detect_pkg.data + 28, &tmp, 4); + + avalon2_init_pkg(&detect_pkg, AVA2_P_DETECT, 1, 1); + avalon2_send_pkg(avalon2, &detect_pkg); + err = usb_read(avalon2, (char *)&ret_pkg, AVA2_READ_SIZE, &amount, C_AVA2_READ); + if (err < 0 || amount != AVA2_READ_SIZE) { + applog(LOG_DEBUG, "%s %d: Avalon2 failed usb_read with err %d amount %d", + avalon2->drv->name, avalon2->device_id, err, amount); + continue; + } + ackdetect = ret_pkg.type; + applog(LOG_DEBUG, "Avalon2 Detect ID[%d]: %d", i, ackdetect); + if (ackdetect != AVA2_P_ACKDETECT && modular[i] == 0) + continue; + modular[i] = 1; + memcpy(mm_version[i], ret_pkg.data, 15); + mm_version[i][15] = '\0'; + } + } + if (!modular[0] && !modular[1] && !modular[2] && !modular[3]) { + applog(LOG_DEBUG, "Not an Avalon2 device"); + usb_uninit(avalon2); + usb_free_cgpu(avalon2); + return NULL; + } + + /* We have a real Avalon! 
*/ + avalon2->threads = AVA2_MINER_THREADS; + add_cgpu(avalon2); + + update_usb_stats(avalon2); + + applog(LOG_INFO, "%s %d: Found at %s", avalon2->drv->name, avalon2->device_id, + avalon2->device_path); + + avalon2->device_data = calloc(sizeof(struct avalon2_info), 1); + if (unlikely(!(avalon2->device_data))) + quit(1, "Failed to calloc avalon2_info"); + + info = avalon2->device_data; + + info->fan_pwm = get_fan_pwm(AVA2_DEFAULT_FAN_PWM); + info->temp_max = 0; + + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + strcpy(info->mm_version[i], mm_version[i]); + info->modulars[i] = modular[i]; /* Enable modular */ + info->enable[i] = modular[i]; + info->dev_type[i] = AVA2_ID_AVAX; + + if (!strncmp((char *)&(info->mm_version[i]), AVA2_FW2_PREFIXSTR, 2)) { + info->dev_type[i] = AVA2_ID_AVA2; + info->set_voltage = AVA2_DEFAULT_VOLTAGE_MIN; + info->set_frequency = AVA2_DEFAULT_FREQUENCY; + } + if (!strncmp((char *)&(info->mm_version[i]), AVA2_FW3_PREFIXSTR, 2)) { + info->dev_type[i] = AVA2_ID_AVA3; + info->set_voltage = AVA2_AVA3_VOLTAGE; + info->set_frequency = AVA2_AVA3_FREQUENCY; + } + } + + if (!opt_avalon2_voltage_min) + opt_avalon2_voltage_min = opt_avalon2_voltage_max = info->set_voltage; + if (!opt_avalon2_freq_min) + opt_avalon2_freq_min = opt_avalon2_freq_max = info->set_frequency; + + return avalon2; +} + +static inline void avalon2_detect(bool __maybe_unused hotplug) +{ + usb_detect(&avalon2_drv, avalon2_detect_one); +} + +static bool avalon2_prepare(struct thr_info *thr) +{ + struct cgpu_info *avalon2 = thr->cgpu; + struct avalon2_info *info = avalon2->device_data; + + cglock_init(&info->pool.data_lock); + + return true; +} + +static int polling(struct thr_info *thr, struct cgpu_info *avalon2, struct avalon2_info *info) +{ + struct avalon2_pkg send_pkg; + struct avalon2_ret ar; + int i, tmp; + + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if (info->modulars[i] && info->enable[i]) { + uint8_t result[AVA2_READ_SIZE]; + int ret; + + 
cgsleep_ms(opt_avalon2_polling_delay); + memset(send_pkg.data, 0, AVA2_P_DATA_LEN); + + tmp = be32toh(info->led_red[i]); /* RED LED */ + memcpy(send_pkg.data + 12, &tmp, 4); + + tmp = be32toh(i); /* ID */ + memcpy(send_pkg.data + 28, &tmp, 4); + if (info->led_red[i] && mm_cmp_1404(info, i)) { + avalon2_init_pkg(&send_pkg, AVA2_P_TEST, 1, 1); + avalon2_send_pkg(avalon2, &send_pkg); + info->enable[i] = 0; + continue; + } else + avalon2_init_pkg(&send_pkg, AVA2_P_POLLING, 1, 1); + + avalon2_send_pkg(avalon2, &send_pkg); + ret = avalon2_gets(avalon2, result); + if (ret == AVA2_GETS_OK) + decode_pkg(thr, &ar, result); + } + } + + return 0; +} + +static void copy_pool_stratum(struct avalon2_info *info, struct pool *pool) +{ + int i; + int merkles = pool->merkles; + size_t coinbase_len = pool->coinbase_len; + struct pool *pool_stratum = &info->pool; + + if (!job_idcmp((unsigned char *)pool->swork.job_id, pool_stratum->swork.job_id)) + return; + + cg_wlock(&pool_stratum->data_lock); + free(pool_stratum->swork.job_id); + free(pool_stratum->nonce1); + free(pool_stratum->coinbase); + + align_len(&coinbase_len); + pool_stratum->coinbase = calloc(coinbase_len, 1); + if (unlikely(!pool_stratum->coinbase)) + quit(1, "Failed to calloc pool_stratum coinbase in avalon2"); + memcpy(pool_stratum->coinbase, pool->coinbase, coinbase_len); + + + for (i = 0; i < pool_stratum->merkles; i++) + free(pool_stratum->swork.merkle_bin[i]); + if (merkles) { + pool_stratum->swork.merkle_bin = realloc(pool_stratum->swork.merkle_bin, + sizeof(char *) * merkles + 1); + for (i = 0; i < merkles; i++) { + pool_stratum->swork.merkle_bin[i] = malloc(32); + if (unlikely(!pool_stratum->swork.merkle_bin[i])) + quit(1, "Failed to malloc pool_stratum swork merkle_bin"); + memcpy(pool_stratum->swork.merkle_bin[i], pool->swork.merkle_bin[i], 32); + } + } + + pool_stratum->sdiff = pool->sdiff; + pool_stratum->coinbase_len = pool->coinbase_len; + pool_stratum->nonce2_offset = pool->nonce2_offset; + 
pool_stratum->n2size = pool->n2size; + pool_stratum->merkles = pool->merkles; + + pool_stratum->swork.job_id = strdup(pool->swork.job_id); + pool_stratum->nonce1 = strdup(pool->nonce1); + + memcpy(pool_stratum->ntime, pool->ntime, sizeof(pool_stratum->ntime)); + memcpy(pool_stratum->header_bin, pool->header_bin, sizeof(pool_stratum->header_bin)); + cg_wunlock(&pool_stratum->data_lock); +} + +static void avalon2_update(struct cgpu_info *avalon2) +{ + struct avalon2_info *info = avalon2->device_data; + struct thr_info *thr = avalon2->thr[0]; + struct avalon2_pkg send_pkg; + uint32_t tmp, range, start; + struct work *work; + struct pool *pool; + + applog(LOG_DEBUG, "Avalon2: New stratum: restart: %d, update: %d", + thr->work_restart, thr->work_update); + thr->work_update = false; + thr->work_restart = false; + + work = get_work(thr, thr->id); /* Make sure pool is ready */ + discard_work(work); /* Don't leak memory */ + + pool = current_pool(); + if (!pool->has_stratum) + quit(1, "Avalon2: MM have to use stratum pool"); + + if (pool->coinbase_len > AVA2_P_COINBASE_SIZE) { + applog(LOG_INFO, "Avalon2: MM pool coinbase length(%d) is more than %d", + pool->coinbase_len, AVA2_P_COINBASE_SIZE); + if (mm_cmp_1406(info)) { + applog(LOG_ERR, "Avalon2: MM version less then 1406"); + return; + } + if ((pool->coinbase_len - pool->nonce2_offset + 64) > AVA2_P_COINBASE_SIZE) { + applog(LOG_ERR, "Avalon2: MM pool modified coinbase length(%d) is more than %d", + pool->coinbase_len - pool->nonce2_offset + 64, AVA2_P_COINBASE_SIZE); + return; + } + } + if (pool->merkles > AVA2_P_MERKLES_COUNT) { + applog(LOG_ERR, "Avalon2: MM merkles have to less then %d", AVA2_P_MERKLES_COUNT); + return; + } + if (pool->n2size < 3) { + applog(LOG_ERR, "Avalon2: MM nonce2 size have to >= 3 (%d)", pool->n2size); + return; + } + + cgtime(&info->last_stratum); + cg_rlock(&pool->data_lock); + info->pool_no = pool->pool_no; + copy_pool_stratum(info, pool); + avalon2_stratum_pkgs(avalon2, pool); + 
cg_runlock(&pool->data_lock); + + /* Configuer the parameter from outside */ + adjust_fan(info); + info->set_voltage = opt_avalon2_voltage_min; + info->set_frequency = opt_avalon2_freq_min; + + /* Set the Fan, Voltage and Frequency */ + memset(send_pkg.data, 0, AVA2_P_DATA_LEN); + + tmp = be32toh(info->fan_pwm); + memcpy(send_pkg.data, &tmp, 4); + + applog(LOG_INFO, "Avalon2: Temp max: %d, Cut off temp: %d", + get_current_temp_max(info), opt_avalon2_overheat); + if (get_current_temp_max(info) >= opt_avalon2_overheat) + tmp = encode_voltage(0); + else + tmp = encode_voltage(info->set_voltage); + tmp = be32toh(tmp); + memcpy(send_pkg.data + 4, &tmp, 4); + + tmp = be32toh(info->set_frequency); + memcpy(send_pkg.data + 8, &tmp, 4); + + /* Configure the nonce2 offset and range */ + if (pool->n2size == 3) + range = 0xffffff / (total_devices + 1); + else + range = 0xffffffff / (total_devices + 1); + start = range * (avalon2->device_id + 1); + + tmp = be32toh(start); + memcpy(send_pkg.data + 12, &tmp, 4); + + tmp = be32toh(range); + memcpy(send_pkg.data + 16, &tmp, 4); + + /* Package the data */ + avalon2_init_pkg(&send_pkg, AVA2_P_SET, 1, 1); + avalon2_send_pkg(avalon2, &send_pkg); +} + +static int64_t avalon2_scanhash(struct thr_info *thr) +{ + struct timeval current_stratum; + struct cgpu_info *avalon2 = thr->cgpu; + struct avalon2_info *info = avalon2->device_data; + int stdiff; + int64_t h; + int i; + + if (unlikely(avalon2->usbinfo.nodev)) { + applog(LOG_ERR, "%s %d: Device disappeared, shutting down thread", + avalon2->drv->name, avalon2->device_id); + return -1; + } + + /* Stop polling the device if there is no stratum in 3 minutes, network is down */ + cgtime(¤t_stratum); + if (tdiff(¤t_stratum, &(info->last_stratum)) > (double)(3.0 * 60.0)) + return 0; + + polling(thr, avalon2, info); + + stdiff = share_work_tdiff(avalon2); + if (unlikely(info->failing)) { + if (stdiff > 120) { + applog(LOG_ERR, "%s %d: No valid shares for over 2 minutes, shutting down thread", + 
avalon2->drv->name, avalon2->device_id); + return -1; + } + } else if (stdiff > 60) { + applog(LOG_ERR, "%s %d: No valid shares for over 1 minute, issuing a USB reset", + avalon2->drv->name, avalon2->device_id); + usb_reset(avalon2); + info->failing = true; + + } + + h = 0; + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + h += info->enable[i] ? (info->local_work[i] - info->hw_work[i]) : 0; + } + return h * 0xffffffff; +} + +static struct api_data *avalon2_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + struct avalon2_info *info = cgpu->device_data; + int i, j, a, b; + char buf[24]; + double hwp; + int minerindex, minercount; + + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + sprintf(buf, "ID%d MM Version", i + 1); + root = api_add_string(root, buf, (char *)&(info->mm_version[i]), false); + } + + minerindex = 0; + minercount = 0; + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if (info->dev_type[i] == AVA2_ID_AVAX) { + minerindex += AVA2_DEFAULT_MINERS; + continue; + } + + if (info->dev_type[i] == AVA2_ID_AVA2) + minercount = AVA2_DEFAULT_MINERS; + + if (info->dev_type[i] == AVA2_ID_AVA3) + minercount = AVA2_AVA3_MINERS; + + for (j = minerindex; j < (minerindex + minercount); j++) { + sprintf(buf, "Match work count%02d", j+1); + root = api_add_int(root, buf, &(info->matching_work[j]), false); + } + minerindex += AVA2_DEFAULT_MINERS; + } + + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Local works%d", i + 1); + root = api_add_int(root, buf, &(info->local_works[i]), false); + } + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Hardware error works%d", i + 1); + root = api_add_int(root, buf, &(info->hw_works[i]), false); + } + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + a = info->hw_works[i]; + b = 
info->local_works[i]; + hwp = b ? ((double)a / (double)b) : 0; + + sprintf(buf, "Device hardware error%d%%", i + 1); + root = api_add_percent(root, buf, &hwp, true); + } + for (i = 0; i < 2 * AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i/2] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Temperature%d", i + 1); + root = api_add_int(root, buf, &(info->temp[i]), false); + } + for (i = 0; i < 2 * AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i/2] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Fan%d", i + 1); + root = api_add_int(root, buf, &(info->fan[i]), false); + } + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Voltage%d", i + 1); + root = api_add_int(root, buf, &(info->get_voltage[i]), false); + } + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Frequency%d", i + 1); + root = api_add_int(root, buf, &(info->get_frequency[i]), false); + } + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Power good %02x", i + 1); + root = api_add_int(root, buf, &(info->power_good[i]), false); + } + for (i = 0; i < AVA2_DEFAULT_MODULARS; i++) { + if(info->dev_type[i] == AVA2_ID_AVAX) + continue; + sprintf(buf, "Led %02x", i + 1); + root = api_add_int(root, buf, &(info->led_red[i]), false); + } + + return root; +} + +static void avalon2_statline_before(char *buf, size_t bufsiz, struct cgpu_info *avalon2) +{ + struct avalon2_info *info = avalon2->device_data; + int temp = get_current_temp_max(info); + float volts = (float)info->set_voltage / 10000; + + tailsprintf(buf, bufsiz, "%4dMhz %2dC %3d%% %.3fV", info->set_frequency, + temp, info->fan_pct, volts); +} + +static void avalon2_shutdown(struct thr_info *thr) +{ + struct cgpu_info *avalon2 = thr->cgpu; + int interface = usb_interface(avalon2); + + usb_transfer(avalon2, PL2303_CTRL_OUT, PL2303_REQUEST_CTRL, 0, interface, C_SETLINE); 
+} + +struct device_drv avalon2_drv = { + .drv_id = DRIVER_avalon2, + .dname = "avalon2", + .name = "AV2", + .get_api_stats = avalon2_api_stats, + .get_statline_before = avalon2_statline_before, + .drv_detect = avalon2_detect, + .thread_prepare = avalon2_prepare, + .hash_work = hash_driver_work, + .flush_work = avalon2_update, + .update_work = avalon2_update, + .scanwork = avalon2_scanhash, + .thread_shutdown = avalon2_shutdown, +}; diff --git a/driver-avalon2.h b/driver-avalon2.h new file mode 100644 index 0000000..17f1c2e --- /dev/null +++ b/driver-avalon2.h @@ -0,0 +1,163 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2012-2014 Xiangfu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef _AVALON2_H_ +#define _AVALON2_H_ + +#include "util.h" +#include "fpgautils.h" + +#ifdef USE_AVALON2 + +#define AVA2_MINER_THREADS 1 + +#define AVA2_RESET_FAULT_DECISECONDS 10 +#define AVA2_IO_SPEED 115200 + +#define AVA2_DEFAULT_MODULARS 4 + +#define AVA2_PWM_MAX 0x3FF +#define AVA2_DEFAULT_FAN_PWM 15 /* % */ +#define AVA2_DEFAULT_FAN_MIN 10 +#define AVA2_DEFAULT_FAN_MAX 85 + +#define AVALON2_TEMP_OVERHEAT 98 +#define AVALON2_DEFAULT_POLLING_DELAY 20 /* ms */ + +#define AVA2_DEFAULT_VOLTAGE_MIN 6000 +#define AVA2_DEFAULT_VOLTAGE_MAX 11000 + +#define AVA2_DEFAULT_FREQUENCY_MIN 300 +#define AVA2_DEFAULT_FREQUENCY_MAX 2000 + +/* Avalon2 default values */ +#define AVA2_DEFAULT_MINERS 10 +#define AVA2_DEFAULT_VOLTAGE 10000 /* v * 10000 */ +#define AVA2_DEFAULT_FREQUENCY 1500 /* In MHs */ + +/* Avalon3 default values */ +#define AVA2_AVA3_MINERS 5 +#define AVA2_AVA3_VOLTAGE 6660 /* 0.666v */ +#define AVA2_AVA3_FREQUENCY 450 /* MHz * 11.8 = MHs: 450MHz means ~5.3GHs */ + +/* Avalon2 protocol package type */ +#define AVA2_H1 'A' 
+#define AVA2_H2 'V' + +#define AVA2_P_COINBASE_SIZE (6 * 1024) +#define AVA2_P_MERKLES_COUNT 20 + +#define AVA2_P_COUNT 39 +#define AVA2_P_DATA_LEN (AVA2_P_COUNT - 7) + +#define AVA2_P_DETECT 10 +#define AVA2_P_STATIC 11 +#define AVA2_P_JOB_ID 12 +#define AVA2_P_COINBASE 13 +#define AVA2_P_MERKLES 14 +#define AVA2_P_HEADER 15 +#define AVA2_P_POLLING 16 +#define AVA2_P_TARGET 17 +#define AVA2_P_REQUIRE 18 +#define AVA2_P_SET 19 +#define AVA2_P_TEST 20 + +#define AVA2_P_ACK 21 +#define AVA2_P_NAK 22 +#define AVA2_P_NONCE 23 +#define AVA2_P_STATUS 24 +#define AVA2_P_ACKDETECT 25 +#define AVA2_P_TEST_RET 26 +/* Avalon2 protocol package type */ + +/* Avalon2/3 firmware prefix */ +#define AVA2_FW2_PREFIXSTR "20" +#define AVA2_FW3_PREFIXSTR "33" + +#define AVA2_MM_VERNULL "NONE" + +#define AVA2_ID_AVA2 3255 +#define AVA2_ID_AVA3 3233 +#define AVA2_ID_AVAX 3200 + +enum avalon2_fan_fixed { + FAN_FIXED, + FAN_AUTO, +}; + +struct avalon2_pkg { + uint8_t head[2]; + uint8_t type; + uint8_t idx; + uint8_t cnt; + uint8_t data[32]; + uint8_t crc[2]; +}; +#define avalon2_ret avalon2_pkg + +struct avalon2_info { + struct timeval last_stratum; + struct pool pool; + int pool_no; + + int modulars[AVA2_DEFAULT_MODULARS]; + char mm_version[AVA2_DEFAULT_MODULARS][16]; + int dev_type[AVA2_DEFAULT_MODULARS]; + bool enable[AVA2_DEFAULT_MODULARS]; + + int set_frequency; + int set_voltage; + + int get_voltage[AVA2_DEFAULT_MODULARS]; + int get_frequency[AVA2_DEFAULT_MODULARS]; + int power_good[AVA2_DEFAULT_MODULARS]; + + int fan_pwm; + int fan_pct; + int temp_max; + + int fan[2 * AVA2_DEFAULT_MODULARS]; + int temp[2 * AVA2_DEFAULT_MODULARS]; + + int local_works[AVA2_DEFAULT_MODULARS]; + int hw_works[AVA2_DEFAULT_MODULARS]; + + int local_work[AVA2_DEFAULT_MODULARS]; + int hw_work[AVA2_DEFAULT_MODULARS]; + int matching_work[AVA2_DEFAULT_MINERS * AVA2_DEFAULT_MODULARS]; + + int led_red[AVA2_DEFAULT_MODULARS]; + + bool failing; +}; + +#define AVA2_WRITE_SIZE (sizeof(struct avalon2_pkg)) +#define 
AVA2_READ_SIZE AVA2_WRITE_SIZE + +#define AVA2_GETS_OK 0 +#define AVA2_GETS_TIMEOUT -1 +#define AVA2_GETS_RESTART -2 +#define AVA2_GETS_ERROR -3 + +#define AVA2_SEND_OK 0 +#define AVA2_SEND_ERROR -1 + +#define avalon2_open(devpath, baud, purge) serial_open(devpath, baud, AVA2_RESET_FAULT_DECISECONDS, purge) +#define avalon2_close(fd) close(fd) + +extern char *set_avalon2_fan(char *arg); +extern char *set_avalon2_freq(char *arg); +extern char *set_avalon2_voltage(char *arg); +extern char *set_avalon2_fixed_speed(enum avalon2_fan_fixed *f); +extern enum avalon2_fan_fixed opt_avalon2_fan_fixed; +extern int opt_avalon2_overheat; +extern int opt_avalon2_polling_delay; +#endif /* USE_AVALON2 */ +#endif /* _AVALON2_H_ */ diff --git a/driver-avalon4.c b/driver-avalon4.c new file mode 100644 index 0000000..34329ae --- /dev/null +++ b/driver-avalon4.c @@ -0,0 +1,1637 @@ +/* + * Copyright 2014 Mikeqin + * Copyright 2013-2014 Con Kolivas + * Copyright 2012-2014 Xiangfu + * Copyright 2012 Luke Dashjr + * Copyright 2012 Andrew Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include "miner.h" +#include "driver-avalon4.h" +#include "crc.h" +#include "sha2.h" +#include "hexdump.c" + +#define get_fan_pwm(v) (AVA4_PWM_MAX - (v) * AVA4_PWM_MAX / 100) + +int opt_avalon4_temp_target = AVA4_DEFAULT_TEMP_TARGET; +int opt_avalon4_overheat = AVA4_DEFAULT_TEMP_OVERHEAT; + +int opt_avalon4_fan_min = AVA4_DEFAULT_FAN_MIN; +int opt_avalon4_fan_max = AVA4_DEFAULT_FAN_MAX; + +bool opt_avalon4_autov; +int opt_avalon4_voltage_min = AVA4_DEFAULT_VOLTAGE; +int opt_avalon4_voltage_max = AVA4_DEFAULT_VOLTAGE; +int opt_avalon4_freq[3] = {AVA4_DEFAULT_FREQUENCY, + AVA4_DEFAULT_FREQUENCY, + AVA4_DEFAULT_FREQUENCY}; + +int opt_avalon4_polling_delay = AVA4_DEFAULT_POLLING_DELAY; + +int opt_avalon4_aucspeed = AVA4_AUC_SPEED; +int opt_avalon4_aucxdelay = AVA4_AUC_XDELAY; + +int opt_avalon4_ntime_offset = AVA4_DEFAULT_ASIC_COUNT; + +#define UNPACK32(x, str) \ +{ \ + *((str) + 3) = (uint8_t) ((x) ); \ + *((str) + 2) = (uint8_t) ((x) >> 8); \ + *((str) + 1) = (uint8_t) ((x) >> 16); \ + *((str) + 0) = (uint8_t) ((x) >> 24); \ +} + +static inline void sha256_prehash(const unsigned char *message, unsigned int len, unsigned char *digest) +{ + sha256_ctx ctx; + int i; + sha256_init(&ctx); + sha256_update(&ctx, message, len); + + for (i = 0; i < 8; i++) { + UNPACK32(ctx.h[i], &digest[i << 2]); + } +} + +static inline uint8_t rev8(uint8_t d) +{ + int i; + uint8_t out = 0; + + /* (from left to right) */ + for (i = 0; i < 8; i++) + if (d & (1 << i)) + out |= (1 << (7 - i)); + + return out; +} + +char *set_avalon4_fan(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to avalon4-fan"; + if (ret == 1) + val2 = val1; + + if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100 || val2 < val1) + return "Invalid value passed to avalon4-fan"; + + opt_avalon4_fan_min = val1; + opt_avalon4_fan_max = val2; + + return NULL; +} + +char *set_avalon4_freq(char *arg) +{ + char *colon1, 
*colon2; + int val1 = 0, val2 = 0, val3 = 0; + + if (!(*arg)) + return NULL; + + colon1 = strchr(arg, ':'); + if (colon1) + *(colon1++) = '\0'; + + if (*arg) { + val1 = atoi(arg); + if (val1 < AVA4_DEFAULT_FREQUENCY_MIN || val1 > AVA4_DEFAULT_FREQUENCY_MAX) + return "Invalid value1 passed to avalon4-freq"; + } + + if (colon1 && *colon1) { + colon2 = strchr(colon1, ':'); + if (colon2) + *(colon2++) = '\0'; + + if (*colon1) { + val2 = atoi(colon1); + if (val2 < AVA4_DEFAULT_FREQUENCY_MIN || val2 > AVA4_DEFAULT_FREQUENCY_MAX) + return "Invalid value2 passed to avalon4-freq"; + } + + if (colon2 && *colon2) { + val3 = atoi(colon2); + if (val3 < AVA4_DEFAULT_FREQUENCY_MIN || val3 > AVA4_DEFAULT_FREQUENCY_MAX) + return "Invalid value3 passed to avalon4-freq"; + } + } + + if (!val1) + val3 = val2 = val1 = AVA4_DEFAULT_FREQUENCY; + + if (!val2) + val3 = val2 = val1; + + if (!val3) + val3 = val2; + + opt_avalon4_freq[0] = val1; + opt_avalon4_freq[1] = val2; + opt_avalon4_freq[2] = val3; + + return NULL; +} + +char *set_avalon4_voltage(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to avalon4-voltage"; + if (ret == 1) + val2 = val1; + + if (val1 < AVA4_DEFAULT_VOLTAGE_MIN || val1 > AVA4_DEFAULT_VOLTAGE_MAX || + val2 < AVA4_DEFAULT_VOLTAGE_MIN || val2 > AVA4_DEFAULT_VOLTAGE_MAX || + val2 < val1) + return "Invalid value passed to avalon4-voltage"; + + opt_avalon4_voltage_min = val1; + opt_avalon4_voltage_max = val2; + + return NULL; +} + +static int avalon4_init_pkg(struct avalon4_pkg *pkg, uint8_t type, uint8_t idx, uint8_t cnt) +{ + unsigned short crc; + + pkg->head[0] = AVA4_H1; + pkg->head[1] = AVA4_H2; + + pkg->type = type; + pkg->opt = 0; + pkg->idx = idx; + pkg->cnt = cnt; + + crc = crc16(pkg->data, AVA4_P_DATA_LEN); + + pkg->crc[0] = (crc & 0xff00) >> 8; + pkg->crc[1] = crc & 0x00ff; + return 0; +} + +static int job_idcmp(uint8_t *job_id, char *pool_job_id) +{ + int job_id_len; + unsigned 
short crc, crc_expect; + + if (!pool_job_id) + return 1; + + job_id_len = strlen(pool_job_id); + crc_expect = crc16((unsigned char *)pool_job_id, job_id_len); + + crc = job_id[0] << 8 | job_id[1]; + + if (crc_expect == crc) + return 0; + + applog(LOG_DEBUG, "Avalon4: job_id doesn't match! [%04x:%04x (%s)]", + crc, crc_expect, pool_job_id); + + return 1; +} + +static inline int get_current_temp_max(struct avalon4_info *info) +{ + int i; + int t = info->temp[0]; + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (info->temp[i] > t) + t = info->temp[i]; + } + return t; +} + +/* http://www.onsemi.com/pub_link/Collateral/ADP3208D.PDF */ +static uint32_t encode_voltage_adp3208d(uint32_t v) +{ + return rev8((0x78 - v / 125) << 1 | 1) << 8; +} + +static uint32_t decode_voltage_adp3208d(uint32_t v) +{ + return (0x78 - (rev8(v >> 8) >> 1)) * 125; +} + +/* http://www.onsemi.com/pub/Collateral/NCP5392P-D.PDF */ +static uint32_t encode_voltage_ncp5392p(uint32_t v) +{ + if (v == 0) + return 0xff00; + + return rev8(((0x59 - (v - 5000) / 125) & 0xff) << 1 | 1) << 8; +} + +static uint32_t decode_voltage_ncp5392p(uint32_t v) +{ + if (v == 0xff00) + return 0; + + return (0x59 - (rev8(v >> 8) >> 1)) * 125 + 5000; +} + +static inline uint32_t adjust_fan(struct avalon4_info *info, int id) +{ + uint32_t pwm; + int t = info->temp[id]; + + if (t < opt_avalon4_temp_target - 10) + info->fan_pct[id] = opt_avalon4_fan_min; + else if (t > opt_avalon4_temp_target + 10 || t > opt_avalon4_overheat - 3) + info->fan_pct[id] = opt_avalon4_fan_max; + else if (t > opt_avalon4_temp_target + 1) + info->fan_pct[id] += 2; + else if (t < opt_avalon4_temp_target - 1) + info->fan_pct[id] -= 2; + + if (info->fan_pct[id] < opt_avalon4_fan_min) + info->fan_pct[id] = opt_avalon4_fan_min; + if (info->fan_pct[id] > opt_avalon4_fan_max) + info->fan_pct[id] = opt_avalon4_fan_max; + + pwm = get_fan_pwm(info->fan_pct[id]); + applog(LOG_DEBUG, "[%d], Adjust_fan: %dC-%d%%(%03x)", id, t, info->fan_pct[id], pwm); + + 
return pwm; +} + +static int decode_pkg(struct thr_info *thr, struct avalon4_ret *ar, int modular_id) +{ + struct cgpu_info *avalon4 = thr->cgpu; + struct avalon4_info *info = avalon4->device_data; + struct pool *pool, *real_pool; + struct pool *pool_stratum0 = &info->pool0; + struct pool *pool_stratum1 = &info->pool1; + struct pool *pool_stratum2 = &info->pool2; + + unsigned int expected_crc; + unsigned int actual_crc; + uint32_t nonce, nonce2, ntime, miner, chip_id, volt, tmp; + uint8_t job_id[4]; + int pool_no; + + if (ar->head[0] != AVA4_H1 && ar->head[1] != AVA4_H2) { + applog(LOG_DEBUG, "Avalon4: H1 %02x, H2 %02x", ar->head[0], ar->head[1]); + hexdump(ar->data, 32); + return 1; + } + + expected_crc = crc16(ar->data, AVA4_P_DATA_LEN); + actual_crc = (ar->crc[0] & 0xff) | ((ar->crc[1] & 0xff) << 8); + if (expected_crc != actual_crc) { + applog(LOG_DEBUG, "Avalon4: %02x: expected crc(%04x), actual_crc(%04x)", + ar->type, expected_crc, actual_crc); + return 1; + } + + switch(ar->type) { + case AVA4_P_NONCE: + applog(LOG_DEBUG, "Avalon4: AVA4_P_NONCE"); + memcpy(&miner, ar->data + 0, 4); + memcpy(&pool_no, ar->data + 4, 4); + memcpy(&nonce2, ar->data + 8, 4); + memcpy(&ntime, ar->data + 12, 4); + memcpy(&nonce, ar->data + 16, 4); + memcpy(job_id, ar->data + 20, 4); + + miner = be32toh(miner); + chip_id = (miner >> 16) & 0xffff; + miner &= 0xffff; + pool_no = be32toh(pool_no); + ntime = be32toh(ntime); + if (miner >= AVA4_DEFAULT_MINERS || + pool_no >= total_pools || pool_no < 0) { + applog(LOG_DEBUG, "Avalon4: Wrong miner/pool_no %d/%d", miner, pool_no); + break; + } else { + info->matching_work[modular_id][miner]++; + info->chipmatching_work[modular_id][miner][chip_id]++; + } + nonce2 = be32toh(nonce2); + nonce = be32toh(nonce); + nonce -= 0x4000; + + applog(LOG_DEBUG, "%s-%d-%d: Found! 
P:%d - N2:%08x N:%08x NR:%d [M:%d - MW: %d(%d,%d,%d,%d)]", + avalon4->drv->name, avalon4->device_id, modular_id, + pool_no, nonce2, nonce, ntime, + miner, info->matching_work[modular_id][miner], + info->chipmatching_work[modular_id][miner][0], + info->chipmatching_work[modular_id][miner][1], + info->chipmatching_work[modular_id][miner][2], + info->chipmatching_work[modular_id][miner][3]); + + real_pool = pool = pools[pool_no]; + if (job_idcmp(job_id, pool->swork.job_id)) { + if (!job_idcmp(job_id, pool_stratum0->swork.job_id)) { + applog(LOG_DEBUG, "Avalon4: Match to previous stratum0! (%s)", pool_stratum0->swork.job_id); + pool = pool_stratum0; + } else if (!job_idcmp(job_id, pool_stratum1->swork.job_id)) { + applog(LOG_DEBUG, "Avalon4: Match to previous stratum1! (%s)", pool_stratum1->swork.job_id); + pool = pool_stratum1; + } else if (!job_idcmp(job_id, pool_stratum2->swork.job_id)) { + applog(LOG_DEBUG, "Avalon4: Match to previous stratum2! (%s)", pool_stratum2->swork.job_id); + pool = pool_stratum2; + } else { + applog(LOG_ERR, "Avalon4: Cannot match to any stratum! 
(%s)", pool->swork.job_id); + inc_hw_errors(thr); + break; + } + } + + submit_nonce2_nonce(thr, pool, real_pool, nonce2, nonce, ntime); + break; + case AVA4_P_STATUS: + applog(LOG_DEBUG, "Avalon4: AVA4_P_STATUS"); + hexdump(ar->data, 32); + memcpy(&tmp, ar->data, 4); + tmp = be32toh(tmp); + info->temp[modular_id] = tmp; + + memcpy(&tmp, ar->data + 4, 4); + tmp = be32toh(tmp); + info->fan[modular_id] = tmp; + + memcpy(&(info->get_frequency[modular_id]), ar->data + 8, 4); + memcpy(&(info->get_voltage[modular_id]), ar->data + 12, 4); + memcpy(&(info->local_work[modular_id]), ar->data + 16, 4); + memcpy(&(info->hw_work[modular_id]), ar->data + 20, 4); + memcpy(&(info->power_good[modular_id]), ar->data + 24, 4); + + info->get_frequency[modular_id] = be32toh(info->get_frequency[modular_id]) * 3968 / 65; + info->get_voltage[modular_id] = be32toh(info->get_voltage[modular_id]); + info->local_work[modular_id] = be32toh(info->local_work[modular_id]); + info->hw_work[modular_id] = be32toh(info->hw_work[modular_id]); + info->power_good[modular_id] = be32toh(info->power_good[modular_id]); + + volt = info->get_voltage[modular_id]; + if (info->mod_type[modular_id] == AVA4_TYPE_MM40) + tmp = decode_voltage_adp3208d(volt); + if (info->mod_type[modular_id] == AVA4_TYPE_MM41) + tmp = decode_voltage_ncp5392p(volt); + info->get_voltage[modular_id] = tmp; + + info->local_works[modular_id] += info->local_work[modular_id]; + info->hw_works[modular_id] += info->hw_work[modular_id]; + + info->lw5[modular_id][info->i_1m] += info->local_work[modular_id]; + info->hw5[modular_id][info->i_1m] += info->hw_work[modular_id]; + + avalon4->temp = get_current_temp_max(info); + break; + case AVA4_P_ACKDETECT: + applog(LOG_DEBUG, "Avalon4: AVA4_P_ACKDETECT"); + break; + default: + applog(LOG_DEBUG, "Avalon4: Unknown response"); + break; + } + return 0; +} + +/* + # IIC packet format: length[1]+transId[1]+sesId[1]+req[1]+data[60] + # length: 4+len(data) + # transId: 0 + # sesId: 0 + # req: checkout the 
header file
 # data:
 #   INIT: clock_rate[4] + reserved[4] + payload[52]
 #   XFER: txSz[1]+rxSz[1]+options[1]+slaveAddr[1] + payload[56]
 */
/* Build one AUC (USB-to-IIC bridge) request in iic_pkg according to
 * iic_info->iic_op; wlen/rlen are the I2C write/read payload sizes.
 * Always returns 0. */
static int avalon4_iic_init_pkg(uint8_t *iic_pkg, struct avalon4_iic_info *iic_info, uint8_t *buf, int wlen, int rlen)
{
	memset(iic_pkg, 0, AVA4_AUC_P_SIZE);

	switch (iic_info->iic_op) {
	case AVA4_IIC_INIT:
		iic_pkg[0] = 12;	/* 4 bytes IIC header + 4 bytes speed + 4 bytes xfer delay */
		iic_pkg[3] = AVA4_IIC_INIT;
		/* aucParam[0] = bus speed, aucParam[1] = xfer delay,
		 * both serialized little-endian */
		iic_pkg[4] = iic_info->iic_param.aucParam[0] & 0xff;
		iic_pkg[5] = (iic_info->iic_param.aucParam[0] >> 8) & 0xff;
		iic_pkg[6] = (iic_info->iic_param.aucParam[0] >> 16) & 0xff;
		iic_pkg[7] = iic_info->iic_param.aucParam[0] >> 24;
		iic_pkg[8] = iic_info->iic_param.aucParam[1] & 0xff;
		iic_pkg[9] = (iic_info->iic_param.aucParam[1] >> 8) & 0xff;
		iic_pkg[10] = (iic_info->iic_param.aucParam[1] >> 16) & 0xff;
		iic_pkg[11] = iic_info->iic_param.aucParam[1] >> 24;
		break;
	case AVA4_IIC_XFER:
		iic_pkg[0] = 8 + wlen;
		iic_pkg[3] = AVA4_IIC_XFER;
		iic_pkg[4] = wlen;
		iic_pkg[5] = rlen;
		iic_pkg[7] = iic_info->iic_param.slave_addr;
		if (buf && wlen)
			memcpy(iic_pkg + 8, buf, wlen);
		break;
	case AVA4_IIC_RESET:
	case AVA4_IIC_DEINIT:
	case AVA4_IIC_INFO:
		/* Header-only requests */
		iic_pkg[0] = 4;
		iic_pkg[3] = iic_info->iic_op;
		break;

	default:
		break;
	}

	return 0;
}

/* Write a prepared AUC packet over USB and read back the response.
 * 'write'/'read' receive the transfer byte counts; on a failed write the
 * device is flagged as gone via usb_nodev(). Returns 0 on success or the
 * USB error code. */
static int avalon4_iic_xfer(struct cgpu_info *avalon4,
			    uint8_t *wbuf, int wlen, int *write,
			    uint8_t *rbuf, int rlen, int *read)
{
	int err = -1;

	if (unlikely(avalon4->usbinfo.nodev))
		goto out;

	err = usb_write(avalon4, (char *)wbuf, wlen, write, C_AVA4_WRITE);
	if (err || *write != wlen) {
		applog(LOG_DEBUG, "Avalon4: AUC xfer %d, w(%d-%d)!", err, wlen, *write);
		usb_nodev(avalon4);
		goto out;
	}

	/* Give the AUC time to clock the request out on the I2C bus */
	cgsleep_ms(opt_avalon4_aucxdelay / 4800 + 1);

	rlen += 4;		/* Add 4 bytes IIC header */
	err = usb_read(avalon4, (char *)rbuf, rlen, read, C_AVA4_READ);
	if (err || *read != rlen) {
		applog(LOG_DEBUG, "Avalon4: AUC 
xfer %d, r(%d-%d)!", err, rlen - 4, *read);
		hexdump(rbuf, rlen);
	}

	/* NOTE(review): the reported read length comes from the device's own
	 * length byte, not the USB transfer count, and is set even on a
	 * short/failed read above — confirm this is intended. */
	*read = rbuf[0] - 4;	/* Remove 4 bytes IIC header */
out:
	return err;
}

/* Reset, deinit and re-init the AUC bridge, reading back its version
 * string into 'ver'. 'ver' must hold AVA4_AUC_VER_LEN + 1 bytes (a NUL is
 * written at index AVA4_AUC_VER_LEN). Returns 0 on success, 1 on failure. */
static int avalon4_auc_init(struct cgpu_info *avalon4, char *ver)
{
	struct avalon4_iic_info iic_info;
	int err, wlen, rlen;
	uint8_t wbuf[AVA4_AUC_P_SIZE];
	uint8_t rbuf[AVA4_AUC_P_SIZE];

	if (unlikely(avalon4->usbinfo.nodev))
		return 1;

	/* Try to clean the AUC buffer */
	err = usb_read(avalon4, (char *)rbuf, AVA4_AUC_P_SIZE, &rlen, C_AVA4_READ);
	applog(LOG_DEBUG, "Avalon4: AUC usb_read %d, %d!", err, rlen);
	hexdump(rbuf, AVA4_AUC_P_SIZE);

	/* Reset */
	iic_info.iic_op = AVA4_IIC_RESET;
	rlen = 0;
	avalon4_iic_init_pkg(wbuf, &iic_info, NULL, 0, rlen);

	memset(rbuf, 0, AVA4_AUC_P_SIZE);
	err = avalon4_iic_xfer(avalon4, wbuf, AVA4_AUC_P_SIZE, &wlen, rbuf, rlen, &rlen);
	if (err) {
		applog(LOG_ERR, "Avalon4: Failed to reset Avalon USB2IIC Converter");
		return 1;
	}

	/* Deinit */
	iic_info.iic_op = AVA4_IIC_DEINIT;
	rlen = 0;
	avalon4_iic_init_pkg(wbuf, &iic_info, NULL, 0, rlen);

	memset(rbuf, 0, AVA4_AUC_P_SIZE);
	err = avalon4_iic_xfer(avalon4, wbuf, AVA4_AUC_P_SIZE, &wlen, rbuf, rlen, &rlen);
	if (err) {
		applog(LOG_ERR, "Avalon4: Failed to deinit Avalon USB2IIC Converter");
		return 1;
	}

	/* Init with the configured bus speed and transfer delay */
	iic_info.iic_op = AVA4_IIC_INIT;
	iic_info.iic_param.aucParam[0] = opt_avalon4_aucspeed;
	iic_info.iic_param.aucParam[1] = opt_avalon4_aucxdelay;
	rlen = AVA4_AUC_VER_LEN;
	avalon4_iic_init_pkg(wbuf, &iic_info, NULL, 0, rlen);

	memset(rbuf, 0, AVA4_AUC_P_SIZE);
	err = avalon4_iic_xfer(avalon4, wbuf, AVA4_AUC_P_SIZE, &wlen, rbuf, rlen, &rlen);
	if (err) {
		applog(LOG_ERR, "Avalon4: Failed to init Avalon USB2IIC Converter");
		return 1;
	}

	hexdump(rbuf, AVA4_AUC_P_SIZE);

	memcpy(ver, rbuf + 4, AVA4_AUC_VER_LEN);
	ver[AVA4_AUC_VER_LEN] = '\0';	/* caller's buffer must be LEN + 1 bytes */

	applog(LOG_DEBUG, "Avalon4: USB2IIC Converter version: %s!", ver);
	return 0;
}

static int 
avalon4_auc_getinfo(struct cgpu_info *avalon4)
{
	struct avalon4_iic_info iic_info;
	int err, wlen, rlen;
	uint8_t wbuf[AVA4_AUC_P_SIZE];
	uint8_t rbuf[AVA4_AUC_P_SIZE];
	uint8_t *pdata = rbuf + 4;
	uint16_t adc_val;
	struct avalon4_info *info = avalon4->device_data;

	iic_info.iic_op = AVA4_IIC_INFO;
	/* Device info: (9 bytes)
	 * tempadc(2), reqRdIndex, reqWrIndex,
	 * respRdIndex, respWrIndex, tx_flags, state
	 * */
	/* NOTE(review): the comment above says 9 bytes but only 7 are
	 * requested — confirm against the AUC firmware. */
	rlen = 7;
	avalon4_iic_init_pkg(wbuf, &iic_info, NULL, 0, rlen);

	memset(rbuf, 0, AVA4_AUC_P_SIZE);
	err = avalon4_iic_xfer(avalon4, wbuf, AVA4_AUC_P_SIZE, &wlen, rbuf, rlen, &rlen);
	if (err) {
		applog(LOG_ERR, "Avalon4: AUC Failed to get info ");
		return 1;
	}

	applog(LOG_DEBUG, "Avalon4: AUC tempADC(%03d), reqcnt(%d), respcnt(%d), txflag(%d), state(%d)",
	       pdata[1] << 8 | pdata[0],
	       pdata[2],
	       pdata[3],
	       pdata[5] << 8 | pdata[4],
	       pdata[6]);

	/* Raw 10-bit ADC reading, little-endian */
	adc_val = pdata[1] << 8 | pdata[0];

	/* Scale against a 3.3V reference, x10000 */
	info->auc_temp = 3.3 * adc_val * 10000 / 1023;
	return 0;
}

/* Send one MM packet to 'slave_addr' through the AUC, optionally reading a
 * response into 'ret'. Retries an empty read once; after 100 consecutive
 * transfer errors the AUC is re-initialised. Returns AVA4_SEND_OK or
 * AVA4_SEND_ERROR. */
static int avalon4_iic_xfer_pkg(struct cgpu_info *avalon4, uint8_t slave_addr,
				const struct avalon4_pkg *pkg, struct avalon4_ret *ret)
{
	struct avalon4_iic_info iic_info;
	int err, wcnt, rcnt, rlen = 0;
	uint8_t wbuf[AVA4_AUC_P_SIZE];
	uint8_t rbuf[AVA4_AUC_P_SIZE];

	struct avalon4_info *info = avalon4->device_data;

	iic_info.iic_op = AVA4_IIC_XFER;
	iic_info.iic_param.slave_addr = slave_addr;
	if (ret)
		rlen = AVA4_READ_SIZE;

	avalon4_iic_init_pkg(wbuf, &iic_info, (uint8_t *)pkg, AVA4_WRITE_SIZE, rlen);
	err = avalon4_iic_xfer(avalon4, wbuf, wbuf[0], &wcnt, rbuf, rlen, &rcnt);
	/* err == -7 with an empty read: presumably a read timeout — poll the
	 * AUC once more for the response. */
	if ((pkg->type != AVA4_P_DETECT) && err == -7 && !rcnt && rlen) {
		avalon4_iic_init_pkg(wbuf, &iic_info, NULL, 0, rlen);
		err = avalon4_iic_xfer(avalon4, wbuf, wbuf[0], &wcnt, rbuf, rlen, &rcnt);
		applog(LOG_DEBUG, "Avalon4: IIC read again!(err:%d)", err);
	}
	if (err || rcnt != rlen) {
		if (info->xfer_err_cnt++ == 100) {
			applog(LOG_DEBUG, "Avalon4: AUC xfer_err_cnt reach err = %d, rcnt = %d, rlen = %d", err, rcnt, rlen);

			cgsleep_ms(5 * 1000); /* Wait MM reset */
			avalon4_auc_init(avalon4, info->auc_version);
		}
		return AVA4_SEND_ERROR;
	}

	if (ret)
		memcpy((char *)ret, rbuf + 4, AVA4_READ_SIZE);

	info->xfer_err_cnt = 0;
	return AVA4_SEND_OK;
}

/* Broadcast a packet to all modules, retrying until the AUC accepts it.
 * Returns 0, or -1 if the USB device disappears. */
static int avalon4_send_bc_pkgs(struct cgpu_info *avalon4, const struct avalon4_pkg *pkg)
{
	int ret;

	do {
		if (unlikely(avalon4->usbinfo.nodev))
			return -1;
		ret = avalon4_iic_xfer_pkg(avalon4, AVA4_MODULE_BROADCAST, pkg, NULL);
	} while (ret != AVA4_SEND_OK);

	return 0;
}

/* Push the current stratum job to all modules as a sequence of broadcast
 * packets: STATIC config, share target, job id, coinbase (pre-hashed up to
 * the last whole SHA256 block), merkle branches and the block header. */
static void avalon4_stratum_pkgs(struct cgpu_info *avalon4, struct pool *pool)
{
	const int merkle_offset = 36;
	struct avalon4_pkg pkg;
	int i, a, b, tmp;
	unsigned char target[32];
	int job_id_len, n2size;
	unsigned short crc;

	int coinbase_len_posthash, coinbase_len_prehash;
	uint8_t coinbase_prehash[32];

	/* Send out the first stratum message STATIC */
	applog(LOG_DEBUG, "Avalon4: Pool stratum message STATIC: %d, %d, %d, %d, %d",
	       pool->coinbase_len,
	       pool->nonce2_offset,
	       pool->n2size,
	       merkle_offset,
	       pool->merkles);
	memset(pkg.data, 0, AVA4_P_DATA_LEN);
	tmp = be32toh(pool->coinbase_len);
	memcpy(pkg.data, &tmp, 4);

	tmp = be32toh(pool->nonce2_offset);
	memcpy(pkg.data + 4, &tmp, 4);

	/* The device handles at most a 4-byte nonce2 */
	n2size = pool->n2size >= 4 ? 
4 : pool->n2size;
	tmp = be32toh(n2size);
	memcpy(pkg.data + 8, &tmp, 4);

	tmp = be32toh(merkle_offset);
	memcpy(pkg.data + 12, &tmp, 4);

	tmp = be32toh(pool->merkles);
	memcpy(pkg.data + 16, &tmp, 4);

	tmp = be32toh((int)pool->swork.diff);
	memcpy(pkg.data + 20, &tmp, 4);

	tmp = be32toh((int)pool->pool_no);
	memcpy(pkg.data + 24, &tmp, 4);

	avalon4_init_pkg(&pkg, AVA4_P_STATIC, 1, 1);
	if (avalon4_send_bc_pkgs(avalon4, &pkg))
		return;

	/* Send the 256-bit share target */
	set_target(target, pool->sdiff);
	memcpy(pkg.data, target, 32);
	if (opt_debug) {
		char *target_str;
		target_str = bin2hex(target, 32);
		applog(LOG_DEBUG, "Avalon4: Pool stratum target: %s", target_str);
		free(target_str);
	}
	avalon4_init_pkg(&pkg, AVA4_P_TARGET, 1, 1);
	if (avalon4_send_bc_pkgs(avalon4, &pkg))
		return;

	memset(pkg.data, 0, AVA4_P_DATA_LEN);

	/* On the wire the job is identified by the CRC16 of its id string */
	job_id_len = strlen(pool->swork.job_id);
	crc = crc16((unsigned char *)pool->swork.job_id, job_id_len);
	applog(LOG_DEBUG, "Avalon4: Pool stratum message JOBS_ID[%04x]: %s",
	       crc, pool->swork.job_id);

	pkg.data[0] = (crc & 0xff00) >> 8;
	pkg.data[1] = crc & 0x00ff;
	avalon4_init_pkg(&pkg, AVA4_P_JOB_ID, 1, 1);
	if (avalon4_send_bc_pkgs(avalon4, &pkg))
		return;

	/* Pre-hash the coinbase up to the last whole SHA256 block before the
	 * nonce2 insertion point; only the remainder is sent in chunks. */
	coinbase_len_prehash = pool->nonce2_offset - (pool->nonce2_offset % SHA256_BLOCK_SIZE);
	coinbase_len_posthash = pool->coinbase_len - coinbase_len_prehash;
	sha256_prehash(pool->coinbase, coinbase_len_prehash, coinbase_prehash);

	/* a = number of full 32-byte chunks (+1 for the prehash packet),
	 * b = size of the trailing partial chunk */
	a = (coinbase_len_posthash / AVA4_P_DATA_LEN) + 1;
	b = coinbase_len_posthash % AVA4_P_DATA_LEN;
	memcpy(pkg.data, coinbase_prehash, 32);
	avalon4_init_pkg(&pkg, AVA4_P_COINBASE, 1, a + (b ? 1 : 0));
	if (avalon4_send_bc_pkgs(avalon4, &pkg))
		return;
	applog(LOG_DEBUG, "Avalon4: Pool stratum message modified COINBASE: %d %d", a, b);
	for (i = 1; i < a; i++) {
		memcpy(pkg.data, pool->coinbase + coinbase_len_prehash + i * 32 - 32, 32);
		avalon4_init_pkg(&pkg, AVA4_P_COINBASE, i + 1, a + (b ? 
1 : 0)); + if (avalon4_send_bc_pkgs(avalon4, &pkg)) + return; + } + if (b) { + memset(pkg.data, 0, AVA4_P_DATA_LEN); + memcpy(pkg.data, pool->coinbase + coinbase_len_prehash + i * 32 - 32, b); + avalon4_init_pkg(&pkg, AVA4_P_COINBASE, i + 1, i + 1); + if (avalon4_send_bc_pkgs(avalon4, &pkg)) + return; + } + + b = pool->merkles; + applog(LOG_DEBUG, "Avalon4: Pool stratum message MERKLES: %d", b); + for (i = 0; i < b; i++) { + memset(pkg.data, 0, AVA4_P_DATA_LEN); + memcpy(pkg.data, pool->swork.merkle_bin[i], 32); + avalon4_init_pkg(&pkg, AVA4_P_MERKLES, i + 1, b); + if (avalon4_send_bc_pkgs(avalon4, &pkg)) + return; + } + + applog(LOG_DEBUG, "Avalon4: Pool stratum message HEADER: 4"); + for (i = 0; i < 4; i++) { + memset(pkg.data, 0, AVA4_P_DATA_LEN); + memcpy(pkg.data, pool->header_bin + i * 32, 32); + avalon4_init_pkg(&pkg, AVA4_P_HEADER, i + 1, 4); + if (avalon4_send_bc_pkgs(avalon4, &pkg)) + return; + } + + avalon4_auc_getinfo(avalon4); +} + +static struct cgpu_info *avalon4_auc_detect(struct libusb_device *dev, struct usb_find_devices *found) +{ + int i; + struct avalon4_info *info; + struct cgpu_info *avalon4 = usb_alloc_cgpu(&avalon4_drv, 1); + char auc_ver[AVA4_AUC_VER_LEN]; + + if (!usb_init(avalon4, dev, found)) { + applog(LOG_ERR, "Avalon4 failed usb_init"); + avalon4 = usb_free_cgpu(avalon4); + return NULL; + } + + /* Avalon4 prefers not to use zero length packets */ + avalon4->nozlp = true; + + /* We try twice on AUC init */ + if (avalon4_auc_init(avalon4, auc_ver) && avalon4_auc_init(avalon4, auc_ver)) + return NULL; + + /* We have an Avalon4 AUC connected */ + avalon4->threads = 1; + add_cgpu(avalon4); + + update_usb_stats(avalon4); + applog(LOG_INFO, "%s-%d: Found at %s", avalon4->drv->name, avalon4->device_id, + avalon4->device_path); + + avalon4->device_data = calloc(sizeof(struct avalon4_info), 1); + if (unlikely(!(avalon4->device_data))) + quit(1, "Failed to calloc avalon4_info"); + + info = avalon4->device_data; + memcpy(info->auc_version, 
auc_ver, AVA4_AUC_VER_LEN); + info->auc_version[AVA4_AUC_VER_LEN] = '\0'; + info->auc_speed = opt_avalon4_aucspeed; + info->auc_xdelay = opt_avalon4_aucxdelay; + + info->polling_first = 1; + + info->set_voltage_broadcat = 1; + + for (i = 0; i < AVA4_DEFAULT_MODULARS; i++) { + info->enable[i] = 0; + info->mod_type[i] = AVA4_TYPE_NULL; + info->fan_pct[i] = AVA4_DEFAULT_FAN_START; + info->set_voltage[i] = opt_avalon4_voltage_min; + } + + info->enable[0] = 1; + info->mod_type[0] = AVA4_TYPE_MM40; + + info->set_frequency[0] = opt_avalon4_freq[0]; + info->set_frequency[1] = opt_avalon4_freq[1]; + info->set_frequency[2] = opt_avalon4_freq[2]; + + return avalon4; +} + +static inline void avalon4_detect(bool __maybe_unused hotplug) +{ + usb_detect(&avalon4_drv, avalon4_auc_detect); +} + +static bool avalon4_prepare(struct thr_info *thr) +{ + int i; + struct cgpu_info *avalon4 = thr->cgpu; + struct avalon4_info *info = avalon4->device_data; + + info->polling_first = 1; + + cgtime(&(info->last_fan)); + cgtime(&(info->last_5m)); + cgtime(&(info->last_1m)); + + cglock_init(&info->update_lock); + cglock_init(&info->pool0.data_lock); + cglock_init(&info->pool1.data_lock); + cglock_init(&info->pool2.data_lock); + + info->set_voltage_broadcat = 1; + + for (i = 0; i < AVA4_DEFAULT_MODULARS; i++) + info->fan_pct[i] = AVA4_DEFAULT_FAN_START; + + + return true; +} + +static void detect_modules(struct cgpu_info *avalon4) +{ + struct avalon4_info *info = avalon4->device_data; + struct thr_info *thr = avalon4->thr[0]; + + struct avalon4_pkg detect_pkg; + struct avalon4_ret ret_pkg; + uint32_t tmp; + int i, err; + + /* Detect new modules here */ + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (info->enable[i]) + continue; + + /* Send out detect pkg */ + applog(LOG_DEBUG, "%s %d: AVA4_P_DETECT ID[%d]", + avalon4->drv->name, avalon4->device_id, i); + memset(detect_pkg.data, 0, AVA4_P_DATA_LEN); + tmp = be32toh(i); /* ID */ + memcpy(detect_pkg.data + 28, &tmp, 4); + 
avalon4_init_pkg(&detect_pkg, AVA4_P_DETECT, 1, 1);
		err = avalon4_iic_xfer_pkg(avalon4, AVA4_MODULE_BROADCAST, &detect_pkg, &ret_pkg);
		if (err == AVA4_SEND_OK) {
			if (decode_pkg(thr, &ret_pkg, AVA4_MODULE_BROADCAST)) {
				applog(LOG_DEBUG, "%s %d: Should be AVA4_P_ACKDETECT(%d), but %d",
				       avalon4->drv->name, avalon4->device_id, AVA4_P_ACKDETECT, ret_pkg.type);
				continue;
			}
		}

		if (err != AVA4_SEND_OK) {
			applog(LOG_DEBUG, "%s %d: AVA4_P_DETECT: Failed AUC xfer data with err %d",
			       avalon4->drv->name, avalon4->device_id, err);
			break;
		}

		applog(LOG_DEBUG, "%s %d: Module detect ID[%d]: %d",
		       avalon4->drv->name, avalon4->device_id, i, ret_pkg.type);
		if (ret_pkg.type != AVA4_P_ACKDETECT)
			break;

		/* The ACKDETECT payload carries the module DNA followed by
		 * its version string; the version prefix selects the type. */
		cgtime(&info->elapsed[i]);
		info->enable[i] = 1;
		memcpy(info->mm_dna[i], ret_pkg.data, AVA4_MM_DNA_LEN);
		info->mm_dna[i][AVA4_MM_DNA_LEN] = '\0';
		memcpy(info->mm_version[i], ret_pkg.data + AVA4_MM_DNA_LEN, AVA4_MM_VER_LEN);
		info->mm_version[i][AVA4_MM_VER_LEN] = '\0';
		if (!strncmp((char *)&(info->mm_version[i]), AVA4_MM40_PREFIXSTR, 2))
			info->mod_type[i] = AVA4_TYPE_MM40;
		if (!strncmp((char *)&(info->mm_version[i]), AVA4_MM41_PREFIXSTR, 2))
			info->mod_type[i] = AVA4_TYPE_MM41;

		info->fan_pct[i] = AVA4_DEFAULT_FAN_START;
		info->set_voltage[i] = opt_avalon4_voltage_min;
		info->led_red[i] = 0;
		applog(LOG_NOTICE, "%s %d: New module detect! 
ID[%d]", + avalon4->drv->name, avalon4->device_id, i); + } +} + +static int polling(struct thr_info *thr, struct cgpu_info *avalon4, struct avalon4_info *info) +{ + struct avalon4_pkg send_pkg; + struct avalon4_ret ar; + int i, j, tmp, ret, decode_err = 0, do_polling = 0; + struct timeval current_fan; + int do_adjust_fan = 0; + uint32_t fan_pwm; + double device_tdiff; + + if (info->polling_first) { + cgsleep_ms(300); + info->polling_first = 0; + } + + cgtime(¤t_fan); + device_tdiff = tdiff(¤t_fan, &(info->last_fan)); + if (device_tdiff > 5.0 || device_tdiff < 0) { + cgtime(&info->last_fan); + do_adjust_fan = 1; + } + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (!info->enable[i]) + continue; + + do_polling = 1; + cgsleep_ms(opt_avalon4_polling_delay); + + memset(send_pkg.data, 0, AVA4_P_DATA_LEN); + /* Red LED */ + tmp = be32toh(info->led_red[i]); + memcpy(send_pkg.data, &tmp, 4); + + /* Adjust fan every 10 seconds*/ + if (do_adjust_fan) { + fan_pwm = adjust_fan(info, i); + fan_pwm |= 0x80000000; + tmp = be32toh(fan_pwm); + memcpy(send_pkg.data + 4, &tmp, 4); + } + + avalon4_init_pkg(&send_pkg, AVA4_P_POLLING, 1, 1); + ret = avalon4_iic_xfer_pkg(avalon4, i, &send_pkg, &ar); + if (ret == AVA4_SEND_OK) + decode_err = decode_pkg(thr, &ar, i); + + if (ret != AVA4_SEND_OK || decode_err) { + info->polling_err_cnt[i]++; + if (info->polling_err_cnt[i] >= 4) { + info->polling_err_cnt[i] = 0; + info->mod_type[i] = AVA4_TYPE_NULL; + info->enable[i] = 0; + info->get_voltage[i] = 0; + info->get_frequency[i] = 0; + info->power_good[i] = 0; + info->local_work[i] = 0; + info->local_works[i] = 0; + info->hw_work[i] = 0; + info->hw_works[i] = 0; + for (j = 0; j < 6; j++) { + info->lw5[i][j] = 0; + info->hw5[i][j] = 0; + } + + for (j = 0; j < AVA4_DEFAULT_MINERS; j++) { + info->matching_work[i][j] = 0; + info->chipmatching_work[i][j][0] = 0; + info->chipmatching_work[i][j][1] = 0; + info->chipmatching_work[i][j][2] = 0; + info->chipmatching_work[i][j][3] = 0; + } + 
applog(LOG_NOTICE, "%s %d: Module detached! ID[%d]", + avalon4->drv->name, avalon4->device_id, i); + } + } + + if (ret == AVA4_SEND_OK && !decode_err) + info->polling_err_cnt[i] = 0; + } + + if (!do_polling) + detect_modules(avalon4); + + return 0; +} + +static void copy_pool_stratum(struct pool *pool_stratum, struct pool *pool) +{ + int i; + int merkles = pool->merkles; + size_t coinbase_len = pool->coinbase_len; + + if (!pool->swork.job_id) + return; + + if (!job_idcmp((unsigned char *)pool->swork.job_id, pool_stratum->swork.job_id)) + return; + + cg_wlock(&pool_stratum->data_lock); + free(pool_stratum->swork.job_id); + free(pool_stratum->nonce1); + free(pool_stratum->coinbase); + + align_len(&coinbase_len); + pool_stratum->coinbase = calloc(coinbase_len, 1); + if (unlikely(!pool_stratum->coinbase)) + quit(1, "Failed to calloc pool_stratum coinbase in avalon4"); + memcpy(pool_stratum->coinbase, pool->coinbase, coinbase_len); + + + for (i = 0; i < pool_stratum->merkles; i++) + free(pool_stratum->swork.merkle_bin[i]); + if (merkles) { + pool_stratum->swork.merkle_bin = realloc(pool_stratum->swork.merkle_bin, + sizeof(char *) * merkles + 1); + for (i = 0; i < merkles; i++) { + pool_stratum->swork.merkle_bin[i] = malloc(32); + if (unlikely(!pool_stratum->swork.merkle_bin[i])) + quit(1, "Failed to malloc pool_stratum swork merkle_bin"); + memcpy(pool_stratum->swork.merkle_bin[i], pool->swork.merkle_bin[i], 32); + } + } + + pool_stratum->sdiff = pool->sdiff; + pool_stratum->coinbase_len = pool->coinbase_len; + pool_stratum->nonce2_offset = pool->nonce2_offset; + pool_stratum->n2size = pool->n2size; + pool_stratum->merkles = pool->merkles; + + pool_stratum->swork.job_id = strdup(pool->swork.job_id); + pool_stratum->nonce1 = strdup(pool->nonce1); + + memcpy(pool_stratum->ntime, pool->ntime, sizeof(pool_stratum->ntime)); + memcpy(pool_stratum->header_bin, pool->header_bin, sizeof(pool_stratum->header_bin)); + cg_wunlock(&pool_stratum->data_lock); +} + +static void 
avalon4_stratum_set(struct cgpu_info *avalon4, struct pool *pool, int addr, int cutoff)
{
	struct avalon4_info *info = avalon4->device_data;
	struct avalon4_pkg send_pkg;
	uint32_t tmp = 0, range, start, volt;

	info->set_frequency[0] = opt_avalon4_freq[0];
	info->set_frequency[1] = opt_avalon4_freq[1];
	info->set_frequency[2] = opt_avalon4_freq[2];

	/* Set the NTime, Voltage and Frequency */
	memset(send_pkg.data, 0, AVA4_P_DATA_LEN);

	if (opt_avalon4_ntime_offset != AVA4_DEFAULT_ASIC_COUNT) {
		tmp = opt_avalon4_ntime_offset | 0x80000000;
		tmp = be32toh(tmp);
		memcpy(send_pkg.data, &tmp, 4);
	}

	/* cutoff != 0 forces the module's voltage to 0 (overheat shutdown) */
	volt = info->set_voltage[addr];
	if (cutoff)
		volt = 0;
	if (info->mod_type[addr] == AVA4_TYPE_MM40)
		tmp = encode_voltage_adp3208d(volt);
	if (info->mod_type[addr] == AVA4_TYPE_MM41)
		tmp = encode_voltage_ncp5392p(volt);
	tmp = be32toh(tmp);
	memcpy(send_pkg.data + 4, &tmp, 4);

	/* Three frequencies packed into one word, 10 bits each */
	tmp = info->set_frequency[0] | (info->set_frequency[1] << 10) | (info->set_frequency[2] << 20);
	tmp = be32toh(tmp);
	memcpy(send_pkg.data + 8, &tmp, 4);

	/* Configure the nonce2 offset and range */
	if (pool->n2size == 3)
		range = 0xffffff / (total_devices + 1);
	else
		range = 0xffffffff / (total_devices + 1);
	start = range * (avalon4->device_id + 1);

	tmp = be32toh(start);
	memcpy(send_pkg.data + 12, &tmp, 4);

	tmp = be32toh(range);
	memcpy(send_pkg.data + 16, &tmp, 4);

	/* Package the data */
	avalon4_init_pkg(&send_pkg, AVA4_P_SET, 1, 1);
	if (addr == AVA4_MODULE_BROADCAST)
		avalon4_send_bc_pkgs(avalon4, &send_pkg);
	else
		avalon4_iic_xfer_pkg(avalon4, addr, &send_pkg, NULL);
}

/* Broadcast AVA4_P_FINISH, telling all modules the job data is complete. */
static void avalon4_stratum_finish(struct cgpu_info *avalon4)
{
	struct avalon4_pkg send_pkg;

	memset(send_pkg.data, 0, AVA4_P_DATA_LEN);
	avalon4_init_pkg(&send_pkg, AVA4_P_FINISH, 1, 1);
	avalon4_send_bc_pkgs(avalon4, &send_pkg);
}

/* Driver work-update hook: validate the current stratum pool, push the new
 * job to all modules and (re)apply their settings. */
static void avalon4_update(struct cgpu_info *avalon4)
{
	struct avalon4_info *info = avalon4->device_data;
	struct thr_info *thr = avalon4->thr[0];
	struct work *work;
	struct pool *pool;
	int coinbase_len_posthash, coinbase_len_prehash;
	int i, count = 0;

	applog(LOG_DEBUG, "Avalon4: New stratum: restart: %d, update: %d",
	       thr->work_restart, thr->work_update);
	thr->work_update = false;
	thr->work_restart = false;

	/* Step 1: Make sure pool is ready */
	work = get_work(thr, thr->id);
	discard_work(work); /* Don't leak memory */

	/* Step 2: MM protocol check */
	pool = current_pool();
	if (!pool->has_stratum)
		quit(1, "Avalon4: MM has to use stratum pools");

	coinbase_len_prehash = pool->nonce2_offset - (pool->nonce2_offset % SHA256_BLOCK_SIZE);
	coinbase_len_posthash = pool->coinbase_len - coinbase_len_prehash;

	if (coinbase_len_posthash + SHA256_BLOCK_SIZE > AVA4_P_COINBASE_SIZE) {
		applog(LOG_ERR, "Avalon4: MM pool modified coinbase length(%d) is more than %d",
		       coinbase_len_posthash + SHA256_BLOCK_SIZE, AVA4_P_COINBASE_SIZE);
		return;
	}
	if (pool->merkles > AVA4_P_MERKLES_COUNT) {
		applog(LOG_ERR, "Avalon4: MM merkles has to be less then %d", AVA4_P_MERKLES_COUNT);
		return;
	}
	if (pool->n2size < 3) {
		applog(LOG_ERR, "Avalon4: MM nonce2 size has to be >= 3 (%d)", pool->n2size);
		return;
	}

	/* Step 3: Send out stratum pkgs */
	cg_wlock(&info->update_lock);
	cg_rlock(&pool->data_lock);

	cgtime(&info->last_stratum);
	info->pool_no = pool->pool_no;
	/* Rotate the three-deep job cache before inserting the new job */
	copy_pool_stratum(&info->pool2, &info->pool1);
	copy_pool_stratum(&info->pool1, &info->pool0);
	copy_pool_stratum(&info->pool0, pool);
	avalon4_stratum_pkgs(avalon4, pool);

	cg_runlock(&pool->data_lock);
	cg_wunlock(&info->update_lock);

	/* Step 4: Try to detect new modules */
	detect_modules(avalon4);

	/* Step 5: Configure the parameter from outside */
	avalon4_stratum_set(avalon4, pool, AVA4_MODULE_BROADCAST, 0);

	if (!info->set_voltage_broadcat) {
		for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) {
			if (!info->enable[i])
				continue;
			if 
(info->set_voltage[i] == info->set_voltage[0]) + continue; + + avalon4_stratum_set(avalon4, pool, i, 0); + } + } else { + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (!info->enable[i]) + continue; + if (info->mod_type[i] == AVA4_TYPE_MM40) + continue; + + avalon4_stratum_set(avalon4, pool, i, 0); + } + } + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (!info->enable[i]) + continue; + + count++; + if (info->temp[i] < opt_avalon4_overheat) + continue; + + avalon4_stratum_set(avalon4, pool, i, 1); + } + info->mm_count = count; + + /* Step 6: Send out finish pkg */ + avalon4_stratum_finish(avalon4); +} + +static int64_t avalon4_scanhash(struct thr_info *thr) +{ + struct cgpu_info *avalon4 = thr->cgpu; + struct avalon4_info *info = avalon4->device_data; + struct timeval current; + double device_tdiff, hwp; + uint32_t a = 0, b = 0; + uint64_t h; + int i, j; + + if (unlikely(avalon4->usbinfo.nodev)) { + applog(LOG_ERR, "%s-%d: Device disappeared, shutting down thread", + avalon4->drv->name, avalon4->device_id); + return -1; + } + + /* Stop polling the device if there is no stratum in 3 minutes, network is down */ + cgtime(¤t); + if (tdiff(¤t, &(info->last_stratum)) > 180.0) + return 0; + + cg_rlock(&info->update_lock); + polling(thr, avalon4, info); + cg_runlock(&info->update_lock); + + cgtime(¤t); + device_tdiff = tdiff(¤t, &(info->last_1m)); + if (device_tdiff >= 60.0 || device_tdiff < 0) { + copy_time(&info->last_1m, ¤t); + if (info->i_1m++ >= 6) + info->i_1m = 0; + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (!info->enable[i]) + continue; + + info->lw5[i][info->i_1m] = 0; + info->hw5[i][info->i_1m] = 0; + } + } + + cgtime(¤t); + device_tdiff = tdiff(¤t, &(info->last_5m)); + if (opt_avalon4_autov && (device_tdiff > 480.0 || device_tdiff < 0)) { + copy_time(&info->last_5m, ¤t); + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (!info->enable[i]) + continue; + + a = 0; + b = 0; + for (j = 0; j < 6; j++) { + a += info->lw5[i][j]; + b += 
info->hw5[i][j]; + } + + hwp = a ? (double)b / (double)a : 0; + if (hwp > AVA4_DH_INC && (info->set_voltage[i] < info->set_voltage[0] + 125)) { + info->set_voltage[i] += 125; + applog(LOG_NOTICE, "%s %d: Automatic increase module[%d] voltage to %d", + avalon4->drv->name, avalon4->device_id, i, info->set_voltage[i]); + } + if (hwp < AVA4_DH_DEC && (info->set_voltage[i] > info->set_voltage[0] - (4 * 125))) { + info->set_voltage[i] -= 125; + applog(LOG_NOTICE, "%s %d: Automatic decrease module[%d] voltage to %d", + avalon4->drv->name, avalon4->device_id, i, info->set_voltage[i]); + } + + } + } + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (info->set_voltage[i] != info->set_voltage[0]) + break; + } + + if (i < AVA4_DEFAULT_MODULARS) + info->set_voltage_broadcat = 0; + else + info->set_voltage_broadcat = 1; + + h = 0; + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + h += info->enable[i] ? (info->local_work[i] - info->hw_work[i]) : 0; + info->local_work[i] = 0; + info->hw_work[i] = 0; + } + return h * 0xffffffffull; +} + +#define STATBUFLEN 512 +static struct api_data *avalon4_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + struct avalon4_info *info = cgpu->device_data; + int i, j; + uint32_t a,b ; + double hwp, diff; + char buf[256]; + char statbuf[AVA4_DEFAULT_MODULARS][STATBUFLEN]; + struct timeval current; + + memset(statbuf, 0, AVA4_DEFAULT_MODULARS * STATBUFLEN); + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, "Ver[%s]", info->mm_version[i]); + strcat(statbuf[i], buf); + } + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " DNA[%02x%02x%02x%02x%02x%02x%02x%02x]", + info->mm_dna[i][0], + info->mm_dna[i][1], + info->mm_dna[i][2], + info->mm_dna[i][3], + info->mm_dna[i][4], + info->mm_dna[i][5], + info->mm_dna[i][6], + info->mm_dna[i][7]); + strcat(statbuf[i], buf); + } + + for (i = 1; i < 
AVA4_DEFAULT_MODULARS; i++) { + struct timeval now; + if (info->mod_type[i] == AVA4_TYPE_NULL) + continue; + + cgtime(&now); + sprintf(buf, " Elapsed[%.0f]", tdiff(&now, &(info->elapsed[i]))); + strcat(statbuf[i], buf); + } + +#if 0 + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if (info->mod_type[i] == AVA4_TYPE_NULL) + continue; + + strcat(statbuf[i], " MW["); + for (j = 0; j < AVA4_DEFAULT_MINERS; j++) { + sprintf(buf, "%d ", info->matching_work[i][j]); + strcat(statbuf[i], buf); + } + statbuf[i][strlen(statbuf[i]) - 1] = ']'; + } +#endif + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " LW[%"PRIu64"]", info->local_works[i]); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " HW[%"PRIu64"]", info->hw_works[i]); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + a = info->hw_works[i]; + b = info->local_works[i]; + hwp = b ? ((double)a / (double)b) * 100: 0; + + sprintf(buf, " DH[%.3f%%]", hwp); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + + a = 0; + b = 0; + for (j = 0; j < 6; j++) { + a += info->lw5[i][j]; + b += info->hw5[i][j]; + } + + cgtime(¤t); + diff = tdiff(¤t, &(info->last_1m)) + 300.0; + + hwp = a ? 
(double)b / (double)a * 100 : 0; + + sprintf(buf, " GHS5m[%.2f] DH5m[%.3f%%]", ((double)a - (double)b) * 4.295 / diff, hwp); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " Temp[%d]", info->temp[i]); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " Fan[%d]", info->fan[i]); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " Vol[%.4f]", (float)info->get_voltage[i] / 10000); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " Freq[%.2f]", (float)info->get_frequency[i] / 1000); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " PG[%d]", info->power_good[i]); + strcat(statbuf[i], buf); + } + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, " Led[%d]", info->led_red[i]); + strcat(statbuf[i], buf); + } + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) { + if(info->mod_type[i] == AVA4_TYPE_NULL) + continue; + sprintf(buf, "MM ID%d", i); + root = api_add_string(root, buf, statbuf[i], true); + } + + root = api_add_int(root, "MM Count", &(info->mm_count), true); + root = api_add_bool(root, "Automatic Voltage", &opt_avalon4_autov, true); + root = api_add_string(root, "AUC VER", info->auc_version, false); + root = api_add_int(root, "AUC I2C Speed", &(info->auc_speed), true); + root = api_add_int(root, "AUC I2C XDelay", &(info->auc_xdelay), true); + root = api_add_int(root, "AUC ADC", &(info->auc_temp), true); + + return root; +} + +static char *avalon4_set_device(struct cgpu_info *avalon4, char *option, char *setting, char 
*replybuf) +{ + int val, i; + struct avalon4_info *info = avalon4->device_data; + + if (strcasecmp(option, "help") == 0) { + sprintf(replybuf, "led|fan|voltage|frequency|pdelay"); + return replybuf; + } + + if (strcasecmp(option, "pdelay") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing polling delay setting"); + return replybuf; + } + + val = atoi(setting); + if (val < 1 || val > 65535) { + sprintf(replybuf, "invalid polling delay: %d, valid range 1-65535", val); + return replybuf; + } + + opt_avalon4_polling_delay = val; + + applog(LOG_NOTICE, "%s %d: Update polling delay to: %d", + avalon4->drv->name, avalon4->device_id, val); + + return NULL; + } + + if (strcasecmp(option, "fan") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing fan value"); + return replybuf; + } + + if (set_avalon4_fan(setting)) { + sprintf(replybuf, "invalid fan value, valid range 0-100"); + return replybuf; + } + + applog(LOG_NOTICE, "%s %d: Update fan to %d-%d", + avalon4->drv->name, avalon4->device_id, + opt_avalon4_fan_min, opt_avalon4_fan_max); + + return NULL; + } + + if (strcasecmp(option, "frequency") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing frequency value"); + return replybuf; + } + + if (set_avalon4_freq(setting)) { + sprintf(replybuf, "invalid frequency value, valid range %d-%d", + AVA4_DEFAULT_FREQUENCY_MIN, AVA4_DEFAULT_FREQUENCY_MAX); + return replybuf; + } + + applog(LOG_NOTICE, "%s %d: Update frequency to %d", + avalon4->drv->name, avalon4->device_id, + (opt_avalon4_freq[0] * 4 + opt_avalon4_freq[1] * 4 + opt_avalon4_freq[2]) / 9); + + return NULL; + } + + if (strcasecmp(option, "led") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing module_id setting"); + return replybuf; + } + + val = atoi(setting); + if (val < 1 || val >= AVA4_DEFAULT_MODULARS) { + sprintf(replybuf, "invalid module_id: %d, valid range 1-%d", val, AVA4_DEFAULT_MODULARS); + return replybuf; + } + + if (!info->enable[val]) { 
+ sprintf(replybuf, "the current module was disabled %d", val); + return replybuf; + } + + info->led_red[val] = !info->led_red[val]; + + applog(LOG_NOTICE, "%s %d: Module:%d, LED: %s", + avalon4->drv->name, avalon4->device_id, + val, info->led_red[val] ? "on" : "off"); + + return NULL; + } + + if (strcasecmp(option, "voltage") == 0) { + int val_mod, val_volt, ret; + + if (!setting || !*setting) { + sprintf(replybuf, "missing voltage value"); + return replybuf; + } + + ret = sscanf(setting, "%d-%d", &val_mod, &val_volt); + if (ret != 2) { + sprintf(replybuf, "invalid voltage parameter, format: moduleid-voltage"); + return replybuf; + } + + if (val_mod < 0 || val_mod >= AVA4_DEFAULT_MODULARS || + val_volt < AVA4_DEFAULT_VOLTAGE_MIN || val_volt > AVA4_DEFAULT_VOLTAGE_MAX) { + sprintf(replybuf, "invalid module_id or voltage value, valid module_id range %d-%d, valid voltage range %d-%d", + 0, AVA4_DEFAULT_MODULARS, + AVA4_DEFAULT_VOLTAGE_MIN, AVA4_DEFAULT_VOLTAGE_MAX); + return replybuf; + } + + if (!info->enable[val_mod]) { + sprintf(replybuf, "the current module was disabled %d", val_mod); + return replybuf; + } + + info->set_voltage[val_mod] = val_volt; + + if (val_mod == AVA4_MODULE_BROADCAST) { + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) + info->set_voltage[i] = val_volt; + info->set_voltage_broadcat = 1; + } else + info->set_voltage_broadcat = 0; + + applog(LOG_NOTICE, "%s %d: Update module[%d] voltage to %d", + avalon4->drv->name, avalon4->device_id, val_mod, val_volt); + + return NULL; + } + + sprintf(replybuf, "Unknown option: %s", option); + return replybuf; +} + +static void avalon4_statline_before(char *buf, size_t bufsiz, struct cgpu_info *avalon4) +{ + struct avalon4_info *info = avalon4->device_data; + int temp = get_current_temp_max(info); + int voltsmin = AVA4_DEFAULT_VOLTAGE_MAX, voltsmax = AVA4_DEFAULT_VOLTAGE_MIN; + int fanmin = AVA4_DEFAULT_FAN_MAX, fanmax = AVA4_DEFAULT_FAN_MIN; + int i, frequency; + + for (i = 1; i < AVA4_DEFAULT_MODULARS; i++) 
{ + if (!info->enable[i]) + continue; + + if (fanmax <= info->fan_pct[i]) + fanmax = info->fan_pct[i]; + if (fanmin >= info->fan_pct[i]) + fanmin = info->fan_pct[i]; + + if (voltsmax <= info->get_voltage[i]) + voltsmax = info->get_voltage[i]; + if (voltsmin >= info->get_voltage[i]) + voltsmin = info->get_voltage[i]; + } +#if 0 + tailsprintf(buf, bufsiz, "%2dMMs %.4fV-%.4fV %4dMhz %2dC %3d%%-%3d%%", + info->mm_count, (float)voltsmin / 10000, (float)voltsmax / 10000, + (info->set_frequency[0] * 4 + info->set_frequency[1] * 4 + info->set_frequency[2]) / 9, + temp, fanmin, fanmax); +#endif + frequency = (info->set_frequency[0] * 4 + info->set_frequency[1] * 4 + info->set_frequency[2]) / 9; + tailsprintf(buf, bufsiz, "%4dMhz %2dC %3d%% %.3fV", frequency, + temp, fanmin, (float)voltsmax / 10000); +} + +struct device_drv avalon4_drv = { + .drv_id = DRIVER_avalon4, + .dname = "avalon4", + .name = "AV4", + .set_device = avalon4_set_device, + .get_api_stats = avalon4_api_stats, + .get_statline_before = avalon4_statline_before, + .drv_detect = avalon4_detect, + .thread_prepare = avalon4_prepare, + .hash_work = hash_driver_work, + .flush_work = avalon4_update, + .update_work = avalon4_update, + .scanwork = avalon4_scanhash, +}; diff --git a/driver-avalon4.h b/driver-avalon4.h new file mode 100644 index 0000000..3bc7da0 --- /dev/null +++ b/driver-avalon4.h @@ -0,0 +1,200 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2012-2014 Xiangfu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#ifndef _AVALON4_H_ +#define _AVALON4_H_ + +#include "util.h" + +#ifdef USE_AVALON4 + +#define AVA4_DEFAULT_FAN_MIN 5 /* % */ +#define AVA4_DEFAULT_FAN_MAX 85 +/* Percentage required to make sure fan starts spinning, then we can go down */ +#define AVA4_DEFAULT_FAN_START 15 + +#define AVA4_DEFAULT_TEMP_TARGET 42 +#define AVA4_DEFAULT_TEMP_OVERHEAT 65 + +#define AVA4_DEFAULT_VOLTAGE_MIN 4000 +#define AVA4_DEFAULT_VOLTAGE_MAX 9000 + +#define AVA4_DEFAULT_FREQUENCY_MIN 100 +#define AVA4_DEFAULT_FREQUENCY_MAX 1000 + +#define AVA4_DEFAULT_MODULARS 64 +#define AVA4_DEFAULT_MINERS 10 +#define AVA4_DEFAULT_ASIC_COUNT 4 + +#define AVA4_DEFAULT_VOLTAGE 6875 +#define AVA4_DEFAULT_FREQUENCY 200 +#define AVA4_DEFAULT_POLLING_DELAY 20 /* ms */ + +#define AVA4_DH_INC 0.03 +#define AVA4_DH_DEC 0.001 + +#define AVA4_PWM_MAX 0x3FF + +#define AVA4_AUC_VER_LEN 12 /* Version length: 12 (AUC-YYYYMMDD) */ +#define AVA4_AUC_SPEED 400000 +#define AVA4_AUC_XDELAY 9600 /* 4800 = 1ms in AUC (11U14) */ +#define AVA4_AUC_P_SIZE 64 + + +/* Avalon4 protocol package type from MM protocol.h + * https://github.com/Canaan-Creative/MM/blob/avalon4/firmware/protocol.h */ +#define AVA4_MM_VER_LEN 15 +#define AVA4_MM_DNA_LEN 8 +#define AVA4_H1 'C' +#define AVA4_H2 'N' + +#define AVA4_P_COINBASE_SIZE (6 * 1024 + 64) +#define AVA4_P_MERKLES_COUNT 30 + +#define AVA4_P_COUNT 40 +#define AVA4_P_DATA_LEN 32 + +/* Broadcase with block iic_write*/ +#define AVA4_P_DETECT 0x10 + +/* Broadcase With non-block iic_write*/ +#define AVA4_P_STATIC 0x11 +#define AVA4_P_JOB_ID 0x12 +#define AVA4_P_COINBASE 0x13 +#define AVA4_P_MERKLES 0x14 +#define AVA4_P_HEADER 0x15 +#define AVA4_P_TARGET 0x16 + +/* Broadcase or Address */ +#define AVA4_P_SET 0x20 +#define AVA4_P_FINISH 0x21 + +/* Have to with I2C address */ +#define AVA4_P_POLLING 0x30 +#define AVA4_P_REQUIRE 0x31 +#define AVA4_P_TEST 0x32 + +/* Back to host */ +#define AVA4_P_ACKDETECT 0x40 +#define AVA4_P_STATUS 0x41 +#define AVA4_P_NONCE 0x42 +#define 
AVA4_P_TEST_RET 0x43 + +#define AVA4_MODULE_BROADCAST 0 +/* Endof Avalon4 protocol package type */ + +#define AVA4_MM40_PREFIXSTR "40" +#define AVA4_MM41_PREFIXSTR "41" +#define AVA4_MM_VERNULL "NONE" + +#define AVA4_TYPE_MM40 40 +#define AVA4_TYPE_MM41 41 +#define AVA4_TYPE_NULL 00 + +#define AVA4_IIC_RESET 0xa0 +#define AVA4_IIC_INIT 0xa1 +#define AVA4_IIC_DEINIT 0xa2 +#define AVA4_IIC_XFER 0xa5 +#define AVA4_IIC_INFO 0xa6 + +struct avalon4_pkg { + uint8_t head[2]; + uint8_t type; + uint8_t opt; + uint8_t idx; + uint8_t cnt; + uint8_t data[32]; + uint8_t crc[2]; +}; +#define avalon4_ret avalon4_pkg + +struct avalon4_info { + cglock_t update_lock; + + int polling_first; + int polling_err_cnt[AVA4_DEFAULT_MODULARS]; + int xfer_err_cnt; + + int pool_no; + struct pool pool0; + struct pool pool1; + struct pool pool2; + + struct timeval last_fan; + struct timeval last_stratum; + + char auc_version[AVA4_AUC_VER_LEN + 1]; + int auc_speed; + int auc_xdelay; + int auc_temp; + + int mm_count; + + int set_frequency[3]; + int set_voltage[AVA4_DEFAULT_MODULARS]; + int set_voltage_broadcat; + + int mod_type[AVA4_DEFAULT_MODULARS]; + bool enable[AVA4_DEFAULT_MODULARS]; + + struct timeval elapsed[AVA4_DEFAULT_MODULARS]; + char mm_version[AVA4_DEFAULT_MODULARS][AVA4_MM_VER_LEN + 1]; + uint8_t mm_dna[AVA4_DEFAULT_MODULARS][AVA4_MM_DNA_LEN + 1]; + int get_voltage[AVA4_DEFAULT_MODULARS]; + int get_frequency[AVA4_DEFAULT_MODULARS]; + int power_good[AVA4_DEFAULT_MODULARS]; + int fan_pct[AVA4_DEFAULT_MODULARS]; + int fan[AVA4_DEFAULT_MODULARS]; + int temp[AVA4_DEFAULT_MODULARS]; + int led_red[AVA4_DEFAULT_MODULARS]; + + uint64_t local_works[AVA4_DEFAULT_MODULARS]; + uint64_t hw_works[AVA4_DEFAULT_MODULARS]; + + uint32_t local_work[AVA4_DEFAULT_MODULARS]; + uint32_t hw_work[AVA4_DEFAULT_MODULARS]; + + uint32_t lw5[AVA4_DEFAULT_MODULARS][6]; + uint32_t hw5[AVA4_DEFAULT_MODULARS][6]; + int i_1m; + struct timeval last_5m; + struct timeval last_1m; + + int 
matching_work[AVA4_DEFAULT_MODULARS][AVA4_DEFAULT_MINERS]; + int chipmatching_work[AVA4_DEFAULT_MODULARS][AVA4_DEFAULT_MINERS][4]; +}; + +struct avalon4_iic_info { + uint8_t iic_op; + union { + uint32_t aucParam[2]; + uint8_t slave_addr; + } iic_param; +}; + +#define AVA4_WRITE_SIZE (sizeof(struct avalon4_pkg)) +#define AVA4_READ_SIZE AVA4_WRITE_SIZE + +#define AVA4_SEND_OK 0 +#define AVA4_SEND_ERROR -1 + +extern char *set_avalon4_fan(char *arg); +extern char *set_avalon4_temp(char *arg); +extern char *set_avalon4_freq(char *arg); +extern char *set_avalon4_voltage(char *arg); +extern bool opt_avalon4_autov; +extern int opt_avalon4_temp_target; +extern int opt_avalon4_overheat; +extern int opt_avalon4_polling_delay; +extern int opt_avalon4_aucspeed; +extern int opt_avalon4_aucxdelay; +extern int opt_avalon4_ntime_offset; +#endif /* USE_AVALON4 */ +#endif /* _AVALON4_H_ */ diff --git a/driver-bab.c b/driver-bab.c new file mode 100644 index 0000000..1bba8e0 --- /dev/null +++ b/driver-bab.c @@ -0,0 +1,3062 @@ +/* + * Copyright 2013-2014 Andrew Smith + * Copyright 2013 bitfury + * + * BitFury GPIO code originally based on chainminer code: + * https://github.com/bfsb/chainminer + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" +#include "compat.h" +#include "miner.h" +#include "sha2.h" +#include "klist.h" +#include + +/* + * Tested on RPi running both Raspbian and Arch + * with BlackArrow BitFury V1 & V2 GPIO Controller + * with 16 chip BlackArrow BitFury boards + */ + +#ifndef LINUX +static void bab_detect(__maybe_unused bool hotplug) +{ +} +#else + +#include +#include +#include +#include +#include + +#define BAB_SPI_BUS 0 +#define BAB_SPI_CHIP 0 + +#define BAB_SPI_SPEED 96000 +#define BAB_SPI_BUFSIZ 1024 + +#define BAB_DELAY_USECS 0 +#define BAB_TRF_DELAY 0 + +#define BAB_ADDR(_n) (*((babinfo->gpio) + (_n))) + +#define BAB_INP_GPIO(_n) BAB_ADDR((_n) / 10) &= (~(7 << (((_n) % 10) * 3))) +#define BAB_OUT_GPIO(_n) BAB_ADDR((_n) / 10) |= (1 << (((_n) % 10) * 3)) +#define BAB_OUT_GPIO_V(_n, _v) BAB_ADDR((_n) / 10) |= (((_v) <= 3 ? (_v) + 4 : \ + ((_v) == 4 ? 3 : 2)) << (((_n) % 10) * 3)) + +#define BAB_GPIO_SET BAB_ADDR(7) +#define BAB_GPIO_CLR BAB_ADDR(10) +#define BAB_GPIO_LEVEL BAB_ADDR(13) + +// If the V1 test of this many chips finds no chips it will try V2 +#define BAB_V1_CHIP_TEST 32 + +//maximum number of chips per board +#define BAB_BOARDCHIPS 16 +#define BAB_MAXBUF (BAB_MAXCHIPS * 512) +#define BAB_V1_BANK 0 +//maximum number of alternative banks +#define BAB_MAXBANKS 4 +//maximum number of boards in a bank +#define BAB_BANKBOARDS 6 +//maximum number of chips on alternative bank +#define BAB_BANKCHIPS (BAB_BOARDCHIPS * BAB_BANKBOARDS) +//maximum number of chips +#define BAB_MAXCHIPS (BAB_MAXBANKS * BAB_BANKCHIPS) +#define BAB_CORES 16 +#define BAB_X_COORD 21 +#define BAB_Y_COORD 36 + +#define BAB_NOOP 0 +#define BAB_BREAK ((uint8_t *)"\04") +#define BAB_ASYNC ((uint8_t *)"\05") +#define BAB_SYNC ((uint8_t *)"\06") + +#define BAB_FFL " - from %s %s() line %d" +#define BAB_FFL_HERE __FILE__, __func__, __LINE__ +#define BAB_FFL_PASS file, func, line + +#define bab_reset(_bank, _times) _bab_reset(babcgpu, babinfo, _bank, _times) +#define bab_txrx(_item, 
_det) _bab_txrx(babcgpu, babinfo, _item, _det, BAB_FFL_HERE) +#define bab_add_buf(_item, _data) _bab_add_buf(_item, _data, sizeof(_data)-1, BAB_FFL_HERE) +#define BAB_ADD_BREAK(_item) _bab_add_buf(_item, BAB_BREAK, 1, BAB_FFL_HERE) +#define BAB_ADD_ASYNC(_item) _bab_add_buf(_item, BAB_ASYNC, 1, BAB_FFL_HERE) +#define bab_config_reg(_item, _reg, _ena) _bab_config_reg(_item, _reg, _ena, BAB_FFL_HERE) +#define bab_add_data(_item, _addr, _data, _siz) _bab_add_data(_item, _addr, (const uint8_t *)(_data), _siz, BAB_FFL_HERE) + +#define BAB_ADD_NOOPs(_item, _count) _bab_add_noops(_item, _count, BAB_FFL_HERE) + +#define BAB_ADD_MIN 4 +#define BAB_ADD_MAX 128 + +#define BAB_BASEA 4 +#define BAB_BASEB 61 +#define BAB_COUNTERS 16 +static const uint8_t bab_counters[BAB_COUNTERS] = { + 64, 64, + BAB_BASEA, BAB_BASEA+4, + BAB_BASEA+2, BAB_BASEA+2+16, + BAB_BASEA, BAB_BASEA+1, + (BAB_BASEB)%65, (BAB_BASEB+1)%65, + (BAB_BASEB+3)%65, (BAB_BASEB+3+16)%65, + (BAB_BASEB+4)%65, (BAB_BASEB+4+4)%65, + (BAB_BASEB+3+3)%65, (BAB_BASEB+3+1+3)%65 +}; + +#define BAB_W1 16 +static const uint32_t bab_w1[BAB_W1] = { + 0, 0, 0, 0xffffffff, + 0x80000000, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0x00000280 +}; + +#define BAB_W2 8 +static const uint32_t bab_w2[BAB_W2] = { + 0x80000000, 0, 0, 0, + 0, 0, 0, 0x00000100 +}; + +#define BAB_TEST_DATA 19 +static const uint32_t bab_test_data[BAB_TEST_DATA] = { + 0xb0e72d8e, 0x1dc5b862, 0xe9e7c4a6, 0x3050f1f5, + 0x8a1a6b7e, 0x7ec384e8, 0x42c1c3fc, 0x8ed158a1, + 0x8a1a6b7e, 0x6f484872, 0x4ff0bb9b, 0x12c97f07, + 0xb0e72d8e, 0x55d979bc, 0x39403296, 0x40f09e84, + 0x8a0bb7b7, 0x33af304f, 0x0b290c1a //, 0xf0c4e61f +}; + +/* + * maximum chip speed available for auto tuner + * speed/nrate/hrate/watt + * 53/ 97/ 100/ 84 + * 54/ 98/ 107/ 88 + * 55/ 99/ 115/ 93 + * 56/ 101/ 125/ 99 + */ +#define BAB_MAXSPEED 57 +#define BAB_DEFMAXSPEED 55 +#define BAB_DEFSPEED 53 +#define BAB_MINSPEED 52 +#define BAB_ABSMINSPEED 32 + +/* + * % of errors to tune the speed up or down + * 1.0 to 
10.0 should average around 5.5% errors + */ +#define BAB_TUNEUP 1.0 +#define BAB_TUNEDOWN 10.0 + +#define MIDSTATE_BYTES 32 +#define MERKLE_OFFSET 64 +#define MERKLE_BYTES 12 +#define BLOCK_HEADER_BYTES 80 + +#define MIDSTATE_UINTS (MIDSTATE_BYTES / sizeof(uint32_t)) +#define DATA_UINTS ((BLOCK_HEADER_BYTES / sizeof(uint32_t)) - 1) + +// Auto adjust +#define BAB_AUTO_REG 0 +#define BAB_AUTO_VAL 0x01 +// iclk +#define BAB_ICLK_REG 1 +#define BAB_ICLK_VAL 0x02 +// No fast clock +#define BAB_FAST_REG 2 +#define BAB_FAST_VAL 0x04 +// Divide by 2 +#define BAB_DIV2_REG 3 +#define BAB_DIV2_VAL 0x08 +// Slow Clock +#define BAB_SLOW_REG 4 +#define BAB_SLOW_VAL 0x10 +// No oclk +#define BAB_OCLK_REG 6 +#define BAB_OCLK_VAL 0x20 +// Has configured +#define BAB_CFGD_VAL 0x40 + +#define BAB_DEFCONF (BAB_AUTO_VAL | \ + BAB_ICLK_VAL | \ + BAB_DIV2_VAL | \ + BAB_SLOW_VAL) + +#define BAB_REG_CLR_FROM 7 +#define BAB_REG_CLR_TO 11 + +#define BAB_AUTO_SET(_c) ((_c) & BAB_AUTO_VAL) +#define BAB_ICLK_SET(_c) ((_c) & BAB_ICLK_VAL) +#define BAB_FAST_SET(_c) ((_c) & BAB_FAST_VAL) +#define BAB_DIV2_SET(_c) ((_c) & BAB_DIV2_VAL) +#define BAB_SLOW_SET(_c) ((_c) & BAB_SLOW_VAL) +#define BAB_OCLK_SET(_c) ((_c) & BAB_OCLK_VAL) +#define BAB_CFGD_SET(_c) ((_c) & BAB_CFGD_VAL) + +#define BAB_AUTO_BIT(_c) (BAB_AUTO_SET(_c) ? true : false) +#define BAB_ICLK_BIT(_c) (BAB_ICLK_SET(_c) ? false : true) +#define BAB_FAST_BIT(_c) (BAB_FAST_SET(_c) ? true : false) +#define BAB_DIV2_BIT(_c) (BAB_DIV2_SET(_c) ? false : true) +#define BAB_SLOW_BIT(_c) (BAB_SLOW_SET(_c) ? true : false) +#define BAB_OCLK_BIT(_c) (BAB_OCLK_SET(_c) ? 
true : false) + +#define BAB_COUNT_ADDR 0x0100 +#define BAB_W1A_ADDR 0x1000 +#define BAB_W1B_ADDR 0x1400 +#define BAB_W2_ADDR 0x1900 +#define BAB_INP_ADDR 0x3000 +#define BAB_OSC_ADDR 0x6000 +#define BAB_REG_ADDR 0x7000 + +/* + * valid: 0x01 0x03 0x07 0x0F 0x1F 0x3F 0x7F 0xFF + * max { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F, 0x00 } + * max { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00 } + * avg { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00 } + * slo { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F, 0x00 } + * min { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } + * good: 0x1F (97) 0x3F (104) 0x7F (109) 0xFF (104) + */ + +#define BAB_OSC 8 +static const uint8_t bab_osc_bits[BAB_OSC] = + { 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF }; + +static const uint8_t bab_reg_ena[4] = { 0xc1, 0x6a, 0x59, 0xe3 }; +static const uint8_t bab_reg_dis[4] = { 0x00, 0x00, 0x00, 0x00 }; + +#define BAB_NONCE_OFFSETS 3 +#define BAB_OFF_0x1C_STA 2 +#define BAB_OFF_0x1C_FIN 2 +#define BAB_OFF_OTHER_STA 0 +#define BAB_OFF_OTHER_FIN 1 + +#define BAB_EVIL_NONCE 0xe0 +#define BAB_EVIL_MASK 0xff + +static const uint32_t bab_nonce_offsets[] = {-0x800000, 0, -0x400000}; + +struct bab_work_send { + uint32_t midstate[MIDSTATE_UINTS]; + uint32_t ms3steps[MIDSTATE_UINTS]; + uint32_t merkle7; + uint32_t ntime; + uint32_t bits; +}; + +#define BAB_REPLY_NONCES 16 +struct bab_work_reply { + uint32_t nonce[BAB_REPLY_NONCES]; + uint32_t jobsel; + uint32_t spichk; +}; + +#define BAB_CHIP_MIN (sizeof(struct bab_work_reply)+16) + +#define ALLOC_WITEMS 1024 +#define LIMIT_WITEMS 0 + +// Work +typedef struct witem { + struct work *work; + struct bab_work_send chip_input; + bool ci_setup; + bool rolled; + int nonces; + struct timeval work_start; +} WITEM; + +#define ALLOC_SITEMS 8 +#define LIMIT_SITEMS 0 + +// SPI I/O +typedef struct sitem { + uint32_t siz; + uint8_t wbuf[BAB_MAXBUF]; + uint8_t rbuf[BAB_MAXBUF]; + uint32_t chip_off[BAB_MAXCHIPS+1]; + uint32_t bank_off[BAB_MAXBANKS+2]; + // WITEMs used to 
build the work + K_ITEM *witems[BAB_MAXCHIPS]; + struct timeval work_start; +} SITEM; + +#define ALLOC_RITEMS 256 +#define LIMIT_RITEMS 0 + +// Results +typedef struct ritem { + int chip; + int nonces; + uint32_t nonce[BAB_REPLY_NONCES]; + bool not_first_reply; + struct timeval when; +} RITEM; + +#define ALLOC_NITEMS 102400 +#define LIMIT_NITEMS 0 + +// Nonce History +typedef struct nitem { + struct timeval found; +} NITEM; + +#define DATAW(_item) ((WITEM *)(_item->data)) +#define DATAS(_item) ((SITEM *)(_item->data)) +#define DATAR(_item) ((RITEM *)(_item->data)) +#define DATAN(_item) ((NITEM *)(_item->data)) + +// Record the number of each band between work sends +#define BAB_DELAY_BANDS 10 +#define BAB_DELAY_BASE 0.5 +#define BAB_DELAY_STEP 0.2 + +#define BAB_CHIP_SPEEDS 6 +// less than or equal GH/s +static double chip_speed_ranges[BAB_CHIP_SPEEDS - 1] = + { 0.0, 0.8, 1.6, 2.2, 2.8 }; +// Greater than the last one above means it's the last speed +static char *chip_speed_names[BAB_CHIP_SPEEDS] = + { "Bad", "V.Slow", "Slow", "OK", "Good", "Fast" }; + +/* + * This is required to do chip tuning + * If disabled, it will simply run the chips at default speed + * unless they never return valid results + */ +#define UPDATE_HISTORY 1 + +struct bab_info { + struct thr_info spi_thr; + struct thr_info res_thr; + + pthread_mutex_t did_lock; + pthread_mutex_t nonce_lock; + + // All GPIO goes through this + volatile unsigned *gpio; + + int version; + int spifd; + int chips; + int chips_per_bank[BAB_MAXBANKS+1]; + int missing_chips_per_bank[BAB_MAXBANKS+1]; + int bank_first_chip[BAB_MAXBANKS+1]; + int bank_last_chip[BAB_MAXBANKS+1]; + int boards; + int banks; + uint32_t chip_spis[BAB_MAXCHIPS+1]; + + int reply_wait; + uint64_t reply_waits; + + cgsem_t scan_work; + cgsem_t spi_work; + cgsem_t spi_reply; + cgsem_t process_reply; + + bool disabled[BAB_MAXCHIPS]; + int total_disabled; + + struct bab_work_reply chip_results[BAB_MAXCHIPS]; + struct bab_work_reply 
chip_prev[BAB_MAXCHIPS]; + + uint8_t chip_fast[BAB_MAXCHIPS]; + uint8_t chip_conf[BAB_MAXCHIPS]; + uint8_t old_fast[BAB_MAXCHIPS]; + uint8_t old_conf[BAB_MAXCHIPS]; + uint8_t chip_bank[BAB_MAXCHIPS+1]; + + uint8_t osc[BAB_OSC]; + + /* + * Ignore errors in the first work reply since + * they may be from a previous run or random junk + * There can be >100 with just one 16 chip board + */ + uint32_t initial_ignored; + bool not_first_reply[BAB_MAXCHIPS]; + + // Stats + uint64_t core_good[BAB_MAXCHIPS][BAB_CORES]; + uint64_t core_bad[BAB_MAXCHIPS][BAB_CORES]; + uint64_t chip_spie[BAB_MAXCHIPS]; // spi errors + uint64_t chip_miso[BAB_MAXCHIPS]; // msio errors + uint64_t chip_nonces[BAB_MAXCHIPS]; + uint64_t chip_good[BAB_MAXCHIPS]; + uint64_t chip_bad[BAB_MAXCHIPS]; + uint64_t chip_ncore[BAB_MAXCHIPS][BAB_X_COORD][BAB_Y_COORD]; + + uint64_t chip_cont_bad[BAB_MAXCHIPS]; + uint64_t chip_max_bad[BAB_MAXCHIPS]; + + uint64_t discarded_e0s; + + uint64_t untested_nonces; + uint64_t tested_nonces; + + uint64_t new_nonces; + uint64_t ok_nonces; + + uint64_t nonce_offset_count[BAB_NONCE_OFFSETS]; + uint64_t total_tests; + uint64_t max_tests_per_nonce; + uint64_t total_links; + uint64_t total_proc_links; + uint64_t max_links; + uint64_t max_proc_links; + uint64_t total_work_links; + + uint64_t fail; + uint64_t fail_total_tests; + uint64_t fail_total_links; + uint64_t fail_total_work_links; + + uint64_t ign_total_tests; + uint64_t ign_total_links; + uint64_t ign_total_work_links; + + struct timeval last_sent_work; + uint64_t delay_count; + double delay_min; + double delay_max; + /* + * 0 is below band ranges + * BAB_DELAY_BANDS+1 is above band ranges + */ + uint64_t delay_bands[BAB_DELAY_BANDS+2]; + + uint64_t send_count; + double send_total; + double send_min; + double send_max; + + // Work + K_LIST *wfree_list; + K_STORE *available_work; + K_STORE *chip_work[BAB_MAXCHIPS]; + + // SPI I/O + K_LIST *sfree_list; + // Waiting to send + K_STORE *spi_list; + // Sent + K_STORE *spi_sent; 
+ + // Results + K_LIST *rfree_list; + K_STORE *res_list; + + // Nonce History + K_LIST *nfree_list; + K_STORE *good_nonces[BAB_MAXCHIPS]; + K_STORE *bad_nonces[BAB_MAXCHIPS]; + + struct timeval first_work[BAB_MAXCHIPS]; +#if UPDATE_HISTORY + uint32_t work_count[BAB_MAXCHIPS]; + struct timeval last_tune[BAB_MAXCHIPS]; + uint8_t bad_fast[BAB_MAXCHIPS]; + bool bad_msg[BAB_MAXCHIPS]; +#endif + uint64_t work_unrolled; + uint64_t work_rolled; + + // bab-options (in order) + uint8_t max_speed; + uint8_t def_speed; + uint8_t min_speed; + double tune_up; + double tune_down; + uint32_t speed_hz; + uint16_t delay_usecs; + uint64_t trf_delay; + + struct timeval last_did; + + bool initialised; +}; + +/* + * Amount of time for history + * Older items in nonce_history are discarded + * 300s / 5 minutes + */ +#define HISTORY_TIME_S 300 + +/* + * If the SPI I/O thread waits longer than this long for work + * it will report an error saying how long it's waiting + * and again every BAB_STD_WAIT_mS after that + */ +#define BAB_LONG_uS 1200000 + +/* + * If work wasn't available early enough, + * report every BAB_LONG_WAIT_mS until it is + */ +#define BAB_LONG_WAIT_mS 888 + +/* + * Some amount of time to wait for work + * before checking how long we've waited + */ +#define BAB_STD_WAIT_mS 888 + +/* + * How long to wait for the ioctl() to complete (per BANK) + * This is a failsafe in case the ioctl() fails + * since bab_txrx() will already post a wakeup when it completes + * V1 is set to this x 2 + * V2 is set to this x active banks + */ +#define BAB_REPLY_WAIT_mS 160 + +/* + * Work items older than this should not expect results + * It has to allow for the result buffer returned with the next result + * 0.75GH/s takes 5.727s to do a full nonce range + * If HW is too high, consider increasing this to see if work is being + * expired too early (due to slow chips) + */ +#define BAB_WORK_EXPIRE_mS 7800 + +// Don't send work more often than this +#define BAB_EXPECTED_WORK_DELAY_mS 899 + +/* 
+ * If a chip only has bad results after this time limit in seconds, + * then switch it down to min_speed + */ +#define BAB_BAD_TO_MIN (HISTORY_TIME_S + 10) + +/* + * Also, just to be sure it's actually mining, it must have got this + * many bad results before considering disabling it + */ +#define BAB_BAD_COUNT 100 + +/* + * If a chip only has bad results after this time limit in seconds, + * then disable it + * A chip only returning bad results will use a lot more CPU than + * an ok chip since all results will be tested against all unexpired + * work that's been sent to the chip + */ +#define BAB_BAD_DEAD (BAB_BAD_TO_MIN * 2) + +/* + * Maximum bab_queue_full() will roll work if it is allowed to + * Since work can somtimes (rarely) queue up with many chips, + * limit it to avoid it getting too much range in the pending work + */ +#define BAB_MAX_ROLLTIME 42 + +static void bab_ms3steps(uint32_t *p) +{ + uint32_t a, b, c, d, e, f, g, h, new_e, new_a; + int i; + + a = p[0]; + b = p[1]; + c = p[2]; + d = p[3]; + e = p[4]; + f = p[5]; + g = p[6]; + h = p[7]; + for (i = 0; i < 3; i++) { + new_e = p[i+16] + sha256_k[i] + h + CH(e,f,g) + SHA256_F2(e) + d; + new_a = p[i+16] + sha256_k[i] + h + CH(e,f,g) + SHA256_F2(e) + + SHA256_F1(a) + MAJ(a,b,c); + d = c; + c = b; + b = a; + a = new_a; + h = g; + g = f; + f = e; + e = new_e; + } + p[15] = a; + p[14] = b; + p[13] = c; + p[12] = d; + p[11] = e; + p[10] = f; + p[9] = g; + p[8] = h; +} + +static uint32_t bab_decnonce(uint32_t in) +{ + uint32_t out; + + /* First part load */ + out = (in & 0xFF) << 24; + in >>= 8; + + /* Byte reversal */ + in = (((in & 0xaaaaaaaa) >> 1) | ((in & 0x55555555) << 1)); + in = (((in & 0xcccccccc) >> 2) | ((in & 0x33333333) << 2)); + in = (((in & 0xf0f0f0f0) >> 4) | ((in & 0x0f0f0f0f) << 4)); + + out |= (in >> 2) & 0x3FFFFF; + + /* Extraction */ + if (in & 1) + out |= (1 << 23); + if (in & 2) + out |= (1 << 22); + + out -= 0x800004; + return out; +} + +static void cleanup_older(struct cgpu_info 
*babcgpu, int chip, K_ITEM *witem) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + struct timeval now; + bool expired_item; + K_ITEM *tail; + + cgtime(&now); + + K_WLOCK(babinfo->chip_work[chip]); + tail = babinfo->chip_work[chip]->tail; + expired_item = false; + // Discard expired work + while (tail) { + if (ms_tdiff(&now, &(DATAW(tail)->work_start)) < BAB_WORK_EXPIRE_mS) + break; + + if (tail == witem) + expired_item = true; + + k_unlink_item(babinfo->chip_work[chip], tail); + K_WUNLOCK(babinfo->chip_work[chip]); + if (DATAW(tail)->rolled) + free_work(DATAW(tail)->work); + else + work_completed(babcgpu, DATAW(tail)->work); + K_WLOCK(babinfo->chip_work[chip]); + k_add_head(babinfo->wfree_list, tail); + tail = babinfo->chip_work[chip]->tail; + } + // If we didn't expire witem, then remove all older than it + if (!expired_item && witem && witem->next) { + tail = babinfo->chip_work[chip]->tail; + while (tail && tail != witem) { + k_unlink_item(babinfo->chip_work[chip], tail); + K_WUNLOCK(babinfo->chip_work[chip]); + if (DATAW(tail)->rolled) + free_work(DATAW(tail)->work); + else + work_completed(babcgpu, DATAW(tail)->work); + K_WLOCK(babinfo->chip_work[chip]); + k_add_head(babinfo->wfree_list, tail); + tail = babinfo->chip_work[chip]->tail; + } + } + K_WUNLOCK(babinfo->chip_work[chip]); +} + +static void _bab_reset(__maybe_unused struct cgpu_info *babcgpu, struct bab_info *babinfo, int bank, int times) +{ + const int banks[BAB_MAXBANKS] = { 18, 23, 24, 25 }; + int i; + + BAB_INP_GPIO(10); + BAB_OUT_GPIO(10); + BAB_INP_GPIO(11); + BAB_OUT_GPIO(11); + + if (bank) { + for (i = 0; i < BAB_MAXBANKS; i++) { + BAB_INP_GPIO(banks[i]); + BAB_OUT_GPIO(banks[i]); + if (bank == i+1) + BAB_GPIO_SET = 1 << banks[i]; + else + BAB_GPIO_CLR = 1 << banks[i]; + } + cgsleep_us(4096); + } else { + for (i = 0; i < BAB_MAXBANKS; i++) + BAB_INP_GPIO(banks[i]); + } + + BAB_GPIO_SET = 1 << 11; + for (i = 0; i < times; i++) { // 1us = 1MHz + BAB_GPIO_SET = 1 << 10; 
+ cgsleep_us(1); + BAB_GPIO_CLR = 1 << 10; + cgsleep_us(1); + } + BAB_GPIO_CLR = 1 << 11; + BAB_INP_GPIO(11); + BAB_INP_GPIO(10); + BAB_INP_GPIO(9); + BAB_OUT_GPIO_V(11, 0); + BAB_OUT_GPIO_V(10, 0); + BAB_OUT_GPIO_V(9, 0); +} + +// TODO: handle a false return where this is called? +static bool _bab_txrx(struct cgpu_info *babcgpu, struct bab_info *babinfo, K_ITEM *item, bool detect_ignore, const char *file, const char *func, const int line) +{ + int bank, i, count, chip1, chip2; + uint32_t siz, pos; + struct spi_ioc_transfer tran; + uintptr_t rbuf, wbuf; + + wbuf = (uintptr_t)(DATAS(item)->wbuf); + rbuf = (uintptr_t)(DATAS(item)->rbuf); + siz = (uint32_t)(DATAS(item)->siz); + + memset(&tran, 0, sizeof(tran)); + tran.speed_hz = babinfo->speed_hz; + tran.delay_usecs = babinfo->delay_usecs; + + i = 0; + pos = 0; + for (bank = 0; bank <= BAB_MAXBANKS; bank++) { + if (DATAS(item)->bank_off[bank]) { + bab_reset(bank, 64); + break; + } + } + + if (unlikely(bank > BAB_MAXBANKS)) { + applog(LOG_ERR, "%s%d: %s() failed to find a bank" BAB_FFL, + babcgpu->drv->name, babcgpu->device_id, + __func__, BAB_FFL_PASS); + return false; + } + + count = 0; + while (siz > 0) { + tran.tx_buf = wbuf; + tran.rx_buf = rbuf; + tran.speed_hz = BAB_SPI_SPEED; + if (pos == DATAS(item)->bank_off[bank]) { + for (; ++bank <= BAB_MAXBANKS; ) { + if (DATAS(item)->bank_off[bank] > pos) { + bab_reset(bank, 64); + break; + } + } + } + if (siz < BAB_SPI_BUFSIZ) + tran.len = siz; + else + tran.len = BAB_SPI_BUFSIZ; + + if (pos < DATAS(item)->bank_off[bank] && + DATAS(item)->bank_off[bank] < (pos + tran.len)) + tran.len = DATAS(item)->bank_off[bank] - pos; + + for (; i < babinfo->chips; i++) { + if (!DATAS(item)->chip_off[i]) + continue; + if (DATAS(item)->chip_off[i] >= pos + tran.len) { + tran.speed_hz = babinfo->chip_spis[i]; + break; + } + } + + if (unlikely(i > babinfo->chips)) { + applog(LOG_ERR, "%s%d: %s() failed to find chip" BAB_FFL, + babcgpu->drv->name, babcgpu->device_id, + __func__, 
BAB_FFL_PASS); + return false; + } + + if (unlikely(babinfo->chip_spis[i] == BAB_SPI_SPEED)) { + applog(LOG_DEBUG, "%s%d: %s() chip[%d] speed %d shouldn't be %d" BAB_FFL, + babcgpu->drv->name, babcgpu->device_id, + __func__, i, (int)babinfo->chip_spis[i], + BAB_SPI_SPEED, BAB_FFL_PASS); + } + + if (unlikely(tran.speed_hz == BAB_SPI_SPEED)) { + applog(LOG_DEBUG, "%s%d: %s() transfer speed %d shouldn't be %d" BAB_FFL, + babcgpu->drv->name, babcgpu->device_id, + __func__, (int)tran.speed_hz, + BAB_SPI_SPEED, BAB_FFL_PASS); + } + + count++; + if (ioctl(babinfo->spifd, SPI_IOC_MESSAGE(1), (void *)&tran) < 0) { + if (!detect_ignore || errno != 110) { + for (bank = BAB_MAXBANKS; bank >= 0; bank--) { + if (DATAS(item)->bank_off[bank] && + pos >= DATAS(item)->bank_off[bank]) { + break; + } + } + for (chip1 = babinfo->chips-1; chip1 >= 0; chip1--) { + if (DATAS(item)->chip_off[chip1] && + pos >= DATAS(item)->chip_off[chip1]) { + break; + } + } + for (chip2 = babinfo->chips-1; chip2 >= 0; chip2--) { + if (DATAS(item)->chip_off[chip2] && + (pos + tran.len) >= DATAS(item)->chip_off[chip2]) { + break; + } + } + applog(LOG_ERR, "%s%d: ioctl (%d) siz=%d bank=%d chip=%d-%d" + " failed err=%d" BAB_FFL, + babcgpu->drv->name, + babcgpu->device_id, + count, (int)(tran.len), + bank, chip1, chip2, + errno, BAB_FFL_PASS); + } + return false; + } + + siz -= tran.len; + wbuf += tran.len; + rbuf += tran.len; + pos += tran.len; + + if (siz > 0 && babinfo->trf_delay > 0) + cgsleep_us(babinfo->trf_delay); + } + cgtime(&(DATAS(item)->work_start)); + mutex_lock(&(babinfo->did_lock)); + cgtime(&(babinfo->last_did)); + mutex_unlock(&(babinfo->did_lock)); + return true; +} + +static void _bab_add_buf_rev(K_ITEM *item, const uint8_t *data, uint32_t siz, const char *file, const char *func, const int line) +{ + uint32_t now_used, i; + uint8_t tmp; + + now_used = DATAS(item)->siz; + if (now_used + siz >= BAB_MAXBUF) { + quitfrom(1, file, func, line, + "%s() buffer limit of %d exceeded=%d siz=%d", + 
__func__, BAB_MAXBUF, (int)(now_used + siz), (int)siz); + } + + for (i = 0; i < siz; i++) { + tmp = data[i]; + tmp = ((tmp & 0xaa)>>1) | ((tmp & 0x55) << 1); + tmp = ((tmp & 0xcc)>>2) | ((tmp & 0x33) << 2); + tmp = ((tmp & 0xf0)>>4) | ((tmp & 0x0f) << 4); + DATAS(item)->wbuf[now_used + i] = tmp; + } + + DATAS(item)->siz += siz; +} + +static void _bab_add_buf(K_ITEM *item, const uint8_t *data, size_t siz, const char *file, const char *func, const int line) +{ + uint32_t now_used; + + now_used = DATAS(item)->siz; + if (now_used + siz >= BAB_MAXBUF) { + quitfrom(1, file, func, line, + "%s() DATAS buffer limit of %d exceeded=%d siz=%d", + __func__, BAB_MAXBUF, (int)(now_used + siz), (int)siz); + } + + memcpy(&(DATAS(item)->wbuf[now_used]), data, siz); + DATAS(item)->siz += siz; +} + +static void _bab_add_noops(K_ITEM *item, size_t siz, const char *file, const char *func, const int line) +{ + uint32_t now_used; + + now_used = DATAS(item)->siz; + if (now_used + siz >= BAB_MAXBUF) { + quitfrom(1, file, func, line, + "%s() DATAS buffer limit of %d exceeded=%d siz=%d", + __func__, BAB_MAXBUF, (int)(now_used + siz), (int)siz); + } + + memset(&(DATAS(item)->wbuf[now_used]), BAB_NOOP, siz); + DATAS(item)->siz += siz; +} + +static void _bab_add_data(K_ITEM *item, uint32_t addr, const uint8_t *data, size_t siz, const char *file, const char *func, const int line) +{ + uint8_t tmp[3]; + int trf_siz; + + if (siz < BAB_ADD_MIN || siz > BAB_ADD_MAX) { + quitfrom(1, file, func, line, + "%s() called with invalid siz=%d (min=%d max=%d)", + __func__, (int)siz, BAB_ADD_MIN, BAB_ADD_MAX); + } + trf_siz = siz / 4; + tmp[0] = (trf_siz - 1) | 0xE0; + tmp[1] = (addr >> 8) & 0xff; + tmp[2] = addr & 0xff; + _bab_add_buf(item, tmp, sizeof(tmp), BAB_FFL_PASS); + _bab_add_buf_rev(item, data, siz, BAB_FFL_PASS); +} + +static void _bab_config_reg(K_ITEM *item, uint32_t reg, bool enable, const char *file, const char *func, const int line) +{ + if (enable) { + _bab_add_data(item, BAB_REG_ADDR + reg*32, 
+ bab_reg_ena, sizeof(bab_reg_ena), BAB_FFL_PASS); + } else { + _bab_add_data(item, BAB_REG_ADDR + reg*32, + bab_reg_dis, sizeof(bab_reg_dis), BAB_FFL_PASS); + } + +} + +static void bab_set_osc(struct bab_info *babinfo, int chip) +{ + int fast, i; + + fast = babinfo->chip_fast[chip]; + + for (i = 0; i < BAB_OSC && fast > BAB_OSC; i++, fast -= BAB_OSC) { + babinfo->osc[i] = 0xff; + } + if (i < BAB_OSC && fast > 0 && fast <= BAB_OSC) + babinfo->osc[i++] = bab_osc_bits[fast - 1]; + for (; i < BAB_OSC; i++) + babinfo->osc[i] = 0x00; + + applog(LOG_DEBUG, "@osc(chip=%d) fast=%d 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x", chip, fast, babinfo->osc[0], babinfo->osc[1], babinfo->osc[2], babinfo->osc[3], babinfo->osc[4], babinfo->osc[5], babinfo->osc[6], babinfo->osc[7]); +} + +static void bab_put(struct bab_info *babinfo, K_ITEM *sitem) +{ + struct bab_work_send *chip_input; + int i, reg, bank = 0; + size_t chip_siz; + + BAB_ADD_BREAK(sitem); + for (i = 0; i < babinfo->chips; i++) { + if (babinfo->chip_bank[i] != bank) { + DATAS(sitem)->bank_off[bank] = DATAS(sitem)->siz; + bank = babinfo->chip_bank[i]; + BAB_ADD_BREAK(sitem); + } + if (!(babinfo->disabled[i])) { + if (BAB_CFGD_SET(babinfo->chip_conf[i]) || !babinfo->chip_conf[i]) { + bab_set_osc(babinfo, i); + bab_add_data(sitem, BAB_OSC_ADDR, babinfo->osc, sizeof(babinfo->osc)); + bab_config_reg(sitem, BAB_ICLK_REG, BAB_ICLK_BIT(babinfo->chip_conf[i])); + bab_config_reg(sitem, BAB_FAST_REG, BAB_FAST_BIT(babinfo->chip_conf[i])); + bab_config_reg(sitem, BAB_DIV2_REG, BAB_DIV2_BIT(babinfo->chip_conf[i])); + bab_config_reg(sitem, BAB_SLOW_REG, BAB_SLOW_BIT(babinfo->chip_conf[i])); + bab_config_reg(sitem, BAB_OCLK_REG, BAB_OCLK_BIT(babinfo->chip_conf[i])); + for (reg = BAB_REG_CLR_FROM; reg <= BAB_REG_CLR_TO; reg++) + bab_config_reg(sitem, reg, false); + if (babinfo->chip_conf[i]) { + bab_add_data(sitem, BAB_COUNT_ADDR, bab_counters, sizeof(bab_counters)); + bab_add_data(sitem, BAB_W1A_ADDR, bab_w1, 
sizeof(bab_w1)); + bab_add_data(sitem, BAB_W1B_ADDR, bab_w1, sizeof(bab_w1)/2); + bab_add_data(sitem, BAB_W2_ADDR, bab_w2, sizeof(bab_w2)); + babinfo->chip_conf[i] ^= BAB_CFGD_VAL; + } + babinfo->old_fast[i] = babinfo->chip_fast[i]; + babinfo->old_conf[i] = babinfo->chip_conf[i]; + } else { + if (babinfo->old_fast[i] != babinfo->chip_fast[i]) { + bab_set_osc(babinfo, i); + bab_add_data(sitem, BAB_OSC_ADDR, babinfo->osc, sizeof(babinfo->osc)); + babinfo->old_fast[i] = babinfo->chip_fast[i]; + } + if (babinfo->old_conf[i] != babinfo->chip_conf[i]) { + if (BAB_ICLK_SET(babinfo->old_conf[i]) != + BAB_ICLK_SET(babinfo->chip_conf[i])) + bab_config_reg(sitem, BAB_ICLK_REG, + BAB_ICLK_BIT(babinfo->chip_conf[i])); + if (BAB_FAST_SET(babinfo->old_conf[i]) != + BAB_FAST_SET(babinfo->chip_conf[i])) + bab_config_reg(sitem, BAB_FAST_REG, + BAB_FAST_BIT(babinfo->chip_conf[i])); + if (BAB_DIV2_SET(babinfo->old_conf[i]) != + BAB_DIV2_SET(babinfo->chip_conf[i])) + bab_config_reg(sitem, BAB_DIV2_REG, + BAB_DIV2_BIT(babinfo->chip_conf[i])); + if (BAB_SLOW_SET(babinfo->old_conf[i]) != + BAB_SLOW_SET(babinfo->chip_conf[i])) + bab_config_reg(sitem, BAB_SLOW_REG, + BAB_SLOW_BIT(babinfo->chip_conf[i])); + if (BAB_OCLK_SET(babinfo->old_conf[i]) != + BAB_OCLK_SET(babinfo->chip_conf[i])) + bab_config_reg(sitem, BAB_OCLK_REG, + BAB_OCLK_BIT(babinfo->chip_conf[i])); + babinfo->old_conf[i] = babinfo->chip_conf[i]; + } + } + DATAS(sitem)->chip_off[i] = DATAS(sitem)->siz + 3; + chip_input = &(DATAW(DATAS(sitem)->witems[i])->chip_input); + + if (babinfo->chip_conf[i]) + bab_add_data(sitem, BAB_INP_ADDR, (uint8_t *)chip_input, sizeof(*chip_input)); + + chip_siz = DATAS(sitem)->siz - babinfo->chip_conf[i]; + if (chip_siz < BAB_CHIP_MIN) + BAB_ADD_NOOPs(sitem, BAB_CHIP_MIN - chip_siz); + } + BAB_ADD_ASYNC(sitem); + } + DATAS(sitem)->chip_off[i] = DATAS(sitem)->siz; + DATAS(sitem)->bank_off[bank] = DATAS(sitem)->siz; + + K_WLOCK(babinfo->spi_list); + k_add_head(babinfo->spi_list, sitem); + 
K_WUNLOCK(babinfo->spi_list); + + cgsem_post(&(babinfo->spi_work)); +} + +static bool bab_get(__maybe_unused struct cgpu_info *babcgpu, struct bab_info *babinfo, struct timeval *when) +{ + K_ITEM *item; + bool delayed; + int i; + + item = NULL; + delayed = false; + while (item == NULL) { + cgsem_mswait(&(babinfo->spi_reply), babinfo->reply_wait); + + K_WLOCK(babinfo->spi_sent); + item = k_unlink_tail(babinfo->spi_sent); + K_WUNLOCK(babinfo->spi_sent); + + if (!item) { + if (!delayed) { + applog(LOG_WARNING, "%s%d: Delay getting work reply ...", + babcgpu->drv->name, + babcgpu->device_id); + delayed = true; + babinfo->reply_waits++; + } + } + } + + for (i = 0; i < babinfo->chips; i++) { + if (babinfo->chip_conf[i] & 0x7f) { + memcpy((void *)&(babinfo->chip_results[i]), + (void *)(DATAS(item)->rbuf + DATAS(item)->chip_off[i]), + sizeof(babinfo->chip_results[0])); + } + } + + // work_start is also the time the results were read + memcpy(when, &(DATAS(item)->work_start), sizeof(*when)); + + K_WLOCK(babinfo->sfree_list); + k_add_head(babinfo->sfree_list, item); + K_WUNLOCK(babinfo->sfree_list); + + return true; +} + +void bab_detect_chips(struct cgpu_info *babcgpu, struct bab_info *babinfo, int bank, int first, int last) +{ + int i, reg, j; + K_ITEM *item; + + if (sizeof(struct bab_work_send) != sizeof(bab_test_data)) { + quithere(1, "struct bab_work_send (%d) and bab_test_data (%d)" + " must be the same size", + (int)sizeof(struct bab_work_send), + (int)sizeof(bab_test_data)); + } + + K_WLOCK(babinfo->sfree_list); + item = k_unlink_head_zero(babinfo->sfree_list); + K_WUNLOCK(babinfo->sfree_list); + BAB_ADD_BREAK(item); + for (i = first; i < last && i < BAB_MAXCHIPS; i++) { + bab_set_osc(babinfo, i); + bab_add_data(item, BAB_OSC_ADDR, babinfo->osc, sizeof(babinfo->osc)); + bab_config_reg(item, BAB_ICLK_REG, BAB_ICLK_BIT(babinfo->chip_conf[i])); + bab_config_reg(item, BAB_FAST_REG, BAB_FAST_BIT(babinfo->chip_conf[i])); + bab_config_reg(item, BAB_DIV2_REG, 
BAB_DIV2_BIT(babinfo->chip_conf[i])); + bab_config_reg(item, BAB_SLOW_REG, BAB_SLOW_BIT(babinfo->chip_conf[i])); + bab_config_reg(item, BAB_OCLK_REG, BAB_OCLK_BIT(babinfo->chip_conf[i])); + for (reg = BAB_REG_CLR_FROM; reg <= BAB_REG_CLR_TO; reg++) + bab_config_reg(item, reg, false); + bab_add_data(item, BAB_COUNT_ADDR, bab_counters, sizeof(bab_counters)); + bab_add_data(item, BAB_W1A_ADDR, bab_w1, sizeof(bab_w1)); + bab_add_data(item, BAB_W1B_ADDR, bab_w1, sizeof(bab_w1)/2); + bab_add_data(item, BAB_W2_ADDR, bab_w2, sizeof(bab_w2)); + DATAS(item)->chip_off[i] = DATAS(item)->siz + 3; + bab_add_data(item, BAB_INP_ADDR, bab_test_data, sizeof(bab_test_data)); + DATAS(item)->chip_off[i+1] = DATAS(item)->siz; + DATAS(item)->bank_off[bank] = DATAS(item)->siz; + babinfo->chips = i + 1; + bab_txrx(item, false); + DATAS(item)->siz = 0; + BAB_ADD_BREAK(item); + for (j = first; j <= i; j++) { + DATAS(item)->chip_off[j] = DATAS(item)->siz + 3; + BAB_ADD_ASYNC(item); + } + } + + memset(item->data, 0, babinfo->sfree_list->siz); + BAB_ADD_BREAK(item); + for (i = first; i < last && i < BAB_MAXCHIPS; i++) { + DATAS(item)->chip_off[i] = DATAS(item)->siz + 3; + bab_add_data(item, BAB_INP_ADDR, bab_test_data, sizeof(bab_test_data)); + BAB_ADD_ASYNC(item); + } + DATAS(item)->chip_off[i] = DATAS(item)->siz; + DATAS(item)->bank_off[bank] = DATAS(item)->siz; + babinfo->chips = i; + bab_txrx(item, true); + DATAS(item)->siz = 0; + babinfo->chips = first; + for (i = first; i < last && i < BAB_MAXCHIPS; i++) { + uint32_t tmp[DATA_UINTS-1]; + memcpy(tmp, DATAS(item)->rbuf + DATAS(item)->chip_off[i], sizeof(tmp)); + DATAS(item)->chip_off[i] = 0; + for (j = 0; j < BAB_REPLY_NONCES; j++) { + if (tmp[j] != 0xffffffff && tmp[j] != 0x00000000) { + babinfo->chip_bank[i] = bank; + babinfo->chips = i + 1; + break; + } + } + } + for (i = first ; i < babinfo->chips; i++) + babinfo->chip_bank[i] = bank; + + K_WLOCK(babinfo->sfree_list); + k_add_head(babinfo->sfree_list, item); + 
K_WUNLOCK(babinfo->sfree_list); +} + +static const char *bab_modules[] = { + "i2c-dev", + "i2c-bcm2708", + "spidev", + "spi-bcm2708", + NULL +}; + +static const char *bab_memory = "/dev/mem"; + +static int bab_memory_addr = 0x20200000; + +// TODO: add --bab-options for SPEED_HZ, tran.delay_usecs and an inter transfer delay (default 0) +static struct { + int request; + int value; +} bab_ioc[] = { + { SPI_IOC_RD_MODE, 0 }, + { SPI_IOC_WR_MODE, 0 }, + { SPI_IOC_RD_BITS_PER_WORD, 8 }, + { SPI_IOC_WR_BITS_PER_WORD, 8 }, + { SPI_IOC_RD_MAX_SPEED_HZ, 1000000 }, + { SPI_IOC_WR_MAX_SPEED_HZ, 1000000 }, + { -1, -1 } +}; + +static bool bab_init_gpio(struct cgpu_info *babcgpu, struct bab_info *babinfo, int bus, int chip) +{ + int i, err, memfd, data; + char buf[64]; + + bab_ioc[4].value = (int)(babinfo->speed_hz); + bab_ioc[5].value = (int)(babinfo->speed_hz); + + for (i = 0; bab_modules[i]; i++) { + snprintf(buf, sizeof(buf), "modprobe %s", bab_modules[i]); + err = system(buf); + if (err) { + applog(LOG_ERR, "%s failed to modprobe %s (%d) - you need to be root?", + babcgpu->drv->dname, + bab_modules[i], err); + goto bad_out; + } + } + + memfd = open(bab_memory, O_RDWR | O_SYNC); + if (memfd < 0) { + applog(LOG_ERR, "%s failed open %s (%d)", + babcgpu->drv->dname, + bab_memory, errno); + goto bad_out; + } + + babinfo->gpio = (volatile unsigned *)mmap(NULL, BAB_SPI_BUFSIZ, + PROT_READ | PROT_WRITE, + MAP_SHARED, memfd, + bab_memory_addr); + if (babinfo->gpio == MAP_FAILED) { + close(memfd); + applog(LOG_ERR, "%s failed mmap gpio (%d)", + babcgpu->drv->dname, + errno); + goto bad_out; + } + + close(memfd); + + snprintf(buf, sizeof(buf), "/dev/spidev%d.%d", bus, chip); + babinfo->spifd = open(buf, O_RDWR); + if (babinfo->spifd < 0) { + applog(LOG_ERR, "%s failed to open spidev (%d)", + babcgpu->drv->dname, + errno); + goto map_out; + } + + babcgpu->device_path = strdup(buf); + + for (i = 0; bab_ioc[i].value != -1; i++) { + data = bab_ioc[i].value; + err = ioctl(babinfo->spifd, 
bab_ioc[i].request, (void *)&data); + if (err < 0) { + applog(LOG_ERR, "%s failed ioctl (%d) (%d)", + babcgpu->drv->dname, + i, errno); + goto close_out; + } + } + + for (i = 0; i < BAB_MAXCHIPS; i++) + babinfo->chip_spis[i] = (int)((1000000.0 / (100.0 + 31.0 * (i + 1))) * 1000); + + return true; + +close_out: + close(babinfo->spifd); + babinfo->spifd = 0; + free(babcgpu->device_path); + babcgpu->device_path = NULL; +map_out: + munmap((void *)(babinfo->gpio), BAB_SPI_BUFSIZ); + babinfo->gpio = NULL; +bad_out: + return false; +} + +static void bab_init_chips(struct cgpu_info *babcgpu, struct bab_info *babinfo) +{ + int chip, chipoff, bank, chips, new_chips, boards, mis; + + applog(LOG_WARNING, "%s V1 first test for %d chips ...", + babcgpu->drv->dname, BAB_V1_CHIP_TEST); + + bab_detect_chips(babcgpu, babinfo, 0, 0, BAB_V1_CHIP_TEST); + if (babinfo->chips > 0) { + babinfo->version = 1; + babinfo->banks = 0; + if (babinfo->chips == BAB_V1_CHIP_TEST) { + applog(LOG_WARNING, "%s V1 test for %d more chips ...", + babcgpu->drv->dname, BAB_MAXCHIPS - BAB_V1_CHIP_TEST); + + bab_detect_chips(babcgpu, babinfo, 0, BAB_V1_CHIP_TEST, BAB_MAXCHIPS); + } + babinfo->chips_per_bank[BAB_V1_BANK] = babinfo->chips; + babinfo->bank_first_chip[BAB_V1_BANK] = 0; + babinfo->bank_last_chip[BAB_V1_BANK] = babinfo->chips - 1; + babinfo->boards = (int)((float)(babinfo->chips - 1) / BAB_BOARDCHIPS) + 1; + babinfo->reply_wait = BAB_REPLY_WAIT_mS * 2; + + if ((chip = (babinfo->chips_per_bank[BAB_V1_BANK] % BAB_BOARDCHIPS))) { + mis = BAB_BOARDCHIPS - chip; + babinfo->missing_chips_per_bank[BAB_V1_BANK] = mis; + applog(LOG_WARNING, "%s V1: missing %d chip%s", + babcgpu->drv->dname, mis, + (mis == 1) ? 
"" : "s"); + } + } else { + applog(LOG_WARNING, "%s no chips found with V1", babcgpu->drv->dname); + applog(LOG_WARNING, "%s V2 test %d banks %d chips ...", + babcgpu->drv->dname, BAB_MAXBANKS, BAB_MAXCHIPS); + + chips = 0; + babinfo->version = 2; + babinfo->banks = 0; + for (bank = 1; bank <= BAB_MAXBANKS; bank++) { + for (chipoff = 0; chipoff < BAB_BANKCHIPS; chipoff++) { + chip = babinfo->chips + chipoff; + babinfo->chip_spis[chip] = 625000; + } + bab_reset(bank, 64); + bab_detect_chips(babcgpu, babinfo, bank, babinfo->chips, babinfo->chips + BAB_BANKCHIPS); + new_chips = babinfo->chips - chips; + babinfo->chips_per_bank[bank] = new_chips; + if (new_chips > 0) { + babinfo->bank_first_chip[bank] = babinfo->chips - new_chips; + babinfo->bank_last_chip[bank] = babinfo->chips - 1; + } + chips = babinfo->chips; + if (new_chips == 0) + boards = 0; + else { + boards = (int)((float)(new_chips - 1) / BAB_BOARDCHIPS) + 1; + babinfo->banks++; + } + applog(LOG_WARNING, "%s V2 bank %d: %d chips %d board%s", + babcgpu->drv->dname, bank, new_chips, + boards, (boards == 1) ? "" : "s"); + babinfo->boards += boards; + + if ((chip = (babinfo->chips_per_bank[bank] % BAB_BOARDCHIPS))) { + mis = BAB_BOARDCHIPS - chip; + babinfo->missing_chips_per_bank[bank] = mis; + applog(LOG_WARNING, "%s V2: bank %d missing %d chip%s", + babcgpu->drv->dname, bank, + mis, (mis == 1) ? 
"" : "s"); + } + } + babinfo->reply_wait = BAB_REPLY_WAIT_mS * babinfo->banks; + bab_reset(0, 8); + } + + memcpy(babinfo->old_conf, babinfo->chip_conf, sizeof(babinfo->old_conf)); + memcpy(babinfo->old_fast, babinfo->chip_fast, sizeof(babinfo->old_fast)); +} + +static char *bab_options[] = { + "MaxSpeed", + "DefaultSpeed", + "MinSpeed", + "TuneUp", + "TuneDown", + "SPISpeed", + "SPIDelayuS", + "TransferDelayuS" +}; + +#define INVOP " Invalid Option " + +static void bab_get_options(struct cgpu_info *babcgpu, struct bab_info *babinfo) +{ + char *ptr, *colon; + int which, val; + double fval; + long lval; + + if (opt_bab_options == NULL) + return; + + which = 0; + ptr = opt_bab_options; + while (ptr && *ptr) { + colon = strchr(ptr, ':'); + if (colon) + *(colon++) = '\0'; + + switch (which) { + case 0: + if (*ptr && tolower(*ptr) != 'd') { + val = atoi(ptr); + if (!isdigit(*ptr) || val < BAB_ABSMINSPEED || val > BAB_MAXSPEED) { + quit(1, "%s"INVOP"%s '%s' must be %d <= %s <= %d", + babcgpu->drv->dname, + bab_options[which], + ptr, BAB_ABSMINSPEED, + bab_options[which], + BAB_MAXSPEED); + } + babinfo->max_speed = (uint8_t)val; + // Adjust def,min down if they are above max specified + if (babinfo->def_speed > babinfo->max_speed) + babinfo->def_speed = babinfo->max_speed; + if (babinfo->min_speed > babinfo->max_speed) + babinfo->min_speed = babinfo->max_speed; + } + break; + case 1: + if (*ptr && tolower(*ptr) != 'd') { + val = atoi(ptr); + if (!isdigit(*ptr) || val < BAB_ABSMINSPEED || val > babinfo->max_speed) { + quit(1, "%s"INVOP"%s '%s' must be %d <= %s <= %d", + babcgpu->drv->dname, + bab_options[which], + ptr, BAB_ABSMINSPEED, + bab_options[which], + babinfo->max_speed); + } + babinfo->def_speed = (uint8_t)val; + // Adjust min down if is is above def specified + if (babinfo->min_speed > babinfo->def_speed) + babinfo->min_speed = babinfo->def_speed; + } + break; + case 2: + if (*ptr && tolower(*ptr) != 'd') { + val = atoi(ptr); + if (!isdigit(*ptr) || val < 
BAB_ABSMINSPEED || val > babinfo->def_speed) { + quit(1, "%s"INVOP"%s '%s' must be %d <= %s <= %d", + babcgpu->drv->dname, + bab_options[which], + ptr, BAB_ABSMINSPEED, + bab_options[which], + babinfo->def_speed); + } + babinfo->min_speed = (uint8_t)val; + } + break; + case 3: + if (*ptr && tolower(*ptr) != 'd') { + fval = atof(ptr); + if (!isdigit(*ptr) || fval < 0.0 || fval > 100.0) { + quit(1, "%s"INVOP"%s '%s' must be 0.0 <= %s <= 100.0", + babcgpu->drv->dname, + bab_options[which], ptr, + bab_options[which]); + } + babinfo->tune_up = fval; + } + break; + case 4: + if (*ptr && tolower(*ptr) != 'd') { + fval = atof(ptr); + if (!isdigit(*ptr) || fval < 0.0 || fval > 100.0) { + quit(1, "%s"INVOP"%s '%s' must be %f <= %s <= 100.0", + babcgpu->drv->dname, + bab_options[which], + ptr, babinfo->tune_up, + bab_options[which]); + } + babinfo->tune_down = fval; + } + break; + case 5: + if (*ptr && tolower(*ptr) != 'd') { + val = atoi(ptr); + if (!isdigit(*ptr) || val < 10000 || val > 10000000) { + quit(1, "%s"INVOP"%s '%s' must be 10,000 <= %s <= 10,000,000", + babcgpu->drv->dname, + bab_options[which], ptr, + bab_options[which]); + } + babinfo->speed_hz = (uint32_t)val; + } + break; + case 6: + if (*ptr && tolower(*ptr) != 'd') { + val = atoi(ptr); + if (!isdigit(*ptr) || val < 0 || val > 65535) { + quit(1, "%s"INVOP"%s '%s' must be 0 <= %s <= 65535", + babcgpu->drv->dname, + bab_options[which], ptr, + bab_options[which]); + } + babinfo->delay_usecs = (uint16_t)val; + } + break; + case 7: + if (*ptr && tolower(*ptr) != 'd') { + lval = atol(ptr); + if (!isdigit(*ptr) || lval < 0) { + quit(1, "%s"INVOP"%s '%s' must be %s >= 0", + babcgpu->drv->dname, + bab_options[which], ptr, + bab_options[which]); + } + babinfo->trf_delay = (uint64_t)lval; + } + break; + default: + break; + } + ptr = colon; + which++; + } +} + +static void bab_detect(bool hotplug) +{ + struct cgpu_info *babcgpu = NULL; + struct bab_info *babinfo = NULL; + int i; + + if (hotplug) + return; + + babcgpu = 
calloc(1, sizeof(*babcgpu)); + if (unlikely(!babcgpu)) + quithere(1, "Failed to calloc babcgpu"); + + babcgpu->drv = &bab_drv; + babcgpu->deven = DEV_ENABLED; + babcgpu->threads = 1; + + babinfo = calloc(1, sizeof(*babinfo)); + if (unlikely(!babinfo)) + quithere(1, "Failed to calloc babinfo"); + babcgpu->device_data = (void *)babinfo; + + babinfo->max_speed = BAB_DEFMAXSPEED; + babinfo->def_speed = BAB_DEFSPEED; + babinfo->min_speed = BAB_ABSMINSPEED; + + babinfo->tune_up = BAB_TUNEUP; + babinfo->tune_down = BAB_TUNEDOWN; + + babinfo->speed_hz = BAB_SPI_SPEED; + babinfo->delay_usecs = BAB_DELAY_USECS; + babinfo->trf_delay = BAB_TRF_DELAY; + + bab_get_options(babcgpu, babinfo); + + for (i = 0; i < BAB_MAXCHIPS; i++) { + babinfo->chip_conf[i] = BAB_DEFCONF; + babinfo->chip_fast[i] = babinfo->def_speed; +#if UPDATE_HISTORY + babinfo->bad_fast[i] = babinfo->max_speed + 1; +#endif + } + + if (!bab_init_gpio(babcgpu, babinfo, BAB_SPI_BUS, BAB_SPI_CHIP)) + goto unalloc; + + babinfo->sfree_list = k_new_list("SPI I/O", sizeof(SITEM), + ALLOC_SITEMS, LIMIT_SITEMS, true); + babinfo->spi_list = k_new_store(babinfo->sfree_list); + babinfo->spi_sent = k_new_store(babinfo->sfree_list); + + for (i = 0; i <= BAB_MAXBANKS; i++) { + babinfo->bank_first_chip[i] = -1; + babinfo->bank_last_chip[i] = -1; + } + + bab_init_chips(babcgpu, babinfo); + + if (babinfo->boards) { + applog(LOG_WARNING, "%s found %d chips %d board%s", + babcgpu->drv->dname, babinfo->chips, + babinfo->boards, + (babinfo->boards == 1) ? 
"" : "s"); + } else { + applog(LOG_WARNING, "%s found %d chips", + babcgpu->drv->dname, babinfo->chips); + } + + if (babinfo->chips == 0) + goto cleanup; + + if (!add_cgpu(babcgpu)) + goto cleanup; + + cgsem_init(&(babinfo->scan_work)); + cgsem_init(&(babinfo->spi_work)); + cgsem_init(&(babinfo->spi_reply)); + cgsem_init(&(babinfo->process_reply)); + + mutex_init(&babinfo->did_lock); + mutex_init(&babinfo->nonce_lock); + + babinfo->rfree_list = k_new_list("Results", sizeof(RITEM), + ALLOC_RITEMS, LIMIT_RITEMS, true); + babinfo->res_list = k_new_store(babinfo->rfree_list); + + babinfo->wfree_list = k_new_list("Work", sizeof(WITEM), + ALLOC_WITEMS, LIMIT_WITEMS, true); + babinfo->available_work = k_new_store(babinfo->wfree_list); + for (i = 0; i < BAB_MAXCHIPS; i++) + babinfo->chip_work[i] = k_new_store(babinfo->wfree_list); + + babinfo->nfree_list = k_new_list("Nonce History", sizeof(WITEM), + ALLOC_NITEMS, LIMIT_NITEMS, true); + for (i = 0; i < BAB_MAXCHIPS; i++) { + babinfo->good_nonces[i] = k_new_store(babinfo->nfree_list); + babinfo->bad_nonces[i] = k_new_store(babinfo->nfree_list); + } + + // Exclude detection + cgtime(&(babcgpu->dev_start_tv)); + // Ignore detection tests + babinfo->last_did.tv_sec = 0; + + babinfo->initialised = true; + + return; + +cleanup: + close(babinfo->spifd); + munmap((void *)(babinfo->gpio), BAB_SPI_BUFSIZ); +unalloc: + free(babinfo); + free(babcgpu); +} + +static void bab_identify(__maybe_unused struct cgpu_info *babcgpu) +{ +} + +// thread to do spi txrx +static void *bab_spi(void *userdata) +{ + struct cgpu_info *babcgpu = (struct cgpu_info *)userdata; + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + struct timeval start, stop, send, now; + K_ITEM *sitem, *witem; + double wait, delay; + int chip, band; + + applog(LOG_DEBUG, "%s%i: SPIing...", + babcgpu->drv->name, babcgpu->device_id); + + // Wait until we're ready + while (babcgpu->shutdown == false) { + if (babinfo->initialised) { + break; + } + 
cgsleep_ms(3); + } + + cgtime(&start); + while (babcgpu->shutdown == false) { + K_WLOCK(babinfo->spi_list); + sitem = k_unlink_tail(babinfo->spi_list); + K_WUNLOCK(babinfo->spi_list); + + if (!sitem) { + cgtime(&stop); + wait = us_tdiff(&stop, &start); + if (wait > BAB_LONG_uS) { + applog(LOG_WARNING, "%s%i: SPI waiting %fs ...", + babcgpu->drv->name, + babcgpu->device_id, + (float)wait / 1000000.0); + cgsem_mswait(&(babinfo->spi_work), BAB_LONG_WAIT_mS); + } else + cgsem_mswait(&(babinfo->spi_work), (int)((BAB_LONG_uS - wait) / 1000)); + continue; + } + + // TODO: need an LP/urgent flag to skip this possible cgsem_mswait() + // maybe zero last_sent_work.tv_sec ? + while (babinfo->last_sent_work.tv_sec) { + cgtime(&now); + delay = tdiff(&now, &(babinfo->last_sent_work)) * 1000.0; + if (delay < BAB_EXPECTED_WORK_DELAY_mS) + cgsem_mswait(&(babinfo->spi_work), BAB_EXPECTED_WORK_DELAY_mS - delay); + else + break; + } + + /* + * TODO: handle if an LP happened after bab_do_work() started + * i.e. 
we don't want to send the work + * Have an LP counter that at this point would show the work + * is stale - so don't send it + */ + cgtime(&send); + bab_txrx(sitem, false); + cgtime(&start); + + // The work isn't added to the chip until it has been sent + K_WLOCK(babinfo->wfree_list); + for (chip = 0; chip < babinfo->chips; chip++) { + witem = DATAS(sitem)->witems[chip]; + if (witem) { + memcpy(&(DATAW(witem)->work_start), &(DATAS(sitem)->work_start), + sizeof(DATAW(witem)->work_start)); + k_add_head(babinfo->chip_work[chip], witem); +#if UPDATE_HISTORY + babinfo->work_count[chip]++; +#endif + if (babinfo->first_work[chip].tv_sec == 0) + memcpy(&(babinfo->first_work[chip]), &send, sizeof(send)); + } + } + K_WUNLOCK(babinfo->wfree_list); + + K_WLOCK(babinfo->spi_sent); + k_add_head(babinfo->spi_sent, sitem); + K_WUNLOCK(babinfo->spi_sent); + + cgsem_post(&(babinfo->spi_reply)); + + // Store stats + if (babinfo->last_sent_work.tv_sec) { + delay = tdiff(&send, &(babinfo->last_sent_work)); + babinfo->delay_count++; + if (babinfo->delay_min == 0 || babinfo->delay_min > delay) + babinfo->delay_min = delay; + if (babinfo->delay_max < delay) + babinfo->delay_max = delay; + if (delay < BAB_DELAY_BASE) + band = 0; + else if (delay >= (BAB_DELAY_BASE+BAB_DELAY_STEP*(BAB_DELAY_BANDS+1))) + band = BAB_DELAY_BANDS+1; + else + band = (int)(((double)delay - BAB_DELAY_BASE) / BAB_DELAY_STEP) + 1; + babinfo->delay_bands[band]++; + } + memcpy(&(babinfo->last_sent_work), &send, sizeof(start)); + + delay = tdiff(&start, &send); + babinfo->send_count++; + babinfo->send_total += delay; + if (babinfo->send_min == 0 || babinfo->send_min > delay) + babinfo->send_min = delay; + if (babinfo->send_max < delay) + babinfo->send_max = delay; + + cgsem_mswait(&(babinfo->spi_work), BAB_STD_WAIT_mS); + } + + return NULL; +} + +static void bab_flush_work(struct cgpu_info *babcgpu) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + + applog(LOG_DEBUG, "%s%i: flushing work", + 
babcgpu->drv->name, babcgpu->device_id); + + mutex_lock(&(babinfo->did_lock)); + babinfo->last_did.tv_sec = 0; + mutex_unlock(&(babinfo->did_lock)); + + cgsem_post(&(babinfo->scan_work)); +} + +#define DATA_MERKLE7 16 +#define DATA_NTIME 17 +#define DATA_BITS 18 +#define DATA_NONCE 19 + +#define WORK_MERKLE7 (16*4) +#define WORK_NTIME (17*4) +#define WORK_BITS (18*4) +#define WORK_NONCE (19*4) + +#if UPDATE_HISTORY +static void process_history(struct cgpu_info *babcgpu, int chip, struct timeval *when, bool good, struct timeval *now) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + uint64_t good_nonces, bad_nonces; + uint8_t chip_fast; + double tune; + K_ITEM *item; + int i; + + K_WLOCK(babinfo->nfree_list); + item = k_unlink_head(babinfo->nfree_list); + memcpy(&(DATAN(item)->found), when, sizeof(*when)); + if (good) + k_add_head(babinfo->good_nonces[chip], item); + else + k_add_head(babinfo->bad_nonces[chip], item); + + // Remove all expired history + for (i = 0; i < babinfo->chips; i++) { + item = babinfo->good_nonces[i]->tail; + while (item) { + if (tdiff(now, &(DATAN(item)->found)) < HISTORY_TIME_S) + break; + + k_unlink_item(babinfo->good_nonces[i], item); + k_add_head(babinfo->nfree_list, item); + + item = babinfo->good_nonces[i]->tail; + } + + item = babinfo->bad_nonces[i]->tail; + while (item) { + if (tdiff(now, &(DATAN(item)->found)) < HISTORY_TIME_S) + break; + + k_unlink_item(babinfo->bad_nonces[i], item); + k_add_head(babinfo->nfree_list, item); + + item = babinfo->bad_nonces[i]->tail; + } + } + good_nonces = babinfo->good_nonces[chip]->count; + bad_nonces = babinfo->bad_nonces[chip]->count; + + K_WUNLOCK(babinfo->nfree_list); + + // Tuning ... 
+ if (tdiff(now, &(babinfo->first_work[chip])) >= HISTORY_TIME_S && + tdiff(now, &(babinfo->last_tune[chip])) >= HISTORY_TIME_S && + (good_nonces + bad_nonces) > 0) { + + chip_fast = babinfo->chip_fast[chip]; + + /* + * If bad then step it down and remember the speed + * TODO: does a speed change reset the chip? Or is there a reset? + */ + if (good_nonces == 0) { + if (chip_fast > babinfo->min_speed) { + if (babinfo->bad_fast[chip] > chip_fast) + babinfo->bad_fast[chip] = chip_fast; + + babinfo->chip_fast[chip]--; + + applog(LOG_WARNING, "%s%d: Chip %d BAD - speed down from %d to %d", + babcgpu->drv->name, babcgpu->device_id, + chip, (int)chip_fast, (int)chip_fast - 1); + } else { + /* + * Permanently BAD since we're already at the minumum speed + * but only getting bad nonces + */ + if (babinfo->bad_msg[chip] == false) { + applog(LOG_WARNING, "%s%d: Chip %d BAD - at min speed %d", + babcgpu->drv->name, babcgpu->device_id, + chip, (int)chip_fast); + + babinfo->bad_msg[chip] = true; + } + } + goto tune_over; + } + + /* + * It 'was' permanently BAD but a good nonce came back! + */ + if (babinfo->bad_msg[chip]) { + applog(LOG_WARNING, "%s%d: Chip %d REVIVED - at speed %d", + babcgpu->drv->name, babcgpu->device_id, + chip, (int)chip_fast); + + babinfo->bad_msg[chip] = false; + } + + /* + * Since we have found 'some' nonces - + * make sure bad_fast is higher than current chip_fast + */ + if (babinfo->bad_fast[chip] <= chip_fast) + babinfo->bad_fast[chip] = chip_fast + 1; + + tune = (double)bad_nonces / (double)(good_nonces + bad_nonces) * 100.0; + + /* + * TODO: it appears some chips just get a % bad at low speeds + * so we should handle them by weighting the speed reduction vs + * the HW% gained from the reduction (i.e. 
GH/s) + * Maybe handle that when they hit min_speed, then do a gradual speed + * up verifying if it is really making GH/s worse or better + */ + + // Tune it down if error rate is too high (and it's above min) + if (tune >= babinfo->tune_down && chip_fast > babinfo->min_speed) { + babinfo->chip_fast[chip]--; + + applog(LOG_WARNING, "%s%d: Chip %d High errors %.2f%% - speed down %d to %d", + babcgpu->drv->name, babcgpu->device_id, + chip, tune, (int)chip_fast, (int)chip_fast - 1); + + goto tune_over; + } + + /* + * TODO: if we are at bad_fast-1 and tune_up + * and bad_fast was set more than some time limit ago + * then consider increasing bad_fast by 1? + */ + + // Tune it up if error rate is low enough + if (tune <= babinfo->tune_up && + chip_fast < babinfo->max_speed && + chip_fast < (babinfo->bad_fast[chip] - 1)) { + babinfo->chip_fast[chip]++; + + applog(LOG_WARNING, "%s%d: Chip %d Low errors %.2f%% - speed up %d to %d", + babcgpu->drv->name, babcgpu->device_id, + chip, tune, (int)chip_fast, (int)chip_fast + 1); + + goto tune_over; + } +tune_over: + cgtime(&(babinfo->last_tune[chip])); + } +} +#endif + +/* + * Find the matching work item by checking each nonce against + * work items for the nonces chip + */ +static K_ITEM *process_nonce(struct thr_info *thr, struct cgpu_info *babcgpu, K_ITEM *ritem, uint32_t raw_nonce, K_ITEM *newest_witem) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + unsigned int links, proc_links, work_links, tests; + int try_sta, try_fin, offset; + K_ITEM *witem, *wtail; + struct timeval now; + bool not_first_reply; + uint32_t nonce; + int chip; + + chip = DATAR(ritem)->chip; + not_first_reply = DATAR(ritem)->not_first_reply; + + babinfo->chip_nonces[chip]++; + + /* + * We can grab the head of the chip work queue and then release + * the lock and follow it to the end and back, since the other + * thread will only add items above the head - it wont touch + * any of the prev/next pointers from the head to the end 
- + * except the head->prev pointer may get changed + */ + K_RLOCK(babinfo->chip_work[chip]); + witem = babinfo->chip_work[chip]->head; + K_RUNLOCK(babinfo->chip_work[chip]); + + if (!witem) { + applog(LOG_ERR, "%s%i: chip %d has no work, 1 nonce discarded!", + babcgpu->drv->name, babcgpu->device_id, chip); + babinfo->untested_nonces++; + return newest_witem; + } + + babinfo->tested_nonces++; + + if ((raw_nonce & 0xff) < 0x1c) { + // Will only be this offset + try_sta = BAB_OFF_0x1C_STA; + try_fin = BAB_OFF_0x1C_FIN; + } else { + // Will only be one of the other offsets + try_sta = BAB_OFF_OTHER_STA; + try_fin = BAB_OFF_OTHER_FIN; + } + + nonce = bab_decnonce(raw_nonce); + + cgtime(&now); + + tests = links = proc_links = work_links = 0; + wtail = witem; + while (wtail && wtail->next) { + work_links++; + wtail = wtail->next; + } + while (wtail) { + if (!(DATAW(wtail)->work)) { + applog(LOG_ERR, "%s%i: chip %d witem links %d has no work!", + babcgpu->drv->name, + babcgpu->device_id, + chip, links); + } else { + if (ms_tdiff(&now, &(DATAW(wtail)->work_start)) >= BAB_WORK_EXPIRE_mS) + proc_links--; + else { + for (offset = try_sta; offset <= try_fin; offset++) { + tests++; + if (test_nonce(DATAW(wtail)->work, nonce + bab_nonce_offsets[offset])) { + submit_tested_work(thr, DATAW(wtail)->work); + babinfo->nonce_offset_count[offset]++; + babinfo->chip_good[chip]++; + DATAW(wtail)->nonces++; + + mutex_lock(&(babinfo->nonce_lock)); + babinfo->new_nonces++; + mutex_unlock(&(babinfo->nonce_lock)); + + babinfo->ok_nonces++; + babinfo->total_tests += tests; + if (babinfo->max_tests_per_nonce < tests) + babinfo->max_tests_per_nonce = tests; + babinfo->total_links += links; + babinfo->total_proc_links += proc_links; + if (babinfo->max_links < links) + babinfo->max_links = links; + if (babinfo->max_proc_links < proc_links) + babinfo->max_proc_links = proc_links; + babinfo->total_work_links += work_links; + + babinfo->chip_cont_bad[chip] = 0; +#if UPDATE_HISTORY + 
process_history(babcgpu, chip, + &(DATAR(ritem)->when), + true, &now); +#endif + + if (newest_witem == NULL || + ms_tdiff(&(DATAW(wtail)->work_start), + &(DATAW(newest_witem)->work_start)) < 0) + return wtail; + + return newest_witem; + } + } + } + } + if (wtail == witem) + break; + wtail = wtail->prev; + links++; + proc_links++; + } + + if (not_first_reply) { + babinfo->chip_bad[chip]++; + inc_hw_errors(thr); + + babinfo->fail++; + babinfo->fail_total_tests += tests; + babinfo->fail_total_links += links; + babinfo->fail_total_work_links += work_links; + + babinfo->chip_cont_bad[chip]++; + if (babinfo->chip_max_bad[chip] < babinfo->chip_cont_bad[chip]) + babinfo->chip_max_bad[chip] = babinfo->chip_cont_bad[chip]; + + // Handle chips with only bad results + if (babinfo->disabled[chip] == false && + babinfo->chip_good[chip] == 0 && + babinfo->chip_bad[chip] >= BAB_BAD_COUNT && + tdiff(&now, &(babinfo->first_work[chip])) >= BAB_BAD_TO_MIN) { + if (babinfo->chip_fast[chip] > babinfo->min_speed) + babinfo->chip_fast[chip] = babinfo->min_speed; + else if (tdiff(&now, &(babinfo->first_work[chip])) > BAB_BAD_DEAD) { + babinfo->disabled[chip] = true; + babinfo->total_disabled++; + applog(LOG_ERR, "%s%i: chip %d disabled!", + babcgpu->drv->name, + babcgpu->device_id, + chip); + } + } +#if UPDATE_HISTORY + process_history(babcgpu, chip, &(DATAR(ritem)->when), false, &now); +#endif + } else { + babinfo->initial_ignored++; + babinfo->ign_total_tests += tests; + babinfo->ign_total_links += links; + babinfo->ign_total_work_links += work_links; + } + + return newest_witem; +} + +/* + * On completion discard any work items older than BAB_WORK_EXPIRE_mS + * and any work items of the chip older than the work of the newest nonce work item + */ +static void oknonces(struct thr_info *thr, struct cgpu_info *babcgpu, K_ITEM *ritem) +{ + uint32_t raw_nonce; + K_ITEM *witem; + int nonces; + + witem = NULL; + + for (nonces = 0; nonces < DATAR(ritem)->nonces; nonces++) { + raw_nonce = 
DATAR(ritem)->nonce[nonces]; + + witem = process_nonce(thr, babcgpu, ritem, raw_nonce, witem); + } + + cleanup_older(babcgpu, DATAR(ritem)->chip, witem); +} + +// Check at least every ... +#define BAB_RESULT_DELAY_mS 999 + +// Results checking thread +static void *bab_res(void *userdata) +{ + struct cgpu_info *babcgpu = (struct cgpu_info *)userdata; + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + struct thr_info *thr = babcgpu->thr[0]; + K_ITEM *ritem; + + applog(LOG_DEBUG, "%s%i: Results...", + babcgpu->drv->name, babcgpu->device_id); + + // Wait until we're ready + while (babcgpu->shutdown == false) { + if (babinfo->initialised) { + break; + } + cgsleep_ms(3); + } + + ritem = NULL; + while (babcgpu->shutdown == false) { + K_WLOCK(babinfo->res_list); + if (ritem) { + // Release the old one + k_add_head(babinfo->rfree_list, ritem); + ritem = NULL; + } + // Check for a new one + ritem = k_unlink_tail(babinfo->res_list); + K_WUNLOCK(babinfo->res_list); + + if (!ritem) { + cgsem_mswait(&(babinfo->process_reply), BAB_RESULT_DELAY_mS); + continue; + } + + oknonces(thr, babcgpu, ritem); + } + + return NULL; +} + +/* + * 1.0s per nonce = 4.2GH/s + * 0.9s per nonce = 4.8GH/s + * On a slow machine, reducing this may resolve: + * BaB0: SPI waiting 1.2...s + */ +#define BAB_STD_WORK_DELAY_uS 900000 + +static bool bab_do_work(struct cgpu_info *babcgpu) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + int work_items = 0; + K_ITEM *witem, *sitem, *ritem; + struct timeval when, now; + double delay; + int chip, rep, j, nonces, spie = 0, miso = 0; + uint32_t nonce, spichk; + bool res; + + cgtime(&now); + mutex_lock(&(babinfo->did_lock)); + delay = us_tdiff(&now, &(babinfo->last_did)); + mutex_unlock(&(babinfo->did_lock)); + if (delay < BAB_STD_WORK_DELAY_uS) + return false; + + K_WLOCK(babinfo->sfree_list); + sitem = k_unlink_head_zero(babinfo->sfree_list); + K_WUNLOCK(babinfo->sfree_list); + + for (chip = 0; chip < 
babinfo->chips; chip++) { + if (!(babinfo->disabled[chip])) { + // TODO: ignore stale work + K_WLOCK(babinfo->available_work); + witem = k_unlink_tail(babinfo->available_work); + K_WUNLOCK(babinfo->available_work); + if (!witem) { + applog(LOG_ERR, "%s%i: short work list (%d) %d expected %d - reset", + babcgpu->drv->name, babcgpu->device_id, + chip, work_items, + babinfo->chips - babinfo->total_disabled); + + // Put them back in the order they were taken + K_WLOCK(babinfo->available_work); + for (j = chip-1; j >= 0; j--) { + witem = DATAS(sitem)->witems[j]; + if (witem) + k_add_tail(babinfo->available_work, witem); + } + K_WUNLOCK(babinfo->available_work); + + K_WLOCK(babinfo->sfree_list); + k_add_head(babinfo->sfree_list, sitem); + K_WUNLOCK(babinfo->sfree_list); + + return false; + } + + /* + * TODO: do this when we get work except on LP? + * (not LP so we only do ms3steps for work required) + * Though that may more likely trigger the applog(short work list) above? + */ + if (DATAW(witem)->ci_setup == false) { + memcpy((void *)&(DATAW(witem)->chip_input.midstate[0]), + DATAW(witem)->work->midstate, + sizeof(DATAW(witem)->work->midstate)); + memcpy((void *)&(DATAW(witem)->chip_input.merkle7), + (void *)&(DATAW(witem)->work->data[WORK_MERKLE7]), + MERKLE_BYTES); + + bab_ms3steps((void *)&(DATAW(witem)->chip_input)); + + DATAW(witem)->ci_setup = true; + } + + DATAS(sitem)->witems[chip] = witem; + work_items++; + } + } + + // Send + bab_put(babinfo, sitem); + + // Receive + res = bab_get(babcgpu, babinfo, &when); + if (!res) { + applog(LOG_DEBUG, "%s%i: didn't get work reply ...", + babcgpu->drv->name, babcgpu->device_id); + return false; + } + + applog(LOG_DEBUG, "%s%i: Did get work reply ...", + babcgpu->drv->name, babcgpu->device_id); + + for (chip = 0; chip < babinfo->chips; chip++) { + if (!(babinfo->disabled[chip])) { + K_WLOCK(babinfo->rfree_list); + ritem = k_unlink_head(babinfo->rfree_list); + K_WUNLOCK(babinfo->rfree_list); + + DATAR(ritem)->chip = chip; + 
DATAR(ritem)->not_first_reply = babinfo->not_first_reply[chip]; + memcpy(&(DATAR(ritem)->when), &when, sizeof(when)); + + spichk = babinfo->chip_results[chip].spichk; + if (spichk != 0 && spichk != 0xffffffff) { + babinfo->chip_spie[chip]++; + spie++; + // Test the results anyway + } + + nonces = 0; + for (rep = 0; rep < BAB_REPLY_NONCES; rep++) { + nonce = babinfo->chip_results[chip].nonce[rep]; + if (nonce != babinfo->chip_prev[chip].nonce[rep]) { + if ((nonce & BAB_EVIL_MASK) == BAB_EVIL_NONCE) + babinfo->discarded_e0s++; + else + DATAR(ritem)->nonce[nonces++] = nonce; + } + } + + if (nonces == BAB_REPLY_NONCES) { + babinfo->chip_miso[chip]++; + miso++; + // Test the results anyway + } + + /* + * Send even with zero nonces + * so cleanup_older() is called for the chip + */ + DATAR(ritem)->nonces = nonces; + K_WLOCK(babinfo->res_list); + k_add_head(babinfo->res_list, ritem); + K_WUNLOCK(babinfo->res_list); + + cgsem_post(&(babinfo->process_reply)); + + babinfo->not_first_reply[chip] = true; + + memcpy((void *)(&(babinfo->chip_prev[chip])), + (void *)(&(babinfo->chip_results[chip])), + sizeof(struct bab_work_reply)); + } + + } + + applog(LOG_DEBUG, "Work: items:%d spie:%d miso:%d", work_items, spie, miso); + + return true; +} + +static bool bab_thread_prepare(struct thr_info *thr) +{ + struct cgpu_info *babcgpu = thr->cgpu; + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + + if (thr_info_create(&(babinfo->spi_thr), NULL, bab_spi, (void *)babcgpu)) { + applog(LOG_ERR, "%s%i: SPI thread create failed", + babcgpu->drv->name, babcgpu->device_id); + return false; + } + pthread_detach(babinfo->spi_thr.pth); + + /* + * We require a seperate results checking thread since there is a lot + * of work done checking the results multiple times - thus we don't + * want that delay affecting sending/receiving work to/from the device + */ + if (thr_info_create(&(babinfo->res_thr), NULL, bab_res, (void *)babcgpu)) { + applog(LOG_ERR, "%s%i: Results thread 
create failed", + babcgpu->drv->name, babcgpu->device_id); + return false; + } + pthread_detach(babinfo->res_thr.pth); + + return true; +} + +static void bab_shutdown(struct thr_info *thr) +{ + struct cgpu_info *babcgpu = thr->cgpu; + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + int i; + + applog(LOG_DEBUG, "%s%i: shutting down", + babcgpu->drv->name, babcgpu->device_id); + + for (i = 0; i < babinfo->chips; i++) +// TODO: bab_shutdown(babcgpu, babinfo, i); + ; + + babcgpu->shutdown = true; +} + +static bool bab_queue_full(struct cgpu_info *babcgpu) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + int roll, roll_limit = BAB_MAX_ROLLTIME; + struct work *work, *usework; + K_ITEM *item; + int count, need; + bool ret, rolled; + + K_RLOCK(babinfo->available_work); + count = babinfo->available_work->count; + K_RUNLOCK(babinfo->available_work); + + if (count >= (babinfo->chips - babinfo->total_disabled)) + ret = true; + else { + need = (babinfo->chips - babinfo->total_disabled) - count; + work = get_queued(babcgpu); + if (work) { + if (roll_limit > work->drv_rolllimit) + roll_limit = work->drv_rolllimit; + roll = 0; + do { + if (roll == 0) { + usework = work; + babinfo->work_unrolled++; + rolled = false; + } else { + usework = copy_work_noffset(work, roll); + babinfo->work_rolled++; + rolled = true; + } + + K_WLOCK(babinfo->wfree_list); + item = k_unlink_head_zero(babinfo->wfree_list); + DATAW(item)->work = usework; + DATAW(item)->rolled = rolled; + k_add_head(babinfo->available_work, item); + K_WUNLOCK(babinfo->wfree_list); + } while (--need > 0 && ++roll <= roll_limit); + } else { + // Avoid a hard loop when we can't get work fast enough + cgsleep_us(42); + } + + if (need > 0) + ret = false; + else + ret = true; + } + + return ret; +} + +#define BAB_STD_DELAY_mS 100 + +/* + * TODO: allow this to run through more than once - the second+ + * time not sending any new work unless a flush occurs since: + * at the moment 
we have BAB_STD_WORK_mS latency added to earliest replies + */ +static int64_t bab_scanwork(__maybe_unused struct thr_info *thr) +{ + struct cgpu_info *babcgpu = thr->cgpu; + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + int64_t hashcount = 0; + int count; + + bab_do_work(babcgpu); + + K_RLOCK(babinfo->available_work); + count = babinfo->available_work->count; + K_RUNLOCK(babinfo->available_work); + + if (count >= babinfo->chips) + cgsem_mswait(&(babinfo->scan_work), BAB_STD_DELAY_mS); + + mutex_lock(&(babinfo->nonce_lock)); + if (babinfo->new_nonces) { + hashcount += 0xffffffffull * babinfo->new_nonces; + babinfo->new_nonces = 0; + } + mutex_unlock(&(babinfo->nonce_lock)); + + return hashcount; +} + +#define CHIPS_PER_STAT 16 +#define FMT_RANGE "%d-%d" + +static struct api_data *bab_api_stats(struct cgpu_info *babcgpu) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); + uint64_t history_good[BAB_MAXCHIPS], history_bad[BAB_MAXCHIPS]; + uint64_t his_good_tot, his_bad_tot; + double history_elapsed[BAB_MAXCHIPS], diff; + bool elapsed_is_good[BAB_MAXCHIPS]; + int speeds[BAB_CHIP_SPEEDS]; + struct api_data *root = NULL; + char data[2048]; + char buf[32]; + int spi_work, chip_work, sp, chip, bank, chip_off, board, last_board; + int i, to, j, k; + bool bad; + struct timeval now; + double elapsed, ghs; + float ghs_sum, his_ghs_tot; + float tot, hw; + K_ITEM *item; + + if (babinfo->initialised == false) + return NULL; + + memset(&speeds, 0, sizeof(speeds)); + + root = api_add_int(root, "Version", &(babinfo->version), true); + root = api_add_int(root, "Chips", &(babinfo->chips), true); + root = api_add_int(root, "Boards", &(babinfo->boards), true); + root = api_add_int(root, "Banks", &(babinfo->banks), true); + + data[0] = '\0'; + for (i = 0; i <= BAB_MAXBANKS; i++) { + snprintf(buf, sizeof(buf), "%s%d", + (i == 0) ? 
"" : " ", + babinfo->chips_per_bank[i]); + strcat(data, buf); + } + root = api_add_string(root, "Chips Per Bank", data, true); + + data[0] = '\0'; + for (i = 0; i <= BAB_MAXBANKS; i++) { + snprintf(buf, sizeof(buf), "%s%d", + (i == 0) ? "" : " ", + babinfo->missing_chips_per_bank[i]); + strcat(data, buf); + } + root = api_add_string(root, "Missing Chips Per Bank", data, true); + + cgtime(&now); + elapsed = tdiff(&now, &(babcgpu->dev_start_tv)); + + root = api_add_elapsed(root, "Device Elapsed", &elapsed, true); + + root = api_add_string(root, "History Enabled", +#if UPDATE_HISTORY + "true", +#else + "false", +#endif + true); + + int chs = HISTORY_TIME_S; + root = api_add_int(root, "Chip History Limit", &chs, true); + + K_RLOCK(babinfo->nfree_list); + for (i = 0; i < babinfo->chips; i++) { + item = babinfo->good_nonces[i]->tail; + elapsed_is_good[i] = true; + if (!item) + history_elapsed[i] = 0; + else + history_elapsed[i] = tdiff(&now, &(DATAN(item)->found)); + + item = babinfo->bad_nonces[i]->tail; + if (item) { + diff = tdiff(&now, &(DATAN(item)->found)); + if (history_elapsed[i] < diff) { + history_elapsed[i] = diff; + elapsed_is_good[i] = false; + } + } + history_good[i] = babinfo->good_nonces[i]->count; + history_bad[i] = babinfo->bad_nonces[i]->count; + } + K_RUNLOCK(babinfo->nfree_list); + + his_ghs_tot = 0; + for (i = 0; i < babinfo->chips; i += CHIPS_PER_STAT) { + to = i + CHIPS_PER_STAT - 1; + if (to >= babinfo->chips) + to = babinfo->chips - 1; + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + j == i ? "" : " ", + babinfo->chip_nonces[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Nonces "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + j == i ? 
"" : " ", + babinfo->chip_good[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Good "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + j == i ? "" : " ", + babinfo->chip_bad[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Bad "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s0x%02x", + j == i ? "" : " ", + (int)(babinfo->chip_conf[j])); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Conf "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%d", + j == i ? "" : " ", + (int)(babinfo->chip_fast[j])); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Fast "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%d", + j == i ? "" : " ", + (int)(babinfo->chip_spie[j])); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Spie "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%d", + j == i ? "" : " ", + (int)(babinfo->chip_miso[j])); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Miso "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + tot = (float)(babinfo->chip_good[j] + babinfo->chip_bad[j]); + if (tot != 0) + hw = 100.0 * (float)(babinfo->chip_bad[j]) / tot; + else + hw = 0; + snprintf(buf, sizeof(buf), + "%s%.3f", + j == i ? 
"" : " ", hw); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "HW%% "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + ghs_sum = 0; + data[0] = '\0'; + for (j = i; j <= to; j++) { + if (elapsed > 0) { + ghs = (double)(babinfo->chip_good[j]) * 0xffffffffull / + elapsed / 1000000000.0; + } else + ghs = 0; + + snprintf(buf, sizeof(buf), + "%s%.3f", + j == i ? "" : " ", ghs); + strcat(data, buf); + ghs_sum += (float)ghs; + } + snprintf(buf, sizeof(buf), "GHs "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + snprintf(buf, sizeof(buf), "Sum GHs "FMT_RANGE, i, to); + root = api_add_avg(root, buf, &ghs_sum, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + j == i ? "" : " ", + babinfo->chip_cont_bad[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Cont-Bad "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + j == i ? "" : " ", + babinfo->chip_max_bad[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Max-Bad "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + j == i ? "" : " ", + history_good[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "History Good "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + j == i ? "" : " ", + history_bad[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "History Bad "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + tot = (float)(history_good[j] + history_bad[j]); + if (tot != 0) + hw = 100.0 * (float)(history_bad[j]) / tot; + else + hw = 0; + snprintf(buf, sizeof(buf), + "%s%.3f", + j == i ? 
"" : " ", hw); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "History HW%% "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + ghs_sum = 0; + data[0] = '\0'; + for (j = i; j <= to; j++) { + if (history_elapsed[j] > 0) { + double num = history_good[j]; + // exclude the first nonce? + if (elapsed_is_good[j]) + num--; + ghs = num * 0xffffffffull / + history_elapsed[j] / 1000000000.0; + } else + ghs = 0; + + snprintf(buf, sizeof(buf), + "%s%.3f", + j == i ? "" : " ", ghs); + strcat(data, buf); + + ghs_sum += (float)ghs; + + // Setup speed range data + for (sp = 0; sp < BAB_CHIP_SPEEDS - 1; sp++) { + if (ghs <= chip_speed_ranges[sp]) { + speeds[sp]++; + break; + } + } + if (sp >= (BAB_CHIP_SPEEDS - 1)) + speeds[BAB_CHIP_SPEEDS - 1]++; + } + snprintf(buf, sizeof(buf), "History GHs "FMT_RANGE, i, to); + root = api_add_string(root, buf, data, true); + + snprintf(buf, sizeof(buf), "Sum History GHs "FMT_RANGE, i, to); + root = api_add_avg(root, buf, &ghs_sum, true); + + his_ghs_tot += ghs_sum; + } + + root = api_add_avg(root, "Total History GHs", &his_ghs_tot, true); + + his_good_tot = his_bad_tot = 0; + for (i = 0; i < babinfo->chips; i++) { + his_good_tot += history_good[i]; + his_bad_tot += history_bad[i]; + } + if (his_good_tot + his_bad_tot) + tot = 100.0 * (float)his_bad_tot / (float)(his_good_tot + his_bad_tot); + else + tot = 0.0; + root = api_add_avg(root, "Total History HW%", &tot, true); + + for (sp = 0; sp < BAB_CHIP_SPEEDS; sp++) { + if (sp < (BAB_CHIP_SPEEDS - 1)) + ghs = chip_speed_ranges[sp]; + else + ghs = chip_speed_ranges[BAB_CHIP_SPEEDS - 2]; + + snprintf(buf, sizeof(buf), "History Speed %s%.1f %s", + (sp < (BAB_CHIP_SPEEDS - 1)) ? 
"" : ">", + ghs, chip_speed_names[sp]); + + root = api_add_int(root, buf, &(speeds[sp]), true); + } + + int len, str, siz = 1024; + char *tmp = malloc(siz); + if (!tmp) + quithere(1, "OOM tmp1"); + for (sp = 0; sp < 2; sp++) { + tmp[0] = '\0'; + len = 0; + for (i = 0; i < babinfo->chips; i++) { + if (history_elapsed[i] > 0) { + double num = history_good[i]; + // exclude the first nonce? + if (elapsed_is_good[i]) + num--; + ghs = num * 0xffffffffull / + history_elapsed[i] / 1000000000.0; + } else + ghs = 0; + + if ((sp == 0 || ghs > chip_speed_ranges[sp-1]) && + (ghs <= chip_speed_ranges[sp])) { + bank = babinfo->chip_bank[i]; + chip_off = i; + for (j = 0; j < babinfo->chip_bank[i]; j++) + chip_off -= babinfo->chips_per_bank[j]; + /* + * Bank/Board/Chip are all 1 based + * except V1 Bank = BAB_V1_BANK (0) + * If the bank has any missing chips then a "?" + * is placed after the board number + */ + snprintf(buf, sizeof(buf), "%s%d/%d%s/%d", + len ? " " : "", bank, + (int)(chip_off / BAB_BOARDCHIPS)+1, + babinfo->missing_chips_per_bank[bank] ? + "?" : "", + (chip_off % BAB_BOARDCHIPS)+1); + str = strlen(buf); + while ((len + str + 1) > siz) { + siz += 1024; + tmp = realloc(tmp, siz); + if (!tmp) + quithere(1, "OOM tmp2"); + } + strcpy(tmp + len, buf); + len += str; + } + } + snprintf(buf, sizeof(buf), "History %s", chip_speed_names[sp]); + + root = api_add_string(root, buf, len ? tmp : "None", true); + } + free(tmp); + tmp = NULL; + + switch (babinfo->version) { + case 1: + i = j = BAB_V1_BANK; + break; + case 2: + i = 1; + j = BAB_MAXBANKS; + break; + } + data[0] = '\0'; + for (bank = i; bank <= j; bank++) { + if (babinfo->bank_first_chip[bank] >= 0) { + chip = babinfo->bank_first_chip[bank]; + to = babinfo->bank_last_chip[bank]; + for (; chip <= to; chip += BAB_BOARDCHIPS) { + bad = true; + for (k = chip; (k <= to) && (k < (chip+BAB_BOARDCHIPS)); k++) { + if (history_elapsed[k] > 0) { + double num = history_good[k]; + // exclude the first nonce? 
+ if (elapsed_is_good[k]) + num--; + ghs = num * 0xffffffffull / + history_elapsed[k] / 1000000000.0; + } else + ghs = 0; + + if (ghs > 0.0) { + bad = false; + break; + } + } + if (bad) { + board = (int)((float)(chip - babinfo->bank_first_chip[bank]) / + BAB_BOARDCHIPS) + 1; + snprintf(buf, sizeof(buf), + "%s%d/%d%s", + data[0] ? " " : "", + bank, board, + babinfo->missing_chips_per_bank[bank] ? + "?" : ""); + strcat(data, buf); + } + } + } + } + root = api_add_string(root, "History Bad Boards", data[0] ? data : "None", true); + + data[0] = '\0'; + for (bank = i; bank <= j; bank++) { + if (babinfo->bank_first_chip[bank] >= 0) { + to = babinfo->bank_first_chip[bank]; + chip = babinfo->bank_last_chip[bank]; + for (; chip >= to; chip--) { + bad = true; + if (history_elapsed[chip] > 0) { + double num = history_good[chip]; + // exclude the first nonce? + if (elapsed_is_good[chip]) + num--; + ghs = num * 0xffffffffull / + history_elapsed[chip] / 1000000000.0; + } else + ghs = 0; + + if (ghs > 0.0) + break; + } + /* + * The output here is: a/b+c/d + * a/b is the SPI/board that starts the Bad Chain + * c is the number of boards after a + * d is the total number of chips in the Bad Chain + * A Bad Chain is a continous set of bad chips that + * finish at the end of an SPI chain of boards + * This might be caused by the first board, or the cables attached + * to the first board, in the Bad Chain i.e. a/b + * If c is zero, it's just the last board, so it's the same as any + * other board having bad chips + */ + if (chip < babinfo->bank_last_chip[bank]) { + board = (int)((float)(chip - babinfo->bank_first_chip[bank]) / + BAB_BOARDCHIPS) + 1; + last_board = (int)((float)(babinfo->bank_last_chip[bank] - + babinfo->bank_first_chip[bank]) / + BAB_BOARDCHIPS) + 1; + snprintf(buf, sizeof(buf), + "%s%d/%d%s+%d/%d", + data[0] ? " " : "", + bank, board, + babinfo->missing_chips_per_bank[bank] ? + "?" 
: "", + last_board - board, + babinfo->bank_last_chip[bank] - chip); + strcat(data, buf); + } + } + } + root = api_add_string(root, "History Bad Chains", data[0] ? data : "None", true); + + root = api_add_int(root, "Disabled Chips", &(babinfo->total_disabled), true); + + for (i = 0; i < BAB_NONCE_OFFSETS; i++) { + snprintf(buf, sizeof(buf), "Nonce Offset 0x%08x", bab_nonce_offsets[i]); + root = api_add_uint64(root, buf, &(babinfo->nonce_offset_count[i]), true); + } + + root = api_add_uint64(root, "Discarded E0s", &(babinfo->discarded_e0s), true); + root = api_add_uint64(root, "Tested", &(babinfo->tested_nonces), true); + root = api_add_uint64(root, "OK", &(babinfo->ok_nonces), true); + root = api_add_uint64(root, "Total Tests", &(babinfo->total_tests), true); + root = api_add_uint64(root, "Max Tests", &(babinfo->max_tests_per_nonce), true); + float avg = babinfo->ok_nonces ? (float)(babinfo->total_tests) / + (float)(babinfo->ok_nonces) : 0; + root = api_add_avg(root, "Avg Tests", &avg, true); + root = api_add_uint64(root, "Untested", &(babinfo->untested_nonces), true); + + root = api_add_uint64(root, "Work Links", &(babinfo->total_links), true); + root = api_add_uint64(root, "Work Processed Links", &(babinfo->total_proc_links), true); + root = api_add_uint64(root, "Max Links", &(babinfo->max_links), true); + root = api_add_uint64(root, "Max Processed Links", &(babinfo->max_proc_links), true); + root = api_add_uint64(root, "Total Work Links", &(babinfo->total_work_links), true); + avg = babinfo->ok_nonces ? (float)(babinfo->total_links) / + (float)(babinfo->ok_nonces) : 0; + root = api_add_avg(root, "Avg Links", &avg, true); + avg = babinfo->ok_nonces ? (float)(babinfo->total_proc_links) / + (float)(babinfo->ok_nonces) : 0; + root = api_add_avg(root, "Avg Proc Links", &avg, true); + avg = babinfo->ok_nonces ? 
(float)(babinfo->total_work_links) / + (float)(babinfo->ok_nonces) : 0; + root = api_add_avg(root, "Avg Work Links", &avg, true); + + root = api_add_uint64(root, "Fail", &(babinfo->fail), true); + root = api_add_uint64(root, "Fail Total Tests", &(babinfo->fail_total_tests), true); + avg = babinfo->fail ? (float)(babinfo->fail_total_tests) / + (float)(babinfo->fail) : 0; + root = api_add_avg(root, "Fail Avg Tests", &avg, true); + root = api_add_uint64(root, "Fail Work Links", &(babinfo->fail_total_links), true); + root = api_add_uint64(root, "Fail Total Work Links", &(babinfo->fail_total_work_links), true); + + root = api_add_uint32(root, "Initial Ignored", &(babinfo->initial_ignored), true); + root = api_add_uint64(root, "Ign Total Tests", &(babinfo->ign_total_tests), true); + root = api_add_uint64(root, "Ign Work Links", &(babinfo->ign_total_links), true); + root = api_add_uint64(root, "Ign Total Work Links", &(babinfo->ign_total_work_links), true); + + chip_work = 0; + for (i = 0; i < babinfo->chips; i++) + chip_work += babinfo->chip_work[i]->count; + spi_work = babinfo->spi_list->count * babinfo->chips; + + root = api_add_int(root, "WFree Total", &(babinfo->wfree_list->total), true); + root = api_add_int(root, "WFree Count", &(babinfo->wfree_list->count), true); + root = api_add_int(root, "Available Work", &(babinfo->available_work->count), true); + root = api_add_int(root, "SPI Work", &spi_work, true); + root = api_add_int(root, "Chip Work", &chip_work, true); + + root = api_add_int(root, "SFree Total", &(babinfo->sfree_list->total), true); + root = api_add_int(root, "SFree Count", &(babinfo->sfree_list->count), true); + root = api_add_int(root, "SPI Waiting", &(babinfo->spi_list->count), true); + root = api_add_int(root, "SPI Sent", &(babinfo->spi_sent->count), true); + + root = api_add_int(root, "RFree Total", &(babinfo->rfree_list->total), true); + root = api_add_int(root, "RFree Count", &(babinfo->rfree_list->count), true); + root = api_add_int(root, 
"Result Count", &(babinfo->res_list->count), true); + + int used = babinfo->nfree_list->total - babinfo->nfree_list->count; + root = api_add_int(root, "NFree Total", &(babinfo->nfree_list->total), true); + root = api_add_int(root, "NFree Used", &used, true); + + root = api_add_uint64(root, "Delay Count", &(babinfo->delay_count), true); + root = api_add_double(root, "Delay Min", &(babinfo->delay_min), true); + root = api_add_double(root, "Delay Max", &(babinfo->delay_max), true); + + data[0] = '\0'; + for (i = 0; i <= BAB_DELAY_BANDS; i++) { + snprintf(buf, sizeof(buf), + "%s<%.1f=%"PRIu64, + i == 0 ? "" : " ", + BAB_DELAY_BASE+(BAB_DELAY_STEP*i), + babinfo->delay_bands[i]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), + " >=%.1f=%"PRIu64, + BAB_DELAY_BASE+BAB_DELAY_STEP*(BAB_DELAY_BANDS+1), + babinfo->delay_bands[BAB_DELAY_BANDS+1]); + strcat(data, buf); + root = api_add_string(root, "Delay Bands", data, true); + + root = api_add_uint64(root, "Send Count", &(babinfo->send_count), true); + root = api_add_double(root, "Send Total", &(babinfo->send_total), true); + avg = babinfo->send_count ? 
(float)(babinfo->send_total) / + (float)(babinfo->send_count) : 0; + root = api_add_avg(root, "Send Avg", &avg, true); + root = api_add_double(root, "Send Min", &(babinfo->send_min), true); + root = api_add_double(root, "Send Max", &(babinfo->send_max), true); + + root = api_add_int(root, "Reply Wait", &(babinfo->reply_wait), true); + root = api_add_uint64(root, "Reply Waits", &(babinfo->reply_waits), true); + + root = api_add_uint64(root, "Work Unrolled", &(babinfo->work_unrolled), true); + root = api_add_uint64(root, "Work Rolled", &(babinfo->work_rolled), true); + + i = (int)(babinfo->max_speed); + root = api_add_int(root, bab_options[0], &i, true); + i = (int)(babinfo->def_speed); + root = api_add_int(root, bab_options[1], &i, true); + i = (int)(babinfo->min_speed); + root = api_add_int(root, bab_options[2], &i, true); + root = api_add_double(root, bab_options[3], &(babinfo->tune_up), true); + root = api_add_double(root, bab_options[4], &(babinfo->tune_down), true); + i = (int)(babinfo->speed_hz); + root = api_add_int(root, bab_options[5], &i, true); + i = (int)(babinfo->delay_usecs); + root = api_add_int(root, bab_options[6], &i, true); + root = api_add_uint64(root, bab_options[7], &(babinfo->trf_delay), true); + + return root; +} + +static void bab_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info *babcgpu) +{ + struct bab_info *babinfo = (struct bab_info *)(babcgpu->device_data); +#if UPDATE_HISTORY + struct timeval now; + double elapsed; + int i, bad = 0; + + cgtime(&now); + elapsed = tdiff(&now, &(babcgpu->dev_start_tv)); + + // At least get 15s of nonces before saying anything is bad + if (elapsed > 15.0) { + K_RLOCK(babinfo->nfree_list); + for (i = 0; i < babinfo->chips; i++) { + if (babinfo->good_nonces[i]->count == 0 && + babinfo->bad_nonces[i]->count > 1) + bad++; + } + K_RUNLOCK(babinfo->nfree_list); + } + + tailsprintf(buf, bufsiz, "%d.%02d.%03d B:%03d D:%03d", + babinfo->banks, + babinfo->boards, + babinfo->chips, + bad, + 
babinfo->total_disabled); +#else + tailsprintf(buf, bufsiz, "%d.%02d.%03d D:%03d", + babinfo->banks, + babinfo->boards, + babinfo->chips, + babinfo->total_disabled); +#endif +} +#endif + +struct device_drv bab_drv = { + .drv_id = DRIVER_bab, + .dname = "BlackArrowBitFuryGPIO", + .name = "BaB", + .drv_detect = bab_detect, +#ifdef LINUX + .get_api_stats = bab_api_stats, + .get_statline_before = bab_get_statline_before, + .identify_device = bab_identify, + .thread_prepare = bab_thread_prepare, + .hash_work = hash_queued_work, + .scanwork = bab_scanwork, + .queue_full = bab_queue_full, + .flush_work = bab_flush_work, + .thread_shutdown = bab_shutdown +#endif +}; diff --git a/driver-bflsc.c b/driver-bflsc.c new file mode 100644 index 0000000..f14538e --- /dev/null +++ b/driver-bflsc.c @@ -0,0 +1,2370 @@ +/* + * Copyright 2013 Andrew Smith + * Copyright 2013-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef WIN32 +#include +#endif + +#include "compat.h" +#include "miner.h" +#include "usbutils.h" +#include "uthash.h" +#include "driver-bflsc.h" + +int opt_bflsc_overheat = BFLSC_TEMP_OVERHEAT; + +static const char *blank = ""; + +static enum driver_version drv_ver(struct cgpu_info *bflsc, const char *ver) +{ + char *tmp; + + if (strstr(ver, "1.0.0")) + return BFLSC_DRV1; + + if (strstr(ver, "1.0.") || strstr(ver, "1.1.")) { + applog(LOG_WARNING, "%s detect (%s) Warning assuming firmware '%s' is Ver1", + bflsc->drv->dname, bflsc->device_path, ver); + return BFLSC_DRV1; + } + + if (strstr(ver, "1.2.")) + return BFLSC_DRV2; + + tmp = str_text((char *)ver); + applog(LOG_INFO, "%s detect (%s) Warning unknown firmware '%s' using Ver2", + bflsc->drv->dname, bflsc->device_path, tmp); + free(tmp); + return BFLSC_DRV2; +} + +static void xlinkstr(char *xlink, size_t siz, int dev, struct bflsc_info *sc_info) +{ + if (dev > 0) + snprintf(xlink, siz, " x-%d", dev); + else { + if (sc_info->sc_count > 1) + strcpy(xlink, " master"); + else + *xlink = '\0'; + } +} + +static void bflsc_applog(struct cgpu_info *bflsc, int dev, enum usb_cmds cmd, int amount, int err) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + char xlink[17]; + + xlinkstr(xlink, sizeof(xlink), dev, sc_info); + + usb_applog(bflsc, cmd, xlink, amount, err); +} + +// Break an input up into lines with LFs removed +// false means an error, but if *lines > 0 then data was also found +// error would be no data or missing LF at the end +static bool tolines(struct cgpu_info *bflsc, int dev, char *buf, int *lines, char ***items, enum usb_cmds cmd) +{ + bool ok = false; + char *ptr; + +#define p_lines (*lines) +#define p_items (*items) + + p_lines = 0; + p_items = NULL; + + if (!buf || !(*buf)) { + applog(LOG_DEBUG, "USB: %s%i: (%d) empty %s", + bflsc->drv->name, 
bflsc->device_id, dev, usb_cmdname(cmd)); + return ok; + } + + ptr = strdup(buf); + while (ptr && *ptr) { + p_items = realloc(p_items, ++p_lines * sizeof(*p_items)); + if (unlikely(!p_items)) + quit(1, "Failed to realloc p_items in tolines"); + p_items[p_lines-1] = ptr; + ptr = strchr(ptr, '\n'); + if (ptr) + *(ptr++) = '\0'; + else { + applog(LOG_DEBUG, "USB: %s%i: (%d) missing lf(s) in %s", + bflsc->drv->name, bflsc->device_id, dev, usb_cmdname(cmd)); + return ok; + } + } + ok = true; + + return ok; +} + +static void freetolines(int *lines, char ***items) +{ + if (*lines > 0) { + free(**items); + free(*items); + } + *lines = 0; + *items = NULL; +} + +enum breakmode { + NOCOLON, + ONECOLON, + ALLCOLON // Temperature uses this +}; + +// Break down a single line into 'fields' +// 'lf' will be a pointer to the final LF if it is there (or NULL) +// firstname will be the allocated buf copy pointer which is also +// the string before ':' for ONECOLON and ALLCOLON +// If any string is missing the ':' when it was expected, false is returned +static bool breakdown(enum breakmode mode, char *buf, int *count, char **firstname, char ***fields, char **lf) +{ + char *ptr, *colon, *comma; + bool ok = false; + +#define p_count (*count) +#define p_firstname (*firstname) +#define p_fields (*fields) +#define p_lf (*lf) + + p_count = 0; + p_firstname = NULL; + p_fields = NULL; + p_lf = NULL; + + if (!buf || !(*buf)) + return ok; + + ptr = p_firstname = strdup(buf); + p_lf = strchr(p_firstname, '\n'); + if (mode == ONECOLON) { + colon = strchr(ptr, ':'); + if (colon) { + ptr = colon; + *(ptr++) = '\0'; + } else + return ok; + } + + while (ptr && *ptr) { + if (mode == ALLCOLON) { + colon = strchr(ptr, ':'); + if (colon) + ptr = colon + 1; + else + return ok; + } + comma = strchr(ptr, ','); + if (comma) + *(comma++) = '\0'; + p_fields = realloc(p_fields, ++p_count * sizeof(*p_fields)); + if (unlikely(!p_fields)) + quit(1, "Failed to realloc p_fields in breakdown"); + p_fields[p_count-1] 
= ptr; + ptr = comma; + } + + ok = true; + return ok; +} + +static void freebreakdown(int *count, char **firstname, char ***fields) +{ + if (*firstname) + free(*firstname); + if (*count > 0) + free(*fields); + *count = 0; + *firstname = NULL; + *fields = NULL; +} + +static bool isokerr(int err, char *buf, int amount) +{ + if (err < 0 || amount < (int)BFLSC_OK_LEN) + return false; + else { + if (strstr(buf, BFLSC_ANERR)) { + applog(LOG_INFO, "BFLSC not ok err: %s", buf); + return false; + } else + return true; + } +} + +// send+receive dual stage - always single line replies +static int send_recv_ds(struct cgpu_info *bflsc, int dev, int *stage, bool *sent, int *amount, char *send1, int send1_len, enum usb_cmds send1_cmd, enum usb_cmds recv1_cmd, char *send2, int send2_len, enum usb_cmds send2_cmd, enum usb_cmds recv2_cmd, char *recv, int recv_siz) +{ + struct DataForwardToChain data; + int len, err, tried; + + if (dev == 0) { + usb_buffer_clear(bflsc); + + *stage = 1; + *sent = false; + err = usb_write(bflsc, send1, send1_len, amount, send1_cmd); + if (err < 0 || *amount < send1_len) + return err; + + *sent = true; + err = usb_read_nl(bflsc, recv, recv_siz, amount, recv1_cmd); + if (!isokerr(err, recv, *amount)) + return err; + + usb_buffer_clear(bflsc); + + *stage = 2; + *sent = false; + err = usb_write(bflsc, send2, send2_len, amount, send2_cmd); + if (err < 0 || *amount < send2_len) + return err; + + *sent = true; + err = usb_read_nl(bflsc, recv, recv_siz, amount, recv2_cmd); + + return err; + } + + data.header = BFLSC_XLINKHDR; + data.deviceAddress = (uint8_t)dev; + tried = 0; + while (tried++ < 3) { + data.payloadSize = send1_len; + memcpy(data.payloadData, send1, send1_len); + len = DATAFORWARDSIZE(data); + + usb_buffer_clear(bflsc); + + *stage = 1; + *sent = false; + err = usb_write(bflsc, (char *)&data, len, amount, send1_cmd); + if (err < 0 || *amount < send1_len) + return err; + + *sent = true; + err = usb_read_nl(bflsc, recv, recv_siz, amount, recv1_cmd); 
+ + if (err != LIBUSB_SUCCESS) + return err; + + // x-link timeout? - try again? + if (strstr(recv, BFLSC_XTIMEOUT)) + continue; + + if (!isokerr(err, recv, *amount)) + return err; + + data.payloadSize = send2_len; + memcpy(data.payloadData, send2, send2_len); + len = DATAFORWARDSIZE(data); + + usb_buffer_clear(bflsc); + + *stage = 2; + *sent = false; + err = usb_write(bflsc, (char *)&data, len, amount, send2_cmd); + if (err < 0 || *amount < send2_len) + return err; + + *sent = true; + err = usb_read_nl(bflsc, recv, recv_siz, amount, recv2_cmd); + + if (err != LIBUSB_SUCCESS) + return err; + + // x-link timeout? - try again? + if (strstr(recv, BFLSC_XTIMEOUT)) + continue; + + // SUCCESS - return it + break; + } + return err; +} + +#define READ_OK true +#define READ_NL false + +// send+receive single stage +static int send_recv_ss(struct cgpu_info *bflsc, int dev, bool *sent, int *amount, char *send, int send_len, enum usb_cmds send_cmd, char *recv, int recv_siz, enum usb_cmds recv_cmd, bool read_ok) +{ + struct DataForwardToChain data; + int len, err, tried; + + if (dev == 0) { + usb_buffer_clear(bflsc); + + *sent = false; + err = usb_write(bflsc, send, send_len, amount, send_cmd); + if (err < 0 || *amount < send_len) { + // N.B. 
thus !(*sent) directly implies err < 0 or *amount < send_len + return err; + } + + *sent = true; + if (read_ok == READ_OK) + err = usb_read_ok(bflsc, recv, recv_siz, amount, recv_cmd); + else + err = usb_read_nl(bflsc, recv, recv_siz, amount, recv_cmd); + + return err; + } + + data.header = BFLSC_XLINKHDR; + data.deviceAddress = (uint8_t)dev; + data.payloadSize = send_len; + memcpy(data.payloadData, send, send_len); + len = DATAFORWARDSIZE(data); + + tried = 0; + while (tried++ < 3) { + usb_buffer_clear(bflsc); + + *sent = false; + err = usb_write(bflsc, (char *)&data, len, amount, recv_cmd); + if (err < 0 || *amount < send_len) + return err; + + *sent = true; + if (read_ok == READ_OK) + err = usb_read_ok(bflsc, recv, recv_siz, amount, recv_cmd); + else + err = usb_read_nl(bflsc, recv, recv_siz, amount, recv_cmd); + + if (err != LIBUSB_SUCCESS && err != LIBUSB_ERROR_TIMEOUT) + return err; + + // read_ok can err timeout if it's looking for OK + // TODO: add a usb_read() option to spot the ERR: and convert end=OK to just + // x-link timeout? - try again? 
+ if ((err == LIBUSB_SUCCESS || (read_ok == READ_OK && err == LIBUSB_ERROR_TIMEOUT)) && + strstr(recv, BFLSC_XTIMEOUT)) + continue; + + // SUCCESS or TIMEOUT - return it + break; + } + return err; +} + +static int write_to_dev(struct cgpu_info *bflsc, int dev, char *buf, int buflen, int *amount, enum usb_cmds cmd) +{ + struct DataForwardToChain data; + int len; + + /* + * The protocol is syncronous so any previous excess can be + * discarded and assumed corrupt data or failed USB transfers + */ + usb_buffer_clear(bflsc); + + if (dev == 0) + return usb_write(bflsc, buf, buflen, amount, cmd); + + data.header = BFLSC_XLINKHDR; + data.deviceAddress = (uint8_t)dev; + data.payloadSize = buflen; + memcpy(data.payloadData, buf, buflen); + len = DATAFORWARDSIZE(data); + + return usb_write(bflsc, (char *)&data, len, amount, cmd); +} + +static void bflsc_send_flush_work(struct cgpu_info *bflsc, int dev) +{ + char buf[BFLSC_BUFSIZ+1]; + int err, amount; + bool sent; + + // Device is gone + if (bflsc->usbinfo.nodev) + return; + + mutex_lock(&bflsc->device_mutex); + err = send_recv_ss(bflsc, dev, &sent, &amount, + BFLSC_QFLUSH, BFLSC_QFLUSH_LEN, C_QUEFLUSH, + buf, sizeof(buf)-1, C_QUEFLUSHREPLY, READ_NL); + mutex_unlock(&bflsc->device_mutex); + + if (!sent) + bflsc_applog(bflsc, dev, C_QUEFLUSH, amount, err); + else { + // TODO: do we care if we don't get 'OK'? (always will in normal processing) + } +} + +/* return True = attempted usb_read_ok() + * set ignore to true means no applog/ignore errors */ +static bool bflsc_qres(struct cgpu_info *bflsc, char *buf, size_t bufsiz, int dev, int *err, int *amount, bool ignore) +{ + bool readok = false; + + mutex_lock(&(bflsc->device_mutex)); + *err = send_recv_ss(bflsc, dev, &readok, amount, + BFLSC_QRES, BFLSC_QRES_LEN, C_REQUESTRESULTS, + buf, bufsiz-1, C_GETRESULTS, READ_OK); + mutex_unlock(&(bflsc->device_mutex)); + + if (!readok) { + if (!ignore) + bflsc_applog(bflsc, dev, C_REQUESTRESULTS, *amount, *err); + + // TODO: do what? 
flag as dead device? + // count how many times it has happened and reset/fail it + // or even make sure it is all x-link and that means device + // has failed after some limit of this? + // of course all other I/O must also be failing ... + } else { + if (*err < 0 || *amount < 1) { + if (!ignore) + bflsc_applog(bflsc, dev, C_GETRESULTS, *amount, *err); + + // TODO: do what? ... see above + } + } + + return readok; +} + +static void __bflsc_initialise(struct cgpu_info *bflsc) +{ + int err, interface; + +// TODO: does x-link bypass the other device FTDI? (I think it does) +// So no initialisation required except for the master device? + + if (bflsc->usbinfo.nodev) + return; + + interface = usb_interface(bflsc); + // Reset + err = usb_transfer(bflsc, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_RESET, interface, C_RESET); + + applog(LOG_DEBUG, "%s%i: reset got err %d", + bflsc->drv->name, bflsc->device_id, err); + + if (bflsc->usbinfo.nodev) + return; + + usb_ftdi_set_latency(bflsc); + + if (bflsc->usbinfo.nodev) + return; + + // Set data control + err = usb_transfer(bflsc, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, + FTDI_VALUE_DATA_BAS, interface, C_SETDATA); + + applog(LOG_DEBUG, "%s%i: setdata got err %d", + bflsc->drv->name, bflsc->device_id, err); + + if (bflsc->usbinfo.nodev) + return; + + // Set the baud + err = usb_transfer(bflsc, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, FTDI_VALUE_BAUD_BAS, + (FTDI_INDEX_BAUD_BAS & 0xff00) | interface, + C_SETBAUD); + + applog(LOG_DEBUG, "%s%i: setbaud got err %d", + bflsc->drv->name, bflsc->device_id, err); + + if (bflsc->usbinfo.nodev) + return; + + // Set Flow Control + err = usb_transfer(bflsc, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + + applog(LOG_DEBUG, "%s%i: setflowctrl got err %d", + bflsc->drv->name, bflsc->device_id, err); + + if (bflsc->usbinfo.nodev) + return; + + // Set Modem Control + err = usb_transfer(bflsc, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); 
+ + applog(LOG_DEBUG, "%s%i: setmodemctrl got err %d", + bflsc->drv->name, bflsc->device_id, err); + + if (bflsc->usbinfo.nodev) + return; + + // Clear any sent data + err = usb_transfer(bflsc, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_PURGE_TX, interface, C_PURGETX); + + applog(LOG_DEBUG, "%s%i: purgetx got err %d", + bflsc->drv->name, bflsc->device_id, err); + + if (bflsc->usbinfo.nodev) + return; + + // Clear any received data + err = usb_transfer(bflsc, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_PURGE_RX, interface, C_PURGERX); + + applog(LOG_DEBUG, "%s%i: purgerx got err %d", + bflsc->drv->name, bflsc->device_id, err); + + if (!bflsc->cutofftemp) + bflsc->cutofftemp = opt_bflsc_overheat; +} + +static void bflsc_initialise(struct cgpu_info *bflsc) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + char buf[BFLSC_BUFSIZ+1]; + int err, amount; + int dev; + + mutex_lock(&(bflsc->device_mutex)); + __bflsc_initialise(bflsc); + mutex_unlock(&(bflsc->device_mutex)); + + for (dev = 0; dev < sc_info->sc_count; dev++) { + bflsc_send_flush_work(bflsc, dev); + bflsc_qres(bflsc, buf, sizeof(buf), dev, &err, &amount, true); + } +} + +static bool getinfo(struct cgpu_info *bflsc, int dev) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + struct bflsc_dev sc_dev; + char buf[BFLSC_BUFSIZ+1]; + int err, amount; + char **items, *firstname, **fields, *lf; + bool res, ok = false; + int i, lines, count; + char *tmp; + + /* + * Kano's first dev Jalapeno output: + * DEVICE: BitFORCE SC + * FIRMWARE: 1.0.0 + * ENGINES: 30 + * FREQUENCY: [UNKNOWN] + * XLINK MODE: MASTER + * XLINK PRESENT: YES + * --DEVICES IN CHAIN: 0 + * --CHAIN PRESENCE MASK: 00000000 + * OK + */ + + /* + * Don't use send_recv_ss() since we have a different receive timeout + * Also getinfo() is called multiple times if it fails anyway + */ + err = write_to_dev(bflsc, dev, BFLSC_DETAILS, BFLSC_DETAILS_LEN, &amount, C_REQUESTDETAILS); + if (err < 0 || 
amount != BFLSC_DETAILS_LEN) { + applog(LOG_ERR, "%s detect (%s) send details request failed (%d:%d)", + bflsc->drv->dname, bflsc->device_path, amount, err); + return ok; + } + + err = usb_read_ok_timeout(bflsc, buf, sizeof(buf)-1, &amount, + BFLSC_INFO_TIMEOUT, C_GETDETAILS); + if (err < 0 || amount < 1) { + if (err < 0) { + applog(LOG_ERR, "%s detect (%s) get details return invalid/timed out (%d:%d)", + bflsc->drv->dname, bflsc->device_path, amount, err); + } else { + applog(LOG_ERR, "%s detect (%s) get details returned nothing (%d:%d)", + bflsc->drv->dname, bflsc->device_path, amount, err); + } + return ok; + } + + memset(&sc_dev, 0, sizeof(struct bflsc_dev)); + sc_info->sc_count = 1; + res = tolines(bflsc, dev, &(buf[0]), &lines, &items, C_GETDETAILS); + if (!res) + return ok; + + tmp = str_text(buf); + strncpy(sc_dev.getinfo, tmp, sizeof(sc_dev.getinfo)); + sc_dev.getinfo[sizeof(sc_dev.getinfo)-1] = '\0'; + free(tmp); + + for (i = 0; i < lines-2; i++) { + res = breakdown(ONECOLON, items[i], &count, &firstname, &fields, &lf); + if (lf) + *lf = '\0'; + if (!res || count != 1) { + tmp = str_text(items[i]); + applogsiz(LOG_WARNING, BFLSC_APPLOGSIZ, + "%s detect (%s) invalid details line: '%s' %d", + bflsc->drv->dname, bflsc->device_path, tmp, count); + free(tmp); + dev_error(bflsc, REASON_DEV_COMMS_ERROR); + goto mata; + } + if (strstr(firstname, BFLSC_DI_FIRMWARE)) { + sc_dev.firmware = strdup(fields[0]); + sc_info->driver_version = drv_ver(bflsc, sc_dev.firmware); + } + else if (Strcasestr(firstname, BFLSC_DI_ENGINES)) { + sc_dev.engines = atoi(fields[0]); + if (sc_dev.engines < 1) { + tmp = str_text(items[i]); + applogsiz(LOG_WARNING, BFLSC_APPLOGSIZ, + "%s detect (%s) invalid engine count: '%s'", + bflsc->drv->dname, bflsc->device_path, tmp); + free(tmp); + goto mata; + } + } + else if (strstr(firstname, BFLSC_DI_XLINKMODE)) + sc_dev.xlink_mode = strdup(fields[0]); + else if (strstr(firstname, BFLSC_DI_XLINKPRESENT)) + sc_dev.xlink_present = strdup(fields[0]); 
+ else if (strstr(firstname, BFLSC_DI_DEVICESINCHAIN)) { + if (fields[0][0] == '0' || + (fields[0][0] == ' ' && fields[0][1] == '0')) + sc_info->sc_count = 1; + else + sc_info->sc_count = atoi(fields[0]); + if (sc_info->sc_count < 1 || sc_info->sc_count > 30) { + tmp = str_text(items[i]); + applogsiz(LOG_WARNING, BFLSC_APPLOGSIZ, + "%s detect (%s) invalid x-link count: '%s'", + bflsc->drv->dname, bflsc->device_path, tmp); + free(tmp); + goto mata; + } + } + else if (strstr(firstname, BFLSC_DI_CHIPS)) + sc_dev.chips = strdup(fields[0]); + else if (strstr(firstname, BFLSC28_DI_ASICS)) + sc_dev.chips = strdup(fields[0]); + + freebreakdown(&count, &firstname, &fields); + } + + if (sc_info->driver_version == BFLSC_DRVUNDEF) { + applog(LOG_WARNING, "%s detect (%s) missing %s", + bflsc->drv->dname, bflsc->device_path, BFLSC_DI_FIRMWARE); + goto ne; + } + + sc_info->sc_devs = calloc(sc_info->sc_count, sizeof(struct bflsc_dev)); + if (unlikely(!sc_info->sc_devs)) + quit(1, "Failed to calloc in getinfo"); + memcpy(&(sc_info->sc_devs[0]), &sc_dev, sizeof(sc_dev)); + // TODO: do we care about getting this info for the rest if > 0 x-link + + ok = true; + goto ne; + +mata: + freebreakdown(&count, &firstname, &fields); + ok = false; +ne: + freetolines(&lines, &items); + return ok; +} + +static bool bflsc28_queue_full(struct cgpu_info *bflsc); + +static struct cgpu_info *bflsc_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct bflsc_info *sc_info = NULL; + char buf[BFLSC_BUFSIZ+1]; + int i, err, amount; + struct timeval init_start, init_now; + int init_sleep, init_count; + bool ident_first, sent; + char *newname; + uint16_t latency; + + struct cgpu_info *bflsc = usb_alloc_cgpu(&bflsc_drv, 1); + + sc_info = calloc(1, sizeof(*sc_info)); + if (unlikely(!sc_info)) + quit(1, "Failed to calloc sc_info in bflsc_detect_one"); + // TODO: fix ... everywhere ... 
+ bflsc->device_data = (FILE *)sc_info; + + if (!usb_init(bflsc, dev, found)) + goto shin; + + // Allow 2 complete attempts if the 1st time returns an unrecognised reply + ident_first = true; +retry: + init_count = 0; + init_sleep = REINIT_TIME_FIRST_MS; + cgtime(&init_start); +reinit: + __bflsc_initialise(bflsc); + + err = send_recv_ss(bflsc, 0, &sent, &amount, + BFLSC_IDENTIFY, BFLSC_IDENTIFY_LEN, C_REQUESTIDENTIFY, + buf, sizeof(buf)-1, C_GETIDENTIFY, READ_NL); + + if (!sent) { + applog(LOG_ERR, "%s detect (%s) send identify request failed (%d:%d)", + bflsc->drv->dname, bflsc->device_path, amount, err); + goto unshin; + } + + if (err < 0 || amount < 1) { + init_count++; + cgtime(&init_now); + if (us_tdiff(&init_now, &init_start) <= REINIT_TIME_MAX) { + if (init_count == 2) { + applog(LOG_WARNING, "%s detect (%s) 2nd init failed (%d:%d) - retrying", + bflsc->drv->dname, bflsc->device_path, amount, err); + } + cgsleep_ms(init_sleep); + if ((init_sleep * 2) <= REINIT_TIME_MAX_MS) + init_sleep *= 2; + goto reinit; + } + + if (init_count > 0) + applog(LOG_WARNING, "%s detect (%s) init failed %d times %.2fs", + bflsc->drv->dname, bflsc->device_path, init_count, tdiff(&init_now, &init_start)); + + if (err < 0) { + applog(LOG_ERR, "%s detect (%s) error identify reply (%d:%d)", + bflsc->drv->dname, bflsc->device_path, amount, err); + } else { + applog(LOG_ERR, "%s detect (%s) empty identify reply (%d)", + bflsc->drv->dname, bflsc->device_path, amount); + } + + goto unshin; + } + buf[amount] = '\0'; + + if (unlikely(!strstr(buf, BFLSC_BFLSC) && !strstr(buf, BFLSC_BFLSC28))) { + applog(LOG_DEBUG, "%s detect (%s) found an FPGA '%s' ignoring", + bflsc->drv->dname, bflsc->device_path, buf); + goto unshin; + } + + if (unlikely(strstr(buf, BFLSC_IDENTITY))) { + if (ident_first) { + applog(LOG_DEBUG, "%s detect (%s) didn't recognise '%s' trying again ...", + bflsc->drv->dname, bflsc->device_path, buf); + ident_first = false; + goto retry; + } + applog(LOG_DEBUG, "%s detect (%s) 
didn't recognise '%s' on 2nd attempt", + bflsc->drv->dname, bflsc->device_path, buf); + goto unshin; + } + + int tries = 0; + while (7734) { + if (getinfo(bflsc, 0)) + break; + + // N.B. we will get displayed errors each time it fails + if (++tries > 2) + goto unshin; + + cgsleep_ms(40); + } + + switch (sc_info->driver_version) { + case BFLSC_DRV1: + sc_info->que_size = BFLSC_QUE_SIZE_V1; + sc_info->que_full_enough = BFLSC_QUE_FULL_ENOUGH_V1; + sc_info->que_watermark = BFLSC_QUE_WATERMARK_V1; + sc_info->que_low = BFLSC_QUE_LOW_V1; + sc_info->que_noncecount = QUE_NONCECOUNT_V1; + sc_info->que_fld_min = QUE_FLD_MIN_V1; + sc_info->que_fld_max = QUE_FLD_MAX_V1; + // Only Jalapeno uses 1.0.0 + sc_info->flush_size = 1; + break; + case BFLSC_DRV2: + case BFLSC_DRVUNDEF: + default: + sc_info->driver_version = BFLSC_DRV2; + + sc_info->que_size = BFLSC_QUE_SIZE_V2; + sc_info->que_full_enough = BFLSC_QUE_FULL_ENOUGH_V2; + sc_info->que_watermark = BFLSC_QUE_WATERMARK_V2; + sc_info->que_low = BFLSC_QUE_LOW_V2; + sc_info->que_noncecount = QUE_NONCECOUNT_V2; + sc_info->que_fld_min = QUE_FLD_MIN_V2; + sc_info->que_fld_max = QUE_FLD_MAX_V2; + // TODO: this can be reduced to total chip count + sc_info->flush_size = 16 * sc_info->sc_count; + break; + } + + // Set parallelization based on the getinfo() response if it is present + if (sc_info->sc_devs[0].chips && strlen(sc_info->sc_devs[0].chips)) { + if (strstr(sc_info->sc_devs[0].chips, BFLSC_DI_CHIPS_PARALLEL)) { + sc_info->que_noncecount = QUE_NONCECOUNT_V2; + sc_info->que_fld_min = QUE_FLD_MIN_V2; + sc_info->que_fld_max = QUE_FLD_MAX_V2; + } else { + sc_info->que_noncecount = QUE_NONCECOUNT_V1; + sc_info->que_fld_min = QUE_FLD_MIN_V1; + sc_info->que_fld_max = QUE_FLD_MAX_V1; + } + } + + sc_info->scan_sleep_time = BAS_SCAN_TIME; + sc_info->results_sleep_time = BFLSC_RES_TIME; + sc_info->default_ms_work = BAS_WORK_TIME; + latency = BAS_LATENCY; + + /* When getinfo() "FREQUENCY: [UNKNOWN]" is fixed - + * use 'freq * engines' to 
estimate. + * Otherwise for now: */ + newname = NULL; + if (sc_info->sc_count > 1) { + newname = BFLSC_MINIRIG; + sc_info->scan_sleep_time = BAM_SCAN_TIME; + sc_info->default_ms_work = BAM_WORK_TIME; + bflsc->usbdev->ident = IDENT_BAM; + latency = BAM_LATENCY; + } else { + if (sc_info->sc_devs[0].engines < 34) { // 16 * 2 + 2 + newname = BFLSC_JALAPENO; + sc_info->scan_sleep_time = BAJ_SCAN_TIME; + sc_info->default_ms_work = BAJ_WORK_TIME; + bflsc->usbdev->ident = IDENT_BAJ; + latency = BAJ_LATENCY; + } else if (sc_info->sc_devs[0].engines < 130) { // 16 * 8 + 2 + newname = BFLSC_LITTLESINGLE; + sc_info->scan_sleep_time = BAL_SCAN_TIME; + sc_info->default_ms_work = BAL_WORK_TIME; + bflsc->usbdev->ident = IDENT_BAL; + latency = BAL_LATENCY; + } + } + + sc_info->ident = usb_ident(bflsc); + if (sc_info->ident == IDENT_BMA) { + bflsc->drv->queue_full = &bflsc28_queue_full; + sc_info->scan_sleep_time = BMA_SCAN_TIME; + sc_info->default_ms_work = BMA_WORK_TIME; + sc_info->results_sleep_time = BMA_RES_TIME; + } + + if (latency != bflsc->usbdev->found->latency) { + bflsc->usbdev->found->latency = latency; + usb_ftdi_set_latency(bflsc); + } + + for (i = 0; i < sc_info->sc_count; i++) + sc_info->sc_devs[i].ms_work = sc_info->default_ms_work; + + if (newname) { + if (!bflsc->drv->copy) + bflsc->drv = copy_drv(bflsc->drv); + bflsc->drv->name = newname; + } + + // We have a real BFLSC! 
+ applog(LOG_DEBUG, "%s (%s) identified as: '%s'", + bflsc->drv->dname, bflsc->device_path, bflsc->drv->name); + + if (!add_cgpu(bflsc)) + goto unshin; + + update_usb_stats(bflsc); + + mutex_init(&bflsc->device_mutex); + rwlock_init(&sc_info->stat_lock); + + return bflsc; + +unshin: + + usb_uninit(bflsc); + +shin: + + free(bflsc->device_data); + bflsc->device_data = NULL; + + if (bflsc->name != blank) { + free(bflsc->name); + bflsc->name = NULL; + } + + bflsc = usb_free_cgpu(bflsc); + + return NULL; +} + +static void bflsc_detect(bool __maybe_unused hotplug) +{ + usb_detect(&bflsc_drv, bflsc_detect_one); +} + +static void get_bflsc_statline_before(char *buf, size_t bufsiz, struct cgpu_info *bflsc) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + float temp = 0; + float vcc2 = 0; + int i; + + rd_lock(&(sc_info->stat_lock)); + for (i = 0; i < sc_info->sc_count; i++) { + if (sc_info->sc_devs[i].temp1 > temp) + temp = sc_info->sc_devs[i].temp1; + if (sc_info->sc_devs[i].temp2 > temp) + temp = sc_info->sc_devs[i].temp2; + if (sc_info->sc_devs[i].vcc2 > vcc2) + vcc2 = sc_info->sc_devs[i].vcc2; + } + rd_unlock(&(sc_info->stat_lock)); + + tailsprintf(buf, bufsiz, "max%3.0fC %4.2fV", temp, vcc2); +} + +static void flush_one_dev(struct cgpu_info *bflsc, int dev) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + struct work *work, *tmp; + bool did = false; + + bflsc_send_flush_work(bflsc, dev); + + rd_lock(&bflsc->qlock); + + HASH_ITER(hh, bflsc->queued_work, work, tmp) { + if (work->subid == dev) { + // devflag is used to flag stale work + work->devflag = true; + did = true; + } + } + + rd_unlock(&bflsc->qlock); + + if (did) { + wr_lock(&(sc_info->stat_lock)); + sc_info->sc_devs[dev].flushed = true; + sc_info->sc_devs[dev].flush_id = sc_info->sc_devs[dev].result_id; + sc_info->sc_devs[dev].work_queued = 0; + wr_unlock(&(sc_info->stat_lock)); + } +} + +static void bflsc_flush_work(struct cgpu_info *bflsc) +{ + struct 
bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + int dev; + + for (dev = 0; dev < sc_info->sc_count; dev++) + flush_one_dev(bflsc, dev); +} + +static void bflsc_set_volt(struct cgpu_info *bflsc, int dev) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + char buf[BFLSC_BUFSIZ+1]; + char msg[16]; + int err, amount; + bool sent; + + // Device is gone + if (bflsc->usbinfo.nodev) + return; + + snprintf(msg, sizeof(msg), "V%dX", sc_info->volt_next); + + mutex_lock(&bflsc->device_mutex); + + err = send_recv_ss(bflsc, dev, &sent, &amount, + msg, strlen(msg), C_SETVOLT, + buf, sizeof(buf)-1, C_REPLYSETVOLT, READ_NL); + mutex_unlock(&(bflsc->device_mutex)); + + if (!sent) + bflsc_applog(bflsc, dev, C_SETVOLT, amount, err); + else { + // Don't care + } + + sc_info->volt_next_stat = false; + + return; +} + +static void bflsc_set_clock(struct cgpu_info *bflsc, int dev) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + char buf[BFLSC_BUFSIZ+1]; + char msg[16]; + int err, amount; + bool sent; + + // Device is gone + if (bflsc->usbinfo.nodev) + return; + + snprintf(msg, sizeof(msg), "F%XX", sc_info->clock_next); + + mutex_lock(&bflsc->device_mutex); + + err = send_recv_ss(bflsc, dev, &sent, &amount, + msg, strlen(msg), C_SETCLOCK, + buf, sizeof(buf)-1, C_REPLYSETCLOCK, READ_NL); + mutex_unlock(&(bflsc->device_mutex)); + + if (!sent) + bflsc_applog(bflsc, dev, C_SETCLOCK, amount, err); + else { + // Don't care + } + + sc_info->clock_next_stat = false; + + return; +} + +static void bflsc_flash_led(struct cgpu_info *bflsc, int dev) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + char buf[BFLSC_BUFSIZ+1]; + int err, amount; + bool sent; + + // Device is gone + if (bflsc->usbinfo.nodev) + return; + + // It is not critical flashing the led so don't get stuck if we + // can't grab the mutex now + if (mutex_trylock(&bflsc->device_mutex)) + return; + + err = send_recv_ss(bflsc, dev, 
&sent, &amount, + BFLSC_FLASH, BFLSC_FLASH_LEN, C_REQUESTFLASH, + buf, sizeof(buf)-1, C_FLASHREPLY, READ_NL); + mutex_unlock(&(bflsc->device_mutex)); + + if (!sent) + bflsc_applog(bflsc, dev, C_REQUESTFLASH, amount, err); + else { + // Don't care + } + + // Once we've tried - don't do it until told to again + // - even if it failed + sc_info->flash_led = false; + + return; +} + +/* Flush and stop all work if the device reaches the thermal cutoff temp, or + * temporarily stop queueing work if it's in the throttling range. */ +static void bflsc_manage_temp(struct cgpu_info *bflsc, struct bflsc_dev *sc_dev, + int dev, float temp) +{ + bflsc->temp = temp; + if (bflsc->cutofftemp > 0) { + int cutoff = bflsc->cutofftemp; + int throttle = cutoff - BFLSC_TEMP_THROTTLE; + int recover = cutoff - BFLSC_TEMP_RECOVER; + + if (sc_dev->overheat) { + if (temp < recover) + sc_dev->overheat = false; + } else if (temp > throttle) { + sc_dev->overheat = true; + if (temp > cutoff) { + applog(LOG_WARNING, "%s%i: temp (%.1f) hit thermal cutoff limit %d, stopping work!", + bflsc->drv->name, bflsc->device_id, temp, cutoff); + dev_error(bflsc, REASON_DEV_THERMAL_CUTOFF); + flush_one_dev(bflsc, dev); + + } else { + applog(LOG_NOTICE, "%s%i: temp (%.1f) hit thermal throttle limit %d, throttling", + bflsc->drv->name, bflsc->device_id, temp, throttle); + } + } + } +} + +static bool bflsc_get_temp(struct cgpu_info *bflsc, int dev) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + struct bflsc_dev *sc_dev; + char temp_buf[BFLSC_BUFSIZ+1]; + char volt_buf[BFLSC_BUFSIZ+1]; + char *tmp; + int err, amount; + char *firstname, **fields, *lf; + char xlink[17]; + int count; + bool res, sent; + float temp, temp1, temp2; + float vcc1, vcc2, vmain; + + // Device is gone + if (bflsc->usbinfo.nodev) + return false; + + if (dev >= sc_info->sc_count) { + applog(LOG_ERR, "%s%i: temp invalid xlink device %d - limit %d", + bflsc->drv->name, bflsc->device_id, dev, sc_info->sc_count - 
1); + return false; + } + + if (sc_info->volt_next_stat || sc_info->clock_next_stat) { + if (sc_info->volt_next_stat) + bflsc_set_volt(bflsc, dev); + if (sc_info->clock_next_stat) + bflsc_set_clock(bflsc, dev); + return true; + } + + // Flash instead of Temp + if (sc_info->flash_led) { + bflsc_flash_led(bflsc, dev); + return true; + } + + xlinkstr(xlink, sizeof(xlink), dev, sc_info); + + /* It is not very critical getting temp so don't get stuck if we + * can't grab the mutex here */ + if (mutex_trylock(&bflsc->device_mutex)) + return false; + + err = send_recv_ss(bflsc, dev, &sent, &amount, + BFLSC_TEMPERATURE, BFLSC_TEMPERATURE_LEN, C_REQUESTTEMPERATURE, + temp_buf, sizeof(temp_buf)-1, C_GETTEMPERATURE, READ_NL); + mutex_unlock(&(bflsc->device_mutex)); + + if (!sent) { + applog(LOG_ERR, "%s%i: Error: Request%s temp invalid/timed out (%d:%d)", + bflsc->drv->name, bflsc->device_id, xlink, amount, err); + return false; + } else { + if (err < 0 || amount < 1) { + if (err < 0) { + applog(LOG_ERR, "%s%i: Error: Get%s temp return invalid/timed out (%d:%d)", + bflsc->drv->name, bflsc->device_id, xlink, amount, err); + } else { + applog(LOG_ERR, "%s%i: Error: Get%s temp returned nothing (%d:%d)", + bflsc->drv->name, bflsc->device_id, xlink, amount, err); + } + return false; + } + } + + // Ignore it if we can't get the V + if (mutex_trylock(&bflsc->device_mutex)) + return false; + + err = send_recv_ss(bflsc, dev, &sent, &amount, + BFLSC_VOLTAGE, BFLSC_VOLTAGE_LEN, C_REQUESTVOLTS, + volt_buf, sizeof(volt_buf)-1, C_GETVOLTS, READ_NL); + mutex_unlock(&(bflsc->device_mutex)); + + if (!sent) { + applog(LOG_ERR, "%s%i: Error: Request%s volts invalid/timed out (%d:%d)", + bflsc->drv->name, bflsc->device_id, xlink, amount, err); + return false; + } else { + if (err < 0 || amount < 1) { + if (err < 0) { + applog(LOG_ERR, "%s%i: Error: Get%s volt return invalid/timed out (%d:%d)", + bflsc->drv->name, bflsc->device_id, xlink, amount, err); + } else { + applog(LOG_ERR, "%s%i: Error: 
Get%s volt returned nothing (%d:%d)", + bflsc->drv->name, bflsc->device_id, xlink, amount, err); + } + return false; + } + } + + res = breakdown(ALLCOLON, temp_buf, &count, &firstname, &fields, &lf); + if (lf) + *lf = '\0'; + if (!res || count < 2 || !lf) { + tmp = str_text(temp_buf); + applog(LOG_WARNING, "%s%i: Invalid%s temp reply: '%s'", + bflsc->drv->name, bflsc->device_id, xlink, tmp); + free(tmp); + freebreakdown(&count, &firstname, &fields); + dev_error(bflsc, REASON_DEV_COMMS_ERROR); + return false; + } + + temp = temp1 = (float)atoi(fields[0]); + temp2 = (float)atoi(fields[1]); + + freebreakdown(&count, &firstname, &fields); + + res = breakdown(NOCOLON, volt_buf, &count, &firstname, &fields, &lf); + if (lf) + *lf = '\0'; + if (!res || count != 3 || !lf) { + tmp = str_text(volt_buf); + applog(LOG_WARNING, "%s%i: Invalid%s volt reply: '%s'", + bflsc->drv->name, bflsc->device_id, xlink, tmp); + free(tmp); + freebreakdown(&count, &firstname, &fields); + dev_error(bflsc, REASON_DEV_COMMS_ERROR); + return false; + } + + sc_dev = &sc_info->sc_devs[dev]; + vcc1 = (float)atoi(fields[0]) / 1000.0; + vcc2 = (float)atoi(fields[1]) / 1000.0; + vmain = (float)atoi(fields[2]) / 1000.0; + + freebreakdown(&count, &firstname, &fields); + + if (vcc1 > 0 || vcc2 > 0 || vmain > 0) { + wr_lock(&(sc_info->stat_lock)); + if (vcc1 > 0) { + if (unlikely(sc_dev->vcc1 == 0)) + sc_dev->vcc1 = vcc1; + else { + sc_dev->vcc1 += vcc1 * 0.63; + sc_dev->vcc1 /= 1.63; + } + } + if (vcc2 > 0) { + if (unlikely(sc_dev->vcc2 == 0)) + sc_dev->vcc2 = vcc2; + else { + sc_dev->vcc2 += vcc2 * 0.63; + sc_dev->vcc2 /= 1.63; + } + } + if (vmain > 0) { + if (unlikely(sc_dev->vmain == 0)) + sc_dev->vmain = vmain; + else { + sc_dev->vmain += vmain * 0.63; + sc_dev->vmain /= 1.63; + } + } + wr_unlock(&(sc_info->stat_lock)); + } + + if (temp1 > 0 || temp2 > 0) { + wr_lock(&(sc_info->stat_lock)); + if (unlikely(!sc_dev->temp1)) + sc_dev->temp1 = temp1; + else { + sc_dev->temp1 += temp1 * 0.63; + 
sc_dev->temp1 /= 1.63; + } + if (unlikely(!sc_dev->temp2)) + sc_dev->temp2 = temp2; + else { + sc_dev->temp2 += temp2 * 0.63; + sc_dev->temp2 /= 1.63; + } + if (temp1 > sc_dev->temp1_max) { + sc_dev->temp1_max = temp1; + sc_dev->temp1_max_time = time(NULL); + } + if (temp2 > sc_dev->temp2_max) { + sc_dev->temp2_max = temp2; + sc_dev->temp2_max_time = time(NULL); + } + + if (unlikely(sc_dev->temp1_5min_av == 0)) + sc_dev->temp1_5min_av = temp1; + else { + sc_dev->temp1_5min_av += temp1 * .0042; + sc_dev->temp1_5min_av /= 1.0042; + } + if (unlikely(sc_dev->temp2_5min_av == 0)) + sc_dev->temp2_5min_av = temp2; + else { + sc_dev->temp2_5min_av += temp2 * .0042; + sc_dev->temp2_5min_av /= 1.0042; + } + wr_unlock(&(sc_info->stat_lock)); + + if (temp < temp2) + temp = temp2; + + bflsc_manage_temp(bflsc, sc_dev, dev, temp); + } + + return true; +} + +static void inc_core_errors(struct bflsc_info *info, int8_t core) +{ + if (info->ident == IDENT_BMA) { + if (core >= 0) + info->cortex_hw[core]++; + } else { + if (core >= 0 && core < 16) + info->core_hw[core]++; + } +} + +static void inc_bflsc_errors(struct thr_info *thr, struct bflsc_info *info, int8_t core) +{ + inc_hw_errors(thr); + inc_core_errors(info, core); +} + +static void inc_bflsc_nonces(struct bflsc_info *info, int8_t core) +{ + if (info->ident == IDENT_BMA) { + if (core >= 0) + info->cortex_nonces[core]++; + } else { + if (core >= 0 && core < 16) + info->core_nonces[core]++; + } +} + +struct work *bflsc_work_by_uid(struct cgpu_info *bflsc, struct bflsc_info *sc_info, int id) +{ + struct bflsc_work *bwork; + struct work *work = NULL; + + wr_lock(&bflsc->qlock); + HASH_FIND_INT(sc_info->bworks, &id, bwork); + if (likely(bwork)) { + HASH_DEL(sc_info->bworks, bwork); + work = bwork->work; + free(bwork); + } + wr_unlock(&bflsc->qlock); + + return work; +} + +static void process_nonces(struct cgpu_info *bflsc, int dev, char *xlink, char *data, int count, char **fields, int *nonces) +{ + struct bflsc_info *sc_info = 
(struct bflsc_info *)(bflsc->device_data); + struct thr_info *thr = bflsc->thr[0]; + struct work *work = NULL; + int8_t core = -1; + uint32_t nonce; + int i, num, x; + char *tmp; + bool res; + + if (count < sc_info->que_fld_min) { + tmp = str_text(data); + applogsiz(LOG_INFO, BFLSC_APPLOGSIZ, + "%s%i:%s work returned too small (%d,%s)", + bflsc->drv->name, bflsc->device_id, xlink, count, tmp); + free(tmp); + inc_bflsc_errors(thr, sc_info, core); + return; + } + + if (sc_info->ident == IDENT_BMA) { + unsigned int ucore; + + if (sscanf(fields[QUE_CC], "%x", &ucore) == 1) + core = ucore; + } else if (sc_info->que_noncecount != QUE_NONCECOUNT_V1) { + unsigned int ucore; + + if (sscanf(fields[QUE_CHIP_V2], "%x", &ucore) == 1) + core = ucore; + } + + if (count > sc_info->que_fld_max) { + applog(LOG_INFO, "%s%i:%s work returned too large (%d) processing %d anyway", + bflsc->drv->name, bflsc->device_id, xlink, count, sc_info->que_fld_max); + count = sc_info->que_fld_max; + inc_bflsc_errors(thr, sc_info, core); + } + + num = atoi(fields[sc_info->que_noncecount]); + if (num != count - sc_info->que_fld_min) { + tmp = str_text(data); + applogsiz(LOG_INFO, BFLSC_APPLOGSIZ, + "%s%i:%s incorrect data count (%d) will use %d instead from (%s)", + bflsc->drv->name, bflsc->device_id, xlink, num, + count - sc_info->que_fld_max, tmp); + free(tmp); + inc_bflsc_errors(thr, sc_info, core); + } + + if (sc_info->ident == IDENT_BMA) { + int uid; + + if (sscanf(fields[QUE_UID], "%04x", &uid) == 1) + work = bflsc_work_by_uid(bflsc, sc_info, uid); + } else { + char midstate[MIDSTATE_BYTES] = {}, blockdata[MERKLE_BYTES] = {}; + + if (!hex2bin((unsigned char *)midstate, fields[QUE_MIDSTATE], MIDSTATE_BYTES) || + !hex2bin((unsigned char *)blockdata, fields[QUE_BLOCKDATA], MERKLE_BYTES)) { + applog(LOG_INFO, "%s%i:%s Failed to convert binary data to hex result - ignored", + bflsc->drv->name, bflsc->device_id, xlink); + inc_bflsc_errors(thr, sc_info, core); + return; + } + + work = 
take_queued_work_bymidstate(bflsc, midstate, MIDSTATE_BYTES, + blockdata, MERKLE_OFFSET, MERKLE_BYTES); + } + if (!work) { + if (sc_info->not_first_work) { + applog(LOG_INFO, "%s%i:%s failed to find nonce work - can't be processed - ignored", + bflsc->drv->name, bflsc->device_id, xlink); + inc_bflsc_errors(thr, sc_info, core); + } + return; + } + + res = false; + x = 0; + for (i = sc_info->que_fld_min; i < count; i++) { + if (strlen(fields[i]) != 8) { + tmp = str_text(data); + applogsiz(LOG_INFO, BFLSC_APPLOGSIZ, + "%s%i:%s invalid nonce (%s) will try to process anyway", + bflsc->drv->name, bflsc->device_id, xlink, tmp); + free(tmp); + } + + hex2bin((void*)&nonce, fields[i], 4); + nonce = htobe32(nonce); + res = submit_nonce(thr, work, nonce); + if (res) { + wr_lock(&(sc_info->stat_lock)); + sc_info->sc_devs[dev].nonces_found++; + wr_unlock(&(sc_info->stat_lock)); + + (*nonces)++; + x++; + inc_bflsc_nonces(sc_info, core); + } else + inc_core_errors(sc_info, core); + } + + wr_lock(&(sc_info->stat_lock)); + if (res) + sc_info->sc_devs[dev].result_id++; + if (x > QUE_MAX_RESULTS) + x = QUE_MAX_RESULTS + 1; + (sc_info->result_size[x])++; + sc_info->sc_devs[dev].work_complete++; + sc_info->sc_devs[dev].hashes_unsent += FULLNONCE; + // If not flushed (stale) + if (!(work->devflag)) + sc_info->sc_devs[dev].work_queued -= 1; + wr_unlock(&(sc_info->stat_lock)); + + free_work(work); +} + +static int process_results(struct cgpu_info *bflsc, int dev, char *pbuf, int *nonces, int *in_process) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + char **items, *firstname, **fields, *lf; + int que = 0, i, lines, count; + char *tmp, *tmp2, *buf; + char xlink[17]; + bool res; + + *nonces = 0; + *in_process = 0; + + xlinkstr(xlink, sizeof(xlink), dev, sc_info); + + buf = strdup(pbuf); + if (!strncmp(buf, "INPROCESS", 9)) + sscanf(buf, "INPROCESS:%d\n%s", in_process, pbuf); + res = tolines(bflsc, dev, buf, &lines, &items, C_GETRESULTS); + if (!res || lines < 
1) { + tmp = str_text(pbuf); + applogsiz(LOG_ERR, BFLSC_APPLOGSIZ, + "%s%i:%s empty result (%s) ignored", + bflsc->drv->name, bflsc->device_id, xlink, tmp); + free(tmp); + goto arigatou; + } + + if (lines < QUE_RES_LINES_MIN) { + tmp = str_text(pbuf); + applogsiz(LOG_ERR, BFLSC_APPLOGSIZ, + "%s%i:%s result of %d too small (%s) ignored", + bflsc->drv->name, bflsc->device_id, xlink, lines, tmp); + free(tmp); + goto arigatou; + } + + breakdown(ONECOLON, items[1], &count, &firstname, &fields, &lf); + if (count < 1) { + tmp = str_text(pbuf); + tmp2 = str_text(items[1]); + applogsiz(LOG_ERR, BFLSC_APPLOGSIZ, + "%s%i:%s empty result count (%s) in (%s) ignoring", + bflsc->drv->name, bflsc->device_id, xlink, tmp2, tmp); + free(tmp2); + free(tmp); + goto arigatou; + } else if (count != 1) { + tmp = str_text(pbuf); + tmp2 = str_text(items[1]); + applogsiz(LOG_ERR, BFLSC_APPLOGSIZ, + "%s%i:%s incorrect result count %d (%s) in (%s) will try anyway", + bflsc->drv->name, bflsc->device_id, xlink, count, tmp2, tmp); + free(tmp2); + free(tmp); + } + + que = atoi(fields[0]); + if (que != (lines - QUE_RES_LINES_MIN)) { + i = que; + // 1+ In case the last line isn't 'OK' - try to process it + que = 1 + lines - QUE_RES_LINES_MIN; + + tmp = str_text(pbuf); + tmp2 = str_text(items[0]); + applogsiz(LOG_ERR, BFLSC_APPLOGSIZ, + "%s%i:%s incorrect result count %d (%s) will try %d (%s)", + bflsc->drv->name, bflsc->device_id, xlink, i, tmp2, que, tmp); + free(tmp2); + free(tmp); + + } + + freebreakdown(&count, &firstname, &fields); + + for (i = 0; i < que; i++) { + res = breakdown(NOCOLON, items[i + QUE_RES_LINES_MIN - 1], &count, &firstname, &fields, &lf); + if (likely(res)) + process_nonces(bflsc, dev, &(xlink[0]), items[i], count, fields, nonces); + else + applogsiz(LOG_ERR, BFLSC_APPLOGSIZ, + "%s%i:%s failed to process nonce %s", + bflsc->drv->name, bflsc->device_id, xlink, items[i]); + freebreakdown(&count, &firstname, &fields); + sc_info->not_first_work = true; + } + +arigatou: + 
freetolines(&lines, &items); + free(buf); + + return que; +} + +#define TVF(tv) ((float)((tv)->tv_sec) + ((float)((tv)->tv_usec) / 1000000.0)) +#define TVFMS(tv) (TVF(tv) * 1000.0) + +// Thread to simply keep looking for results +static void *bflsc_get_results(void *userdata) +{ + struct cgpu_info *bflsc = (struct cgpu_info *)userdata; + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + struct timeval elapsed, now; + float oldest, f; + char buf[BFLSC_BUFSIZ+1]; + int err, amount; + int i, que, dev, nonces; + bool readok; + + cgtime(&now); + for (i = 0; i < sc_info->sc_count; i++) { + copy_time(&(sc_info->sc_devs[i].last_check_result), &now); + copy_time(&(sc_info->sc_devs[i].last_dev_result), &now); + copy_time(&(sc_info->sc_devs[i].last_nonce_result), &now); + } + + while (sc_info->shutdown == false) { + cgtimer_t ts_start; + int in_process; + + if (bflsc->usbinfo.nodev) + return NULL; + + dev = -1; + oldest = FLT_MAX; + cgtime(&now); + + // Find the first oldest ... that also needs checking + for (i = 0; i < sc_info->sc_count; i++) { + timersub(&now, &(sc_info->sc_devs[i].last_check_result), &elapsed); + f = TVFMS(&elapsed); + if (f < oldest && f >= sc_info->sc_devs[i].ms_work) { + f = oldest; + dev = i; + } + } + + if (bflsc->usbinfo.nodev) + return NULL; + + cgsleep_prepare_r(&ts_start); + if (dev == -1) + goto utsura; + + cgtime(&(sc_info->sc_devs[dev].last_check_result)); + + readok = bflsc_qres(bflsc, buf, sizeof(buf), dev, &err, &amount, false); + if (err < 0 || (!readok && amount != BFLSC_QRES_LEN) || (readok && amount < 1)) { + // TODO: do what else? 
+ } else { + que = process_results(bflsc, dev, buf, &nonces, &in_process); + sc_info->not_first_work = true; // in case it failed processing it + if (que > 0) + cgtime(&(sc_info->sc_devs[dev].last_dev_result)); + if (nonces > 0) + cgtime(&(sc_info->sc_devs[dev].last_nonce_result)); + + /* There are more results queued so do not sleep */ + if (in_process) + continue; + // TODO: if not getting results ... reinit? + } + +utsura: + cgsleep_ms_r(&ts_start, sc_info->results_sleep_time); + } + + return NULL; +} + +static bool bflsc_thread_prepare(struct thr_info *thr) +{ + struct cgpu_info *bflsc = thr->cgpu; + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + + if (thr_info_create(&(sc_info->results_thr), NULL, bflsc_get_results, (void *)bflsc)) { + applog(LOG_ERR, "%s%i: thread create failed", bflsc->drv->name, bflsc->device_id); + return false; + } + pthread_detach(sc_info->results_thr.pth); + + return true; +} + +static void bflsc_shutdown(struct thr_info *thr) +{ + struct cgpu_info *bflsc = thr->cgpu; + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + + bflsc_flush_work(bflsc); + sc_info->shutdown = true; +} + +static void bflsc_thread_enable(struct thr_info *thr) +{ + struct cgpu_info *bflsc = thr->cgpu; + + if (bflsc->usbinfo.nodev) + return; + + bflsc_initialise(bflsc); +} + +static bool bflsc_send_work(struct cgpu_info *bflsc, int dev, bool mandatory) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + struct FullNonceRangeJob data; + char buf[BFLSC_BUFSIZ+1]; + bool sent, ret = false; + struct work *work; + int err, amount; + int len, try; + int stage; + + // Device is gone + if (bflsc->usbinfo.nodev) + return false; + + // TODO: handle this everywhere + if (sc_info->sc_devs[dev].overheat == true) + return false; + + // Initially code only deals with sending one work item + data.payloadSize = BFLSC_JOBSIZ; + data.endOfBlock = BFLSC_EOB; + + len = sizeof(struct FullNonceRangeJob); + + /* 
On faster devices we have a lot of lock contention so only + * mandatorily grab the lock and send work if the queue is empty since + * we have a submit queue. */ + if (mandatory) + mutex_lock(&(bflsc->device_mutex)); + else { + if (mutex_trylock(&bflsc->device_mutex)) + return ret; + } + + work = get_queued(bflsc); + if (unlikely(!work)) { + mutex_unlock(&bflsc->device_mutex); + return ret; + } + memcpy(data.midState, work->midstate, MIDSTATE_BYTES); + memcpy(data.blockData, work->data + MERKLE_OFFSET, MERKLE_BYTES); + try = 0; +re_send: + err = send_recv_ds(bflsc, dev, &stage, &sent, &amount, + BFLSC_QJOB, BFLSC_QJOB_LEN, C_REQUESTQUEJOB, C_REQUESTQUEJOBSTATUS, + (char *)&data, len, C_QUEJOB, C_QUEJOBSTATUS, + buf, sizeof(buf)-1); + mutex_unlock(&(bflsc->device_mutex)); + + switch (stage) { + case 1: + if (!sent) { + bflsc_applog(bflsc, dev, C_REQUESTQUEJOB, amount, err); + goto out; + } else { + // TODO: handle other errors ... + + // Try twice + if (try++ < 1 && amount > 1 && + strstr(buf, BFLSC_TIMEOUT)) + goto re_send; + + bflsc_applog(bflsc, dev, C_REQUESTQUEJOBSTATUS, amount, err); + goto out; + } + break; + case 2: + if (!sent) { + bflsc_applog(bflsc, dev, C_QUEJOB, amount, err); + goto out; + } else { + if (!isokerr(err, buf, amount)) { + // TODO: check for QUEUE FULL and set work_queued to sc_info->que_size + // and report a code bug LOG_ERR - coz it should never happen + // TODO: handle other errors ... 
+ + // Try twice + if (try++ < 1 && amount > 1 && + strstr(buf, BFLSC_TIMEOUT)) + goto re_send; + + bflsc_applog(bflsc, dev, C_QUEJOBSTATUS, amount, err); + goto out; + } + } + break; + } + + wr_lock(&(sc_info->stat_lock)); + sc_info->sc_devs[dev].work_queued++; + wr_unlock(&(sc_info->stat_lock)); + + work->subid = dev; + ret = true; +out: + if (unlikely(!ret)) + work_completed(bflsc, work); + return ret; +} + +#define JP_COMMAND 0 +#define JP_STREAMLENGTH 2 +#define JP_SIGNATURE 4 +#define JP_JOBSINARRY 5 +#define JP_JOBSARRY 6 +#define JP_ARRAYSIZE 45 + +static bool bflsc28_queue_full(struct cgpu_info *bflsc) +{ + struct bflsc_info *sc_info = bflsc->device_data; + int created, queued = 0, create, i, offset; + struct work *base_work, *work, *works[10]; + char *buf, *field, *ptr; + bool sent, ret = false; + uint16_t *streamlen; + uint8_t *job_pack; + int err, amount; + + job_pack = alloca(2 + // Command + 2 + // StreamLength + 1 + // Signature + 1 + // JobsInArray + JP_ARRAYSIZE * 10 +// Array of up to 10 Job Structs + 1 // EndOfWrapper + ); + + if (bflsc->usbinfo.nodev) + return true; + + /* Don't send any work if this device is overheating */ + if (sc_info->sc_devs[0].overheat == true) + return true; + + wr_lock(&bflsc->qlock); + base_work = __get_queued(bflsc); + if (likely(base_work)) + __work_completed(bflsc, base_work); + wr_unlock(&bflsc->qlock); + + if (unlikely(!base_work)) + return ret; + created = 1; + + create = 9; + if (base_work->drv_rolllimit < create) + create = base_work->drv_rolllimit; + + works[0] = base_work; + for (i = 1; i <= create ; i++) { + created++; + work = make_clone(base_work); + roll_work(base_work); + works[i] = work; + } + + memcpy(job_pack, "WX", 2); + streamlen = (uint16_t *)&job_pack[JP_STREAMLENGTH]; + *streamlen = created * JP_ARRAYSIZE + 7; + job_pack[JP_SIGNATURE] = 0xc1; + job_pack[JP_JOBSINARRY] = created; + offset = JP_JOBSARRY; + + /* Create the maximum number of work items we can queue by nrolling one */ + for (i = 0; i 
< created; i++) { + work = works[i]; + memcpy(job_pack + offset, work->midstate, MIDSTATE_BYTES); + offset += MIDSTATE_BYTES; + memcpy(job_pack + offset, work->data + MERKLE_OFFSET, MERKLE_BYTES); + offset += MERKLE_BYTES; + job_pack[offset] = 0xaa; // EndOfBlock signature + offset++; + } + job_pack[offset++] = 0xfe; // EndOfWrapper + + buf = alloca(BFLSC_BUFSIZ + 1); + mutex_lock(&bflsc->device_mutex); + err = send_recv_ss(bflsc, 0, &sent, &amount, (char *)job_pack, offset, + C_REQUESTQUEJOB, buf, BFLSC_BUFSIZ, C_REQUESTQUEJOBSTATUS, READ_NL); + mutex_unlock(&bflsc->device_mutex); + + if (!isokerr(err, buf, amount)) { + if (!strncasecmp(buf, "ERR:QUEUE FULL", 14)) { + applog(LOG_DEBUG, "%s%d: Queue full", + bflsc->drv->name, bflsc->device_id); + ret = true; + } else { + applog(LOG_WARNING, "%s%d: Queue response not ok %s", + bflsc->drv->name, bflsc->device_id, buf); + } + goto out; + } + + ptr = alloca(strlen(buf)); + if (sscanf(buf, "OK:QUEUED %d:%s", &queued, ptr) != 2) { + applog(LOG_WARNING, "%s%d: Failed to parse queue response %s", + bflsc->drv->name, bflsc->device_id, buf); + goto out; + } + if (queued < 1 || queued > 10) { + applog(LOG_WARNING, "%s%d: Invalid queued count %d", + bflsc->drv->name, bflsc->device_id, queued); + queued = 0; + goto out; + } + for (i = 0; i < queued; i++) { + struct bflsc_work *bwork, *oldbwork; + unsigned int uid; + + work = works[i]; + field = Strsep(&ptr, ","); + if (!field) { + applog(LOG_WARNING, "%s%d: Ran out of queued IDs after %d of %d", + bflsc->drv->name, bflsc->device_id, i, queued); + queued = i - 1; + goto out; + } + sscanf(field, "%04x", &uid); + bwork = calloc(sizeof(struct bflsc_work), 1); + bwork->id = uid; + bwork->work = work; + + wr_lock(&bflsc->qlock); + HASH_REPLACE_INT(sc_info->bworks, id, bwork, oldbwork); + if (oldbwork) { + free_work(oldbwork->work); + free(oldbwork); + } + wr_unlock(&bflsc->qlock); + sc_info->sc_devs[0].work_queued++; + } + if (queued < created) + ret = true; +out: + for (i = queued; 
i < created; i++) { + work = works[i]; + discard_work(work); + } + return ret; +} + +static bool bflsc_queue_full(struct cgpu_info *bflsc) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + int i, dev, tried, que; + bool ret = false; + int tries = 0; + + tried = -1; + // if something is wrong with a device try the next one available + // TODO: try them all? Add an unavailable flag to sc_devs[i] init to 0 here first + while (++tries < 3) { + bool mandatory = false; + + // Device is gone - shouldn't normally get here + if (bflsc->usbinfo.nodev) { + ret = true; + break; + } + + dev = -1; + rd_lock(&(sc_info->stat_lock)); + // Anything waiting - gets the work first + for (i = 0; i < sc_info->sc_count; i++) { + // TODO: and ignore x-link dead - once I work out how to decide it is dead + if (i != tried && sc_info->sc_devs[i].work_queued == 0 && + !sc_info->sc_devs[i].overheat) { + dev = i; + break; + } + } + + if (dev == -1) { + que = sc_info->que_size * 10; // 10x is certainly above the MAX it could be + // The first device with the smallest amount queued + for (i = 0; i < sc_info->sc_count; i++) { + if (i != tried && sc_info->sc_devs[i].work_queued < que && + !sc_info->sc_devs[i].overheat) { + dev = i; + que = sc_info->sc_devs[i].work_queued; + } + } + if (que > sc_info->que_full_enough) + dev = -1; + else if (que < sc_info->que_low) + mandatory = true; + } + rd_unlock(&(sc_info->stat_lock)); + + // nothing needs work yet + if (dev == -1) { + ret = true; + break; + } + + if (bflsc_send_work(bflsc, dev, mandatory)) + break; + else + tried = dev; + } + + return ret; +} + +static int64_t bflsc_scanwork(struct thr_info *thr) +{ + struct cgpu_info *bflsc = thr->cgpu; + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + int64_t ret, unsent; + bool flushed, cleanup; + struct work *work, *tmp; + int dev, waited, i; + + // Device is gone + if (bflsc->usbinfo.nodev) + return -1; + + flushed = false; + // Single lock check if any 
are flagged as flushed + rd_lock(&(sc_info->stat_lock)); + for (dev = 0; dev < sc_info->sc_count; dev++) + flushed |= sc_info->sc_devs[dev].flushed; + rd_unlock(&(sc_info->stat_lock)); + + // > 0 flagged as flushed + if (flushed) { +// TODO: something like this ...... + for (dev = 0; dev < sc_info->sc_count; dev++) { + cleanup = false; + + // Is there any flushed work that can be removed? + rd_lock(&(sc_info->stat_lock)); + if (sc_info->sc_devs[dev].flushed) { + if (sc_info->sc_devs[dev].result_id > (sc_info->sc_devs[dev].flush_id + sc_info->flush_size)) + cleanup = true; + } + rd_unlock(&(sc_info->stat_lock)); + + // yes remove the flushed work that can be removed + if (cleanup) { + wr_lock(&bflsc->qlock); + HASH_ITER(hh, bflsc->queued_work, work, tmp) { + if (work->devflag && work->subid == dev) { + bflsc->queued_count--; + HASH_DEL(bflsc->queued_work, work); + discard_work(work); + } + } + wr_unlock(&bflsc->qlock); + + wr_lock(&(sc_info->stat_lock)); + sc_info->sc_devs[dev].flushed = false; + wr_unlock(&(sc_info->stat_lock)); + } + } + } + + waited = restart_wait(thr, sc_info->scan_sleep_time); + if (waited == ETIMEDOUT && sc_info->ident != IDENT_BMA) { + unsigned int old_sleep_time, new_sleep_time = 0; + int min_queued = sc_info->que_size; + /* Only adjust the scan_sleep_time if we did not receive a + * restart message while waiting. Try to adjust sleep time + * so we drop to sc_info->que_watermark before getting more work. 
+ */ + + rd_lock(&sc_info->stat_lock); + old_sleep_time = sc_info->scan_sleep_time; + for (i = 0; i < sc_info->sc_count; i++) { + if (sc_info->sc_devs[i].work_queued < min_queued) + min_queued = sc_info->sc_devs[i].work_queued; + } + rd_unlock(&sc_info->stat_lock); + new_sleep_time = old_sleep_time; + + /* Increase slowly but decrease quickly */ + if (min_queued > sc_info->que_full_enough && old_sleep_time < BFLSC_MAX_SLEEP) + new_sleep_time = old_sleep_time * 21 / 20; + else if (min_queued < sc_info->que_low) + new_sleep_time = old_sleep_time * 2 / 3; + + /* Do not sleep more than BFLSC_MAX_SLEEP so we can always + * report in at least 2 results per 5s log interval. */ + if (new_sleep_time != old_sleep_time) { + if (new_sleep_time > BFLSC_MAX_SLEEP) + new_sleep_time = BFLSC_MAX_SLEEP; + else if (new_sleep_time == 0) + new_sleep_time = 1; + applog(LOG_DEBUG, "%s%i: Changed scan sleep time to %d", + bflsc->drv->name, bflsc->device_id, new_sleep_time); + + wr_lock(&sc_info->stat_lock); + sc_info->scan_sleep_time = new_sleep_time; + wr_unlock(&sc_info->stat_lock); + } + } + + // Count up the work done since we last were here + ret = 0; + wr_lock(&(sc_info->stat_lock)); + for (dev = 0; dev < sc_info->sc_count; dev++) { + unsent = sc_info->sc_devs[dev].hashes_unsent; + sc_info->sc_devs[dev].hashes_unsent = 0; + sc_info->sc_devs[dev].hashes_sent += unsent; + sc_info->hashes_sent += unsent; + ret += unsent; + } + wr_unlock(&(sc_info->stat_lock)); + + return ret; +} + +#define BFLSC_OVER_TEMP 75 + +/* Set the fanspeed to auto for any valid value <= BFLSC_OVER_TEMP, + * or max for any value > BFLSC_OVER_TEMP or if we don't know the temperature. 
*/ +static void bflsc_set_fanspeed(struct cgpu_info *bflsc) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)bflsc->device_data; + char buf[BFLSC_BUFSIZ+1]; + char data[16+1]; + int amount; + bool sent; + + if ((bflsc->temp <= BFLSC_OVER_TEMP && bflsc->temp > 0 && sc_info->fanauto) || + ((bflsc->temp > BFLSC_OVER_TEMP || !bflsc->temp) && !sc_info->fanauto)) + return; + + if (bflsc->temp > BFLSC_OVER_TEMP || !bflsc->temp) { + strcpy(data, BFLSC_FAN4); + sc_info->fanauto = false; + } else { + strcpy(data, BFLSC_FANAUTO); + sc_info->fanauto = true; + } + + applog(LOG_DEBUG, "%s%i: temp=%.0f over=%d set fan to %s", + bflsc->drv->name, bflsc->device_id, bflsc->temp, + BFLSC_OVER_TEMP, data); + + mutex_lock(&bflsc->device_mutex); + send_recv_ss(bflsc, 0, &sent, &amount, + data, strlen(data), C_SETFAN, + buf, sizeof(buf)-1, C_FANREPLY, READ_NL); + mutex_unlock(&bflsc->device_mutex); +} + +static bool bflsc_get_stats(struct cgpu_info *bflsc) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + bool allok = true; + int i; + + // Device is gone + if (bflsc->usbinfo.nodev) + return false; + + for (i = 0; i < sc_info->sc_count; i++) { + if (!bflsc_get_temp(bflsc, i)) + allok = false; + + // Device is gone + if (bflsc->usbinfo.nodev) + return false; + + if (i < (sc_info->sc_count - 1)) + cgsleep_ms(BFLSC_TEMP_SLEEPMS); + } + + bflsc_set_fanspeed(bflsc); + + return allok; +} + +static char *bflsc_set(struct cgpu_info *bflsc, char *option, char *setting, char *replybuf) +{ + struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data); + int val; + + if (sc_info->ident != IDENT_BMA) { + strcpy(replybuf, "no set options available"); + return replybuf; + } + + if (strcasecmp(option, "help") == 0) { + sprintf(replybuf, "volt: range 0-9 clock: range 0-15"); + return replybuf; + } + + if (strcasecmp(option, "volt") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing volt setting"); + return replybuf; + } + + val = 
atoi(setting);
+		if (val < 0 || val > 9) {
+			sprintf(replybuf, "invalid volt: '%s' valid range 0-9",
+				setting);
+			return replybuf;
+		}
+
+		sc_info->volt_next = val;
+		sc_info->volt_next_stat = true;
+
+		return NULL;
+	}
+
+	if (strcasecmp(option, "clock") == 0) {
+		if (!setting || !*setting) {
+			sprintf(replybuf, "missing clock setting");
+			return replybuf;
+		}
+
+		val = atoi(setting);
+		if (val < 0 || val > 15) {
+			sprintf(replybuf, "invalid clock: '%s' valid range 0-15",
+				setting);
+			return replybuf;
+		}
+
+		sc_info->clock_next = val;
+		sc_info->clock_next_stat = true;
+
+		return NULL;
+	}
+
+	sprintf(replybuf, "Unknown option: %s", option);
+	return replybuf;
+}
+
+static void bflsc_identify(struct cgpu_info *bflsc)
+{
+	struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data);
+
+	// TODO: handle x-link
+	sc_info->flash_led = true;
+}
+
+static bool bflsc_thread_init(struct thr_info *thr)
+{
+	struct cgpu_info *bflsc = thr->cgpu;
+
+	if (bflsc->usbinfo.nodev)
+		return false;
+
+	bflsc_initialise(bflsc);
+
+	return true;
+}
+
+// there should be a new API function to return device info that isn't the standard stuff
+// instead of bflsc_api_stats - since the stats should really just be internal code info
+// and the new one should be UNusual device stats/extra details - like the stuff below
+
+static struct api_data *bflsc_api_stats(struct cgpu_info *bflsc)
+{
+	struct bflsc_info *sc_info = (struct bflsc_info *)(bflsc->device_data);
+	struct api_data *root = NULL;
+	char data[4096];
+	char buf[256];
+	int i, j, off;
+	size_t len;
+
+//if no x-link ... 
etc + rd_lock(&(sc_info->stat_lock)); + root = api_add_temp(root, "Temp1", &(sc_info->sc_devs[0].temp1), true); + root = api_add_temp(root, "Temp2", &(sc_info->sc_devs[0].temp2), true); + root = api_add_volts(root, "Vcc1", &(sc_info->sc_devs[0].vcc1), true); + root = api_add_volts(root, "Vcc2", &(sc_info->sc_devs[0].vcc2), true); + root = api_add_volts(root, "Vmain", &(sc_info->sc_devs[0].vmain), true); + root = api_add_temp(root, "Temp1 Max", &(sc_info->sc_devs[0].temp1_max), true); + root = api_add_temp(root, "Temp2 Max", &(sc_info->sc_devs[0].temp2_max), true); + root = api_add_time(root, "Temp1 Max Time", &(sc_info->sc_devs[0].temp1_max_time), true); + root = api_add_time(root, "Temp2 Max Time", &(sc_info->sc_devs[0].temp2_max_time), true); + root = api_add_int(root, "Work Queued", &(sc_info->sc_devs[0].work_queued), true); + root = api_add_int(root, "Work Complete", &(sc_info->sc_devs[0].work_complete), true); + root = api_add_bool(root, "Overheat", &(sc_info->sc_devs[0].overheat), true); + root = api_add_uint64(root, "Flush ID", &(sc_info->sc_devs[0].flush_id), true); + root = api_add_uint64(root, "Result ID", &(sc_info->sc_devs[0].result_id), true); + root = api_add_bool(root, "Flushed", &(sc_info->sc_devs[0].flushed), true); + root = api_add_uint(root, "Scan Sleep", &(sc_info->scan_sleep_time), true); + root = api_add_uint(root, "Results Sleep", &(sc_info->results_sleep_time), true); + root = api_add_uint(root, "Work ms", &(sc_info->default_ms_work), true); + + buf[0] = '\0'; + for (i = 0; i <= QUE_MAX_RESULTS + 1; i++) + tailsprintf(buf, sizeof(buf), "%s%"PRIu64, (i > 0) ? 
"/" : "", sc_info->result_size[i]); + root = api_add_string(root, "Result Size", buf, true); + + rd_unlock(&(sc_info->stat_lock)); + + i = (int)(sc_info->driver_version); + root = api_add_int(root, "Driver", &i, true); + root = api_add_string(root, "Firmware", sc_info->sc_devs[0].firmware, false); + root = api_add_string(root, "Chips", sc_info->sc_devs[0].chips, false); + root = api_add_int(root, "Que Size", &(sc_info->que_size), false); + root = api_add_int(root, "Que Full", &(sc_info->que_full_enough), false); + root = api_add_int(root, "Que Watermark", &(sc_info->que_watermark), false); + root = api_add_int(root, "Que Low", &(sc_info->que_low), false); + root = api_add_escape(root, "GetInfo", sc_info->sc_devs[0].getinfo, false); + +/* +else a whole lot of something like these ... etc + root = api_add_temp(root, "X-%d-Temp1", &(sc_info->temp1), false); + root = api_add_temp(root, "X-%d-Temp2", &(sc_info->temp2), false); + root = api_add_volts(root, "X-%d-Vcc1", &(sc_info->vcc1), false); + root = api_add_volts(root, "X-%d-Vcc2", &(sc_info->vcc2), false); + root = api_add_volts(root, "X-%d-Vmain", &(sc_info->vmain), false); +*/ + if (sc_info->ident == IDENT_BMA) { + for (i = 0; i < 128; i += 16) { + data[0] = '\0'; + off = 0; + for (j = 0; j < 16; j++) { + len = snprintf(data+off, sizeof(data)-off, + "%s%"PRIu64, + j > 0 ? " " : "", + sc_info->cortex_nonces[i+j]); + if (len >= (sizeof(data)-off)) + off = sizeof(data)-1; + else { + if (len > 0) + off += len; + } + } + sprintf(buf, "Cortex %02x-%02x Nonces", i, i+15); + root = api_add_string(root, buf, data, true); + } + for (i = 0; i < 128; i += 16) { + data[0] = '\0'; + off = 0; + for (j = 0; j < 16; j++) { + len = snprintf(data+off, sizeof(data)-off, + "%s%"PRIu64, + j > 0 ? 
" " : "", + sc_info->cortex_hw[i+j]); + if (len >= (sizeof(data)-off)) + off = sizeof(data)-1; + else { + if (len > 0) + off += len; + } + } + sprintf(buf, "Cortex %02x-%02x HW Errors", i, i+15); + root = api_add_string(root, buf, data, true); + } + } else if (sc_info->que_noncecount != QUE_NONCECOUNT_V1) { + for (i = 0; i < 16; i++) { + sprintf(buf, "Core%d Nonces", i); + root = api_add_uint64(root, buf, &sc_info->core_nonces[i], false); + } + for (i = 0; i < 16; i++) { + sprintf(buf, "Core%d HW Errors", i); + root = api_add_uint64(root, buf, &sc_info->core_hw[i], false); + } + } + + return root; +} + +struct device_drv bflsc_drv = { + .drv_id = DRIVER_bflsc, + .dname = "BitForceSC", + .name = BFLSC_SINGLE, + .drv_detect = bflsc_detect, + .get_api_stats = bflsc_api_stats, + .get_statline_before = get_bflsc_statline_before, + .get_stats = bflsc_get_stats, + .set_device = bflsc_set, + .identify_device = bflsc_identify, + .thread_prepare = bflsc_thread_prepare, + .thread_init = bflsc_thread_init, + .hash_work = hash_queued_work, + .scanwork = bflsc_scanwork, + .queue_full = bflsc_queue_full, + .flush_work = bflsc_flush_work, + .thread_shutdown = bflsc_shutdown, + .thread_enable = bflsc_thread_enable +}; diff --git a/driver-bflsc.h b/driver-bflsc.h new file mode 100644 index 0000000..4778993 --- /dev/null +++ b/driver-bflsc.h @@ -0,0 +1,392 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2013 Andrew Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef BFLSC_H +#define BFLSC_H +#define BLANK "" +#define LFSTR "" + +/* + * Firmware + * DRV_V2 expects (beyond V1) the GetInfo to return the chip count + * The queues are 40 instead of 20 and are *usually* consumed and filled + * in bursts due to e.g. 
a 16 chip device doing 16 items at a time and + * returning 16 results at a time + * If the device has varying chip speeds, it will gradually break up the + * burst of results as we progress + */ +enum driver_version { + BFLSC_DRVUNDEF = 0, + BFLSC_DRV1, + BFLSC_DRV2 +}; + +/* + * With Firmware 1.0.0 and a result queue of 20 the Max is: + * inprocess = 12 + * max count = 9 + * 64+1+24+1+1+(1+8)*8+1 per line = 164 * 20 + * OK = 3 + * Total: 3304 + * + * With Firmware 1.2.* and a result queue of 40 but a limit of 15 replies: + * inprocess = 12 + * max count = 9 + * 64+1+24+1+1+1+1+(1+8)*8+1 per line = 166 * 15 + * OK = 3 + * Total: 2514 + * + */ +#define BFLSC_BUFSIZ (0x1000) + +// Should be big enough +#define BFLSC_APPLOGSIZ 8192 + +#define BFLSC_INFO_TIMEOUT 999 + +#define BFLSC_DI_FIRMWARE "FIRMWARE" +#define BFLSC_DI_ENGINES "ENGINES" +#define BFLSC_DI_JOBSINQUE "JOBS IN QUEUE" +#define BFLSC_DI_XLINKMODE "XLINK MODE" +#define BFLSC_DI_XLINKPRESENT "XLINK PRESENT" +#define BFLSC_DI_DEVICESINCHAIN "DEVICES IN CHAIN" +#define BFLSC_DI_CHAINPRESENCE "CHAIN PRESENCE MASK" +#define BFLSC_DI_CHIPS "CHIP PARALLELIZATION" +#define BFLSC_DI_CHIPS_PARALLEL "YES" +#define BFLSC28_DI_ASICS "ASIC Installed" + +#define FULLNONCE 0x100000000ULL + +struct bflsc_dev { + // Work + unsigned int ms_work; + int work_queued; + int work_complete; + int nonces_hw; // TODO: this - need to add a paramter to submit_nonce() + // so can pass 'dev' to hw_error + uint64_t hashes_unsent; + uint64_t hashes_sent; + uint64_t nonces_found; + + struct timeval last_check_result; + struct timeval last_dev_result; // array > 0 + struct timeval last_nonce_result; // > 0 nonce + + // Info + char getinfo[(BFLSC_BUFSIZ+4)*4]; + char *firmware; + int engines; // each engine represents a 'thread' in a chip + char *xlink_mode; + char *xlink_present; + char *chips; + + // Status + bool dead; // TODO: handle seperate x-link devices failing? 
+ bool overheat; + + // Stats + float temp1; + float temp2; + float vcc1; + float vcc2; + float vmain; + float temp1_max; + float temp2_max; + time_t temp1_max_time; + time_t temp2_max_time; + float temp1_5min_av; // TODO: + float temp2_5min_av; // TODO: + + // To handle the fact that flushing the queue may not remove all work + // (normally one item is still being processed) + // and also that once the queue is flushed, results may still be in + // the output queue - but we don't want to process them at the time of doing an LP + // when result_id > flush_id+1, flushed work can be discarded since it + // is no longer in the device + uint64_t flush_id; // counter when results were last flushed + uint64_t result_id; // counter when results were last checked + bool flushed; // are any flushed? +}; + +#define QUE_MAX_RESULTS 8 + +struct bflsc_work { + UT_hash_handle hh; + int id; + struct work *work; +}; + +struct bflsc_info { + enum sub_ident ident; + enum driver_version driver_version; + pthread_rwlock_t stat_lock; + struct thr_info results_thr; + uint64_t hashes_sent; + uint32_t update_count; + struct timeval last_update; + int sc_count; + struct bflsc_dev *sc_devs; + unsigned int scan_sleep_time; + unsigned int results_sleep_time; + unsigned int default_ms_work; + bool shutdown; + bool flash_led; + bool not_first_work; // allow ignoring the first nonce error + bool fanauto; + int que_size; + int que_full_enough; + int que_watermark; + int que_low; + int que_noncecount; + int que_fld_min; + int que_fld_max; + uint64_t core_nonces[17]; + uint64_t core_hw[17]; + int flush_size; + // count of given size, [+2] is for any > QUE_MAX_RESULTS + uint64_t result_size[QUE_MAX_RESULTS+2]; + + struct bflsc_work *bworks; + uint64_t cortex_nonces[0x80]; + uint64_t cortex_hw[0x80]; + + int volt_next; + bool volt_next_stat; + int clock_next; + bool clock_next_stat; +}; + +#define BFLSC_XLINKHDR '@' +#define BFLSC_MAXPAYLOAD 255 + +struct DataForwardToChain { + uint8_t header; + 
uint8_t payloadSize; + uint8_t deviceAddress; + uint8_t payloadData[BFLSC_MAXPAYLOAD]; +}; + +#define DATAFORWARDSIZE(data) (1 + 1 + 1 + data.payloadSize) + +#define MIDSTATE_BYTES 32 +#define MERKLE_OFFSET 64 +#define MERKLE_BYTES 12 +#define BFLSC_QJOBSIZ (MIDSTATE_BYTES+MERKLE_BYTES+1) +#define BFLSC_EOB 0xaa + +struct QueueJobStructure { + uint8_t payloadSize; + uint8_t midState[MIDSTATE_BYTES]; + uint8_t blockData[MERKLE_BYTES]; + uint8_t endOfBlock; +}; + +#define QUE_RES_LINES_MIN 3 +#define QUE_MIDSTATE 0 +#define QUE_BLOCKDATA 1 + +#define QUE_UID 0 +#define QUE_CC 1 + +#define QUE_NONCECOUNT_V1 2 +#define QUE_FLD_MIN_V1 3 +#define QUE_FLD_MAX_V1 (QUE_MAX_RESULTS+QUE_FLD_MIN_V1) + +#define QUE_CHIP_V2 2 +#define QUE_NONCECOUNT_V2 3 +#define QUE_FLD_MIN_V2 4 +#define QUE_FLD_MAX_V2 (QUE_MAX_RESULTS+QUE_FLD_MIN_V2) + +#define BFLSC_SIGNATURE 0xc1 +#define BFLSC_EOW 0xfe + +// N.B. this will only work with 5 jobs +// requires a different jobs[N] for each job count +// but really only need to handle 5 anyway +struct QueueJobPackStructure { + uint8_t payloadSize; + uint8_t signature; + uint8_t jobsInArray; + struct QueueJobStructure jobs[5]; + uint8_t endOfWrapper; +}; + +// TODO: Implement in API and also in usb device selection +struct SaveString { + uint8_t payloadSize; + uint8_t payloadData[BFLSC_MAXPAYLOAD]; +}; + +// Commands (Single Stage) +#define BFLSC_IDENTIFY "ZGX" +#define BFLSC_IDENTIFY_LEN (sizeof(BFLSC_IDENTIFY)-1) +#define BFLSC_DETAILS "ZCX" +#define BFLSC_DETAILS_LEN (sizeof(BFLSC_DETAILS)-1) +#define BFLSC_FIRMWARE "ZJX" +#define BFLSC_FIRMWARE_LEN (sizeof(BFLSC_FIRMWARE)-1) +#define BFLSC_FLASH "ZMX" +#define BFLSC_FLASH_LEN (sizeof(BFLSC_FLASH)-1) +#define BFLSC_VOLTAGE "ZTX" +#define BFLSC_VOLTAGE_LEN (sizeof(BFLSC_VOLTAGE)-1) +#define BFLSC_TEMPERATURE "ZLX" +#define BFLSC_TEMPERATURE_LEN (sizeof(BFLSC_TEMPERATURE)-1) +#define BFLSC_QRES "ZOX" +#define BFLSC_QRES_LEN (sizeof(BFLSC_QRES)-1) +#define BFLSC_QFLUSH "ZQX" +#define 
BFLSC_QFLUSH_LEN (sizeof(BFLSC_QFLUSH)-1) +#define BFLSC_FANAUTO "Z9X" +#define BFLSC_FANOUT_LEN (sizeof(BFLSC_FANAUTO)-1) +#define BFLSC_FAN0 "Z0X" +#define BFLSC_FAN0_LEN (sizeof(BFLSC_FAN0)-1) +#define BFLSC_FAN1 "Z1X" +#define BFLSC_FAN1_LEN (sizeof(BFLSC_FAN1)-1) +#define BFLSC_FAN2 "Z2X" +#define BFLSC_FAN2_LEN (sizeof(BFLSC_FAN2)-1) +#define BFLSC_FAN3 "Z3X" +#define BFLSC_FAN3_LEN (sizeof(BFLSC_FAN3)-1) +#define BFLSC_FAN4 "Z4X" +#define BFLSC_FAN4_LEN (sizeof(BFLSC_FAN4)-1) +#define BFLSC_LOADSTR "ZUX" +#define BFLSC_LOADSTR_LEN (sizeof(BFLSC_LOADSTR)-1) + +// Commands (Dual Stage) +#define BFLSC_QJOB "ZNX" +#define BFLSC_QJOB_LEN (sizeof(BFLSC_QJOB)-1) +#define BFLSC_QJOBS "ZWX" +#define BFLSC_QJOBS_LEN (sizeof(BFLSC_QJOBS)-1) +#define BFLSC_SAVESTR "ZSX" +#define BFLSC_SAVESTR_LEN (sizeof(BFLSC_SAVESTR)-1) + +// Replies +#define BFLSC_IDENTITY "BitFORCE SC" +#define BFLSC_BFLSC "SHA256 SC" +#define BFLSC_BFLSC28 "SC-28nm" + +#define BFLSC_OK "OK\n" +#define BFLSC_OK_LEN (sizeof(BFLSC_OK)-1) +#define BFLSC_SUCCESS "SUCCESS\n" +#define BFLSC_SUCCESS_LEN (sizeof(BFLSC_SUCCESS)-1) + +#define BFLSC_RESULT "COUNT:" +#define BFLSC_RESULT_LEN (sizeof(BFLSC_RESULT)-1) + +#define BFLSC_ANERR "ERR:" +#define BFLSC_ANERR_LEN (sizeof(BFLSC_ANERR)-1) +#define BFLSC_TIMEOUT BFLSC_ANERR "TIMEOUT" +#define BFLSC_TIMEOUT_LEN (sizeof(BFLSC_TIMEOUT)-1) +// x-link timeout has a space (a number follows) +#define BFLSC_XTIMEOUT BFLSC_ANERR "TIMEOUT " +#define BFLSC_XTIMEOUT_LEN (sizeof(BFLSC_XTIMEOUT)-1) +#define BFLSC_INVALID BFLSC_ANERR "INVALID DATA" +#define BFLSC_INVALID_LEN (sizeof(BFLSC_INVALID)-1) +#define BFLSC_ERRSIG BFLSC_ANERR "SIGNATURE" +#define BFLSC_ERRSIG_LEN (sizeof(BFLSC_ERRSIG)-1) +#define BFLSC_OKQ "OK:QUEUED" +#define BFLSC_OKQ_LEN (sizeof(BFLSC_OKQ)-1) +#define BFLSC_INPROCESS "INPROCESS" +#define BFLSC_INPROCESS_LEN (sizeof(BFLSC_INPROCESS)-1) +// Followed by N=1..5 +#define BFLSC_OKQN "OK:QUEUED " +#define BFLSC_OKQN_LEN (sizeof(BFLSC_OKQN)-1) +#define 
BFLSC_QFULL "QUEUE FULL" +#define BFLSC_QFULL_LEN (sizeof(BFLSC_QFULL)-1) +#define BFLSC_HITEMP "HIGH TEMPERATURE RECOVERY" +#define BFLSC_HITEMP_LEN (sizeof(BFLSC_HITEMP)-1) +#define BFLSC_EMPTYSTR "MEMORY EMPTY" +#define BFLSC_EMPTYSTR_LEN (sizeof(BFLSC_EMPTYSTR)-1) + +// Queued and non-queued are the same +#define FullNonceRangeJob QueueJobStructure +#define BFLSC_JOBSIZ BFLSC_QJOBSIZ + +// Non queued commands (not used) +#define BFLSC_SENDWORK "ZDX" +#define BFLSC_SENDWORK_LEN (sizeof(BFLSC_SENDWORK)-1) +#define BFLSC_WORKSTATUS "ZFX" +#define BFLSC_WORKSTATUS_LEN (sizeof(BFLSC_WORKSTATUS)-1) +#define BFLSC_SENDRANGE "ZPX" +#define BFLSC_SENDRANGE_LEN (sizeof(BFLSC_SENDRANGE)-1) + +// Non queued work replies (not used) +#define BFLSC_NONCE "NONCE-FOUND:" +#define BFLSC_NONCE_LEN (sizeof(BFLSC_NONCE)-1) +#define BFLSC_NO_NONCE "NO-NONCE" +#define BFLSC_NO_NONCE_LEN (sizeof(BFLSC_NO_NONCE)-1) +#define BFLSC_IDLE "IDLE" +#define BFLSC_IDLE_LEN (sizeof(BFLSC_IDLE)-1) +#define BFLSC_BUSY "BUSY" +#define BFLSC_BUSY_LEN (sizeof(BFLSC_BUSY)-1) + +#define BFLSC_MINIRIG "BAM" +#define BFLSC_SINGLE "BAS" +#define BFLSC_LITTLESINGLE "BAL" +#define BFLSC_JALAPENO "BAJ" +#define BFLSC_MONARCH "BMA" + +// Default expected time for a nonce range +// - thus no need to check until this + last time work was found +// 60GH/s MiniRig (1 board) or Single +#define BAM_WORK_TIME 71.58 +#define BAS_WORK_TIME 71.58 +// 30GH/s Little Single +#define BAL_WORK_TIME 143.17 +// 4.5GH/s Jalapeno +#define BAJ_WORK_TIME 954.44 +#define BMA_WORK_TIME 35 // ??? 
+ +// Defaults (slightly over half the work time) but ensure none are above 100 +// SCAN_TIME - delay after sending work +// RES_TIME - delay between checking for results +#define BAM_SCAN_TIME 20 +#define BMA_SCAN_TIME 50 +#define BAS_SCAN_TIME 360 +#define BAL_SCAN_TIME 720 +#define BAJ_SCAN_TIME 1000 +#define BFLSC_RES_TIME 100 +#define BMA_RES_TIME 50 +#define BFLSC_MAX_SLEEP 2000 + +#define BAJ_LATENCY LATENCY_STD +#define BAL_LATENCY 12 +#define BAS_LATENCY 12 +// For now a BAM doesn't really exist - it's currently 8 independent BASs +#define BAM_LATENCY 2 + +#define BFLSC_TEMP_SLEEPMS 5 + +#define BFLSC_QUE_SIZE_V1 20 +#define BFLSC_QUE_FULL_ENOUGH_V1 13 +#define BFLSC_QUE_WATERMARK_V1 6 +#define BFLSC_QUE_LOW_V1 3 + +// TODO: use 5 batch jobs +// TODO: base these numbers on the chip count? +#define BFLSC_QUE_SIZE_V2 40 +#define BFLSC_QUE_FULL_ENOUGH_V2 36 +#define BFLSC_QUE_WATERMARK_V2 32 +#define BFLSC_QUE_LOW_V2 16 + +#define BFLSC_TEMP_OVERHEAT 85 +// Will start throttling this much below overheat +#define BFLSC_TEMP_THROTTLE 3 +// Must drop this far below overheat before resuming work +#define BFLSC_TEMP_RECOVER 5 + +// If initialisation fails the first time, +// sleep this amount (ms) and try again +#define REINIT_TIME_FIRST_MS 100 +// Max ms per sleep +#define REINIT_TIME_MAX_MS 800 +// Keep trying up to this many us +#define REINIT_TIME_MAX 3000000 + +int opt_bflsc_overheat; + +#endif /* BFLSC_H */ diff --git a/driver-bitforce.c b/driver-bitforce.c new file mode 100644 index 0000000..34f1ef9 --- /dev/null +++ b/driver-bitforce.c @@ -0,0 +1,752 @@ +/* + * Copyright 2012-2013 Andrew Smith + * Copyright 2012 Luke Dashjr + * Copyright 2012 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "compat.h" +#include "miner.h" +#include "usbutils.h" +#include "util.h" + +#ifdef WIN32 +#include +#endif /* WIN32 */ + +#define BITFORCE_IDENTIFY "ZGX" +#define BITFORCE_IDENTIFY_LEN (sizeof(BITFORCE_IDENTIFY)-1) +#define BITFORCE_FLASH "ZMX" +#define BITFORCE_FLASH_LEN (sizeof(BITFORCE_FLASH)-1) +#define BITFORCE_TEMPERATURE "ZLX" +#define BITFORCE_TEMPERATURE_LEN (sizeof(BITFORCE_TEMPERATURE)-1) +#define BITFORCE_SENDRANGE "ZPX" +#define BITFORCE_SENDRANGE_LEN (sizeof(BITFORCE_SENDRANGE)-1) +#define BITFORCE_SENDWORK "ZDX" +#define BITFORCE_SENDWORK_LEN (sizeof(BITFORCE_SENDWORK)-1) +#define BITFORCE_WORKSTATUS "ZFX" +#define BITFORCE_WORKSTATUS_LEN (sizeof(BITFORCE_WORKSTATUS)-1) + +// Either of Nonce or No-nonce start with: +#define BITFORCE_EITHER "N" +#define BITFORCE_EITHER_LEN 1 +#define BITFORCE_NONCE "NONCE-FOUND" +#define BITFORCE_NONCE_LEN (sizeof(BITFORCE_NONCE)-1) +#define BITFORCE_NO_NONCE "NO-NONCE" +#define BITFORCE_NO_NONCE_MATCH 3 +#define BITFORCE_IDLE "IDLE" +#define BITFORCE_IDLE_MATCH 1 + +#define BITFORCE_SLEEP_MS 500 +#define BITFORCE_TIMEOUT_S 7 +#define BITFORCE_TIMEOUT_MS (BITFORCE_TIMEOUT_S * 1000) +#define BITFORCE_LONG_TIMEOUT_S 30 +#define BITFORCE_LONG_TIMEOUT_MS (BITFORCE_LONG_TIMEOUT_S * 1000) +#define BITFORCE_CHECK_INTERVAL_MS 10 +#define WORK_CHECK_INTERVAL_MS 50 +#define MAX_START_DELAY_MS 100 +#define tv_to_ms(tval) (tval.tv_sec * 1000 + tval.tv_usec / 1000) +#define TIME_AVG_CONSTANT 8 + +#define KNAME_WORK "full work" +#define KNAME_RANGE "nonce range" + +#define BITFORCE_BUFSIZ (0x200) + +// If initialisation fails the first time, +// sleep this amount (ms) and try again +#define REINIT_TIME_FIRST_MS 100 +// Max ms per sleep +#define REINIT_TIME_MAX_MS 800 +// Keep trying up to this many us +#define REINIT_TIME_MAX 3000000 + +static const char *blank = ""; + +static void bitforce_initialise(struct cgpu_info 
*bitforce, bool lock) +{ + int err, interface; + + if (lock) + mutex_lock(&bitforce->device_mutex); + + if (bitforce->usbinfo.nodev) + goto failed; + + interface = usb_interface(bitforce); + // Reset + err = usb_transfer(bitforce, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_RESET, interface, C_RESET); + if (opt_debug) + applog(LOG_DEBUG, "%s%i: reset got err %d", + bitforce->drv->name, bitforce->device_id, err); + + if (bitforce->usbinfo.nodev) + goto failed; + + // Set data control + err = usb_transfer(bitforce, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, + FTDI_VALUE_DATA_BFL, interface, C_SETDATA); + if (opt_debug) + applog(LOG_DEBUG, "%s%i: setdata got err %d", + bitforce->drv->name, bitforce->device_id, err); + + if (bitforce->usbinfo.nodev) + goto failed; + + // Set the baud + err = usb_transfer(bitforce, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, FTDI_VALUE_BAUD_BFL, + (FTDI_INDEX_BAUD_BFL & 0xff00) | interface, + C_SETBAUD); + if (opt_debug) + applog(LOG_DEBUG, "%s%i: setbaud got err %d", + bitforce->drv->name, bitforce->device_id, err); + + if (bitforce->usbinfo.nodev) + goto failed; + + // Set Flow Control + err = usb_transfer(bitforce, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + if (opt_debug) + applog(LOG_DEBUG, "%s%i: setflowctrl got err %d", + bitforce->drv->name, bitforce->device_id, err); + + if (bitforce->usbinfo.nodev) + goto failed; + + // Set Modem Control + err = usb_transfer(bitforce, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); + if (opt_debug) + applog(LOG_DEBUG, "%s%i: setmodemctrl got err %d", + bitforce->drv->name, bitforce->device_id, err); + + if (bitforce->usbinfo.nodev) + goto failed; + + // Clear any sent data + err = usb_transfer(bitforce, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_PURGE_TX, interface, C_PURGETX); + if (opt_debug) + applog(LOG_DEBUG, "%s%i: purgetx got err %d", + bitforce->drv->name, bitforce->device_id, err); + + if (bitforce->usbinfo.nodev) + goto failed; + 
+ // Clear any received data + err = usb_transfer(bitforce, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_PURGE_RX, interface, C_PURGERX); + if (opt_debug) + applog(LOG_DEBUG, "%s%i: purgerx got err %d", + bitforce->drv->name, bitforce->device_id, err); + +failed: + + if (lock) + mutex_unlock(&bitforce->device_mutex); +} + +static struct cgpu_info *bitforce_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + char buf[BITFORCE_BUFSIZ+1]; + int err, amount; + char *s; + struct timeval init_start, init_now; + int init_sleep, init_count; + bool ident_first; + + struct cgpu_info *bitforce = usb_alloc_cgpu(&bitforce_drv, 1); + + if (!usb_init(bitforce, dev, found)) + goto shin; + + // Allow 2 complete attempts if the 1st time returns an unrecognised reply + ident_first = true; +retry: + init_count = 0; + init_sleep = REINIT_TIME_FIRST_MS; + cgtime(&init_start); +reinit: + bitforce_initialise(bitforce, false); + if ((err = usb_write(bitforce, BITFORCE_IDENTIFY, BITFORCE_IDENTIFY_LEN, &amount, C_REQUESTIDENTIFY)) < 0 || amount != BITFORCE_IDENTIFY_LEN) { + applog(LOG_ERR, "%s detect (%s) send identify request failed (%d:%d)", + bitforce->drv->dname, bitforce->device_path, amount, err); + goto unshin; + } + + if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETIDENTIFY)) < 0 || amount < 1) { + init_count++; + cgtime(&init_now); + if (us_tdiff(&init_now, &init_start) <= REINIT_TIME_MAX) { + if (init_count == 2) { + applog(LOG_WARNING, "%s detect (%s) 2nd init failed (%d:%d) - retrying", + bitforce->drv->dname, bitforce->device_path, amount, err); + } + cgsleep_ms(init_sleep); + if ((init_sleep * 2) <= REINIT_TIME_MAX_MS) + init_sleep *= 2; + goto reinit; + } + + if (init_count > 0) + applog(LOG_WARNING, "%s detect (%s) init failed %d times %.2fs", + bitforce->drv->dname, bitforce->device_path, init_count, tdiff(&init_now, &init_start)); + + if (err < 0) { + applog(LOG_ERR, "%s detect (%s) error identify reply (%d:%d)", + 
bitforce->drv->dname, bitforce->device_path, amount, err); + } else { + applog(LOG_ERR, "%s detect (%s) empty identify reply (%d)", + bitforce->drv->dname, bitforce->device_path, amount); + } + + goto unshin; + } + buf[amount] = '\0'; + + if (unlikely(!strstr(buf, "SHA256"))) { + if (ident_first) { + applog(LOG_WARNING, "%s detect (%s) didn't recognise '%s' trying again ...", + bitforce->drv->dname, bitforce->device_path, buf); + ident_first = false; + goto retry; + } + applog(LOG_ERR, "%s detect (%s) didn't recognise '%s' on 2nd attempt", + bitforce->drv->dname, bitforce->device_path, buf); + goto unshin; + } + + if (strstr(buf, "SHA256 SC")) { +#ifdef USE_BFLSC + applog(LOG_DEBUG, "SC device detected, will defer to BFLSC driver"); +#else + applog(LOG_WARNING, "SC device detected but no BFLSC support compiled in!"); +#endif + goto unshin; + } + + if (likely((!memcmp(buf, ">>>ID: ", 7)) && (s = strstr(buf + 3, ">>>")))) { + s[0] = '\0'; + bitforce->name = strdup(buf + 7); + } else { + bitforce->name = (char *)blank; + } + + // We have a real BitForce! 
+ applog(LOG_DEBUG, "%s (%s) identified as: '%s'", + bitforce->drv->dname, bitforce->device_path, bitforce->name); + + /* Initially enable support for nonce range and disable it later if it + * fails */ + if (opt_bfl_noncerange) { + bitforce->nonce_range = true; + bitforce->sleep_ms = BITFORCE_SLEEP_MS; + bitforce->kname = KNAME_RANGE; + } else { + bitforce->sleep_ms = BITFORCE_SLEEP_MS * 5; + bitforce->kname = KNAME_WORK; + } + + if (!add_cgpu(bitforce)) + goto unshin; + + update_usb_stats(bitforce); + + mutex_init(&bitforce->device_mutex); + + return bitforce; + +unshin: + + usb_uninit(bitforce); + +shin: + + if (bitforce->name != blank) { + free(bitforce->name); + bitforce->name = NULL; + } + + bitforce = usb_free_cgpu(bitforce); + + return NULL; +} + +static void bitforce_detect(bool __maybe_unused hotplug) +{ + usb_detect(&bitforce_drv, bitforce_detect_one); +} + +static void get_bitforce_statline_before(char *buf, size_t bufsiz, struct cgpu_info *bitforce) +{ + float gt = bitforce->temp; + + if (gt > 0) + tailsprintf(buf, bufsiz, "%5.1fC", gt); +} + +static bool bitforce_thread_prepare(__maybe_unused struct thr_info *thr) +{ +// struct cgpu_info *bitforce = thr->cgpu; + + return true; +} + +static void bitforce_flash_led(struct cgpu_info *bitforce) +{ + int err, amount; + + /* Do not try to flash the led if we're polling for a result to + * minimise the chance of interleaved results */ + if (bitforce->polling) + return; + + /* It is not critical flashing the led so don't get stuck if we + * can't grab the mutex now */ + if (mutex_trylock(&bitforce->device_mutex)) + return; + + if ((err = usb_write(bitforce, BITFORCE_FLASH, BITFORCE_FLASH_LEN, &amount, C_REQUESTFLASH)) < 0 || amount != BITFORCE_FLASH_LEN) { + applog(LOG_ERR, "%s%i: flash request failed (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + } else { + /* However, this stops anything else getting a reply + * So best to delay any other access to the BFL */ + cgsleep_ms(4000); + } + 
+ /* Once we've tried - don't do it until told to again */ + bitforce->flash_led = false; + + mutex_unlock(&bitforce->device_mutex); + + return; // nothing is returned by the BFL +} + +static bool bitforce_get_temp(struct cgpu_info *bitforce) +{ + char buf[BITFORCE_BUFSIZ+1]; + int err, amount; + char *s; + + // Device is gone + if (bitforce->usbinfo.nodev) + return false; + + /* Do not try to get the temperature if we're polling for a result to + * minimise the chance of interleaved results */ + if (bitforce->polling) + return true; + + // Flash instead of Temp - doing both can be too slow + if (bitforce->flash_led) { + bitforce_flash_led(bitforce); + return true; + } + + /* It is not critical getting temperature so don't get stuck if we + * can't grab the mutex here */ + if (mutex_trylock(&bitforce->device_mutex)) + return false; + + if ((err = usb_write(bitforce, BITFORCE_TEMPERATURE, BITFORCE_TEMPERATURE_LEN, &amount, C_REQUESTTEMPERATURE)) < 0 || amount != BITFORCE_TEMPERATURE_LEN) { + mutex_unlock(&bitforce->device_mutex); + applog(LOG_ERR, "%s%i: Error: Request temp invalid/timed out (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + bitforce->hw_errors++; + return false; + } + + if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETTEMPERATURE)) < 0 || amount < 1) { + mutex_unlock(&bitforce->device_mutex); + if (err < 0) { + applog(LOG_ERR, "%s%i: Error: Get temp return invalid/timed out (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + } else { + applog(LOG_ERR, "%s%i: Error: Get temp returned nothing (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + } + bitforce->hw_errors++; + return false; + } + + mutex_unlock(&bitforce->device_mutex); + + if ((!strncasecmp(buf, "TEMP", 4)) && (s = strchr(buf + 4, ':'))) { + float temp = strtof(s + 1, NULL); + + /* Cope with older software that breaks and reads nonsense + * values */ + if (temp > 100) + temp = strtod(s + 1, NULL); + + if (temp > 0) 
{ + bitforce->temp = temp; + if (unlikely(bitforce->cutofftemp > 0 && temp > bitforce->cutofftemp)) { + applog(LOG_WARNING, "%s%i: Hit thermal cutoff limit, disabling!", + bitforce->drv->name, bitforce->device_id); + bitforce->deven = DEV_RECOVER; + dev_error(bitforce, REASON_DEV_THERMAL_CUTOFF); + } + } + } else { + /* Use the temperature monitor as a kind of watchdog for when + * our responses are out of sync and flush the buffer to + * hopefully recover */ + applog(LOG_WARNING, "%s%i: Garbled response probably throttling, clearing buffer", + bitforce->drv->name, bitforce->device_id); + dev_error(bitforce, REASON_DEV_THROTTLE); + /* Count throttling episodes as hardware errors */ + bitforce->hw_errors++; + bitforce_initialise(bitforce, true); + return false; + } + + return true; +} + +static bool bitforce_send_work(struct thr_info *thr, struct work *work) +{ + struct cgpu_info *bitforce = thr->cgpu; + unsigned char ob[70]; + char buf[BITFORCE_BUFSIZ+1]; + int err, amount; + char *s; + char *cmd; + int len; + +re_send: + if (bitforce->nonce_range) { + cmd = BITFORCE_SENDRANGE; + len = BITFORCE_SENDRANGE_LEN; + } else { + cmd = BITFORCE_SENDWORK; + len = BITFORCE_SENDWORK_LEN; + } + + mutex_lock(&bitforce->device_mutex); + if ((err = usb_write(bitforce, cmd, len, &amount, C_REQUESTSENDWORK)) < 0 || amount != len) { + mutex_unlock(&bitforce->device_mutex); + applog(LOG_ERR, "%s%i: request send work failed (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + return false; + } + + if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_REQUESTSENDWORKSTATUS)) < 0) { + mutex_unlock(&bitforce->device_mutex); + applog(LOG_ERR, "%s%d: read request send work status failed (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + return false; + } + + if (amount == 0 || !buf[0] || !strncasecmp(buf, "B", 1)) { + mutex_unlock(&bitforce->device_mutex); + cgsleep_ms(WORK_CHECK_INTERVAL_MS); + goto re_send; + } else if 
(unlikely(strncasecmp(buf, "OK", 2))) { + mutex_unlock(&bitforce->device_mutex); + if (bitforce->nonce_range) { + applog(LOG_WARNING, "%s%i: Does not support nonce range, disabling", + bitforce->drv->name, bitforce->device_id); + bitforce->nonce_range = false; + bitforce->sleep_ms *= 5; + bitforce->kname = KNAME_WORK; + goto re_send; + } + applog(LOG_ERR, "%s%i: Error: Send work reports: %s", + bitforce->drv->name, bitforce->device_id, buf); + return false; + } + + sprintf((char *)ob, ">>>>>>>>"); + memcpy(ob + 8, work->midstate, 32); + memcpy(ob + 8 + 32, work->data + 64, 12); + if (!bitforce->nonce_range) { + sprintf((char *)ob + 8 + 32 + 12, ">>>>>>>>"); + work->nonce = bitforce->nonces = 0xffffffff; + len = 60; + } else { + uint32_t *nonce; + + nonce = (uint32_t *)(ob + 8 + 32 + 12); + *nonce = htobe32(work->nonce); + nonce = (uint32_t *)(ob + 8 + 32 + 12 + 4); + /* Split work up into 1/5th nonce ranges */ + bitforce->nonces = 0x33333332; + *nonce = htobe32(work->nonce + bitforce->nonces); + work->nonce += bitforce->nonces + 1; + sprintf((char *)ob + 8 + 32 + 12 + 8, ">>>>>>>>"); + len = 68; + } + + if ((err = usb_write(bitforce, (char *)ob, len, &amount, C_SENDWORK)) < 0 || amount != len) { + mutex_unlock(&bitforce->device_mutex); + applog(LOG_ERR, "%s%i: send work failed (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + return false; + } + + if ((err = usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_SENDWORKSTATUS)) < 0) { + mutex_unlock(&bitforce->device_mutex); + applog(LOG_ERR, "%s%d: read send work status failed (%d:%d)", + bitforce->drv->name, bitforce->device_id, amount, err); + return false; + } + + mutex_unlock(&bitforce->device_mutex); + + if (opt_debug) { + s = bin2hex(ob + 8, 44); + applog(LOG_DEBUG, "%s%i: block data: %s", + bitforce->drv->name, bitforce->device_id, s); + free(s); + } + + if (amount == 0 || !buf[0]) { + applog(LOG_ERR, "%s%i: Error: Send block data returned empty string/timed out", + bitforce->drv->name, 
bitforce->device_id); + return false; + } + + if (unlikely(strncasecmp(buf, "OK", 2))) { + applog(LOG_ERR, "%s%i: Error: Send block data reports: %s", + bitforce->drv->name, bitforce->device_id, buf); + return false; + } + + cgtime(&bitforce->work_start_tv); + return true; +} + +static int64_t bitforce_get_result(struct thr_info *thr, struct work *work) +{ + struct cgpu_info *bitforce = thr->cgpu; + unsigned int delay_time_ms; + struct timeval elapsed; + struct timeval now; + char buf[BITFORCE_BUFSIZ+1]; + int amount; + char *pnoncebuf; + uint32_t nonce; + + while (1) { + if (unlikely(thr->work_restart)) + return 0; + + mutex_lock(&bitforce->device_mutex); + usb_write(bitforce, BITFORCE_WORKSTATUS, BITFORCE_WORKSTATUS_LEN, &amount, C_REQUESTWORKSTATUS); + usb_read_nl(bitforce, buf, sizeof(buf)-1, &amount, C_GETWORKSTATUS); + mutex_unlock(&bitforce->device_mutex); + + cgtime(&now); + timersub(&now, &bitforce->work_start_tv, &elapsed); + + if (elapsed.tv_sec >= BITFORCE_LONG_TIMEOUT_S) { + applog(LOG_ERR, "%s%i: took %ldms - longer than %dms", + bitforce->drv->name, bitforce->device_id, + tv_to_ms(elapsed), BITFORCE_LONG_TIMEOUT_MS); + return 0; + } + + if (amount > 0 && buf[0] && strncasecmp(buf, "B", 1)) /* BFL does not respond during throttling */ + break; + + /* if BFL is throttling, no point checking so quickly */ + delay_time_ms = (buf[0] ? BITFORCE_CHECK_INTERVAL_MS : 2 * WORK_CHECK_INTERVAL_MS); + cgsleep_ms(delay_time_ms); + bitforce->wait_ms += delay_time_ms; + } + + if (elapsed.tv_sec > BITFORCE_TIMEOUT_S) { + applog(LOG_ERR, "%s%i: took %ldms - longer than %dms", + bitforce->drv->name, bitforce->device_id, + tv_to_ms(elapsed), BITFORCE_TIMEOUT_MS); + dev_error(bitforce, REASON_DEV_OVER_HEAT); + + /* Only return if we got nothing after timeout - there still may be results */ + if (amount == 0) + return 0; + } else if (!strncasecmp(buf, BITFORCE_EITHER, BITFORCE_EITHER_LEN)) { + /* Simple timing adjustment. 
Allow a few polls to cope with + * OS timer delays being variably reliable. wait_ms will + * always equal sleep_ms when we've waited greater than or + * equal to the result return time.*/ + delay_time_ms = bitforce->sleep_ms; + + if (bitforce->wait_ms > bitforce->sleep_ms + (WORK_CHECK_INTERVAL_MS * 2)) + bitforce->sleep_ms += (bitforce->wait_ms - bitforce->sleep_ms) / 2; + else if (bitforce->wait_ms == bitforce->sleep_ms) { + if (bitforce->sleep_ms > WORK_CHECK_INTERVAL_MS) + bitforce->sleep_ms -= WORK_CHECK_INTERVAL_MS; + else if (bitforce->sleep_ms > BITFORCE_CHECK_INTERVAL_MS) + bitforce->sleep_ms -= BITFORCE_CHECK_INTERVAL_MS; + } + + if (delay_time_ms != bitforce->sleep_ms) + applog(LOG_DEBUG, "%s%i: Wait time changed to: %d, waited %u", + bitforce->drv->name, bitforce->device_id, + bitforce->sleep_ms, bitforce->wait_ms); + + /* Work out the average time taken. Float for calculation, uint for display */ + bitforce->avg_wait_f += (tv_to_ms(elapsed) - bitforce->avg_wait_f) / TIME_AVG_CONSTANT; + bitforce->avg_wait_d = (unsigned int) (bitforce->avg_wait_f + 0.5); + } + + applog(LOG_DEBUG, "%s%i: waited %dms until %s", + bitforce->drv->name, bitforce->device_id, + bitforce->wait_ms, buf); + if (!strncasecmp(buf, BITFORCE_NO_NONCE, BITFORCE_NO_NONCE_MATCH)) + return bitforce->nonces; /* No valid nonce found */ + else if (!strncasecmp(buf, BITFORCE_IDLE, BITFORCE_IDLE_MATCH)) + return 0; /* Device idle */ + else if (strncasecmp(buf, BITFORCE_NONCE, BITFORCE_NONCE_LEN)) { + bitforce->hw_errors++; + applog(LOG_WARNING, "%s%i: Error: Get result reports: %s", + bitforce->drv->name, bitforce->device_id, buf); + bitforce_initialise(bitforce, true); + return 0; + } + + pnoncebuf = &buf[12]; + + while (1) { + hex2bin((void*)&nonce, pnoncebuf, 4); +#ifndef __BIG_ENDIAN__ + nonce = swab32(nonce); +#endif + if (unlikely(bitforce->nonce_range && (nonce >= work->nonce || + (work->nonce > 0 && nonce < work->nonce - bitforce->nonces - 1)))) { + applog(LOG_WARNING, "%s%i: 
Disabling broken nonce range support", + bitforce->drv->name, bitforce->device_id); + bitforce->nonce_range = false; + work->nonce = 0xffffffff; + bitforce->sleep_ms *= 5; + bitforce->kname = KNAME_WORK; + } + + submit_nonce(thr, work, nonce); + if (strncmp(&pnoncebuf[8], ",", 1)) + break; + pnoncebuf += 9; + } + + return bitforce->nonces; +} + +static void bitforce_shutdown(__maybe_unused struct thr_info *thr) +{ +// struct cgpu_info *bitforce = thr->cgpu; +} + +static void biforce_thread_enable(struct thr_info *thr) +{ + struct cgpu_info *bitforce = thr->cgpu; + + bitforce_initialise(bitforce, true); +} + +static int64_t bitforce_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce) +{ + struct cgpu_info *bitforce = thr->cgpu; + bool send_ret; + int64_t ret; + + // Device is gone + if (bitforce->usbinfo.nodev) + return -1; + + send_ret = bitforce_send_work(thr, work); + + if (!restart_wait(thr, bitforce->sleep_ms)) + return 0; + + bitforce->wait_ms = bitforce->sleep_ms; + + if (send_ret) { + bitforce->polling = true; + ret = bitforce_get_result(thr, work); + bitforce->polling = false; + } else + ret = -1; + + if (ret == -1) { + ret = 0; + applog(LOG_ERR, "%s%i: Comms error", bitforce->drv->name, bitforce->device_id); + dev_error(bitforce, REASON_DEV_COMMS_ERROR); + bitforce->hw_errors++; + /* empty read buffer */ + bitforce_initialise(bitforce, true); + } + return ret; +} + +static bool bitforce_get_stats(struct cgpu_info *bitforce) +{ + return bitforce_get_temp(bitforce); +} + +static void bitforce_identify(struct cgpu_info *bitforce) +{ + bitforce->flash_led = true; +} + +static bool bitforce_thread_init(struct thr_info *thr) +{ + struct cgpu_info *bitforce = thr->cgpu; + unsigned int wait; + + /* Pause each new thread at least 100ms between initialising + * so the devices aren't making calls all at the same time. 
*/ + wait = thr->id * MAX_START_DELAY_MS; + applog(LOG_DEBUG, "%s%d: Delaying start by %dms", + bitforce->drv->name, bitforce->device_id, wait / 1000); + cgsleep_ms(wait); + + return true; +} + +static struct api_data *bitforce_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + + // Warning, access to these is not locked - but we don't really + // care since hashing performance is way more important than + // locking access to displaying API debug 'stats' + // If locking becomes an issue for any of them, use copy_data=true also + root = api_add_uint(root, "Sleep Time", &(cgpu->sleep_ms), false); + root = api_add_uint(root, "Avg Wait", &(cgpu->avg_wait_d), false); + + return root; +} + +struct device_drv bitforce_drv = { + .drv_id = DRIVER_bitforce, + .dname = "BitForce", + .name = "BFL", + .drv_detect = bitforce_detect, + .get_api_stats = bitforce_api_stats, + .get_statline_before = get_bitforce_statline_before, + .get_stats = bitforce_get_stats, + .identify_device = bitforce_identify, + .thread_prepare = bitforce_thread_prepare, + .thread_init = bitforce_thread_init, + .scanhash = bitforce_scanhash, + .thread_shutdown = bitforce_shutdown, + .thread_enable = biforce_thread_enable +}; diff --git a/driver-bitfury.c b/driver-bitfury.c new file mode 100644 index 0000000..6951ccf --- /dev/null +++ b/driver-bitfury.c @@ -0,0 +1,1660 @@ +/* + * Copyright 2013-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include "miner.h" +#include "driver-bitfury.h" +#include "sha2.h" +#include "mcp2210.h" +#include "libbitfury.h" + +int opt_bxf_temp_target = BXF_TEMP_TARGET / 10; +int opt_nfu_bits = 50; +int opt_bxm_bits = 54; +int opt_bxf_bits = 54; +int opt_bxf_debug; +int opt_osm_led_mode = 4; + +/* Wait longer 1/3 longer than it would take for a full nonce range */ +#define BF1WAIT 1600 +#define BF1MSGSIZE 7 +#define BF1INFOSIZE 14 + +#define TWELVE_MHZ 12000000 + +//Low port pins +#define SK 1 +#define DO 2 +#define DI 4 +#define CS 8 +#define GPIO0 16 +#define GPIO1 32 +#define GPIO2 64 +#define GPIO3 128 + +//GPIO pins +#define GPIOL0 0 +#define GPIOL1 1 +#define GPIOL2 2 +#define GPIOL3 3 +#define GPIOH 4 +#define GPIOH1 5 +#define GPIOH2 6 +#define GPIOH3 7 +#define GPIOH4 8 +#define GPIOH5 9 +#define GPIOH6 10 +#define GPIOH7 11 + +#define DEFAULT_DIR (SK | DO | CS | GPIO0 | GPIO1 | GPIO2 | GPIO3) /* Setup default input or output state per FTDI for SPI */ +#define DEFAULT_STATE (CS) /* CS idles high, CLK idles LOW for SPI0 */ + +//MPSSE commands from FTDI AN_108 +#define INVALID_COMMAND 0xAB +#define ENABLE_ADAPTIVE_CLOCK 0x96 +#define DISABLE_ADAPTIVE_CLOCK 0x97 +#define ENABLE_3_PHASE_CLOCK 0x8C +#define DISABLE_3_PHASE_CLOCK 0x8D +#define TCK_X5 0x8A +#define TCK_D5 0x8B +#define CLOCK_N_CYCLES 0x8E +#define CLOCK_N8_CYCLES 0x8F +#define PULSE_CLOCK_IO_HIGH 0x94 +#define PULSE_CLOCK_IO_LOW 0x95 +#define CLOCK_N8_CYCLES_IO_HIGH 0x9C +#define CLOCK_N8_CYCLES_IO_LOW 0x9D +#define TRISTATE_IO 0x9E +#define TCK_DIVISOR 0x86 +#define LOOPBACK_END 0x85 +#define SET_OUT_ADBUS 0x80 +#define SET_OUT_ACBUS 0x82 +#define WRITE_BYTES_SPI0 0x11 +#define READ_WRITE_BYTES_SPI0 0x31 + +static void bf1_empty_buffer(struct cgpu_info *bitfury) +{ + char buf[512]; + int amount; + + do { + usb_read_once(bitfury, buf, 512, &amount, C_BF1_FLUSH); + } while (amount); +} + +static bool bf1_open(struct cgpu_info *bitfury) +{ + uint32_t buf[2]; + int err; + + 
bf1_empty_buffer(bitfury); + /* Magic sequence to reset device only really needed for windows but + * harmless on linux. */ + buf[0] = 0x80250000; + buf[1] = 0x00000800; + err = usb_transfer(bitfury, 0, 9, 1, 0, C_ATMEL_RESET); + if (!err) + err = usb_transfer(bitfury, 0x21, 0x22, 0, 0, C_ATMEL_OPEN); + if (!err) { + err = usb_transfer_data(bitfury, 0x21, 0x20, 0x0000, 0, buf, + BF1MSGSIZE, C_ATMEL_INIT); + } + + if (err < 0) { + applog(LOG_INFO, "%s %d: Failed to open with error %s", bitfury->drv->name, + bitfury->device_id, libusb_error_name(err)); + } + return (err == BF1MSGSIZE); +} + +static void bf1_close(struct cgpu_info *bitfury) +{ + bf1_empty_buffer(bitfury); +} + +static void bf1_identify(struct cgpu_info *bitfury) +{ + int amount; + + usb_write(bitfury, "L", 1, &amount, C_BF1_IDENTIFY); +} + +static void bitfury_identify(struct cgpu_info *bitfury) +{ + struct bitfury_info *info = bitfury->device_data; + + switch(info->ident) { + case IDENT_BF1: + bf1_identify(bitfury); + break; + case IDENT_BXF: + case IDENT_OSM: + default: + break; + } +} + +static bool bf1_getinfo(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + int amount, err; + char buf[16]; + + err = usb_write(bitfury, "I", 1, &amount, C_BF1_REQINFO); + if (err) { + applog(LOG_INFO, "%s %d: Failed to write REQINFO", + bitfury->drv->name, bitfury->device_id); + return false; + } + err = usb_read(bitfury, buf, BF1INFOSIZE, &amount, C_BF1_GETINFO); + if (err) { + applog(LOG_INFO, "%s %d: Failed to read GETINFO", + bitfury->drv->name, bitfury->device_id); + return false; + } + if (amount != BF1INFOSIZE) { + applog(LOG_INFO, "%s %d: Getinfo received %d bytes instead of %d", + bitfury->drv->name, bitfury->device_id, amount, BF1INFOSIZE); + return false; + } + info->version = buf[1]; + memcpy(&info->product, buf + 2, 8); + memcpy(&info->serial, buf + 10, 4); + bitfury->unique_id = bin2hex((unsigned char *)buf + 10, 4); + + applog(LOG_INFO, "%s %d: Getinfo returned version %d, product %s serial 
%s", bitfury->drv->name, + bitfury->device_id, info->version, info->product, bitfury->unique_id); + bf1_empty_buffer(bitfury); + return true; +} + +static bool bf1_reset(struct cgpu_info *bitfury) +{ + int amount, err; + char buf[16]; + + err = usb_write(bitfury, "R", 1, &amount, C_BF1_REQRESET); + if (err) { + applog(LOG_INFO, "%s %d: Failed to write REQRESET", + bitfury->drv->name, bitfury->device_id); + return false; + } + err = usb_read_timeout(bitfury, buf, BF1MSGSIZE, &amount, BF1WAIT, + C_BF1_GETRESET); + if (err) { + applog(LOG_INFO, "%s %d: Failed to read GETRESET", + bitfury->drv->name, bitfury->device_id); + return false; + } + if (amount != BF1MSGSIZE) { + applog(LOG_INFO, "%s %d: Getreset received %d bytes instead of %d", + bitfury->drv->name, bitfury->device_id, amount, BF1MSGSIZE); + return false; + } + applog(LOG_DEBUG, "%s %d: Getreset returned %s", bitfury->drv->name, + bitfury->device_id, buf); + bf1_empty_buffer(bitfury); + return true; +} + +static bool bxf_send_msg(struct cgpu_info *bitfury, char *buf, enum usb_cmds cmd) +{ + int err, amount, len; + + if (unlikely(bitfury->usbinfo.nodev)) + return false; + + if (opt_bxf_debug) { + char *strbuf = str_text(buf); + + applog(LOG_ERR, "%s %d: >BXF [%s]", bitfury->drv->name, bitfury->device_id, strbuf); + free(strbuf); + } + + len = strlen(buf); + applog(LOG_DEBUG, "%s %d: Sending %s", bitfury->drv->name, bitfury->device_id, buf); + err = usb_write(bitfury, buf, len, &amount, cmd); + if (err || amount != len) { + applog(LOG_WARNING, "%s %d: Error %d sending %s sent %d of %d", bitfury->drv->name, + bitfury->device_id, err, usb_cmdname(cmd), amount, len); + return false; + } + return true; +} + +static bool bxf_send_debugmode(struct cgpu_info *bitfury) +{ + char buf[16]; + + sprintf(buf, "debug-mode %d\n", opt_bxf_debug); + return bxf_send_msg(bitfury, buf, C_BXF_DEBUGMODE); +} + +static bool bxf_send_ledmode(struct cgpu_info *bitfury) +{ + char buf[16]; + + sprintf(buf, "led-mode %d\n", 
opt_osm_led_mode); + return bxf_send_msg(bitfury, buf, C_BXF_LEDMODE); +} + +/* Returns the amount received only if we receive a full message, otherwise + * it returns the err value. */ +static int bxf_recv_msg(struct cgpu_info *bitfury, char *buf) +{ + int err, amount; + + err = usb_read_nl(bitfury, buf, 512, &amount, C_BXF_READ); + if (amount) + applog(LOG_DEBUG, "%s %d: Received %s", bitfury->drv->name, bitfury->device_id, buf); + if (!err) + return amount; + return err; +} + +/* Keep reading till the first timeout or error */ +static void bxf_clear_buffer(struct cgpu_info *bitfury) +{ + int err, retries = 0; + char buf[512]; + + do { + err = bxf_recv_msg(bitfury, buf); + usb_buffer_clear(bitfury); + if (err < 0) + break; + } while (retries++ < 10); +} + +static bool bxf_send_flush(struct cgpu_info *bitfury) +{ + char buf[8]; + + sprintf(buf, "flush\n"); + return bxf_send_msg(bitfury, buf, C_BXF_FLUSH); +} + +static bool bxf_detect_one(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + int err, retries = 0; + char buf[512]; + + if (!bxf_send_flush(bitfury)) + return false; + + bxf_clear_buffer(bitfury); + + sprintf(buf, "version\n"); + if (!bxf_send_msg(bitfury, buf, C_BXF_VERSION)) + return false; + + do { + err = bxf_recv_msg(bitfury, buf); + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) + return false; + if (err > 0 && !strncmp(buf, "version", 7)) { + sscanf(&buf[8], "%d.%d rev %d chips %d", &info->ver_major, + &info->ver_minor, &info->hw_rev, &info->chips); + applog(LOG_INFO, "%s %d: Version %d.%d rev %d chips %d", + bitfury->drv->name, bitfury->device_id, info->ver_major, + info->ver_minor, info->hw_rev, info->chips); + break; + } + /* Keep parsing if the buffer is full without counting it as + * a retry. 
*/ + if (usb_buffer_size(bitfury)) + continue; + } while (retries++ < 10); + + if (!add_cgpu(bitfury)) + quit(1, "Failed to add_cgpu in bxf_detect_one"); + + update_usb_stats(bitfury); + applog(LOG_INFO, "%s %d: Successfully initialised %s", + bitfury->drv->name, bitfury->device_id, bitfury->device_path); + + /* Sanity check and recognise variations */ + if (info->chips <= 2 || info->chips > 999) + info->chips = 2; + else if (info->chips <= 6 && info->ident == IDENT_BXF) + bitfury->drv->name = "HXF"; + else if (info->chips > 6 && info->ident == IDENT_BXF) + bitfury->drv->name = "MXF"; + info->filtered_hw = calloc(sizeof(int), info->chips); + info->job = calloc(sizeof(int), info->chips); + info->submits = calloc(sizeof(int), info->chips); + if (!info->filtered_hw || !info->job || !info->submits) + quit(1, "Failed to calloc bxf chip arrays"); + info->total_nonces = 1; + info->temp_target = opt_bxf_temp_target * 10; + /* This unsets it to make sure it gets set on the first pass */ + info->maxroll = -1; + + return true; +} + +static bool bf1_detect_one(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + if (!bf1_open(bitfury)) + goto out_close; + + /* Send getinfo request */ + if (!bf1_getinfo(bitfury, info)) + goto out_close; + + /* Send reset request */ + if (!bf1_reset(bitfury)) + goto out_close; + + bf1_identify(bitfury); + bf1_empty_buffer(bitfury); + + if (!add_cgpu(bitfury)) + quit(1, "Failed to add_cgpu in bf1_detect_one"); + + update_usb_stats(bitfury); + applog(LOG_INFO, "%s %d: Successfully initialised %s", + bitfury->drv->name, bitfury->device_id, bitfury->device_path); + + /* This does not artificially raise hashrate, it simply allows the + * hashrate to adapt quickly on starting. 
*/ + info->total_nonces = 1; + + return true; +out_close: + bf1_close(bitfury); + return false; +} + +static void nfu_close(struct cgpu_info *bitfury) +{ + struct bitfury_info *info = bitfury->device_data; + struct mcp_settings *mcp = &info->mcp; + int i; + + mcp2210_spi_cancel(bitfury); + + /* Set all pins to input mode, ignoring return code */ + for (i = 0; i < 9; i++) { + mcp->direction.pin[i] = MCP2210_GPIO_INPUT; + mcp->value.pin[i] = MCP2210_GPIO_PIN_LOW; + } + mcp2210_set_gpio_settings(bitfury, mcp); +} + +static bool nfu_reinit(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + bool ret = true; + int i; + + for (i = 0; i < info->chips; i++) { + spi_clear_buf(info); + spi_add_break(info); + spi_add_fasync(info, i); + spi_set_freq(info); + spi_send_conf(info); + spi_send_init(info); + spi_reset(bitfury, info); + ret = info->spi_txrx(bitfury, info); + if (!ret) + break; + } + return ret; +} + +static bool nfu_set_spi_settings(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + struct mcp_settings *mcp = &info->mcp; + + return mcp2210_set_spi_transfer_settings(bitfury, mcp->bitrate, mcp->icsv, + mcp->acsv, mcp->cstdd, mcp->ldbtcsd, mcp->sdbd, mcp->bpst, mcp->spimode); +} + +static void nfu_alloc_arrays(struct bitfury_info *info) +{ + info->payload = calloc(sizeof(struct bitfury_payload), info->chips); + info->oldbuf = calloc(sizeof(unsigned int) * 17, info->chips); + info->job_switched = calloc(sizeof(bool), info->chips); + info->second_run = calloc(sizeof(bool), info->chips); + info->work = calloc(sizeof(struct work *), info->chips); + info->owork = calloc(sizeof(struct work *), info->chips); + info->submits = calloc(sizeof(int *), info->chips); +} + +static bool nfu_detect_one(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + struct mcp_settings *mcp = &info->mcp; + char buf[MCP2210_BUFFER_LENGTH]; + unsigned int length; + bool ret = false; + int i, val; + + /* Identify number of chips, and use it in device name if it can fit + * into 
3 chars, otherwise use generic NFU name. */ + val = sscanf(bitfury->usbdev->prod_string, "NanoFury NF%u ", &info->chips); + if (val < 1) + info->chips = 1; + else if (info->chips < 10) { + sprintf(info->product, "NF%u", info->chips); + bitfury->drv->name = info->product; + } + nfu_alloc_arrays(info); + + info->spi_txrx = &mcp_spi_txrx; + mcp2210_get_gpio_settings(bitfury, mcp); + + for (i = 0; i < 9; i++) { + /* Set all pins to GPIO mode */ + mcp->designation.pin[i] = MCP2210_PIN_GPIO; + /* Set all pins to input mode */ + mcp->direction.pin[i] = MCP2210_GPIO_INPUT; + mcp->value.pin[i] = MCP2210_GPIO_PIN_LOW; + } + + /* Set LED and PWR pins to output and high */ + mcp->direction.pin[NFU_PIN_LED] = mcp->direction.pin[NFU_PIN_PWR_EN] = MCP2210_GPIO_OUTPUT; + mcp->value.pin[NFU_PIN_LED] = mcp->value.pin[NFU_PIN_PWR_EN] = MCP2210_GPIO_PIN_HIGH; + mcp->direction.pin[NFU_PIN_PWR_EN0] = MCP2210_GPIO_OUTPUT; + mcp->value.pin[NFU_PIN_PWR_EN0] = MCP2210_GPIO_PIN_LOW; + + mcp->direction.pin[4] = MCP2210_GPIO_OUTPUT; + mcp->designation.pin[4] = MCP2210_PIN_CS; + + if (!mcp2210_set_gpio_settings(bitfury, mcp)) + goto out; + + if (opt_debug) { + struct gpio_pin gp; + + mcp2210_get_gpio_pindirs(bitfury, &gp); + for (i = 0; i < 9; i++) { + applog(LOG_DEBUG, "%s %d: Pin dir %d %d", bitfury->drv->name, + bitfury->device_id, i, gp.pin[i]); + } + mcp2210_get_gpio_pinvals(bitfury, &gp); + for (i = 0; i < 9; i++) { + applog(LOG_DEBUG, "%s %d: Pin val %d %d", bitfury->drv->name, + bitfury->device_id, i, gp.pin[i]); + } + mcp2210_get_gpio_pindes(bitfury, &gp); + for (i = 0; i < 9; i++) { + applog(LOG_DEBUG, "%s %d: Pin des %d %d", bitfury->drv->name, + bitfury->device_id, i, gp.pin[i]); + } + } + + /* Cancel any transfers in progress */ + if (!mcp2210_spi_cancel(bitfury)) + goto out; + if (!mcp2210_get_spi_transfer_settings(bitfury, &mcp->bitrate, &mcp->icsv, + &mcp->acsv, &mcp->cstdd, &mcp->ldbtcsd, &mcp->sdbd, &mcp->bpst, &mcp->spimode)) + goto out; + mcp->bitrate = 200000; // default to 
200kHz + mcp->icsv = 0xffff; + mcp->acsv = 0xffef; + mcp->cstdd = mcp->ldbtcsd = mcp->sdbd = mcp->spimode = 0; + mcp->bpst = 1; + if (!nfu_set_spi_settings(bitfury, info)) + goto out; + + buf[0] = 0; + length = 1; + if (!mcp2210_spi_transfer(bitfury, mcp, buf, &length)) + goto out; + /* after this command SCK_OVRRIDE should read the same as current SCK + * value (which for mode 0 should be 0) */ + if (!mcp2210_get_gpio_pinval(bitfury, NFU_PIN_SCK_OVR, &val)) + goto out; + if (val != MCP2210_GPIO_PIN_LOW) + goto out; + + /* switch SCK to polarity (default SCK=1 in mode 2) */ + mcp->spimode = 2; + if (!nfu_set_spi_settings(bitfury, info)) + goto out; + buf[0] = 0; + length = 1; + if (!mcp2210_spi_transfer(bitfury, mcp, buf, &length)) + goto out; + /* after this command SCK_OVRRIDE should read the same as current SCK + * value (which for mode 2 should be 1) */ + if (!mcp2210_get_gpio_pinval(bitfury, NFU_PIN_SCK_OVR, &val)) + goto out; + if (val != MCP2210_GPIO_PIN_HIGH) + goto out; + + /* switch SCK to polarity (default SCK=0 in mode 0) */ + mcp->spimode = 0; + if (!nfu_set_spi_settings(bitfury, info)) + goto out; + buf[0] = 0; + length = 1; + if (!mcp2210_spi_transfer(bitfury, mcp, buf, &length)) + goto out; + if (!mcp2210_get_gpio_pinval(bitfury, NFU_PIN_SCK_OVR, &val)) + goto out; + if (val != MCP2210_GPIO_PIN_LOW) + goto out; + + info->osc6_bits = opt_nfu_bits; + if (!nfu_reinit(bitfury, info)) + goto out; + + ret = true; + if (!add_cgpu(bitfury)) + quit(1, "Failed to add_cgpu in nfu_detect_one"); + + update_usb_stats(bitfury); + applog(LOG_INFO, "%s %d: Successfully initialised %s", + bitfury->drv->name, bitfury->device_id, bitfury->device_path); + spi_clear_buf(info); + + info->total_nonces = info->chips; +out: + if (!ret) + nfu_close(bitfury); + + return ret; +} + +static bool bxm_purge_buffers(struct cgpu_info *bitfury) +{ + int err; + + err = usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_RESET_REQUEST, SIO_RESET_PURGE_RX, 1, C_BXM_PURGERX); + if (err) + return 
false; + err = usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_RESET_REQUEST, SIO_RESET_PURGE_TX, 1, C_BXM_PURGETX); + if (err) + return false; + return true; +} + +/* Calculate required divisor for desired frequency see FTDI AN_108 page 19*/ +static uint16_t calc_divisor(uint32_t system_clock, uint32_t freq) +{ + uint16_t divisor = system_clock / freq; + + divisor /= 2; + divisor -= 1; + return divisor; +} + +static void bxm_shutdown(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + int chip_n; + + for (chip_n = 0; chip_n < 2; chip_n++) { + spi_clear_buf(info); + spi_add_break(info); + spi_add_fasync(info, chip_n); + spi_config_reg(info, 4, 0); + info->spi_txrx(bitfury, info); + } +} + +static void bxm_close(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + unsigned char bitmask = 0; + unsigned char mode = BITMODE_RESET; + unsigned short usb_val = bitmask; + + bxm_shutdown(bitfury, info); + + //Need to do BITMODE_RESET before usb close per FTDI + usb_val |= (mode << 8); + usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_SET_BITMODE_REQUEST, usb_val, 1, C_BXM_SETBITMODE); +} + +static bool bxm_open(struct cgpu_info *bitfury) +{ + unsigned char mode = BITMODE_RESET; + unsigned char bitmask = 0; + unsigned short usb_val = bitmask; + uint32_t system_clock = TWELVE_MHZ; + uint32_t freq = 200000; + uint16_t divisor = calc_divisor(system_clock,freq); + int amount, err; + char buf[4]; + + /* Enable the transaction translator emulator for these devices + * otherwise we may write to them too quickly. 
*/ + bitfury->usbdev->tt = true; + + err = usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_RESET_REQUEST, SIO_RESET_SIO, 1, C_BXM_SRESET); + if (err) + return false; + err = usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_SET_LATENCY_TIMER_REQUEST, BXM_LATENCY_MS, 1, C_BXM_SETLATENCY); + if (err) + return false; + err = usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_SET_EVENT_CHAR_REQUEST, 0x00, 1, C_BXM_SECR); + if (err) + return false; + + //Do a BITMODE_RESET + usb_val |= (mode << 8); + err = usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_SET_BITMODE_REQUEST, usb_val, 1, C_BXM_SETBITMODE); + if (err) + return false; + //Now set to MPSSE mode + bitmask = 0; + mode = BITMODE_MPSSE; + usb_val = bitmask; + usb_val |= (mode << 8); + err = usb_transfer(bitfury, FTDI_TYPE_OUT, SIO_SET_BITMODE_REQUEST, usb_val, 1, C_BXM_SETBITMODE); + if (err) + return false; + + //Now set the clock divisor + //First send just the 0x8B command to set the system clock to 12MHz + memset(buf, 0, 4); + buf[0] = TCK_D5; + err = usb_write(bitfury, buf, 1, &amount, C_BXM_CLOCK); + if (err || amount != 1) + return false; + + buf[0] = TCK_DIVISOR; + buf[1] = (divisor & 0xFF); + buf[2] = ((divisor >> 8) & 0xFF); + err = usb_write(bitfury, buf, 3, &amount, C_BXM_CLOCKDIV); + if (err || amount != 3) + return false; + + //Disable internal loopback + buf[0] = LOOPBACK_END; + err = usb_write(bitfury, buf, 1, &amount, C_BXM_LOOP); + if (err || amount != 1) + return false; + + //Now set direction and idle (initial) states for the pins + buf[0] = SET_OUT_ADBUS; + buf[1] = DEFAULT_STATE; //Bitmask for LOW_PORT + buf[2] = DEFAULT_DIR; + err = usb_write(bitfury, buf, 3, &amount, C_BXM_ADBUS); + if (err || amount != 3) + return false; + + //Set the pin states for the HIGH_BITS port as all outputs, all low + buf[0] = SET_OUT_ACBUS; + buf[1] = 0x00; //Bitmask for HIGH_PORT + buf[2] = 0xFF; + err = usb_write(bitfury, buf, 3, &amount, C_BXM_ACBUS); + if (err || amount != 3) + return false; + + return true; +} + +static bool 
bxm_set_CS_low(struct cgpu_info *bitfury) +{ + char buf[4] = { 0 }; + int err, amount; + + buf[0] = SET_OUT_ADBUS; + buf[1] &= ~DEFAULT_STATE; //Bitmask for LOW_PORT + buf[2] = DEFAULT_DIR; + err = usb_write(bitfury, buf, 3, &amount, C_BXM_CSLOW); + if (err || amount != 3) + return false; + + return true; +} + +static bool bxm_set_CS_high(struct cgpu_info *bitfury) +{ + char buf[4] = { 0 }; + int err, amount; + + buf[0] = SET_OUT_ADBUS; + buf[1] = DEFAULT_STATE; //Bitmask for LOW_PORT + buf[2] = DEFAULT_DIR; + err = usb_write(bitfury, buf, 3, &amount, C_BXM_CSHIGH); + if (err || amount != 3) + return false; + + return true; +} + +static bool bxm_reset_bitfury(struct cgpu_info *bitfury) +{ + char buf[20] = { 0 }; + char rst_buf[8] = {0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00}; + int err, amount; + + //Set the FTDI CS pin HIGH. This will gate the clock to the Bitfury chips so we can send the reset sequence. + if (!bxm_set_CS_high(bitfury)) + return false; + + buf[0] = WRITE_BYTES_SPI0; + buf[1] = (uint8_t)16 - (uint8_t)1; + buf[2] = 0; + memcpy(&buf[3], rst_buf, 8); + memcpy(&buf[11], rst_buf, 8); + err = usb_write(bitfury, buf, 19, &amount, C_BXM_RESET); + if (err || amount != 19) + return false; + + if (!bxm_set_CS_low(bitfury)) + return false; + + return true; +} + +static bool bxm_reinit(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + bool ret; + int i; + + for (i = 0; i < 2; i++) { + spi_clear_buf(info); + spi_add_break(info); + spi_add_fasync(info, i); + spi_set_freq(info); + spi_send_conf(info); + spi_send_init(info); + ret = info->spi_txrx(bitfury, info); + if (!ret) + break; + } + return ret; +} + +static bool bxm_detect_one(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + bool ret; + + info->spi_txrx = &ftdi_spi_txrx; + ret = bxm_open(bitfury); + if (!ret) + goto out; + ret = bxm_purge_buffers(bitfury); + if (!ret) + goto out; + ret = bxm_reset_bitfury(bitfury); + if (!ret) + goto out; + ret = bxm_purge_buffers(bitfury); + if 
(!ret) + goto out; + + /* Do a dummy read */ + memset(info->spibuf, 0, 80); + info->spibufsz = 80; + ret = info->spi_txrx(bitfury, info); + if (!ret) + goto out; + info->osc6_bits = opt_bxm_bits; + /* Only have 2 chip devices for now */ + info->chips = 2; + nfu_alloc_arrays(info); + + ret = bxm_reinit(bitfury, info); + if (!ret) + goto out; + + if (!add_cgpu(bitfury)) + quit(1, "Failed to add_cgpu in bxm_detect_one"); + + update_usb_stats(bitfury); + applog(LOG_INFO, "%s %d: Successfully initialised %s", + bitfury->drv->name, bitfury->device_id, bitfury->device_path); + spi_clear_buf(info); + + info->total_nonces = 1; +out: + if (!ret) + bxm_close(bitfury, info); + return ret; +} + +static struct cgpu_info *bitfury_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct cgpu_info *bitfury; + struct bitfury_info *info; + enum sub_ident ident; + bool ret = false; + + bitfury = usb_alloc_cgpu(&bitfury_drv, 1); + + if (!usb_init(bitfury, dev, found)) + goto out; + applog(LOG_INFO, "%s %d: Found at %s", bitfury->drv->name, + bitfury->device_id, bitfury->device_path); + + info = calloc(sizeof(struct bitfury_info), 1); + if (!info) + quit(1, "Failed to calloc info in bitfury_detect_one"); + bitfury->device_data = info; + info->ident = ident = usb_ident(bitfury); + switch (ident) { + case IDENT_BF1: + ret = bf1_detect_one(bitfury, info); + break; + case IDENT_BXF: + case IDENT_OSM: + ret = bxf_detect_one(bitfury, info); + break; + case IDENT_NFU: + ret = nfu_detect_one(bitfury, info); + break; + case IDENT_BXM: + ret = bxm_detect_one(bitfury, info); + break; + default: + applog(LOG_INFO, "%s %d: Unrecognised bitfury device", + bitfury->drv->name, bitfury->device_id); + break; + } + + if (!ret) { + free(info); + usb_uninit(bitfury); +out: + bitfury = usb_free_cgpu(bitfury); + } + return bitfury; +} + +static void bitfury_detect(bool __maybe_unused hotplug) +{ + usb_detect(&bitfury_drv, bitfury_detect_one); +} + +static void adjust_bxf_chips(struct 
cgpu_info *bitfury, struct bitfury_info *info, int chip) +{ + int chips = chip + 1; + size_t old, new; + + if (likely(chips <= info->chips)) + return; + if (chips > 999) + return; + old = sizeof(int) * info->chips; + new = sizeof(int) * chips; + applog(LOG_INFO, "%s %d: Adjust chip size to %d", bitfury->drv->name, bitfury->device_id, + chips); + + recalloc(info->filtered_hw, old, new); + recalloc(info->job, old, new); + recalloc(info->submits, old, new); + if (info->chips == 2 && chips <= 6 && info->ident == IDENT_BXF) + bitfury->drv->name = "HXF"; + else if (info->chips <= 6 && chips > 6 && info->ident == IDENT_BXF) + bitfury->drv->name = "MXF"; + info->chips = chips; +} + +static void parse_bxf_submit(struct cgpu_info *bitfury, struct bitfury_info *info, char *buf) +{ + struct work *match_work, *tmp, *work = NULL; + struct thr_info *thr = info->thr; + uint32_t nonce, timestamp; + int workid, chip = -1; + + if (!sscanf(&buf[7], "%x %x %x %d", &nonce, &workid, ×tamp, &chip)) { + applog(LOG_WARNING, "%s %d: Failed to parse submit response", + bitfury->drv->name, bitfury->device_id); + return; + } + adjust_bxf_chips(bitfury, info, chip); + if (unlikely(chip >= info->chips || chip < 0)) { + applog(LOG_INFO, "%s %d: Invalid submit chip number %d", + bitfury->drv->name, bitfury->device_id, chip); + } else + info->submits[chip]++; + + applog(LOG_DEBUG, "%s %d: Parsed nonce %u workid %d timestamp %u", + bitfury->drv->name, bitfury->device_id, nonce, workid, timestamp); + + rd_lock(&bitfury->qlock); + HASH_ITER(hh, bitfury->queued_work, match_work, tmp) { + if (match_work->subid == workid) { + work = copy_work(match_work); + break; + } + } + rd_unlock(&bitfury->qlock); + + if (!work) { + /* Discard first results from any previous run */ + if (unlikely(!info->valid)) + return; + + applog(LOG_INFO, "%s %d: No matching work", bitfury->drv->name, bitfury->device_id); + + mutex_lock(&info->lock); + info->no_matching_work++; + mutex_unlock(&info->lock); + + inc_hw_errors(thr); + 
return; + } + /* Set the device start time from when we first get valid results */ + if (unlikely(!info->valid)) { + info->valid = true; + cgtime(&bitfury->dev_start_tv); + } + set_work_ntime(work, timestamp); + if (submit_nonce(thr, work, nonce)) { + mutex_lock(&info->lock); + info->nonces++; + mutex_unlock(&info->lock); + } + free_work(work); +} + +static bool bxf_send_clock(struct cgpu_info *bitfury, struct bitfury_info *info, + uint8_t clockspeed) +{ + char buf[64]; + + info->clocks = clockspeed; + sprintf(buf, "clock %d %d\n", clockspeed, clockspeed); + return bxf_send_msg(bitfury, buf, C_BXF_CLOCK); +} + +static void parse_bxf_temp(struct cgpu_info *bitfury, struct bitfury_info *info, char *buf) +{ + uint8_t clockspeed = info->clocks; + int decitemp; + + if (!sscanf(&buf[5], "%d", &decitemp)) { + applog(LOG_INFO, "%s %d: Failed to parse temperature", + bitfury->drv->name, bitfury->device_id); + return; + } + + mutex_lock(&info->lock); + bitfury->temp = (double)decitemp / 10; + if (decitemp > info->max_decitemp) { + info->max_decitemp = decitemp; + applog(LOG_DEBUG, "%s %d: New max decitemp %d", bitfury->drv->name, + bitfury->device_id, decitemp); + } + mutex_unlock(&info->lock); + + if (decitemp > info->temp_target + BXF_TEMP_HYSTERESIS) { + if (info->clocks <= BXF_CLOCK_MIN) + goto out; + applog(LOG_WARNING, "%s %d: Hit overheat temperature of %d, throttling!", + bitfury->drv->name, bitfury->device_id, decitemp); + clockspeed = BXF_CLOCK_MIN; + goto out; + } + if (decitemp > info->temp_target) { + if (info->clocks <= BXF_CLOCK_MIN) + goto out; + if (decitemp < info->last_decitemp) + goto out; + applog(LOG_INFO, "%s %d: Temp %d over target and not falling, decreasing clock", + bitfury->drv->name, bitfury->device_id, decitemp); + clockspeed = info->clocks - 1; + goto out; + } + if (decitemp <= info->temp_target && decitemp >= info->temp_target - BXF_TEMP_HYSTERESIS) { + if (decitemp == info->last_decitemp) + goto out; + if (decitemp > info->last_decitemp) { + 
if (info->clocks <= BXF_CLOCK_MIN) + goto out; + applog(LOG_DEBUG, "%s %d: Temp %d in target and rising, decreasing clock", + bitfury->drv->name, bitfury->device_id, decitemp); + clockspeed = info->clocks - 1; + goto out; + } + /* implies: decitemp < info->last_decitemp */ + if (info->clocks >= opt_bxf_bits) + goto out; + applog(LOG_DEBUG, "%s %d: Temp %d in target and falling, increasing clock", + bitfury->drv->name, bitfury->device_id, decitemp); + clockspeed = info->clocks + 1; + goto out; + } + /* implies: decitemp < info->temp_target - BXF_TEMP_HYSTERESIS */ + if (info->clocks >= opt_bxf_bits) + goto out; + applog(LOG_DEBUG, "%s %d: Temp %d below target, increasing clock", + bitfury->drv->name, bitfury->device_id, decitemp); + clockspeed = info->clocks + 1; +out: + bxf_send_clock(bitfury, info, clockspeed); + info->last_decitemp = decitemp; +} + +static void bxf_update_work(struct cgpu_info *bitfury, struct bitfury_info *info); + +static void parse_bxf_needwork(struct cgpu_info *bitfury, struct bitfury_info *info, + char *buf) +{ + int needed; + + if (!sscanf(&buf[9], "%d", &needed)) { + applog(LOG_INFO, "%s %d: Failed to parse needwork", + bitfury->drv->name, bitfury->device_id); + return; + } + while (needed-- > 0) + bxf_update_work(bitfury, info); +} + +static void parse_bxf_job(struct cgpu_info *bitfury, struct bitfury_info *info, char *buf) +{ + int job_id, timestamp, chip; + + if (sscanf(&buf[4], "%x %x %x", &job_id, ×tamp, &chip) != 3) { + applog(LOG_INFO, "%s %d: Failed to parse job", + bitfury->drv->name, bitfury->device_id); + return; + } + adjust_bxf_chips(bitfury, info, chip); + if (chip >= info->chips || chip < 0) { + applog(LOG_INFO, "%s %d: Invalid job chip number %d", + bitfury->drv->name, bitfury->device_id, chip); + return; + } + ++info->job[chip]; +} + +static void parse_bxf_hwerror(struct cgpu_info *bitfury, struct bitfury_info *info, char *buf) +{ + int chip; + + if (!sscanf(&buf[8], "%d", &chip)) { + applog(LOG_INFO, "%s %d: Failed to 
parse hwerror", + bitfury->drv->name, bitfury->device_id); + return; + } + adjust_bxf_chips(bitfury, info, chip); + if (chip >= info->chips || chip < 0) { + applog(LOG_INFO, "%s %d: Invalid hwerror chip number %d", + bitfury->drv->name, bitfury->device_id, chip); + return; + } + ++info->filtered_hw[chip]; +} + +#define PARSE_BXF_MSG(MSG) \ + msg = strstr(buf, #MSG); \ + if (msg) { \ + parse_bxf_##MSG(bitfury, info, msg); \ + continue; \ + } + +static void *bxf_get_results(void *userdata) +{ + struct cgpu_info *bitfury = userdata; + struct bitfury_info *info = bitfury->device_data; + char threadname[24], buf[512]; + + snprintf(threadname, 24, "bxf_recv/%d", bitfury->device_id); + + /* We operate the device at lowest diff since it's not a lot of results + * to process and gives us a better indicator of the nonce return rate + * and hardware errors. */ + sprintf(buf, "target ffffffff\n"); + if (!bxf_send_msg(bitfury, buf, C_BXF_TARGET)) + goto out; + + /* Read thread sends the first work item to get the device started + * since it will roll ntime and make work itself from there on. 
*/ + bxf_update_work(bitfury, info); + bxf_update_work(bitfury, info); + + while (likely(!bitfury->shutdown)) { + char *msg, *strbuf; + int err; + + if (unlikely(bitfury->usbinfo.nodev)) + break; + + err = bxf_recv_msg(bitfury, buf); + if (err < 0) { + if (err != LIBUSB_ERROR_TIMEOUT) + break; + continue; + } + if (!err) + continue; + + if (opt_bxf_debug) { + strbuf = str_text(buf); + applog(LOG_ERR, "%s %d: < [%s]", + bitfury->drv->name, bitfury->device_id, strbuf); + free(strbuf); + } + + PARSE_BXF_MSG(submit); + PARSE_BXF_MSG(temp); + PARSE_BXF_MSG(needwork); + PARSE_BXF_MSG(job); + PARSE_BXF_MSG(hwerror); + + if (buf[0] != '#') { + strbuf = str_text(buf); + applog(LOG_DEBUG, "%s %d: Unrecognised string %s", + bitfury->drv->name, bitfury->device_id, strbuf); + free(strbuf); + } + } +out: + return NULL; +} + +static bool bxf_prepare(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + bxf_send_ledmode(bitfury); + bxf_send_debugmode(bitfury); + + mutex_init(&info->lock); + if (pthread_create(&info->read_thr, NULL, bxf_get_results, (void *)bitfury)) + quit(1, "Failed to create bxf read_thr"); + + return bxf_send_clock(bitfury, info, opt_bxf_bits); +} + +static bool bitfury_prepare(struct thr_info *thr) +{ + struct cgpu_info *bitfury = thr->cgpu; + struct bitfury_info *info = bitfury->device_data; + + info->thr = thr; + + switch(info->ident) { + case IDENT_BXF: + case IDENT_OSM: + return bxf_prepare(bitfury, info); + break; + case IDENT_BF1: + default: + return true; + } +} + +static int64_t bitfury_rate(struct bitfury_info *info) +{ + double nonce_rate; + int64_t ret = 0; + + info->cycles++; + info->total_nonces += info->nonces; + info->saved_nonces += info->nonces; + info->nonces = 0; + nonce_rate = (double)info->total_nonces / (double)info->cycles; + if (info->saved_nonces >= nonce_rate) { + info->saved_nonces -= nonce_rate; + ret = (double)0xffffffff * nonce_rate; + } + return ret; +} + +static int64_t bf1_scan(struct thr_info *thr, struct cgpu_info 
*bitfury, + struct bitfury_info *info) +{ + int amount, i, aged, total = 0, ms_diff; + char readbuf[512], buf[45]; + struct work *work, *tmp; + struct timeval tv_now; + int64_t ret = 0; + + work = get_queue_work(thr, bitfury, thr->id); + if (unlikely(thr->work_restart)) { + work_completed(bitfury, work); + goto out; + } + + buf[0] = 'W'; + memcpy(buf + 1, work->midstate, 32); + memcpy(buf + 33, work->data + 64, 12); + + /* New results may spill out from the latest work, making us drop out + * too early so read whatever we get for the first half nonce and then + * look for the results to prev work. */ + cgtime(&tv_now); + ms_diff = 600 - ms_tdiff(&tv_now, &info->tv_start); + if (ms_diff > 0) { + usb_read_timeout_cancellable(bitfury, readbuf, 512, &amount, ms_diff, + C_BF1_GETRES); + total += amount; + } + + /* Now look for the bulk of the previous work results, they will come + * in a batch following the first data. */ + cgtime(&tv_now); + ms_diff = BF1WAIT - ms_tdiff(&tv_now, &info->tv_start); + /* If a work restart was sent, just empty the buffer. */ + if (unlikely(ms_diff < 10 || thr->work_restart)) + ms_diff = 10; + usb_read_once_timeout_cancellable(bitfury, readbuf + total, BF1MSGSIZE, + &amount, ms_diff, C_BF1_GETRES); + total += amount; + while (amount) { + usb_read_once_timeout(bitfury, readbuf + total, 512 - total, &amount, 10, + C_BF1_GETRES); + total += amount; + }; + + /* Don't send whatever work we've stored if we got a restart */ + if (unlikely(thr->work_restart)) + goto out; + + /* Send work */ + cgtime(&work->tv_work_start); + usb_write(bitfury, buf, 45, &amount, C_BF1_REQWORK); + cgtime(&info->tv_start); + + /* Get response acknowledging work */ + usb_read(bitfury, buf, BF1MSGSIZE, &amount, C_BF1_GETWORK); + +out: + /* Search for what work the nonce matches in order of likelihood. Last + * entry is end of result marker. 
*/ + for (i = 0; i < total - BF1MSGSIZE; i += BF1MSGSIZE) { + bool found = false; + uint32_t nonce; + + /* Ignore state & switched data in results for now. */ + memcpy(&nonce, readbuf + i + 3, 4); + nonce = decnonce(nonce); + + rd_lock(&bitfury->qlock); + HASH_ITER(hh, bitfury->queued_work, work, tmp) { + if (bitfury_checkresults(thr, work, nonce)) { + info->nonces++; + found = true; + break; + } + } + rd_unlock(&bitfury->qlock); + + if (!found) { + if (likely(info->valid)) + inc_hw_errors(thr); + } else if (unlikely(!info->valid)) { + info->valid = true; + cgtime(&bitfury->dev_start_tv); + } + } + + cgtime(&tv_now); + + /* This iterates over the hashlist finding work started more than 6 + * seconds ago. */ + aged = age_queued_work(bitfury, 6.0); + if (aged) { + applog(LOG_DEBUG, "%s %d: Aged %d work items", bitfury->drv->name, + bitfury->device_id, aged); + } + + ret = bitfury_rate(info); + + if (unlikely(bitfury->usbinfo.nodev)) { + applog(LOG_WARNING, "%s %d: Device disappeared, disabling thread", + bitfury->drv->name, bitfury->device_id); + ret = -1; + } + return ret; +} + +static int64_t bxf_scan(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + int ms, aged; + int64_t ret; + + bxf_update_work(bitfury, info); + ms = 1200 / info->chips; + if (ms < 100) + ms = 100; + cgsleep_ms(ms); + + mutex_lock(&info->lock); + ret = bitfury_rate(info); + mutex_unlock(&info->lock); + + /* Keep no more than the last 90 seconds worth of work items in the + * hashlist */ + aged = age_queued_work(bitfury, 90.0); + if (aged) { + applog(LOG_DEBUG, "%s %d: Aged %d work items", bitfury->drv->name, + bitfury->device_id, aged); + } + + if (unlikely(bitfury->usbinfo.nodev)) { + applog(LOG_WARNING, "%s %d: Device disappeared, disabling thread", + bitfury->drv->name, bitfury->device_id); + ret = -1; + } + return ret; +} + +static void bitfury_check_work(struct thr_info *thr, struct cgpu_info *bitfury, + struct bitfury_info *info, int chip_n) +{ + if (!info->work[chip_n]) { + 
info->work[chip_n] = get_work(thr, thr->id); + if (unlikely(thr->work_restart)) { + free_work(info->work[chip_n]); + info->work[chip_n] = NULL; + return; + } + bitfury_work_to_payload(&info->payload[chip_n], info->work[chip_n]); + } + + if (unlikely(bitfury->usbinfo.nodev)) + return; + + if (!libbitfury_sendHashData(thr, bitfury, info, chip_n)) + usb_nodev(bitfury); + + if (info->job_switched[chip_n]) { + if (likely(info->owork[chip_n])) + free_work(info->owork[chip_n]); + info->owork[chip_n] = info->work[chip_n]; + info->work[chip_n] = NULL; + } + +} + +static int64_t nfu_scan(struct thr_info *thr, struct cgpu_info *bitfury, + struct bitfury_info *info) +{ + int64_t ret = 0; + int i; + + for (i = 0; i < info->chips; i++) + bitfury_check_work(thr, bitfury, info, i); + + ret = bitfury_rate(info); + + if (unlikely(bitfury->usbinfo.nodev)) { + applog(LOG_WARNING, "%s %d: Device disappeared, disabling thread", + bitfury->drv->name, bitfury->device_id); + ret = -1; + } + + return ret; +} + +static int64_t bitfury_scanwork(struct thr_info *thr) +{ + struct cgpu_info *bitfury = thr->cgpu; + struct bitfury_info *info = bitfury->device_data; + int64_t ret = -1; + + if (unlikely(share_work_tdiff(bitfury) > 60)) { + if (info->failing) { + if (share_work_tdiff(bitfury) > 120) { + applog(LOG_ERR, "%s %d: Device failed to respond to restart", + bitfury->drv->name, bitfury->device_id); + return ret; + } + } else { + applog(LOG_WARNING, "%s %d: No valid hashes for over 1 minute, attempting to reset", + bitfury->drv->name, bitfury->device_id); + usb_reset(bitfury); + info->failing = true; + } + } + + if (unlikely(bitfury->usbinfo.nodev)) + return ret; + + switch(info->ident) { + case IDENT_BF1: + ret = bf1_scan(thr, bitfury, info); + break; + case IDENT_BXF: + case IDENT_OSM: + ret = bxf_scan(bitfury, info); + break; + case IDENT_NFU: + case IDENT_BXM: + ret = nfu_scan(thr, bitfury, info); + break; + default: + ret = 0; + break; + } + if (ret > 0) + info->failing = false; + return 
ret; +} + +static void bxf_send_maxroll(struct cgpu_info *bitfury, int maxroll) +{ + char buf[20]; + + sprintf(buf, "maxroll %d\n", maxroll); + bxf_send_msg(bitfury, buf, C_BXF_MAXROLL); +} + +static bool bxf_send_work(struct cgpu_info *bitfury, struct work *work) +{ + char buf[512], hexwork[156]; + + __bin2hex(hexwork, work->data, 76); + sprintf(buf, "work %s %x\n", hexwork, work->subid); + return bxf_send_msg(bitfury, buf, C_BXF_WORK); +} + +static void bxf_update_work(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + struct thr_info *thr = info->thr; + struct work *work; + + if (unlikely(bitfury->usbinfo.nodev)) + return; + + work = get_queue_work(thr, bitfury, thr->id); + if (work->drv_rolllimit != info->maxroll) { + info->maxroll = work->drv_rolllimit; + bxf_send_maxroll(bitfury, info->maxroll); + } + + mutex_lock(&info->lock); + work->subid = ++info->work_id; + mutex_unlock(&info->lock); + + cgtime(&work->tv_work_start); + bxf_send_work(bitfury, work); +} + +static void bitfury_flush_work(struct cgpu_info *bitfury) +{ + struct bitfury_info *info = bitfury->device_data; + + switch(info->ident) { + case IDENT_BXF: + case IDENT_OSM: + bxf_send_flush(bitfury); + bxf_update_work(bitfury, info); + bxf_update_work(bitfury, info); + case IDENT_BF1: + default: + break; + } +} + +static void bitfury_update_work(struct cgpu_info *bitfury) +{ + struct bitfury_info *info = bitfury->device_data; + + switch(info->ident) { + case IDENT_BXF: + case IDENT_OSM: + bxf_update_work(bitfury, info); + case IDENT_BF1: + default: + break; + } +} + +static struct api_data *bf1_api_stats(struct bitfury_info *info) +{ + struct api_data *root = NULL; + double nonce_rate; + char serial[16]; + int version; + + version = info->version; + root = api_add_int(root, "Version", &version, true); + root = api_add_string(root, "Product", info->product, false); + sprintf(serial, "%08x", info->serial); + root = api_add_string(root, "Serial", serial, true); + nonce_rate = 
(double)info->total_nonces / (double)info->cycles; + root = api_add_double(root, "NonceRate", &nonce_rate, true); + + return root; +} + +static struct api_data *bxf_api_stats(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + struct api_data *root = NULL; + double nonce_rate; + char buf[32]; + int i; + + sprintf(buf, "%d.%d", info->ver_major, info->ver_minor); + root = api_add_string(root, "Version", buf, true); + root = api_add_int(root, "Revision", &info->hw_rev, false); + root = api_add_int(root, "Chips", &info->chips, false); + nonce_rate = (double)info->total_nonces / (double)info->cycles; + root = api_add_double(root, "NonceRate", &nonce_rate, true); + root = api_add_int(root, "NoMatchingWork", &info->no_matching_work, false); + root = api_add_double(root, "Temperature", &bitfury->temp, false); + root = api_add_int(root, "Max DeciTemp", &info->max_decitemp, false); + root = api_add_uint8(root, "Clock", &info->clocks, false); + for (i = 0; i < info->chips; i++) { + sprintf(buf, "Core%d hwerror", i); + root = api_add_int(root, buf, &info->filtered_hw[i], false); + sprintf(buf, "Core%d jobs", i); + root = api_add_int(root, buf, &info->job[i], false); + sprintf(buf, "Core%d submits", i); + root = api_add_int(root, buf, &info->submits[i], false); + } + + return root; +} + +static struct api_data *nfu_api_stats(struct bitfury_info *info) +{ + struct api_data *root = NULL; + char buf[32]; + int i; + + root = api_add_int(root, "Chips", &info->chips, false); + for (i = 0; i < info->chips; i++) { + sprintf(buf, "Core%d submits", i); + root = api_add_int(root, buf, &info->submits[i], false); + } + return root; +} + +static struct api_data *bitfury_api_stats(struct cgpu_info *cgpu) +{ + struct bitfury_info *info = cgpu->device_data; + + switch(info->ident) { + case IDENT_BF1: + return bf1_api_stats(info); + break; + case IDENT_BXF: + case IDENT_OSM: + return bxf_api_stats(cgpu, info); + break; + case IDENT_NFU: + case IDENT_BXM: + return nfu_api_stats(info); + 
break; + default: + break; + } + return NULL; +} + +static void bitfury_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info *cgpu) +{ + struct bitfury_info *info = cgpu->device_data; + + switch(info->ident) { + case IDENT_BXF: + case IDENT_OSM: + tailsprintf(buf, bufsiz, "%5.1fC", cgpu->temp); + break; + default: + break; + } +} + +static void bf1_init(struct cgpu_info *bitfury) +{ + bf1_close(bitfury); + bf1_open(bitfury); + bf1_reset(bitfury); +} + +static void bitfury_init(struct cgpu_info *bitfury) +{ + struct bitfury_info *info = bitfury->device_data; + + switch(info->ident) { + case IDENT_BF1: + bf1_init(bitfury); + break; + default: + break; + } +} + +static void bxf_close(struct bitfury_info *info) +{ + pthread_join(info->read_thr, NULL); + mutex_destroy(&info->lock); +} + +static void bitfury_shutdown(struct thr_info *thr) +{ + struct cgpu_info *bitfury = thr->cgpu; + struct bitfury_info *info = bitfury->device_data; + + switch(info->ident) { + case IDENT_BF1: + bf1_close(bitfury); + break; + case IDENT_BXF: + case IDENT_OSM: + bxf_close(info); + break; + case IDENT_NFU: + nfu_close(bitfury); + break; + case IDENT_BXM: + bxm_close(bitfury, info); + break; + default: + break; + } + usb_nodev(bitfury); +} + +/* Currently hardcoded to BF1 devices */ +struct device_drv bitfury_drv = { + .drv_id = DRIVER_bitfury, + .dname = "bitfury", + .name = "BF1", + .drv_detect = bitfury_detect, + .thread_prepare = bitfury_prepare, + .hash_work = &hash_driver_work, + .scanwork = bitfury_scanwork, + .flush_work = bitfury_flush_work, + .update_work = bitfury_update_work, + .get_api_stats = bitfury_api_stats, + .get_statline_before = bitfury_get_statline_before, + .reinit_device = bitfury_init, + .thread_shutdown = bitfury_shutdown, + .identify_device = bitfury_identify +}; diff --git a/driver-bitfury.h b/driver-bitfury.h new file mode 100644 index 0000000..6cbbc0a --- /dev/null +++ b/driver-bitfury.h @@ -0,0 +1,116 @@ +/* + * Copyright 2013-2014 Con Kolivas + * + * 
This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef BITFURY_H +#define BITFURY_H + +#include "miner.h" +#include "usbutils.h" +#include "mcp2210.h" + +#define BXF_CLOCK_OFF 0 +#define BXF_CLOCK_MIN 32 +#define BXF_CLOCK_MAX 63 // Not really used since we only get hw errors above default + +/* In tenths of a degree */ +#define BXF_TEMP_TARGET 820 +#define BXF_TEMP_HYSTERESIS 30 + +extern int opt_bxf_temp_target; +extern int opt_nfu_bits; +extern int opt_bxm_bits; +extern int opt_bxf_bits; +extern int opt_bxf_debug; +extern int opt_osm_led_mode; + +#define NFU_PIN_LED 0 +#define NFU_PIN_SCK_OVR 5 +#define NFU_PIN_PWR_EN 6 +#define NFU_PIN_PWR_EN0 7 + +#define SPIBUF_SIZE 16384 +#define BITFURY_REFRESH_DELAY 100 + +#define SIO_RESET_REQUEST 0 +#define SIO_SET_LATENCY_TIMER_REQUEST 0x09 +#define SIO_SET_EVENT_CHAR_REQUEST 0x06 +#define SIO_SET_ERROR_CHAR_REQUEST 0x07 +#define SIO_SET_BITMODE_REQUEST 0x0B +#define SIO_RESET_PURGE_RX 1 +#define SIO_RESET_PURGE_TX 2 + +#define BITMODE_RESET 0x00 +#define BITMODE_MPSSE 0x02 +#define SIO_RESET_SIO 0 + +#define BXM_LATENCY_MS 2 + +struct bitfury_payload { + unsigned char midstate[32]; + unsigned int junk[8]; + unsigned m7; + unsigned ntime; + unsigned nbits; + unsigned nnonce; +}; + +struct bitfury_info { + struct cgpu_info *base_cgpu; + struct thr_info *thr; + enum sub_ident ident; + int nonces; + int total_nonces; + double saved_nonces; + int cycles; + bool valid; /* Set on first valid data being found */ + bool failing; /* Set when an attempted restart has been sent */ + + int chips; + char product[8]; + + /* BF1 specific data */ + uint8_t version; + uint32_t serial; + struct timeval tv_start; + + /* BXF specific data */ + pthread_mutex_t lock; + pthread_t read_thr; + int 
last_decitemp; + int max_decitemp; + int temp_target; + int work_id; // Current work->subid + int no_matching_work; + int maxroll; // Last maxroll sent to device + int ver_major; + int ver_minor; + int hw_rev; + uint8_t clocks; // There are two but we set them equal + int *filtered_hw; // Hardware errors we're told about but are filtered + int *job; // Completed jobs we're told about + int *submits; // Submitted responses + + /* NFU specific data */ + struct mcp_settings mcp; + char spibuf[SPIBUF_SIZE]; + unsigned int spibufsz; + int osc6_bits; + + /* Chip sized arrays */ + struct bitfury_payload *payload; + unsigned int *oldbuf; // 17 vals per chip + bool *job_switched; + bool *second_run; + struct work **work; + struct work **owork; + + bool (*spi_txrx)(struct cgpu_info *, struct bitfury_info *info); +}; + +#endif /* BITFURY_H */ diff --git a/driver-bitmain.c b/driver-bitmain.c new file mode 100644 index 0000000..0b794dc --- /dev/null +++ b/driver-bitmain.c @@ -0,0 +1,2463 @@ +/* + * Copyright 2012-2013 Lingchao Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#ifndef WIN32 + #include + #include + #include + #include + #ifndef O_CLOEXEC + #define O_CLOEXEC 0 + #endif +#else + #include "compat.h" + #include + #include +#endif + +#include "elist.h" +#include "miner.h" +#include "usbutils.h" +#include "driver-bitmain.h" +#include "hexdump.c" +#include "util.h" + +#define BITMAIN_CALC_DIFF1 1 + +#ifdef WIN32 +#define BITMAIN_TEST +#endif + +#define BITMAIN_TEST_PRINT_WORK 0 +#ifdef BITMAIN_TEST +#define BITMAIN_TEST_NUM 19 +#define BITMAIN_TEST_USENUM 1 +int g_test_index = 0; +const char btm_work_test_data[BITMAIN_TEST_NUM][256] = { + "00000002ddc1ce5579dbec17f17fbb8f31ae218a814b2a0c1900f0d90000000100000000b58aa6ca86546b07a5a46698f736c7ca9c0eedc756d8f28ac33c20cc24d792675276f879190afc85b6888022000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000eb2d45233c5b02de50ddcb9049ba16040e0ba00e9750a474eec75891571d925b52dfda4a190266667145b02f000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b19000000000000000090c7d3743e0b0562e4f56d3dd35cece3c5e8275d0abb21bf7e503cb72bd7ed3b52dfda4a190266667bbb58d7000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b1900000000000000006e0561da06022bfbb42c5ecd74a46bfd91934f201b777e9155cc6c3674724ec652dfda4a19026666a0cd827b000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b1900000000000000000312f42ce4964cc23f2d8c039f106f25ddd58e10a1faed21b3bba4b0e621807b52dfda4a1902666629c9497d000000800000000000000000000000000000000000000000000000000000000000000000", + 
"0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b19000000000000000033093a6540dbe8f7f3d19e3d2af05585ac58dafad890fa9a942e977334a23d6e52dfda4a190266665ae95079000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000bd7893057d06e69705bddf9a89c7bac6b40c5b32f15e2295fc8c5edf491ea24952dfda4a190266664b89b4d3000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b19000000000000000075e66f533e53837d14236a793ee4e493985642bc39e016b9e63adf14a584a2aa52dfda4a19026666ab5d638d000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000d936f90c5db5f0fe1d017344443854fbf9e40a07a9b7e74fedc8661c23162bff52dfda4a19026666338e79cb000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000d2c1a7d279a4355b017bc0a4b0a9425707786729f21ee18add3fda4252a31a4152dfda4a190266669bc90806000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000ad36d19f33d04ca779942843890bc3b083cec83a4b60b6c45cf7d21fc187746552dfda4a1902666675d81ab7000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b19000000000000000093b809cf82b76082eacb55bc35b79f31882ed0976fd102ef54783cd24341319b52dfda4a1902666642ab4e42000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b1900000000000000007411ff315430a7bbf41de8a685d457e82d5177c05640d6a4436a40f39e99667852dfda4a190266662affa4b5000000800000000000000000000000000000000000000000000000000000000000000000", + 
"0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b1900000000000000001ad0db5b9e1e2b57c8d3654c160f5a51067521eab7e340a270639d97f00a3fa252dfda4a1902666601a47bb6000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b19000000000000000022e055c442c46bbe16df68603a26891f6e4cf85b90102b39fd7cadb602b4e34552dfda4a1902666695d33cea000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b1900000000000000009c8baf5a8a1e16de2d6ae949d5fec3ed751f10dcd4c99810f2ce08040fb9e31d52dfda4a19026666fe78849d000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000e5655532b414887f35eb4652bc7b11ebac12891f65bc08cbe0ce5b277b9e795152dfda4a19026666fcc0d1d1000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000f272c5508704e2b62dd1c30ea970372c40bf00f9203f9bf69d456b4a7fbfffe352dfda4a19026666c03d4399000000800000000000000000000000000000000000000000000000000000000000000000", + "0000000256ccc4c8aeae2b1e41490bc352893605f284e4be043f7b190000000000000000fca3b4531ba627ad9b0e23cdd84c888952c23810df196e9c6db0bcecba6a830952dfda4a19026666c14009cb000000800000000000000000000000000000000000000000000000000000000000000000" +}; +const char btm_work_test_midstate[BITMAIN_TEST_NUM][256] = { + "2d8738e7f5bcf76dcb8316fec772e20e240cd58c88d47f2d3f5a6a9547ed0a35", + "d31b6ce09c0bfc2af6f3fe3a03475ebefa5aa191fa70a327a354b2c22f9692f1", + "84a8c8224b80d36caeb42eff2a100f634e1ff873e83fd02ef1306a34abef9dbe", + "059882159439b9b32968c79a93c5521e769dbea9d840f56c2a17b9ad87e530b8", + "17fa435d05012574f8f1da26994cc87b6cb9660b5e82072dc6a0881cec150a0d", + "92a28cc5ec4ba6a2688471dfe2032b5fe97c805ca286c503e447d6749796c6af", + "1677a03516d6e9509ac37e273d2482da9af6e077abe8392cdca6a30e916a7ae9", + 
"50bbe09f1b8ac18c97aeb745d5d2c3b5d669b6ac7803e646f65ac7b763a392d1", + "e46a0022ebdc303a7fb1a0ebfa82b523946c312e745e5b8a116b17ae6b4ce981", + "8f2f61e7f5b4d76d854e6d266acfff4d40347548216838ccc4ef3b9e43d3c9ea", + "0a450588ae99f75d676a08d0326e1ea874a3497f696722c78a80c7b6ee961ea6", + "3c4c0fc2cf040b806c51b46de9ec0dcc678a7cc5cf3eff11c6c03de3bc7818cc", + "f6c7c785ab5daddb8f98e5f854f2cb41879fcaf47289eb2b4196fefc1b28316f", + "005312351ccb0d0794779f5023e4335b5cad221accf0dfa3da7b881266fa9f5a", + "7b26d189c6bba7add54143179aadbba7ccaeff6887bd8d5bec9597d5716126e6", + "a4718f4c801e7ddf913a9474eb71774993525684ffea1915f767ab16e05e6889", + "6b6226a8c18919d0e55684638d33a6892a00d22492cc2f5906ca7a4ac21c74a7", + "383114dccd1cb824b869158aa2984d157fcb02f46234ceca65943e919329e697", + "d4d478df3016852b27cb1ae9e1e98d98617f8d0943bf9dc1217f47f817236222" +}; +#endif + +char opt_bitmain_dev[256] = {0}; +bool opt_bitmain_hwerror = false; +bool opt_bitmain_checkall = false; +bool opt_bitmain_checkn2diff = false; +bool opt_bitmain_dev_usb = true; +bool opt_bitmain_nobeeper = false; +bool opt_bitmain_notempoverctrl = false; +bool opt_bitmain_homemode = false; +int opt_bitmain_temp = BITMAIN_TEMP_TARGET; +int opt_bitmain_overheat = BITMAIN_TEMP_OVERHEAT; +int opt_bitmain_fan_min = BITMAIN_DEFAULT_FAN_MIN_PWM; +int opt_bitmain_fan_max = BITMAIN_DEFAULT_FAN_MAX_PWM; +int opt_bitmain_freq_min = BITMAIN_MIN_FREQUENCY; +int opt_bitmain_freq_max = BITMAIN_MAX_FREQUENCY; +bool opt_bitmain_auto; + +static int option_offset = -1; + +// -------------------------------------------------------------- +// CRC16 check table +// -------------------------------------------------------------- +const uint8_t chCRCHTalbe[] = // CRC high byte table +{ + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 
0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40 +}; + +const uint8_t chCRCLTalbe[] = // CRC low byte table +{ + 0x00, 0xC0, 0xC1, 0x01, 0xC3, 0x03, 0x02, 0xC2, 0xC6, 0x06, 0x07, 0xC7, + 0x05, 0xC5, 0xC4, 0x04, 0xCC, 0x0C, 0x0D, 0xCD, 0x0F, 0xCF, 0xCE, 0x0E, + 0x0A, 0xCA, 0xCB, 0x0B, 0xC9, 0x09, 0x08, 0xC8, 0xD8, 0x18, 0x19, 0xD9, + 0x1B, 0xDB, 0xDA, 0x1A, 0x1E, 0xDE, 0xDF, 0x1F, 0xDD, 0x1D, 0x1C, 0xDC, + 0x14, 0xD4, 0xD5, 0x15, 0xD7, 0x17, 0x16, 0xD6, 0xD2, 0x12, 0x13, 0xD3, + 0x11, 0xD1, 0xD0, 0x10, 0xF0, 0x30, 0x31, 0xF1, 0x33, 0xF3, 0xF2, 0x32, + 0x36, 0xF6, 0xF7, 0x37, 0xF5, 0x35, 0x34, 0xF4, 0x3C, 0xFC, 0xFD, 0x3D, + 0xFF, 0x3F, 0x3E, 0xFE, 0xFA, 0x3A, 0x3B, 0xFB, 0x39, 0xF9, 0xF8, 0x38, + 0x28, 0xE8, 0xE9, 0x29, 0xEB, 0x2B, 
0x2A, 0xEA, 0xEE, 0x2E, 0x2F, 0xEF, + 0x2D, 0xED, 0xEC, 0x2C, 0xE4, 0x24, 0x25, 0xE5, 0x27, 0xE7, 0xE6, 0x26, + 0x22, 0xE2, 0xE3, 0x23, 0xE1, 0x21, 0x20, 0xE0, 0xA0, 0x60, 0x61, 0xA1, + 0x63, 0xA3, 0xA2, 0x62, 0x66, 0xA6, 0xA7, 0x67, 0xA5, 0x65, 0x64, 0xA4, + 0x6C, 0xAC, 0xAD, 0x6D, 0xAF, 0x6F, 0x6E, 0xAE, 0xAA, 0x6A, 0x6B, 0xAB, + 0x69, 0xA9, 0xA8, 0x68, 0x78, 0xB8, 0xB9, 0x79, 0xBB, 0x7B, 0x7A, 0xBA, + 0xBE, 0x7E, 0x7F, 0xBF, 0x7D, 0xBD, 0xBC, 0x7C, 0xB4, 0x74, 0x75, 0xB5, + 0x77, 0xB7, 0xB6, 0x76, 0x72, 0xB2, 0xB3, 0x73, 0xB1, 0x71, 0x70, 0xB0, + 0x50, 0x90, 0x91, 0x51, 0x93, 0x53, 0x52, 0x92, 0x96, 0x56, 0x57, 0x97, + 0x55, 0x95, 0x94, 0x54, 0x9C, 0x5C, 0x5D, 0x9D, 0x5F, 0x9F, 0x9E, 0x5E, + 0x5A, 0x9A, 0x9B, 0x5B, 0x99, 0x59, 0x58, 0x98, 0x88, 0x48, 0x49, 0x89, + 0x4B, 0x8B, 0x8A, 0x4A, 0x4E, 0x8E, 0x8F, 0x4F, 0x8D, 0x4D, 0x4C, 0x8C, + 0x44, 0x84, 0x85, 0x45, 0x87, 0x47, 0x46, 0x86, 0x82, 0x42, 0x43, 0x83, + 0x41, 0x81, 0x80, 0x40 +}; + +static uint16_t CRC16(const uint8_t* p_data, uint16_t w_len) +{ + uint8_t chCRCHi = 0xFF; // CRC high byte initialize + uint8_t chCRCLo = 0xFF; // CRC low byte initialize + uint16_t wIndex = 0; // CRC cycling index + + while (w_len--) { + wIndex = chCRCLo ^ *p_data++; + chCRCLo = chCRCHi ^ chCRCHTalbe[wIndex]; + chCRCHi = chCRCLTalbe[wIndex]; + } + return ((chCRCHi << 8) | chCRCLo); +} + +static uint32_t num2bit(int num) { + switch(num) { + case 0: return 0x80000000; + case 1: return 0x40000000; + case 2: return 0x20000000; + case 3: return 0x10000000; + case 4: return 0x08000000; + case 5: return 0x04000000; + case 6: return 0x02000000; + case 7: return 0x01000000; + case 8: return 0x00800000; + case 9: return 0x00400000; + case 10: return 0x00200000; + case 11: return 0x00100000; + case 12: return 0x00080000; + case 13: return 0x00040000; + case 14: return 0x00020000; + case 15: return 0x00010000; + case 16: return 0x00008000; + case 17: return 0x00004000; + case 18: return 0x00002000; + case 19: return 0x00001000; + case 20: 
return 0x00000800; + case 21: return 0x00000400; + case 22: return 0x00000200; + case 23: return 0x00000100; + case 24: return 0x00000080; + case 25: return 0x00000040; + case 26: return 0x00000020; + case 27: return 0x00000010; + case 28: return 0x00000008; + case 29: return 0x00000004; + case 30: return 0x00000002; + case 31: return 0x00000001; + default: return 0x00000000; + } +} + +static bool get_options(int this_option_offset, int *baud, int *chain_num, + int *asic_num, int *timeout, int *frequency, char * frequency_t, uint8_t * reg_data, uint8_t * voltage, char * voltage_t) +{ + char buf[BUFSIZ+1]; + char *ptr, *comma, *colon, *colon2, *colon3, *colon4, *colon5, *colon6; + size_t max; + int i, tmp; + + if (opt_bitmain_options == NULL) + buf[0] = '\0'; + else { + ptr = opt_bitmain_options; + for (i = 0; i < this_option_offset; i++) { + comma = strchr(ptr, ','); + if (comma == NULL) + break; + ptr = comma + 1; + } + + comma = strchr(ptr, ','); + if (comma == NULL) + max = strlen(ptr); + else + max = comma - ptr; + + if (max > BUFSIZ) + max = BUFSIZ; + strncpy(buf, ptr, max); + buf[max] = '\0'; + } + + if (!(*buf)) + return false; + + colon = strchr(buf, ':'); + if (colon) + *(colon++) = '\0'; + + tmp = atoi(buf); + switch (tmp) { + case 115200: + *baud = 115200; + break; + case 57600: + *baud = 57600; + break; + case 38400: + *baud = 38400; + break; + case 19200: + *baud = 19200; + break; + default: + quit(1, "Invalid bitmain-options for baud (%s) " + "must be 115200, 57600, 38400 or 19200", buf); + } + + if (colon && *colon) { + colon2 = strchr(colon, ':'); + if (colon2) + *(colon2++) = '\0'; + + if (*colon) { + tmp = atoi(colon); + if (tmp > 0) { + *chain_num = tmp; + } else { + quit(1, "Invalid bitmain-options for " + "chain_num (%s) must be 1 ~ %d", + colon, BITMAIN_DEFAULT_CHAIN_NUM); + } + } + + if (colon2 && *colon2) { + colon3 = strchr(colon2, ':'); + if (colon3) + *(colon3++) = '\0'; + + tmp = atoi(colon2); + if (tmp > 0 && tmp <= 
BITMAIN_DEFAULT_ASIC_NUM) + *asic_num = tmp; + else { + quit(1, "Invalid bitmain-options for " + "asic_num (%s) must be 1 ~ %d", + colon2, BITMAIN_DEFAULT_ASIC_NUM); + } + + if (colon3 && *colon3) { + colon4 = strchr(colon3, ':'); + if (colon4) + *(colon4++) = '\0'; + + tmp = atoi(colon3); + if (tmp > 0 && tmp <= 0xff) + *timeout = tmp; + else { + quit(1, "Invalid bitmain-options for " + "timeout (%s) must be 1 ~ %d", + colon3, 0xff); + } + if (colon4 && *colon4) { + colon5 = strchr(colon4, ':'); + if(colon5) + *(colon5++) = '\0'; + + tmp = atoi(colon4); + if (tmp < BITMAIN_MIN_FREQUENCY || tmp > BITMAIN_MAX_FREQUENCY) { + quit(1, "Invalid bitmain-options for frequency, must be %d <= frequency <= %d", + BITMAIN_MIN_FREQUENCY, BITMAIN_MAX_FREQUENCY); + } else { + *frequency = tmp; + strcpy(frequency_t, colon4); + } + if (colon5 && *colon5) { + colon6 = strchr(colon5, ':'); + if(colon6) + *(colon6++) = '\0'; + + if(strlen(colon5) > 8 || strlen(colon5)%2 != 0 || strlen(colon5)/2 == 0) { + quit(1, "Invalid bitmain-options for reg data, must be hex now: %s", + colon5); + } + memset(reg_data, 0, 4); + if(!hex2bin(reg_data, colon5, strlen(colon5)/2)) { + quit(1, "Invalid bitmain-options for reg data, hex2bin error now: %s", + colon5); + } + + if (colon6 && *colon6) { + if(strlen(colon6) > 4 || strlen(colon6)%2 != 0 || strlen(colon6)/2 == 0) { + quit(1, "Invalid bitmain-options for voltage data, must be hex now: %s", + colon6); + } + memset(voltage, 0, 2); + if(!hex2bin(voltage, colon6, strlen(colon6)/2)) { + quit(1, "Invalid bitmain-options for voltage data, hex2bin error now: %s", + colon5); + } else { + sprintf(voltage_t, "%02x%02x", voltage[0], voltage[1]); + voltage_t[5] = 0; + voltage_t[4] = voltage_t[3]; + voltage_t[3] = voltage_t[2]; + voltage_t[2] = voltage_t[1]; + voltage_t[1] = '.'; + } + } + } + } + } + } + } + return true; +} + +static bool get_option_freq(int *timeout, int *frequency, char * frequency_t, uint8_t * reg_data) +{ + char buf[BUFSIZ+1]; + char 
*ptr, *comma, *colon, *colon2; + size_t max; + int i, tmp; + + if (opt_bitmain_freq == NULL) + return true; + else { + ptr = opt_bitmain_freq; + + comma = strchr(ptr, ','); + if (comma == NULL) + max = strlen(ptr); + else + max = comma - ptr; + + if (max > BUFSIZ) + max = BUFSIZ; + strncpy(buf, ptr, max); + buf[max] = '\0'; + } + + if (!(*buf)) + return false; + + colon = strchr(buf, ':'); + if (colon) + *(colon++) = '\0'; + + tmp = atoi(buf); + if (tmp > 0 && tmp <= 0xff) + *timeout = tmp; + else { + quit(1, "Invalid bitmain-freq for " + "timeout (%s) must be 1 ~ %d", + buf, 0xff); + } + + if (colon && *colon) { + colon2 = strchr(colon, ':'); + if (colon2) + *(colon2++) = '\0'; + + tmp = atoi(colon); + if (tmp < BITMAIN_MIN_FREQUENCY || tmp > BITMAIN_MAX_FREQUENCY) { + quit(1, "Invalid bitmain-freq for frequency, must be %d <= frequency <= %d", + BITMAIN_MIN_FREQUENCY, BITMAIN_MAX_FREQUENCY); + } else { + *frequency = tmp; + strcpy(frequency_t, colon); + } + + if (colon2 && *colon2) { + if(strlen(colon2) > 8 || strlen(colon2)%2 != 0 || strlen(colon2)/2 == 0) { + quit(1, "Invalid bitmain-freq for reg data, must be hex now: %s", + colon2); + } + memset(reg_data, 0, 4); + if(!hex2bin(reg_data, colon2, strlen(colon2)/2)) { + quit(1, "Invalid bitmain-freq for reg data, hex2bin error now: %s", + colon2); + } + } + } + return true; +} + +static bool get_option_voltage(uint8_t * voltage, char * voltage_t) +{ + if(opt_bitmain_voltage) { + if(strlen(opt_bitmain_voltage) > 4 || strlen(opt_bitmain_voltage)%2 != 0 || strlen(opt_bitmain_voltage)/2 == 0) { + applog(LOG_ERR, "Invalid bitmain-voltage for voltage data, must be hex now: %s,set default_volttage", + opt_bitmain_voltage); + return false; + } + memset(voltage, 0, 2); + if(!hex2bin(voltage, opt_bitmain_voltage, strlen(opt_bitmain_voltage)/2)) { + quit(1, "Invalid bitmain-voltage for voltage data, hex2bin error now: %s", + opt_bitmain_voltage); + } else { + sprintf(voltage_t, "%02x%02x", voltage[0], voltage[1]); + 
voltage_t[5] = 0; + voltage_t[4] = voltage_t[3]; + voltage_t[3] = voltage_t[2]; + voltage_t[2] = voltage_t[1]; + voltage_t[1] = '.'; + } + } + return true; +} + +static int bitmain_set_txconfig(struct bitmain_txconfig_token *bm, + uint8_t reset, uint8_t fan_eft, uint8_t timeout_eft, uint8_t frequency_eft, + uint8_t voltage_eft, uint8_t chain_check_time_eft, uint8_t chip_config_eft, uint8_t hw_error_eft, + uint8_t beeper_ctrl, uint8_t temp_over_ctrl,uint8_t fan_home_mode, + uint8_t chain_num, uint8_t asic_num, uint8_t fan_pwm_data, uint8_t timeout_data, + uint16_t frequency, uint8_t * voltage, uint8_t chain_check_time, + uint8_t chip_address, uint8_t reg_address, uint8_t * reg_data) +{ + uint16_t crc = 0; + int datalen = 0; + uint8_t version = 0; + uint8_t * sendbuf = (uint8_t *)bm; + if (unlikely(!bm)) { + applog(LOG_WARNING, "bitmain_set_txconfig bitmain_txconfig_token is null"); + return -1; + } + + if (unlikely(timeout_data <= 0 || asic_num <= 0 || chain_num <= 0)) { + applog(LOG_WARNING, "bitmain_set_txconfig parameter invalid timeout_data(%d) asic_num(%d) chain_num(%d)", + timeout_data, asic_num, chain_num); + return -1; + } + + datalen = sizeof(struct bitmain_txconfig_token); + memset(bm, 0, datalen); + + bm->token_type = BITMAIN_TOKEN_TYPE_TXCONFIG; + bm->version = version; + bm->length = datalen-4; + bm->length = htole16(bm->length); + + bm->reset = reset; + bm->fan_eft = fan_eft; + bm->timeout_eft = timeout_eft; + bm->frequency_eft = frequency_eft; + bm->voltage_eft = voltage_eft; + bm->chain_check_time_eft = chain_check_time_eft; + bm->chip_config_eft = chip_config_eft; + bm->hw_error_eft = hw_error_eft; + bm->beeper_ctrl = beeper_ctrl; + bm->temp_over_ctrl = temp_over_ctrl; + bm->fan_home_mode = fan_home_mode; + + sendbuf[4] = htole8(sendbuf[4]); + sendbuf[5] = htole8(sendbuf[5]); + + bm->chain_num = chain_num; + bm->asic_num = asic_num; + bm->fan_pwm_data = fan_pwm_data; + bm->timeout_data = timeout_data; + + bm->frequency = htole16(frequency); + 
memcpy(bm->voltage, voltage, 2); + bm->chain_check_time = chain_check_time; + + memcpy(bm->reg_data, reg_data, 4); + bm->chip_address = chip_address; + bm->reg_address = reg_address; + + crc = CRC16((uint8_t *)bm, datalen-2); + bm->crc = htole16(crc); + + applog(LOG_ERR, "BTM TxConfigToken:v(%d) reset(%d) fan_e(%d) tout_e(%d) fq_e(%d) vt_e(%d) chainc_e(%d) chipc_e(%d) hw_e(%d) b_c(%d) t_c(%d) f_m(%d) mnum(%d) anum(%d) fanpwmdata(%d) toutdata(%d) freq(%d) volt(%02x%02x) chainctime(%d) regdata(%02x%02x%02x%02x) chipaddr(%02x) regaddr(%02x) crc(%04x)", + version, reset, fan_eft, timeout_eft, frequency_eft, voltage_eft, + chain_check_time_eft, chip_config_eft, hw_error_eft, beeper_ctrl, temp_over_ctrl,fan_home_mode,chain_num, asic_num, + fan_pwm_data, timeout_data, frequency, voltage[0], voltage[1], + chain_check_time, reg_data[0], reg_data[1], reg_data[2], reg_data[3], chip_address, reg_address, crc); + + return datalen; +} + +static int bitmain_set_txtask(uint8_t * sendbuf, + unsigned int * last_work_block, struct work **works, int work_array_size, int work_array, int sendworkcount, int * sendcount) +{ + uint16_t crc = 0; + uint32_t work_id = 0; + uint8_t version = 0; + int datalen = 0; + int i = 0; + int index = work_array; + uint8_t new_block= 0; + char * ob_hex = NULL; + struct bitmain_txtask_token *bm = (struct bitmain_txtask_token *)sendbuf; + *sendcount = 0; + int cursendcount = 0; + int diff = 0; + unsigned int difftmp = 0; + unsigned int pooldiff = 0; + uint64_t netdifftmp = 0; + int netdiff = 0; + if (unlikely(!bm)) { + applog(LOG_WARNING, "bitmain_set_txtask bitmain_txtask_token is null"); + return -1; + } + if (unlikely(!works)) { + applog(LOG_WARNING, "bitmain_set_txtask work is null"); + return -1; + } + memset(bm, 0, sizeof(struct bitmain_txtask_token)); + + bm->token_type = BITMAIN_TOKEN_TYPE_TXTASK; + bm->version = version; + + datalen = 10; + applog(LOG_DEBUG, "BTM send work count %d -----", sendworkcount); + for(i = 0; i < sendworkcount; i++) { + 
if(index > work_array_size) { + index = 0; + } + if(works[index]) { + if(works[index]->work_block > *last_work_block) { + applog(LOG_ERR, "BTM send task new block %d old(%d)", works[index]->work_block, *last_work_block); + new_block = 1; + *last_work_block = works[index]->work_block; + } +#ifdef BITMAIN_TEST + if(!hex2bin(works[index]->data, btm_work_test_data[g_test_index], 128)) { + applog(LOG_DEBUG, "BTM send task set test data error"); + } + if(!hex2bin(works[index]->midstate, btm_work_test_midstate[g_test_index], 32)) { + applog(LOG_DEBUG, "BTM send task set test midstate error"); + } + g_test_index++; + if(g_test_index >= BITMAIN_TEST_USENUM) { + g_test_index = 0; + } + applog(LOG_DEBUG, "BTM test index = %d", g_test_index); +#endif + work_id = works[index]->id; + bm->works[cursendcount].work_id = htole32(work_id); + applog(LOG_DEBUG, "BTM send task work id:%d %d", bm->works[cursendcount].work_id, work_id); + memcpy(bm->works[cursendcount].midstate, works[index]->midstate, 32); + memcpy(bm->works[cursendcount].data2, works[index]->data + 64, 12); + + if(cursendcount == 0) { + pooldiff = (unsigned int)(works[index]->sdiff); + difftmp = pooldiff; + while(1) { + difftmp = difftmp >> 1; + if(difftmp > 0) { + diff++; + if(diff >= 255) { + break; + } + } else { + break; + } + } + } + + if(BITMAIN_TEST_PRINT_WORK) { + ob_hex = bin2hex(works[index]->data, 76); + applog(LOG_ERR, "work %d data: %s", works[index]->id, ob_hex); + free(ob_hex); + } + + cursendcount++; + } + index++; + } + if(cursendcount <= 0) { + applog(LOG_ERR, "BTM send work count %d", cursendcount); + return 0; + } + + netdifftmp = current_diff; + while(netdifftmp > 0) { + netdifftmp = netdifftmp >> 1; + netdiff++; + } + datalen += 48*cursendcount; + + bm->length = datalen-4; + bm->length = htole16(bm->length); + //len = datalen-3; + //len = htole16(len); + //memcpy(sendbuf+1, &len, 2); + bm->new_block = new_block; + bm->diff = diff; + bm->net_diff = htole16(netdiff); + + sendbuf[4] = 
htole8(sendbuf[4]); + + applog(LOG_DEBUG, "BitMain TxTask Token: %d %d %02x%02x%02x%02x%02x%02x", + datalen, bm->length, sendbuf[0],sendbuf[1],sendbuf[2],sendbuf[3],sendbuf[4],sendbuf[5]); + + *sendcount = cursendcount; + + crc = CRC16(sendbuf, datalen-2); + crc = htole16(crc); + memcpy(sendbuf+datalen-2, &crc, 2); + + applog(LOG_DEBUG, "BitMain TxTask Token: v(%d) new_block(%d) diff(%d pool:%d net:%d) work_num(%d) crc(%04x)", + version, new_block, diff, pooldiff,netdiff, cursendcount, crc); + applog(LOG_DEBUG, "BitMain TxTask Token: %d %d %02x%02x%02x%02x%02x%02x", + datalen, bm->length, sendbuf[0],sendbuf[1],sendbuf[2],sendbuf[3],sendbuf[4],sendbuf[5]); + + return datalen; +} + +static int bitmain_set_rxstatus(struct bitmain_rxstatus_token *bm, + uint8_t chip_status_eft, uint8_t detect_get, uint8_t chip_address, uint8_t reg_address) +{ + uint16_t crc = 0; + uint8_t version = 0; + int datalen = 0; + uint8_t * sendbuf = (uint8_t *)bm; + + if (unlikely(!bm)) { + applog(LOG_WARNING, "bitmain_set_rxstatus bitmain_rxstatus_token is null"); + return -1; + } + + datalen = sizeof(struct bitmain_rxstatus_token); + memset(bm, 0, datalen); + + bm->token_type = BITMAIN_TOKEN_TYPE_RXSTATUS; + bm->version = version; + bm->length = datalen-4; + bm->length = htole16(bm->length); + + bm->chip_status_eft = chip_status_eft; + bm->detect_get = detect_get; + + sendbuf[4] = htole8(sendbuf[4]); + + bm->chip_address = chip_address; + bm->reg_address = reg_address; + + crc = CRC16((uint8_t *)bm, datalen-2); + bm->crc = htole16(crc); + + applog(LOG_ERR, "BitMain RxStatus Token: v(%d) chip_status_eft(%d) detect_get(%d) chip_address(%02x) reg_address(%02x) crc(%04x)", + version, chip_status_eft, detect_get, chip_address, reg_address, crc); + + return datalen; +} + +static int bitmain_parse_rxstatus(const uint8_t * data, int datalen, struct bitmain_rxstatus_data *bm) +{ + uint16_t crc = 0; + uint8_t version = 0; + int i = 0, j = 0; + int asic_num = 0; + int dataindex = 0; + uint8_t tmp = 
0x01; + if (unlikely(!bm)) { + applog(LOG_WARNING, "bitmain_parse_rxstatus bitmain_rxstatus_data is null"); + return -1; + } + if (unlikely(!data || datalen <= 0)) { + applog(LOG_WARNING, "bitmain_parse_rxstatus parameter invalid data is null or datalen(%d) error", datalen); + return -1; + } + memset(bm, 0, sizeof(struct bitmain_rxstatus_data)); + memcpy(bm, data, 28); + if (bm->data_type != BITMAIN_DATA_TYPE_RXSTATUS) { + applog(LOG_ERR, "bitmain_parse_rxstatus datatype(%02x) error", bm->data_type); + return -1; + } + if (bm->version != version) { + applog(LOG_ERR, "bitmain_parse_rxstatus version(%02x) error", bm->version); + return -1; + } + bm->length = htole16(bm->length); + if (bm->length+4 != datalen) { + applog(LOG_ERR, "bitmain_parse_rxstatus length(%d) datalen(%d) error", bm->length, datalen); + return -1; + } + crc = CRC16(data, datalen-2); + memcpy(&(bm->crc), data+datalen-2, 2); + bm->crc = htole16(bm->crc); + if(crc != bm->crc) { + applog(LOG_ERR, "bitmain_parse_rxstatus check crc(%d) != bm crc(%d) datalen(%d)", crc, bm->crc, datalen); + return -1; + } + bm->fifo_space = htole16(bm->fifo_space); + bm->fan_exist = htole16(bm->fan_exist); + bm->temp_exist = htole32(bm->temp_exist); + bm->nonce_error = htole32(bm->nonce_error); + if(bm->chain_num > BITMAIN_MAX_CHAIN_NUM) { + applog(LOG_ERR, "bitmain_parse_rxstatus chain_num=%d error", bm->chain_num); + return -1; + } + dataindex = 28; + if(bm->chain_num > 0) { + memcpy(bm->chain_asic_num, data+datalen-2-bm->chain_num-bm->temp_num-bm->fan_num, bm->chain_num); + } + for(i = 0; i < bm->chain_num; i++) { + asic_num = bm->chain_asic_num[i]; + if(asic_num <= 0) { + asic_num = 1; + } else { + if(asic_num % 32 == 0) { + asic_num = asic_num / 32; + } else { + asic_num = asic_num / 32 + 1; + } + } + memcpy((uint8_t *)bm->chain_asic_exist+i*32, data+dataindex, asic_num*4); + dataindex += asic_num*4; + } + for(i = 0; i < bm->chain_num; i++) { + asic_num = bm->chain_asic_num[i]; + if(asic_num <= 0) { + asic_num = 1; + 
} else { + if(asic_num % 32 == 0) { + asic_num = asic_num / 32; + } else { + asic_num = asic_num / 32 + 1; + } + } + memcpy((uint8_t *)bm->chain_asic_status+i*32, data+dataindex, asic_num*4); + dataindex += asic_num*4; + } + dataindex += bm->chain_num; + if(dataindex + bm->temp_num + bm->fan_num + 2 != datalen) { + applog(LOG_ERR, "bitmain_parse_rxstatus dataindex(%d) chain_num(%d) temp_num(%d) fan_num(%d) not match datalen(%d)", + dataindex, bm->chain_num, bm->temp_num, bm->fan_num, datalen); + return -1; + } + for(i = 0; i < bm->chain_num; i++) { + //bm->chain_asic_status[i] = swab32(bm->chain_asic_status[i]); + for(j = 0; j < 8; j++) { + bm->chain_asic_exist[i*8+j] = htole32(bm->chain_asic_exist[i*8+j]); + bm->chain_asic_status[i*8+j] = htole32(bm->chain_asic_status[i*8+j]); + } + } + if(bm->temp_num > 0) { + memcpy(bm->temp, data+dataindex, bm->temp_num); + dataindex += bm->temp_num; + } + if(bm->fan_num > 0) { + memcpy(bm->fan, data+dataindex, bm->fan_num); + dataindex += bm->fan_num; + } + if(!opt_bitmain_checkall){ + if(tmp != htole8(tmp)){ + applog(LOG_ERR, "BitMain RxStatus byte4 0x%02x chip_value_eft %d reserved %d get_blk_num %d ",*((uint8_t* )bm +4),bm->chip_value_eft,bm->reserved1,bm->get_blk_num); + memcpy(&tmp,data+4,1); + bm->chip_value_eft = tmp >>7; + bm->get_blk_num = tmp >> 4; + bm->reserved1 = ((tmp << 4) & 0xff) >> 5; + } + found_blocks = bm->get_blk_num; + applog(LOG_ERR, "BitMain RxStatus tmp :0x%02x byte4 0x%02x chip_value_eft %d reserved %d get_blk_num %d ",tmp,*((uint8_t* )bm +4),bm->chip_value_eft,bm->reserved1,bm->get_blk_num); + } + applog(LOG_DEBUG, "BitMain RxStatusData: chipv_e(%d) chainnum(%d) fifos(%d) v1(%d) v2(%d) v3(%d) v4(%d) fann(%d) tempn(%d) fanet(%04x) tempet(%08x) ne(%d) regvalue(%d) crc(%04x)", + bm->chip_value_eft, bm->chain_num, bm->fifo_space, bm->hw_version[0], bm->hw_version[1], bm->hw_version[2], bm->hw_version[3], bm->fan_num, bm->temp_num, bm->fan_exist, bm->temp_exist, bm->nonce_error, bm->reg_value, bm->crc); + 
	/* Tail of bitmain_parse_rxstatus: debug-dump the decoded per-chain,
	 * per-temp-sensor and per-fan values. */
	applog(LOG_DEBUG, "BitMain RxStatus Data chain info:");
	for(i = 0; i < bm->chain_num; i++) {
		applog(LOG_DEBUG, "BitMain RxStatus Data chain(%d) asic num=%d asic_exist=%08x asic_status=%08x", i+1, bm->chain_asic_num[i], bm->chain_asic_exist[i*8], bm->chain_asic_status[i*8]);
	}
	applog(LOG_DEBUG, "BitMain RxStatus Data temp info:");
	for(i = 0; i < bm->temp_num; i++) {
		applog(LOG_DEBUG, "BitMain RxStatus Data temp(%d) temp=%d", i+1, bm->temp[i]);
	}
	applog(LOG_DEBUG, "BitMain RxStatus Data fan info:");
	for(i = 0; i < bm->fan_num; i++) {
		applog(LOG_DEBUG, "BitMain RxStatus Data fan(%d) fan=%d", i+1, bm->fan[i]);
	}
	return 0;
}

/* Decode a raw RXNONCE packet (type 0xa2) into *bm and report the number of
 * nonce records it carried via *nonce_num.
 *
 * data/datalen describe the full packet including the 4-byte header and the
 * 2-byte trailing CRC.  The packet is rejected (-1) on a wrong data type,
 * version mismatch, length/datalen disagreement or CRC failure; 0 on success.
 *
 * NOTE(review): htole16/htole32/htole64 are used to convert fields arriving
 * in the device's little-endian wire order; le16toh etc. would be the
 * conventional direction, but the two are the same byte swap. */
static int bitmain_parse_rxnonce(const uint8_t * data, int datalen, struct bitmain_rxnonce_data *bm, int * nonce_num)
{
	int i = 0;
	uint16_t crc = 0;
	uint8_t version = 0;	/* only protocol version 0 is accepted */
	int curnoncenum = 0;
	if (unlikely(!bm)) {
		applog(LOG_ERR, "bitmain_parse_rxnonce bitmain_rxstatus_data null");
		return -1;
	}
	if (unlikely(!data || datalen <= 0)) {
		applog(LOG_ERR, "bitmain_parse_rxnonce data null or datalen(%d) error", datalen);
		return -1;
	}
	memcpy(bm, data, sizeof(struct bitmain_rxnonce_data));
	if (bm->data_type != BITMAIN_DATA_TYPE_RXNONCE) {
		applog(LOG_ERR, "bitmain_parse_rxnonce datatype(%02x) error", bm->data_type);
		return -1;
	}
	if (bm->version != version) {
		applog(LOG_ERR, "bitmain_parse_rxnonce version(%02x) error", bm->version);
		return -1;
	}
	bm->length = htole16(bm->length);
	/* length excludes the 4-byte packet header */
	if (bm->length+4 != datalen) {
		applog(LOG_ERR, "bitmain_parse_rxnonce length(%d) error", bm->length);
		return -1;
	}
	/* CRC16 covers everything except the trailing 2-byte CRC itself */
	crc = CRC16(data, datalen-2);
	memcpy(&(bm->crc), data+datalen-2, 2);
	bm->crc = htole16(bm->crc);
	if(crc != bm->crc) {
		applog(LOG_ERR, "bitmain_parse_rxnonce check crc(%d) != bm crc(%d) datalen(%d)", crc, bm->crc, datalen);
		return -1;
	}
	bm->fifo_space = htole16(bm->fifo_space);
	bm->diff = htole16(bm->diff);
	bm->total_nonce_num = htole64(bm->total_nonce_num);
	/* 14 fixed bytes per packet, 8 bytes (work_id + nonce) per record */
	curnoncenum = 
(datalen-14)/8; + applog(LOG_DEBUG, "BitMain RxNonce Data: nonce_num(%d) fifo_space(%d) diff(%d) tnn(%lld)", curnoncenum, bm->fifo_space, bm->diff, bm->total_nonce_num); + for(i = 0; i < curnoncenum; i++) { + bm->nonces[i].work_id = htole32(bm->nonces[i].work_id); + bm->nonces[i].nonce = htole32(bm->nonces[i].nonce); + + applog(LOG_DEBUG, "BitMain RxNonce Data %d: work_id(%d) nonce(%08x)(%d)", + i, bm->nonces[i].work_id, bm->nonces[i].nonce, bm->nonces[i].nonce); + } + *nonce_num = curnoncenum; + return 0; +} + +static int bitmain_read(struct cgpu_info *bitmain, unsigned char *buf, + size_t bufsize, int timeout, int ep) +{ + int err = 0, readlen = 0; + size_t total = 0; + + if(bitmain == NULL || buf == NULL || bufsize <= 0) { + applog(LOG_WARNING, "bitmain_read parameter error bufsize(%d)", bufsize); + return -1; + } + if(opt_bitmain_dev_usb) { +#ifdef WIN32 + char readbuf[BITMAIN_READBUF_SIZE]; + int ofs = 2, cp = 0; + + err = usb_read_once_timeout(bitmain, readbuf, bufsize, &readlen, timeout, ep); + applog(LOG_DEBUG, "%s%i: Get bitmain read got readlen %d err %d", + bitmain->drv->name, bitmain->device_id, readlen, err); + + if (readlen < 2) + goto out; + + while (readlen > 2) { + cp = readlen - 2; + if (cp > 62) + cp = 62; + memcpy(&buf[total], &readbuf[ofs], cp); + total += cp; + readlen -= cp + 2; + ofs += 64; + } +#else + err = usb_read_once_timeout(bitmain, buf, bufsize, &readlen, timeout, ep); + applog(LOG_DEBUG, "%s%i: Get bitmain read got readlen %d err %d", + bitmain->drv->name, bitmain->device_id, readlen, err); + total = readlen; +#endif + } else { + err = btm_read(bitmain, buf, bufsize); + total = err; + } +out: + return total; +} + +static int bitmain_write(struct cgpu_info *bitmain, char *buf, ssize_t len, int ep) +{ + int err, amount; + if(opt_bitmain_dev_usb) { + err = usb_write(bitmain, buf, len, &amount, ep); + applog(LOG_DEBUG, "%s%i: usb_write got err %d", bitmain->drv->name, + bitmain->device_id, err); + + if (unlikely(err != 0)) { + 
			applog(LOG_ERR, "usb_write error on bitmain_write err=%d", err);
			return BTM_SEND_ERROR;
		}
		/* a short USB write is treated as a hard failure */
		if (amount != len) {
			applog(LOG_ERR, "usb_write length mismatch on bitmain_write amount=%d len=%d", amount, len);
			return BTM_SEND_ERROR;
		}
	} else {
		/* Direct-serial path: btm_write may accept fewer bytes than
		 * requested, so loop until the whole buffer is sent or an
		 * error occurs. */
		int havelen = 0;
		while(havelen < len) {
			err = btm_write(bitmain, buf+havelen, len-havelen);
			if(err < 0) {
				applog(LOG_DEBUG, "%s%i: btm_write got err %d", bitmain->drv->name,
					bitmain->device_id, err);
				applog(LOG_WARNING, "usb_write error on bitmain_write");
				return BTM_SEND_ERROR;
			} else {
				havelen += err;
			}
		}
	}
	return BTM_SEND_OK;
}

/* Send a fully formed token packet to the device.
 * The first byte of data identifies the token type and selects the
 * usb-stats endpoint label (ep) used for accounting; the payload itself is
 * written unchanged via bitmain_write().
 * Returns bitmain_write()'s result, or 0 for an empty buffer. */
static int bitmain_send_data(const uint8_t * data, int datalen, struct cgpu_info *bitmain)
{
	int delay, ret, ep = C_BITMAIN_SEND;
	struct bitmain_info *info = NULL;
	cgtimer_t ts_start;

	if(datalen <= 0) {
		return 0;
	}

	if(data[0] == BITMAIN_TOKEN_TYPE_TXCONFIG) {
		ep = C_BITMAIN_TOKEN_TXCONFIG;
	} else if(data[0] == BITMAIN_TOKEN_TYPE_TXTASK) {
		ep = C_BITMAIN_TOKEN_TXTASK;
	} else if(data[0] == BITMAIN_TOKEN_TYPE_RXSTATUS) {
		ep = C_BITMAIN_TOKEN_RXSTATUS;
	}

	info = bitmain->device_data;
	/* former pacing logic, disabled upstream: */
	//delay = datalen * 10 * 1000000;
	//delay = delay / info->baud;
	//delay += 4000;

	if(opt_debug) {
		applog(LOG_DEBUG, "BitMain: Sent(%d):", datalen);
		hexdump(data, datalen);
	}

	//cgsleep_prepare_r(&ts_start);
	//applog(LOG_DEBUG, "----bitmain_send_data start");
	ret = bitmain_write(bitmain, (char *)data, datalen, ep);
	applog(LOG_DEBUG, "----bitmain_send_data stop ret=%d datalen=%d", ret, datalen);
	//cgsleep_us_r(&ts_start, delay);

	//applog(LOG_DEBUG, "BitMain: Sent: Buffer delay: %dus", delay);

	return ret;
}

/* Submit a nonce found by the device for the given work item.  Which
 * submission path is taken depends on the hw-error / check options (see the
 * branches in the body, continued below). */
static bool bitmain_decode_nonce(struct thr_info *thr, struct cgpu_info *bitmain,
		       struct bitmain_info *info, uint32_t nonce, struct work *work)
{
	info = bitmain->device_data;
	//info->matching_work[work->subid]++;
	if(opt_bitmain_hwerror) {
		applog(LOG_DEBUG, "BitMain: submit direct nonce = %08x", nonce);
		if(opt_bitmain_checkall) {
			/* full host-side verification of every nonce */
			applog(LOG_DEBUG, "BitMain check all");
			return submit_nonce(thr, work, nonce);
		} else {
			if(opt_bitmain_checkn2diff) {
				/* NOTE(review): sdiff is truncated from double
				 * to int here — confirm that fractional pool
				 * difficulties are not expected. */
				int diff = 0;
				diff = work->sdiff;
				/* diff & (diff-1) is non-zero exactly when diff
				 * is not a power of two: verify those on the
				 * host, trust the device for power-of-two
				 * difficulties. */
				if(diff&&(diff&(diff-1))) {
					applog(LOG_DEBUG, "BitMain %d not diff 2 submit_nonce", diff);
					return submit_nonce(thr, work, nonce);
				} else {
					applog(LOG_DEBUG, "BitMain %d diff 2 submit_nonce_direct", diff);
					return submit_nonce_direct(thr, work, nonce);
				}
			} else {
				return submit_nonce_direct(thr, work, nonce);
			}
		}
	} else {
		applog(LOG_DEBUG, "BitMain: submit nonce = %08x", nonce);
		return submit_nonce(thr, work, nonce);
	}
}

/* Count one hardware error: the device reported a nonce that matched no
 * outstanding work. */
static void bitmain_inc_nvw(struct bitmain_info *info, struct thr_info *thr)
{
	applog(LOG_INFO, "%s%d: No matching work - HW error",
	       thr->cgpu->drv->name, thr->cgpu->device_id);

	inc_hw_errors(thr);
	info->no_matching_work++;
}

/* Copy fan and temperature readings from an RX-status packet into info,
 * tracking the per-packet maxima and writing the average temperature to
 * *temp_avg.  Fan values are raw device units scaled by BITMAIN_FAN_FACTOR
 * into RPM. */
static inline void record_temp_fan(struct bitmain_info *info, struct bitmain_rxstatus_data *bm, double *temp_avg)
{
	int i = 0;
	int maxfan = 0, maxtemp = 0;
	*temp_avg = 0;

	info->fan_num = bm->fan_num;
	for(i = 0; i < bm->fan_num; i++) {
		info->fan[i] = bm->fan[i] * BITMAIN_FAN_FACTOR;

		if(info->fan[i] > maxfan)
			maxfan = info->fan[i];
	}
	info->temp_num = bm->temp_num;
	for(i = 0; i < bm->temp_num; i++) {
		info->temp[i] = bm->temp[i];
		/* disabled sign-extension of negative sensor readings:
		if(bm->temp[i] & 0x80) {
			bm->temp[i] &= 0x7f;
			info->temp[i] = 0 - ((~bm->temp[i] & 0x7f) + 1);
		}*/
		*temp_avg += info->temp[i];

		if(info->temp[i] > info->temp_max) {
			info->temp_max = info->temp[i];
		}
		if(info->temp[i] > maxtemp)
			maxtemp = info->temp[i];
	}

	if(bm->temp_num > 0) {
		*temp_avg = *temp_avg / bm->temp_num;
		info->temp_avg = *temp_avg;
	}

	/* report the per-packet maxima to the global device status */
	inc_dev_status(maxfan, maxtemp);
}

/* Record temps/fans from an RX-status packet, log a one-line summary, and
 * flip the overheat/cooled state against the configured thresholds
 * (continued below). */
static void bitmain_update_temps(struct cgpu_info *bitmain, struct bitmain_info *info,
				 struct bitmain_rxstatus_data *bm)
{
	char tmp[64] = {0};
	char msg[10240] = {0};
	int i = 0;
	record_temp_fan(info, bm, 
&(bitmain->temp)); + + strcpy(msg, "BitMain: "); + for(i = 0; i < bm->fan_num; i++) { + if(i != 0) { + strcat(msg, ", "); + } + sprintf(tmp, "Fan%d: %d/m", i+1, info->fan[i]); + strcat(msg, tmp); + } + strcat(msg, "\t"); + for(i = 0; i < bm->temp_num; i++) { + if(i != 0) { + strcat(msg, ", "); + } + sprintf(tmp, "Temp%d: %dC", i+1, info->temp[i]); + strcat(msg, tmp); + } + sprintf(tmp, ", TempMAX: %dC", info->temp_max); + strcat(msg, tmp); + applog(LOG_INFO, msg); + info->temp_history_index++; + info->temp_sum += bitmain->temp; + applog(LOG_DEBUG, "BitMain: temp_index: %d, temp_count: %d, temp_old: %d", + info->temp_history_index, info->temp_history_count, info->temp_old); + if (info->temp_history_index == info->temp_history_count) { + info->temp_history_index = 0; + info->temp_sum = 0; + } + if (unlikely(info->temp_old >= opt_bitmain_overheat)) { + applog(LOG_WARNING, "BTM%d overheat! Idling", bitmain->device_id); + info->overheat = true; + } else if (info->overheat && info->temp_old <= opt_bitmain_temp) { + applog(LOG_WARNING, "BTM%d cooled, restarting", bitmain->device_id); + info->overheat = false; + } +} + +extern void cg_logwork_uint32(struct work *work, uint32_t nonce, bool ok); + +static void bitmain_parse_results(struct cgpu_info *bitmain, struct bitmain_info *info, + struct thr_info *thr, uint8_t *buf, int *offset) +{ + int i, j, n, m, r, errordiff, spare = BITMAIN_READ_SIZE; + uint32_t checkbit = 0x00000000; + bool found = false; + struct work *work = NULL; + char * ob_hex = NULL; + struct bitmain_packet_head packethead; + int asicnum = 0; + int idiff = 0; + int mod = 0,tmp = 0; + + for (i = 0; i <= spare; i++) { + if(buf[i] == 0xa1) { + struct bitmain_rxstatus_data rxstatusdata; + applog(LOG_DEBUG, "bitmain_parse_results RxStatus Data"); + if(*offset < 4) { + return; + } + memcpy(&packethead, buf+i, sizeof(struct bitmain_packet_head)); + packethead.length = htole16(packethead.length); + if(packethead.length > 1130) { + applog(LOG_ERR, 
"bitmain_parse_results bitmain_parse_rxstatus datalen=%d error", packethead.length+4); + continue; + } + if(*offset < packethead.length + 4) { + return; + } + if(bitmain_parse_rxstatus(buf+i, packethead.length+4, &rxstatusdata) != 0) { + applog(LOG_ERR, "bitmain_parse_results bitmain_parse_rxstatus error len=%d", packethead.length+4); + } else { + mutex_lock(&info->qlock); + info->chain_num = rxstatusdata.chain_num; + info->fifo_space = rxstatusdata.fifo_space; + info->hw_version[0] = rxstatusdata.hw_version[0]; + info->hw_version[1] = rxstatusdata.hw_version[1]; + info->hw_version[2] = rxstatusdata.hw_version[2]; + info->hw_version[3] = rxstatusdata.hw_version[3]; + info->nonce_error = rxstatusdata.nonce_error; + errordiff = info->nonce_error-info->last_nonce_error; + //sprintf(g_miner_version, "%d.%d.%d.%d", info->hw_version[0], info->hw_version[1], info->hw_version[2], info->hw_version[3]); + applog(LOG_ERR, "bitmain_parse_results v=%d chain=%d fifo=%d hwv1=%d hwv2=%d hwv3=%d hwv4=%d nerr=%d-%d freq=%d chain info:", + rxstatusdata.version, info->chain_num, info->fifo_space, info->hw_version[0], info->hw_version[1], info->hw_version[2], info->hw_version[3], + info->last_nonce_error, info->nonce_error, info->frequency); + memcpy(info->chain_asic_exist, rxstatusdata.chain_asic_exist, BITMAIN_MAX_CHAIN_NUM*32); + memcpy(info->chain_asic_status, rxstatusdata.chain_asic_status, BITMAIN_MAX_CHAIN_NUM*32); + for(n = 0; n < rxstatusdata.chain_num; n++) { + info->chain_asic_num[n] = rxstatusdata.chain_asic_num[n]; + memset(info->chain_asic_status_t[n], 0, 320); + j = 0; + + mod = 0; + if(info->chain_asic_num[n] <= 0) { + asicnum = 0; + } else { + mod = info->chain_asic_num[n] % 32; + if(mod == 0) { + asicnum = info->chain_asic_num[n] / 32; + } else { + asicnum = info->chain_asic_num[n] / 32 + 1; + } + } + if(asicnum > 0) { + for(m = asicnum-1; m >= 0; m--) { + tmp = mod ? 
(32-mod): 0; + for(r = tmp;r < 32;r++){ + if((r-tmp)%8 == 0 && (r-tmp) !=0){ + info->chain_asic_status_t[n][j] = ' '; + j++; + } + checkbit = num2bit(r); + if(rxstatusdata.chain_asic_exist[n*8+m] & checkbit) { + if(rxstatusdata.chain_asic_status[n*8+m] & checkbit) { + info->chain_asic_status_t[n][j] = 'o'; + } else { + info->chain_asic_status_t[n][j] = 'x'; + } + } else { + info->chain_asic_status_t[n][j] = '-'; + } + j++; + } + info->chain_asic_status_t[n][j] = ' '; + j++; + mod = 0; + } + } + applog(LOG_DEBUG, "bitmain_parse_results chain(%d) asic_num=%d asic_exist=%08x%08x%08x%08x%08x%08x%08x%08x asic_status=%08x%08x%08x%08x%08x%08x%08x%08x", + n, info->chain_asic_num[n], + info->chain_asic_exist[n*8+0], info->chain_asic_exist[n*8+1], info->chain_asic_exist[n*8+2], info->chain_asic_exist[n*8+3], info->chain_asic_exist[n*8+4], info->chain_asic_exist[n*8+5], info->chain_asic_exist[n*8+6], info->chain_asic_exist[n*8+7], + info->chain_asic_status[n*8+0], info->chain_asic_status[n*8+1], info->chain_asic_status[n*8+2], info->chain_asic_status[n*8+3], info->chain_asic_status[n*8+4], info->chain_asic_status[n*8+5], info->chain_asic_status[n*8+6], info->chain_asic_status[n*8+7]); + applog(LOG_ERR, "bitmain_parse_results chain(%d) asic_num=%d asic_status=%s", n, info->chain_asic_num[n], info->chain_asic_status_t[n]); + } + mutex_unlock(&info->qlock); + + if(errordiff > 0) { + for(j = 0; j < errordiff; j++) { + bitmain_inc_nvw(info, thr); + } + mutex_lock(&info->qlock); + info->last_nonce_error += errordiff; + mutex_unlock(&info->qlock); + } + bitmain_update_temps(bitmain, info, &rxstatusdata); + } + + found = true; + spare = packethead.length + 4 + i; + if(spare > *offset) { + applog(LOG_ERR, "bitmain_parse_rxresults space(%d) > offset(%d)", spare, *offset); + spare = *offset; + } + break; + } else if(buf[i] == 0xa2) { + struct bitmain_rxnonce_data rxnoncedata; + int nonce_num = 0; + applog(LOG_DEBUG, "bitmain_parse_results RxNonce Data"); + if(*offset < 4) { + return; + 
} + memcpy(&packethead, buf+i, sizeof(struct bitmain_packet_head)); + packethead.length = htole16(packethead.length); + if(packethead.length > 1030) { + applog(LOG_ERR, "bitmain_parse_results bitmain_parse_rxnonce datalen=%d error", packethead.length+4); + continue; + } + if(*offset < packethead.length + 4) { + return; + } + if(bitmain_parse_rxnonce(buf+i, packethead.length+4, &rxnoncedata, &nonce_num) != 0) { + applog(LOG_ERR, "bitmain_parse_results bitmain_parse_rxnonce error len=%d", packethead.length+4); + } else { + struct pool * pool = NULL; + for(j = 0; j < nonce_num; j++) { + work = clone_queued_work_byid(bitmain, rxnoncedata.nonces[j].work_id); + if(work) { + pool = work->pool; + if(BITMAIN_TEST_PRINT_WORK) { + applog(LOG_ERR, "bitmain_parse_results nonce find work(%d-%d)(%08x)", work->id, rxnoncedata.nonces[j].work_id, rxnoncedata.nonces[j].nonce); + + ob_hex = bin2hex(work->midstate, 32); + applog(LOG_ERR, "work %d midstate: %s", work->id, ob_hex); + free(ob_hex); + + ob_hex = bin2hex(work->data+64, 12); + applog(LOG_ERR, "work %d data2: %s", work->id, ob_hex); + free(ob_hex); + } + + if(work->work_block < info->last_work_block) { + applog(LOG_ERR, "BitMain: bitmain_parse_rxnonce work(%d) nonce stale", rxnoncedata.nonces[j].work_id); + } else { + if (bitmain_decode_nonce(thr, bitmain, info, rxnoncedata.nonces[j].nonce, work)) { + cg_logwork_uint32(work, rxnoncedata.nonces[j].nonce, true); + if(opt_bitmain_hwerror) { +#ifndef BITMAIN_CALC_DIFF1 + mutex_lock(&info->qlock); + idiff = (int)work->sdiff; + info->nonces+=idiff; + info->auto_nonces+=idiff; + mutex_unlock(&info->qlock); + inc_work_status(thr, pool, idiff); +#endif + } else { + mutex_lock(&info->qlock); + info->nonces++; + info->auto_nonces++; + mutex_unlock(&info->qlock); + } + } else { + //bitmain_inc_nvw(info, thr); + applog(LOG_ERR, "BitMain: bitmain_decode_nonce error work(%d)", rxnoncedata.nonces[j].work_id); + } + } + free_work(work); + } else { + //bitmain_inc_nvw(info, thr); + 
applog(LOG_ERR, "BitMain: Nonce not find work(%d)", rxnoncedata.nonces[j].work_id); + } + } +#ifdef BITMAIN_CALC_DIFF1 + if(opt_bitmain_hwerror) { + int difftmp = 0; + difftmp = rxnoncedata.diff; + idiff = 1; + while(difftmp > 0) { + difftmp--; + idiff = idiff << 1; + } + mutex_lock(&info->qlock); + difftmp = idiff*(rxnoncedata.total_nonce_num-info->total_nonce_num); + if(difftmp < 0) + difftmp = 0; + + info->nonces = info->nonces+difftmp; + info->auto_nonces = info->auto_nonces+difftmp; + info->total_nonce_num = rxnoncedata.total_nonce_num; + info->fifo_space = rxnoncedata.fifo_space; + mutex_unlock(&info->qlock); + inc_work_stats(thr, pool, difftmp); + + applog(LOG_DEBUG, "bitmain_parse_rxnonce fifo space=%d diff=%d rxtnn=%lld tnn=%lld", info->fifo_space, idiff, rxnoncedata.total_nonce_num, info->total_nonce_num); + } else { + mutex_lock(&info->qlock); + info->fifo_space = rxnoncedata.fifo_space; + mutex_unlock(&info->qlock); + applog(LOG_DEBUG, "bitmain_parse_rxnonce fifo space=%d", info->fifo_space); + } +#else + mutex_lock(&info->qlock); + info->fifo_space = rxnoncedata.fifo_space; + mutex_unlock(&info->qlock); + applog(LOG_DEBUG, "bitmain_parse_rxnonce fifo space=%d", info->fifo_space); +#endif + +#ifndef WIN32 + if(nonce_num < BITMAIN_MAX_NONCE_NUM) + cgsleep_ms(5); +#endif + } + + found = true; + spare = packethead.length + 4 + i; + if(spare > *offset) { + applog(LOG_ERR, "bitmain_parse_rxnonce space(%d) > offset(%d)", spare, *offset); + spare = *offset; + } + break; + } else { + applog(LOG_ERR, "bitmain_parse_results data type error=%02x", buf[i]); + } + } + if (!found) { + spare = *offset - BITMAIN_READ_SIZE; + /* We are buffering and haven't accumulated one more corrupt + * work result. 
	 */
		if (spare < (int)BITMAIN_READ_SIZE)
			return;
		/* a full header's worth of unrecognised bytes: count it as a
		 * hardware error before discarding */
		bitmain_inc_nvw(info, thr);
	}

	/* drop the consumed (or corrupt) prefix, keep any partial packet */
	*offset -= spare;
	memmove(buf, buf + spare, *offset);
}

/* Clear the running state after a requested reset. */
static void bitmain_running_reset(struct cgpu_info *bitmain, struct bitmain_info *info)
{
	bitmain->results = 0;
	info->reset = false;
}

/* Receive-thread entry point: loop reading from the device, accumulating
 * bytes in readbuf and handing complete packets to bitmain_parse_results()
 * until the device is shut down. */
static void *bitmain_get_results(void *userdata)
{
	struct cgpu_info *bitmain = (struct cgpu_info *)userdata;
	struct bitmain_info *info = bitmain->device_data;
	int offset = 0, read_delay = 0, ret = 0;
	const int rsize = BITMAIN_FTDI_READSIZE;
	char readbuf[BITMAIN_READBUF_SIZE];
	struct thr_info *thr = info->thr;
	char threadname[24];
	int errorcount = 0;

	snprintf(threadname, 24, "btm_recv/%d", bitmain->device_id);
	RenameThread(threadname);

	while (likely(!bitmain->shutdown)) {
		unsigned char buf[rsize];

		//applog(LOG_DEBUG, "+++++++bitmain_get_results offset=%d", offset);

		/* only attempt a parse once at least a packet header's worth
		 * of bytes has accumulated */
		if (offset >= (int)BITMAIN_READ_SIZE) {
			//applog(LOG_DEBUG, "======start bitmain_get_results ");
			bitmain_parse_results(bitmain, info, thr, readbuf, &offset);
			//applog(LOG_DEBUG, "======stop bitmain_get_results ");
		}

		if (unlikely(offset + rsize >= BITMAIN_READBUF_SIZE)) {
			/* This should never happen */
			applog(LOG_DEBUG, "BitMain readbuf overflow, resetting buffer");
			offset = 0;
		}

		if (unlikely(info->reset)) {
			bitmain_running_reset(bitmain, info);
			/* Discard anything in the buffer */
			offset = 0;
		}

		/* As the usb read returns after just 1ms, sleep long enough
		 * to leave the interface idle for writes to occur, but do not
		 * sleep if we have been receiving data as more may be coming.
		 */
		//if (offset == 0) {
		//	cgsleep_ms_r(&ts_start, BITMAIN_READ_TIMEOUT);
		//}

		//cgsleep_prepare_r(&ts_start);
		//applog(LOG_DEBUG, "======start bitmain_get_results bitmain_read");
		ret = bitmain_read(bitmain, buf, rsize, BITMAIN_READ_TIMEOUT, C_BITMAIN_READ);
		//applog(LOG_DEBUG, "======stop bitmain_get_results bitmain_read=%d", ret);

		/* NOTE(review): a 18-byte read is treated like a failure here
		 * — presumably a known junk frame size; confirm against the
		 * device protocol. */
		if ((ret < 1) || (ret == 18)) {
			errorcount++;
			/* back off after repeated failures; the threshold is
			 * much higher on Windows */
#ifdef WIN32
			if(errorcount > 200) {
				//applog(LOG_ERR, "bitmain_read errorcount ret=%d", ret);
				cgsleep_ms(20);
				errorcount = 0;
			}
#else
			if(errorcount > 3) {
				//applog(LOG_ERR, "bitmain_read errorcount ret=%d", ret);
				cgsleep_ms(20);
				errorcount = 0;
			}
#endif
			if(ret < 1)
			{
				cgsleep_ms(1); // add by clement : we just wait a little time for RX data...
				continue;
			}
		}

		if (opt_debug) {
			applog(LOG_DEBUG, "BitMain: get:");
			hexdump((uint8_t *)buf, ret);
		}

		memcpy(readbuf+offset, buf, ret);
		offset += ret;
	}
	return NULL;
}

/* Derive the device timeout from the configured chip frequency. */
static void bitmain_set_timeout(struct bitmain_info *info)
{
	info->timeout = BITMAIN_TIMEOUT_FACTOR / info->frequency;
}

/* Write-thread stub: task sending is driven elsewhere, so this thread body
 * is intentionally empty (its creation is commented out in
 * bitmain_prepare). */
static void *bitmain_send_tasks(void *userdata)
{
	return NULL;
}

static void bitmain_init(struct cgpu_info *bitmain)
{
	applog(LOG_INFO, "BitMain: Opened on %s", bitmain->device_path);
}

/* Per-device thread setup: allocate the work array, initialise locks and
 * condition/semaphore primitives, and start the receive thread.  Any
 * allocation or thread-creation failure aborts via quit(). */
static bool bitmain_prepare(struct thr_info *thr)
{
	struct cgpu_info *bitmain = thr->cgpu;
	struct bitmain_info *info = bitmain->device_data;

	free(bitmain->works);
	/* NOTE(review): calloc's (nmemb, size) arguments look transposed —
	 * harmless since the product is the same, but worth confirming. */
	bitmain->works = calloc(BITMAIN_MAX_WORK_NUM * sizeof(struct work *),
				BITMAIN_ARRAY_SIZE);
	if (!bitmain->works)
		quit(1, "Failed to calloc bitmain works in bitmain_prepare");

	info->thr = thr;
	mutex_init(&info->lock);
	mutex_init(&info->qlock);
	if (unlikely(pthread_cond_init(&info->qcond, NULL)))
		quit(1, "Failed to pthread_cond_init bitmain qcond");
	cgsem_init(&info->write_sem);

	if (pthread_create(&info->read_thr, NULL, bitmain_get_results, (void *)bitmain))
		quit(1, "Failed to create bitmain read_thr");

	//if (pthread_create(&info->write_thr, NULL, bitmain_send_tasks, (void *)bitmain))
	//	quit(1, "Failed to create bitmain write_thr");

	bitmain_init(bitmain);

	return true;
}

/* Probe/initialise the device: flush stale input, send an RX-status request
 * with detect_get set, then poll for the 0xa1 status reply (continues past
 * this chunk). */
static int bitmain_initialize(struct cgpu_info *bitmain)
{
	uint8_t data[BITMAIN_READBUF_SIZE];
	struct bitmain_info *info = NULL;
	int ret = 0, spare = 0;
	uint8_t sendbuf[BITMAIN_SENDBUF_SIZE];
	int readlen = 0;
	int sendlen = 0;
	int trycount = 3;
	struct timespec p;
	struct bitmain_rxstatus_data rxstatusdata;
	int i = 0, j = 0, m = 0, r = 0, statusok = 0;
	uint32_t checkbit = 0x00000000;
	int hwerror_eft = 0;
	int beeper_ctrl = 1;
	int tempover_ctrl = 1;
	int home_mode = 0;
	struct bitmain_packet_head packethead;
	int asicnum = 0;
	int mod = 0,tmp = 0;

	/* Send reset, then check for result */
	if(!bitmain) {
		applog(LOG_WARNING, "bitmain_initialize cgpu_info is null");
		return -1;
	}
	info = bitmain->device_data;

	/* clear read buf */
	ret = bitmain_read(bitmain, data, BITMAIN_READBUF_SIZE,
			   BITMAIN_RESET_TIMEOUT, C_BITMAIN_READ);
	if(ret > 0) {
		if (opt_debug) {
			applog(LOG_DEBUG, "BTM%d Clear Read(%d):", bitmain->device_id, ret);
			hexdump(data, ret);
		}
	}

	sendlen = bitmain_set_rxstatus((struct bitmain_rxstatus_token *)sendbuf, 0, 1, 0, 0);
	if(sendlen <= 0) {
		applog(LOG_ERR, "bitmain_initialize bitmain_set_rxstatus error(%d)", sendlen);
		return -1;
	}

	ret = bitmain_send_data(sendbuf, sendlen, bitmain);
	if (unlikely(ret == BTM_SEND_ERROR)) {
		applog(LOG_ERR, "bitmain_initialize bitmain_send_data error");
		return -1;
	}
	while(trycount >= 0) {
		ret = bitmain_read(bitmain, data+readlen, BITMAIN_READBUF_SIZE, BITMAIN_RESET_TIMEOUT, C_BITMAIN_DATA_RXSTATUS);
		if(ret > 0) {
			readlen += ret;
			if(readlen > BITMAIN_READ_SIZE) {
				for(i = 0; i < readlen; i++) {
					if(data[i] == 0xa1) {
						if (opt_debug) {
							applog(LOG_DEBUG, "%s%d initset: get:", bitmain->drv->name, bitmain->device_id);
							hexdump(data, readlen);
+ } + memcpy(&packethead, data+i, sizeof(struct bitmain_packet_head)); + packethead.length = htole16(packethead.length); + + if(packethead.length > 1130) { + applog(LOG_ERR, "bitmain_initialize rxstatus datalen=%d error", packethead.length+4); + continue; + } + if(readlen-i < packethead.length+4) { + applog(LOG_ERR, "bitmain_initialize rxstatus datalen=%d<%d low", readlen-i, packethead.length+4); + continue; + } + if (bitmain_parse_rxstatus(data+i, packethead.length+4, &rxstatusdata) != 0) { + applog(LOG_ERR, "bitmain_initialize bitmain_parse_rxstatus error"); + continue; + } + info->chain_num = rxstatusdata.chain_num; + info->fifo_space = rxstatusdata.fifo_space; + info->hw_version[0] = rxstatusdata.hw_version[0]; + info->hw_version[1] = rxstatusdata.hw_version[1]; + info->hw_version[2] = rxstatusdata.hw_version[2]; + info->hw_version[3] = rxstatusdata.hw_version[3]; + info->nonce_error = 0; + info->last_nonce_error = 0; + sprintf(g_miner_version, "%d.%d.%d.%d", info->hw_version[0], info->hw_version[1], info->hw_version[2], info->hw_version[3]); + applog(LOG_ERR, "bitmain_initialize rxstatus v(%d) chain(%d) fifo(%d) hwv1(%d) hwv2(%d) hwv3(%d) hwv4(%d) nerr(%d) freq=%d", + rxstatusdata.version, info->chain_num, info->fifo_space, info->hw_version[0], info->hw_version[1], info->hw_version[2], info->hw_version[3], + rxstatusdata.nonce_error, info->frequency); + + memcpy(info->chain_asic_exist, rxstatusdata.chain_asic_exist, BITMAIN_MAX_CHAIN_NUM*32); + memcpy(info->chain_asic_status, rxstatusdata.chain_asic_status, BITMAIN_MAX_CHAIN_NUM*32); + for(i = 0; i < rxstatusdata.chain_num; i++) { + info->chain_asic_num[i] = rxstatusdata.chain_asic_num[i]; + memset(info->chain_asic_status_t[i], 0, 320); + j = 0; + mod = 0; + + if(info->chain_asic_num[i] <= 0) { + asicnum = 0; + } else { + mod = info->chain_asic_num[i] % 32; + if(mod == 0) { + asicnum = info->chain_asic_num[i] / 32; + } else { + asicnum = info->chain_asic_num[i] / 32 + 1; + } + } + if(asicnum > 0) { + for(m = 
asicnum-1; m >= 0; m--) { + tmp = mod ? (32-mod):0; + for(r = tmp;r < 32;r++){ + if((r-tmp)%8 == 0 && (r-tmp) !=0){ + info->chain_asic_status_t[i][j] = ' '; + j++; + } + checkbit = num2bit(r); + if(rxstatusdata.chain_asic_exist[i*8+m] & checkbit) { + if(rxstatusdata.chain_asic_status[i*8+m] & checkbit) { + info->chain_asic_status_t[i][j] = 'o'; + } else { + info->chain_asic_status_t[i][j] = 'x'; + } + } else { + info->chain_asic_status_t[i][j] = '-'; + } + j++; + } + info->chain_asic_status_t[i][j] = ' '; + j++; + mod = 0; + } + } + applog(LOG_DEBUG, "bitmain_initialize chain(%d) asic_num=%d asic_exist=%08x%08x%08x%08x%08x%08x%08x%08x asic_status=%08x%08x%08x%08x%08x%08x%08x%08x", + i, info->chain_asic_num[i], + info->chain_asic_exist[i*8+0], info->chain_asic_exist[i*8+1], info->chain_asic_exist[i*8+2], info->chain_asic_exist[i*8+3], info->chain_asic_exist[i*8+4], info->chain_asic_exist[i*8+5], info->chain_asic_exist[i*8+6], info->chain_asic_exist[i*8+7], + info->chain_asic_status[i*8+0], info->chain_asic_status[i*8+1], info->chain_asic_status[i*8+2], info->chain_asic_status[i*8+3], info->chain_asic_status[i*8+4], info->chain_asic_status[i*8+5], info->chain_asic_status[i*8+6], info->chain_asic_status[i*8+7]); + applog(LOG_ERR, "bitmain_initialize chain(%d) asic_num=%d asic_status=%s", i, info->chain_asic_num[i], info->chain_asic_status_t[i]); + } + bitmain_update_temps(bitmain, info, &rxstatusdata); + statusok = 1; + break; + } + } + if(statusok) { + break; + } + } + } + trycount--; + p.tv_sec = 0; + p.tv_nsec = BITMAIN_RESET_PITCH; + nanosleep(&p, NULL); + } + + p.tv_sec = 0; + p.tv_nsec = BITMAIN_RESET_PITCH; + nanosleep(&p, NULL); + + cgtime(&info->last_status_time); + + if(statusok) { + applog(LOG_ERR, "bitmain_initialize start send txconfig"); + if(opt_bitmain_hwerror) + hwerror_eft = 1; + else + hwerror_eft = 0; + if(opt_bitmain_nobeeper) + beeper_ctrl = 0; + else + beeper_ctrl = 1; + if(opt_bitmain_notempoverctrl) + tempover_ctrl = 0; + else + tempover_ctrl 
= 1; + if(opt_bitmain_homemode) + home_mode= 1; + else + home_mode= 0; + sendlen = bitmain_set_txconfig((struct bitmain_txconfig_token *)sendbuf, 1, 1, 1, 1, 1, 0, 1, hwerror_eft, beeper_ctrl, tempover_ctrl,home_mode, + info->chain_num, info->asic_num, BITMAIN_DEFAULT_FAN_MAX_PWM, info->timeout, + info->frequency, info->voltage, 0, 0, 0x04, info->reg_data); + if(sendlen <= 0) { + applog(LOG_ERR, "bitmain_initialize bitmain_set_txconfig error(%d)", sendlen); + return -1; + } + + ret = bitmain_send_data(sendbuf, sendlen, bitmain); + if (unlikely(ret == BTM_SEND_ERROR)) { + applog(LOG_ERR, "bitmain_initialize bitmain_send_data error"); + return -1; + } + applog(LOG_WARNING, "BMM%d: InitSet succeeded", bitmain->device_id); + } else { + applog(LOG_WARNING, "BMS%d: InitSet error", bitmain->device_id); + return -1; + } + return 0; +} + +static void bitmain_usb_init(struct cgpu_info *bitmain) +{ + int err, interface; + +#ifndef WIN32 + return; +#endif + + if (bitmain->usbinfo.nodev) + return; + + interface = usb_interface(bitmain); + + // Reset + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_RESET, interface, C_RESET); + + applog(LOG_DEBUG, "%s%i: reset got err %d", + bitmain->drv->name, bitmain->device_id, err); + + if (bitmain->usbinfo.nodev) + return; + + // Set latency + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_LATENCY, + BITMAIN_LATENCY, interface, C_LATENCY); + + applog(LOG_DEBUG, "%s%i: latency got err %d", + bitmain->drv->name, bitmain->device_id, err); + + if (bitmain->usbinfo.nodev) + return; + + // Set data + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, + FTDI_VALUE_DATA_BTM, interface, C_SETDATA); + + applog(LOG_DEBUG, "%s%i: data got err %d", + bitmain->drv->name, bitmain->device_id, err); + + if (bitmain->usbinfo.nodev) + return; + + // Set the baud + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, FTDI_VALUE_BAUD_BTM, + (FTDI_INDEX_BAUD_BTM & 0xff00) | interface, + C_SETBAUD); + + 
applog(LOG_DEBUG, "%s%i: setbaud got err %d", + bitmain->drv->name, bitmain->device_id, err); + + if (bitmain->usbinfo.nodev) + return; + + // Set Modem Control + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); + + applog(LOG_DEBUG, "%s%i: setmodemctrl got err %d", + bitmain->drv->name, bitmain->device_id, err); + + if (bitmain->usbinfo.nodev) + return; + + // Set Flow Control + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + + applog(LOG_DEBUG, "%s%i: setflowctrl got err %d", + bitmain->drv->name, bitmain->device_id, err); + + if (bitmain->usbinfo.nodev) + return; + + /* BitMain repeats the following */ + // Set Modem Control + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); + + applog(LOG_DEBUG, "%s%i: setmodemctrl 2 got err %d", + bitmain->drv->name, bitmain->device_id, err); + + if (bitmain->usbinfo.nodev) + return; + + // Set Flow Control + err = usb_transfer(bitmain, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + + applog(LOG_DEBUG, "%s%i: setflowctrl 2 got err %d", + bitmain->drv->name, bitmain->device_id, err); +} + +static struct cgpu_info * bitmain_usb_detect_one(libusb_device *dev, struct usb_find_devices *found) +{ + int baud, chain_num, asic_num, timeout, frequency = 0; + char frequency_t[256] = {0}; + uint8_t reg_data[4] = {0}; + uint8_t voltage[2] = {0}; + char voltage_t[8] = {0}; + int this_option_offset = ++option_offset; + struct bitmain_info *info; + struct cgpu_info *bitmain; + bool configured; + int ret; + + if (opt_bitmain_options == NULL) + return NULL; + + bitmain = usb_alloc_cgpu(&bitmain_drv, BITMAIN_MINER_THREADS); + + baud = BITMAIN_IO_SPEED; + chain_num = BITMAIN_DEFAULT_CHAIN_NUM; + asic_num = BITMAIN_DEFAULT_ASIC_NUM; + timeout = BITMAIN_DEFAULT_TIMEOUT; + frequency = BITMAIN_DEFAULT_FREQUENCY; + + if (!usb_init(bitmain, dev, 
found)) + goto shin; + + configured = get_options(this_option_offset, &baud, &chain_num, + &asic_num, &timeout, &frequency, frequency_t, reg_data, voltage, voltage_t); + get_option_freq(&timeout, &frequency, frequency_t, reg_data); + get_option_voltage(voltage, voltage_t); + + /* Even though this is an FTDI type chip, we want to do the parsing + * all ourselves so set it to std usb type */ + bitmain->usbdev->usb_type = USB_TYPE_STD; + + /* We have a real BitMain! */ + bitmain_usb_init(bitmain); + + bitmain->device_data = calloc(sizeof(struct bitmain_info), 1); + if (unlikely(!(bitmain->device_data))) + quit(1, "Failed to calloc bitmain_info data"); + info = bitmain->device_data; + + if (configured) { + info->baud = baud; + info->chain_num = chain_num; + info->asic_num = asic_num; + info->timeout = timeout; + info->frequency = frequency; + strcpy(info->frequency_t, frequency_t); + memcpy(info->reg_data, reg_data, 4); + memcpy(info->voltage, voltage, 2); + strcpy(info->voltage_t, voltage_t); + } else { + info->baud = BITMAIN_IO_SPEED; + info->chain_num = BITMAIN_DEFAULT_CHAIN_NUM; + info->asic_num = BITMAIN_DEFAULT_ASIC_NUM; + info->timeout = BITMAIN_DEFAULT_TIMEOUT; + info->frequency = BITMAIN_DEFAULT_FREQUENCY; + sprintf(info->frequency_t, "%d", BITMAIN_DEFAULT_FREQUENCY); + memset(info->reg_data, 0, 4); + info->voltage[0] = BITMAIN_DEFAULT_VOLTAGE0; + info->voltage[1] = BITMAIN_DEFAULT_VOLTAGE1; + strcpy(info->voltage_t, BITMAIN_DEFAULT_VOLTAGE_T); + } + + info->fan_pwm = BITMAIN_DEFAULT_FAN_MIN_PWM; + info->temp_max = 0; + /* This is for check the temp/fan every 3~4s */ + info->temp_history_count = (4 / (float)((float)info->timeout * ((float)1.67/0x32))) + 1; + if (info->temp_history_count <= 0) + info->temp_history_count = 1; + + info->temp_history_index = 0; + info->temp_sum = 0; + info->temp_old = 0; + + if (!add_cgpu(bitmain)) + goto unshin; + + applog(LOG_ERR, "------bitmain usb detect one------"); + ret = bitmain_initialize(bitmain); + if (ret && 
!configured) + goto unshin; + + update_usb_stats(bitmain); + + info->errorcount = 0; + + applog(LOG_DEBUG, "BitMain Detected: %s " + "(chain_num=%d asic_num=%d timeout=%d frequency=%d)", + bitmain->device_path, info->chain_num, info->asic_num, info->timeout, + info->frequency); + + return bitmain; + +unshin: + + usb_uninit(bitmain); + +shin: + + free(bitmain->device_data); + bitmain->device_data = NULL; + + bitmain = usb_free_cgpu(bitmain); + + return NULL; +} + +static bool bitmain_detect_one(const char * devpath) +{ + int baud, chain_num, asic_num, timeout, frequency = 0; + char frequency_t[256] = {0}; + uint8_t reg_data[4] = {0}; + uint8_t voltage[2] = {0}; + char voltage_t[8] = {0}; + int this_option_offset = ++option_offset; + struct bitmain_info *info; + struct cgpu_info *bitmain; + bool configured; + int ret; + + if (opt_bitmain_options == NULL) + return false; + + bitmain = btm_alloc_cgpu(&bitmain_drv, BITMAIN_MINER_THREADS); + + configured = get_options(this_option_offset, &baud, &chain_num, + &asic_num, &timeout, &frequency, frequency_t, reg_data, voltage, voltage_t); + get_option_freq(&timeout, &frequency, frequency_t, reg_data); + get_option_voltage(voltage, voltage_t); + + if (!btm_init(bitmain, opt_bitmain_dev)) + goto shin; + applog(LOG_ERR, "bitmain_detect_one btm init ok"); + + bitmain->device_data = calloc(sizeof(struct bitmain_info), 1); + /* make sure initialize successfully*/ + memset(bitmain->device_data,0,sizeof(struct bitmain_info)); + if (unlikely(!(bitmain->device_data))) + quit(1, "Failed to calloc bitmain_info data"); + info = bitmain->device_data; + + if (configured) { + info->baud = baud; + info->chain_num = chain_num; + info->asic_num = asic_num; + info->timeout = timeout; + info->frequency = frequency; + strcpy(info->frequency_t, frequency_t); + memcpy(info->reg_data, reg_data, 4); + memcpy(info->voltage, voltage, 2); + strcpy(info->voltage_t, voltage_t); + } else { + info->baud = BITMAIN_IO_SPEED; + info->chain_num = 
BITMAIN_DEFAULT_CHAIN_NUM; + info->asic_num = BITMAIN_DEFAULT_ASIC_NUM; + info->timeout = BITMAIN_DEFAULT_TIMEOUT; + info->frequency = BITMAIN_DEFAULT_FREQUENCY; + sprintf(info->frequency_t, "%d", BITMAIN_DEFAULT_FREQUENCY); + memset(info->reg_data, 0, 4); + info->voltage[0] = BITMAIN_DEFAULT_VOLTAGE0; + info->voltage[1] = BITMAIN_DEFAULT_VOLTAGE1; + strcpy(info->voltage_t, BITMAIN_DEFAULT_VOLTAGE_T); + } + + info->fan_pwm = BITMAIN_DEFAULT_FAN_MIN_PWM; + info->temp_max = 0; + /* This is for check the temp/fan every 3~4s */ + info->temp_history_count = (4 / (float)((float)info->timeout * ((float)1.67/0x32))) + 1; + if (info->temp_history_count <= 0) + info->temp_history_count = 1; + + info->temp_history_index = 0; + info->temp_sum = 0; + info->temp_old = 0; + + if (!add_cgpu(bitmain)) + goto unshin; + + ret = bitmain_initialize(bitmain); + applog(LOG_ERR, "bitmain_detect_one stop bitmain_initialize %d", ret); + if (ret && !configured) + goto unshin; + + info->errorcount = 0; + + applog(LOG_ERR, "BitMain Detected: %s " + "(chain_num=%d asic_num=%d timeout=%d freq=%d-%s volt=%02x%02x-%s)", + bitmain->device_path, info->chain_num, info->asic_num, info->timeout, + info->frequency, info->frequency_t, info->voltage[0], info->voltage[1], info->voltage_t); + + return true; + +unshin: + btm_uninit(bitmain); + +shin: + free(bitmain->device_data); + bitmain->device_data = NULL; + + bitmain = usb_free_cgpu(bitmain); + + return false; +} + +static void bitmain_detect(bool __maybe_unused hotplug) +{ + applog(LOG_DEBUG, "BTM detect dev: %s", opt_bitmain_dev); + if(strlen(opt_bitmain_dev) <= 0) { + opt_bitmain_dev_usb = true; + } else { + opt_bitmain_dev_usb = false; + } + if(opt_bitmain_dev_usb) { + usb_detect(&bitmain_drv, bitmain_usb_detect_one); + } else { + btm_detect(&bitmain_drv, bitmain_detect_one); + } +} + +static void do_bitmain_close(struct thr_info *thr) +{ + struct cgpu_info *bitmain = thr->cgpu; + struct bitmain_info *info = bitmain->device_data; + + 
pthread_join(info->read_thr, NULL); + pthread_join(info->write_thr, NULL); + bitmain_running_reset(bitmain, info); + + info->no_matching_work = 0; + + cgsem_destroy(&info->write_sem); +} + +static void get_bitmain_statline_before(char *buf, size_t bufsiz, struct cgpu_info *bitmain) +{ + struct bitmain_info *info = bitmain->device_data; + int lowfan = 10000; + int i = 0; + + /* Find the lowest fan speed of the ASIC cooling fans. */ + for(i = 0; i < info->fan_num; i++) { + if (info->fan[i] >= 0 && info->fan[i] < lowfan) + lowfan = info->fan[i]; + } + + tailsprintf(buf, bufsiz, "%2d/%3dC %04dR | ", info->temp_avg, info->temp_max, lowfan); +} + +/* We use a replacement algorithm to only remove references to work done from + * the buffer when we need the extra space for new work. */ +static bool bitmain_fill(struct cgpu_info *bitmain) +{ + struct bitmain_info *info = bitmain->device_data; + int subid, slot; + struct work *work; + bool ret = true; + int sendret = 0, sendcount = 0, neednum = 0, queuednum = 0, sendnum = 0, sendlen = 0; + uint8_t sendbuf[BITMAIN_SENDBUF_SIZE]; + cgtimer_t ts_start; + int senderror = 0; + struct timeval now; + int timediff = 0; + int needwait=0; // add by clement. use a flag to indicate need sleep or not. + + //applog(LOG_DEBUG, "BTM bitmain_fill start--------"); + mutex_lock(&info->qlock); + if(info->fifo_space <= 0) { + //applog(LOG_DEBUG, "BTM bitmain_fill fifo space empty--------"); + ret = true; + needwait=1; // add by clement. DEVICE FIFO is full, no space for new works. So we need sleep. 
+ goto out_unlock; + } + if (bitmain->queued >= BITMAIN_MAX_WORK_QUEUE_NUM) { + ret = true; + } else { + ret = false; + } + while(info->fifo_space > 0) { + neednum = info->fifo_space<BITMAIN_MAX_WORK_NUM?info->fifo_space:BITMAIN_MAX_WORK_NUM; + queuednum = bitmain->queued; + applog(LOG_DEBUG, "BTM: Work task queued(%d) fifo space(%d) needsend(%d)", queuednum, info->fifo_space, neednum); + if(queuednum < neednum) { + while(true) { + work = get_queued(bitmain); + if (unlikely(!work)) { + break; + } else { + applog(LOG_DEBUG, "BTM get work queued number:%d neednum:%d", queuednum, neednum); + subid = bitmain->queued++; + work->subid = subid; + slot = bitmain->work_array + subid; + if (slot >= BITMAIN_ARRAY_SIZE) { // slot=edited by clement , old code is if (slot > BITMAIN_ARRAY_SIZE), not sure ,just fixed it. + applog(LOG_DEBUG, "bitmain_fill array cyc %d", BITMAIN_ARRAY_SIZE); + slot = 0; + } + if (likely(bitmain->works[slot])) { + applog(LOG_DEBUG, "bitmain_fill work_completed %d", slot); + work_completed(bitmain, bitmain->works[slot]); + } + bitmain->works[slot] = work; + queuednum++; + if(queuednum >= neednum) { + break; + } + } + } + } + if(queuednum < BITMAIN_MAX_DEAL_QUEUE_NUM) { + /* by clement + if(queuednum < neednum) { + applog(LOG_DEBUG, "BTM: No enough work to send, queue num=%d", queuednum); + break; + } + */ + + needwait=1; // if queuednum is not enough, we just wait and sleep. queuednum must be >= BITMAIN_MAX_DEAL_QUEUE_NUM, then send to device + break; + } + + sendnum = queuednum < neednum ? 
queuednum : neednum; + sendlen = bitmain_set_txtask(sendbuf, &(info->last_work_block), bitmain->works, BITMAIN_ARRAY_SIZE, bitmain->work_array, sendnum, &sendcount); + bitmain->queued -= sendnum; + info->send_full_space += sendnum; + if (bitmain->queued < 0) + bitmain->queued = 0; + if (bitmain->work_array + sendnum > BITMAIN_ARRAY_SIZE) { + bitmain->work_array = bitmain->work_array + sendnum-BITMAIN_ARRAY_SIZE; + } else { + bitmain->work_array += sendnum; + } + applog(LOG_DEBUG, "BTM: Send work array %d", bitmain->work_array); + if (sendlen > 0) { + info->fifo_space -= sendcount; + if (info->fifo_space < 0) + info->fifo_space = 0; + sendret = bitmain_send_data(sendbuf, sendlen, bitmain); + if (unlikely(sendret == BTM_SEND_ERROR)) { + applog(LOG_ERR, "BTM%i: Comms error(buffer)", bitmain->device_id); + //dev_error(bitmain, REASON_DEV_COMMS_ERROR); + info->reset = true; + info->errorcount++; + senderror = 1; + if (info->errorcount > 1000) { + info->errorcount = 0; + applog(LOG_ERR, "%s%d: Device disappeared, shutting down thread", bitmain->drv->name, bitmain->device_id); + bitmain->shutdown = true; + } + break; + } else { + applog(LOG_DEBUG, "bitmain_send_data send ret=%d", sendret); + info->errorcount = 0; + } + } else { + applog(LOG_DEBUG, "BTM: Send work bitmain_set_txtask error: %d", sendlen); + break; + } + } + +out_unlock: + cgtime(&now); + timediff = now.tv_sec - info->last_status_time.tv_sec; + if(timediff < 0) timediff = -timediff; + if (timediff > BITMAIN_SEND_STATUS_TIME) { + applog(LOG_DEBUG, "BTM: Send RX Status Token fifo_space(%d) timediff(%d)", info->fifo_space, timediff); + copy_time(&(info->last_status_time), &now); + + sendlen = bitmain_set_rxstatus((struct bitmain_rxstatus_token *) sendbuf, 0, 0, 0, 0); + if (sendlen > 0) { + sendret = bitmain_send_data(sendbuf, sendlen, bitmain); + if (unlikely(sendret == BTM_SEND_ERROR)) { + applog(LOG_ERR, "BTM%i: Comms error(buffer)", bitmain->device_id); + //dev_error(bitmain, REASON_DEV_COMMS_ERROR); + 
info->reset = true; + info->errorcount++; + senderror = 1; + if (info->errorcount > 1000) { + info->errorcount = 0; + applog(LOG_ERR, "%s%d: Device disappeared, shutting down thread", bitmain->drv->name, bitmain->device_id); + bitmain->shutdown = true; + } + } else { + info->errorcount = 0; + if (info->fifo_space <= 0) { + senderror = 1; + } + } + } + } + + if(info->send_full_space > BITMAIN_SEND_FULL_SPACE) { + info->send_full_space = 0; + mutex_unlock(&info->qlock); // add by clement. we need unlock first, then sleep. So we can let other thread run as soon as possible. + + ret = true; + cgsleep_ms(1); // just sleep a little. + } + else + { + mutex_unlock(&info->qlock); // add by clement. we need unlock first, then sleep. So we can let other thread run as soon as possible. + + if(needwait) + cgsleep_ms(1); // add by clement. if there is no work on queue, we need wait for a little time. Or this thread will hold CPU near 99%. + // In fact, we need more time in gen_hash thread!!! + } + + if(senderror) { + ret = true; + applog(LOG_DEBUG, "bitmain_fill send task sleep"); + //cgsleep_ms(1); + } + return ret; +} + +static int64_t bitmain_scanhash(struct thr_info *thr) +{ + struct cgpu_info *bitmain = thr->cgpu; + struct bitmain_info *info = bitmain->device_data; + const int chain_num = info->chain_num; + struct timeval now, then, tdiff; + int64_t hash_count, us_timeout; + struct timespec abstime; + int ret; + + /* Half nonce range */ + us_timeout = 0x80000000ll / info->asic_num / info->frequency; + tdiff.tv_sec = us_timeout / 1000000; + tdiff.tv_usec = us_timeout - (tdiff.tv_sec * 1000000); + cgtime(&now); + timeradd(&now, &tdiff, &then); + abstime.tv_sec = then.tv_sec; + abstime.tv_nsec = then.tv_usec * 1000; + + //applog(LOG_DEBUG, "bitmain_scanhash info->qlock start"); + mutex_lock(&info->qlock); + hash_count = 0xffffffffull * (uint64_t)info->nonces; + bitmain->results += info->nonces + info->idle; + if (bitmain->results > chain_num) + bitmain->results = chain_num; + 
if (!info->reset) + bitmain->results--; + info->nonces = info->idle = 0; + mutex_unlock(&info->qlock); + //applog(LOG_DEBUG, "bitmain_scanhash info->qlock stop"); + + /* Check for nothing but consecutive bad results or consistently less + * results than we should be getting and reset the FPGA if necessary */ + //if (bitmain->results < -chain_num && !info->reset) { + // applog(LOG_ERR, "BTM%d: Result return rate low, resetting!", + // bitmain->device_id); + // info->reset = true; + //} + + if (unlikely(bitmain->usbinfo.nodev)) { + applog(LOG_ERR, "BTM%d: Device disappeared, shutting down thread", + bitmain->device_id); + bitmain->shutdown = true; + } + + /* This hashmeter is just a utility counter based on returned shares */ + return hash_count; +} + +static void bitmain_flush_work(struct cgpu_info *bitmain) +{ + struct bitmain_info *info = bitmain->device_data; + int i = 0; + + mutex_lock(&info->qlock); + /* Will overwrite any work queued */ + applog(LOG_ERR, "bitmain_flush_work queued=%d array=%d", bitmain->queued, bitmain->work_array); + if(bitmain->queued > 0) { + if (bitmain->work_array + bitmain->queued > BITMAIN_ARRAY_SIZE) { + bitmain->work_array = bitmain->work_array + bitmain->queued-BITMAIN_ARRAY_SIZE; + } else { + bitmain->work_array += bitmain->queued; + } + } + bitmain->queued = 0; + //bitmain->work_array = 0; + //for(i = 0; i < BITMAIN_ARRAY_SIZE; i++) { + // bitmain->works[i] = NULL; + //} + //pthread_cond_signal(&info->qcond); + mutex_unlock(&info->qlock); +} + +static struct api_data *bitmain_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + struct bitmain_info *info = cgpu->device_data; + char buf[64]; + int i = 0; + double hwp = (cgpu->hw_errors + cgpu->diff1) ? 
+ (double)(cgpu->hw_errors) / (double)(cgpu->hw_errors + cgpu->diff1) : 0; + + root = api_add_int(root, "baud", &(info->baud), false); + root = api_add_int(root, "miner_count", &(info->chain_num), false); + root = api_add_int(root, "asic_count", &(info->asic_num), false); + root = api_add_int(root, "timeout", &(info->timeout), false); + root = api_add_string(root, "frequency", info->frequency_t, false); + root = api_add_string(root, "voltage", info->voltage_t, false); + root = api_add_int(root, "hwv1", &(info->hw_version[0]), false); + root = api_add_int(root, "hwv2", &(info->hw_version[1]), false); + root = api_add_int(root, "hwv3", &(info->hw_version[2]), false); + root = api_add_int(root, "hwv4", &(info->hw_version[3]), false); + + root = api_add_int(root, "fan_num", &(info->fan_num), false); + root = api_add_int(root, "fan1", &(info->fan[0]), false); + root = api_add_int(root, "fan2", &(info->fan[1]), false); + root = api_add_int(root, "fan3", &(info->fan[2]), false); + root = api_add_int(root, "fan4", &(info->fan[3]), false); + root = api_add_int(root, "fan5", &(info->fan[4]), false); + root = api_add_int(root, "fan6", &(info->fan[5]), false); + root = api_add_int(root, "fan7", &(info->fan[6]), false); + root = api_add_int(root, "fan8", &(info->fan[7]), false); + root = api_add_int(root, "fan9", &(info->fan[8]), false); + root = api_add_int(root, "fan10", &(info->fan[9]), false); + root = api_add_int(root, "fan11", &(info->fan[10]), false); + root = api_add_int(root, "fan12", &(info->fan[11]), false); + root = api_add_int(root, "fan13", &(info->fan[12]), false); + root = api_add_int(root, "fan14", &(info->fan[13]), false); + root = api_add_int(root, "fan15", &(info->fan[14]), false); + root = api_add_int(root, "fan16", &(info->fan[15]), false); + + root = api_add_int(root, "temp_num", &(info->temp_num), false); + root = api_add_int(root, "temp1", &(info->temp[0]), false); + root = api_add_int(root, "temp2", &(info->temp[1]), false); + root = api_add_int(root, 
"temp3", &(info->temp[2]), false); + root = api_add_int(root, "temp4", &(info->temp[3]), false); + root = api_add_int(root, "temp5", &(info->temp[4]), false); + root = api_add_int(root, "temp6", &(info->temp[5]), false); + root = api_add_int(root, "temp7", &(info->temp[6]), false); + root = api_add_int(root, "temp8", &(info->temp[7]), false); + root = api_add_int(root, "temp9", &(info->temp[8]), false); + root = api_add_int(root, "temp10", &(info->temp[9]), false); + root = api_add_int(root, "temp11", &(info->temp[10]), false); + root = api_add_int(root, "temp12", &(info->temp[11]), false); + root = api_add_int(root, "temp13", &(info->temp[12]), false); + root = api_add_int(root, "temp14", &(info->temp[13]), false); + root = api_add_int(root, "temp15", &(info->temp[14]), false); + root = api_add_int(root, "temp16", &(info->temp[15]), false); + root = api_add_int(root, "temp_avg", &(info->temp_avg), false); + root = api_add_int(root, "temp_max", &(info->temp_max), false); + root = api_add_percent(root, "Device Hardware%", &hwp, true); + root = api_add_int(root, "no_matching_work", &(info->no_matching_work), false); + /* + for (i = 0; i < info->chain_num; i++) { + char mcw[24]; + + sprintf(mcw, "match_work_count%d", i + 1); + root = api_add_int(root, mcw, &(info->matching_work[i]), false); + }*/ + + root = api_add_int(root, "chain_acn1", &(info->chain_asic_num[0]), false); + root = api_add_int(root, "chain_acn2", &(info->chain_asic_num[1]), false); + root = api_add_int(root, "chain_acn3", &(info->chain_asic_num[2]), false); + root = api_add_int(root, "chain_acn4", &(info->chain_asic_num[3]), false); + root = api_add_int(root, "chain_acn5", &(info->chain_asic_num[4]), false); + root = api_add_int(root, "chain_acn6", &(info->chain_asic_num[5]), false); + root = api_add_int(root, "chain_acn7", &(info->chain_asic_num[6]), false); + root = api_add_int(root, "chain_acn8", &(info->chain_asic_num[7]), false); + root = api_add_int(root, "chain_acn9", 
&(info->chain_asic_num[8]), false); + root = api_add_int(root, "chain_acn10", &(info->chain_asic_num[9]), false); + root = api_add_int(root, "chain_acn11", &(info->chain_asic_num[10]), false); + root = api_add_int(root, "chain_acn12", &(info->chain_asic_num[11]), false); + root = api_add_int(root, "chain_acn13", &(info->chain_asic_num[12]), false); + root = api_add_int(root, "chain_acn14", &(info->chain_asic_num[13]), false); + root = api_add_int(root, "chain_acn15", &(info->chain_asic_num[14]), false); + root = api_add_int(root, "chain_acn16", &(info->chain_asic_num[15]), false); + + //applog(LOG_ERR, "chain asic status:%s", info->chain_asic_status_t[0]); + root = api_add_string(root, "chain_acs1", info->chain_asic_status_t[0], false); + root = api_add_string(root, "chain_acs2", info->chain_asic_status_t[1], false); + root = api_add_string(root, "chain_acs3", info->chain_asic_status_t[2], false); + root = api_add_string(root, "chain_acs4", info->chain_asic_status_t[3], false); + root = api_add_string(root, "chain_acs5", info->chain_asic_status_t[4], false); + root = api_add_string(root, "chain_acs6", info->chain_asic_status_t[5], false); + root = api_add_string(root, "chain_acs7", info->chain_asic_status_t[6], false); + root = api_add_string(root, "chain_acs8", info->chain_asic_status_t[7], false); + root = api_add_string(root, "chain_acs9", info->chain_asic_status_t[8], false); + root = api_add_string(root, "chain_acs10", info->chain_asic_status_t[9], false); + root = api_add_string(root, "chain_acs11", info->chain_asic_status_t[10], false); + root = api_add_string(root, "chain_acs12", info->chain_asic_status_t[11], false); + root = api_add_string(root, "chain_acs13", info->chain_asic_status_t[12], false); + root = api_add_string(root, "chain_acs14", info->chain_asic_status_t[13], false); + root = api_add_string(root, "chain_acs15", info->chain_asic_status_t[14], false); + root = api_add_string(root, "chain_acs16", info->chain_asic_status_t[15], false); + + 
//root = api_add_int(root, "chain_acs1", &(info->chain_asic_status[0]), false); + //root = api_add_int(root, "chain_acs2", &(info->chain_asic_status[1]), false); + //root = api_add_int(root, "chain_acs3", &(info->chain_asic_status[2]), false); + //root = api_add_int(root, "chain_acs4", &(info->chain_asic_status[3]), false); + + return root; +} + +static void bitmain_shutdown(struct thr_info *thr) +{ + do_bitmain_close(thr); +} + +char *set_bitmain_dev(char *arg) +{ + if(arg == NULL || strlen(arg) <= 0) { + memset(opt_bitmain_dev, 0, 256); + } else { + snprintf(opt_bitmain_dev, sizeof(opt_bitmain_dev), "%s", arg); + } + applog(LOG_DEBUG, "BTM set device: %s", opt_bitmain_dev); + return NULL; +} + +char *set_bitmain_fan(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to bitmain-fan"; + if (ret == 1) + val2 = val1; + + if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100 || val2 < val1) + return "Invalid value passed to bitmain-fan"; + + opt_bitmain_fan_min = val1 * BITMAIN_PWM_MAX / 100; + opt_bitmain_fan_max = val2 * BITMAIN_PWM_MAX / 100; + + return NULL; +} + +char *set_bitmain_freq(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to bitmain-freq"; + if (ret == 1) + val2 = val1; + + if (val1 < BITMAIN_MIN_FREQUENCY || val1 > BITMAIN_MAX_FREQUENCY || + val2 < BITMAIN_MIN_FREQUENCY || val2 > BITMAIN_MAX_FREQUENCY || + val2 < val1) + return "Invalid value passed to bitmain-freq"; + + opt_bitmain_freq_min = val1; + opt_bitmain_freq_max = val2; + + return NULL; +} + +struct device_drv bitmain_drv = { + .drv_id = DRIVER_bitmain, + .dname = "Bitmain", + .name = "BTM", + .drv_detect = bitmain_detect, + .thread_prepare = bitmain_prepare, + .hash_work = hash_queued_work, + .queue_full = bitmain_fill, + .scanwork = bitmain_scanhash, + .flush_work = bitmain_flush_work, + .get_api_stats = bitmain_api_stats, + .get_statline_before = 
get_bitmain_statline_before, + .reinit_device = bitmain_init, + .thread_shutdown = bitmain_shutdown, +}; diff --git a/driver-bitmain.h b/driver-bitmain.h new file mode 100644 index 0000000..7336fcc --- /dev/null +++ b/driver-bitmain.h @@ -0,0 +1,314 @@ +/* + * Copyright 2013 BitMain project + * Copyright 2013 BitMain + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef BITMAIN_H +#define BITMAIN_H + +#ifdef USE_BITMAIN + +#include "util.h" + +//#define BITMAIN_TYPE_S1 +//#define BITMAIN_TYPE_S2 +//#define BITMAIN_TYPE_S3 +#define BITMAIN_TYPE_S4 + +#define BITMAIN_RESET_FAULT_DECISECONDS 1 +#define BITMAIN_MINER_THREADS 1 + +#define BITMAIN_IO_SPEED 115200 +#define BITMAIN_HASH_TIME_FACTOR ((float)1.67/0x32) +#define BITMAIN_RESET_PITCH (300*1000*1000) + +#define BITMAIN_TOKEN_TYPE_TXCONFIG 0x51 +#define BITMAIN_TOKEN_TYPE_TXTASK 0x52 +#define BITMAIN_TOKEN_TYPE_RXSTATUS 0x53 + +#define BITMAIN_DATA_TYPE_RXSTATUS 0xa1 +#define BITMAIN_DATA_TYPE_RXNONCE 0xa2 + +#define BITMAIN_FAN_FACTOR 60 +#define BITMAIN_PWM_MAX 0xA0 +#define BITMAIN_DEFAULT_FAN_MIN 20 +#define BITMAIN_DEFAULT_FAN_MAX 100 +#define BITMAIN_DEFAULT_FAN_MAX_PWM 0xA0 /* 100% */ +#define BITMAIN_DEFAULT_FAN_MIN_PWM 0x20 /* 20% */ + +#define BITMAIN_TEMP_TARGET 50 +#define BITMAIN_TEMP_HYSTERESIS 3 +#define BITMAIN_TEMP_OVERHEAT 60 + +#define BITMAIN_DEFAULT_TIMEOUT 0x2D +#define BITMAIN_MIN_FREQUENCY 10 +#define BITMAIN_MAX_FREQUENCY 1000000 +#define BITMAIN_TIMEOUT_FACTOR 12690 +#define BITMAIN_DEFAULT_FREQUENCY 282 +#define BITMAIN_DEFAULT_VOLTAGE_T "0725" +#define BITMAIN_DEFAULT_VOLTAGE0 0x07 +#define BITMAIN_DEFAULT_VOLTAGE1 0x25 +#define BITMAIN_DEFAULT_CHAIN_NUM 8 +#define BITMAIN_DEFAULT_ASIC_NUM 32 +#define BITMAIN_DEFAULT_REG_DATA 0 + +#define 
BITMAIN_AUTO_CYCLE 1024 + +#define BITMAIN_FTDI_READSIZE 2048 +#define BITMAIN_USB_PACKETSIZE 512 +#define BITMAIN_SENDBUF_SIZE 8192 +#define BITMAIN_READBUF_SIZE 8192 +#define BITMAIN_RESET_TIMEOUT 100 +#define BITMAIN_READ_TIMEOUT 18 /* Enough to only half fill the buffer */ +#define BITMAIN_LATENCY 1 + +#ifdef BITMAIN_TYPE_S1 +#define BITMAIN_MAX_WORK_NUM 8 +#define BITMAIN_MAX_WORK_QUEUE_NUM 64 +#define BITMAIN_MAX_DEAL_QUEUE_NUM 1 +#define BITMAIN_MAX_NONCE_NUM 8 +#define BITMAIN_MAX_CHAIN_NUM 8 +#define BITMAIN_MAX_TEMP_NUM 32 +#define BITMAIN_MAX_FAN_NUM 32 +#define BITMAIN_ARRAY_SIZE 16384 +#define BITMAIN_SEND_STATUS_TIME 10 //s +#define BITMAIN_SEND_FULL_SPACE 128 +#endif + +#ifdef BITMAIN_TYPE_S2 +#define BITMAIN_MAX_WORK_NUM 64 +#define BITMAIN_MAX_WORK_QUEUE_NUM 4096 +#define BITMAIN_MAX_DEAL_QUEUE_NUM 32 +#define BITMAIN_MAX_NONCE_NUM 128 +#define BITMAIN_MAX_CHAIN_NUM 16 +#define BITMAIN_MAX_TEMP_NUM 32 +#define BITMAIN_MAX_FAN_NUM 32 +#define BITMAIN_ARRAY_SIZE 16384 +#define BITMAIN_SEND_STATUS_TIME 15 //s +#define BITMAIN_SEND_FULL_SPACE 512 +#endif + +#ifdef BITMAIN_TYPE_S3 +#define BITMAIN_MAX_WORK_NUM 8 +#define BITMAIN_MAX_WORK_QUEUE_NUM 1024 +#define BITMAIN_MAX_DEAL_QUEUE_NUM 2 +#define BITMAIN_MAX_NONCE_NUM 128 +#define BITMAIN_MAX_CHAIN_NUM 8 +#define BITMAIN_MAX_TEMP_NUM 32 +#define BITMAIN_MAX_FAN_NUM 32 +#define BITMAIN_ARRAY_SIZE 16384 +#define BITMAIN_SEND_STATUS_TIME 15 //s +#define BITMAIN_SEND_FULL_SPACE 256 +#endif + +#ifdef BITMAIN_TYPE_S4 +#define BITMAIN_MAX_WORK_NUM 64 +#define BITMAIN_MAX_WORK_QUEUE_NUM 4096 +#define BITMAIN_MAX_DEAL_QUEUE_NUM 32 +#define BITMAIN_MAX_NONCE_NUM 128 +#define BITMAIN_MAX_CHAIN_NUM 16 +#define BITMAIN_MAX_TEMP_NUM 32 +#define BITMAIN_MAX_FAN_NUM 32 +#define BITMAIN_ARRAY_SIZE 16384*2 +#define BITMAIN_SEND_STATUS_TIME 15 //s +#define BITMAIN_SEND_FULL_SPACE 512 +#endif + +struct bitmain_packet_head { + uint8_t token_type; + uint8_t version; + uint16_t length; +} __attribute__((packed, 
aligned(4))); + +struct bitmain_txconfig_token { + uint8_t token_type; + uint8_t version; + uint16_t length; + uint8_t reset :1; + uint8_t fan_eft :1; + uint8_t timeout_eft :1; + uint8_t frequency_eft :1; + uint8_t voltage_eft :1; + uint8_t chain_check_time_eft :1; + uint8_t chip_config_eft :1; + uint8_t hw_error_eft :1; + uint8_t beeper_ctrl :1; + uint8_t temp_over_ctrl :1; + uint8_t fan_home_mode :1; + uint8_t reserved1 :5; + uint8_t chain_check_time; + uint8_t reserved2; + + uint8_t chain_num; + uint8_t asic_num; + uint8_t fan_pwm_data; + uint8_t timeout_data; + + uint16_t frequency; + uint8_t voltage[2]; + + uint8_t reg_data[4]; + uint8_t chip_address; + uint8_t reg_address; + uint16_t crc; +} __attribute__((packed, aligned(4))); + +struct bitmain_txtask_work { + uint32_t work_id; + uint8_t midstate[32]; + uint8_t data2[12]; +} __attribute__((packed, aligned(4))); + +struct bitmain_txtask_token { + uint8_t token_type; + uint8_t version; + uint16_t length; + uint8_t new_block :1; + uint8_t reserved1 :7; + uint8_t diff; + uint16_t net_diff; + struct bitmain_txtask_work works[BITMAIN_MAX_WORK_NUM]; + uint16_t crc; +} __attribute__((packed, aligned(4))); + +struct bitmain_rxstatus_token { + uint8_t token_type; + uint8_t version; + uint16_t length; + uint8_t chip_status_eft :1; + uint8_t detect_get :1; + uint8_t reserved1 :6; + uint8_t reserved2[3]; + + uint8_t chip_address; + uint8_t reg_address; + uint16_t crc; +} __attribute__((packed, aligned(4))); + +struct bitmain_rxstatus_data { + uint8_t data_type; + uint8_t version; + uint16_t length; + uint8_t chip_value_eft :1; + uint8_t reserved1 :3; + uint8_t get_blk_num :4; + uint8_t chain_num; + uint16_t fifo_space; + uint8_t hw_version[4]; + uint8_t fan_num; + uint8_t temp_num; + uint16_t fan_exist; + uint32_t temp_exist; + uint32_t nonce_error; + uint32_t reg_value; + uint32_t chain_asic_exist[BITMAIN_MAX_CHAIN_NUM*8]; + uint32_t chain_asic_status[BITMAIN_MAX_CHAIN_NUM*8]; + uint8_t 
chain_asic_num[BITMAIN_MAX_CHAIN_NUM]; + uint8_t temp[BITMAIN_MAX_TEMP_NUM]; + uint8_t fan[BITMAIN_MAX_FAN_NUM]; + uint16_t crc; +} __attribute__((packed, aligned(4))); + +struct bitmain_rxnonce_nonce { + uint32_t work_id; + uint32_t nonce; +} __attribute__((packed, aligned(4))); + +struct bitmain_rxnonce_data { + uint8_t data_type; + uint8_t version; + uint16_t length; + uint16_t fifo_space; + uint16_t diff; + uint64_t total_nonce_num; + struct bitmain_rxnonce_nonce nonces[BITMAIN_MAX_NONCE_NUM]; + uint16_t crc; +} __attribute__((packed, aligned(4))); + +struct bitmain_info { + int baud; + int chain_num; + int asic_num; + int chain_asic_num[BITMAIN_MAX_CHAIN_NUM]; + uint32_t chain_asic_exist[BITMAIN_MAX_CHAIN_NUM*8]; + uint32_t chain_asic_status[BITMAIN_MAX_CHAIN_NUM*8]; + char chain_asic_status_t[BITMAIN_MAX_CHAIN_NUM][320]; + int timeout; + int errorcount; + uint32_t nonce_error; + uint32_t last_nonce_error; + uint8_t reg_data[4]; + + int fan_num; + int fan[BITMAIN_MAX_FAN_NUM]; + int temp_num; + int temp[BITMAIN_MAX_TEMP_NUM]; + + int temp_max; + int temp_avg; + int temp_history_count; + int temp_history_index; + int temp_sum; + int temp_old; + int fan_pwm; + uint64_t total_nonce_num; + + int frequency; + char frequency_t[256]; + uint8_t voltage[2]; + char voltage_t[8]; + + int diff; + + int no_matching_work; + //int matching_work[BITMAIN_DEFAULT_CHAIN_NUM]; + + struct thr_info *thr; + pthread_t read_thr; + pthread_t write_thr; + pthread_mutex_t lock; + pthread_mutex_t qlock; + pthread_cond_t qcond; + cgsem_t write_sem; + int nonces; + int fifo_space; + int hw_version[4]; + unsigned int last_work_block; + struct timeval last_status_time; + int send_full_space; + + int idle; + bool reset; + bool overheat; + bool optimal; +}; + +#define BITMAIN_READ_SIZE 12 + +#define BTM_GETS_ERROR -1 +#define BTM_GETS_OK 0 + +#define BTM_SEND_ERROR -1 +#define BTM_SEND_OK 0 + +#define BITMAIN_READ_TIME(baud) ((double)BITMAIN_READ_SIZE * (double)8.0 / (double)(baud)) +#define 
ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1] +ASSERT1(sizeof(uint32_t) == 4); + +extern struct bitmain_info **bitmain_info; +extern char opt_bitmain_dev[256]; +extern int opt_bitmain_temp; +extern int opt_bitmain_overheat; +extern int opt_bitmain_fan_min; +extern int opt_bitmain_fan_max; +extern bool opt_bitmain_auto; +extern char *set_bitmain_dev(char *arg); +extern char *set_bitmain_fan(char *arg); + +#endif /* USE_BITMAIN */ +#endif /* BITMAIN_H */ diff --git a/driver-blockerupter.c b/driver-blockerupter.c new file mode 100644 index 0000000..0c57ec8 --- /dev/null +++ b/driver-blockerupter.c @@ -0,0 +1,501 @@ +#include "config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef WIN32 +#include +#include +#include +#ifndef O_CLOEXEC +#define O_CLOEXEC 0 +#endif +#else +#include +#include +#endif + +#include "elist.h" +#include "miner.h" +#include "driver-blockerupter.h" +#include "usbutils.h" + +static void blockerupter_space_mode(struct cgpu_info *blockerupter) +{ + int interface; + unsigned int bits = 0; + + interface = usb_interface(blockerupter); + + bits |= CP210X_BITS_DATA_8; + bits |= CP210X_BITS_PARITY_SPACE; + + usb_transfer_data(blockerupter, CP210X_TYPE_OUT, CP210X_SET_LINE_CTL, bits, interface, NULL, 0, C_SETPARITY); +} + +static void blockerupter_mark_mode(struct cgpu_info *blockerupter) +{ + int interface; + unsigned int bits = 0; + + interface = usb_interface(blockerupter); + + bits |= CP210X_BITS_DATA_8; + bits |= CP210X_BITS_PARITY_MARK; + + usb_transfer_data(blockerupter, CP210X_TYPE_OUT, CP210X_SET_LINE_CTL, bits, interface, NULL, 0, C_SETPARITY); + +} + +static void blockerupter_init_com(struct cgpu_info *blockerupter) +{ + uint32_t baudrate; + int interface; + + if (blockerupter->usbinfo.nodev) + return; + + interface = usb_interface(blockerupter); + + // Enable the UART + usb_transfer_data(blockerupter, CP210X_TYPE_OUT, CP210X_REQUEST_IFC_ENABLE, + 
CP210X_VALUE_UART_ENABLE, interface, NULL, 0, C_ENABLE_UART); + if (blockerupter->usbinfo.nodev) + return; + + // Set data control + usb_transfer_data(blockerupter, CP210X_TYPE_OUT, CP210X_REQUEST_DATA, CP210X_VALUE_DATA, + interface, NULL, 0, C_SETDATA); + + if (blockerupter->usbinfo.nodev) + return; + + // Set the baud + baudrate = BET_BAUD; + + usb_transfer_data(blockerupter, CP210X_TYPE_OUT, CP210X_REQUEST_BAUD, 0, + interface, &baudrate, sizeof (baudrate), C_SETBAUD); + + // Set space mode + blockerupter_space_mode(blockerupter); +} + +static int blockerupter_send(struct cgpu_info *blockerupter, char *data, int len) +{ + int err; + int bytes_sent; + + if (unlikely(blockerupter->usbinfo.nodev)) + return SEND_FAIL; + + err = usb_write(blockerupter, data, len, &bytes_sent, C_BET_WRITE); + + if (err || bytes_sent != len) { + applog(LOG_DEBUG, "blockerupter: Send (%d/%d)", bytes_sent, len); + return SEND_FAIL; + } + + return SEND_OK; +} + +static int blockerupter_read(struct cgpu_info *blockerupter, char *data, int len) +{ + int err; + int bytes_read; + + if (unlikely(blockerupter->usbinfo.nodev)) + return READ_FAIL; + + err = usb_read_timeout(blockerupter, data, len, &bytes_read, 2, C_BET_READ); + + if (err || bytes_read != len) { + applog(LOG_DEBUG, "blockerupter: Read (%d/%d)", bytes_read, len); + return READ_FAIL; + } + + return READ_OK; +} + +static void blockerupter_setclock(struct cgpu_info *blockerupter, uint8_t clock) +{ + struct blockerupter_info *info; + info = blockerupter->device_data; + char command; + int err; + + command = C_GCK | clock; + info->clock = clock; + err = blockerupter_send(blockerupter, &command, 1); + if (!err) + applog(LOG_DEBUG, "%s%d: Set Clock to %d MHz", blockerupter->drv->name, + blockerupter->device_id, (clock + 1) * 10 / 2); +} + +static void blockerupter_setdiff(struct cgpu_info *blockerupter, int diff) +{ + struct blockerupter_info *info; + info = blockerupter->device_data; + char command,bits; + int err; + int local_diff; + 
+ // min_diff for driver is 64 + if (diff >= 262144) { + bits = 3; + local_diff = 262144; + } else if (diff >= 4096) { + bits = 2; + local_diff = 4096; + } else { + bits = 1; + local_diff = 64; + } + + if (local_diff == info->diff) + return; + + command = C_DIF | bits; + err = blockerupter_send(blockerupter, &command, 1); + if (!err) { + applog(LOG_DEBUG, "%s%d: Set Diff Bits to %d", blockerupter->drv->name, + blockerupter->device_id, bits); + info->diff = local_diff; + } +} + +static void blockerupter_setrolling(struct cgpu_info *blockerupter, uint8_t rolling) +{ + struct blockerupter_info *info; + info = blockerupter->device_data; + char command; + int err; + + command = C_LPO | rolling; + err = blockerupter_send(blockerupter, &command, 1); + + if (!err) { + applog(LOG_DEBUG, "%s%d: Set nTime Rolling to %d seconds", blockerupter->drv->name, + blockerupter->device_id, (rolling + 1) * 30); + info->rolling = (rolling + 1) * 30; + } +} + +static void blockerupter_init(struct cgpu_info *blockerupter) +{ + struct blockerupter_info *info; + + info = blockerupter->device_data; + // Set Clock + if (!opt_bet_clk || opt_bet_clk< 19 || opt_bet_clk > 31) { + opt_bet_clk = BET_CLOCK_DEFAULT; + } + blockerupter_setclock(blockerupter, opt_bet_clk); + info->clock = (opt_bet_clk + 1) * 10; + info->expected = info->clock * 24 * 32 * info->found / 1000.0; + // Set Diff + blockerupter_setdiff(blockerupter, BET_DIFF_DEFAULT); + info->diff = BET_DIFF_DEFAULT; + // Set nTime Rolling + blockerupter_setrolling(blockerupter, BET_ROLLING_DEFAULT); + info->rolling = (BET_ROLLING_DEFAULT + 1) * 30; + cgtime(&info->start_time); +} + +static struct cgpu_info *blockerupter_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct blockerupter_info *info; + struct cgpu_info *blockerupter = usb_alloc_cgpu(&blockerupter_drv, 1); + int i, err; + char reset = C_RES; + + if (!usb_init(blockerupter, dev, found)) { + applog(LOG_ERR, "Blockerupter usb init failed"); + blockerupter 
= usb_free_cgpu(blockerupter); + return NULL; + } + + blockerupter->device_data = (struct blockerupter_info *) malloc(sizeof(struct blockerupter_info)); + info = blockerupter->device_data; + memset(info, 0, sizeof(blockerupter_info)); + blockerupter_init_com(blockerupter); + + err = blockerupter_send(blockerupter, &reset, 1); + if (err) { + applog(LOG_ERR, "Blockerupter detect failed"); + blockerupter = usb_free_cgpu(blockerupter); + return NULL; + } + cgsleep_ms(5000); + + + for (i = 0; i < BET_MAXBOARDS; i++) { + char detect, answer; + + answer = 0; + detect = C_ASK | (uint8_t)i; + blockerupter_send(blockerupter, &detect, 1); + blockerupter_read(blockerupter, &answer, 1); + if (answer == A_WAL) { + applog(LOG_DEBUG, "BlockErupter found Board: %d", i); + info->boards[i] = 1; + info->found++; + } else { + applog(LOG_DEBUG, "BlockErupter missing board: %d, received %02x", + i, answer); + } + } + + if (!info->found) { + usb_free_cgpu(blockerupter); + free(info); + return NULL; + } else { + blockerupter->threads = 1; + add_cgpu(blockerupter); + applog(LOG_DEBUG, "Add BlockErupter with %d/%d Boards", info->found, + BET_MAXBOARDS); + blockerupter_init(blockerupter); + return blockerupter; + } +} + +static inline void blockerupter_detect(bool __maybe_unused hotplug) +{ + usb_detect(&blockerupter_drv, blockerupter_detect_one); +} + +static struct api_data *blockerupter_api_stats(struct cgpu_info *blockerupter) +{ + struct blockerupter_info *info = blockerupter->device_data; + struct api_data *root = NULL; + struct timeval now, elapsed; + char buf[32]; + int i; + + cgtime(&now); + timersub(&now, &info->start_time, &elapsed); + + info->hashrate = elapsed.tv_sec ? 
info->hashes * 4.295 / elapsed.tv_sec : 0; + info->eff = info->hashrate / info->expected; + + root = api_add_int(root, "Nonces", &info->nonces, false); + root = api_add_uint8(root, "Board", &info->found, false); + root = api_add_int(root, "Clock", &info->clock, false); + root = api_add_int(root,"Accepted", &info->accepted, false); + root = api_add_double(root, "HashRate", &info->hashrate , false); + root = api_add_double(root, "Expected", &info->expected , false); + root = api_add_double(root, "Efficiency", &info->eff, false); + for (i = 0; i < BET_MAXBOARDS; i++) { + double brd_hashrate; + + if (info->boards[i]) { + sprintf(buf, "Board%02d accepted", i); + root = api_add_int(root, buf, &info->b_info[i].accepted, false); + sprintf(buf, "Board%02d nonces", i); + root = api_add_int(root, buf, &info->b_info[i].nonces, false); + sprintf(buf, "Board%02d hwerror", i); + root = api_add_double(root, buf, &info->b_info[i].hwe, false); + sprintf(buf, "Board%02d hashrate", i); + brd_hashrate = elapsed.tv_sec ? 
info->b_info[i].hashes * 4.295 / elapsed.tv_sec : 0; + root = api_add_double(root, buf, &brd_hashrate, false); + } + } + + return root; +} + +static bool blockerupter_prepare(struct thr_info *thr) +{ + struct cgpu_info *blockerupter = thr->cgpu; + struct blockerupter_info *info = blockerupter->device_data; + + cglock_init(&(info->pool.data_lock)); + + return true; +} + +static void blockerupter_sendjob(struct cgpu_info *blockerupter, int board) +{ + struct blockerupter_info *info = blockerupter->device_data; + struct thr_info *thr = blockerupter->thr[0]; + struct work *work; + uint8_t command, answer; + int err; + + work = get_work(thr, thr->id); + memcpy(&info->works[info->work_idx],work,sizeof(struct work)); + + blockerupter_setdiff(blockerupter,floor(work->work_difficulty)); + + command = C_JOB | (uint8_t)board; + blockerupter_send(blockerupter, (char *)&command, 1); + blockerupter_mark_mode(blockerupter); + cgsleep_ms(1); + + blockerupter_send(blockerupter, (char *)(work->midstate), 32); + blockerupter_send(blockerupter, (char *)&(work->data[64]), 12); + blockerupter_send(blockerupter, (char *)&work->nonce2, 4); + blockerupter_send(blockerupter, (char *)&info->work_idx, 1); + + cgsleep_ms(1); + blockerupter_space_mode(blockerupter); + + answer = 0; + err = blockerupter_read(blockerupter, (char *)&answer, 1); + + cgtime(&info->last_job); + + if (err || answer != A_GET) { + applog(LOG_ERR, "%s%d: Sync Error", blockerupter->drv->name, blockerupter->device_id); + } else { + info->b_info[board].job_count++; + applog(LOG_DEBUG, "%s%d: Sent work %d to board %d", blockerupter->drv->name, + blockerupter->device_id, info->work_idx, board); + } + + info->work_idx++; + if (info->work_idx >= BET_WORK_FIFO) + info->work_idx = 0; +} + +static uint64_t blockerupter_checknonce(struct cgpu_info *blockerupter, struct blockerupter_response *resp, int board) +{ + uint8_t test; + struct blockerupter_info *info; + struct thr_info *thr = blockerupter->thr[0]; + struct work work; + 
uint32_t nonce; + uint64_t hashes=0; + int i; + struct board_info *cur_brd; + struct asic_info *cur_asic; + + info = blockerupter->device_data; + work = info->works[resp->work_idx]; + + nonce = *(uint32_t *)resp->nonce; + + applog(LOG_DEBUG, "%s%d: Nonce %08x from board %d, asic %d for work %d", + blockerupter->drv->name, blockerupter->device_id, *(uint32_t *) resp->nonce, + board, resp->chip, resp->work_idx); + + memcpy(work.data + 4 + 32 + 32, resp->ntime, 4); + __bin2hex(work.ntime, resp->ntime, 4); + + info->nonces++; + cur_brd = &info->b_info[board]; + cur_brd->nonces++; + cur_asic = &info->b_info[board].asics[resp->chip]; + cur_asic->nonces++; + + for (i = 0; i < BET_NONCE_FIX; i++) { + test = test_nonce_diff(&work, nonce + i, (double)info->diff); + if (test) { + applog(LOG_DEBUG, "%s%d: Nonce Fix Pass @%d", blockerupter->drv->name, + blockerupter->device_id, i); + info->hashes += info->diff; + cur_brd->hashes += info->diff; + cur_asic->hashes += info->diff; + if (test_nonce_diff(&work, nonce + i, work.work_difficulty)) { + if (submit_nonce(thr, &work, nonce + i)) { + hashes += floor(work.work_difficulty) * (uint64_t) 0xffffffff; + info->accepted++; + cur_brd->accepted++; + cur_asic->accepted++; + } + } + break; + } + } + + if (i == BET_NONCE_FIX) { + applog(LOG_DEBUG, "%s%d: Nonce Fix Failed", blockerupter->drv->name, + blockerupter->device_id); + cur_brd->bad++; + cur_brd->hwe = cur_brd->nonces ? (double)cur_brd->bad / cur_brd->nonces : 0; + cur_asic->bad++; + cur_asic->hwe = cur_asic->nonces ? 
(double)cur_asic->bad / cur_asic->nonces : 0; + } + return hashes; +} + +static uint64_t blockerupter_getresp(struct cgpu_info *blockerupter, int board) +{ + struct blockerupter_response *resp; + int err; + uint64_t hashes = 0; + + resp = (struct blockerupter_response *) malloc(BET_RESP_SZ); + err = blockerupter_read(blockerupter, (char *)resp, BET_RESP_SZ); + if (!err) + hashes = blockerupter_checknonce(blockerupter, resp, board); + free(resp); + return hashes; +} + +static int64_t blockerupter_scanhash(struct thr_info *thr) +{ + struct cgpu_info *blockerupter = thr->cgpu; + struct blockerupter_info *info = blockerupter->device_data; + char ask; + uint8_t answer; + int i; + int64_t hashes=0; + + if (unlikely(blockerupter->usbinfo.nodev)) { + applog(LOG_ERR, "%s%d: Device disappeared, shutting down thread", + blockerupter->drv->name, blockerupter->device_id); + return -1; + } + + for (i = 0; i < BET_MAXBOARDS; i++) { + if (!info->boards[i]) + continue; + ask = C_ASK | (uint8_t)i; + blockerupter_send(blockerupter, &ask, 1); + cgsleep_ms(1); + answer = 0; + blockerupter_read(blockerupter, (char *)&answer, 1); + + switch (answer) { + case A_WAL: + blockerupter_sendjob(blockerupter, i); + break; + case A_YES: + hashes += blockerupter_getresp(blockerupter, i); + break; + case A_NO: + break; + default: + applog(LOG_ERR, "%s%d: Unexpected value %02x received", blockerupter->drv->name, + blockerupter->device_id, answer); + break; + } + } + + return hashes; +} + +static void blockerupter_flush_work(struct cgpu_info *blockerupter) +{ + uint8_t command = C_LPO | BET_ROLLING_DEFAULT; + + blockerupter_send(blockerupter, (char *)&command, 1); +} + +struct device_drv blockerupter_drv = { + .drv_id = DRIVER_blockerupter, + .dname = "blockerupter", + .name = "BET", + .min_diff = 64, + .get_api_stats = blockerupter_api_stats, + .drv_detect = blockerupter_detect, + .thread_prepare = blockerupter_prepare, + .hash_work = hash_driver_work, + .flush_work = blockerupter_flush_work, + 
.scanwork = blockerupter_scanhash +}; diff --git a/driver-blockerupter.h b/driver-blockerupter.h new file mode 100644 index 0000000..4e15f1e --- /dev/null +++ b/driver-blockerupter.h @@ -0,0 +1,130 @@ +#ifndef _BLOCKERUPTER_H +#define _BLOCKERUPTER_H + +/* +WIN32 Build + +1. Install mxe (check tutorial on http://mxe.cc) + +2. After install mxe +export PATH={PATH_TO_MXE}/usr/bin:$PATH +autoreconf -fi +./configure --host=i686-pc-mingw32 --enable-blockerupter --without-curses CFLAGS=-DCURL_STATICLIB +make + +3. Before starting cgminer +install WinUSB driver for detected CP2102x device with Zadig (Some users might need to reboot) +*/ + +#include "miner.h" +#include "util.h" + +#define BET_MAXBOARDS 32 +#define BET_MAXASICS 48 +#define BET_BAUD 460800 + +#define BET_CLOCK_MAX 29 +#define BET_CLOCK_DEFAULT 23 +#define BET_DIFF_DEFAULT 64 +#define BET_ROLLING_DEFAULT 5 +extern int opt_bet_clk; + +#define BET_WORK_FIFO 128 +#define BET_NONCE_FIX 4 + +#define SEND_OK 0 +#define SEND_FAIL 1 +#define READ_OK 0 +#define READ_FAIL 1 + +// Global Commands +// resets all mega88, recv nothing +#define C_RES (0 << 5) +// stop jobs on all boards, set nTime rolling to (BoardID+1)*30, recv nothing +#define C_LPO (1 << 5) +// set clock for all boards, clock = (BoardID+1)*5, recv nothing +#define C_GCK (2 << 5) +// set difficulty bits for all boards with last 2bits from BoardID, recv nothing +#define C_DIF (3 << 5) + +// Board Specific Commands (CMD|BoardID) +// Send midstate(32 bytes), remaining block header(12 bytes), extranonce2(4 bytes) and job index(1 byte) to board +// Recv 0x58 +#define C_JOB (4 << 5) +// Recv current status of board +#define C_ASK (5 << 5) +// Recv (max_asics) bytes of chip test result, (max asics) bytes of clocks, 1 byte of diff bits, 1 byte of max nTime rolling, 1 byte of firmware version. 
Total (max asics)*2+3 bytes +#define C_TRS (6 << 5) + +// answers on C_ASK|BoardID +// Idle, waiting for new job +#define A_WAL 0x56 +// Mining but no nonce yet +#define A_NO 0xa6 +// Found nonce, followed with midstate(32 bytes), remaining block header(12 bytes), extranonce2(4 bytes), nonce(4 bytes), job index(1 byte), chip index(1 byte). Total 54 bytes. +#define A_YES 0x5A + +// answer on C_JOB|BoardID +#define A_GET 0x58 + +#pragma pack(1) + +typedef struct asic_info { + int bad; + int accepted; + int nonces; + int hashes; + double hwe; +} asic_info; + +#pragma pack(1) + +typedef struct board_info { + int bad; + int job_count; + int nonces; + int accepted; + int hashes; + double hashrate; + double hwe; + struct asic_info asics[BET_MAXASICS]; +} board_info; + +#pragma pack(1) + +typedef struct blockerupter_info { + struct pool pool; + uint8_t found; + int clock; + int nonces; + int diff; + int rolling; + int accepted; + int hashes; + double hashrate; + double expected; + double eff; + uint8_t work_idx; + struct work works[BET_WORK_FIFO]; + uint8_t boards[BET_MAXBOARDS]; + board_info b_info[BET_MAXBOARDS]; + struct timeval start_time; + struct timeval last_job; +} blockerupter_info; + + +#pragma pack(1) + +typedef struct blockerupter_response { + uint8_t midstate[32]; + uint8_t merkle[4]; + uint8_t ntime[4]; + uint8_t diff[4]; + uint8_t exnonc2[4]; + uint8_t nonce[4]; + uint8_t work_idx; + uint8_t chip; +} blockerupter_response; +#define BET_RESP_SZ (sizeof(blockerupter_response)) + +#endif diff --git a/driver-bmsc.c b/driver-bmsc.c new file mode 100644 index 0000000..3795739 --- /dev/null +++ b/driver-bmsc.c @@ -0,0 +1,2031 @@ +/* + * Copyright 2012-2013 Andrew Smith + * Copyright 2013 Con Kolivas + * Copyright 2013 Lingchao Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + 
* any later version. See COPYING for more details. + */ + +/* + * Those code should be works fine with AntMiner U1 of Bmsc. + * Operation: + * No detection implement. + * Input: 64B = 32B midstate + 20B fill bytes + last 12 bytes of block head. + * Return: send back 40bits immediately when Bmsc found a valid nonce. + * no query protocol implemented here, if no data send back in ~11.3 + * seconds (full cover time on 32bit nonce range by 380MH/s speed) + * just send another work. + * Notice: + * 1. Bmsc will start calculate when you push a work to them, even they + * are busy. + * 2. Bmsc will stop work when: a valid nonce has been found or 40 bits + * nonce range is completely calculated. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#ifdef WIN32 +#include +#endif + +#include "compat.h" +#include "miner.h" +#include "usbutils.h" + +// The serial I/O speed - Linux uses a define 'B115200' in bits/termios.h +#define BMSC_IO_SPEED 115200 + +#define BMSC_NONCE_ARRAY_SIZE 6 + +// The size of a successful nonce read +#define BMSC_READ_SIZE 5 + +// Ensure the sizes are correct for the Serial read +#if (BMSC_READ_SIZE != 5) +#error BMSC_READ_SIZE must be 5 +#endif +#define ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1] +ASSERT1(sizeof(uint32_t) == 4); + +// TODO: USB? Different calculation? - see usbstats to work it out e.g. 1/2 of normal send time +// or even use that number? 1/2 +// #define BMSC_READ_TIME(baud) ((double)BMSC_READ_SIZE * (double)8.0 / (double)(baud)) +// maybe 1ms? 
+#define BMSC_READ_TIME(baud) (0.001) + +// USB ms timeout to wait - user specified timeouts are multiples of this +#define BMSC_WAIT_TIMEOUT 100 +#define BMSC_CMR2_TIMEOUT 1 +#define BMSC_READ_BUF_LEN 8192 + +// Defined in multiples of BMSC_WAIT_TIMEOUT +// Must of course be greater than BMSC_READ_COUNT_TIMING/BMSC_WAIT_TIMEOUT +// There's no need to have this bigger, since the overhead/latency of extra work +// is pretty small once you get beyond a 10s nonce range time and 10s also +// means that nothing slower than 429MH/s can go idle so most bmsc devices +// will always mine without idling +#define BMSC_READ_TIME_LIMIT_MAX 100 + +// In timing mode: Default starting value until an estimate can be obtained +// 5000 ms allows for up to a ~840MH/s device +#define BMSC_READ_COUNT_TIMING 5000 +#define BMSC_READ_COUNT_MIN BMSC_WAIT_TIMEOUT +#define SECTOMS(s) ((int)((s) * 1000)) +// How many ms below the expected completion time to abort work +// extra in case the last read is delayed +#define BMSC_READ_REDUCE ((int)(BMSC_WAIT_TIMEOUT * 1.5)) + +// For a standard Bmsc (to 5 places) +// Since this rounds up a the last digit - it is a slight overestimate +// Thus the hash rate will be a VERY slight underestimate +// (by a lot less than the displayed accuracy) +// Minor inaccuracy of these numbers doesn't affect the work done, +// only the displayed MH/s +#define BMSC_REV3_HASH_TIME 0.0000000026316 +#define LANCELOT_HASH_TIME 0.0000000025000 +#define ASICMINERUSB_HASH_TIME 0.0000000029761 +// TODO: What is it? 
+#define CAIRNSMORE1_HASH_TIME 0.0000000027000 +// Per FPGA +#define CAIRNSMORE2_HASH_TIME 0.0000000066600 +#define NANOSEC 1000000000.0 + +#define CAIRNSMORE2_INTS 4 + +// Bmsc doesn't send a completion message when it finishes +// the full nonce range, so to avoid being idle we must abort the +// work (by starting a new work item) shortly before it finishes +// +// Thus we need to estimate 2 things: +// 1) How many hashes were done if the work was aborted +// 2) How high can the timeout be before the Bmsc is idle, +// to minimise the number of work items started +// We set 2) to 'the calculated estimate' - BMSC_READ_REDUCE +// to ensure the estimate ends before idle +// +// The simple calculation used is: +// Tn = Total time in seconds to calculate n hashes +// Hs = seconds per hash +// Xn = number of hashes +// W = code/usb overhead per work +// +// Rough but reasonable estimate: +// Tn = Hs * Xn + W (of the form y = mx + b) +// +// Thus: +// Line of best fit (using least squares) +// +// Hs = (n*Sum(XiTi)-Sum(Xi)*Sum(Ti))/(n*Sum(Xi^2)-Sum(Xi)^2) +// W = Sum(Ti)/n - (Hs*Sum(Xi))/n +// +// N.B. 
// W is less when aborting work since we aren't waiting for the reply
// to be transferred back (BMSC_READ_TIME)
// Calculating the hashes aborted at n seconds is thus just n/Hs
// (though this is still a slight overestimate due to code delays)
//

// Both below must be exceeded to complete a set of data
// Minimum how long after the first, the last data point must be
#define HISTORY_SEC 60
// Minimum how many points a single BMSC_HISTORY should have
#define MIN_DATA_COUNT 5
// The value MIN_DATA_COUNT used is doubled each history until it exceeds:
#define MAX_MIN_DATA_COUNT 100

static struct timeval history_sec = { HISTORY_SEC, 0 };

// Store the last INFO_HISTORY data sets
// [0] = current data, not yet ready to be included as an estimate
// Each new data set throws the last old set off the end thus
// keeping a ongoing average of recent data
#define INFO_HISTORY 10

// Depth of the per-device queue of works in flight (BMSC_INFO.work_queue)
#define BMSC_WORK_QUEUE_NUM 36

// One timing-history bucket: least-squares accumulators used to estimate
// the device's seconds-per-hash (Hs) from observed nonce turnaround times.
struct BMSC_HISTORY {
	struct timeval finish;		// when this data set was closed out
	double sumXiTi;			// sum of (hash count * elapsed time)
	double sumXi;			// sum of hash counts
	double sumTi;			// sum of elapsed times
	double sumXi2;			// sum of squared hash counts
	uint32_t values;		// number of data points accumulated
	uint32_t hash_count_min;	// smallest single hash count seen
	uint32_t hash_count_max;	// largest single hash count seen
};

// How --bmsc-timing was specified for this device
enum timing_mode { MODE_DEFAULT, MODE_SHORT, MODE_LONG, MODE_VALUE };

static const char *MODE_DEFAULT_STR = "default";
static const char *MODE_SHORT_STR = "short";
static const char *MODE_SHORT_STREQ = "short=";
static const char *MODE_LONG_STR = "long";
static const char *MODE_LONG_STREQ = "long=";
static const char *MODE_VALUE_STR = "value";
static const char *MODE_UNKNOWN_STR = "unknown";

// Per-device driver state, hung off cgpu_info.device_data
struct BMSC_INFO {
	enum sub_ident ident;		// which bridge/board type usb_ident() saw
	int intinfo;			// USB interface index (CMR2 has several)

	// time to calculate the golden_ob
	uint64_t golden_hashes;
	struct timeval golden_tv;

	struct BMSC_HISTORY history[INFO_HISTORY+1];	// [0] is the live set
	uint32_t min_data_count;

	int timeout;

	// seconds per Hash
	double Hs;
	// ms til we abort
	int read_time;
	// ms limit for (short=/long=) read_time
	int read_time_limit;

	enum timing_mode timing_mode;
	bool do_bmsc_timing;		// true while still refining the Hs estimate

	bool start;

	double fullnonce;		// seconds for a full 2^32 nonce sweep
	int count;
	double W;
	uint32_t values;
	uint64_t hash_count_range;

	// Determine the cost of history processing
	// (which will only affect W)
	uint64_t history_count;
	struct timeval history_time;

	// bmsc-options
	int baud;
	int work_division;
	int fpga_count;
	uint32_t nonce_mask;

	uint8_t cmr2_speed;		// current CMR2 clock setting
	bool speed_next_work;		// piggyback a speed change on the next work
	bool flash_next_work;		// piggyback a flash toggle on the next work

	struct work * work_queue[BMSC_WORK_QUEUE_NUM];
	int work_queue_index;

	unsigned char nonce_bin[BMSC_NONCE_ARRAY_SIZE][BMSC_READ_SIZE+1];
	int nonce_index;
};

#define BMSC_MIDSTATE_SIZE 32
#define BMSC_UNUSED_SIZE 15
#define BMSC_WORK_SIZE 12

#define BMSC_WORK_DATA_OFFSET 64

#define BMSC_CMR2_SPEED_FACTOR 2.5
#define BMSC_CMR2_SPEED_MIN_INT 100
#define BMSC_CMR2_SPEED_DEF_INT 180
#define BMSC_CMR2_SPEED_MAX_INT 220
#define CMR2_INT_TO_SPEED(_speed) ((uint8_t)((float)_speed / BMSC_CMR2_SPEED_FACTOR))
#define BMSC_CMR2_SPEED_MIN CMR2_INT_TO_SPEED(BMSC_CMR2_SPEED_MIN_INT)
#define BMSC_CMR2_SPEED_DEF CMR2_INT_TO_SPEED(BMSC_CMR2_SPEED_DEF_INT)
#define BMSC_CMR2_SPEED_MAX CMR2_INT_TO_SPEED(BMSC_CMR2_SPEED_MAX_INT)
#define BMSC_CMR2_SPEED_INC 1
#define BMSC_CMR2_SPEED_DEC -1
#define BMSC_CMR2_SPEED_FAIL -10

#define BMSC_CMR2_PREFIX ((uint8_t)0xB7)
#define BMSC_CMR2_CMD_SPEED ((uint8_t)0)
#define BMSC_CMR2_CMD_FLASH ((uint8_t)1)
#define BMSC_CMR2_DATA_FLASH_OFF ((uint8_t)0)
#define BMSC_CMR2_DATA_FLASH_ON ((uint8_t)1)
#define BMSC_CMR2_CHECK ((uint8_t)0x6D)

// On-the-wire work layout (32+4+15+1+12 = 64 bytes), written to the device
// verbatim.  Field order matters.
struct BMSC_WORK {
	uint8_t midstate[BMSC_MIDSTATE_SIZE];
	// These 4 bytes are for CMR2 bitstreams that handle MHz adjustment
	uint8_t check;
	uint8_t data;
	uint8_t cmd;
	uint8_t prefix;
	uint8_t unused[BMSC_UNUSED_SIZE];
	uint8_t workid;
	uint8_t work[BMSC_WORK_SIZE];
};

#define END_CONDITION 0x0000ffff

// Looking for options in --bmsc-timing and --bmsc-options:
//
// Code increments this each time we start to look at a device
// However, this means that if other devices
are checked by +// the Bmsc code (e.g. Avalon only as at 20130517) +// they will count in the option offset +// +// This, however, is deterministic so that's OK +// +// If we were to increment after successfully finding an Bmsc +// that would be random since an Bmsc may fail and thus we'd +// not be able to predict the option order +// +// Devices are checked in the order libusb finds them which is ? +// +static int option_offset = -1; + +unsigned char CRC5(unsigned char *ptr, unsigned char len) +{ + unsigned char i, j, k; + unsigned char crc = 0x1f; + + unsigned char crcin[5] = {1, 1, 1, 1, 1}; + unsigned char crcout[5] = {1, 1, 1, 1, 1}; + unsigned char din = 0; + + j = 0x80; + k = 0; + for (i = 0; i < len; i++) + { + if (*ptr & j) { + din = 1; + } else { + din = 0; + } + crcout[0] = crcin[4] ^ din; + crcout[1] = crcin[0]; + crcout[2] = crcin[1] ^ crcin[4] ^ din; + crcout[3] = crcin[2]; + crcout[4] = crcin[3]; + + j = j >> 1; + k++; + if (k == 8) + { + j = 0x80; + k = 0; + ptr++; + } + memcpy(crcin, crcout, 5); + } + crc = 0; + if(crcin[4]) { + crc |= 0x10; + } + if(crcin[3]) { + crc |= 0x08; + } + if(crcin[2]) { + crc |= 0x04; + } + if(crcin[1]) { + crc |= 0x02; + } + if(crcin[0]) { + crc |= 0x01; + } + return crc; +} + +static void _transfer(struct cgpu_info *bmsc, uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, uint32_t *data, int siz, enum usb_cmds cmd) +{ + int err; + + err = usb_transfer_data(bmsc, request_type, bRequest, wValue, wIndex, data, siz, cmd); + + applog(LOG_DEBUG, "%s: bmgid %d %s got err %d", + bmsc->drv->name, bmsc->cgminer_id, + usb_cmdname(cmd), err); +} + +#define transfer(bmsc, request_type, bRequest, wValue, wIndex, cmd) \ + _transfer(bmsc, request_type, bRequest, wValue, wIndex, NULL, 0, cmd) + +static void bmsc_initialise(struct cgpu_info *bmsc, int baud) +{ + struct BMSC_INFO *info = (struct BMSC_INFO *)(bmsc->device_data); + uint16_t wValue, wIndex; + enum sub_ident ident; + int interface; + + if 
(bmsc->usbinfo.nodev)
		return;

	interface = _usb_interface(bmsc, info->intinfo);
	ident = usb_ident(bmsc);

	switch (ident) {
		case IDENT_BLT:
		case IDENT_LLT:
		case IDENT_CMR1:
		case IDENT_CMR2:
			// FTDI-based bridges: reset, then program each setting in turn,
			// re-checking nodev after every transfer in case of unplug
			// Reset
			transfer(bmsc, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, FTDI_VALUE_RESET,
				interface, C_RESET);

			if (bmsc->usbinfo.nodev)
				return;

			// Latency
			_usb_ftdi_set_latency(bmsc, info->intinfo);

			if (bmsc->usbinfo.nodev)
				return;

			// Set data control
			transfer(bmsc, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, FTDI_VALUE_DATA_BLT,
				interface, C_SETDATA);

			if (bmsc->usbinfo.nodev)
				return;

			// default to BLT/LLT 115200
			wValue = FTDI_VALUE_BAUD_BLT;
			wIndex = FTDI_INDEX_BAUD_BLT;

			if (ident == IDENT_CMR1 || ident == IDENT_CMR2) {
				// Cairnsmore boards use their own divisor pairs
				switch (baud) {
					case 115200:
						wValue = FTDI_VALUE_BAUD_CMR_115;
						wIndex = FTDI_INDEX_BAUD_CMR_115;
						break;
					case 57600:
						wValue = FTDI_VALUE_BAUD_CMR_57;
						wIndex = FTDI_INDEX_BAUD_CMR_57;
						break;
					default:
						quit(1, "bmsc_intialise() invalid baud (%d) for Cairnsmore1", baud);
						break;
				}
			}

			// Set the baud
			transfer(bmsc, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, wValue,
				(wIndex & 0xff00) | interface, C_SETBAUD);

			if (bmsc->usbinfo.nodev)
				return;

			// Set Modem Control
			transfer(bmsc, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, FTDI_VALUE_MODEM,
				interface, C_SETMODEM);

			if (bmsc->usbinfo.nodev)
				return;

			// Set Flow Control
			transfer(bmsc, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, FTDI_VALUE_FLOW,
				interface, C_SETFLOW);

			if (bmsc->usbinfo.nodev)
				return;

			// Clear any sent data
			transfer(bmsc, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, FTDI_VALUE_PURGE_TX,
				interface, C_PURGETX);

			if (bmsc->usbinfo.nodev)
				return;

			// Clear any received data
			transfer(bmsc, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, FTDI_VALUE_PURGE_RX,
				interface, C_PURGERX);
			break;
		case IDENT_ICA:
			// PL2303 bridge
			// Set Data Control
			transfer(bmsc, PL2303_CTRL_OUT, PL2303_REQUEST_CTRL, PL2303_VALUE_CTRL,
				interface, C_SETDATA);

			if (bmsc->usbinfo.nodev)
				return;

			// Set Line Control
			uint32_t ica_data[2] = { PL2303_VALUE_LINE0, PL2303_VALUE_LINE1 };
			_transfer(bmsc, PL2303_CTRL_OUT, PL2303_REQUEST_LINE, PL2303_VALUE_LINE,
				interface, &ica_data[0], PL2303_VALUE_LINE_SIZE, C_SETLINE);

			if (bmsc->usbinfo.nodev)
				return;

			// Vendor
			transfer(bmsc, PL2303_VENDOR_OUT, PL2303_REQUEST_VENDOR, PL2303_VALUE_VENDOR,
				interface, C_VENDOR);
			break;
		case IDENT_AMU:
			// CP210x bridge
			// Enable the UART
			transfer(bmsc, CP210X_TYPE_OUT, CP210X_REQUEST_IFC_ENABLE,
				CP210X_VALUE_UART_ENABLE,
				interface, C_ENABLE_UART);

			if (bmsc->usbinfo.nodev)
				return;

			// Set data control
			transfer(bmsc, CP210X_TYPE_OUT, CP210X_REQUEST_DATA, CP210X_VALUE_DATA,
				interface, C_SETDATA);

			if (bmsc->usbinfo.nodev)
				return;

			// Set the baud
			uint32_t data = CP210X_DATA_BAUD;
			_transfer(bmsc, CP210X_TYPE_OUT, CP210X_REQUEST_BAUD, 0,
				interface, &data, sizeof(data), C_SETBAUD);
			break;
		default:
			quit(1, "bmsc_intialise() called with invalid %s cgid %i ident=%d",
				bmsc->drv->name, bmsc->cgminer_id, ident);
	}
}

// Result codes for bmsc_get_nonce()
#define BTM_NONCE_ERROR -1
#define BTM_NONCE_OK 0
#define BTM_NONCE_RESTART 1
#define BTM_NONCE_TIMEOUT 2

// Read one BMSC_READ_SIZE result from the device with a cancellable timeout
// of read_time ms.  tv_start/tv_finish bracket the read for the timing
// estimator; thr may be NULL (detection-time probing).
static int bmsc_get_nonce(struct cgpu_info *bmsc, unsigned char *buf, struct timeval *tv_start,
			  struct timeval *tv_finish, struct thr_info *thr, int read_time)
{
	struct BMSC_INFO *info = (struct BMSC_INFO *)(bmsc->device_data);
	int err, amt, rc;

	if (bmsc->usbinfo.nodev)
		return BTM_NONCE_ERROR;

	cgtime(tv_start);
	err = usb_read_ii_timeout_cancellable(bmsc, info->intinfo, (char *)buf,
					      BMSC_READ_SIZE, &amt, read_time,
					      C_GETRESULTS);
	cgtime(tv_finish);

	if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) {
		// hard comms failure (timeouts are handled below)
		applog(LOG_ERR, "%s%i: Comms error (rerr=%d amt=%d)", bmsc->drv->name,
		       bmsc->device_id, err, amt);
		dev_error(bmsc, REASON_DEV_COMMS_ERROR);
		return BTM_NONCE_ERROR;
	}

	if (amt >= BMSC_READ_SIZE)
		return BTM_NONCE_OK;

	rc = SECTOMS(tdiff(tv_finish,
tv_start)); + if (thr && thr->work_restart) { + applog(LOG_DEBUG, "Bmsc Read: Work restart at %d ms", rc); + return BTM_NONCE_RESTART; + } + + if (amt > 0) + applog(LOG_DEBUG, "Bmsc Read: Timeout reading for %d ms", rc); + else + applog(LOG_DEBUG, "Bmsc Read: No data for %d ms", rc); + return BTM_NONCE_TIMEOUT; +} + +static const char *timing_mode_str(enum timing_mode timing_mode) +{ + switch(timing_mode) { + case MODE_DEFAULT: + return MODE_DEFAULT_STR; + case MODE_SHORT: + return MODE_SHORT_STR; + case MODE_LONG: + return MODE_LONG_STR; + case MODE_VALUE: + return MODE_VALUE_STR; + default: + return MODE_UNKNOWN_STR; + } +} + +static void set_timing_mode(int this_option_offset, struct cgpu_info *bmsc, float readtimeout) +{ + struct BMSC_INFO *info = (struct BMSC_INFO *)(bmsc->device_data); + enum sub_ident ident; + double Hs; + char buf[BUFSIZ+1]; + char *ptr, *comma, *eq; + size_t max; + int i; + + ident = usb_ident(bmsc); + switch (ident) { + case IDENT_ICA: + info->Hs = BMSC_REV3_HASH_TIME; + break; + case IDENT_BLT: + case IDENT_LLT: + info->Hs = LANCELOT_HASH_TIME; + break; + case IDENT_AMU: + info->Hs = ASICMINERUSB_HASH_TIME; + break; + case IDENT_CMR1: + info->Hs = CAIRNSMORE1_HASH_TIME; + break; + case IDENT_CMR2: + info->Hs = CAIRNSMORE2_HASH_TIME; + break; + default: + quit(1, "Bmsc get_options() called with invalid %s ident=%d", + bmsc->drv->name, ident); + } + + info->read_time = 0; + info->read_time_limit = 0; // 0 = no limit + + info->fullnonce = info->Hs * (((double) 0xffffffff) + 1); + info->read_time = (int)(readtimeout * BMSC_WAIT_TIMEOUT); + + if(info->read_time < 0) + info->read_time = 1; + + info->timing_mode = MODE_DEFAULT; + info->do_bmsc_timing = false; + + info->min_data_count = MIN_DATA_COUNT; + + // All values are in multiples of BMSC_WAIT_TIMEOUT + info->read_time_limit *= BMSC_WAIT_TIMEOUT; + + applog(LOG_ERR, "%s%d Init: mode=%s read_time=%dms limit=%dms Hs=%e", + bmsc->drv->name, bmsc->cgminer_id, + 
timing_mode_str(info->timing_mode), + info->read_time, info->read_time_limit, info->Hs); +} + +static uint32_t mask(int work_division) +{ + uint32_t nonce_mask = 0x7fffffff; + + // yes we can calculate these, but this way it's easy to see what they are + switch (work_division) { + case 1: + nonce_mask = 0xffffffff; + break; + case 2: + nonce_mask = 0x7fffffff; + break; + case 4: + nonce_mask = 0x3fffffff; + break; + case 8: + nonce_mask = 0x1fffffff; + break; + default: + quit(1, "Invalid2 bmsc-options for work_division (%d) must be 1, 2, 4 or 8", work_division); + } + + return nonce_mask; +} + +static void get_options(int this_option_offset, struct cgpu_info *bmsc, int *baud, float *readtimeout) +{ + char buf[BUFSIZ+1]; + char *ptr, *comma, *colon, *colon2; + enum sub_ident ident; + size_t max; + int i, tmp; + float tmpf; + + if (opt_bmsc_options == NULL) + buf[0] = '\0'; + else { + ptr = opt_bmsc_options; + for (i = 0; i < this_option_offset; i++) { + comma = strchr(ptr, ','); + if (comma == NULL) + break; + ptr = comma + 1; + } + + comma = strchr(ptr, ','); + if (comma == NULL) + max = strlen(ptr); + else + max = comma - ptr; + + if (max > BUFSIZ) + max = BUFSIZ; + strncpy(buf, ptr, max); + buf[max] = '\0'; + } + + ident = usb_ident(bmsc); + switch (ident) { + case IDENT_ICA: + case IDENT_BLT: + case IDENT_LLT: + *baud = BMSC_IO_SPEED; + break; + case IDENT_AMU: + *baud = BMSC_IO_SPEED; + break; + case IDENT_CMR1: + *baud = BMSC_IO_SPEED; + break; + case IDENT_CMR2: + *baud = BMSC_IO_SPEED; + break; + default: + quit(1, "Bmsc get_options() called with invalid %s ident=%d", + bmsc->drv->name, ident); + } + + if (*buf) { + colon = strchr(buf, ':'); + if (colon) + *(colon++) = '\0'; + + if (*buf) { + tmp = atoi(buf); + switch (tmp) { + case 115200: + *baud = 115200; + break; + case 57600: + *baud = 57600; + break; + default: + quit(1, "Invalid bmsc-options for baud (%s) must be 115200 or 57600", buf); + } + } + + if (colon && *colon) { + tmpf = atof(colon); + if 
(tmpf > 0) { + *readtimeout = tmpf; + } else { + quit(1, "Invalid bmsc-options for timeout (%s) must be > 0", colon); + } + } + } +} + +static void get_bandops(unsigned char * core_buf, int *corenum, char *coreenable, int *coresleep) +{ + char buf[512] = {0}; + char *colon, *colon2, * colon3; + int i, len; + + if (opt_bmsc_bandops) { + len = strlen(opt_bmsc_bandops); + if(len <= 0 || len >= 512) { + quit(1, "Invalid bmsc-bandops %s %d", opt_bmsc_bandops, len); + } + strcpy(buf, opt_bmsc_bandops); + colon = strchr(buf, ':'); + if (colon) + *(colon++) = '\0'; + + if (*buf) { + if(strlen(buf) > 8 || strlen(buf)%2 != 0 || strlen(buf)/2 == 0) { + quit(1, "Invalid bitmain-options for core command, must be hex now: %s", buf); + } + memset(core_buf, 0, 4); + if(!hex2bin(core_buf, buf, strlen(buf)/2)) { + quit(1, "Invalid bitmain-options for core command, hex2bin error now: %s", buf); + } + } + + if (colon && *colon) { + colon2 = strchr(colon, ':'); + if (colon2) + *(colon2++) = '\0'; + + if (*colon) { + *corenum = atoi(colon); + if(*corenum <= 0 || *corenum >= 256) { + quit(1, "Invalid bitmain-bandops for asic core num, must %d be > 0 and < 256", *corenum); + } + } + + if(colon2 && *colon2) { + colon3 = strchr(colon2, ':'); + if (colon3) + *(colon3++) = '\0'; + + if(*colon2) { + strcpy(coreenable, colon2); + if(strlen(coreenable) != *corenum) { + quit(1, "Invalid bitmain-bandops for asic core enable, must be equal core num %d", *corenum); + } + } + + if (colon3 && *colon3) { + *coresleep = atoi(colon3); + } + } + } + } +} + +static struct cgpu_info *bmsc_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + int this_option_offset = ++option_offset; + struct BMSC_INFO *info; + struct timeval tv_start, tv_finish; + + // Block 171874 nonce = (0xa2870100) = 0x000187a2 + // N.B. 
golden_ob MUST take less time to calculate + // than the timeout set in bmsc_open() + // This one takes ~0.53ms on Rev3 Bmsc + const char golden_ob[] = + "4679ba4ec99876bf4bfe086082b40025" + "4df6c356451471139a3afa71e48f544a" + "00000000000000004000000000000000" + "0000001f87320b1a1426674f2fa722ce"; + const char golden_ob1[] = + "e1eb393a50f6ae97e306ea87c1c47eae" + "1f9ad02d729d9f86bd48a213a4600144" + "00000000000000004000000000000000" + "0000001ffb0b0719aaf19752dd5e83a4"; + const char golden_ob2[] = + "b65911ea2c4b0c52958cb408caebff32" + "8dece4e6a002fe2693ba9906ffde7e8a" + "00000000000000004000000000000000" + "0000001f20dc1c190642455201756658"; + const char golden_ob3[] = + "c99da189374bcc69a1134d6f4953addc" + "7420499b132b7f8f999b0c71fe7efbf2" + "00000000000000004000000000000000" + "0000001f20dc1c198e4145526d74dee3"; + const char golden_ob4[] = + "696af96144b6079c1b437fbc6e539e4d" + "996d25b027ea9eefdfaf4eff6add6986" + "00000000000000004000000000000000" + "0000001f20dc1c19f84e4552ac86dc14"; + + char bandops_ob[] = + "00000000000000000000000000000000" + "00000000000000000000000000000000" + "00000000000000000000000000000000" + "00000000000000000000000000000000"; + + const char golden_nonce[] = "000187a2"; + const char golden_nonce1[] = "0345182b"; + const char golden_nonce2[] = "466b30a5"; + const char golden_nonce3[] = "857e65ee"; + const char golden_nonce4[] = "c6f70284"; + const uint32_t golden_nonce_val = 0x000187a2; + unsigned char nonce_bin[BMSC_READ_SIZE]; + struct BMSC_WORK workdata; + char *nonce_hex; + int baud = 115200, work_division = 1, fpga_count = 1; + float readtimeout = 1.0; + struct cgpu_info *bmsc; + int ret, err, amount, tries, i; + bool ok; + bool cmr2_ok[CAIRNSMORE2_INTS]; + int cmr2_count; + + unsigned char cmd_buf[4] = {0}; + unsigned char rdreg_buf[4] = {0}; + unsigned char reg_data[4] = {0}; + unsigned char voltage_data[2] = {0}; + + unsigned char rebuf[BMSC_READ_BUF_LEN] = {0}; + int relen = 0; + int realllen = 0; + int nodata = 0; + 
char msg[10240] = {0}; + int sendfreqstatus = 1; + int k = 0; + + unsigned char core_cmd[4] = {0}; + int corenum = 0; + char coreenable[256] = {0}; + int coresleep = 0; + + if (opt_bmsc_options == NULL) + return NULL; + + if ((sizeof(workdata) << 1) != (sizeof(golden_ob) - 1)) + quithere(1, "Data and golden_ob sizes don't match"); + if ((sizeof(workdata) << 1) != (sizeof(bandops_ob) - 1)) + quithere(1, "Data and bandops_ob sizes don't match"); + + bmsc = usb_alloc_cgpu(&bmsc_drv, 1); + + if (!usb_init(bmsc, dev, found)) + goto shin; + + get_options(this_option_offset, bmsc, &baud, &readtimeout); + get_bandops(core_cmd, &corenum, coreenable, &coresleep); + + info = (struct BMSC_INFO *)calloc(1, sizeof(struct BMSC_INFO)); + if (unlikely(!info)) + quit(1, "Failed to malloc BMSC_INFO"); + bmsc->device_data = (void *)info; + + info->ident = usb_ident(bmsc); + info->start = true; + switch (info->ident) { + case IDENT_ICA: + case IDENT_BLT: + case IDENT_LLT: + case IDENT_AMU: + case IDENT_CMR1: + info->timeout = BMSC_WAIT_TIMEOUT; + break; + case IDENT_CMR2: + if (found->intinfo_count != CAIRNSMORE2_INTS) { + quithere(1, "CMR2 Interface count (%d) isn't expected: %d", + found->intinfo_count, + CAIRNSMORE2_INTS); + } + info->timeout = BMSC_CMR2_TIMEOUT; + cmr2_count = 0; + for (i = 0; i < CAIRNSMORE2_INTS; i++) + cmr2_ok[i] = false; + break; + default: + quit(1, "%s bmsc_detect_one() invalid %s ident=%d", + bmsc->drv->dname, bmsc->drv->dname, info->ident); + } +// For CMR2 test each USB Interface +cmr2_retry: + tries = 2; + ok = false; + while (!ok && tries-- > 0) { + bmsc_initialise(bmsc, baud); + + if(opt_bmsc_bootstart) { + applog(LOG_ERR, "---------------------start bootstart----------------------"); + cmd_buf[0] = 0xbb; + cmd_buf[1] = 0x00; + cmd_buf[2] = 0x00; + cmd_buf[3] = 0x00; //0-7 + cmd_buf[3] = CRC5(cmd_buf, 27); + cmd_buf[3] |= 0x80; + + cgsleep_ms(500); + applog(LOG_ERR, "Send bootstart off %02x%02x%02x%02x", cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); 
+ err = usb_write(bmsc, (char * )cmd_buf, 4, &amount, C_SENDTESTWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "Write bootstart Comms error (werr=%d amount=%d)", err, amount); + continue; + } + + cmd_buf[0] = 0xbb; + cmd_buf[1] = 0x08; + cmd_buf[2] = 0x00; + cmd_buf[3] = 0x00; //0-7 + cmd_buf[3] = CRC5(cmd_buf, 27); + cmd_buf[3] |= 0x80; + + cgsleep_ms(500); + applog(LOG_ERR, "Send bootstart on %02x%02x%02x%02x", cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); + err = usb_write(bmsc, (char * )cmd_buf, 4, &amount, C_SENDTESTWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "Write bootstart Comms error (werr=%d amount=%d)", err, amount); + continue; + } + applog(LOG_ERR, "Send bootstart ok"); + } + + if(opt_bmsc_voltage) { + if(strlen(opt_bmsc_voltage) > 4 || strlen(opt_bmsc_voltage)%2 != 0 || strlen(opt_bmsc_voltage)/2 == 0) { + quit(1, "Invalid options for voltage data, must be hex now: %s", opt_bmsc_voltage); + } + memset(voltage_data, 0, 2); + if(!hex2bin(voltage_data, opt_bmsc_voltage, strlen(opt_bmsc_voltage)/2)) { + quit(1, "Invalid options for voltage data, hex2bin error now: %s", opt_bmsc_voltage); + } + cmd_buf[0] = 0xaa; + cmd_buf[1] = voltage_data[0]; + cmd_buf[1] &=0x0f; + cmd_buf[1] |=0xb0; + cmd_buf[2] = voltage_data[1]; + cmd_buf[3] = 0x00; //0-7 + cmd_buf[3] = CRC5(cmd_buf, 4*8 - 5); + cmd_buf[3] |= 0xc0; + + applog(LOG_ERR, "---------------------start voltage----------------------"); + cgsleep_ms(500); + applog(LOG_ERR, "Send voltage %02x%02x%02x%02x", cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); + err = usb_write(bmsc, (char * )cmd_buf, 4, &amount, C_SENDTESTWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "Write voltage Comms error (werr=%d amount=%d)", err, amount); + continue; + } + applog(LOG_ERR, "Send voltage ok"); + } + + if (opt_bmsc_gray) { + cmd_buf[0] = 3; + cmd_buf[0] |= 0x80; + cmd_buf[1] = 0; //16-23 + cmd_buf[2] = 0x80; //8-15 + cmd_buf[3] = 0x80; //0-7 + 
cmd_buf[3] = CRC5(cmd_buf, 27); + cmd_buf[3] |= 0x80; + + applog(LOG_ERR, "-----------------start gray-------------------"); + cgsleep_ms(500); + applog(LOG_ERR, "Send gray %02x%02x%02x%02x", cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); + err = usb_write_ii(bmsc, info->intinfo, (char * )cmd_buf, 4, &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s%i: Write freq Comms error (werr=%d amount=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + continue; + } + applog(LOG_DEBUG, "Send gray ok"); + } + + if (opt_bmsc_freq) { + if (strcmp(opt_bmsc_freq, "0") != 0) { + applog(LOG_DEBUG, "Device detect freq parameter=%s", opt_bmsc_freq); + if (strlen(opt_bmsc_freq) > 8 || strlen(opt_bmsc_freq) % 2 != 0 || strlen(opt_bmsc_freq) / 2 == 0) { + quit(1, "Invalid bmsc_freq for freq data, must be hex now: %s", opt_bmsc_freq); + } + memset(reg_data, 0, 4); + if (!hex2bin(reg_data, opt_bmsc_freq, strlen(opt_bmsc_freq) / 2)) { + quit(1, "Invalid bmsc_freq for freq data, hex2bin error now: %s", opt_bmsc_freq); + } + cmd_buf[0] = 2; + cmd_buf[0] |= 0x80; + cmd_buf[1] = reg_data[0]; //16-23 + cmd_buf[2] = reg_data[1]; //8-15 + cmd_buf[3] = 0; + cmd_buf[3] = CRC5(cmd_buf, 27); + applog(LOG_DEBUG, "Set_frequency cmd_buf[1]{%02x}cmd_buf[2]{%02x}", cmd_buf[1], cmd_buf[2]); + + rdreg_buf[0] = 4; + rdreg_buf[0] |= 0x80; + rdreg_buf[1] = 0; //16-23 + rdreg_buf[2] = 0x04; //8-15 + rdreg_buf[3] = 0; + rdreg_buf[3] = CRC5(rdreg_buf, 27); + + applog(LOG_ERR, "-----------------start freq-------------------"); + cgsleep_ms(500); + + applog(LOG_ERR, "Send frequency %02x%02x%02x%02x", cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); + err = usb_write_ii(bmsc, info->intinfo, (char * )cmd_buf, 4, &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s%i: Write freq Comms error (werr=%d amount=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + continue; + } + applog(LOG_DEBUG, "Send frequency ok"); + + cgsleep_ms(500); 
+ + applog(LOG_ERR, "Send freq getstatus %02x%02x%02x%02x", rdreg_buf[0], rdreg_buf[1], rdreg_buf[2], rdreg_buf[3]); + + for(i = 0; i < 10; i++) { + usb_read_ii_timeout_cancellable(bmsc, info->intinfo, (char * )rebuf, BMSC_READ_SIZE, &relen, 100, C_GETRESULTS); + } + + err = usb_write_ii(bmsc, info->intinfo, (char * )rdreg_buf, 4, &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s%i: Write freq getstatus Comms error (werr=%d amount=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + continue; + } + applog(LOG_DEBUG, "Send freq getstatus ok"); + + nodata = 0; + realllen = 0; + while (1) { + relen = 0; + err = usb_read_ii_timeout_cancellable(bmsc, info->intinfo, (char * )rebuf + realllen, BMSC_READ_SIZE, &relen, 200, C_GETRESULTS); + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s%i: Read freq Comms error (rerr=%d relen=%d)", bmsc->drv->name, bmsc->device_id, err, relen); + break; + } else if (err == LIBUSB_ERROR_TIMEOUT) { + applog(LOG_DEBUG, "%s%i: Read freq Comms timeout (rerr=%d relen=%d)", bmsc->drv->name, bmsc->device_id, err, relen); + + nodata++; + if (nodata > 5) { + if (realllen <= 0) { + if (sendfreqstatus) { + sendfreqstatus = 0; + applog(LOG_ERR, "Send freq getstatus %02x%02x%02x%02x", rdreg_buf[0], rdreg_buf[1], rdreg_buf[2], rdreg_buf[3]); + usb_read_ii_timeout_cancellable(bmsc, info->intinfo, (char * )rebuf, BMSC_READ_SIZE, &relen, 200, C_GETRESULTS); + err = usb_write_ii(bmsc, info->intinfo, (char * )rdreg_buf, 4, &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s%i: Write freq getstatus Comms error (werr=%d amount=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + continue; + } + applog(LOG_DEBUG, "Send freq getstatus ok"); + } else { + applog(LOG_ERR, "------recv freq getstatus no data finish------"); + break; + } + } else { + applog(LOG_DEBUG, "Recv freq getstatus len=%d", realllen); + for (i = 0; i < realllen; i += 5) { + applog(LOG_ERR, 
"Recv %d freq getstatus=%02x%02x%02x%02x%02x", i / 5 + 1, rebuf[i], rebuf[i + 1], rebuf[i + 2], rebuf[i + 3], rebuf[i + 4]); + } + applog(LOG_ERR, "--------recv freq getstatus ok finish---------"); + break; + } + } + continue; + } else { + nodata = 0; + realllen += relen; + for (i = 0; i < relen; i++) { + sprintf(msg + i * 2, "%02x", rebuf[i]); + } + applog(LOG_DEBUG, "Read data(%d):%s", relen, msg); + } + } + } else { + applog(LOG_ERR, "Device detect freq 0 parameter"); + } + } + + if (opt_bmsc_rdreg) { + applog(LOG_DEBUG, "Device detect rdreg parameter=%s", opt_bmsc_rdreg); + if (strlen(opt_bmsc_rdreg) > 8 || strlen(opt_bmsc_rdreg) % 2 != 0 || strlen(opt_bmsc_rdreg) / 2 == 0) { + quit(1, "Invalid bmsc_rdreg for reg data, must be hex now: %s", opt_bmsc_rdreg); + } + memset(reg_data, 0, 4); + if (!hex2bin(reg_data, opt_bmsc_rdreg, strlen(opt_bmsc_rdreg) / 2)) { + quit(1, "Invalid bmsc_rdreg for reg data, hex2bin error now: %s", opt_bmsc_rdreg); + } + rdreg_buf[0] = 4; + rdreg_buf[0] |= 0x80; + rdreg_buf[1] = 0; //16-23 + rdreg_buf[2] = reg_data[0]; //8-15 + rdreg_buf[3] = 0; + rdreg_buf[3] = CRC5(rdreg_buf, 27); + applog(LOG_DEBUG, "Get_status rdreg_buf[1]{%02x}rdreg_buf[2]{%02x}", rdreg_buf[1], rdreg_buf[2]); + + applog(LOG_ERR, "-----------------start rdreg------------------"); + applog(LOG_ERR, "Send getstatus %02x%02x%02x%02x", rdreg_buf[0], rdreg_buf[1], rdreg_buf[2], rdreg_buf[3]); + + for(i = 0; i < 10; i++) { + usb_read_ii_timeout_cancellable(bmsc, info->intinfo, (char * )rebuf, BMSC_READ_SIZE, &relen, 100, C_GETRESULTS); + } + + err = usb_write_ii(bmsc, info->intinfo, (char * )rdreg_buf, 4, &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s%i: Write rdreg Comms error (werr=%d amount=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + continue; + } + applog(LOG_DEBUG, "Send getstatus ok"); + + nodata = 0; + realllen = 0; + while (1) { + relen = 0; + err = usb_read_ii_timeout_cancellable(bmsc, info->intinfo, (char * 
)rebuf + realllen, BMSC_READ_SIZE, &relen, 200, C_GETRESULTS); + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s%i: Read rdreg Comms error (rerr=%d relen=%d)", bmsc->drv->name, bmsc->device_id, err, relen); + break; + } else if (err == LIBUSB_ERROR_TIMEOUT) { + applog(LOG_DEBUG, "%s%i: Read rdreg Comms timeout (rerr=%d relen=%d)", bmsc->drv->name, bmsc->device_id, err, relen); + + nodata++; + if (nodata > 5) { + applog(LOG_DEBUG, "Recv rdreg getstatus len=%d", realllen); + for (i = 0; i < realllen; i += 5) { + applog(LOG_ERR, "Recv %d rdreg getstatus=%02x%02x%02x%02x%02x", i / 5 + 1, rebuf[i], rebuf[i + 1], rebuf[i + 2], rebuf[i + 3], rebuf[i + 4]); + } + applog(LOG_ERR, "---------recv rdreg getstatus finish----------"); + break; + } + continue; + } else { + nodata = 0; + realllen += relen; + for (i = 0; i < relen; i++) { + sprintf(msg + i * 2, "%02x", rebuf[i]); + } + applog(LOG_DEBUG, "Read data(%d):%s", relen, msg); + } + } + } + + if (opt_bmsc_bandops) { + unsigned char tmpbyte = 0; + cmd_buf[0] = core_cmd[0]; + cmd_buf[1] = core_cmd[1]; + cmd_buf[2] = core_cmd[2]; + tmpbyte = core_cmd[3] & 0xE0; + cmd_buf[3] = tmpbyte; + cmd_buf[3] = CRC5(cmd_buf, 27); + cmd_buf[3] |= tmpbyte; + + applog(LOG_ERR, "-----------------start bandops-------------------"); + applog(LOG_ERR, "SetBandOPS cmd:%02x%02x%02x%02x corenum:%d enable:%s sleep:%d", core_cmd[0], core_cmd[1], core_cmd[2], core_cmd[3], corenum, coreenable, coresleep); + cgsleep_ms(500); + applog(LOG_ERR, "Send bandops %02x%02x%02x%02x", cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); + err = usb_write_ii(bmsc, info->intinfo, (char * )cmd_buf, 4, &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s%i: Write BandOPS Comms error (werr=%d amount=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + continue; + } + applog(LOG_DEBUG, "Send bandops command ok"); + for(i = 0; i < corenum; i++) { + if(coreenable[i] == '1') { + bandops_ob[127] = '1'; + } else { + 
bandops_ob[127] = '0'; + } + amount = 0; + hex2bin((void *)(&workdata), bandops_ob, sizeof(workdata)); + applog(LOG_ERR, "Send %d %s", i, bandops_ob); + err = usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) { + applog(LOG_ERR, "%d %s%i: Write BandOPS Enable Comms error (werr=%d amount=%d)", i, bmsc->drv->name, bmsc->device_id, err, amount); + break; + } + if(coresleep > 0) { + cgsleep_ms(coresleep); + } + } + if(i >= corenum) { + applog(LOG_DEBUG, "Send bandops core enable ok"); + } else { + continue; + } + } + cgsleep_ms(1000); + + applog(LOG_ERR, "-----------------start nonce------------------"); +#if 0 + applog(LOG_ERR, "Bmsc send golden nonce"); + + hex2bin((void *)(&workdata), golden_ob, sizeof(workdata)); + err = usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) + continue; + + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = bmsc_get_nonce(bmsc, nonce_bin, &tv_start, &tv_finish, NULL, 500); + if (ret != BTM_NONCE_OK) { + applog(LOG_ERR, "Bmsc recv golden nonce timeout"); + continue; + } + + nonce_hex = bin2hex(nonce_bin, sizeof(nonce_bin)); + if (strncmp(nonce_hex, golden_nonce, 8) == 0) + ok = true; + else { + applog(LOG_ERR, "Bmsc recv golden nonce %s != %s and retry", nonce_hex, golden_nonce); + if (tries < 0 && info->ident != IDENT_CMR2) { + applog(LOG_ERR, "Bmsc Detect: Test failed at %s: get %s, should: %s", + bmsc->device_path, nonce_hex, golden_nonce); + } + } + + applog(LOG_ERR, "Bmsc recv golden nonce %s -- %s ", nonce_hex, golden_nonce); +#else + applog(LOG_ERR, "Bmsc send golden nonce1"); + hex2bin((void *)(&workdata), golden_ob1, sizeof(workdata)); + err = usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) + continue; + + 
memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = bmsc_get_nonce(bmsc, nonce_bin, &tv_start, &tv_finish, NULL, 500); + if (ret != BTM_NONCE_OK) { + applog(LOG_ERR, "Bmsc recv golden nonce timeout"); + continue; + } + + nonce_hex = bin2hex(nonce_bin, sizeof(nonce_bin)); + if (strncmp(nonce_hex, golden_nonce1, 8) == 0) + ok = true; + else { + applog(LOG_ERR, "Bmsc recv golden nonce %s != %s and retry", nonce_hex, golden_nonce1); + applog(LOG_ERR,"The first chip may not work,reconnrect the device will get better stats"); + cgsleep_ms(1000); + if (tries < 0 && info->ident != IDENT_CMR2) { + applog(LOG_ERR, "Bmsc Detect: Test failed at %s: get %s, should: %s", + bmsc->device_path, nonce_hex, golden_nonce1); + } + } + applog(LOG_ERR, "Bmsc recv golden nonce1 %s -- %s ", nonce_hex, golden_nonce1); + + applog(LOG_ERR, "Bmsc send golden nonce2"); + + hex2bin((void *)(&workdata), golden_ob2, sizeof(workdata)); + err = usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) + continue; + + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = bmsc_get_nonce(bmsc, nonce_bin, &tv_start, &tv_finish, NULL, 500); + if (ret != BTM_NONCE_OK) { + applog(LOG_ERR, "Bmsc recv golden nonce timeout"); + continue; + } + + nonce_hex = bin2hex(nonce_bin, sizeof(nonce_bin)); + if (strncmp(nonce_hex, golden_nonce2, 8) == 0) + ok = true; + else { + applog(LOG_ERR, "Bmsc recv golden nonce %s != %s and retry", nonce_hex, golden_nonce2); + applog(LOG_ERR,"The second chip may not work,reconnrect the device will get better stats"); + cgsleep_ms(1000); + if (tries < 0 && info->ident != IDENT_CMR2) { + applog(LOG_ERR, "Bmsc Detect: Test failed at %s: get %s, should: %s", + bmsc->device_path, nonce_hex, golden_nonce2); + } + } + applog(LOG_ERR, "Bmsc recv golden nonce2 %s -- %s ", nonce_hex, golden_nonce2); + applog(LOG_ERR, "Bmsc send golden nonce3"); + hex2bin((void *)(&workdata), golden_ob3, 
sizeof(workdata)); + err = usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) + continue; + + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = bmsc_get_nonce(bmsc, nonce_bin, &tv_start, &tv_finish, NULL, 500); + if (ret != BTM_NONCE_OK) { + applog(LOG_ERR, "Bmsc recv golden nonce timeout"); + continue; + } + + nonce_hex = bin2hex(nonce_bin, sizeof(nonce_bin)); + if (strncmp(nonce_hex, golden_nonce3, 8) == 0) + ok = true; + else { + applog(LOG_ERR, "Bmsc recv golden nonce %s != %s and retry", nonce_hex, golden_nonce3); + applog(LOG_ERR,"The third chip may not work,reconnrect the device will get better stats"); + cgsleep_ms(1000); + if (tries < 0 && info->ident != IDENT_CMR2) { + applog(LOG_ERR, "Bmsc Detect: Test failed at %s: get %s, should: %s", + bmsc->device_path, nonce_hex, golden_nonce3); + } + } + + applog(LOG_ERR, "Bmsc recv golden nonce %s -- %s ", nonce_hex, golden_nonce3); + applog(LOG_ERR, "Bmsc send golden nonce4"); + + hex2bin((void *)(&workdata), golden_ob4, sizeof(workdata)); + err = usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) + continue; + + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = bmsc_get_nonce(bmsc, nonce_bin, &tv_start, &tv_finish, NULL, 500); + if (ret != BTM_NONCE_OK) { + applog(LOG_ERR, "Bmsc recv golden nonce4 timeout"); + continue; + } + + nonce_hex = bin2hex(nonce_bin, sizeof(nonce_bin)); + if (strncmp(nonce_hex, golden_nonce4, 8) == 0) + ok = true; + + else { + applog(LOG_ERR, "Bmsc recv golden nonce %s != %s and retry", nonce_hex, golden_nonce4); + applog(LOG_ERR,"The fourth chip may not work,reconnrect the device will get better stats"); + cgsleep_ms(1000); + if (tries < 0 && info->ident != IDENT_CMR2) { + applog(LOG_ERR, "Bmsc Detect: Test failed at %s: get %s, should: %s", + bmsc->device_path, nonce_hex, 
golden_nonce4); + } + } + + applog(LOG_ERR, "Bmsc recv golden nonce %s -- %s ", nonce_hex, golden_nonce4); +#endif + free(nonce_hex); + } + + if (!ok) { + if (info->ident != IDENT_CMR2) + goto unshin; + + if (info->intinfo < CAIRNSMORE2_INTS-1) { + info->intinfo++; + goto cmr2_retry; + } + } else { + if (info->ident == IDENT_CMR2) { + applog(LOG_DEBUG, + "Bmsc Detect: " + "Test succeeded at %s i%d: got %s", + bmsc->device_path, info->intinfo, golden_nonce); + + cmr2_ok[info->intinfo] = true; + cmr2_count++; + if (info->intinfo < CAIRNSMORE2_INTS-1) { + info->intinfo++; + goto cmr2_retry; + } + } + } + + if (info->ident == IDENT_CMR2) { + if (cmr2_count == 0) { + applog(LOG_ERR, + "Bmsc Detect: Test failed at %s: for all %d CMR2 Interfaces", + bmsc->device_path, CAIRNSMORE2_INTS); + goto unshin; + } + + // set the interface to the first one that succeeded + for (i = 0; i < CAIRNSMORE2_INTS; i++) + if (cmr2_ok[i]) { + info->intinfo = i; + break; + } + } else { + applog(LOG_DEBUG, + "Bmsc Detect: " + "Test succeeded at %s: got %s", + bmsc->device_path, golden_nonce); + } + + /* We have a real Bmsc! */ + if (!add_cgpu(bmsc)) + goto unshin; + + update_usb_stats(bmsc); + + applog(LOG_INFO, "%s%d: Found at %s", + bmsc->drv->name, bmsc->device_id, bmsc->device_path); + + if (info->ident == IDENT_CMR2) { + applog(LOG_INFO, "%s%d: with %d Interface%s", + bmsc->drv->name, bmsc->device_id, + cmr2_count, cmr2_count > 1 ? 
"s" : ""); + + // Assume 1 or 2 are running FPGA pairs + if (cmr2_count < 3) { + work_division = fpga_count = 2; + info->Hs /= 2; + } + } + + applog(LOG_DEBUG, "%s%d: Init baud=%d work_division=%d fpga_count=%d readtimeout=%f", + bmsc->drv->name, bmsc->device_id, baud, work_division, fpga_count, readtimeout); + + info->baud = baud; + info->work_division = work_division; + info->fpga_count = fpga_count; + info->nonce_mask = mask(work_division); + info->work_queue_index = 0; + for(k = 0; k < BMSC_WORK_QUEUE_NUM; k++) { + info->work_queue[k] = NULL; + } + + info->golden_hashes = (golden_nonce_val & info->nonce_mask) * fpga_count; + timersub(&tv_finish, &tv_start, &(info->golden_tv)); + + set_timing_mode(this_option_offset, bmsc, readtimeout); + + if (info->ident == IDENT_CMR2) { + int i; + for (i = info->intinfo + 1; i < bmsc->usbdev->found->intinfo_count; i++) { + struct cgpu_info *cgtmp; + struct BMSC_INFO *intmp; + + if (!cmr2_ok[i]) + continue; + + cgtmp = usb_copy_cgpu(bmsc); + if (!cgtmp) { + applog(LOG_ERR, "%s%d: Init failed initinfo %d", + bmsc->drv->name, bmsc->device_id, i); + continue; + } + + cgtmp->usbinfo.usbstat = USB_NOSTAT; + + intmp = (struct BMSC_INFO *)malloc(sizeof(struct BMSC_INFO)); + if (unlikely(!intmp)) + quit(1, "Failed2 to malloc BMSC_INFO"); + + cgtmp->device_data = (void *)intmp; + + // Initialise everything to match + memcpy(intmp, info, sizeof(struct BMSC_INFO)); + + intmp->intinfo = i; + + bmsc_initialise(cgtmp, baud); + + if (!add_cgpu(cgtmp)) { + usb_uninit(cgtmp); + free(intmp); + continue; + } + + update_usb_stats(cgtmp); + } + } + + return bmsc; + +unshin: + + usb_uninit(bmsc); + free(info); + bmsc->device_data = NULL; + +shin: + + bmsc = usb_free_cgpu(bmsc); + + return NULL; +} + +static void bmsc_detect(bool __maybe_unused hotplug) +{ + usb_detect(&bmsc_drv, bmsc_detect_one); +} + +static bool bmsc_prepare(__maybe_unused struct thr_info *thr) +{ +// struct cgpu_info *bmsc = thr->cgpu; + return true; +} + +static void 
cmr2_command(struct cgpu_info *bmsc, uint8_t cmd, uint8_t data) +{ + struct BMSC_INFO *info = (struct BMSC_INFO *)(bmsc->device_data); + struct BMSC_WORK workdata; + int amount; + + memset((void *)(&workdata), 0, sizeof(workdata)); + + workdata.prefix = BMSC_CMR2_PREFIX; + workdata.cmd = cmd; + workdata.data = data; + workdata.check = workdata.data ^ workdata.cmd ^ workdata.prefix ^ BMSC_CMR2_CHECK; + + usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); +} + +static void cmr2_commands(struct cgpu_info *bmsc) +{ + struct BMSC_INFO *info = (struct BMSC_INFO *)(bmsc->device_data); + + if (info->speed_next_work) { + info->speed_next_work = false; + cmr2_command(bmsc, BMSC_CMR2_CMD_SPEED, info->cmr2_speed); + return; + } + + if (info->flash_next_work) { + info->flash_next_work = false; + cmr2_command(bmsc, BMSC_CMR2_CMD_FLASH, BMSC_CMR2_DATA_FLASH_ON); + cgsleep_ms(250); + cmr2_command(bmsc, BMSC_CMR2_CMD_FLASH, BMSC_CMR2_DATA_FLASH_OFF); + cgsleep_ms(250); + cmr2_command(bmsc, BMSC_CMR2_CMD_FLASH, BMSC_CMR2_DATA_FLASH_ON); + cgsleep_ms(250); + cmr2_command(bmsc, BMSC_CMR2_CMD_FLASH, BMSC_CMR2_DATA_FLASH_OFF); + return; + } +} + +static int64_t bmsc_scanwork(struct thr_info *thr) +{ + struct cgpu_info *bmsc = thr->cgpu; + struct BMSC_INFO *info = (struct BMSC_INFO *)(bmsc->device_data); + int ret, err, amount; + unsigned char nonce_bin[BMSC_READ_SIZE]; + struct BMSC_WORK workdata; + char *ob_hex; + uint32_t nonce; + int64_t hash_count = 0; + struct timeval tv_start, tv_finish, elapsed; + struct timeval tv_history_start, tv_history_finish; + double Ti, Xi; + int curr_hw_errors, i; + bool was_hw_error; + struct work *work = NULL; + struct work *worktmp = NULL; + + struct BMSC_HISTORY *history0, *history; + int count; + double Hs, W, fullnonce; + int read_time; + bool limited; + int64_t estimate_hashes; + uint32_t values; + int64_t hash_count_range; + unsigned char workid = 0; + int submitfull = 0; + bool submitnonceok = true; + 
+ // Device is gone + if (bmsc->usbinfo.nodev) + return -1; + + elapsed.tv_sec = elapsed.tv_usec = 0; + +retry: + work = get_work(thr, thr->id); + memset((void *)(&workdata), 0, sizeof(workdata)); + memcpy(&(workdata.midstate), work->midstate, BMSC_MIDSTATE_SIZE); + memcpy(&(workdata.work), work->data + BMSC_WORK_DATA_OFFSET, BMSC_WORK_SIZE); + rev((void *)(&(workdata.midstate)), BMSC_MIDSTATE_SIZE); + rev((void *)(&(workdata.work)), BMSC_WORK_SIZE); + + if(work->midstate[BMSC_MIDSTATE_SIZE-1] == 0xaa) + goto retry; + workdata.workid = work->id; + workid = work->id; + workid = workid & 0x1F; + + // We only want results for the work we are about to send + usb_buffer_clear(bmsc); + + if(info->work_queue[workid]) { + free(info->work_queue[workid]); + info->work_queue[workid] = NULL; + } + info->work_queue[workid] = copy_work(work); + + err = usb_write_ii(bmsc, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err < 0 || amount != sizeof(workdata)) { + applog(LOG_ERR, "%s%i: Comms error (werr=%d amt=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + dev_error(bmsc, REASON_DEV_COMMS_ERROR); + bmsc_initialise(bmsc, info->baud); + goto out; + } + + if (opt_debug) { + ob_hex = bin2hex((void *)(&workdata), sizeof(workdata)); + applog(LOG_DEBUG, "%s%d: sent %s", bmsc->drv->name, bmsc->device_id, ob_hex); + free(ob_hex); + } + + /* Bmsc will return 4 bytes (BMSC_READ_SIZE) nonces or nothing */ + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = bmsc_get_nonce(bmsc, nonce_bin, &tv_start, &tv_finish, thr, info->read_time); + if (ret == BTM_NONCE_ERROR) + goto out; + + // aborted before becoming idle, get new work + if (ret == BTM_NONCE_TIMEOUT || ret == BTM_NONCE_RESTART) { + timersub(&tv_finish, &tv_start, &elapsed); + + // ONLY up to just when it aborted + // We didn't read a reply so we don't subtract BMSC_READ_TIME + estimate_hashes = ((double)(elapsed.tv_sec) + ((double)(elapsed.tv_usec))/((double)1000000)) / info->Hs; + + // If some 
Serial-USB delay allowed the full nonce range to + // complete it can't have done more than a full nonce + if (unlikely(estimate_hashes > 0xffffffff)) + estimate_hashes = 0xffffffff; + + applog(LOG_DEBUG, "%s%d: no nonce = 0x%08lX hashes (%ld.%06lds)", bmsc->drv->name, bmsc->device_id, (long unsigned int)estimate_hashes, elapsed.tv_sec, elapsed.tv_usec); + + hash_count = 0; + goto out; + } + + memcpy((char *)&nonce, nonce_bin, sizeof(nonce_bin)); + nonce = htobe32(nonce); + curr_hw_errors = bmsc->hw_errors; + + workid = nonce_bin[4]; + workid = workid & 0x1F; + worktmp = info->work_queue[workid]; + if(info->start && workid == 0x1f){ + goto out; + }else{ + info->start = false; + } + if(worktmp) { + submitfull = 0; + if(submit_nonce_1(thr, worktmp, nonce, &submitfull)) { + submitnonceok = true; + submit_nonce_2(worktmp); + } else { + if(submitfull) { + submitnonceok = true; + } else { + submitnonceok = false; + } + } + cg_logwork(worktmp, nonce_bin, submitnonceok); + } else { + applog(LOG_ERR, "%s%d: work %02x not find error", bmsc->drv->name, bmsc->device_id, workid); + } + + was_hw_error = (curr_hw_errors > bmsc->hw_errors); + + hash_count = (nonce & info->nonce_mask); + hash_count++; + hash_count *= info->fpga_count; + + hash_count = 0xffffffff; + + if (opt_debug || info->do_bmsc_timing) + timersub(&tv_finish, &tv_start, &elapsed); + + applog(LOG_DEBUG, "%s%d: nonce = 0x%08x = 0x%08lX hashes (%ld.%06lds)", bmsc->drv->name, bmsc->device_id, nonce, (long unsigned int)hash_count, elapsed.tv_sec, elapsed.tv_usec); + +out: + free_work(work); + return hash_count; +} +/* +static int64_t bmsc_scanwork(struct thr_info *thr) +{ + struct cgpu_info *bmsc = thr->cgpu; + struct BMSC_INFO *info = (struct BMSC_INFO *)(bmsc->device_data); + int ret, err, amount; + unsigned char nonce_bin[BMSC_READ_SIZE]; + struct BMSC_WORK workdata; + char *ob_hex; + uint32_t nonce; + int64_t hash_count = 0; + int64_t hash_done = 0; + struct timeval tv_start, tv_finish, elapsed; + struct timeval 
tv_history_start, tv_history_finish; + double Ti, Xi; + int curr_hw_errors; + bool was_hw_error; + struct work *work; + + struct BMSC_HISTORY *history0, *history; + double Hs, W, fullnonce; + bool limited; + int64_t estimate_hashes; + uint32_t values; + int64_t hash_count_range; + + int i = 0, count = 0, nofullcount = 0, readalllen = 0, readlen = 0, read_time = 0, nofull = 0; + bool nonceok = false; + bool noncedup = false; + + char testbuf[256] = {0}; + char testtmp[256] = {0}; + int asicnum = 0; + int k = 0; + + // Device is gone + if (bmsc->usbinfo.nodev) + return -1; + + elapsed.tv_sec = elapsed.tv_usec = 0; + + work = get_work(thr, thr->id); + memset((void *)(&workdata), 0, sizeof(workdata)); + memcpy(&(workdata.midstate), work->midstate, BMSC_MIDSTATE_SIZE); + memcpy(&(workdata.work), work->data + BMSC_WORK_DATA_OFFSET, BMSC_WORK_SIZE); + rev((void *)(&(workdata.midstate)), BMSC_MIDSTATE_SIZE); + rev((void *)(&(workdata.work)), BMSC_WORK_SIZE); + + applog(LOG_DEBUG, "bmsc_scanhash start ------------"); + + readalllen = 0; + readlen = 0; + if(info->work_queue != NULL) { + while (true) { + if (bmsc->usbinfo.nodev) + return -1; + amount = 0; + memset(nonce_bin, 0, sizeof(nonce_bin)); + err = usb_read_once_timeout(bmsc, (char *)nonce_bin+readlen, 5-readlen, &amount, BMSC_WAIT_TIMEOUT, C_GETRESULTS); + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s%i: Comms error (rerr=%d amt=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + dev_error(bmsc, REASON_DEV_COMMS_ERROR); + return 0; + } + if (amount > 0) { + readalllen += amount; + readlen += amount; + if (readlen >= 5) { + nonceok = false; + + memcpy((char *) &nonce, nonce_bin, BMSC_READ_SIZE); + noncedup = false; + for(i = 0; i < BMSC_NONCE_ARRAY_SIZE; i++) { + if(memcmp(nonce_bin, info->nonce_bin[i], BMSC_READ_SIZE) == 0) { + noncedup = true; + break; + } + } + if (!noncedup) { + if(info->nonce_index < 0 || info->nonce_index >= BMSC_NONCE_ARRAY_SIZE) + info->nonce_index = 0; + + 
memcpy(info->nonce_bin[info->nonce_index], nonce_bin, BMSC_READ_SIZE); + info->nonce_index++; + + nonce = htobe32(nonce); + + nofull = 0; + if (submit_nonce_1(thr, info->work_queue, nonce, &nofull)) { + applog(LOG_DEBUG, "Bmsc nonce(0x%08x) match old work", nonce); + submit_nonce_2(info->work_queue); + nonceok = true; + } else { + if(!nofull) { + applog(LOG_DEBUG, "Bmsc nonce(0x%08x) not match old work", nonce); + usb_buffer_clear(bmsc); + inc_hw_errors(thr); + break; + } else { + nofullcount++; + } + } + } else { + applog(LOG_DEBUG, "Bmsc nonce duplication"); + } + + if (nonceok) { + count++; + hash_count = (nonce & info->nonce_mask); + hash_count++; + hash_count *= info->fpga_count; + hash_done += 0xffffffff;//hash_count; + + applog(LOG_DEBUG, "%s%d: nonce = 0x%08x = 0x%08lX hashes (%ld.%06lds)", + bmsc->drv->name, bmsc->device_id, nonce, (long unsigned int )hash_count, elapsed.tv_sec, elapsed.tv_usec); + } + readlen = 0; + } + } else { + //usb_buffer_clear(bmsc); + applog(LOG_DEBUG, "bmsc_scanhash usb_read_once_timeout read time out"); + break; + } + } + } + + err = usb_write(bmsc, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err < 0 || amount != sizeof(workdata)) { + applog(LOG_ERR, "%s%i: Comms error (werr=%d amt=%d)", bmsc->drv->name, bmsc->device_id, err, amount); + dev_error(bmsc, REASON_DEV_COMMS_ERROR); + bmsc_initialise(bmsc, info->baud); + return 0; + } + + if (opt_debug) { + ob_hex = bin2hex((char *)&workdata, sizeof(workdata)); + applog(LOG_DEBUG, "%s%d: sent %s", bmsc->drv->name, bmsc->device_id, ob_hex); + free(ob_hex); + } + + cgtime(&tv_start); + readlen = 0; + while(true) { + if (bmsc->usbinfo.nodev) + return -1; + amount = 0; + memset(nonce_bin, 0, sizeof(nonce_bin)); + err = usb_read_once_timeout(bmsc, (char *)nonce_bin+readlen, 5-readlen, &amount, BMSC_WAIT_TIMEOUT, C_GETRESULTS); + cgtime(&tv_finish); + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s%i: Comms error (rerr=%d amt=%d)", bmsc->drv->name, 
bmsc->device_id, err, amount); + dev_error(bmsc, REASON_DEV_COMMS_ERROR); + return 0; + } + if(amount > 0) { + readalllen += amount; + readlen += amount; + if (readlen >= 5) { + nonceok = false; + + memcpy((char *) &nonce, nonce_bin, BMSC_READ_SIZE); + noncedup = false; + for(i = 0; i < BMSC_NONCE_ARRAY_SIZE; i++) { + if(memcmp(nonce_bin, info->nonce_bin[i], BMSC_READ_SIZE) == 0) { + noncedup = true; + break; + } + } + if(!noncedup) { + if(info->nonce_index < 0 || info->nonce_index >= BMSC_NONCE_ARRAY_SIZE) + info->nonce_index = 0; + + memcpy(info->nonce_bin[info->nonce_index], nonce_bin, BMSC_READ_SIZE); + info->nonce_index++; + + nonce = htobe32(nonce); + nofull = 0; + if (submit_nonce_1(thr, work, nonce, &nofull)) { + applog(LOG_DEBUG, "Bmsc nonce(0x%08x) match current work", nonce); + submit_nonce_2(work); + nonceok = true; + } else { + if(!nofull) { + if (info->work_queue != NULL) { + nofull = 0; + if (submit_nonce_1(thr, info->work_queue, nonce, &nofull)) { + applog(LOG_DEBUG, "Bmsc nonce(0x%08x) match old work", nonce); + submit_nonce_2(info->work_queue); + nonceok = true; + } else { + if(!nofull) { + applog(LOG_DEBUG, "Bmsc nonce(0x%08x) not match work", nonce); + usb_buffer_clear(bmsc); + inc_hw_errors(thr); + break; + } else { + nofullcount++; + } + } + } else { + applog(LOG_DEBUG, "Bmsc nonce(0x%08x) no old work", nonce); + } + } else { + nofullcount++; + } + } + } else { + applog(LOG_DEBUG, "Bmsc nonce duplication"); + } + + if(nonceok) { + count++; + hash_count = (nonce & info->nonce_mask); + hash_count++; + hash_count *= info->fpga_count; + hash_done += 0xffffffff;//hash_count; + + applog(LOG_DEBUG, "%s%d: nonce = 0x%08x = 0x%08lX hashes (%ld.%06lds)", + bmsc->drv->name, bmsc->device_id, nonce, (long unsigned int )hash_count, elapsed.tv_sec, elapsed.tv_usec); + } + readlen = 0; + } + } else { + applog(LOG_DEBUG, "bmsc_scanhash usb_read_once_timeout read time out"); + } + + read_time = SECTOMS(tdiff(&tv_finish, &tv_start)); + if(read_time >= 
info->read_time) { + if (readalllen > 0) + applog(LOG_DEBUG, "Bmsc Read: Nonce ok:%d below:%d in %d ms", count, nofullcount, read_time); + else + applog(LOG_DEBUG, "Bmsc Read: No nonce work %d for %d ms", work->id, read_time); + + break; + } + } + + if(info->work_queue != NULL) { + free_work(info->work_queue); + info->work_queue = NULL; + } + + info->work_queue = copy_work(work); + + applog(LOG_DEBUG, "bmsc_scanhash stop ------------"); +out: + free_work(work); + return hash_count; +}*/ + +static struct api_data *bmsc_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + struct BMSC_INFO *info = (struct BMSC_INFO *)(cgpu->device_data); + + // Warning, access to these is not locked - but we don't really + // care since hashing performance is way more important than + // locking access to displaying API debug 'stats' + // If locking becomes an issue for any of them, use copy_data=true also + root = api_add_int(root, "read_time", &(info->read_time), false); + root = api_add_int(root, "read_time_limit", &(info->read_time_limit), false); + root = api_add_double(root, "fullnonce", &(info->fullnonce), false); + root = api_add_int(root, "count", &(info->count), false); + root = api_add_hs(root, "Hs", &(info->Hs), false); + root = api_add_double(root, "W", &(info->W), false); + root = api_add_uint(root, "total_values", &(info->values), false); + root = api_add_uint64(root, "range", &(info->hash_count_range), false); + root = api_add_uint64(root, "history_count", &(info->history_count), false); + root = api_add_timeval(root, "history_time", &(info->history_time), false); + root = api_add_uint(root, "min_data_count", &(info->min_data_count), false); + root = api_add_uint(root, "timing_values", &(info->history[0].values), false); + root = api_add_const(root, "timing_mode", timing_mode_str(info->timing_mode), false); + root = api_add_bool(root, "is_timing", &(info->do_bmsc_timing), false); + root = api_add_int(root, "baud", &(info->baud), false); + root = 
api_add_int(root, "work_division", &(info->work_division), false); + root = api_add_int(root, "fpga_count", &(info->fpga_count), false); + + return root; +} + +static void bmsc_statline_before(char *buf, size_t bufsiz, struct cgpu_info *cgpu) +{ + struct BMSC_INFO *info = (struct BMSC_INFO *)(cgpu->device_data); + + if (info->ident == IDENT_CMR2 && info->cmr2_speed > 0) + tailsprintf(buf, bufsiz, "%5.1fMhz", (float)(info->cmr2_speed) * BMSC_CMR2_SPEED_FACTOR); + else + tailsprintf(buf, bufsiz, " "); + + tailsprintf(buf, bufsiz, " | "); +} + +static void bmsc_shutdown(__maybe_unused struct thr_info *thr) +{ + // TODO: ? +} + +static void bmsc_identify(struct cgpu_info *cgpu) +{ + struct BMSC_INFO *info = (struct BMSC_INFO *)(cgpu->device_data); + + if (info->ident == IDENT_CMR2) + info->flash_next_work = true; +} + +static char *bmsc_set(struct cgpu_info *cgpu, char *option, char *setting, char *replybuf) +{ + struct BMSC_INFO *info = (struct BMSC_INFO *)(cgpu->device_data); + int val; + + if (info->ident != IDENT_CMR2) { + strcpy(replybuf, "no set options available"); + return replybuf; + } + + if (strcasecmp(option, "help") == 0) { + sprintf(replybuf, "clock: range %d-%d", + BMSC_CMR2_SPEED_MIN_INT, BMSC_CMR2_SPEED_MAX_INT); + return replybuf; + } + + if (strcasecmp(option, "clock") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing clock setting"); + return replybuf; + } + + val = atoi(setting); + if (val < BMSC_CMR2_SPEED_MIN_INT || val > BMSC_CMR2_SPEED_MAX_INT) { + sprintf(replybuf, "invalid clock: '%s' valid range %d-%d", + setting, + BMSC_CMR2_SPEED_MIN_INT, + BMSC_CMR2_SPEED_MAX_INT); + } + + info->cmr2_speed = CMR2_INT_TO_SPEED(val); + info->speed_next_work = true; + + return NULL; + } + + sprintf(replybuf, "Unknown option: %s", option); + return replybuf; +} + +struct device_drv bmsc_drv = { + .drv_id = DRIVER_bmsc, + .dname = "Bitmain", + .name = "BTM", + .drv_detect = bmsc_detect, + .hash_work = &hash_driver_work, + .get_api_stats = 
bmsc_api_stats, + .get_statline_before = bmsc_statline_before, + .set_device = bmsc_set, + .identify_device = bmsc_identify, + .thread_prepare = bmsc_prepare, + .scanwork = bmsc_scanwork, + .thread_shutdown = bmsc_shutdown, +}; diff --git a/driver-btm-c5.c b/driver-btm-c5.c new file mode 100644 index 0000000..7286f74 --- /dev/null +++ b/driver-btm-c5.c @@ -0,0 +1,4824 @@ +#include "config.h" +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifndef WIN32 +#include +#include +#include +#include +#ifndef O_CLOEXEC +#define O_CLOEXEC 0 +#endif +#else +#include "compat.h" +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include "elist.h" +#include "miner.h" +#include "usbutils.h" +#include "hexdump.c" +#include "util.h" +#include "driver-btm-c5.h" +#include "sha2_c5.h" + +//global various +int fd; // axi fpga +int fd_fpga_mem; // fpga memory +int fpga_version; +int pcb_version; +unsigned int *axi_fpga_addr = NULL; // axi address +unsigned int *fpga_mem_addr = NULL; // fpga memory address +unsigned int *nonce2_jobid_address = NULL; // the value should be filled in NONCE2_AND_JOBID_STORE_ADDRESS +unsigned int *job_start_address_1 = NULL; // the value should be filled in JOB_START_ADDRESS +unsigned int *job_start_address_2 = NULL; // the value should be filled in JOB_START_ADDRESS +struct thr_info *read_nonce_reg_id; // thread id for read nonce and register +struct thr_info *check_system_work_id; // thread id for check system +struct thr_info *read_temp_id; +struct thr_info *read_hash_rate; +struct thr_info *pic_heart_beat; +struct thr_info *change_voltage_to_old; +struct thr_info *send_mac_thr; + + + +bool gBegin_get_nonce = false; +struct timeval tv_send_job = {0, 0}; + +pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_mutex_t nonce_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_mutex_t reg_read_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_mutex_t iic_mutex = 
PTHREAD_MUTEX_INITIALIZER; + +uint64_t h = 0; + + +uint32_t given_id = 2; +uint32_t c_coinbase_padding = 0; +uint32_t c_merkles_num = 0; +uint32_t l_coinbase_padding = 0; +uint32_t l_merkles_num = 0; +int last_temperature = 0, temp_highest = 0; + +bool opt_bitmain_fan_ctrl = false; +int opt_bitmain_fan_pwm = 0; +int opt_bitmain_c5_freq = 600; +int opt_bitmain_c5_voltage = 176; +int ADD_FREQ = 0; +int ADD_FREQ1 = 0; +uint8_t de_voltage = 176; + +bool opt_bitmain_new_cmd_type_vil = false; +bool status_error = false; +bool once_error = false; +bool iic_ok = false; +int check_iic = 0; +bool update_temp =false; +uint64_t rate[BITMAIN_MAX_CHAIN_NUM] = {0}; +int rate_error[BITMAIN_MAX_CHAIN_NUM] = {0}; +char displayed_rate[BITMAIN_MAX_CHAIN_NUM][16]; +uint8_t chain_voltage[BITMAIN_MAX_CHAIN_NUM] = {0}; +unsigned char hash_board_id[BITMAIN_MAX_CHAIN_NUM][12]; + + +#define id_string_len 34 +#define AUTH_URL "auth.minerlink.com" +#define PORT "7000" + +static bool need_send = true; +char * mac; +bool stop_mining = false; +char hash_board_id_string[BITMAIN_MAX_CHAIN_NUM*id_string_len]; + + +struct nonce_content temp_nonce_buf[MAX_RETURNED_NONCE_NUM]; +struct reg_content temp_reg_buf[MAX_RETURNED_NONCE_NUM]; +struct nonce_buf nonce_read_out; +struct reg_buf reg_value_buf; + + +#define USE_IIC 1 +#define TEMP_CALI 0 +#define MID_OR_BOT 1 + + +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) + + +void *gpio0_vaddr=NULL; +struct all_parameters *dev; +unsigned int is_first_job = 0; + +//other equipment related + +// -------------------------------------------------------------- +// CRC16 check table +// -------------------------------------------------------------- +const uint8_t chCRCHTalbe[] = // CRC high byte table +{ + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 
0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x00, 0xC1, 0x81, 0x40, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40, 0x01, 0xC0, 0x80, 0x41, 0x01, 0xC0, 0x80, 0x41, + 0x00, 0xC1, 0x81, 0x40 +}; + +const uint8_t chCRCLTalbe[] = // CRC low byte table +{ + 0x00, 0xC0, 0xC1, 0x01, 0xC3, 0x03, 0x02, 0xC2, 0xC6, 0x06, 0x07, 0xC7, + 0x05, 0xC5, 0xC4, 0x04, 0xCC, 0x0C, 0x0D, 0xCD, 0x0F, 0xCF, 0xCE, 0x0E, + 0x0A, 0xCA, 0xCB, 0x0B, 0xC9, 0x09, 0x08, 0xC8, 0xD8, 0x18, 0x19, 0xD9, + 0x1B, 0xDB, 0xDA, 0x1A, 0x1E, 0xDE, 0xDF, 0x1F, 0xDD, 0x1D, 0x1C, 0xDC, + 0x14, 0xD4, 0xD5, 0x15, 0xD7, 0x17, 0x16, 0xD6, 0xD2, 0x12, 0x13, 0xD3, + 0x11, 0xD1, 0xD0, 0x10, 0xF0, 0x30, 0x31, 0xF1, 0x33, 0xF3, 0xF2, 0x32, + 0x36, 0xF6, 0xF7, 0x37, 0xF5, 0x35, 0x34, 0xF4, 0x3C, 0xFC, 0xFD, 0x3D, + 0xFF, 0x3F, 0x3E, 0xFE, 0xFA, 0x3A, 0x3B, 0xFB, 0x39, 0xF9, 0xF8, 0x38, + 0x28, 0xE8, 
0xE9, 0x29, 0xEB, 0x2B, 0x2A, 0xEA, 0xEE, 0x2E, 0x2F, 0xEF, + 0x2D, 0xED, 0xEC, 0x2C, 0xE4, 0x24, 0x25, 0xE5, 0x27, 0xE7, 0xE6, 0x26, + 0x22, 0xE2, 0xE3, 0x23, 0xE1, 0x21, 0x20, 0xE0, 0xA0, 0x60, 0x61, 0xA1, + 0x63, 0xA3, 0xA2, 0x62, 0x66, 0xA6, 0xA7, 0x67, 0xA5, 0x65, 0x64, 0xA4, + 0x6C, 0xAC, 0xAD, 0x6D, 0xAF, 0x6F, 0x6E, 0xAE, 0xAA, 0x6A, 0x6B, 0xAB, + 0x69, 0xA9, 0xA8, 0x68, 0x78, 0xB8, 0xB9, 0x79, 0xBB, 0x7B, 0x7A, 0xBA, + 0xBE, 0x7E, 0x7F, 0xBF, 0x7D, 0xBD, 0xBC, 0x7C, 0xB4, 0x74, 0x75, 0xB5, + 0x77, 0xB7, 0xB6, 0x76, 0x72, 0xB2, 0xB3, 0x73, 0xB1, 0x71, 0x70, 0xB0, + 0x50, 0x90, 0x91, 0x51, 0x93, 0x53, 0x52, 0x92, 0x96, 0x56, 0x57, 0x97, + 0x55, 0x95, 0x94, 0x54, 0x9C, 0x5C, 0x5D, 0x9D, 0x5F, 0x9F, 0x9E, 0x5E, + 0x5A, 0x9A, 0x9B, 0x5B, 0x99, 0x59, 0x58, 0x98, 0x88, 0x48, 0x49, 0x89, + 0x4B, 0x8B, 0x8A, 0x4A, 0x4E, 0x8E, 0x8F, 0x4F, 0x8D, 0x4D, 0x4C, 0x8C, + 0x44, 0x84, 0x85, 0x45, 0x87, 0x47, 0x46, 0x86, 0x82, 0x42, 0x43, 0x83, + 0x41, 0x81, 0x80, 0x40 +}; + + +//crc +uint16_t CRC16(const uint8_t* p_data, uint16_t w_len) +{ + uint8_t chCRCHi = 0xFF; // CRC high byte initialize + uint8_t chCRCLo = 0xFF; // CRC low byte initialize + uint16_t wIndex = 0; // CRC cycling index + + while (w_len--) + { + wIndex = chCRCLo ^ *p_data++; + chCRCLo = chCRCHi ^ chCRCHTalbe[wIndex]; + chCRCHi = chCRCLTalbe[wIndex]; + } + return ((chCRCHi << 8) | chCRCLo); +} + +unsigned char CRC5(unsigned char *ptr, unsigned char len) +{ + unsigned char i, j, k; + unsigned char crc = 0x1f; + + unsigned char crcin[5] = {1, 1, 1, 1, 1}; + unsigned char crcout[5] = {1, 1, 1, 1, 1}; + unsigned char din = 0; + + j = 0x80; + k = 0; + for (i = 0; i < len; i++) + { + if (*ptr & j) + { + din = 1; + } + else + { + din = 0; + } + crcout[0] = crcin[4] ^ din; + crcout[1] = crcin[0]; + crcout[2] = crcin[1] ^ crcin[4] ^ din; + crcout[3] = crcin[2]; + crcout[4] = crcin[3]; + + j = j >> 1; + k++; + if (k == 8) + { + j = 0x80; + k = 0; + ptr++; + } + memcpy(crcin, crcout, 5); + } + crc = 0; + if(crcin[4]) + 
{ + crc |= 0x10; + } + if(crcin[3]) + { + crc |= 0x08; + } + if(crcin[2]) + { + crc |= 0x04; + } + if(crcin[1]) + { + crc |= 0x02; + } + if(crcin[0]) + { + crc |= 0x01; + } + return crc; +} + +// pic +unsigned int get_pic_iic() +{ + int ret = -1; + ret = *(axi_fpga_addr + IIC_COMMAND); + + applog(LOG_DEBUG,"%s: IIC_COMMAND is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +unsigned char set_pic_iic(unsigned int data) +{ + unsigned int ret=0; + unsigned char ret_data = 0; + + *((unsigned int *)(axi_fpga_addr + IIC_COMMAND)) = data & 0x7fffffff; + applog(LOG_DEBUG,"%s: set IIC_COMMAND is 0x%x\n", __FUNCTION__, data & 0x7fffffff); + + while(1) + { + ret = get_pic_iic(); + if(ret & 0x80000000) + { + ret_data = (unsigned char)(ret & 0x000000ff); + return ret_data; + } + else + { + applog(LOG_DEBUG,"%s: waiting write pic iic\n", __FUNCTION__); + cgsleep_us(1000); + } + } +} + +unsigned char write_pic_iic(bool read, bool reg_addr_valid, unsigned char reg_addr, unsigned char chain, unsigned char data) +{ + unsigned int value = 0x00000000; + unsigned char ret = 0; + + if(read) + { + value |= IIC_READ; + } + + if(reg_addr_valid) + { + value |= IIC_REG_ADDR_VALID; + value |= IIC_REG_ADDR(reg_addr); + } + + value |= IIC_ADDR_HIGH_4_BIT; + + value |= IIC_CHAIN_NUMBER(chain); + + value |= data; + + ret = set_pic_iic(value); + + return ret; +} + +void send_pic_command(unsigned char chain) +{ + write_pic_iic(false, false, 0x0, chain, PIC_COMMAND_1); + write_pic_iic(false, false, 0x0, chain, PIC_COMMAND_2); +} + + +void set_pic_iic_flash_addr_pointer(unsigned char chain, unsigned char addr_H, unsigned char addr_L) +{ + send_pic_command(chain); + write_pic_iic(false, false, 0x0, chain, SET_PIC_FLASH_POINTER); + write_pic_iic(false, false, 0x0, chain, addr_H); + write_pic_iic(false, false, 0x0, chain, addr_L); +} + +void send_data_to_pic_iic(unsigned char chain, unsigned char command, unsigned char *buf, unsigned char length) +{ + unsigned char i=0; + + write_pic_iic(false, false, 
0x0, chain, command); + for(i=0; ichain_exist[i] == 1) + { + enable_pic_dc_dc(i); + cgsleep_ms(1); + } + } +} + +void set_pic_voltage_all(int voltage) +{ + unsigned char i; + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + cgsleep_ms(100); + jump_to_app_from_loader(i); + cgsleep_ms(100); + set_pic_voltage(i,voltage); + cgsleep_ms(1); + } + } +} + +void enable_pic_dac(unsigned char chain) +{ + send_pic_command(chain); + write_pic_iic(false, false, 0x0, chain, ENABLE_VOLTAGE); + write_pic_iic(false, false, 0x0, chain, 1); +} + +void disable_pic_dac(unsigned char chain) +{ + send_pic_command(chain); + write_pic_iic(false, false, 0x0, chain, ENABLE_VOLTAGE); + write_pic_iic(false, false, 0x0, chain, 0); +} + + +void pic_heart_beat_each_chain(unsigned char chain) +{ + send_pic_command(chain); + write_pic_iic(false, false, 0x0, chain, SEND_HEART_BEAT); +} + +//FPGA related +int get_nonce2_and_job_id_store_address(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + NONCE2_AND_JOBID_STORE_ADDRESS)); + applog(LOG_DEBUG,"%s: NONCE2_AND_JOBID_STORE_ADDRESS is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_nonce2_and_job_id_store_address(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + NONCE2_AND_JOBID_STORE_ADDRESS)) = value; + applog(LOG_DEBUG,"%s: set NONCE2_AND_JOBID_STORE_ADDRESS is 0x%x\n", __FUNCTION__, value); + get_nonce2_and_job_id_store_address(); +} + +int get_job_start_address(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + JOB_START_ADDRESS)); + applog(LOG_DEBUG,"%s: JOB_START_ADDRESS is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_job_start_address(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + JOB_START_ADDRESS)) = value; + applog(LOG_DEBUG,"%s: set JOB_START_ADDRESS is 0x%x\n", __FUNCTION__, value); + get_job_start_address(); +} + +int get_QN_write_data_command(void) +{ + int ret = -1; + ret = *((axi_fpga_addr + QN_WRITE_DATA_COMMAND)); + 
applog(LOG_DEBUG,"%s: QN_WRITE_DATA_COMMAND is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_QN_write_data_command(unsigned int value) +{ + *(axi_fpga_addr + QN_WRITE_DATA_COMMAND) = value; + applog(LOG_DEBUG,"%s: set QN_WRITE_DATA_COMMAND is 0x%x\n", __FUNCTION__, value); + get_QN_write_data_command(); +} + +int bitmain_axi_init() +{ + unsigned int data; + int ret=0; + + fd = open("/dev/axi_fpga_dev", O_RDWR); + if(fd < 0) + { + applog(LOG_DEBUG,"/dev/axi_fpga_dev open failed. fd = %d\n", fd); + perror("open"); + return -1; + } + + axi_fpga_addr = mmap(NULL, TOTAL_LEN, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); + if(!axi_fpga_addr) + { + applog(LOG_DEBUG,"mmap axi_fpga_addr failed. axi_fpga_addr = 0x%x\n", axi_fpga_addr); + return -1; + } + applog(LOG_DEBUG,"mmap axi_fpga_addr = 0x%x\n", axi_fpga_addr); + + //check the value in address 0xff200000 + data = *axi_fpga_addr; + if((data & 0x0000FFFF) != HARDWARE_VERSION_VALUE) + { + applog(LOG_DEBUG,"data = 0x%x, and it's not equal to HARDWARE_VERSION_VALUE : 0x%x\n", data, HARDWARE_VERSION_VALUE); + //return -1; + } + applog(LOG_DEBUG,"axi_fpga_addr data = 0x%x\n", data); + + fd_fpga_mem = open("/dev/fpga_mem", O_RDWR); + if(fd_fpga_mem < 0) + { + applog(LOG_DEBUG,"/dev/fpga_mem open failed. fd_fpga_mem = %d\n", fd_fpga_mem); + perror("open"); + return -1; + } + + fpga_mem_addr = mmap(NULL, FPGA_MEM_TOTAL_LEN, PROT_READ|PROT_WRITE, MAP_SHARED, fd_fpga_mem, 0); + if(!fpga_mem_addr) + { + applog(LOG_DEBUG,"mmap fpga_mem_addr failed. 
fpga_mem_addr = 0x%x\n", fpga_mem_addr); + return -1; + } + applog(LOG_DEBUG,"mmap fpga_mem_addr = 0x%x\n", fpga_mem_addr); + + nonce2_jobid_address = fpga_mem_addr; + job_start_address_1 = fpga_mem_addr + NONCE2_AND_JOBID_STORE_SPACE/sizeof(int); + job_start_address_2 = fpga_mem_addr + (NONCE2_AND_JOBID_STORE_SPACE + JOB_STORE_SPACE)/sizeof(int); + + applog(LOG_DEBUG,"job_start_address_1 = 0x%x\n", job_start_address_1); + applog(LOG_DEBUG,"job_start_address_2 = 0x%x\n", job_start_address_2); + + set_nonce2_and_job_id_store_address(PHY_MEM_NONCE2_JOBID_ADDRESS); + set_job_start_address(PHY_MEM_JOB_START_ADDRESS_1); + + + dev = calloc(sizeof(struct all_parameters), sizeof(char)); + if(!dev) + { + applog(LOG_DEBUG,"kmalloc for dev failed.\n"); + return -1; + } + else + { + dev->current_job_start_address = job_start_address_1; + applog(LOG_DEBUG,"kmalloc for dev success.\n"); + } + return ret; +} + +int bitmain_axi_close() +{ + int ret = 0; + + ret = munmap((void *)axi_fpga_addr, TOTAL_LEN); + if(ret<0) + { + applog(LOG_DEBUG,"munmap failed!\n"); + } + + ret = munmap((void *)fpga_mem_addr, FPGA_MEM_TOTAL_LEN); + if(ret<0) + { + applog(LOG_DEBUG,"munmap failed!\n"); + } + + //free_pages((unsigned long)nonce2_jobid_address, NONCE2_AND_JOBID_STORE_SPACE_ORDER); + //free(temp_job_start_address_1); + //free(temp_job_start_address_2); + + close(fd); + close(fd_fpga_mem); +} + +int get_fan_control(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + FAN_CONTROL)); + applog(LOG_DEBUG,"%s: FAN_CONTROL is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_fan_control(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + FAN_CONTROL)) = value; + applog(LOG_DEBUG,"%s: set FAN_CONTROL is 0x%x\n", __FUNCTION__, value); + get_fan_control(); +} + +int get_hash_on_plug(void) +{ + int ret = -1; + ret = *(axi_fpga_addr + HASH_ON_PLUG); + + applog(LOG_DEBUG,"%s: HASH_ON_PLUG is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +int get_hardware_version(void) +{ 
+ int ret = -1; + ret = *((int *)(axi_fpga_addr + HARDWARE_VERSION)); + + applog(LOG_DEBUG,"%s: HARDWARE_VERSION is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +int get_fan_speed(unsigned char *fan_id, unsigned int *fan_speed) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + FAN_SPEED)); + *fan_speed = 0x000000ff & ret; + *fan_id = (unsigned char)(0x00000007 & (ret >> 8)); + if(*fan_speed > 0) + { + applog(LOG_DEBUG,"%s: fan_id is 0x%x, fan_speed is 0x%x\n", __FUNCTION__, *fan_id, *fan_speed); + } + return ret; +} + +int get_temperature_0_3(void) +{ + int ret = -1; + ret = *((int *)(axi_fpga_addr + TEMPERATURE_0_3)); + //applog(LOG_DEBUG,"%s: TEMPERATURE_0_3 is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +int get_temperature_4_7(void) +{ + int ret = -1; + ret = *((int *)(axi_fpga_addr + TEMPERATURE_4_7)); + //applog(LOG_DEBUG,"%s: TEMPERATURE_4_7 is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +int get_temperature_8_11(void) +{ + int ret = -1; + ret = *((int *)(axi_fpga_addr + TEMPERATURE_8_11)); + //applog(LOG_DEBUG,"%s: TEMPERATURE_8_11 is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +int get_temperature_12_15(void) +{ + int ret = -1; + ret = *((int *)(axi_fpga_addr + TEMPERATURE_12_15)); + //applog(LOG_DEBUG,"%s: TEMPERATURE_12_15 is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +int get_time_out_control(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + TIME_OUT_CONTROL)); + applog(LOG_DEBUG,"%s: TIME_OUT_CONTROL is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_time_out_control(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + TIME_OUT_CONTROL)) = value; + applog(LOG_DEBUG,"%s: set FAN_CONTROL is 0x%x\n", __FUNCTION__, value); + get_time_out_control(); +} + +int get_BC_command_buffer(unsigned int *buf) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + BC_COMMAND_BUFFER)); + *(buf + 0) = ret; //this is for FIL + ret = *((unsigned int *)(axi_fpga_addr + BC_COMMAND_BUFFER + 1)); + 
*(buf + 1) = ret; + ret = *((unsigned int *)(axi_fpga_addr + BC_COMMAND_BUFFER + 2)); + *(buf + 2) = ret; + applog(LOG_DEBUG,"%s: BC_COMMAND_BUFFER buf[0]: 0x%x, buf[1]: 0x%x, buf[2]: 0x%x\n", __FUNCTION__, *(buf + 0), *(buf + 1), *(buf + 2)); + return ret; +} + +void set_BC_command_buffer(unsigned int *value) +{ + unsigned int buf[4] = {0}; + *((unsigned int *)(axi_fpga_addr + BC_COMMAND_BUFFER)) = *(value + 0); //this is for FIL + *((unsigned int *)(axi_fpga_addr + BC_COMMAND_BUFFER + 1)) = *(value + 1); + *((unsigned int *)(axi_fpga_addr + BC_COMMAND_BUFFER + 2)) = *(value + 2); + applog(LOG_DEBUG,"%s: set BC_COMMAND_BUFFER value[0]: 0x%x, value[1]: 0x%x, value[2]: 0x%x\n", __FUNCTION__, *(value + 0), *(value + 1), *(value + 2)); + get_BC_command_buffer(buf); +} + +int get_nonce_number_in_fifo(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + NONCE_NUMBER_IN_FIFO)); + //applog(LOG_DEBUG,"%s: NONCE_NUMBER_IN_FIFO is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +int get_return_nonce(unsigned int *buf) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + RETURN_NONCE)); + *(buf + 0) = ret; + ret = *((unsigned int *)(axi_fpga_addr + RETURN_NONCE + 1)); + *(buf + 1) = ret; //there is nonce3 + //applog(LOG_DEBUG,"%s: RETURN_NONCE buf[0] is 0x%x, buf[1] is 0x%x\n", __FUNCTION__, *(buf + 0), *(buf + 1)); + return ret; +} + +int get_BC_write_command(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + BC_WRITE_COMMAND)); + applog(LOG_DEBUG,"%s: BC_WRITE_COMMAND is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_BC_write_command(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + BC_WRITE_COMMAND)) = value; + //applog(LOG_DEBUG,"%s: set BC_WRITE_COMMAND is 0x%x\n", __FUNCTION__, value); + + if(value & BC_COMMAND_BUFFER_READY) + { + while(get_BC_write_command() & BC_COMMAND_BUFFER_READY) + { + cgsleep_ms(1); + //applog(LOG_DEBUG,"%s ---\n", __FUNCTION__); + } + } + else + { + get_BC_write_command(); + } +} + 
+int get_ticket_mask(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + TICKET_MASK_FPGA)); + applog(LOG_DEBUG,"%s: TICKET_MASK_FPGA is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_ticket_mask(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + TICKET_MASK_FPGA)) = value; + applog(LOG_DEBUG,"%s: set TICKET_MASK_FPGA is 0x%x\n", __FUNCTION__, value); + get_ticket_mask(); +} + +int get_job_id(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + JOB_ID)); + applog(LOG_DEBUG,"%s: JOB_ID is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_job_id(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + JOB_ID)) = value; + applog(LOG_DEBUG,"%s: set JOB_ID is 0x%x\n", __FUNCTION__, value); + get_job_id(); +} + +int get_job_length(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + JOB_LENGTH)); + applog(LOG_DEBUG,"%s: JOB_LENGTH is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_job_length(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + JOB_LENGTH)) = value; + applog(LOG_DEBUG,"%s: set JOB_LENGTH is 0x%x\n", __FUNCTION__, value); + get_job_id(); +} + + +int get_block_header_version(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + BLOCK_HEADER_VERSION)); + applog(LOG_DEBUG,"%s: BLOCK_HEADER_VERSION is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_block_header_version(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + BLOCK_HEADER_VERSION)) = value; + applog(LOG_DEBUG,"%s: set BLOCK_HEADER_VERSION is 0x%x\n", __FUNCTION__, value); + get_block_header_version(); +} + +int get_time_stamp() +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + TIME_STAMP)); + applog(LOG_DEBUG,"%s: TIME_STAMP is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_time_stamp(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + TIME_STAMP)) = value; + applog(LOG_DEBUG,"%s: set TIME_STAMP is 0x%x\n", __FUNCTION__, value); + 
get_time_stamp(); +} + +int get_target_bits(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + TARGET_BITS)); + applog(LOG_DEBUG,"%s: TARGET_BITS is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_target_bits(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + TARGET_BITS)) = value; + applog(LOG_DEBUG,"%s: set TARGET_BITS is 0x%x\n", __FUNCTION__, value); + get_target_bits(); +} + +int get_pre_header_hash(unsigned int *buf) +{ + int ret = -1; + *(buf + 0) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH)); + *(buf + 1) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH + 1)); + *(buf + 2) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH + 2)); + *(buf + 3) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH + 3)); + *(buf + 4) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH + 4)); + *(buf + 5) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH + 5)); + *(buf + 6) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH + 6)); + *(buf + 7) = *((unsigned int *)(axi_fpga_addr + PRE_HEADER_HASH + 7)); + applog(LOG_DEBUG,"%s: PRE_HEADER_HASH buf[0]: 0x%x, buf[1]: 0x%x, buf[2]: 0x%x, buf[3]: 0x%x, buf[4]: 0x%x, buf[5]: 0x%x, buf[6]: 0x%x, buf[7]: 0x%x\n", __FUNCTION__, *(buf + 0), *(buf + 1), *(buf + 2), *(buf + 3), *(buf + 4), *(buf + 5), *(buf + 6), *(buf + 7)); + ret = *(buf + 7); + return ret; +} + +void set_pre_header_hash(unsigned int *value) +{ + unsigned int buf[8] = {0}; + *(axi_fpga_addr + PRE_HEADER_HASH) = *(value + 0); + *(axi_fpga_addr + PRE_HEADER_HASH + 1) = *(value + 1); + *(axi_fpga_addr + PRE_HEADER_HASH + 2) = *(value + 2); + *(axi_fpga_addr + PRE_HEADER_HASH + 3) = *(value + 3); + *(axi_fpga_addr + PRE_HEADER_HASH + 4) = *(value + 4); + *(axi_fpga_addr + PRE_HEADER_HASH + 5) = *(value + 5); + *(axi_fpga_addr + PRE_HEADER_HASH + 6) = *(value + 6); + *(axi_fpga_addr + PRE_HEADER_HASH + 7) = *(value + 7); + applog(LOG_DEBUG,"%s: set PRE_HEADER_HASH value[0]: 0x%x, value[1]: 0x%x, value[2]: 0x%x, value[3]: 
0x%x, value[4]: 0x%x, value[5]: 0x%x, value[6]: 0x%x, value[7]: 0x%x\n", __FUNCTION__, *(value + 0), *(value + 1), *(value + 2), *(value + 3), *(value + 4), *(value + 5), *(value + 6), *(value + 7)); + //get_pre_header_hash(buf); +} + +int get_coinbase_length_and_nonce2_length(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + COINBASE_AND_NONCE2_LENGTH)); + applog(LOG_DEBUG,"%s: COINBASE_AND_NONCE2_LENGTH is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_coinbase_length_and_nonce2_length(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + COINBASE_AND_NONCE2_LENGTH)) = value; + applog(LOG_DEBUG,"%s: set COINBASE_AND_NONCE2_LENGTH is 0x%x\n", __FUNCTION__, value); + get_coinbase_length_and_nonce2_length(); +} + +int get_work_nonce2(unsigned int *buf) +{ + int ret = -1; + *(buf + 0) = *((unsigned int *)(axi_fpga_addr + WORK_NONCE_2)); + *(buf + 1) = *((unsigned int *)(axi_fpga_addr + WORK_NONCE_2 + 1)); + applog(LOG_DEBUG,"%s: WORK_NONCE_2 buf[0]: 0x%x, buf[1]: 0x%x\n", __FUNCTION__, *(buf + 0), *(buf + 1)); + return ret; +} + +void set_work_nonce2(unsigned int *value) +{ + unsigned int buf[2] = {0}; + *((unsigned int *)(axi_fpga_addr + WORK_NONCE_2)) = *(value + 0); + *((unsigned int *)(axi_fpga_addr + WORK_NONCE_2 + 1)) = *(value + 1); + applog(LOG_DEBUG,"%s: set WORK_NONCE_2 value[0]: 0x%x, value[1]: 0x%x\n", __FUNCTION__, *(value + 0), *(value + 1)); + get_work_nonce2(buf); +} + +int get_merkle_bin_number(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + MERKLE_BIN_NUMBER)); + ret = ret & 0x0000ffff; + applog(LOG_DEBUG,"%s: MERKLE_BIN_NUMBER is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_merkle_bin_number(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + MERKLE_BIN_NUMBER)) = value & 0x0000ffff; + applog(LOG_DEBUG,"%s: set MERKLE_BIN_NUMBER is 0x%x\n", __FUNCTION__, value & 0x0000ffff); + get_merkle_bin_number(); +} + +int get_nonce_fifo_interrupt(void) +{ + int ret = -1; + ret = 
*((unsigned int *)(axi_fpga_addr + NONCE_FIFO_INTERRUPT)); + applog(LOG_DEBUG,"%s: NONCE_FIFO_INTERRUPT is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_nonce_fifo_interrupt(unsigned int value) +{ + *((unsigned int *)(axi_fpga_addr + NONCE_FIFO_INTERRUPT)) = value; + applog(LOG_DEBUG,"%s: set NONCE_FIFO_INTERRUPT is 0x%x\n", __FUNCTION__, value); + get_nonce_fifo_interrupt(); +} + +int get_dhash_acc_control(void) +{ + int ret = -1; + ret = *((unsigned int *)(axi_fpga_addr + DHASH_ACC_CONTROL)); + applog(LOG_DEBUG,"%s: DHASH_ACC_CONTROL is 0x%x\n", __FUNCTION__, ret); + return ret; +} + +void set_dhash_acc_control(unsigned int value) +{ + int a = 10; + *((unsigned int *)(axi_fpga_addr + DHASH_ACC_CONTROL)) = value; + applog(LOG_DEBUG,"%s: set DHASH_ACC_CONTROL is 0x%x\n", __FUNCTION__, value); + while (a>0) + { + if ((value | NEW_BLOCK) == (get_dhash_acc_control() |NEW_BLOCK)) + break; + *((unsigned int *)(axi_fpga_addr + DHASH_ACC_CONTROL)) = value; + a--; + cgsleep_ms(2); + } + if (a == 0) + applog(LOG_DEBUG,"%s set DHASH_ACC_CONTROL failed!",__FUNCTION__); +} + +void set_TW_write_command(unsigned int *value) +{ + unsigned int i; + for(i=0; ichain_num = 0; + + ret = get_hash_on_plug(); + + if(ret < 0) + { + applog(LOG_DEBUG,"%s: get_hash_on_plug functions error\n"); + } + else + { + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if((ret >> i) & 0x1) + { + dev->chain_exist[i] = 1; + dev->chain_num++; + } + else + { + dev->chain_exist[i] = 0; + } + } + } +} + +void check_fan() +{ + unsigned char i=0, j=0; + unsigned char fan_id = 0; + unsigned int fan_speed; + + for(j=0; j < 2; j++) //means check for twice to make sure find out all fan + { + for(i=0; i < BITMAIN_MAX_FAN_NUM; i++) + { + if(get_fan_speed(&fan_id, &fan_speed) != -1) + { + dev->fan_speed_value[fan_id] = fan_speed * 60 * 2; + if((fan_speed > 0) && (dev->fan_exist[fan_id] == 0)) + { + dev->fan_exist[fan_id] = 1; + dev->fan_num++; + dev->fan_exist_map |= (0x1 << fan_id); + } + else 
if((fan_speed == 0) && (dev->fan_exist[fan_id] == 1)) + { + dev->fan_exist[fan_id] = 0; + dev->fan_num--; + dev->fan_exist_map &= !(0x1 << fan_id); + } + if(dev->fan_speed_top1 < dev->fan_speed_value[fan_id]) + dev->fan_speed_top1 = dev->fan_speed_value[fan_id]; + } + } + } +} + +int get_all_temperature() +{ + int ret = 0; + int ret_value = 0; + unsigned int i = 0; + int temperature = 0, highest_temp = 0; + int temperature_gap[BITMAIN_MAX_CHAIN_NUM] = {0}; + int biggest_gap = 0; + + + dev->temp_top1_last = dev->temp_top1; + dev->temp_num = 0; + dev->temp_top1 = 0; + dev->temp_sensor_map = 0; + + ret = get_temperature_0_3(); + if(ret != -1) + { + for(i=0; i<4; i++) + { + temperature = (ret >> i*8) & 0xff; + if((dev->chain_exist[i] == 1) && temperature) + { + dev->temp_sensor_map |= (0x1 << i); + dev->temp_num++; + dev->temp[i] = temperature; + } + else if((dev->chain_exist[i] == 0) && temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is not exist, but it has temperature:%d\n", __FUNCTION__, i, temperature); + } + else if((dev->chain_exist[i] == 1) && !temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is exist, but its temperature is:%d\n", __FUNCTION__, i, temperature); + } + else + { + //applog(LOG_DEBUG,"%s: no chain%d no temperature\n", __FUNCTION__, i); + } + } + } + + ret = get_temperature_4_7(); + if(ret != -1) + { + for(i=0; i<4; i++) + { + temperature = (ret >> i*8) & 0xff; + if((dev->chain_exist[i+4] == 1) && temperature) + { + dev->temp_sensor_map |= (0x1 << (i+4)); + dev->temp_num++; + dev->temp[i+4] = temperature; + } + else if((dev->chain_exist[i+4] == 0) && temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is not exist, but it has temperature:%d\n", __FUNCTION__, i+4, temperature); + } + else if((dev->chain_exist[i+4] == 1) && !temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is exist, but its temperature is:%d\n", __FUNCTION__, i+4, temperature); + } + else + { + 
//applog(LOG_DEBUG,"%s: no chain%d no temperature\n", __FUNCTION__, i+4); + } + } + } + + ret = get_temperature_8_11(); + if(ret != -1) + { + for(i=0; i<4; i++) + { + temperature = (ret >> i*8) & 0xff; + if((dev->chain_exist[i+8] == 1) && temperature) + { + dev->temp_sensor_map |= (0x1 << (i+8)); + dev->temp_num++; + dev->temp[i+8] = temperature; + } + else if((dev->chain_exist[i+8] == 0) && temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is not exist, but it has temperature:%d\n", __FUNCTION__, i+8, temperature); + } + else if((dev->chain_exist[i+8] == 1) && !temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is exist, but its temperature is:%d\n", __FUNCTION__, i+8, temperature); + } + else + { + //applog(LOG_DEBUG,"%s: no chain%d no temperature\n", __FUNCTION__, i+8); + } + } + } + + ret = get_temperature_12_15(); + if(ret != -1) + { + for(i=0; i<4; i++) + { + temperature = (ret >> i*8) & 0xff; + if((dev->chain_exist[i+12] == 1) && temperature) + { + dev->temp_sensor_map |= (0x1 << (i+12)); + dev->temp_num++; + dev->temp[i+12] = temperature; + } + else if((dev->chain_exist[i+12] == 0) && temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is not exist, but it has temperature:%d\n", __FUNCTION__, i+12, temperature); + } + else if((dev->chain_exist[i+12] == 1) && !temperature) + { + ret_value = -1; + applog(LOG_DEBUG,"%s: chain%d is exist, but its temperature is:%d\n", __FUNCTION__, i+12, temperature); + } + else + { + //applog(LOG_DEBUG,"%s: no chain%d no temperature\n", __FUNCTION__, i+12); + } + } + } + + for(i=0; itemp[i] > highest_temp) + { + highest_temp = dev->temp[i]; + dev->temp_top1 = dev->temp[i]; + } + } + + if((dev->temp_top1 - dev->temp_top1_last < 2) && (dev->temp_top1 - dev->temp_top1_last > -2)) + { + dev->temp_top1_last = dev->temp_top1; + } + return ret_value; +} + +void set_PWM(unsigned char pwm_percent) +{ + uint16_t pwm_high_value = 0, pwm_low_value = 0; + int temp_pwm_percent = 0; + + 
temp_pwm_percent = pwm_percent; + + if(temp_pwm_percent < MIN_PWM_PERCENT) + { + temp_pwm_percent = MIN_PWM_PERCENT; + } + + if(temp_pwm_percent > MAX_PWM_PERCENT) + { + temp_pwm_percent = MAX_PWM_PERCENT; + } + + pwm_high_value = temp_pwm_percent * PWM_SCALE / 100; + pwm_low_value = (100 - temp_pwm_percent) * PWM_SCALE / 100; + dev->pwm_value = (pwm_high_value << 16) | pwm_low_value; + dev->pwm_percent = temp_pwm_percent; + + set_fan_control(dev->pwm_value); +} + +void set_PWM_according_to_temperature() +{ + int pwm_percent = 0, temp_change = 0; + temp_highest = dev->temp_top1; + if(temp_highest >= MAX_FAN_TEMP) + { + applog(LOG_DEBUG,"%s: Temperature is higher than %d 'C\n", __FUNCTION__, temp_highest); + } + + if(dev->fan_eft) + { + if((dev->fan_pwm >= 0) && (dev->fan_pwm <= 100)) + { + set_PWM(dev->fan_pwm); + return; + } + } + + temp_change = temp_highest - last_temperature; + + if(temp_highest >= MAX_FAN_TEMP || temp_highest == 0) + { + set_PWM(MAX_PWM_PERCENT); + dev->fan_pwm = MAX_PWM_PERCENT; + applog(LOG_DEBUG,"%s: Set PWM percent : MAX_PWM_PERCENT\n", __FUNCTION__); + return; + } + + if(temp_highest <= MIN_FAN_TEMP) + { + set_PWM(MIN_PWM_PERCENT); + dev->fan_pwm = MIN_PWM_PERCENT; + applog(LOG_DEBUG,"%s: Set PWM percent : MIN_PWM_PERCENT\n", __FUNCTION__); + return; + } + + if(temp_change >= TEMP_INTERVAL || temp_change <= -TEMP_INTERVAL) + { + pwm_percent = MIN_PWM_PERCENT + (temp_highest -MIN_FAN_TEMP) * PWM_ADJUST_FACTOR; + if(pwm_percent < 0) + { + pwm_percent = 0; + } + dev->fan_pwm = pwm_percent; + applog(LOG_DEBUG,"%s: Set PWM percent : %d\n", __FUNCTION__, pwm_percent); + set_PWM(pwm_percent); + last_temperature = temp_highest; + } +} + +static void get_plldata(int type,int freq,uint32_t * reg_data,uint16_t * reg_data2, uint32_t *vil_data) +{ + uint32_t i; + char freq_str[10]; + sprintf(freq_str,"%d", freq); + char plldivider1[32] = {0}; + char plldivider2[32] = {0}; + char vildivider[32] = {0}; + + if(type == 1385) + { + for(i=0; i < 
sizeof(freq_pll_1385)/sizeof(freq_pll_1385[0]); i++) + { + if( memcmp(freq_pll_1385[i].freq, freq_str, sizeof(freq_pll_1385[i].freq)) == 0) + break; + } + } + + if(i == sizeof(freq_pll_1385)/sizeof(freq_pll_1385[0])) + { + i = 4; + } + + sprintf(plldivider1, "%08x", freq_pll_1385[i].fildiv1); + sprintf(plldivider2, "%04x", freq_pll_1385[i].fildiv2); + sprintf(vildivider, "%04x", freq_pll_1385[i].vilpll); + + *reg_data = freq_pll_1385[i].fildiv1; + *reg_data2 = freq_pll_1385[i].fildiv2; + *vil_data = freq_pll_1385[i].vilpll; +} + +void set_frequency(unsigned short int frequency) +{ + unsigned char buf[9] = {0,0,0,0,0,0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned char i; + unsigned int ret, value; + uint32_t reg_data_pll = 0; + uint16_t reg_data_pll2 = 0; + uint32_t reg_data_vil = 0; + + applog(LOG_DEBUG,"\n--- %s\n", __FUNCTION__); + + get_plldata(1385, frequency, ®_data_pll, ®_data_pll2, ®_data_vil); + applog(LOG_DEBUG,"%s: frequency = %d\n", __FUNCTION__, frequency); + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + //applog(LOG_DEBUG,"%s: i = %d\n", __FUNCTION__, i); + if(!opt_multi_version) // fil mode + { + memset(buf,0,sizeof(buf)); + memset(cmd_buf,0,sizeof(cmd_buf)); + buf[0] = 0; + buf[0] |= SET_PLL_DIVIDER1; + buf[1] = (reg_data_pll >> 16) & 0xff; + buf[2] = (reg_data_pll >> 8) & 0xff; + buf[3] = (reg_data_pll >> 0) & 0xff; + buf[3] |= CRC5(buf, 4*8 - 5); + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + + cgsleep_us(3000); + + memset(buf,0,sizeof(buf)); + memset(cmd_buf,0,sizeof(cmd_buf)); + buf[0] = SET_PLL_DIVIDER2; + buf[0] |= COMMAND_FOR_ALL; + buf[1] = 0; //addr + buf[2] = reg_data_pll2 >> 8; + buf[3] = reg_data_pll2& 0x0ff; + buf[3] |= CRC5(buf, 4*8 - 5); + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 
| buf[3]; + + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + + dev->freq[i] = frequency; + + cgsleep_us(5000); + } + else // vil + { + memset(buf,0,9); + memset(cmd_buf,0,3*sizeof(int)); + buf[0] = VIL_COMMAND_TYPE | VIL_ALL | SET_CONFIG; + buf[1] = 0x09; + buf[2] = 0; + buf[3] = PLL_PARAMETER; + buf[4] = (reg_data_vil >> 24) & 0xff; + buf[5] = (reg_data_vil >> 16) & 0xff; + buf[6] = (reg_data_vil >> 8) & 0xff; + buf[7] = (reg_data_vil >> 0) & 0xff; + buf[8] = CRC5(buf, 8*8); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24 | buf[5]<<16 | buf[6]<<8 | buf[7];; + cmd_buf[2] = buf[8]<<24; + + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + + dev->freq[i] = frequency; + cgsleep_us(10000); + } + } + } +} + +void set_frequency_with_addr(unsigned short int frequency,unsigned char mode,unsigned char addr, unsigned char chain) +{ + unsigned char buf[9] = {0,0,0,0,0,0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned char i; + unsigned int ret, value; + uint32_t reg_data_pll = 0; + uint16_t reg_data_pll2 = 0; + uint32_t reg_data_vil = 0; + i = chain; + + applog(LOG_DEBUG,"\n--- %s\n", __FUNCTION__); + + get_plldata(1385, frequency, ®_data_pll, ®_data_pll2, ®_data_vil); + applog(LOG_DEBUG,"%s: frequency = %d\n", __FUNCTION__, frequency); + + //applog(LOG_DEBUG,"%s: i = %d\n", __FUNCTION__, i); + if(!opt_multi_version) // fil mode + { + memset(buf,0,sizeof(buf)); + memset(cmd_buf,0,sizeof(cmd_buf)); + buf[0] = 0; + buf[0] |= SET_PLL_DIVIDER1; + buf[1] = (reg_data_pll >> 16) & 0xff; + buf[2] = (reg_data_pll >> 8) & 0xff; + buf[3] = (reg_data_pll >> 0) & 0xff; + buf[3] |= CRC5(buf, 4*8 - 5); + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + + 
set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + + cgsleep_us(3000); + + memset(buf,0,sizeof(buf)); + memset(cmd_buf,0,sizeof(cmd_buf)); + buf[0] = SET_PLL_DIVIDER2; + buf[0] |= COMMAND_FOR_ALL; + buf[1] = 0; //addr + buf[2] = reg_data_pll2 >> 8; + buf[3] = reg_data_pll2& 0x0ff; + buf[3] |= CRC5(buf, 4*8 - 5); + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + + dev->freq[i] = frequency; + + cgsleep_us(5000); + } + else // vil + { + memset(buf,0,9); + memset(cmd_buf,0,3*sizeof(int)); + if(mode) + buf[0] = VIL_COMMAND_TYPE | VIL_ALL | SET_CONFIG; + else + buf[0] = VIL_COMMAND_TYPE | SET_CONFIG; + buf[1] = 0x09; + buf[2] = addr; + buf[3] = PLL_PARAMETER; + buf[4] = (reg_data_vil >> 24) & 0xff; + buf[5] = (reg_data_vil >> 16) & 0xff; + buf[6] = (reg_data_vil >> 8) & 0xff; + buf[7] = (reg_data_vil >> 0) & 0xff; + buf[8] = CRC5(buf, 8*8); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24 | buf[5]<<16 | buf[6]<<8 | buf[7];; + cmd_buf[2] = buf[8]<<24; + + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + + dev->freq[i] = frequency; + cgsleep_us(10000); + } +} + + +void clear_nonce_fifo() +{ + unsigned char i; + unsigned int buf[2] = {0}; + + for(i=0; i<3; i++) //loop 3 times for making sure read out all nonce/register data + { + while(get_nonce_number_in_fifo() & MAX_NONCE_NUMBER_IN_FIFO) + { + get_return_nonce(buf); + } + } +} + +void clear_register_value_buf() +{ + pthread_mutex_lock(®_mutex); + reg_value_buf.p_wr = 0; + reg_value_buf.p_rd = 0; + reg_value_buf.reg_value_num = 
0; + //memset(reg_value_buf.reg_buffer, 0, sizeof(struct reg_content)*MAX_NONCE_NUMBER_IN_FIFO); + pthread_mutex_unlock(®_mutex); +} + +void read_asic_register(unsigned char chain, unsigned char mode, unsigned char chip_addr, unsigned char reg_addr) +{ + unsigned char buf[5] = {0,0,0,0,0}; + unsigned char buf_vil[12] = {0,0,0,0,0,0,0,0,0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned int ret, value; + + if(!opt_multi_version) // fil mode + { + buf[0] = GET_STATUS; + buf[1] = chip_addr; + buf[2] = reg_addr; + if (mode) //all + buf[0] |= COMMAND_FOR_ALL; + buf[3] = CRC5(buf, 4*8 - 5); + applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + set_BC_command_buffer(cmd_buf); + + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (chain << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + else // vil mode + { + buf[0] = VIL_COMMAND_TYPE | GET_STATUS; + if(mode) + buf[0] |= VIL_ALL; + buf[1] = 0x05; + buf[2] = chip_addr; + buf[3] = reg_addr; + buf[4] = CRC5(buf, 4*8); + applog(LOG_DEBUG,"%s:VIL buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x, buf[4]=0x%x", __FUNCTION__, buf[0], buf[1], buf[2], buf[3], buf[4]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24; + + while (1) + { + if (((ret = get_BC_write_command()) & 0x80000000) == 0) + break; + cgsleep_ms(1); + } + set_BC_command_buffer(cmd_buf); + + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (chain << 16) | (ret & 0x1f); + set_BC_write_command(value); + } +} + +void read_temp(unsigned char device,unsigned reg,unsigned char data,unsigned char write,unsigned char chip_addr,int chain) +{ + unsigned char buf[9] = {0,0,0,0,0,0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned int ret, value,i; + i = chain; + if(!opt_multi_version) + { + 
printf("fil mode do not support temp reading"); + } + else + { + buf[0] = VIL_COMMAND_TYPE | SET_CONFIG; + buf[1] = 0x09; + buf[2] = chip_addr; + buf[3] = GENERAL_I2C_COMMAND; + buf[4] = 0x01; + buf[5] = device | write; + buf[6] = reg; + buf[7] = data; + buf[8] = CRC5(buf, 8*8); + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24 | buf[5]<<16 | buf[6]<<8 | buf[7]; + cmd_buf[2] = buf[8]<<24; + while (1) + { + ret = get_BC_write_command(); + if ((ret & 0x80000000) == 0) + break; + cgsleep_ms(1); + } + set_BC_command_buffer(cmd_buf); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + +} + +static void suffix_string_c5(uint64_t val, char *buf, size_t bufsiz, int sigdigits,bool display) +{ + const double dkilo = 1000.0; + const uint64_t kilo = 1000ull; + const uint64_t mega = 1000000ull; + const uint64_t giga = 1000000000ull; + const uint64_t tera = 1000000000000ull; + const uint64_t peta = 1000000000000000ull; + const uint64_t exa = 1000000000000000000ull; + char suffix[2] = ""; + bool decimal = true; + double dval; + /* + if (val >= exa) + { + val /= peta; + dval = (double)val / dkilo; + strcpy(suffix, "E"); + } + else if (val >= peta) + { + val /= tera; + dval = (double)val / dkilo; + strcpy(suffix, "P"); + } + else if (val >= tera) + { + val /= giga; + dval = (double)val / dkilo; + strcpy(suffix, "T"); + } + else */if (val >= giga) + { + val /= mega; + dval = (double)val / dkilo; + strcpy(suffix, "G"); + } + else if (val >= mega) + { + val /= kilo; + dval = (double)val / dkilo; + strcpy(suffix, "M"); + } + else if (val >= kilo) + { + dval = (double)val / dkilo; + strcpy(suffix, "K"); + } + else + { + dval = val; + decimal = false; + } + + if (!sigdigits) + { + if (decimal) + snprintf(buf, bufsiz, "%.3g%s", dval, suffix); + else + snprintf(buf, bufsiz, "%d%s", (unsigned int)dval, suffix); + } + else + { + /* Always show sigdigits + 1, padded on right with zeroes 
+ * followed by suffix */ + int ndigits = sigdigits - 1 - (dval > 0.0 ? floor(log10(dval)) : 0); + if(display) + snprintf(buf, bufsiz, "%*.*f%s", sigdigits + 1, ndigits, dval, suffix); + else + snprintf(buf, bufsiz, "%*.*f", sigdigits + 1, ndigits, dval); + + } +} + + +void check_asic_reg(unsigned int reg) +{ + + unsigned char i, j, not_reg_data_time=0; + int nonce_number = 0; + unsigned int buf[2] = {0}; + unsigned int reg_value_num=0; + unsigned int temp_nonce = 0; + unsigned char reg_buf[5] = {0,0,0,0,0}; + int read_num = 0; + uint64_t tmp_rate = 0; +rerun_all: + clear_register_value_buf(); + tmp_rate = 0; + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + read_num = 0; + if(dev->chain_exist[i] == 1) + { + tmp_rate = 0; + applog(LOG_DEBUG,"%s: check chain J%d \n", __FUNCTION__, i+1); + read_asic_register(i, 1, 0, reg); + if (reg ==CHIP_ADDRESS) + dev->chain_asic_num[i] = 0; + + while(not_reg_data_time < 3) //if there is no register value for 3 times, we can think all asic return their address + { + cgsleep_ms(300); + + pthread_mutex_lock(®_mutex); + + reg_value_num = reg_value_buf.reg_value_num; + //applog(LOG_DEBUG,"%s: reg_value_num = %d\n", __FUNCTION__, reg_value_num); + pthread_mutex_unlock(®_mutex); + if((reg_value_num >= MAX_NONCE_NUMBER_IN_FIFO || reg_value_buf.p_rd >= MAX_NONCE_NUMBER_IN_FIFO) && not_reg_data_time <3) + { + not_reg_data_time ++; + goto rerun_all; + } + if(not_reg_data_time == 3) + { + return; + } + + //applog(LOG_DEBUG,"%s: reg_value_buf.reg_value_num = 0x%x\n", __FUNCTION__, reg_value_num); + + if(reg_value_num > 0) + { + not_reg_data_time = 0; + + applog(LOG_DEBUG,"%s: reg_value_buf.reg_value_num = %d\n", __FUNCTION__, reg_value_num); + + for(j = 0; j < reg_value_num; j++) + { + pthread_mutex_lock(®_mutex); + + //applog(LOG_DEBUG,"%\n"); + if(reg_value_buf.reg_buffer[reg_value_buf.p_rd].chain_number != i) + { + applog(LOG_DEBUG,"%s: the return data is from chain%d, but it should be from chain%d\n", __FUNCTION__, 
reg_value_buf.reg_buffer[reg_value_buf.p_rd].chain_number, i); + pthread_mutex_unlock(®_mutex); + continue; + } + //applog(LOG_DEBUG,"@\n"); + + reg_buf[3] = (unsigned char)(reg_value_buf.reg_buffer[reg_value_buf.p_rd].reg_value & 0xff); + reg_buf[2] = (unsigned char)((reg_value_buf.reg_buffer[reg_value_buf.p_rd].reg_value >> 8) & 0xff); + reg_buf[1] = (unsigned char)((reg_value_buf.reg_buffer[reg_value_buf.p_rd].reg_value >> 16)& 0xff); + reg_buf[0] = (unsigned char)((reg_value_buf.reg_buffer[reg_value_buf.p_rd].reg_value >> 24)& 0xff); + + applog(LOG_DEBUG,"%s: reg_value = 0x%x\n", __FUNCTION__, reg_value_buf.reg_buffer[reg_value_buf.p_rd].reg_value); + + reg_value_buf.p_rd++; + reg_value_buf.reg_value_num--; + if(reg_value_buf.p_rd >= MAX_NONCE_NUMBER_IN_FIFO) + { + reg_value_buf.p_rd = 0; + } + //applog(LOG_DEBUG,"%s: reg_value_buf.reg_value_num = %d\n", __FUNCTION__, reg_value_buf.reg_value_num); + pthread_mutex_unlock(®_mutex); + + if(reg == CHIP_ADDRESS) + { + dev->chain_asic_num[i]++; + } + + if(reg == PLL_PARAMETER) + { + applog(LOG_DEBUG,"%s: the asic freq is 0x%x\n", __FUNCTION__, reg_value_buf.reg_buffer[reg_value_buf.p_rd].reg_value); + } + + if(reg == 0x08) + { + int i; + uint64_t temp_hash_rate = 0; + uint8_t rate_buf[10]; + uint8_t displayed_rate[16]; + for(i = 0; i < 4; i++) + { + sprintf(rate_buf + 2*i,"%02x",reg_buf[i]); + } + applog(LOG_DEBUG,"%s: hashrate is %s\n", __FUNCTION__, rate_buf); + temp_hash_rate = strtol(rate_buf,NULL,16); + temp_hash_rate = (temp_hash_rate << 24); + tmp_rate += temp_hash_rate; + read_num ++; + } + } + if(reg == CHIP_ADDRESS) + { + if (dev->chain_asic_num[i] == CHAIN_ASIC_NUM) + break; + } + } + else + { + cgsleep_ms(100); + not_reg_data_time++; + applog(LOG_DEBUG,"%s: no asic address register come back for %d time.\n", __FUNCTION__, not_reg_data_time); + } + } + + not_reg_data_time = 0; + + if(reg == CHIP_ADDRESS) + { + if(dev->chain_asic_num[i] > dev->max_asic_num_in_one_chain) + { + dev->max_asic_num_in_one_chain 
= dev->chain_asic_num[i]; + } + applog(LOG_DEBUG,"%s: chain J%d has %d ASIC\n", __FUNCTION__, i+1, dev->chain_asic_num[i]); + } + if(read_num == CHAIN_ASIC_NUM) + { + rate[i] = tmp_rate; + suffix_string_c5(rate[i], (char * )displayed_rate[i], sizeof(displayed_rate[i]), 6,false); + rate_error[i] = 0; + applog(LOG_DEBUG,"%s: chain %d hashrate is %s\n", __FUNCTION__, i, displayed_rate[i]); + } + if(read_num == 0 || status_error ) + { + rate_error[i]++; + if(rate_error[i] > 3 || status_error) + { + rate[i] = 0; + suffix_string_c5(rate[i], (char * )displayed_rate[i], sizeof(displayed_rate[i]), 6,false); + } + } + //set_nonce_fifo_interrupt(get_nonce_fifo_interrupt() & ~(FLUSH_NONCE3_FIFO)); + clear_register_value_buf(); + } + } +} + +#define RETRY_NUM 5 +unsigned int check_asic_reg_with_addr(unsigned int reg,unsigned int chip_addr,unsigned int chain, int check_num) +{ + unsigned char i, j, not_reg_data_time=0; + int nonce_number = 0; + unsigned int reg_value_num=0; + unsigned int reg_buf = 0; + i = chain; +rerun: + clear_register_value_buf(); + read_asic_register(i, 0, chip_addr, reg); + cgsleep_ms(80); + + while(not_reg_data_time < RETRY_NUM) //if there is no register value for 3 times, we can think all asic return their address + { + pthread_mutex_lock(®_mutex); + reg_value_num = reg_value_buf.reg_value_num; + //applog(LOG_NOTICE,"%s: p_wr = %d reg_value_num = %d\n", __FUNCTION__,reg_value_buf.p_wr,reg_value_buf.reg_value_num); + pthread_mutex_unlock(®_mutex); + applog(LOG_DEBUG,"%s: reg_value_num %d", __FUNCTION__, reg_value_num); + if((reg_value_num >= MAX_NONCE_NUMBER_IN_FIFO || reg_value_buf.p_rd >= MAX_NONCE_NUMBER_IN_FIFO ||reg_value_num ==0 ) && not_reg_data_time = RETRY_NUM) + { + return 0; + } + + pthread_mutex_lock(®_mutex); + for(i = 0; i < reg_value_num; i++) + { + reg_buf = reg_value_buf.reg_buffer[reg_value_buf.p_rd].reg_value; + applog(LOG_DEBUG,"%s: chip %x reg %x reg_buff %x", __FUNCTION__, chip_addr,reg,reg_buf); + reg_value_buf.p_rd++; + 
reg_value_buf.reg_value_num--; + if(reg_value_buf.p_rd < MAX_NONCE_NUMBER_IN_FIFO) + { + reg_value_buf.p_rd = 0; + } + if(reg == GENERAL_I2C_COMMAND) + { + if((reg_buf & 0xc0000000) == 0x0) + { + pthread_mutex_unlock(®_mutex); + clear_register_value_buf(); + return reg_buf; + } + else + { + pthread_mutex_unlock(®_mutex); + clear_register_value_buf(); + return 0; + } + } + } + pthread_mutex_unlock(®_mutex); + } + //set_nonce_fifo_interrupt(get_nonce_fifo_interrupt() & ~(FLUSH_NONCE3_FIFO)); + clear_register_value_buf(); + return 0; +} + + +unsigned int wait_iic_ok(unsigned int chip_addr,unsigned int chain,bool update) +{ + int fail_time = 0; + unsigned int ret = 0; + while(fail_time < 2) + { + ret = check_asic_reg_with_addr(GENERAL_I2C_COMMAND,chip_addr,chain,1); + if (ret != 0) + { + return ret; + } + else + { + fail_time++; + cgsleep_ms(1); + } + } + return 0; +} + +unsigned int check_reg_temp(unsigned char device,unsigned reg,unsigned char data,unsigned char write,unsigned char chip_addr,int chain) +{ + int fail_time =0; + unsigned int ret; + if(!write) + { + do + { + wait_iic_ok(chip_addr,chain,0); + read_temp(device, reg, data, write,chip_addr,chain); + cgsleep_ms(1); + ret = wait_iic_ok(chip_addr,chain,1); + cgsleep_ms(1); + fail_time++; + } + while (((ret & 0xff00) >>8 != reg || (ret & 0xff) == 0xff || (ret & 0xff) == 0x7f ) && fail_time < 2); + } + else + { + do + { + wait_iic_ok(chip_addr,chain,0); + read_temp(device, reg, data, write,chip_addr,chain); + wait_iic_ok(chip_addr,chain,1); + cgsleep_ms(1); + wait_iic_ok(chip_addr,chain,0); + read_temp(device, reg, 0, 0,chip_addr,chain); + ret = wait_iic_ok(chip_addr,chain,1); + cgsleep_ms(1); + fail_time++; + } + while (((ret & 0xff00) >>8 != reg && (ret & 0xff) != data )&& fail_time < 2); + } + + if (fail_time == 2) + return 0; + else + return ret; +} + + +int8_t calibration_sensor_offset(unsigned char device,unsigned char chip_addr,int chain,unsigned chip_num) +{ + int8_t offset,middle,local; + unsigned int 
ret = 0; + ret = check_reg_temp(device, 0x11, 0xba, 1, chip_addr, chain); + ret = check_reg_temp(device, 0x0, 0x0, 0, chip_addr, chain); + local = ret & 0xff; + ret = check_reg_temp(device, 0x1, 0x0, 0, chip_addr, chain); + middle = ret & 0xff; + offset = -70 + (local - middle); + ret = check_reg_temp(device, 0x11, offset, 1, chip_addr, chain); +} +void read_temp_func() +{ + int i; + unsigned int ret = 0; + int16_t temp_top = 0; + + while(1) + { + temp_top = 0; + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + pthread_mutex_lock(®_read_mutex); + ret = check_reg_temp(0x98, 0x00, 0x0, 0x0, HAVE_TEMP, i); + if (ret != 0) + { + dev->chain_asic_temp[i][2][0] = (ret & 0xff); + if (dev->chain_asic_temp[i][2][0] > temp_top) + { + temp_top = dev->chain_asic_temp[i][2][0]; + } + } + + ret = check_reg_temp(0x98, 0x01, 0x0, 0x0, HAVE_TEMP, i); + if (ret != 0) + { + dev->chain_asic_temp[i][2][1] = (ret & 0xff); + } + } + } + dev->temp_top1 = temp_top; + sleep(1); + } +} + + +void chain_inactive(unsigned char chain) +{ + unsigned char buf[5] = {0,0,0,0,5}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned int ret, value; + + if(!opt_multi_version) // fil mode + { + buf[0] = CHAIN_INACTIVE | COMMAND_FOR_ALL; + buf[1] = 0; + buf[2] = 0; + buf[3] = CRC5(buf, 4*8 - 5); + applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + set_BC_command_buffer(cmd_buf); + + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (chain << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + else // vil mode + { + buf[0] = VIL_COMMAND_TYPE | VIL_ALL | CHAIN_INACTIVE; + buf[1] = 0x05; + buf[2] = 0; + buf[3] = 0; + buf[4] = CRC5(buf, 4*8); + applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x, buf[4]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3], buf[4]); + + cmd_buf[0] 
= buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24; + while (1) + { + ret = get_BC_write_command(); + if ((ret & 0x80000000) == 0) + break; + cgsleep_ms(1); + } + set_BC_command_buffer(cmd_buf); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (chain << 16) | (ret & 0x1f); + set_BC_write_command(value); + } +} + +void set_address(unsigned char chain, unsigned char mode, unsigned char address) +{ + unsigned char buf[4] = {0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned int ret, value; + + if(!opt_multi_version) // fil mode + { + buf[0] = SET_ADDRESS; + buf[1] = address; + buf[2] = 0; + if (mode) //all + buf[0] |= COMMAND_FOR_ALL; + buf[3] = CRC5(buf, 4*8 - 5); + applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + set_BC_command_buffer(cmd_buf); + + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (chain << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + else // vil mode + { + buf[0] = VIL_COMMAND_TYPE | SET_ADDRESS; + buf[1] = 0x05; + buf[2] = address; + buf[3] = 0; + buf[4] = CRC5(buf, 4*8); + //applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x, buf[4]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3], buf[4]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24; + while (1) + { + ret = get_BC_write_command(); + if ((ret & 0x80000000) == 0) + break; + cgsleep_ms(1); + } + set_BC_command_buffer(cmd_buf); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (chain << 16) | (ret & 0x1f); + set_BC_write_command(value); + } +} + +int calculate_asic_number(unsigned int actual_asic_number) +{ + int i = 0; + if(actual_asic_number == 1) + { + i = 1; + } + else if(actual_asic_number == 2) + { + i = 2; + } + else if((actual_asic_number > 2) && (actual_asic_number <= 4)) + { + 
i = 4; + } + else if((actual_asic_number > 4) && (actual_asic_number <= 8)) + { + i = 8; + } + else if((actual_asic_number > 8) && (actual_asic_number <= 16)) + { + i = 16; + } + else if((actual_asic_number > 16) && (actual_asic_number <= 32)) + { + i = 32; + } + else if((actual_asic_number > 32) && (actual_asic_number <= 64)) + { + i = 64; + } + else if((actual_asic_number > 64) && (actual_asic_number <= 128)) + { + i = 128; + } + else + { + applog(LOG_DEBUG,"actual_asic_number = %d, but it is error\n", actual_asic_number); + return -1; + } + return i; +} + +int calculate_core_number(unsigned int actual_core_number) +{ + int i = 0; + if(actual_core_number == 1) + { + i = 1; + } + else if(actual_core_number == 2) + { + i = 2; + } + else if((actual_core_number > 2) && (actual_core_number <= 4)) + { + i = 4; + } + else if((actual_core_number > 4) && (actual_core_number <= 8)) + { + i = 8; + } + else if((actual_core_number > 8) && (actual_core_number <= 16)) + { + i = 16; + } + else if((actual_core_number > 16) && (actual_core_number <= 32)) + { + i = 32; + } + else if((actual_core_number > 32) && (actual_core_number <= 64)) + { + i = 64; + } + else if((actual_core_number > 64) && (actual_core_number <= 128)) + { + i = 128; + } + else + { + applog(LOG_DEBUG,"actual_core_number = %d, but it is error\n", actual_core_number); + return -1; + } + return i; +} + +void software_set_address() +{ + int temp_asic_number = 0; + unsigned int i, j; + unsigned char chip_addr = 0; + unsigned char check_bit = 0; + + applog(LOG_DEBUG,"--- %s\n", __FUNCTION__); + + temp_asic_number = calculate_asic_number(dev->max_asic_num_in_one_chain); + if(temp_asic_number <= 0) + { + dev->addrInterval = 0x1; + return; + } + + dev->addrInterval = 0x100 / temp_asic_number; + check_bit = dev->addrInterval - 1; + while(check_bit) + { + check_bit = check_bit >> 1; + dev->check_bit++; + } + + for(i=0; ichain_exist[i] == 1 && dev->chain_asic_num[i] == CHAIN_ASIC_NUM) + { + applog(LOG_DEBUG,"%s: chain %d 
has %d ASIC, and addrInterval is %d\n", __FUNCTION__, i, dev->chain_asic_num[i], dev->addrInterval); + + chip_addr = 0; + chain_inactive(i); + cgsleep_ms(30); + chain_inactive(i); + cgsleep_ms(30); + chain_inactive(i); + cgsleep_ms(30); + + for(j = 0; j < 0x100/dev->addrInterval; j++) + { + set_address(i, 0, chip_addr); + chip_addr += dev->addrInterval; + cgsleep_ms(30); + } + } + } +} + +void set_asic_ticket_mask(unsigned int ticket_mask) +{ + unsigned char buf[4] = {0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned int ret, value,i; + unsigned int tm; + + tm = Swap32(ticket_mask); + + for(i=0; ichain_exist[i] == 1) + { + //first step: send new bauddiv to ASIC, but FPGA doesn't change its bauddiv, it uses old bauddiv to send BC command to ASIC + if(!opt_multi_version) // fil mode + { + buf[0] = SET_BAUD_OPS; + buf[1] = 0x10; + buf[2] = ticket_mask & 0x1f; + buf[0] |= COMMAND_FOR_ALL; + buf[3] = CRC5(buf, 4*8 - 5); + applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + set_BC_command_buffer(cmd_buf); + + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + else // vil mode + { + buf[0] = VIL_COMMAND_TYPE | VIL_ALL | SET_CONFIG; + buf[1] = 0x09; + buf[2] = 0; + buf[3] = TICKET_MASK; + buf[4] = tm & 0xff; + buf[5] = (tm >> 8) & 0xff; + buf[6] = (tm >> 16) & 0xff; + buf[7] = (tm >> 24) & 0xff; + buf[8] = CRC5(buf, 8*8); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24 | buf[5]<<16 | buf[6]<<8 | buf[7]; + cmd_buf[2] = buf[8]<<24; + + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + } + } +} + + +#if 1 + +void set_baud(unsigned char bauddiv,int no_use) +{ 
+ unsigned char buf[4] = {0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned int ret, value,i; + + + if(dev->baud == bauddiv) + { + applog(LOG_DEBUG,"%s: the setting bauddiv(%d) is the same as before\n", __FUNCTION__, bauddiv); + return; + } + + for(i=0; ichain_exist[i] == 1) + { + //first step: send new bauddiv to ASIC, but FPGA doesn't change its bauddiv, it uses old bauddiv to send BC command to ASIC + if(!opt_multi_version) // fil mode + { + buf[0] = SET_BAUD_OPS; + buf[1] = 0x10; + buf[2] = bauddiv & 0x1f; + buf[0] |= COMMAND_FOR_ALL; + buf[3] = CRC5(buf, 4*8 - 5); + applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + set_BC_command_buffer(cmd_buf); + + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + else // vil mode + { + buf[0] = VIL_COMMAND_TYPE | VIL_ALL | SET_CONFIG; + buf[1] = 0x09; + buf[2] = 0; + buf[3] = MISC_CONTROL; + buf[4] = 0; + buf[5] = INV_CLKO; + buf[6] = bauddiv & 0x1f; + buf[7] = 0; + buf[8] = CRC5(buf, 8*8); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24 | buf[5]<<16 | buf[6]<<8 | buf[7]; + cmd_buf[2] = buf[8]<<24; + applog(LOG_DEBUG,"%s: cmd_buf[0]=0x%x, cmd_buf[1]=0x%x, cmd_buf[2]=0x%x\n", __FUNCTION__, cmd_buf[0], cmd_buf[1], cmd_buf[2]); + + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + } + } + + // second step: change FPGA's bauddiv + cgsleep_us(50000); + ret = get_BC_write_command(); + value = (ret & 0xffffffe0) | (bauddiv & 0x1f); + set_BC_write_command(value); + dev->baud = bauddiv; +} +#endif + +void set_baud_with_addr(unsigned char bauddiv,unsigned int mode,unsigned int chip_addr,int chain,int iic,int 
open_core,int bottom_or_mid) +{ + unsigned char buf[9] = {0,0,0,0,0,0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}; + unsigned int ret, value,i; + i = chain; + + //first step: send new bauddiv to ASIC, but FPGA doesn't change its bauddiv, it uses old bauddiv to send BC command to ASIC + if(!opt_multi_version) // fil mode + { + buf[0] = SET_BAUD_OPS; + buf[1] = 0x10; + buf[2] = bauddiv & 0x1f; + buf[0] |= COMMAND_FOR_ALL; + buf[3] = CRC5(buf, 4*8 - 5); + applog(LOG_DEBUG,"%s: buf[0]=0x%x, buf[1]=0x%x, buf[2]=0x%x, buf[3]=0x%x\n", __FUNCTION__, buf[0], buf[1], buf[2], buf[3]); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + set_BC_command_buffer(cmd_buf); + + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + } + else // vil mode + { + buf[0] = VIL_COMMAND_TYPE | SET_CONFIG; + if(mode) + buf[0] = VIL_COMMAND_TYPE | SET_CONFIG |VIL_ALL; + buf[1] = 0x09; + buf[2] = chip_addr; + buf[3] = MISC_CONTROL; + buf[4] = 0x40; + if(bottom_or_mid) + buf[5] = 0x20; + else + buf[5] = 0x21; + + if(iic) + { + buf[6] = (bauddiv & 0x1f) | 0x40; + buf[7] = 0x60; + } + else + { + buf[6] = (bauddiv & 0x1f); + buf[7] = 0x00; + } + if(open_core) + buf[6] = buf[6]| GATEBCLK; + buf[8] = 0; + buf[8] = CRC5(buf, 8*8); + + cmd_buf[0] = buf[0]<<24 | buf[1]<<16 | buf[2]<<8 | buf[3]; + cmd_buf[1] = buf[4]<<24 | buf[5]<<16 | buf[6]<<8 | buf[7]; + cmd_buf[2] = buf[8]<<24; + + while (1) + { + if (((ret = get_BC_write_command()) & 0x80000000) == 0) + break; + cgsleep_ms(1); + } + set_BC_command_buffer(cmd_buf); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID| (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + } +} + + +void init_uart_baud() +{ + unsigned int rBaudrate = 0, baud = 0; + unsigned char bauddiv = 0; + int i =0; + + rBaudrate = 1000000 * 5/3 / dev->timeout * (64*8); //64*8 need send bit, ratio=2/3 + baud = 25000000/rBaudrate/8 - 1; + baud = 1; + + if(baud > 
MAX_BAUD_DIVIDER) + { + bauddiv = MAX_BAUD_DIVIDER; + } + else + { + bauddiv = baud; + } + + applog(LOG_DEBUG,"%s: bauddiv = %d\n", __FUNCTION__, bauddiv); + + set_baud(bauddiv,1); +} + +void set_led(bool stop) +{ + static bool blink = true; + char cmd[100]; + blink = !blink; + if(stop) + { + sprintf(cmd,"echo %d > %s", 0,GREEN_LED_DEV); + system(cmd); + sprintf(cmd,"echo %d > %s", (blink)?1:0,RED_LED_DEV); + system(cmd); + } + else + { + sprintf(cmd,"echo %d > %s", 0,RED_LED_DEV); + system(cmd); + sprintf(cmd,"echo %d > %s", (blink)?1:0,GREEN_LED_DEV); + system(cmd); + } + +} + +void pic_heart_beat_func() +{ + int i; + while(1) + { + for(i=0; ichain_exist[i]) + { + pthread_mutex_lock(&iic_mutex); + pic_heart_beat_each_chain(i); + pthread_mutex_unlock(&iic_mutex); + cgsleep_ms(10); + } + } + sleep(HEART_BEAT_TIME_GAP); + } +} + +void change_pic_voltage_old() +{ + int i; + sleep(300); + for(i=0; ichain_exist[i]) + { + while(1) + { + if(tmp_vol > chain_voltage[i]) + break; + tmp_vol += 5; + if(tmp_vol > chain_voltage[i]) + tmp_vol = chain_voltage[i]; + pthread_mutex_lock(&iic_mutex); + set_pic_voltage(i,tmp_vol); + pthread_mutex_unlock(&iic_mutex); + pthread_mutex_lock(&iic_mutex); + get_pic_voltage(i); + pthread_mutex_unlock(&iic_mutex); + if(tmp_vol == chain_voltage[i]) + break; + cgsleep_ms(100); + } + } + } +} + + +void check_system_work() +{ + struct timeval tv_start = {0, 0}, tv_end,tv_send; + int i = 0, j = 0; + cgtime(&tv_end); + copy_time(&tv_start, &tv_end); + copy_time(&tv_send_job,&tv_send); + bool stop = false; + int asic_num = 0, error_asic = 0, avg_num = 0; + while(1) + { + struct timeval diff; + cgtime(&tv_end); + cgtime(&tv_send); + timersub(&tv_end, &tv_start, &diff); + + if (diff.tv_sec > 60) + { + asic_num = 0, error_asic = 0, avg_num = 0; + for(i=0; ichain_exist[i]) + { + asic_num += dev->chain_asic_num[i]; + for(j=0; jchain_asic_num[i]; j++) + { + avg_num += dev->chain_asic_nonce[i][j]; + applog(LOG_DEBUG,"%s: chain %d asic %d asic_nonce_num 
%d", __FUNCTION__, i,j,dev->chain_asic_nonce[i][j]); + } + } + } + if (asic_num != 0) + { + applog(LOG_DEBUG,"%s: avg_num %d asic_num %d", __FUNCTION__, avg_num,asic_num); + avg_num = avg_num / asic_num / 8; + } + else + { + avg_num = 1; + } + for(i=0; ichain_exist[i]) + { + int offset = 0; + + for(j=0; jchain_asic_num[i]; j++) + { + if(j%8 == 0) + { + dev->chain_asic_status_string[i][j+offset] = ' '; + offset++; + } + + if(dev->chain_asic_nonce[i][j]>avg_num) + { + dev->chain_asic_status_string[i][j+offset] = 'o'; + } + else + { + dev->chain_asic_status_string[i][j+offset] = 'x'; + error_asic++; + } + dev->chain_asic_nonce[i][j] = 0; + } + dev->chain_asic_status_string[i][j+offset] = '\0'; + } + } + copy_time(&tv_start, &tv_end); + } + + check_fan(); + set_PWM_according_to_temperature(); + timersub(&tv_send, &tv_send_job, &diff); + if(diff.tv_sec > 120 || dev->temp_top1 > MAX_TEMP + || dev->fan_num < MIN_FAN_NUM || dev->fan_speed_top1 < (MAX_FAN_SPEED * dev->fan_pwm / 150)) + { + stop = true; + if(dev->temp_top1 > MAX_TEMP + || dev->fan_num < MIN_FAN_NUM || dev->fan_speed_top1 < (MAX_FAN_SPEED * dev->fan_pwm / 150)) + { + status_error = true; + once_error = true; + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + pthread_mutex_lock(&iic_mutex); + disable_pic_dac(i); + pthread_mutex_unlock(&iic_mutex); + } + } + } + set_dhash_acc_control((unsigned int)get_dhash_acc_control() & ~RUN_BIT); + } + else + { + stop = false; + if (!once_error) + status_error = false; + } + if(stop_mining) + status_error = true; + /* + if(error_asic > asic_num/5 || asic_num == 0) + { + stop = true; + } + */ + set_led(stop); + + cgsleep_ms(1000); + } +} + + +void open_core() +{ + unsigned int i = 0, j = 0, k, m, work_id = 0, ret = 0, value = 0, work_fifo_ready = 0, loop=0; + unsigned char gateblk[4] = {0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}, buf[TW_WRITE_COMMAND_LEN/sizeof(unsigned int)]= {0}; + unsigned int 
buf_vil_tw[TW_WRITE_COMMAND_LEN_VIL/sizeof(unsigned int)]= {0}; + unsigned char data[TW_WRITE_COMMAND_LEN] = {0xff}; + unsigned char buf_vil[9] = {0,0,0,0,0,0,0,0,0}; + struct vil_work work_vil; + struct vil_work_1387 work_vil_1387; + + loop = 114; + + + if(!opt_multi_version) // fil mode + { + set_dhash_acc_control(get_dhash_acc_control() & (~OPERATION_MODE)); + set_hash_counting_number(0); + gateblk[0] = SET_BAUD_OPS; + gateblk[1] = 0;//0x10; //16-23 + gateblk[2] = dev->baud | 0x80; //8-15 gateblk=1 + gateblk[0] |= 0x80; + //gateblk[3] = CRC5(gateblk, 4*8 - 5); + gateblk[3] = 0x80; // MMEN=1 + gateblk[3] = 0x80 | (0x1f & CRC5(gateblk, 4*8 - 5)); + applog(LOG_DEBUG,"%s: gateblk[0]=0x%x, gateblk[1]=0x%x, gateblk[2]=0x%x, gateblk[3]=0x%x\n", __FUNCTION__, gateblk[0], gateblk[1], gateblk[2], gateblk[3]); + cmd_buf[0] = gateblk[0]<<24 | gateblk[1]<<16 | gateblk[2]<<8 | gateblk[3]; + + memset(data, 0x00, TW_WRITE_COMMAND_LEN); + data[TW_WRITE_COMMAND_LEN - 1] = 0xff; + data[TW_WRITE_COMMAND_LEN - 12] = 0xff; + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + cgsleep_us(10000); + + for(m=0; mbaud & 0x1f) | GATEBCLK; // enable gateblk + buf_vil[7] = MMEN; // MMEN=1 + + buf_vil[8] = CRC5(buf_vil, 8*8); + + cmd_buf[0] = buf_vil[0]<<24 | buf_vil[1]<<16 | buf_vil[2]<<8 | buf_vil[3]; + cmd_buf[1] = buf_vil[4]<<24 | buf_vil[5]<<16 | buf_vil[6]<<8 | buf_vil[7]; + cmd_buf[2] = buf_vil[8]<<24; + + // prepare special work for openning core + memset(buf_vil_tw, 0x00, TW_WRITE_COMMAND_LEN_VIL/sizeof(unsigned int)); + memset(&work_vil_1387, 0xff, sizeof(struct vil_work_1387)); + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + work_vil_1387.work_type = NORMAL_BLOCK_MARKER; + work_vil_1387.chain_id = 0x80 | i; + 
work_vil_1387.reserved1[0]= 0; + work_vil_1387.reserved1[1]= 0; + work_vil_1387.work_count = 0; + work_vil_1387.data[0] = 0xff; + work_vil_1387.data[11] = 0xff; + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + cgsleep_us(10000); + + for(m=0; mbaud,0,0x60,i,1,0,MID_OR_BOT); + cgsleep_ms(10); + set_baud_with_addr(dev->baud,0,0xa8,i,1,0,MID_OR_BOT); + cgsleep_ms(10); + */ + set_baud_with_addr(dev->baud,0,HAVE_TEMP,i,1,0,MID_OR_BOT); + cgsleep_ms(10); +#endif + } + } + set_dhash_acc_control(get_dhash_acc_control()| VIL_MODE | VIL_MIDSTATE_NUMBER(opt_multi_version)); + } +} + +#if 0 +void open_core() +{ + unsigned int i = 0, j = 0, m, work_id = 0, ret = 0, value = 0, work_fifo_ready = 0; + unsigned char gateblk[4] = {0,0,0,0}; + unsigned int cmd_buf[3] = {0,0,0}, buf[TW_WRITE_COMMAND_LEN/sizeof(unsigned int)]= {0}; + unsigned int buf_vil_tw[TW_WRITE_COMMAND_LEN_VIL/sizeof(unsigned int)]= {0}; + unsigned char data[TW_WRITE_COMMAND_LEN] = {0xff}; + unsigned char buf_vil[9] = {0}; + struct vil_work work_vil; + + if(!opt_multi_version) // fil mode + { + set_dhash_acc_control(get_dhash_acc_control() & (~OPERATION_MODE)); + set_hash_counting_number(0); + gateblk[0] = SET_BAUD_OPS; + gateblk[1] = 0;//0x10; //16-23 + gateblk[2] = dev->baud | 0x80; //8-15 gateblk=1 + gateblk[0] |= 0x80; + gateblk[3] = CRC5(gateblk, 4*8 - 5); + applog(LOG_DEBUG,"%s: gateblk[0]=0x%x, gateblk[1]=0x%x, gateblk[2]=0x%x, gateblk[3]=0x%x\n", __FUNCTION__, gateblk[0], gateblk[1], gateblk[2], gateblk[3]); + cmd_buf[0] = gateblk[0]<<24 | gateblk[1]<<16 | gateblk[2]<<8 | gateblk[3]; + + memset(data, 0x00, TW_WRITE_COMMAND_LEN); + data[TW_WRITE_COMMAND_LEN - 1] = 0xff; + data[TW_WRITE_COMMAND_LEN - 12] = 0xff; + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = 
BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + cgsleep_ms(10); + + for(m=0; mbaud & 0x1f) | GATEBCLK; + buf_vil[7] = 0; + buf_vil[8] = CRC5(buf_vil, 8*8); + + cmd_buf[0] = buf_vil[0]<<24 | buf_vil[1]<<16 | buf_vil[2]<<8 | buf_vil[3]; + cmd_buf[1] = buf_vil[4]<<24 | buf_vil[5]<<16 | buf_vil[6]<<8 | buf_vil[7]; + cmd_buf[2] = buf_vil[8]<<24; + + // prepare special work for openning core + memset(&work_vil, 0, sizeof(struct vil_work)); + work_vil.type = 0x01 << 5; + work_vil.length = sizeof(struct vil_work); + work_vil.wc_base = 0; + work_vil.mid_num = 1; + //work_vil.sno = 0; + work_vil.data2[0] = 0xff; + work_vil.data2[11] = 0xff; + + memset(data, 0x00, 4); + memset(buf_vil_tw, 0x00, TW_WRITE_COMMAND_LEN_VIL); + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + set_BC_command_buffer(cmd_buf); + ret = get_BC_write_command(); + value = BC_COMMAND_BUFFER_READY | BC_COMMAND_EN_CHAIN_ID | (i << 16) | (ret & 0x1f); + set_BC_write_command(value); + cgsleep_ms(10); + + for(m=0; m= MAX_NONCE_NUMBER_IN_FIFO || reg_value_buf.p_wr >= MAX_NONCE_NUMBER_IN_FIFO) + { + clear_register_value_buf(); + continue; + } + pthread_mutex_lock(®_mutex); + + reg_value_buf.reg_buffer[reg_value_buf.p_wr].reg_value = buf[1]; + reg_value_buf.reg_buffer[reg_value_buf.p_wr].crc = (buf[0] >> 24) & 0x1f; + reg_value_buf.reg_buffer[reg_value_buf.p_wr].chain_number = CHAIN_NUMBER(buf[0]); + + if(reg_value_buf.p_wr < MAX_NONCE_NUMBER_IN_FIFO ) + { + reg_value_buf.p_wr++; + } + else + { + reg_value_buf.p_wr = 0; + } + + if(reg_value_buf.reg_value_num < MAX_NONCE_NUMBER_IN_FIFO) + { + reg_value_buf.reg_value_num++; + } + else + { + reg_value_buf.reg_value_num = MAX_NONCE_NUMBER_IN_FIFO; + } + //applog(LOG_NOTICE,"%s: p_wr = %d reg_value_num = %d\n", __FUNCTION__,reg_value_buf.p_wr,reg_value_buf.reg_value_num); + pthread_mutex_unlock(®_mutex); + } + } + } + } +} + + +//interface between bmminer and axi driver 
+int bitmain_c5_init(struct init_config config) +{ + char ret=0,j; + uint16_t crc = 0; + struct init_config config_parameter; + int i=0,x = 0,y = 0; + int hardware_version; + unsigned int data = 0; + + memcpy(&config_parameter, &config, sizeof(struct init_config)); + + if(config_parameter.token_type != INIT_CONFIG_TYPE) + { + applog(LOG_DEBUG,"%s: config_parameter.token_type != 0x%x, it is 0x%x\n", __FUNCTION__, INIT_CONFIG_TYPE, config_parameter.token_type); + return -1; + } + + crc = CRC16((uint8_t*)(&config_parameter), sizeof(struct init_config) - sizeof(uint16_t)); + if(crc != config_parameter.crc) + { + applog(LOG_DEBUG,"%s: config_parameter.crc = 0x%x, but we calculate it as 0x%x\n", __FUNCTION__, config_parameter.crc, crc); + return -2; + } + + //malloc nonce_read_out +#if 0 + nonce_read_out = malloc(sizeof(struct nonce_buf)); + if(!nonce_read_out) + { + applog(LOG_DEBUG,"%s: malloc nonce_read_out failed\n", __FUNCTION__); + return -3; + } + else + { + memset(nonce_read_out, 0, sizeof(struct nonce_buf)); + mutex_init(&nonce_read_out.spinlock); + } + + //malloc register value buffer + reg_value_buf = malloc(sizeof(struct reg_buf)); + if(!reg_value_buf) + { + applog(LOG_DEBUG,"%s: malloc reg_value_buf failed\n", __FUNCTION__); + return -4; + } + else + { + memset(reg_value_buf, 0, sizeof(struct reg_buf)); + mutex_init(®_value_buf.spinlock); + } +#endif + read_nonce_reg_id = calloc(1,sizeof(struct thr_info)); + if(thr_info_create(read_nonce_reg_id, NULL, get_nonce_and_register, read_nonce_reg_id)) + { + applog(LOG_DEBUG,"%s: create thread for get nonce and register from FPGA failed\n", __FUNCTION__); + return -5; + } + + pthread_detach(read_nonce_reg_id->pth); + + //init axi + bitmain_axi_init(); + + //reset FPGA & HASH board + if(config_parameter.reset) + { + set_QN_write_data_command(RESET_HASH_BOARD | RESET_ALL | RESET_FPGA | RESET_TIME(15)); + while(get_QN_write_data_command() & RESET_HASH_BOARD) + { + cgsleep_ms(30); + } + } + 
set_nonce2_and_job_id_store_address(PHY_MEM_NONCE2_JOBID_ADDRESS); + set_job_start_address(PHY_MEM_JOB_START_ADDRESS_1); + //check chain + check_chain(); + + char * buf_hex = NULL; + int board_num = 0; + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + pthread_mutex_lock(&iic_mutex); + get_hash_board_id_number(i,hash_board_id[i]); + buf_hex = bin2hex(hash_board_id[i],12); + sprintf(hash_board_id_string + (board_num*id_string_len),"{\"ID\":\"%s\"},",buf_hex); + board_num++; + free(buf_hex); + buf_hex = NULL; + pthread_mutex_unlock(&iic_mutex); + } + } + hash_board_id_string[board_num*id_string_len - 1] = '\0'; + + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + pthread_mutex_lock(&iic_mutex); + reset_iic_pic(i); + cgsleep_ms(500); + jump_to_app_from_loader(i); + pthread_mutex_unlock(&iic_mutex); + } + } + +#if 0 + de_voltage = opt_bitmain_c5_voltage; + cgsleep_ms(100); + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + pthread_mutex_lock(&iic_mutex); + chain_voltage[i] = get_pic_voltage(i); + pthread_mutex_unlock(&iic_mutex); + } + } + cgsleep_ms(100); + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + pthread_mutex_lock(&iic_mutex); + //if(de_voltage < chain_voltage[i]) + set_pic_voltage(i,de_voltage); + pthread_mutex_unlock(&iic_mutex); + pthread_mutex_lock(&iic_mutex); + get_pic_voltage(i); + pthread_mutex_unlock(&iic_mutex); + } + } + cgsleep_ms(100); +#endif +#if 0 + change_voltage_to_old = calloc(1,sizeof(struct thr_info)); + if(thr_info_create(change_voltage_to_old, NULL, change_pic_voltage_old, change_voltage_to_old)) + { + applog(LOG_DEBUG,"%s: create thread error for pic_heart_beat_func\n", __FUNCTION__); + return -6; + } + pthread_detach(change_voltage_to_old->pth); +#endif + pic_heart_beat = calloc(1,sizeof(struct thr_info)); + if(thr_info_create(pic_heart_beat, NULL, pic_heart_beat_func, pic_heart_beat)) + { + 
applog(LOG_DEBUG,"%s: create thread error for pic_heart_beat_func\n", __FUNCTION__); + return -6; + } + pthread_detach(pic_heart_beat->pth); + + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + pthread_mutex_lock(&iic_mutex); + enable_pic_dac(i); + pthread_mutex_unlock(&iic_mutex); + } + } + + cgsleep_ms(100); + + if(config_parameter.reset) + { + set_QN_write_data_command(RESET_HASH_BOARD | RESET_ALL | RESET_TIME(15)); + while(get_QN_write_data_command() & RESET_HASH_BOARD) + { + cgsleep_ms(30); + } + } + + if(opt_multi_version) + set_dhash_acc_control(get_dhash_acc_control() & (~OPERATION_MODE) | VIL_MODE | VIL_MIDSTATE_NUMBER(opt_multi_version) & (~NEW_BLOCK) & (~RUN_BIT)); + + cgsleep_ms(10); + //check ASIC number for every chain + check_asic_reg(CHIP_ADDRESS); + cgsleep_ms(10); + //set core number + dev->corenum = BM1387_CORE_NUM; + + software_set_address(); + cgsleep_ms(10); + + check_asic_reg(CHIP_ADDRESS); + cgsleep_ms(10); + + set_asic_ticket_mask(63); + cgsleep_ms(10); + + if(config_parameter.frequency_eft) + { + dev->frequency = config_parameter.frequency; + set_frequency(dev->frequency); + sprintf(dev->frequency_t,"%u",dev->frequency); + } + + cgsleep_ms(10); + + //check who control fan + dev->fan_eft = config_parameter.fan_eft; + dev->fan_pwm= config_parameter.fan_pwm_percent; + applog(LOG_DEBUG,"%s: fan_eft : %d fan_pwm : %d\n", __FUNCTION__,dev->fan_eft,dev->fan_pwm); + if(config_parameter.fan_eft) + { + if((config_parameter.fan_pwm_percent >= 0) && (config_parameter.fan_pwm_percent <= 100)) + { + set_PWM(config_parameter.fan_pwm_percent); + } + else + { + set_PWM_according_to_temperature(); + } + } + else + { + set_PWM_according_to_temperature(); + } + + //calculate real timeout + if(config_parameter.timeout_eft) + { + if(config_parameter.timeout_data_integer == 0 && config_parameter.timeout_data_fractions == 0) //driver calculate out timeout value + { + dev->timeout = 
0x1000000/calculate_core_number(dev->corenum)*dev->addrInterval/(dev->frequency)*90/100; + applog(LOG_DEBUG,"dev->timeout = %d\n", dev->timeout); + } + else + { + dev->timeout = config_parameter.timeout_data_integer * 1000 + config_parameter.timeout_data_fractions; + } + + if(dev->timeout > MAX_TIMEOUT_VALUE) + { + dev->timeout = MAX_TIMEOUT_VALUE; + } + } + + //set baud + init_uart_baud(); + cgsleep_ms(10); +#if USE_IIC + if(access("/config/temp_sensor", 0) == -1) + { + system("touch /config/temp_sensor"); + for(i=0; ichain_exist[i] == 1) + { + set_baud_with_addr(dev->baud,0,HAVE_TEMP,i,1,open_core,MID_OR_BOT); + } + } + + cgsleep_ms(5); + + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1 && dev->chain_asic_num[i] == CHAIN_ASIC_NUM) + { + calibration_sensor_offset(0x98,HAVE_TEMP,i,3); + cgsleep_ms(10); + } + } + } +#endif + + +#if 0 + for(i=0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1 && dev->chain_asic_num[i] == CHAIN_ASIC_NUM) + { + set_frequency_with_addr(dev->frequency + ADD_FREQ1,0,0x54,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ1,0,0x58,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ1,0,0x5c,i); + set_frequency_with_addr(dev->frequency + ADD_FREQ,0,0x60,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ,0,0x64,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ,0,0x68,i); + set_frequency_with_addr(dev->frequency + ADD_FREQ,0,0x6c,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ,0,0x70,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ,0,0x74,i); + set_frequency_with_addr(dev->frequency + ADD_FREQ1,0,0x78,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ1,0,0x7c,i); + //set_frequency_with_addr(dev->frequency + ADD_FREQ1,0,0x80,i); + } + } +#endif + //set big timeout value for open core + //set_time_out_control((MAX_TIMEOUT_VALUE - 100) | TIME_OUT_VALID); + set_time_out_control(0xc350 | TIME_OUT_VALID); + + open_core(); + + //set real timeout back + 
if(opt_multi_version) + set_time_out_control(((dev->timeout * opt_multi_version) & MAX_TIMEOUT_VALUE) | TIME_OUT_VALID); + else + set_time_out_control(((dev->timeout) & MAX_TIMEOUT_VALUE) | TIME_OUT_VALID); + check_system_work_id = calloc(1,sizeof(struct thr_info)); + if(thr_info_create(check_system_work_id, NULL, check_system_work, check_system_work_id)) + { + applog(LOG_DEBUG,"%s: create thread for check system\n", __FUNCTION__); + return -6; + } + pthread_detach(check_system_work_id->pth); + +#if 1 + read_hash_rate = calloc(1,sizeof(struct thr_info)); + if(thr_info_create(read_hash_rate, NULL, get_hash_rate, read_hash_rate)) + { + applog(LOG_DEBUG,"%s: create thread for get nonce and register from FPGA failed\n", __FUNCTION__); + return -5; + } + + pthread_detach(read_hash_rate->pth); +#endif + +#if 1 + read_temp_id = calloc(1,sizeof(struct thr_info)); + if(thr_info_create(read_temp_id, NULL, read_temp_func, read_temp_id)) + { + applog(LOG_DEBUG,"%s: create thread for read temp\n", __FUNCTION__); + return -7; + } + pthread_detach(read_temp_id->pth); + +#endif + + + for(x=0; xchain_exist[x]) + { + int offset = 0; + for(y=0; ychain_asic_num[x]; y++) + { + if(y%8 == 0) + { + dev->chain_asic_status_string[x][y+offset] = ' '; + offset++; + } + dev->chain_asic_status_string[x][y+offset] = 'o'; + dev->chain_asic_nonce[x][y] = 0; + } + dev->chain_asic_status_string[x][y+offset] = '\0'; + } + } + + hardware_version = get_hardware_version(); + pcb_version = (hardware_version >> 16) & 0x0000ffff; + fpga_version = hardware_version & 0x000000ff; + sprintf(g_miner_version, "%d.%d.%d.%d", fpga_version, pcb_version, C5_VERSION, 0); + + return 0; +} + +int parse_job_to_c5(unsigned char **buf,struct pool *pool,uint32_t id) +{ + uint16_t crc = 0; + uint32_t buf_len = 0; + uint64_t nonce2 = 0; + unsigned char * tmp_buf; + int i; + static uint64_t pool_send_nu = 0; + struct part_of_job part_job; + char *buf_hex = NULL; + + part_job.token_type = SEND_JOB_TYPE; + part_job.version = 
0x00; + part_job.pool_nu = pool_send_nu; + part_job.new_block = pool->swork.clean ?1:0; + part_job.asic_diff_valid = 1; + part_job.asic_diff = 15; + part_job.job_id = id; + + hex2bin(&part_job.bbversion, pool->bbversion, 4); + hex2bin(part_job.prev_hash, pool->prev_hash, 32); + hex2bin(&part_job.nbit, pool->nbit, 4); + hex2bin(&part_job.ntime, pool->ntime, 4); + part_job.coinbase_len = pool->coinbase_len; + part_job.nonce2_offset = pool->nonce2_offset; + part_job.nonce2_bytes_num = pool->n2size; + + nonce2 = htole64(pool->nonce2); + memcpy(&(part_job.nonce2_start_value), pool->coinbase + pool->nonce2_offset,8); + memcpy(&(part_job.nonce2_start_value), &nonce2,pool->n2size); + + part_job.merkles_num = pool->merkles; + buf_len = sizeof(struct part_of_job) + pool->coinbase_len + pool->merkles * 32 + 2; + + tmp_buf = (unsigned char *)malloc(buf_len); + if (unlikely(!tmp_buf)) + quit(1, "Failed to malloc tmp_buf"); + part_job.length = buf_len -8; + + memset(tmp_buf,0,buf_len); + memcpy(tmp_buf,&part_job,sizeof(struct part_of_job)); + memcpy(tmp_buf + sizeof(struct part_of_job), pool->coinbase, pool->coinbase_len); + /* + buf_hex = bin2hex(pool->coinbase,pool->coinbase_len); + printf("coinbase:%s offset:%d n2size:%d nonce2%lld\n",buf_hex,pool->nonce2_offset,pool->n2size,pool->nonce2); + free(buf_hex); + */ + for (i = 0; i < pool->merkles; i++) + { + memcpy(tmp_buf + sizeof(struct part_of_job) + pool->coinbase_len + i * 32, pool->swork.merkle_bin[i], 32); + } + + crc = CRC16((uint8_t *)tmp_buf, buf_len-2); + memcpy(tmp_buf + (buf_len - 2), &crc, 2); + + pool_send_nu++; + *buf = (unsigned char *)malloc(buf_len); + if (unlikely(!tmp_buf)) + quit(1, "Failed to malloc buf"); + memcpy(*buf,tmp_buf,buf_len); + free(tmp_buf); + return buf_len; +} + +static void show_status(int if_quit) +{ + char * buf_hex = NULL; + unsigned int *l_job_start_address = NULL; + unsigned int buf[2] = {0}; + int i = 0; + get_work_nonce2(buf); + set_dhash_acc_control((unsigned 
int)get_dhash_acc_control() & ~RUN_BIT); + while((unsigned int)get_dhash_acc_control() & RUN_BIT) + { + cgsleep_ms(1); + applog(LOG_DEBUG,"%s: run bit is 1 after set it to 0", __FUNCTION__); + } + + buf_hex = bin2hex((unsigned char *)dev->current_job_start_address,c_coinbase_padding); + + free(buf_hex); + for(i=0; icurrent_job_start_address + c_coinbase_padding+ i*MERKLE_BIN_LEN,32); + free(buf_hex); + } + if(dev->current_job_start_address == job_start_address_1) + { + l_job_start_address = job_start_address_2; + } + else if(dev->current_job_start_address == job_start_address_2) + { + l_job_start_address = job_start_address_1; + } + buf_hex = bin2hex((unsigned char *)l_job_start_address,l_coinbase_padding); + free(buf_hex); + for(i=0; icoinbase,pool->coinbase_len); + printf("%s: nonce2 0x%x\n", __FUNCTION__, nonce2); + printf("%s: coinbase : %s\n", __FUNCTION__, buf_hex); + free(buf_hex); + for(i=0; imerkles; i++) + { + buf_hex = bin2hex(pool->swork.merkle_bin[i],32); + printf("%s: merkle_bin %d : %s\n", __FUNCTION__, i, buf_hex); + free(buf_hex); + } +} + + +int send_job(unsigned char *buf) +{ + unsigned int len = 0, i=0, j=0, coinbase_padding_len = 0; + unsigned short int crc = 0, job_length = 0; + unsigned char *temp_buf = NULL, *coinbase_padding = NULL, *merkles_bin = NULL; + unsigned char buf1[PREV_HASH_LEN] = {0}; + unsigned int buf2[PREV_HASH_LEN] = {0}; + int times = 0; + struct part_of_job *part_job = NULL; + + applog(LOG_DEBUG,"--- %s\n", __FUNCTION__); + + if(*(buf + 0) != SEND_JOB_TYPE) + { + applog(LOG_DEBUG,"%s: SEND_JOB_TYPE is wrong : 0x%x\n", __FUNCTION__, *(buf + 0)); + return -1; + } + + len = *((unsigned int *)buf + 4/sizeof(int)); + applog(LOG_DEBUG,"%s: len = 0x%x\n", __FUNCTION__, len); + + temp_buf = malloc(len + 8*sizeof(unsigned char)); + if(!temp_buf) + { + applog(LOG_DEBUG,"%s: malloc buffer failed.\n", __FUNCTION__); + return -2; + } + else + { + memset(temp_buf, 0, len + 8*sizeof(unsigned char)); + memcpy(temp_buf, buf, len + 
8*sizeof(unsigned char)); + part_job = (struct part_of_job *)temp_buf; + } + + //write new job data into dev->current_job_start_address + if(dev->current_job_start_address == job_start_address_1) + { + dev->current_job_start_address = job_start_address_2; + } + else if(dev->current_job_start_address == job_start_address_2) + { + dev->current_job_start_address = job_start_address_1; + } + else + { + applog(LOG_DEBUG,"%s: dev->current_job_start_address = 0x%x, but job_start_address_1 = 0x%x, job_start_address_2 = 0x%x\n", __FUNCTION__, dev->current_job_start_address, job_start_address_1, job_start_address_2); + return -3; + } + + + if((part_job->coinbase_len % 64) > 55) + { + coinbase_padding_len = (part_job->coinbase_len/64 + 2) * 64; + } + else + { + coinbase_padding_len = (part_job->coinbase_len/64 + 1) * 64; + } + + coinbase_padding = malloc(coinbase_padding_len); + if(!coinbase_padding) + { + applog(LOG_DEBUG,"%s: malloc coinbase_padding failed.\n", __FUNCTION__); + return -4; + } + else + { + applog(LOG_DEBUG,"%s: coinbase_padding = 0x%x", __FUNCTION__, (unsigned int)coinbase_padding); + } + + if(part_job->merkles_num) + { + merkles_bin = malloc(part_job->merkles_num * MERKLE_BIN_LEN); + if(!merkles_bin) + { + applog(LOG_DEBUG,"%s: malloc merkles_bin failed.\n", __FUNCTION__); + return -5; + } + else + { + applog(LOG_DEBUG,"%s: merkles_bin = 0x%x", __FUNCTION__, (unsigned int)merkles_bin); + } + } + + //applog(LOG_DEBUG,"%s: copy coinbase into memory ...\n", __FUNCTION__); + memset(coinbase_padding, 0, coinbase_padding_len); + memcpy(coinbase_padding, buf + sizeof(struct part_of_job), part_job->coinbase_len); + *(coinbase_padding + part_job->coinbase_len) = 0x80; + *((unsigned int *)coinbase_padding + (coinbase_padding_len - 4)/sizeof(int)) = Swap32((unsigned int)((unsigned long long int)(part_job->coinbase_len * sizeof(char) * 8) & 0x00000000ffffffff)); // 8 means 8 bits + *((unsigned int *)coinbase_padding + (coinbase_padding_len - 8)/sizeof(int)) = 
Swap32((unsigned int)(((unsigned long long int)(part_job->coinbase_len * sizeof(char) * 8) & 0xffffffff00000000) >> 32)); // 8 means 8 bits + + l_coinbase_padding = c_coinbase_padding; + c_coinbase_padding = coinbase_padding_len; + for(i=0; icurrent_job_start_address + i) = *(coinbase_padding + i); + //applog(LOG_DEBUG,"%s: coinbase_padding_in_ddr[%d] = 0x%x", __FUNCTION__, i, *(((unsigned char *)dev->current_job_start_address + i))); + } + + /* check coinbase & padding in ddr */ + for(i=0; icurrent_job_start_address + i) != *(coinbase_padding + i)) + { + applog(LOG_DEBUG,"%s: coinbase_padding_in_ddr[%d] = 0x%x, but *(coinbase_padding + %d) = 0x%x", __FUNCTION__, i, *(((unsigned char *)dev->current_job_start_address + i)), i, *(coinbase_padding + i)); + } + } + l_merkles_num = c_merkles_num; + c_merkles_num = part_job->merkles_num; + if(part_job->merkles_num) + { + applog(LOG_DEBUG,"%s: copy merkle bin into memory ...\n", __FUNCTION__); + memset(merkles_bin, 0, part_job->merkles_num * MERKLE_BIN_LEN); + memcpy(merkles_bin, buf + sizeof(struct part_of_job) + part_job->coinbase_len , part_job->merkles_num * MERKLE_BIN_LEN); + + for(i=0; i<(part_job->merkles_num * MERKLE_BIN_LEN); i++) + { + *((unsigned char *)dev->current_job_start_address + coinbase_padding_len + i) = *(merkles_bin + i); + //applog(LOG_DEBUG,"%s: merkles_in_ddr[%d] = 0x%x", __FUNCTION__, i, *(((unsigned char *)dev->current_job_start_address + coinbase_padding_len + i))); + } + + for(i=0; i<(part_job->merkles_num * MERKLE_BIN_LEN); i++) + { + if(*((unsigned char *)dev->current_job_start_address + coinbase_padding_len + i) != *(merkles_bin + i)) + { + applog(LOG_DEBUG,"%s: merkles_in_ddr[%d] = 0x%x, but *(merkles_bin + %d) =0x%x", __FUNCTION__, i, *(((unsigned char *)dev->current_job_start_address + coinbase_padding_len + i)), i, *(merkles_bin + i)); + } + } + } + + + + set_dhash_acc_control((unsigned int)get_dhash_acc_control() & ~RUN_BIT); + while((unsigned int)get_dhash_acc_control() & RUN_BIT) + { 
+ cgsleep_ms(1); + applog(LOG_DEBUG,"%s: run bit is 1 after set it to 0\n", __FUNCTION__); + times++; + } + cgsleep_ms(1); + + + //write new job data into dev->current_job_start_address + if(dev->current_job_start_address == job_start_address_1) + { + set_job_start_address(PHY_MEM_JOB_START_ADDRESS_1); + //applog(LOG_DEBUG,"%s: dev->current_job_start_address = 0x%x\n", __FUNCTION__, (unsigned int)job_start_address_2); + } + else if(dev->current_job_start_address == job_start_address_2) + { + set_job_start_address(PHY_MEM_JOB_START_ADDRESS_2); + //applog(LOG_DEBUG,"%s: dev->current_job_start_address = 0x%x\n", __FUNCTION__, (unsigned int)job_start_address_1); + } + + if(part_job->asic_diff_valid) + { + set_ticket_mask((unsigned int)(part_job->asic_diff & 0x000000ff)); + dev->diff = part_job->asic_diff & 0xff; + } + + set_job_id(part_job->job_id); + + set_block_header_version(part_job->bbversion); + + memset(buf2, 0, PREV_HASH_LEN*sizeof(unsigned int)); + for(i=0; i<(PREV_HASH_LEN/sizeof(unsigned int)); i++) + { + buf2[i] = ((part_job->prev_hash[4*i + 3]) << 24) | ((part_job->prev_hash[4*i + 2]) << 16) | ((part_job->prev_hash[4*i + 1]) << 8) | (part_job->prev_hash[4*i + 0]); + } + set_pre_header_hash(buf2); + + set_time_stamp(part_job->ntime); + + set_target_bits(part_job->nbit); + + j = (part_job->nonce2_offset << 16) | ((unsigned char)(part_job->nonce2_bytes_num & 0x00ff)) << 8 | (unsigned char)((coinbase_padding_len/64) & 0x000000ff); + set_coinbase_length_and_nonce2_length(j); + + //memset(buf2, 0, PREV_HASH_LEN*sizeof(unsigned int)); + buf2[0] = 0; + buf2[1] = 0; + buf2[0] = ((unsigned long long )(part_job->nonce2_start_value)) & 0xffffffff; + buf2[1] = ((unsigned long long )(part_job->nonce2_start_value) >> 32) & 0xffffffff; + set_work_nonce2(buf2); + + set_merkle_bin_number(part_job->merkles_num); + + job_length = coinbase_padding_len + part_job->merkles_num*MERKLE_BIN_LEN; + set_job_length((unsigned int)job_length & 0x0000ffff); + + cgsleep_ms(1); + + + 
if(!gBegin_get_nonce) + { + set_nonce_fifo_interrupt(get_nonce_fifo_interrupt() | FLUSH_NONCE3_FIFO); + gBegin_get_nonce = true; + } +#if 1 + //start FPGA generating works + if(part_job->new_block) + { + if(!opt_multi_version) + { + set_dhash_acc_control((unsigned int)get_dhash_acc_control() | NEW_BLOCK ); + set_dhash_acc_control((unsigned int)get_dhash_acc_control() | RUN_BIT | OPERATION_MODE); + } + else + { + set_dhash_acc_control((unsigned int)get_dhash_acc_control() | NEW_BLOCK ); + set_dhash_acc_control((unsigned int)get_dhash_acc_control() | RUN_BIT | OPERATION_MODE |VIL_MODE); + } + } + else + { + if(!opt_multi_version) + set_dhash_acc_control((unsigned int)get_dhash_acc_control() | RUN_BIT| OPERATION_MODE ); + else + set_dhash_acc_control((unsigned int)get_dhash_acc_control() | RUN_BIT| OPERATION_MODE |VIL_MODE); + } +#endif + + free(temp_buf); + free((unsigned char *)coinbase_padding); + if(part_job->merkles_num) + { + free((unsigned char *)merkles_bin); + } + + applog(LOG_DEBUG,"--- %s end\n", __FUNCTION__); + cgtime(&tv_send_job); + return 0; +} + +static void copy_pool_stratum(struct pool *pool_stratum, struct pool *pool) +{ + int i; + int merkles = pool->merkles; + size_t coinbase_len = pool->coinbase_len; + + if (!pool->swork.job_id) + return; + + cg_wlock(&pool_stratum->data_lock); + free(pool_stratum->swork.job_id); + free(pool_stratum->nonce1); + free(pool_stratum->coinbase); + + align_len(&coinbase_len); + pool_stratum->coinbase = calloc(coinbase_len, 1); + if (unlikely(!pool_stratum->coinbase)) + quit(1, "Failed to calloc pool_stratum coinbase in c5"); + memcpy(pool_stratum->coinbase, pool->coinbase, coinbase_len); + + + for (i = 0; i < pool_stratum->merkles; i++) + free(pool_stratum->swork.merkle_bin[i]); + if (merkles) + { + pool_stratum->swork.merkle_bin = realloc(pool_stratum->swork.merkle_bin, + sizeof(char *) * merkles + 1); + for (i = 0; i < merkles; i++) + { + pool_stratum->swork.merkle_bin[i] = malloc(32); + if 
(unlikely(!pool_stratum->swork.merkle_bin[i])) + quit(1, "Failed to malloc pool_stratum swork merkle_bin"); + memcpy(pool_stratum->swork.merkle_bin[i], pool->swork.merkle_bin[i], 32); + } + } + pool_stratum->pool_no = pool->pool_no; + pool_stratum->sdiff = pool->sdiff; + pool_stratum->coinbase_len = pool->coinbase_len; + pool_stratum->nonce2_offset = pool->nonce2_offset; + pool_stratum->n2size = pool->n2size; + pool_stratum->merkles = pool->merkles; + + pool_stratum->swork.job_id = strdup(pool->swork.job_id); + pool_stratum->nonce1 = strdup(pool->nonce1); + + memcpy(pool_stratum->ntime, pool->ntime, sizeof(pool_stratum->ntime)); + memcpy(pool_stratum->header_bin, pool->header_bin, sizeof(pool_stratum->header_bin)); + cg_wunlock(&pool_stratum->data_lock); +} + + +static void noblock_socket(int fd) +{ + int flags = fcntl(fd, F_GETFL, 0); + fcntl(fd, F_SETFL, O_NONBLOCK | flags); +} + +static void block_socket(int fd) +{ + int flags = fcntl(fd, F_GETFL, 0); + fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); +} + +static bool sock_connecting(void) +{ + return errno == EINPROGRESS; +} + +static int get_mac(char * device,char **mac) +{ + struct ifreq ifreq; + int sock = 0; + + sock = socket(AF_INET,SOCK_STREAM,0); + if(sock < 0) + { + perror("error sock"); + return 2; + } + strcpy(ifreq.ifr_name,device); + if(ioctl(sock,SIOCGIFHWADDR,&ifreq) < 0) + { + perror("error ioctl"); + close(sock); + return 3; + } + int i = 0; + for(i = 0; i < 6; i++) + { + sprintf(*mac+3*i, "%02X:", (unsigned char)ifreq.ifr_hwaddr.sa_data[i]); + } + (*mac)[strlen(*mac) - 1] = 0; + close(sock); + return 0; +} + +static bool setup_send_mac_socket(char * s) +{ + struct addrinfo *servinfo, hints, *p; + int sockd; + int send_bytes,recv_bytes; + char rec[1024]; + int flags; + + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + if (getaddrinfo(AUTH_URL, PORT, &hints, &servinfo) != 0) + { + return false; + } + for (p = servinfo; p != NULL; p = 
p->ai_next) + { + sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); + if (sockd == -1) + { + continue; + } + noblock_socket(sockd); + if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) + { + struct timeval tv_timeout = {10, 0}; + int selret; + fd_set rw; + if (!sock_connecting()) + { + close(sockd); + continue; + } + retry: + FD_ZERO(&rw); + FD_SET(sockd, &rw); + selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout); + if (selret > 0 && FD_ISSET(sockd, &rw)) + { + socklen_t len; + int err, n; + + len = sizeof(err); + n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (!n && !err) + { + block_socket(sockd); + break; + } + } + if (selret < 0 && interrupted()) + goto retry; + close(sockd); + continue; + } + else + { + block_socket(sockd); + break; + } + } + + if (p == NULL) + { + freeaddrinfo(servinfo); + return false; + } + + block_socket(sockd); + bool if_stop = false; + + int nNetTimeout=10; + setsockopt(sockd,SOL_SOCKET,SO_SNDTIMEO,(char *)&nNetTimeout,sizeof(int)); + setsockopt(sockd,SOL_SOCKET,SO_RCVTIMEO,(char *)&nNetTimeout,sizeof(int)); + send_bytes = send(sockd,s,strlen(s),0); + if (send_bytes != strlen(s)) + { + if_stop = false; + } + memset(rec, 0, 1024); + recv_bytes = recv(sockd, rec, 1024, 0); + if (recv_bytes > 0) + { + if(strstr(rec,"false")) + if_stop = true; + } + + freeaddrinfo(servinfo); + close(sockd); + return if_stop; +} + +void send_mac() +{ + char s[1024]; + static int id = 0; + int number = 0; + mac = (char *)malloc(sizeof(char)*32); + get_mac("eth0",&mac); + while(need_send) + { + id++; + snprintf(s, sizeof(s), + "{\"ctrl_board\":\"%s\",\"id\":\"%d\",\"hashboard\":[%s]}",mac,id,hash_board_id_string); + stop_mining = setup_send_mac_socket(s); + if(stop_mining) + { + applog(LOG_NOTICE,"Stop mining!!!"); + break; + } + srand((unsigned) time(NULL)); + number = rand() % 600 + 60; + sleep(number); + } + free(mac); +} + + +static bool bitmain_c5_prepare(struct thr_info *thr) +{ + struct cgpu_info *bitmain_c5 = 
thr->cgpu; + struct bitmain_c5_info *info = bitmain_c5->device_data; + + info->thr = thr; + mutex_init(&info->lock); + cglock_init(&info->update_lock); + cglock_init(&info->pool0.data_lock); + cglock_init(&info->pool1.data_lock); + cglock_init(&info->pool2.data_lock); + + struct init_config c5_config = + { + .token_type = 0x51, + .version = 0, + .length = 26, + .reset = 1, + .fan_eft = opt_bitmain_fan_ctrl, + .timeout_eft = 1, + .frequency_eft = 1, + .voltage_eft = 1, + .chain_check_time_eft = 1, + .chip_config_eft =1, + .hw_error_eft =1, + .beeper_ctrl =1, + .temp_ctrl =1, + .chain_freq_eft =1, + .reserved1 =0, + .reserved2 ={0}, + .chain_num = 9, + .asic_num = 54, + .fan_pwm_percent = opt_bitmain_fan_pwm, + .temperature = 80, + .frequency = opt_bitmain_c5_freq, + .voltage = {0x07,0x25}, + .chain_check_time_integer = 10, + .chain_check_time_fractions = 10, + .timeout_data_integer = 0, + .timeout_data_fractions = 0, + .reg_data = 0, + .chip_address = 0x04, + .reg_address= 0, + .chain_min_freq = 400, + .chain_max_freq = 600, + }; + c5_config.crc = CRC16((uint8_t *)(&c5_config), sizeof(c5_config)-2); + + bitmain_c5_init(c5_config); + + send_mac_thr = calloc(1,sizeof(struct thr_info)); + if(thr_info_create(send_mac_thr, NULL, send_mac, send_mac_thr)) + { + applog(LOG_DEBUG,"%s: create thread for send mac\n", __FUNCTION__); + } + + return true; +} + +static void bitmain_c5_reinit_device(struct cgpu_info *bitmain) +{ + if(!status_error) + system("/etc/init.d/bmminer.sh restart > /dev/null 2>&1 &"); +} + + + +static void bitmain_c5_detect(__maybe_unused bool hotplug) +{ + struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu)); + struct device_drv *drv = &bitmain_c5_drv; + struct bitmain_c5_info *a; + + assert(cgpu); + cgpu->drv = drv; + cgpu->deven = DEV_ENABLED; + cgpu->threads = 1; + cgpu->device_data = calloc(sizeof(struct bitmain_c5_info), 1); + if (unlikely(!(cgpu->device_data))) + quit(1, "Failed to calloc cgpu_info data"); + a = cgpu->device_data; + a->pool0_given_id = 
0; + a->pool1_given_id = 1; + a->pool2_given_id = 2; + + assert(add_cgpu(cgpu)); +} + +static __inline void flip_swab(void *dest_p, const void *src_p, unsigned int length) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + int i; + + for (i = 0; i < length/4; i++) + dest[i] = swab32(src[i]); +} +static uint64_t hashtest_submit(struct thr_info *thr, struct work *work, uint32_t nonce, uint8_t *midstate,struct pool *pool,uint64_t nonce2,uint32_t chain_id ) +{ + unsigned char hash1[32]; + unsigned char hash2[32]; + unsigned char i,j; + unsigned char which_asic_nonce; + uint64_t hashes = 0; + static uint64_t pool_diff = 0, net_diff = 0; + static uint64_t pool_diff_bit = 0, net_diff_bit = 0; + + if(pool_diff != (uint64_t)work->sdiff) + { + pool_diff = (uint64_t)work->sdiff; + pool_diff_bit = 0; + uint64_t tmp_pool_diff = pool_diff; + while(tmp_pool_diff > 0) + { + tmp_pool_diff = tmp_pool_diff >> 1; + pool_diff_bit++; + } + pool_diff_bit--; + applog(LOG_DEBUG,"%s: pool_diff:%d work_diff:%d pool_diff_bit:%d ...\n", __FUNCTION__,pool_diff,work->sdiff,pool_diff_bit); + } + + if(net_diff != (uint64_t)current_diff) + { + net_diff = (uint64_t)current_diff; + net_diff_bit = 0; + uint64_t tmp_net_diff = net_diff; + while(tmp_net_diff > 0) + { + tmp_net_diff = tmp_net_diff >> 1; + net_diff_bit++; + } + net_diff_bit--; + applog(LOG_DEBUG,"%s:net_diff:%d current_diff:%d net_diff_bit %d ...\n", __FUNCTION__,net_diff,current_diff,net_diff_bit); + } + + uint32_t *hash2_32 = (uint32_t *)hash1; + __attribute__ ((aligned (4))) sha2_context ctx; + memcpy(ctx.state, (void*)work->midstate, 32); +#if TEST_DHASH + rev((unsigned char*)ctx.state, sizeof(ctx.state)); +#endif + ctx.total[0] = 80; + ctx.total[1] = 00; + memcpy(hash1, (void*)work->data + 64, 12); +#if TEST_DHASH + rev(hash1, 12); +#endif + flip_swab(ctx.buffer, hash1, 12); + memcpy(hash1, &nonce, 4); +#if TEST_DHASH + rev(hash1, 4); +#endif + flip_swab(ctx.buffer + 12, hash1, 4); + + sha2_finish(&ctx, hash1); + + 
memset( &ctx, 0, sizeof( sha2_context ) ); + sha2(hash1, 32, hash2); + + flip32(hash1, hash2); + + if (hash2_32[7] != 0) + { + if(dev->chain_exist[chain_id] == 1) + { + inc_hw_errors(thr); + dev->chain_hw[chain_id]++; + } + //inc_hw_errors_with_diff(thr,(0x01UL << DEVICE_DIFF)); + //dev->chain_hw[chain_id]+=(0x01UL << DEVICE_DIFF); + applog(LOG_DEBUG,"%s: HASH2_32[7] != 0", __FUNCTION__); + return 0; + } + for(i=0; i < 7; i++) + { + if(be32toh(hash2_32[6 - i]) != 0) + break; + } + if(i >= pool_diff_bit/32) + { + which_asic_nonce = (nonce >> (24 + dev->check_bit)) & 0xff; + applog(LOG_DEBUG,"%s: chain %d which_asic_nonce %d ", __FUNCTION__, chain_id, which_asic_nonce); + dev->chain_asic_nonce[chain_id][which_asic_nonce]++; + if(be32toh(hash2_32[6 - pool_diff_bit/32]) < ((uint32_t)0xffffffff >> (pool_diff_bit%32))) + { + hashes += (0x01UL << DEVICE_DIFF); + if(current_diff != 0) + { + for(i=0; i < net_diff_bit/32; i++) + { + if(be32toh(hash2_32[6 - i]) != 0) + break; + } + if(i == net_diff_bit/32) + { + if(be32toh(hash2_32[6 - net_diff_bit/32]) < ((uint32_t)0xffffffff >> (net_diff_bit%32))) + { + // to do found block!!! 
+ } + } + } + submit_nonce(thr, work, nonce); + } + else if(be32toh(hash2_32[6 - DEVICE_DIFF/32]) < ((uint32_t)0xffffffff >> (DEVICE_DIFF%32))) + { + hashes += (0x01UL << DEVICE_DIFF); + } + } + return hashes; +} + +static int64_t bitmain_scanhash(struct thr_info *thr) +{ + struct cgpu_info *bitmain_c5 = thr->cgpu; + struct bitmain_c5_info *info = bitmain_c5->device_data; + struct timeval current; + double device_tdiff, hwp; + uint32_t a = 0, b = 0; + static uint32_t last_nonce3 = 0; + static uint32_t last_workid = 0; + int i, j; + /* Stop polling the device if there is no stratum in 3 minutes, network is down */ + cgtime(¤t); + h = 0; + pthread_mutex_lock(&nonce_mutex); + cg_rlock(&info->update_lock); + while(nonce_read_out.nonce_num) + { + uint32_t nonce3 = nonce_read_out.nonce_buffer[nonce_read_out.p_rd].nonce3; + uint32_t job_id = nonce_read_out.nonce_buffer[nonce_read_out.p_rd].job_id; + uint64_t nonce2 = nonce_read_out.nonce_buffer[nonce_read_out.p_rd].nonce2; + uint32_t chain_id = nonce_read_out.nonce_buffer[nonce_read_out.p_rd].chain_num; + uint32_t work_id = nonce_read_out.nonce_buffer[nonce_read_out.p_rd].work_id; + uint32_t version = Swap32(nonce_read_out.nonce_buffer[nonce_read_out.p_rd].header_version); + uint8_t midstate[32] = {0}; + int i = 0; + for(i=0; i<32; i++) + { + + midstate[(7-(i/4))*4 + (i%4)] = nonce_read_out.nonce_buffer[nonce_read_out.p_rd].midstate[i]; + } + applog(LOG_DEBUG,"%s: job_id:0x%x work_id:0x%x nonce2:0x%llx nonce3:0x%x version:0x%x\n", __FUNCTION__,job_id, work_id,nonce2, nonce3,version); + struct work * work; + + struct pool *pool, *c_pool; + struct pool *pool_stratum0 = &info->pool0; + struct pool *pool_stratum1 = &info->pool1; + struct pool *pool_stratum2 = &info->pool2; + + if(nonce_read_out.p_rd< MAX_NONCE_NUMBER_IN_FIFO) + { + nonce_read_out.p_rd++; + } + else + { + nonce_read_out.p_rd = 0; + } + + nonce_read_out.nonce_num--; + + if(nonce3 != last_nonce3 || work_id != last_workid ) + { + last_nonce3 = nonce3; + 
last_workid = work_id; + } + else + { + if(dev->chain_exist[chain_id] == 1) + { + inc_hw_errors(thr); + dev->chain_hw[chain_id]++; + } + continue; + } + + applog(LOG_DEBUG,"%s: Chain ID J%d ...\n", __FUNCTION__, chain_id + 1); + if( (given_id -2)> job_id && given_id < job_id) + { + applog(LOG_DEBUG,"%s: job_id error ...\n", __FUNCTION__); + if(dev->chain_exist[chain_id] == 1) + { + inc_hw_errors(thr); + dev->chain_hw[chain_id]++; + } + continue; + } + + applog(LOG_DEBUG,"%s: given_id:%d job_id:%d switch:%d ...\n", __FUNCTION__,given_id,job_id,given_id - job_id); + + switch (given_id - job_id) + { + case 0: + pool = pool_stratum0; + break; + case 1: + pool = pool_stratum1; + break; + case 2: + pool = pool_stratum2; + break; + default: + applog(LOG_DEBUG,"%s: job_id non't found ...\n", __FUNCTION__); + if(dev->chain_exist[chain_id] == 1) + { + inc_hw_errors(thr); + dev->chain_hw[chain_id]++; + } + continue; + } + c_pool = pools[pool->pool_no]; + get_work_by_nonce2(thr,&work,pool,c_pool,nonce2,pool->ntime,version); + h += hashtest_submit(thr,work,nonce3,midstate,pool,nonce2,chain_id); + free_work(work); + } + cg_runlock(&info->update_lock); + pthread_mutex_unlock(&nonce_mutex); + cgsleep_ms(1); + if(h != 0) + { + applog(LOG_DEBUG,"%s: hashes %u ...\n", __FUNCTION__,h * 0xffffffffull); + } + h = h * 0xffffffffull; +} + +static int64_t bitmain_c5_scanhash(struct thr_info *thr) +{ + h = 0; + pthread_t send_id; + pthread_create(&send_id, NULL, bitmain_scanhash, thr); + pthread_join(send_id, NULL); + return h; +} + +static void bitmain_c5_update(struct cgpu_info *bitmain_c5) +{ + struct bitmain_c5_info *info = bitmain_c5->device_data; + struct thr_info *thr = bitmain_c5->thr[0]; + struct work *work; + struct pool *pool; + int i, count = 0; + mutex_lock(&info->lock); + static char *last_job = NULL; + bool same_job = true; + unsigned char *buf = NULL; + thr->work_update = false; + thr->work_restart = false; + /* Step 1: Make sure pool is ready */ + work = get_work(thr, 
thr->id); + discard_work(work); /* Don't leak memory */ + /* Step 2: Protocol check */ + pool = current_pool(); + if (!pool->has_stratum) + quit(1, "Bitmain S9 has to use stratum pools"); + + /* Step 3: Parse job to c5 formart */ + cg_wlock(&info->update_lock); + cg_rlock(&pool->data_lock); + info->pool_no = pool->pool_no; + copy_pool_stratum(&info->pool2, &info->pool1); + info->pool2_given_id = info->pool1_given_id; + + copy_pool_stratum(&info->pool1, &info->pool0); + info->pool1_given_id = info->pool0_given_id; + + copy_pool_stratum(&info->pool0, pool); + info->pool0_given_id = ++given_id; + parse_job_to_c5(&buf, pool, info->pool0_given_id); + /* Step 4: Send out buf */ + if(!status_error) + send_job(buf); + cg_runlock(&pool->data_lock); + cg_wunlock(&info->update_lock); + free(buf); + mutex_unlock(&info->lock); +} + +static void get_bitmain_statline_before(char *buf, size_t bufsiz, struct cgpu_info *bitmain_c5) +{ + struct bitmain_c5_info *info = bitmain_c5->device_data; +} + +static struct api_data *bitmain_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + struct bitmain_c5_info *info = cgpu->device_data; + char buf[64]; + int i = 0; + uint64_t hash_rate_all = 0; + char displayed_rate_all[16]; + bool copy_data = false; + + root = api_add_uint8(root, "miner_count", &(dev->chain_num), copy_data); + root = api_add_string(root, "frequency", dev->frequency_t, copy_data); + root = api_add_uint8(root, "fan_num", &(dev->fan_num), copy_data); + + for(i = 0; i < BITMAIN_MAX_FAN_NUM; i++) + { + char fan_name[12]; + sprintf(fan_name,"fan%d",i+1); + root = api_add_uint(root, fan_name, &(dev->fan_speed_value[i]), copy_data); + } + + root = api_add_uint8(root, "temp_num", &(dev->chain_num), copy_data); + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + char temp_name[12]; + sprintf(temp_name,"temp%d",i+1); + root = api_add_int16(root, temp_name, &(dev->chain_asic_temp[i][2][0]), copy_data); + } + + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + char 
temp2_name[12]; + sprintf(temp2_name,"temp2_%d",i+1); + root = api_add_int16(root, temp2_name, &(dev->chain_asic_temp[i][2][1]), copy_data); + } + + root = api_add_int(root, "temp_max", &(dev->temp_top1), copy_data); + total_diff1 = total_diff_accepted + total_diff_rejected + total_diff_stale; + double hwp = (hw_errors + total_diff1) ? + (double)(hw_errors) / (double)(hw_errors + total_diff1) : 0; + root = api_add_percent(root, "Device Hardware%", &hwp, false); + root = api_add_int(root, "no_matching_work", &hw_errors, false); + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + char chain_name[12]; + sprintf(chain_name,"chain_acn%d",i+1); + root = api_add_uint8(root, chain_name, &(dev->chain_asic_num[i]), copy_data); + } + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + char chain_asic_name[12]; + sprintf(chain_asic_name,"chain_acs%d",i+1); + root = api_add_string(root, chain_asic_name, dev->chain_asic_status_string[i], copy_data); + } + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + char chain_hw[16]; + sprintf(chain_hw,"chain_hw%d",i+1); + root = api_add_uint32(root, chain_hw, &(dev->chain_hw[i]), copy_data); + } + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + char chain_rate[16]; + sprintf(chain_rate,"chain_rate%d",i+1); + root = api_add_string(root, chain_rate, displayed_rate[i], copy_data); + } + + for(i = 0; i < BITMAIN_MAX_CHAIN_NUM; i++) + { + if(dev->chain_exist[i] == 1) + { + hash_rate_all += rate[i]; + } + } + + suffix_string_c5(hash_rate_all, (char * )displayed_hash_rate, sizeof(displayed_hash_rate), 7,false); + + return root; +} + + +static void bitmain_c5_shutdown(struct thr_info *thr) +{ + unsigned int ret; + thr_info_cancel(check_system_work_id); + thr_info_cancel(read_nonce_reg_id); + thr_info_cancel(read_temp_id); + thr_info_cancel(read_hash_rate); + thr_info_cancel(pic_heart_beat); + thr_info_cancel(send_mac_thr); + + ret = get_BC_write_command(); //disable null work + ret &= ~BC_COMMAND_EN_NULL_WORK; + set_BC_write_command(ret); + 
set_dhash_acc_control((unsigned int)get_dhash_acc_control() & ~RUN_BIT); +} + + +struct device_drv bitmain_c5_drv = +{ + .drv_id = DRIVER_bitmain_c5, + .dname = "Bitmain_C5", + .name = "BC5", + .drv_detect = bitmain_c5_detect, + .thread_prepare = bitmain_c5_prepare, + .hash_work = hash_driver_work, + .scanwork = bitmain_c5_scanhash, + .flush_work = bitmain_c5_update, + .update_work = bitmain_c5_update, + .get_api_stats = bitmain_api_stats, + .reinit_device = bitmain_c5_reinit_device, + .get_statline_before = get_bitmain_statline_before, + .thread_shutdown = bitmain_c5_shutdown, +}; + diff --git a/driver-btm-c5.h b/driver-btm-c5.h new file mode 100644 index 0000000..e57e901 --- /dev/null +++ b/driver-btm-c5.h @@ -0,0 +1,560 @@ +#ifndef C5_H +#define C5_H + +//FPGA rgister Address Map +#define HARDWARE_VERSION (0x00000000/sizeof(int)) +#define FAN_SPEED (0x00000004/sizeof(int)) +#define HASH_ON_PLUG (0x00000008/sizeof(int)) +#define BUFFER_SPACE (0x0000000c/sizeof(int)) +#define RETURN_NONCE (0x00000010/sizeof(int)) +#define NONCE_NUMBER_IN_FIFO (0x00000018/sizeof(int)) +#define NONCE_FIFO_INTERRUPT (0x0000001c/sizeof(int)) +#define TEMPERATURE_0_3 (0x00000020/sizeof(int)) +#define TEMPERATURE_4_7 (0x00000024/sizeof(int)) +#define TEMPERATURE_8_11 (0x00000028/sizeof(int)) +#define TEMPERATURE_12_15 (0x0000002c/sizeof(int)) +#define IIC_COMMAND (0x00000030/sizeof(int)) +#define TW_WRITE_COMMAND (0x00000040/sizeof(int)) +#define QN_WRITE_DATA_COMMAND (0x00000080/sizeof(int)) +#define FAN_CONTROL (0x00000084/sizeof(int)) +#define TIME_OUT_CONTROL (0x00000088/sizeof(int)) +#define TICKET_MASK_FPGA (0x0000008c/sizeof(int)) +#define HASH_COUNTING_NUMBER_FPGA (0x00000090/sizeof(int)) +#define SNO (0x00000094/sizeof(int)) +#define BC_WRITE_COMMAND (0x000000c0/sizeof(int)) +#define BC_COMMAND_BUFFER (0x000000c4/sizeof(int)) +#define FPGA_CHIP_ID_ADDR (0x000000f0/sizeof(int)) +#define DHASH_ACC_CONTROL (0x00000100/sizeof(int)) +#define COINBASE_AND_NONCE2_LENGTH 
(0x00000104/sizeof(int)) +#define WORK_NONCE_2 (0x00000108/sizeof(int)) +#define NONCE2_AND_JOBID_STORE_ADDRESS (0x00000110/sizeof(int)) +#define MERKLE_BIN_NUMBER (0x00000114/sizeof(int)) +#define JOB_START_ADDRESS (0x00000118/sizeof(int)) +#define JOB_LENGTH (0x0000011c/sizeof(int)) +#define JOB_DATA_READY (0x00000120/sizeof(int)) +#define JOB_ID (0x00000124/sizeof(int)) +#define BLOCK_HEADER_VERSION (0x00000130/sizeof(int)) +#define TIME_STAMP (0x00000134/sizeof(int)) +#define TARGET_BITS (0x00000138/sizeof(int)) +#define PRE_HEADER_HASH (0x00000140/sizeof(int)) + +//FPGA registers bit map +//QN_WRITE_DATA_COMMAND +#define RESET_HASH_BOARD (1 << 31) +#define RESET_ALL (1 << 23) +#define CHAIN_ID(id) (id << 16) +#define RESET_FPGA (1 << 15) +#define RESET_TIME(time) (time << 0) +#define TIME_OUT_VALID (1 << 31) +//RETURN_NONCE +#define WORK_ID_OR_CRC (1 << 31) +#define WORK_ID_OR_CRC_VALUE(value) ((value >> 16) & 0x7fff) +#define NONCE_INDICATOR (1 << 7) +#define CHAIN_NUMBER(value) (value & 0xf) +#define REGISTER_DATA_CRC(value) ((value >> 24) & 0x7f) +//BC_WRITE_COMMAND +#define BC_COMMAND_BUFFER_READY (1 << 31) +#define BC_COMMAND_EN_CHAIN_ID (1 << 23) +#define BC_COMMAND_EN_NULL_WORK (1 << 22) +//NONCE2_AND_JOBID_STORE_ADDRESS +#define JOB_ID_OFFSET (0x0/sizeof(int)) +#define HEADER_VERSION_OFFSET (0x4/sizeof(int)) +#define NONCE2_L_OFFSET (0x8/sizeof(int)) +#define NONCE2_H_OFFSET (0xc/sizeof(int)) +#define MIDSTATE_OFFSET 0x20 +//DHASH_ACC_CONTROL +#define VIL_MODE (1 << 15) +#define VIL_MIDSTATE_NUMBER(value) ((value &0x0f) << 8) +#define NEW_BLOCK (1 << 7) +#define RUN_BIT (1 << 6) +#define OPERATION_MODE (1 << 5) +//NONCE_FIFO_INTERRUPT +#define FLUSH_NONCE3_FIFO (1 << 16) + + +//ASIC macro define +//ASIC register address +#define C5_VERSION 1 +#define CHIP_ADDRESS 0x0 +#define GOLDEN_NONCE_COUNTER 0x8 +#define PLL_PARAMETER 0xc +#define START_NONCE_OFFSET 0x10 +#define HASH_COUNTING_NUMBER 0x14 +#define TICKET_MASK 0x18 +#define MISC_CONTROL 0x1c 
#define GENERAL_I2C_COMMAND 0X20

//ASIC command
#define SET_ADDRESS 0x1
#define SET_PLL_DIVIDER2 0x2
#define PATTERN_CONTROL 0x3
#define GET_STATUS 0x4
#define CHAIN_INACTIVE 0x5
#define SET_BAUD_OPS 0x6
#define SET_PLL_DIVIDER1 0x7
#define SET_CONFIG 0x8
#define COMMAND_FOR_ALL 0x80
//other ASIC macro define
#define MAX_BAUD_DIVIDER 26
#define DEFAULT_BAUD_DIVIDER 26
#define BM1387_CORE_NUM 114
#define VIL_COMMAND_TYPE (0x02 << 5)
#define VIL_ALL (0x01 << 4)
#define PAT (0x01 << 7)
#define GRAY (0x01 << 6)
#define INV_CLKO (0x01 << 5)
#define LPD (0x01 << 4)
#define GATEBCLK (0x01 << 7)
#define RFS (0x01 << 6)
#define MMEN (0x01 << 7)
#define TFS(x) ((x & 0x03) << 5)


// Pic
#define PIC_FLASH_POINTER_START_ADDRESS_H 0x03
#define PIC_FLASH_POINTER_START_ADDRESS_L 0x00
#define PIC_FLASH_POINTER_END_ADDRESS_H 0x0f
#define PIC_FLASH_POINTER_END_ADDRESS_L 0x7f
/* Usable flash window length in bytes: (end16 - start16) + 1, where each
 * 16-bit address is assembled from its H/L byte pair.  The shifts MUST be
 * parenthesized: '+' binds tighter than '<<', so the unparenthesized form
 * 'H<<8 + L' parses as 'H << (8 + L)' — a shift by up to 135 bits, which
 * is undefined behaviour and yields garbage instead of 3200 (0x0c80). */
#define PIC_FLASH_LENGTH ((((unsigned int)PIC_FLASH_POINTER_END_ADDRESS_H << 8) + PIC_FLASH_POINTER_END_ADDRESS_L) - (((unsigned int)PIC_FLASH_POINTER_START_ADDRESS_H << 8) + PIC_FLASH_POINTER_START_ADDRESS_L) + 1)
#define PIC_FLASH_SECTOR_LENGTH 32
#define PIC_SOFTWARE_VERSION_LENGTH 1
#define PIC_VOLTAGE_TIME_LENGTH 6
#define PIC_COMMAND_1 0x55
#define PIC_COMMAND_2 0xaa
#define SET_PIC_FLASH_POINTER 0x01
#define SEND_DATA_TO_IIC 0x02 // just send data into pic's cache
#define READ_DATA_FROM_IIC 0x03
#define ERASE_IIC_FLASH 0x04 // erase 32 bytes one time
#define WRITE_DATA_INTO_PIC 0x05 // tell pic write data into flash from cache
#define JUMP_FROM_LOADER_TO_APP 0x06
#define RESET_PIC 0x07
#define GET_PIC_FLASH_POINTER 0x08
#define SET_VOLTAGE 0x10
#define SET_VOLTAGE_TIME 0x11
#define SET_HASH_BOARD_ID 0x12
#define GET_HASH_BOARD_ID 0x13
#define SET_HOST_MAC_ADDRESS 0x14
#define ENABLE_VOLTAGE 0x15
#define SEND_HEART_BEAT 0x16
#define GET_PIC_SOFTWARE_VERSION 0x17
#define GET_VOLTAGE 0x18
#define GET_DATE 0x19
#define GET_WHICH_MAC 0x20
#define GET_MAC 0x21
+#define WR_TEMP_OFFSET_VALUE 0x22 +#define RD_TEMP_OFFSET_VALUE 0x23 + + + +#define HEART_BEAT_TIME_GAP 10 // 10s +#define IIC_READ (1 << 25) +#define IIC_WRITE (~IIC_READ) +#define IIC_REG_ADDR_VALID (1 << 24) +#define IIC_ADDR_HIGH_4_BIT (0x0A << 20) +#define IIC_CHAIN_NUMBER(x) ((x & 0x0f) << 16) +#define IIC_REG_ADDR(x) ((x & 0xff) << 8) + + + + +//other FPGA macro define +#define TOTAL_LEN 0x160 +#define FPGA_MEM_TOTAL_LEN (16*1024*1024) // 16M bytes +#define HARDWARE_VERSION_VALUE 0xC501 +#define NONCE2_AND_JOBID_STORE_SPACE (2*1024*1024) // 2M bytes +#define NONCE2_AND_JOBID_STORE_SPACE_ORDER 9 // for 2M bytes space +#define JOB_STORE_SPACE (1 << 16) // for 64K bytes space +#define JOB_START_SPACE (1024*8) // 8K bytes +#define JOB_START_ADDRESS_ALIGN 32 // JOB_START_ADDRESS need 32 bytes aligned +#define NONCE2_AND_JOBID_ALIGN 64 // NONCE2_AND_JOBID_STORE_SPACE need 64 bytes aligned +#define MAX_TIMEOUT_VALUE 0x1ffff // defined in TIME_OUT_CONTROL +#define MAX_NONCE_NUMBER_IN_FIFO 0x1ff // 511 nonce +#define NONCE_DATA_LENGTH 4 // 4 bytes +#define REGISTER_DATA_LENGTH 4 // 4 bytes +#define TW_WRITE_COMMAND_LEN 52 +#define TW_WRITE_COMMAND_LEN_VIL 52 +#define NEW_BLOCK_MARKER 0x11 +#define NORMAL_BLOCK_MARKER 0x01 +#define PHY_MEM_NONCE2_JOBID_ADDRESS ((1024-16)*1024*1024) +#define PHY_MEM_JOB_START_ADDRESS_1 (PHY_MEM_NONCE2_JOBID_ADDRESS + NONCE2_AND_JOBID_STORE_SPACE) +#define PHY_MEM_JOB_START_ADDRESS_2 (PHY_MEM_JOB_START_ADDRESS_1 + JOB_STORE_SPACE) + +// macro define about miner +#define BITMAIN_MAX_CHAIN_NUM 16 +#define CHAIN_ASIC_NUM 63 +#define BITMAIN_MAX_FAN_NUM 8 // FPGA just can supports 8 fan +#define BITMAIN_DEFAULT_ASIC_NUM 64 // max support 64 ASIC on 1 HASH board +#define MIDSTATE_LEN 32 +#define DATA2_LEN 12 +#define MAX_RETURNED_NONCE_NUM 10 +#define PREV_HASH_LEN 32 +#define MERKLE_BIN_LEN 32 +#define INIT_CONFIG_TYPE 0x51 +#define STATUS_DATA_TYPE 0xa1 +#define SEND_JOB_TYPE 0x52 +#define READ_JOB_TYPE 0xa2 +#define CHECK_SYSTEM_TIME_GAP 
10000 // 10s +//fan +#define MIN_FAN_NUM 2 +#define MAX_FAN_SPEED 6000 +#define MIN_PWM_PERCENT 20 +#define MAX_PWM_PERCENT 100 +#define TEMP_INTERVAL 2 +#define MAX_TEMP 85 +#define MAX_FAN_TEMP 75 +#define MIN_FAN_TEMP 35 +#define HAVE_TEMP 0xF4 + +#define PWM_ADJUST_FACTOR ((100 - MIN_PWM_PERCENT)/(MAX_FAN_TEMP-MIN_FAN_TEMP)) +#define PWM_SCALE 50 +#define PWM_ADJ_SCALE 9/10 +//use for hash test +#define TEST_DHASH 0 +#define DEVICE_DIFF 8 +//use for status check +//#define XILINX +#define C5 + +#ifdef C5 +#define RED_LED_DEV "/sys/class/leds/hps_led2/brightness" +#define GREEN_LED_DEV "/sys/class/leds/hps_led0/brightness" +#else ifdef XILINX +#define RED_LED_DEV "/sys/class/gpio/gpio37/value" +#define GREEN_LED_DEV "/sys/class/gpio/gpio38/value" +#endif + + + +struct init_config { + uint8_t token_type; + uint8_t version; + uint16_t length; + uint8_t reset :1; + uint8_t fan_eft :1; + uint8_t timeout_eft :1; + uint8_t frequency_eft :1; + uint8_t voltage_eft :1; + uint8_t chain_check_time_eft :1; + uint8_t chip_config_eft :1; + uint8_t hw_error_eft :1; + uint8_t beeper_ctrl :1; + uint8_t temp_ctrl :1; + uint8_t chain_freq_eft :1; + uint8_t reserved1 :5; + uint8_t reserved2[2]; + uint8_t chain_num; + uint8_t asic_num; + uint8_t fan_pwm_percent; + uint8_t temperature; + uint16_t frequency; + uint8_t voltage[2]; + uint8_t chain_check_time_integer; + uint8_t chain_check_time_fractions; + uint8_t timeout_data_integer; + uint8_t timeout_data_fractions; + uint32_t reg_data; + uint8_t chip_address; + uint8_t reg_address; + uint16_t chain_min_freq; + uint16_t chain_max_freq; + uint16_t crc; +} __attribute__((packed, aligned(4))); + + + +struct bitmain_c5_info { + cglock_t update_lock; + + uint8_t data_type; + uint8_t version; + uint16_t length; + uint8_t chip_value_eft :1; + uint8_t reserved1 :7; + uint8_t chain_num; + uint16_t reserved2; + uint8_t fan_num; + uint8_t temp_num; + uint8_t reserved3[2]; + uint32_t fan_exist; + uint32_t temp_exist; + uint16_t diff; + uint16_t 
reserved4; + uint32_t reg_value; + uint32_t chain_asic_exist[BITMAIN_MAX_CHAIN_NUM][BITMAIN_DEFAULT_ASIC_NUM/32]; + uint32_t chain_asic_status[BITMAIN_MAX_CHAIN_NUM][BITMAIN_DEFAULT_ASIC_NUM/32]; + uint8_t chain_asic_num[BITMAIN_MAX_CHAIN_NUM]; + uint8_t temp[BITMAIN_MAX_CHAIN_NUM]; + uint8_t fan_speed_value[BITMAIN_MAX_FAN_NUM]; + uint16_t freq[BITMAIN_MAX_CHAIN_NUM]; + struct thr_info *thr; + pthread_t read_nonce_thr; + pthread_mutex_t lock; + + struct init_config c5_config; + int pool_no; + struct pool pool0; + struct pool pool1; + struct pool pool2; + uint32_t pool0_given_id; + uint32_t pool1_given_id; + uint32_t pool2_given_id; + + uint16_t crc; +} __attribute__((packed, aligned(4))); + +struct part_of_job { + uint8_t token_type; // buf[0] + uint8_t version; + uint16_t reserved; + uint32_t length; // buf[1] + uint8_t pool_nu; // buf[2] + uint8_t new_block :1; + uint8_t asic_diff_valid :1; + uint8_t reserved1 :6; + uint8_t asic_diff; + uint8_t reserved2[1]; + uint32_t job_id; // buf[3] + uint32_t bbversion; // buf[4] + uint8_t prev_hash[32]; // buf[5] - buf[12] + uint32_t ntime; // buf[13] + uint32_t nbit; // buf[14] + uint16_t coinbase_len; // buf[15] + uint16_t nonce2_offset; + uint16_t nonce2_bytes_num; // 4 or 8 bytes // buf[16] + uint16_t merkles_num; + uint64_t nonce2_start_value; //nonce2 start calculate value. 
// buf[17] - buf[18] +}; + //uint8_t coinbase //this is variable + //uint8_t merkle_bin[32] * merkles_num + //uint16_t crc + +struct nonce_content { + uint32_t job_id; + uint32_t work_id; + uint32_t header_version; + uint64_t nonce2; + uint32_t nonce3; + uint32_t chain_num; + uint8_t midstate[MIDSTATE_LEN]; +} __attribute__((packed, aligned(4))); + +struct nonce { + uint8_t token_type; + uint8_t version; + uint16_t length; + uint16_t valid_nonce_num; + struct nonce_content nonce_cont[MAX_RETURNED_NONCE_NUM]; + uint16_t crc; +} __attribute__((packed, aligned(4))); + +struct all_parameters { + + unsigned int *current_job_start_address; + unsigned int pwm_value; + unsigned int chain_exist[BITMAIN_MAX_CHAIN_NUM]; + unsigned int timeout; + unsigned int fan_exist_map; + unsigned int temp_sensor_map; + unsigned int nonce_error; + unsigned int chain_asic_exist[BITMAIN_MAX_CHAIN_NUM][8]; + unsigned int chain_asic_status[BITMAIN_MAX_CHAIN_NUM][8]; + int16_t chain_asic_temp[BITMAIN_MAX_CHAIN_NUM][8][4]; + int8_t chain_asic_iic[CHAIN_ASIC_NUM]; + uint32_t chain_hw[BITMAIN_MAX_CHAIN_NUM]; + uint64_t chain_asic_nonce[BITMAIN_MAX_CHAIN_NUM][BITMAIN_DEFAULT_ASIC_NUM]; + char chain_asic_status_string[BITMAIN_MAX_CHAIN_NUM][BITMAIN_DEFAULT_ASIC_NUM+8]; + + unsigned long long int total_nonce_num; + + unsigned char fan_exist[BITMAIN_MAX_FAN_NUM]; + unsigned int fan_speed_value[BITMAIN_MAX_FAN_NUM]; + int temp[BITMAIN_MAX_CHAIN_NUM]; + uint8_t chain_asic_num[BITMAIN_MAX_CHAIN_NUM]; + unsigned char check_bit; + unsigned char pwm_percent; + unsigned char chain_num; + unsigned char fan_num; + unsigned char temp_num; + unsigned int fan_speed_top1; + int temp_top1; + int temp_top1_last; + unsigned char corenum; + unsigned char addrInterval; + unsigned char max_asic_num_in_one_chain; + unsigned char baud; + unsigned char diff; + uint8_t fan_eft; + uint8_t fan_pwm; + + unsigned short int frequency; + char frequency_t[10]; + unsigned short int freq[BITMAIN_MAX_CHAIN_NUM]; +} 
__attribute__((packed, aligned(4))); + +volatile struct nonce_buf { + unsigned int p_wr; + unsigned int p_rd; + unsigned int nonce_num; + struct nonce_content nonce_buffer[MAX_NONCE_NUMBER_IN_FIFO]; +}__attribute__((packed, aligned(4))); + +struct reg_content { + unsigned int reg_value; + unsigned char crc; + unsigned char chain_number; +} __attribute__((packed, aligned(4))); + +volatile struct reg_buf { + unsigned int p_wr; + unsigned int p_rd; + unsigned int reg_value_num; + struct reg_content reg_buffer[MAX_NONCE_NUMBER_IN_FIFO]; +}__attribute__((packed, aligned(4))); + +struct freq_pll +{ + const char *freq; + unsigned int fildiv1; + unsigned int fildiv2; + unsigned int vilpll; +}; + +#define Swap32(l) (((l) >> 24) | (((l) & 0x00ff0000) >> 8) | (((l) & 0x0000ff00) << 8) | ((l) << 24)) + + +struct vil_work +{ + uint8_t type; // Bit[7:5]: Type,fixed 0x01. Bit[4:0]:Reserved + uint8_t length; // data length, from Byte0 to the end. + uint8_t wc_base; // Bit[7]: Reserved. Bit[6:0]: Work count base, muti-Midstate, each Midstate corresponding work count increase one by one. + uint8_t mid_num; // Bit[7:3]: Reserved Bit[2:0]: MSN, midstate num,now support 1,2,4. + //uint32_t sno; // SPAT mode??Start Nonce Number Normal mode??Reserved. 
+ uint8_t midstate[32]; + uint8_t data2[12]; +}; + +struct vil_work_1387 +{ + uint8_t work_type; + uint8_t chain_id; + uint8_t reserved1[2]; + uint32_t work_count; + uint8_t data[12]; + uint8_t midstate[32]; +}; + + +static struct freq_pll freq_pll_1385[] = { + {"100",0x020040, 0x0420, 0x200241}, + {"125",0x028040, 0x0420, 0x280241}, + {"150",0x030040, 0x0420, 0x300241}, + {"175",0x038040, 0x0420, 0x380241}, + {"200",0x040040, 0x0420, 0x400241}, + {"225",0x048040, 0x0420, 0x480241}, + {"250",0x050040, 0x0420, 0x500241}, + {"275",0x058040, 0x0420, 0x580241}, + {"300",0x060040, 0x0420, 0x600241}, + {"325",0x068040, 0x0420, 0x680241}, + {"350",0x070040, 0x0420, 0x700241}, + {"375",0x078040, 0x0420, 0x780241}, + {"400",0x080040, 0x0420, 0x800241}, + {"404",0x061040, 0x0320, 0x610231}, + {"406",0x041040, 0x0220, 0x410221}, + {"408",0x062040, 0x0320, 0x620231}, + {"412",0x042040, 0x0220, 0x420221}, + {"416",0x064040, 0x0320, 0x640231}, + {"418",0x043040, 0x0220, 0x430221}, + {"420",0x065040, 0x0320, 0x650231}, + {"425",0x044040, 0x0220, 0x440221}, + {"429",0x067040, 0x0320, 0x670231}, + {"431",0x045040, 0x0220, 0x450221}, + {"433",0x068040, 0x0320, 0x680231}, + {"437",0x046040, 0x0220, 0x460221}, + {"441",0x06a040, 0x0320, 0x6a0231}, + {"443",0x047040, 0x0220, 0x470221}, + {"445",0x06b040, 0x0320, 0x6b0231}, + {"450",0x048040, 0x0220, 0x480221}, + {"454",0x06d040, 0x0320, 0x6d0231}, + {"456",0x049040, 0x0220, 0x490221}, + {"458",0x06e040, 0x0320, 0x6e0231}, + {"462",0x04a040, 0x0220, 0x4a0221}, + {"466",0x070040, 0x0320, 0x700231}, + {"468",0x04b040, 0x0220, 0x4b0221}, + {"470",0x071040, 0x0320, 0x710231}, + {"475",0x04c040, 0x0220, 0x4c0221}, + {"479",0x073040, 0x0320, 0x730231}, + {"481",0x04d040, 0x0220, 0x4d0221}, + {"483",0x074040, 0x0320, 0x740231}, + {"487",0x04e040, 0x0220, 0x4e0221}, + {"491",0x076040, 0x0320, 0x760231}, + {"493",0x04f040, 0x0220, 0x4f0221}, + {"495",0x077040, 0x0320, 0x770231}, + {"500",0x050040, 0x0220, 0x500221}, + {"504",0x079040, 0x0320, 
0x790231}, + {"506",0x051040, 0x0220, 0x510221}, + {"508",0x07a040, 0x0320, 0x7a0231}, + {"512",0x052040, 0x0220, 0x520221}, + {"516",0x07c040, 0x0320, 0x7c0231}, + {"518",0x053040, 0x0220, 0x530221}, + {"520",0x07d040, 0x0320, 0x7d0231}, + {"525",0x054040, 0x0220, 0x540221}, + {"529",0x07f040, 0x0320, 0x7f0231}, + {"531",0x055040, 0x0220, 0x550221}, + {"533",0x080040, 0x0320, 0x800231}, + {"537",0x056040, 0x0220, 0x560221}, + {"543",0x057040, 0x0220, 0x570221}, + {"550",0x058040, 0x0220, 0x580221}, + {"556",0x059040, 0x0220, 0x590221}, + {"562",0x05a040, 0x0220, 0x5a0221}, + {"568",0x05b040, 0x0220, 0x5b0221}, + {"575",0x05c040, 0x0220, 0x5c0221}, + {"581",0x05d040, 0x0220, 0x5d0221}, + {"587",0x05e040, 0x0220, 0x5e0221}, + {"593",0x05f040, 0x0220, 0x5f0221}, + {"600",0x060040, 0x0220, 0x600221}, + {"606",0x061040, 0x0220, 0x610221}, + {"612",0x062040, 0x0220, 0x620221}, + {"618",0x063040, 0x0220, 0x630221}, + {"625",0x064040, 0x0220, 0x640221}, + {"631",0x065040, 0x0220, 0x650221}, + {"637",0x066040, 0x0220, 0x660221}, + {"643",0x067040, 0x0220, 0x670221}, + {"650",0x068040, 0x0220, 0x680221}, + {"656",0x069040, 0x0220, 0x690221}, + {"662",0x06a040, 0x0220, 0x6a0221}, + {"668",0x06b040, 0x0220, 0x6b0221}, + {"675",0x06c040, 0x0220, 0x6c0221}, + {"681",0x06d040, 0x0220, 0x6d0221}, + {"687",0x06e040, 0x0220, 0x6e0221}, + {"693",0x06f040, 0x0220, 0x6f0221}, + {"700",0x070040, 0x0220, 0x700221}, + {"706",0x071040, 0x0220, 0x710221}, + {"712",0x072040, 0x0220, 0x720221}, + {"718",0x073040, 0x0220, 0x730221}, + {"725",0x074040, 0x0220, 0x740221}, + {"731",0x075040, 0x0220, 0x750221}, + {"737",0x076040, 0x0220, 0x760221}, + {"743",0x077040, 0x0220, 0x770221}, + {"750",0x078040, 0x0220, 0x780221}, + {"756",0x079040, 0x0220, 0x790221}, + {"762",0x07a040, 0x0220, 0x7a0221}, + {"768",0x07b040, 0x0220, 0x7b0221}, + {"775",0x07c040, 0x0220, 0x7c0221}, + {"781",0x07d040, 0x0220, 0x7d0221}, + {"787",0x07e040, 0x0220, 0x7e0221}, + {"793",0x07f040, 0x0220, 0x7f0221}, + 
{"800",0x080040, 0x0220, 0x800221}, + {"825",0x042040, 0x0120, 0x420211}, +}; + +extern bool opt_bitmain_fan_ctrl; +extern int opt_bitmain_fan_pwm; +extern int opt_bitmain_c5_freq; +extern int opt_bitmain_c5_voltage; +extern bool opt_bitmain_new_cmd_type_vil; +extern int ADD_FREQ; +extern int ADD_FREQ1; + + +#endif + diff --git a/driver-cointerra.c b/driver-cointerra.c new file mode 100644 index 0000000..dbf6845 --- /dev/null +++ b/driver-cointerra.c @@ -0,0 +1,1376 @@ +/* + * Copyright 2013-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include "miner.h" +#include "driver-cointerra.h" + +static const char *cointerra_hdr = "ZZ"; + +int opt_ps_load; + +static void cta_gen_message(char *msg, char type) +{ + memset(msg, 0, CTA_MSG_SIZE); + memcpy(msg, cointerra_hdr, 2); + msg[CTA_MSG_TYPE] = type; +} + +/* Find the number of leading zero bits in diff */ +static uint8_t diff_to_bits(double diff) +{ + uint64_t diff64; + uint8_t i; + + diff /= 0.9999847412109375; + diff *= (double)2147483648.0; + if (diff > 0x8000000000000000ULL) + diff = 0x8000000000000000ULL; + /* Convert it to an integer */ + diff64 = diff; + for (i = 0; diff64; i++, diff64 >>= 1); + + return i; +} + +static double bits_to_diff(uint8_t bits) +{ + double ret = 1.0; + + if (likely(bits > 32)) + ret *= 1ull << (bits - 32); + else if (unlikely(bits < 32)) + ret /= 1ull << (32 - bits); + return ret; +} + +static bool cta_reset_init(char *buf) +{ + return ((buf[CTA_MSG_TYPE] == CTA_RECV_RDONE) && ((buf[CTA_RESET_TYPE]&0x3) == CTA_RESET_INIT)); +} + +static char *mystrstr(char *haystack, int size, const char *needle) +{ + int loop = 0; + + while (loop < (size-1)) { + if ((haystack[loop] == needle[0])&& + (haystack[loop+1] 
== needle[1])) + return &haystack[loop]; + loop++; + } + return NULL; +} + +static bool cta_open(struct cgpu_info *cointerra) +{ + int err, amount, offset = 0; + char buf[CTA_MSG_SIZE]; + cgtimer_t ts_start; + bool ret = false; + + if (cointerra->usbinfo.nodev) + return false; + + applog(LOG_INFO, "CTA_OPEN"); + + cta_gen_message(buf, CTA_SEND_RESET); + // set the initial difficulty + buf[CTA_RESET_TYPE] = CTA_RESET_INIT | CTA_RESET_DIFF; + buf[CTA_RESET_DIFF] = diff_to_bits(CTA_INIT_DIFF); + buf[CTA_RESET_LOAD] = opt_cta_load ? opt_cta_load : 255; + buf[CTA_RESET_PSLOAD] = opt_ps_load; + + if (cointerra->usbinfo.nodev) + return ret; + + err = usb_write(cointerra, buf, CTA_MSG_SIZE, &amount, C_CTA_WRITE); + if (err) { + applog(LOG_INFO, "Write error %d, wrote %d of %d", err, amount, CTA_MSG_SIZE); + return ret; + } + + cgtimer_time(&ts_start); + + /* Read from the device for up to 2 seconds discarding any data that + * doesn't match a reset complete acknowledgement. */ + while (42) { + cgtimer_t ts_now, ts_diff; + char *msg; + + cgtimer_time(&ts_now); + cgtimer_sub(&ts_now, &ts_start, &ts_diff); + if (cgtimer_to_ms(&ts_diff) > 2000) { + applog(LOG_DEBUG, "%s %d: Timed out waiting for response to reset init", + cointerra->drv->name, cointerra->device_id); + break; + } + + if (cointerra->usbinfo.nodev) + break; + + err = usb_read(cointerra, buf + offset, CTA_MSG_SIZE - offset, &amount, C_CTA_READ); + if (err && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_INFO, "%s %d: Read error %d, read %d", cointerra->drv->name, + cointerra->device_id, err, amount); + break; + } + if (!amount) + continue; + + msg = mystrstr(buf, amount, cointerra_hdr); + if (!msg) { + /* Keep the last byte in case it's the first byte of + * the 2 byte header. 
*/ + offset = 1; + memmove(buf, buf + amount - 1, offset); + continue; + } + + if (msg > buf) { + /* length of message = offset for next usb_read after moving */ + offset = CTA_MSG_SIZE - (msg - buf); + memmove(buf, msg, offset); + continue; + } + + /* We have a full sized message starting with the header now */ + if (cta_reset_init(buf)) { + /* We can't store any other data returned with this + * reset since we have not allocated any memory for + * a cointerra_info structure yet. */ + applog(LOG_INFO, "%s %d: Successful reset init received", + cointerra->drv->name, cointerra->device_id); + ret = true; + break; + } + } + + return ret; +} + +static void cta_clear_work(struct cgpu_info *cgpu) +{ + struct work *work, *tmp; + + wr_lock(&cgpu->qlock); + HASH_ITER(hh, cgpu->queued_work, work, tmp) { + __work_completed(cgpu, work); + free_work(work); + } + wr_unlock(&cgpu->qlock); +} + +static void cta_close(struct cgpu_info *cointerra) +{ + struct cointerra_info *info = cointerra->device_data; + + /* Wait for read thread to die */ + pthread_join(info->read_thr, NULL); + + /* Open does the same reset init followed by response as is required to + * close the device. */ + if (!cta_open(cointerra)) { + applog(LOG_INFO, "%s %d: Reset on close failed", cointerra->drv->name, + cointerra->device_id); + } + + mutex_destroy(&info->lock); + mutex_destroy(&info->sendlock); + /* Don't free info here to avoid trying to access dereferenced members + * once a device is unplugged. 
*/ + cta_clear_work(cointerra); +} + +static struct cgpu_info *cta_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct cgpu_info *cointerra = usb_alloc_cgpu(&cointerra_drv, 1); + int tries = 0; + + if (!usb_init(cointerra, dev, found)) + goto fail; + applog(LOG_INFO, "%s %d: Found at %s", cointerra->drv->name, + cointerra->device_id, cointerra->device_path); + + while (!cta_open(cointerra) && !cointerra->usbinfo.nodev) { + if (tries++ > 3) + goto failed_open; + applog(LOG_INFO, "%s %d: Failed to open %d times, retrying", cointerra->drv->name, + cointerra->device_id, tries); + } + + if (!add_cgpu(cointerra)) + goto fail_close; + + update_usb_stats(cointerra); + applog(LOG_INFO, "%s %d: Successfully set up %s", cointerra->drv->name, + cointerra->device_id, cointerra->device_path); + return cointerra; + +fail_close: + cta_close(cointerra); +failed_open: + applog(LOG_INFO, "%s %d: Failed to initialise %s", cointerra->drv->name, + cointerra->device_id, cointerra->device_path); +fail: + usb_free_cgpu(cointerra); + return NULL; +} + +static void cta_detect(bool __maybe_unused hotplug) +{ + usb_detect(&cointerra_drv, cta_detect_one); +} + +/* This function will remove a work item from the hashtable if it matches the + * id in work->subid and return a pointer to the work but it will not free the + * work. It may return NULL if it cannot find matching work. */ +static struct work *take_work_by_id(struct cgpu_info *cgpu, uint16_t id) +{ + struct work *work, *tmp, *ret = NULL; + + wr_lock(&cgpu->qlock); + HASH_ITER(hh, cgpu->queued_work, work, tmp) { + if (work->subid == id) { + ret = work; + break; + } + } + if (ret) + __work_completed(cgpu, ret); + wr_unlock(&cgpu->qlock); + + return ret; +} + +/* This function will look up a work item in the hashtable if it matches the + * id in work->subid and return a cloned work item if it matches. It may return + * NULL if it cannot find matching work. 
*/ +static struct work *clone_work_by_id(struct cgpu_info *cgpu, uint16_t id) +{ + struct work *work, *tmp, *ret = NULL; + + rd_lock(&cgpu->qlock); + HASH_ITER(hh, cgpu->queued_work, work, tmp) { + if (work->subid == id) { + ret = work; + break; + } + } + if (ret) + ret = copy_work(ret); + rd_unlock(&cgpu->qlock); + + return ret; +} + +static bool cta_send_msg(struct cgpu_info *cointerra, char *buf); + +static uint16_t hu16_from_msg(char *buf, int msg) +{ + return le16toh(*(uint16_t *)&buf[msg]); +} + +static uint32_t hu32_from_msg(char *buf, int msg) +{ + return le32toh(*(uint32_t *)&buf[msg]); +} + +static uint64_t hu64_from_msg(char *buf, int msg) +{ + return le64toh(*(uint64_t *)&buf[msg]); +} + +static uint8_t u8_from_msg(char *buf, int msg) +{ + return *(uint8_t *)&buf[msg]; +} + +static void msg_from_hu16(char *buf, int msg, uint16_t val) +{ + *(uint16_t *)&buf[msg] = htole16(val); +} + +static void cta_parse_reqwork(struct cgpu_info *cointerra, struct cointerra_info *info, + char *buf) +{ + uint16_t retwork; + + retwork = hu16_from_msg(buf, CTA_REQWORK_REQUESTS); + applog(LOG_DEBUG, "%s %d: Request work message for %u items received", + cointerra->drv->name, cointerra->device_id, retwork); + + mutex_lock(&info->lock); + info->requested = retwork; + /* Wake up the main scanwork loop since we need more + * work. 
*/ + pthread_cond_signal(&info->wake_cond); + mutex_unlock(&info->lock); +} + +static void cta_parse_recvmatch(struct thr_info *thr, struct cgpu_info *cointerra, + struct cointerra_info *info, char *buf) +{ + uint32_t timestamp_offset, mcu_tag; + uint16_t retwork; + struct work *work; + + /* No endian switch needs doing here since it's sent and returned as + * the same 4 bytes */ + retwork = *(uint16_t *)(&buf[CTA_DRIVER_TAG]); + mcu_tag = hu32_from_msg(buf, CTA_MCU_TAG); + applog(LOG_DEBUG, "%s %d: Match message for id 0x%04x MCU id 0x%08x received", + cointerra->drv->name, cointerra->device_id, retwork, mcu_tag); + + work = clone_work_by_id(cointerra, retwork); + if (likely(work)) { + uint8_t wdiffbits = u8_from_msg(buf, CTA_WORK_DIFFBITS); + uint32_t nonce = hu32_from_msg(buf, CTA_MATCH_NONCE); + unsigned char rhash[32]; + char outhash[16]; + double wdiff; + bool ret; + + timestamp_offset = hu32_from_msg(buf, CTA_MATCH_NOFFSET); + if (timestamp_offset) { + struct work *base_work = work; + + work = copy_work_noffset(base_work, timestamp_offset); + free_work(base_work); + } + + /* Test against the difficulty we asked for along with the work */ + wdiff = bits_to_diff(wdiffbits); + ret = test_nonce_diff(work, nonce, wdiff); + + if (opt_debug) { + /* Debugging, remove me */ + swab256(rhash, work->hash); + __bin2hex(outhash, rhash, 8); + applog(LOG_WARNING, "submit work %s 0x%04x 0x%08x %d 0x%08x", + outhash, retwork, mcu_tag, timestamp_offset, nonce); + } + + if (likely(ret)) { + uint8_t asic, core, pipe, coreno; + int pipeno, bitchar, bitbit; + uint64_t hashes; + + asic = u8_from_msg(buf, CTA_MCU_ASIC); + core = u8_from_msg(buf, CTA_MCU_CORE); + pipe = u8_from_msg(buf, CTA_MCU_PIPE); + pipeno = asic * 512 + core * 128 + pipe; + coreno = asic * 4 + core; + if (unlikely(asic > 1 || core > 3 || pipe > 127 || pipeno > 1023)) { + applog(LOG_WARNING, "%s %d: MCU invalid pipe asic %d core %d pipe %d", + cointerra->drv->name, cointerra->device_id, asic, core, pipe); + 
coreno = 0; + } else { + info->last_pipe_nonce[pipeno] = time(NULL); + bitchar = pipeno / 8; + bitbit = pipeno % 8; + info->pipe_bitmap[bitchar] |= 0x80 >> bitbit; + } + + applog(LOG_DEBUG, "%s %d: Submitting tested work job_id %s work_id %u", + cointerra->drv->name, cointerra->device_id, work->job_id, work->subid); + ret = submit_tested_work(thr, work); + + hashes = (uint64_t)wdiff * 0x100000000ull; + mutex_lock(&info->lock); + info->share_hashes += hashes; + info->tot_core_hashes[coreno] += hashes; + info->hashes += nonce; + mutex_unlock(&info->lock); + } else { + char sendbuf[CTA_MSG_SIZE]; + uint8_t asic, core, coreno; + asic = u8_from_msg(buf, CTA_MCU_ASIC); + core = u8_from_msg(buf, CTA_MCU_CORE); + coreno = asic * 4 + core; + inc_hw_errors(thr); + + applog(LOG_WARNING, "%s %d: Notify bad match work", + cointerra->drv->name, cointerra->device_id); + if (coreno < CTA_CORES) + info->fmatch_errors[coreno]++; + if (opt_debug) { + uint64_t sdiff = share_diff(work); + unsigned char midstate[32], wdata[12]; + char hexmidstate[68], hexwdata[28]; + uint16_t wid; + + memcpy(&wid, &info->work_id, 2); + flip32(midstate, work->midstate); + __bin2hex(hexmidstate, midstate, 32); + flip12(wdata, &work->data[64]); + __bin2hex(hexwdata, wdata, 12); + applog(LOG_DEBUG, "False match sent: work id %u midstate %s blkhdr %s", + wid, hexmidstate, hexwdata); + applog(LOG_DEBUG, "False match reports: work id 0x%04x MCU id 0x%08x work diff %.1f", + retwork, mcu_tag, wdiff); + applog(LOG_DEBUG, "False match tested: nonce 0x%08x noffset %d %s", + nonce, timestamp_offset, outhash); + applog(LOG_DEBUG, "False match devdiff set to %.1f share diff calc %"PRIu64, + work->device_diff, sdiff); + } + + /* Tell the device we got a false match */ + cta_gen_message(sendbuf, CTA_SEND_FMATCH); + memcpy(sendbuf + 3, buf + 3, CTA_MSG_SIZE - 3); + cta_send_msg(cointerra, sendbuf); + } + free_work(work); + } else { + applog(LOG_WARNING, "%s %d: Matching work id 0x%X %d not found", cointerra->drv->name, + 
cointerra->device_id, retwork, __LINE__); + inc_hw_errors(thr); + + mutex_lock(&info->lock); + info->no_matching_work++; + mutex_unlock(&info->lock); + } +} + +static void cta_parse_wdone(struct thr_info *thr, struct cgpu_info *cointerra, + struct cointerra_info *info, char *buf) +{ + uint16_t retwork = *(uint16_t *)(&buf[CTA_DRIVER_TAG]); + struct work *work = take_work_by_id(cointerra, retwork); + uint64_t hashes; + + if (likely(work)) { + free_work(work); + applog(LOG_DEBUG, "%s %d: Done work found id 0x%X %d", + cointerra->drv->name, cointerra->device_id, retwork, __LINE__); + } else { + applog(LOG_WARNING, "%s %d: Done work not found id 0x%X %d", + cointerra->drv->name, cointerra->device_id, retwork, __LINE__); + inc_hw_errors(thr); + } + + /* Removing hashes from work done message */ + hashes = hu64_from_msg(buf, CTA_WDONE_NONCES); + if (unlikely(hashes > (61 * 0x100000000ull))) { + applog(LOG_INFO, "%s Invalid hash returned %"PRIu64"x %"PRIu64"x %"PRIu64"X", + __func__, info->hashes, hashes, hashes); + hashes = 0; + } + + mutex_lock(&info->lock); + info->hashes += hashes; + mutex_unlock(&info->lock); +} + +static void u16array_from_msg(uint16_t *u16, int entries, int var, char *buf) +{ + int i, j; + + for (i = 0, j = 0; i < entries; i++, j += sizeof(uint16_t)) + u16[i] = hu16_from_msg(buf, var + j); +} + +static void cta_parse_statread(struct cgpu_info *cointerra, struct cointerra_info *info, + char *buf) +{ + float max_temp = 0; + int i; + + mutex_lock(&info->lock); + u16array_from_msg(info->coretemp, CTA_CORES, CTA_STAT_CORETEMPS, buf); + info->ambtemp_low = hu16_from_msg(buf, CTA_STAT_AMBTEMP_LOW); + info->ambtemp_avg = hu16_from_msg(buf, CTA_STAT_AMBTEMP_AVG); + info->ambtemp_high = hu16_from_msg(buf, CTA_STAT_AMBTEMP_HIGH); + u16array_from_msg(info->pump_tachs, CTA_PUMPS, CTA_STAT_PUMP_TACHS, buf); + u16array_from_msg(info->fan_tachs, CTA_FANS, CTA_STAT_FAN_TACHS, buf); + u16array_from_msg(info->corevolts, CTA_CORES, CTA_STAT_CORE_VOLTS, buf); + 
info->volts33 = hu16_from_msg(buf, CTA_STAT_VOLTS33); + info->volts12 = hu16_from_msg(buf, CTA_STAT_VOLTS12); + info->inactive = hu16_from_msg(buf, CTA_STAT_INACTIVE); + info->active = hu16_from_msg(buf, CTA_STAT_ACTIVE); + mutex_unlock(&info->lock); + + for (i = 0; i < CTA_CORES; i++) { + if (info->coretemp[i] > max_temp) + max_temp = info->coretemp[i]; + } + max_temp /= 100.0; + /* Store the max temperature in the cgpu struct as an exponentially + * changing value. */ + cointerra->temp = cointerra->temp * 0.63 + max_temp * 0.37; +} + +static void u8array_from_msg(uint8_t *u8, int entries, int var, char *buf) +{ + int i; + + for (i = 0; i < entries; i++) + u8[i] = u8_from_msg(buf, var + i); +} + +static void cta_parse_statset(struct cointerra_info *info, char *buf) +{ + mutex_lock(&info->lock); + u8array_from_msg(info->coreperf, CTA_CORES, CTA_STAT_PERFMODE, buf); + u8array_from_msg(info->fanspeed, CTA_FANS, CTA_STAT_FANSPEEDS, buf); + info->dies_active = u8_from_msg(buf, CTA_STAT_DIES_ACTIVE); + u8array_from_msg(info->pipes_enabled, CTA_CORES, CTA_STAT_PIPES_ENABLED, buf); + u16array_from_msg(info->corefreqs, CTA_CORES, CTA_STAT_CORE_FREQS, buf); + info->uptime = hu32_from_msg(buf,CTA_STAT_UPTIME); + mutex_unlock(&info->lock); +} + +static void cta_parse_irstat(struct cointerra_info *info, char *buf) +{ + uint8_t channel = u8_from_msg(buf,CTA_IRSTAT_CHANNEL); + + if (channel >= CTA_CORES) + return; + + mutex_lock(&info->lock); + info->irstat_vin[channel] = hu16_from_msg(buf,CTA_IRSTAT_VIN); + info->irstat_iin[channel] = hu16_from_msg(buf,CTA_IRSTAT_IIN); + info->irstat_vout[channel] = hu16_from_msg(buf,CTA_IRSTAT_VOUT); + info->irstat_iout[channel] = hu16_from_msg(buf,CTA_IRSTAT_IOUT); + info->irstat_temp1[channel] = hu16_from_msg(buf,CTA_IRSTAT_TEMP1); + info->irstat_temp2[channel] = hu16_from_msg(buf,CTA_IRSTAT_TEMP2); + info->irstat_pout[channel] = hu16_from_msg(buf,CTA_IRSTAT_POUT); + info->irstat_pin[channel] = hu16_from_msg(buf,CTA_IRSTAT_PIN); + 
info->irstat_efficiency[channel] = hu16_from_msg(buf,CTA_IRSTAT_EFF); + info->irstat_status[channel] = hu16_from_msg(buf,CTA_IRSTAT_STATUS); + mutex_unlock(&info->lock); +} + +static void cta_parse_info(struct cgpu_info *cointerra, struct cointerra_info *info, + char *buf) +{ + mutex_lock(&info->lock); + info->hwrev = hu64_from_msg(buf, CTA_INFO_HWREV); + info->serial = hu32_from_msg(buf, CTA_INFO_SERNO); + info->asics = u8_from_msg(buf, CTA_INFO_NUMASICS); + info->dies = u8_from_msg(buf, CTA_INFO_NUMDIES); + info->cores = hu16_from_msg(buf, CTA_INFO_NUMCORES); + info->board_number = u8_from_msg(buf, CTA_INFO_BOARDNUMBER); + info->fwrev[0] = u8_from_msg(buf, CTA_INFO_FWREV_MAJ); + info->fwrev[1] = u8_from_msg(buf, CTA_INFO_FWREV_MIN); + info->fwrev[2] = u8_from_msg(buf, CTA_INFO_FWREV_MIC); + info->fw_year = hu16_from_msg(buf, CTA_INFO_FWDATE_YEAR); + info->fw_month = u8_from_msg(buf, CTA_INFO_FWDATE_MONTH); + info->fw_day = u8_from_msg(buf, CTA_INFO_FWDATE_DAY); + info->init_diffbits = u8_from_msg(buf, CTA_INFO_INITDIFFBITS); + info->min_diffbits = u8_from_msg(buf, CTA_INFO_MINDIFFBITS); + info->max_diffbits = u8_from_msg(buf, CTA_INFO_MAXDIFFBITS); + mutex_unlock(&info->lock); + + if (!cointerra->unique_id) { + uint32_t b32 = htobe32(info->serial); + + cointerra->unique_id = bin2hex((unsigned char *)&b32, 4); + } +} + +static void cta_parse_rdone(struct cgpu_info *cointerra, struct cointerra_info *info, + char *buf) +{ + uint8_t reset_type, diffbits; + uint64_t wdone; + + reset_type = buf[CTA_RESET_TYPE]; + diffbits = buf[CTA_RESET_DIFF]; + wdone = hu64_from_msg(buf, CTA_WDONE_NONCES); + + if (wdone) { + applog(LOG_INFO, "%s %d: Reset done type %u message %u diffbits %"PRIu64" done received", + cointerra->drv->name, cointerra->device_id, reset_type, diffbits, wdone); + + mutex_lock(&info->lock); + info->hashes += wdone; + mutex_unlock(&info->lock); + } + + /* Note that the cgsem that is posted here must not be waited on while + * holding the info->lock to not get 
into a livelock since this + * function also grabs the lock first and it's always best to not sleep + * while holding a lock. */ + if (reset_type == CTA_RESET_NEW) { + cta_clear_work(cointerra); + /* Tell reset sender that the reset is complete + * and it may resume. */ + cgsem_post(&info->reset_sem); + } +} + +static void cta_zero_stats(struct cgpu_info *cointerra); + +static void cta_parse_debug(struct cointerra_info *info, char *buf) +{ + mutex_lock(&info->lock); + + info->tot_underruns = hu16_from_msg(buf, CTA_STAT_UNDERRUNS); + u16array_from_msg(info->tot_hw_errors, CTA_CORES, CTA_STAT_HW_ERRORS, buf); + info->tot_hashes = hu64_from_msg(buf, CTA_STAT_HASHES); + info->tot_flushed_hashes = hu64_from_msg(buf, CTA_STAT_FLUSHED_HASHES); + info->autovoltage = u8_from_msg(buf, CTA_STAT_AUTOVOLTAGE); + info->current_ps_percent = u8_from_msg(buf, CTA_STAT_POWER_PERCENT); + info->power_used = hu16_from_msg(buf,CTA_STAT_POWER_USED); + info->power_voltage = hu16_from_msg(buf,CTA_STAT_VOLTAGE); + info->ipower_used = hu16_from_msg(buf,CTA_STAT_IPOWER_USED); + info->ipower_voltage = hu16_from_msg(buf,CTA_STAT_IVOLTAGE); + info->power_temps[0] = hu16_from_msg(buf,CTA_STAT_PS_TEMP1); + info->power_temps[1] = hu16_from_msg(buf,CTA_STAT_PS_TEMP2); + + mutex_unlock(&info->lock); + + /* Autovoltage is positive only once at startup and eventually drops + * to zero. After that time we reset the stats since they're unreliable + * till then. 
/* A message checksum is the low byte of the sum of the first 63 message
 * bytes, stored in the 64th byte.  Returns non-zero when the checksum
 * matches.
 *
 * Fixed: sum and compare entirely in unsigned char space.  The old code
 * compared the unsigned char sum against plain `char buf[63]`; on targets
 * where char is signed, any checksum >= 0x80 promoted to a negative int on
 * the right-hand side and the comparison wrongly failed. */
static int verify_checksum(char *buf)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < 63; i++)
		sum += (unsigned char)buf[i];

	return (sum == (unsigned char)buf[63]);
}
case CTA_RECV_INFO: + applog(LOG_DEBUG, "%s %d: Info message received", + cointerra->drv->name, cointerra->device_id); + cta_parse_info(cointerra, info, buf); + break; + case CTA_RECV_MSG: + applog(LOG_NOTICE, "%s %d: MSG: %s", + cointerra->drv->name, cointerra->device_id, &buf[CTA_MSG_RECVD]); + break; + case CTA_RECV_RDONE: + cta_parse_rdone(cointerra, info, buf); + break; + case CTA_RECV_STATDEBUG: + cta_parse_debug(info, buf); + break; + case CTA_RECV_IRSTAT: + cta_parse_irstat(info, buf); + break; + } +} + +static void *cta_recv_thread(void *arg) +{ + struct thr_info *thr = (struct thr_info *)arg; + struct cgpu_info *cointerra = thr->cgpu; + struct cointerra_info *info = cointerra->device_data; + char threadname[24]; + int offset = 0; + + snprintf(threadname, 24, "cta_recv/%d", cointerra->device_id); + RenameThread(threadname); + + while (likely(!cointerra->shutdown)) { + char buf[CTA_READBUF_SIZE]; + int amount, err; + + if (unlikely(cointerra->usbinfo.nodev)) { + applog(LOG_DEBUG, "%s %d: Device disappeared, disabling recv thread", + cointerra->drv->name, cointerra->device_id); + break; + } + + err = usb_read(cointerra, buf + offset, CTA_MSG_SIZE, &amount, C_CTA_READ); + if (err && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s %d: Read error %d, read %d", cointerra->drv->name, + cointerra->device_id, err, amount); + break; + } + offset += amount; + + while (offset >= CTA_MSG_SIZE) { + char *msg = mystrstr(buf, offset, cointerra_hdr); + int begin; + + if (unlikely(!msg)) { + applog(LOG_WARNING, "%s %d: No message header found, discarding buffer", + cointerra->drv->name, cointerra->device_id); + inc_hw_errors(thr); + /* Save the last byte in case it's the fist + * byte of a header. 
*/ + begin = CTA_MSG_SIZE - 1; + offset -= begin; + memmove(buf, buf + begin, offset); + continue; + } + + if (unlikely(msg != buf)) { + begin = msg - buf; + applog(LOG_WARNING, "%s %d: Reads out of sync, discarding %d bytes", + cointerra->drv->name, cointerra->device_id, begin); + inc_hw_errors(thr); + offset -= begin; + memmove(buf, msg, offset); + if (offset < CTA_MSG_SIZE) + break; + } + + /* We have enough buffer for a full message, parse now */ + cta_parse_msg(thr, cointerra, info, msg); + offset -= CTA_MSG_SIZE; + if (offset > 0) + memmove(buf, buf + CTA_MSG_SIZE, offset); + } + } + + return NULL; +} + +static bool cta_send_msg(struct cgpu_info *cointerra, char *buf) +{ + struct cointerra_info *info = cointerra->device_data; + int amount, err; + + if (unlikely(cointerra->usbinfo.nodev)) + return false; + + /* Serialise usb writes to prevent overlap in case multiple threads + * send messages */ + mutex_lock(&info->sendlock); + err = usb_write(cointerra, buf, CTA_MSG_SIZE, &amount, C_CTA_WRITE); + mutex_unlock(&info->sendlock); + + if (unlikely(err || amount != CTA_MSG_SIZE)) { + applog(LOG_ERR, "%s %d: Write error %d, wrote %d of %d", cointerra->drv->name, + cointerra->device_id, err, amount, CTA_MSG_SIZE); + return false; + } + return true; +} + +static bool cta_prepare(struct thr_info *thr) +{ + struct cgpu_info *cointerra = thr->cgpu; + struct cointerra_info *info = calloc(sizeof(struct cointerra_info), 1); + char buf[CTA_MSG_SIZE]; + + if (unlikely(cointerra->usbinfo.nodev)) + return false; + + if (unlikely(!info)) + quit(1, "Failed to calloc info in cta_detect_one"); + cointerra->device_data = info; + /* Nominally set a requested value when starting, preempting the need + * for a req-work message. 
*/ + info->requested = CTA_MAX_QUEUE; + + info->thr = thr; + mutex_init(&info->lock); + mutex_init(&info->sendlock); + if (unlikely(pthread_cond_init(&info->wake_cond, NULL))) + quit(1, "Failed to create cta pthread cond"); + cgsem_init(&info->reset_sem); + if (pthread_create(&info->read_thr, NULL, cta_recv_thread, (void *)thr)) + quit(1, "Failed to create cta_recv_thread"); + + /* Request a single status setting message */ + cta_gen_message(buf, CTA_SEND_REQUEST); + msg_from_hu16(buf, CTA_REQ_MSGTYPE, CTA_RECV_STATSET); + msg_from_hu16(buf, CTA_REQ_INTERVAL, 0); + if (!cta_send_msg(cointerra, buf)) + return false; + + /* Request status debug messages every 60 seconds */ + cta_gen_message(buf, CTA_SEND_REQUEST); + msg_from_hu16(buf, CTA_REQ_MSGTYPE, CTA_RECV_STATDEBUG); + msg_from_hu16(buf, CTA_REQ_INTERVAL, 6000); + if (!cta_send_msg(cointerra, buf)) + return false; + + cgtime(&info->core_hash_start); + + return true; +} + +static void cta_send_reset(struct cgpu_info *cointerra, struct cointerra_info *info, + uint8_t reset_type, uint8_t diffbits); +static void cta_flush_work(struct cgpu_info *cointerra); + +/* *_fill and *_scanwork are serialised wrt to each other */ +static bool cta_fill(struct cgpu_info *cointerra) +{ + struct cointerra_info *info = cointerra->device_data; + bool ret = true; + char buf[CTA_MSG_SIZE]; + struct work *work = NULL; + unsigned short nroll_limit; + uint32_t swab[8]; + uint8_t diffbits; + + //applog(LOG_WARNING, "%s %d: cta_fill %d", cointerra->drv->name, cointerra->device_id,__LINE__); + + if (unlikely(info->thr->work_restart)) + cta_flush_work(cointerra); + + mutex_lock(&info->lock); + if (!info->requested) + goto out_unlock; + work = get_queued(cointerra); + if (unlikely(!work)) { + ret = false; + goto out_unlock; + } + if (--info->requested > 0) + ret = false; + + /* It does not matter what endian this uint16_t is since it will be + * the same value on sending to the MC as returning in match/done. 
This + * will automatically wrap as a uint16_t. It cannot be zero for the MCU + * though. */ + if (unlikely(++info->work_id == 0)) + info->work_id = 1; + work->subid = info->work_id; + + diffbits = diff_to_bits(work->device_diff); + + cta_gen_message(buf, CTA_SEND_WORK); + + memcpy(buf + CTA_DRIVER_TAG, &info->work_id, 2); + + flip32(swab, work->midstate); + memcpy(buf + CTA_WORK_MIDSTATE, swab, 32); + + flip12(swab, &work->data[64]); + memcpy(buf + CTA_WORK_DATA, swab, 12); + + nroll_limit = htole16(work->drv_rolllimit); + memcpy(buf + CTA_WORK_NROLL, &nroll_limit, 2); + + memcpy(buf + CTA_WORK_DIFFBITS, &diffbits, 1); + +out_unlock: + mutex_unlock(&info->lock); + + if (work) { + cgtime(&work->tv_work_start); + applog(LOG_DEBUG, "%s %d: Sending work job_id %s work_id %u", cointerra->drv->name, + cointerra->device_id, work->job_id, work->subid); + if (unlikely(!cta_send_msg(cointerra, buf))) { + work_completed(cointerra, work); + applog(LOG_INFO, "%s %d: Failed to send work", + cointerra->drv->name, cointerra->device_id); + /* The device will fail after this */ + } + } + + return ret; +} + +static void cta_send_reset(struct cgpu_info *cointerra, struct cointerra_info *info, + uint8_t reset_type, uint8_t diffbits) +{ + char buf[CTA_MSG_SIZE]; + int ret, retries = 0; + + /* Clear any accumulated messages in case we've gotten out of sync. */ + cgsem_reset(&info->reset_sem); +resend: + cta_gen_message(buf, CTA_SEND_RESET); + + buf[CTA_RESET_TYPE] = reset_type; + buf[CTA_RESET_LOAD] = opt_cta_load ? opt_cta_load : 255; + buf[CTA_RESET_PSLOAD] = opt_ps_load; + + applog(LOG_INFO, "%s %d: Sending Reset type %u with diffbits %u", cointerra->drv->name, + cointerra->device_id, reset_type, diffbits); + cta_send_msg(cointerra, buf); + + /* Wait for read thread to parse a reset message and signal us we may + * return to submitting other messages. Use a timeout in case we have + * a problem and the reset done message never returns. 
*/ + if (reset_type == CTA_RESET_NEW) { + ret = cgsem_mswait(&info->reset_sem, CTA_RESET_TIMEOUT); + if (ret) { + if (++retries < 5) { + applog(LOG_INFO, "%s %d: Timed out waiting for reset done msg, retrying", + cointerra->drv->name, cointerra->device_id); + goto resend; + } + applog(LOG_WARNING, "%s %d: Timed out waiting for reset done msg", + cointerra->drv->name, cointerra->device_id); + } + /* Good place to flush any work we have */ + flush_queue(cointerra); + } +} + +static void cta_flush_work(struct cgpu_info *cointerra) +{ + struct cointerra_info *info = cointerra->device_data; + + applog(LOG_INFO, "%s %d: cta_flush_work %d", cointerra->drv->name, cointerra->device_id, + __LINE__); + cta_send_reset(cointerra, info, CTA_RESET_NEW, 0); + info->thr->work_restart = false; +} + +static void cta_update_work(struct cgpu_info *cointerra) +{ + struct cointerra_info *info = cointerra->device_data; + + applog(LOG_INFO, "%s %d: Update work", cointerra->drv->name, cointerra->device_id); + cta_send_reset(cointerra, info, CTA_RESET_UPDATE, 0); +} + +static void cta_zero_corehashes(struct cointerra_info *info) +{ + int i; + + for (i = 0; i < CTA_CORES; i++) + info->tot_core_hashes[i] = 0; + cgtime(&info->core_hash_start); +} + +/* Send per core hashrate calculations at regular intervals ~every 5 minutes */ +static void cta_send_corehashes(struct cgpu_info *cointerra, struct cointerra_info *info, + double corehash_time) +{ + uint16_t core_ghs[CTA_CORES]; + double k[CTA_CORES]; + char buf[CTA_MSG_SIZE]; + int i, offset; + + for (i = 0; i < CTA_CORES; i++) { + k[i] = (double)info->tot_core_hashes[i]; +#if 0 + k[i] /= ((double)32 * (double)0x100000000ull); + k[i] = sqrt(k[i]) + 1; + k[i] *= k[i]; + k[i] = k[i] * 32 * ((double)0x100000000ull ); +#endif + k[i] /= ((double)1000000000 * corehash_time); + core_ghs[i] = k[i]; + } + cta_gen_message(buf, CTA_SEND_COREHASHRATE); + offset = CTA_CORE_HASHRATES; + for (i = 0; i < CTA_CORES; i++) { + msg_from_hu16(buf, offset, 
core_ghs[i]); + offset += 2; // uint16_t + } + cta_send_msg(cointerra, buf); +} + +static int64_t cta_scanwork(struct thr_info *thr) +{ + struct cgpu_info *cointerra = thr->cgpu; + struct cointerra_info *info = cointerra->device_data; + double corehash_time; + struct timeval now; + uint32_t runtime; + int64_t hashes; + + applog(LOG_DEBUG, "%s %d: cta_scanwork %d", cointerra->drv->name, cointerra->device_id,__LINE__); + + if (unlikely(cointerra->usbinfo.nodev)) { + hashes = -1; + goto out; + } + + cgtime(&now); + + if (unlikely(thr->work_restart)) { + applog(LOG_INFO, "%s %d: Flush work line %d", + cointerra->drv->name, cointerra->device_id,__LINE__); + cta_flush_work(cointerra); + } else { + struct timespec abstime, tsdiff = {0, 500000000}; + time_t now_t; + int i; + + timeval_to_spec(&abstime, &now); + timeraddspec(&abstime, &tsdiff); + + /* Discard work that was started more than 5 minutes ago as + * a safety precaution backup in case the hardware failed to + * return a work done message for some work items. */ + age_queued_work(cointerra, 300.0); + + /* Each core should be 1.7MH so at max diff of 32 should + * average a share every ~80 seconds.Use this opportunity to + * unset the bits in any pipes that have not returned a valid + * nonce for over 30 full nonce ranges or 2400s. */ + now_t = time(NULL); + for (i = 0; i < 1024; i++) { + if (unlikely(now_t > info->last_pipe_nonce[i] + 2400)) { + int bitchar = i / 8, bitbit = i % 8; + + info->pipe_bitmap[bitchar] &= ~(0x80 >> bitbit); + } + } + + /* Sleep for up to 0.5 seconds, waking if we need work or + * have received a restart message. 
*/ + mutex_lock(&info->lock); + pthread_cond_timedwait(&info->wake_cond, &info->lock, &abstime); + mutex_unlock(&info->lock); + + if (thr->work_restart) { + applog(LOG_INFO, "%s %d: Flush work line %d", + cointerra->drv->name, cointerra->device_id,__LINE__); + cta_flush_work(cointerra); + } + } + + corehash_time = tdiff(&now, &info->core_hash_start); + if (corehash_time > 300) { + cta_send_corehashes(cointerra, info, corehash_time); + cta_zero_corehashes(info); + } + + mutex_lock(&info->lock); + hashes = info->share_hashes; + info->tot_share_hashes += info->share_hashes; + info->tot_calc_hashes += info->hashes; + runtime = cgpu_runtime(thr->cgpu); + runtime /= 30; + info->old_hashes[runtime % 32] = info->tot_calc_hashes; + info->hashes = info->share_hashes = 0; + mutex_unlock(&info->lock); + + if (unlikely(cointerra->usbinfo.nodev)) + hashes = -1; +out: + return hashes; +} + +/* This is used for a work restart. We don't actually perform the work restart + * here but wake up the scanwork loop if it's waiting on the conditional so + * that it can test for the restart message. 
/* Population count of one byte (used to count enabled pipes per core).
 *
 * Fixed: the Kernighan clear-lowest-set-bit loop previously ran on a plain
 * `char`, so for bytes with the top bit set on signed-char platforms every
 * `v &= v - 1` step depended on implementation-defined narrowing back to a
 * signed char.  Counting on an unsigned value is well defined everywhere
 * and returns the same count. */
static int bits_set(char v)
{
	unsigned int x = (unsigned char)v;
	int c;

	for (c = 0; x; c++)
		x &= x - 1;
	return c;
}
false); + + /* Status readings */ + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "CoreTemp%d", i); + root = api_add_int16(root, buf, &info->coretemp[i], false); + } + root = api_add_int16(root, "Ambient Low", &info->ambtemp_low, false); + root = api_add_int16(root, "Ambient Avg", &info->ambtemp_avg, false); + root = api_add_int16(root, "Ambient High", &info->ambtemp_high, false); + for (i = 0; i < CTA_PUMPS; i++) { + sprintf(buf, "PumpRPM%d", i); + root = api_add_uint16(root, buf, &info->pump_tachs[i], false); + } + for (i = 0; i < CTA_FANS; i++) { + sprintf(buf, "FanRPM%d", i); + root = api_add_uint16(root, buf, &info->fan_tachs[i], false); + } + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "CoreFreqs%d", i); + root = api_add_uint16(root, buf, &info->corefreqs[i], false); + } + + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "CoreVolts%d", i); + root = api_add_uint16(root, buf, &info->corevolts[i], false); + } + root = api_add_uint16(root, "Volts3.3", &info->volts33, false); + root = api_add_uint16(root, "Volts12", &info->volts12, false); + root = api_add_uint16(root, "Inactive", &info->inactive, false); + root = api_add_uint16(root, "Active", &info->active, false); + + /* Status settings */ + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "CorePerfMode%d", i); + root = api_add_uint8(root, buf, &info->coreperf[i], false); + } + for (i = 0; i < CTA_FANS; i++) { + sprintf(buf, "FanSpeed%d", i); + root = api_add_uint8(root, buf, &info->fanspeed[i], false); + } + root = api_add_uint8(root, "DiesActive", &info->dies_active, false); + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "PipesEnabled%d", i); + root = api_add_uint8(root, buf, &info->pipes_enabled[i], false); + } + + /* Status debug */ + root = api_add_int(root, "Underruns", &info->tot_underruns, false); + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "HWErrors%d", i); + root = api_add_uint16(root, buf, &info->tot_hw_errors[i], false); + } + ghs = info->tot_calc_hashes / dev_runtime; + root = 
api_add_uint64(root, "Calc hashrate", &ghs, true); + ghs = (info->tot_hashes - info->tot_reset_hashes) / dev_runtime; + root = api_add_uint64(root, "Hashrate", &ghs, true); + //root = api_add_uint64(root, "bmminer 15m Hashrate", &cgpu->rolling15, true); + // get runtime in 30 second steps + runtime = runtime / 30; + // store the current hashes + info->old_hashes[runtime%32] = info->tot_calc_hashes; + // calc the 15 minute average hashrate + ghs = (info->old_hashes[(runtime+31)%32] - info->old_hashes[(runtime+1)%32])/(15*60); + root = api_add_uint64(root, "15m Hashrate", &ghs, true); + ghs = info->tot_share_hashes / dev_runtime; + root = api_add_uint64(root, "Share hashrate", &ghs, true); + root = api_add_uint64(root, "Total calc hashes", &info->tot_calc_hashes, false); + ghs = info->tot_hashes - info->tot_reset_hashes; + root = api_add_uint64(root, "Total hashes", &ghs, true); + root = api_add_uint64(root, "Total raw hashes", &info->tot_hashes, false); + root = api_add_uint64(root, "Total share hashes", &info->tot_share_hashes, false); + root = api_add_uint64(root, "Total flushed hashes", &info->tot_flushed_hashes, false); + val = cgpu->diff_accepted * 0x100000000ull; + root = api_add_uint64(root, "Accepted hashes", &val, true); + ghs = val / dev_runtime; + root = api_add_uint64(root, "Accepted hashrate", &ghs, true); + val = cgpu->diff_rejected * 0x100000000ull; + root = api_add_uint64(root, "Rejected hashes", &val, true); + ghs = val / dev_runtime; + root = api_add_uint64(root, "Rejected hashrate", &ghs, true); + + cgtime(&now); + dev_runtime = tdiff(&now, &info->core_hash_start); + if (dev_runtime < 1) + dev_runtime = 1; + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "Core%d hashrate", i); + ghs = info->tot_core_hashes[i] / dev_runtime; + root = api_add_uint64(root, buf, &ghs, true); + } + root = api_add_uint32(root, "Uptime",&info->uptime,false); + for (asic = 0; asic < 2; asic++) { + for (core = 0; core < 4; core++) { + char bitmapcount[40], asiccore[12]; 
+ int count = 0; + + sprintf(asiccore, "Asic%dCore%d", asic, core); + __bin2hex(bitmaphex, &info->pipe_bitmap[coreno], 16); + for (i = coreno; i < coreno + 16; i++) + count += bits_set(info->pipe_bitmap[i]); + snprintf(bitmapcount, 40, "%d:%s", count, bitmaphex); + root = api_add_string(root, asiccore, bitmapcount, true); + coreno += 16; + } + } + root = api_add_uint8(root, "AV", &info->autovoltage, false); + root = api_add_uint8(root, "Power Supply Percent", &info->current_ps_percent, false); + //if (info->power_used != 0) { + { + double value = info->power_used/100.0; + + value *= (info->power_voltage/100.0); + root = api_add_double(root, "Power Used", &value, true); + } + root = api_add_uint16(root, "IOUT", &info->power_used, false); + root = api_add_uint16(root, "VOUT", &info->power_voltage, false); + root = api_add_uint16(root, "IIN", &info->ipower_used, false); + root = api_add_uint16(root, "VIN", &info->ipower_voltage, false); + root = api_add_uint16(root, "PSTemp1", &info->power_temps[0], false); + root = api_add_uint16(root, "PSTemp2", &info->power_temps[1], false); + //} + + for (core = 0; core < CTA_CORES; core++) { + char name[20]; + char str[20]; + double value; + + sprintf(name,"IRVIN%d",core+1); + value = info->irstat_vin[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRIIN%d",core+1); + value = info->irstat_iin[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRVOUT%d",core+1); + value = info->irstat_vout[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRIOUT%d",core+1); + value = info->irstat_iout[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRTEMP1_%d",core+1); + value = info->irstat_temp1[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRTEMP2_%d",core+1); + value = info->irstat_temp2[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRPOUT%d",core+1); + value = 
info->irstat_pout[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRPIN%d",core+1); + value = info->irstat_pin[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IREFFICIENCY%d",core+1); + value = info->irstat_efficiency[core]/100.0; + root = api_add_double(root,name,&value,true); + sprintf(name,"IRSTATUS%d",core+1); + //root = api_add_uint16(root,name,&info->irstat_status[core],false); + sprintf(str,"0x%04X",info->irstat_status[core]); + root = api_add_string(root, name, str, true); + } + + for (i = 0; i < CTA_CORES; i++) { + sprintf(buf, "CoreFmatch%d", i); + root = api_add_uint16(root, buf, &info->fmatch_errors[i], false); + } + + return root; +} + +static void cta_statline_before(char *buf, size_t bufsiz, struct cgpu_info *cointerra) +{ + struct cointerra_info *info = cointerra->device_data; + double max_volt = 0; + int freq = 0, i; + + for (i = 0; i < CTA_CORES; i++) { + if (info->corevolts[i] > max_volt) + max_volt = info->corevolts[i]; + if (info->corefreqs[i] > freq) + freq = info->corefreqs[i]; + } + max_volt /= 1000; + + tailsprintf(buf, bufsiz, "%3dMHz %3.1fC %3.2fV", freq, cointerra->temp, max_volt); +} + +struct device_drv cointerra_drv = { + .drv_id = DRIVER_cointerra, + .dname = "cointerra", + .name = "CTA", + .drv_detect = cta_detect, + .thread_prepare = cta_prepare, + .hash_work = hash_queued_work, + .queue_full = cta_fill, + .update_work = cta_update_work, + .scanwork = cta_scanwork, + .flush_work = cta_wake, + .get_api_stats = cta_api_stats, + .get_statline_before = cta_statline_before, + .thread_shutdown = cta_shutdown, + .zero_stats = cta_zero_stats, + .max_diff = 64, // Set it below the actual limit to check nonces +}; diff --git a/driver-cointerra.h b/driver-cointerra.h new file mode 100644 index 0000000..6f0649c --- /dev/null +++ b/driver-cointerra.h @@ -0,0 +1,251 @@ +/* + * Copyright 2013-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + 
* under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef COINTERRA_H +#define COINTERRA_H + +#define CTA_READBUF_SIZE 8192 +#define CTA_MSG_SIZE 64 +#define CTA_READ_TIMEOUT 1 +#define CTA_READ_INTERVAL 100 +#define CTA_SCAN_INTERVAL 500 +#define CTA_RESET_TIMEOUT 1000 + +#define CTA_INIT_DIFF 32*0.9999847412109375 + +#if 0 +/* FIXME: how big should this be? */ +#define CTA_MAX_QUEUE 2300 +#else +#define CTA_MAX_QUEUE (32 / CTA_NROLL_TIME) +#endif + +#define CTA_NROLL_TIME 2 + +/* Offsets into buffer */ +#define CTA_MSG_TYPE 2 +#define CTA_RESET_TYPE 3 +#define CTA_RESET_DIFF 4 +#define CTA_RESET_LOAD 5 +#define CTA_RESET_PSLOAD 6 +#define CTA_DRIVER_TAG 3 +#define CTA_MCU_TAG 5 +#define CTA_MCU_CORE 5 +#define CTA_MCU_ASIC 6 +#define CTA_MCU_PIPE 8 +#define CTA_MATCH_NOFFSET 45 +#define CTA_MATCH_NONCE 60 +#define CTA_WDONE_NONCES 11 +#define CTA_MSG_RECVD 3 +#define CTA_WORK_MIDSTATE 9 +#define CTA_WORK_DATA 41 +#define CTA_WORK_NROLL 53 +#define CTA_WORK_DIFFBITS 55 +#define CTA_REQWORK_REQUESTS 3 +#define CTA_CORE_HASHRATES 3 + +/* Received message types */ +#define CTA_RECV_UNUSED 0 +#define CTA_RECV_REQWORK 1 +#define CTA_RECV_MATCH 2 +#define CTA_RECV_WDONE 3 +#define CTA_RECV_STATREAD 4 +#define CTA_RECV_STATSET 5 +#define CTA_RECV_INFO 6 +#define CTA_RECV_MSG 7 +#define CTA_RECV_RDONE 8 +#define CTA_RECV_STATDEBUG 10 +#define CTA_RECV_IRSTAT 11 + +/* Sent message types */ +#define CTA_SEND_UNUSED 0 +#define CTA_SEND_RESET 1 +#define CTA_SEND_WORK 2 +#define CTA_SEND_SETPERF 3 +#define CTA_SEND_REQUEST 4 +#define CTA_SEND_FMATCH 5 +#define CTA_SEND_IDENTIFY 6 +#define CTA_SEND_COREHASHRATE 7 + +/* Types of reset in CTA_RESET_TYPE */ +#define CTA_RESET_NONE 0 +#define CTA_RESET_UPDATE 1 +#define CTA_RESET_NEW 2 +#define CTA_RESET_INIT 3 + +#define CTA_INFO_HWREV 3 +#define CTA_INFO_SERNO 5 
/* Runtime tunables set from the command line (driver load percentages).
 * Declared extern here: a header must only declare these; the single
 * definition belongs in driver-cointerra.c (verify it provides one).  The
 * previous plain `int` tentative definitions produced one copy in every
 * translation unit that includes this header, which links only via the
 * legacy "common" merging and fails outright under -fno-common (the
 * default since GCC 10). */
extern int opt_cta_load;
extern int opt_ps_load;
uint8_t dies; + uint16_t cores; + uint8_t board_number; + uint8_t fwrev[3]; + uint16_t fw_year; + uint8_t fw_month; + uint8_t fw_day; + uint8_t init_diffbits; + uint8_t min_diffbits; + uint8_t max_diffbits; + + /* Status readings data */ + uint16_t coretemp[CTA_CORES]; + uint16_t ambtemp_low; + uint16_t ambtemp_avg; + uint16_t ambtemp_high; + uint16_t pump_tachs[CTA_PUMPS]; + uint16_t fan_tachs[CTA_FANS]; + uint16_t corevolts[CTA_CORES]; + uint16_t volts33; + uint16_t volts12; + uint16_t inactive; + uint16_t active; + uint16_t corefreqs[CTA_CORES]; + uint32_t uptime; + + /* Status settings data */ + uint8_t coreperf[CTA_CORES]; + uint8_t fanspeed[CTA_FANS]; + uint8_t dies_active; + uint8_t pipes_enabled[CTA_CORES]; + + /* Status debug data */ + uint16_t underruns; + uint16_t hw_errors[CTA_CORES]; + uint16_t fmatch_errors[CTA_CORES]; + + /* Running total from debug messages */ + int tot_underruns; + uint16_t tot_hw_errors[CTA_CORES]; + uint64_t tot_hashes; + uint64_t tot_reset_hashes; + uint64_t tot_flushed_hashes; + uint8_t autovoltage; + uint8_t current_ps_percent; + uint16_t power_used; + uint16_t power_voltage; + uint16_t ipower_used; + uint16_t ipower_voltage; + uint16_t power_temps[2]; + + bool autovoltage_complete; + + /* Calculated totals based on work done and nonces found */ + uint64_t hashes; + uint64_t tot_calc_hashes; + + /* Calculated totals based on shares returned */ + uint64_t share_hashes; + uint64_t tot_core_hashes[CTA_CORES]; + uint64_t tot_share_hashes; + struct timeval core_hash_start; + int requested; + uint16_t work_id; + int no_matching_work; + time_t last_pipe_nonce[1024]; + unsigned char pipe_bitmap[128]; + + struct thr_info *thr; + pthread_mutex_t lock; + pthread_mutex_t sendlock; + pthread_cond_t wake_cond; + pthread_t read_thr; + cgsem_t reset_sem; + + uint16_t irstat_vin[CTA_CORES]; + uint16_t irstat_iin[CTA_CORES]; + uint16_t irstat_vout[CTA_CORES]; + uint16_t irstat_iout[CTA_CORES]; + uint16_t irstat_temp1[CTA_CORES]; + uint16_t 
irstat_temp2[CTA_CORES]; + uint16_t irstat_pout[CTA_CORES]; + uint16_t irstat_pin[CTA_CORES]; + uint16_t irstat_efficiency[CTA_CORES]; + uint16_t irstat_status[CTA_CORES]; + + uint64_t old_hashes[16 * 2]; +}; + +#endif /* COINTERRA_H */ diff --git a/driver-drillbit.c b/driver-drillbit.c new file mode 100644 index 0000000..9b6a2c0 --- /dev/null +++ b/driver-drillbit.c @@ -0,0 +1,1092 @@ +/* + * Copyright 2013 Con Kolivas + * Copyright 2013 Angus Gratton + * Copyright 2013 James Nichols + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include "miner.h" +#include "driver-drillbit.h" +#include "sha2.h" + +#define TIMEOUT 3000 +#define RESULT_TIMEOUT 5000 +#define MAX_RESULTS 16 // max results from a single chip + +#define drvlog(prio, fmt, ...) 
do { \ + if (drillbit->device_id == -1) { \ + applog(prio, "%s: "fmt, \ + drillbit->drv->dname, \ + ##__VA_ARGS__); \ + } else { \ + applog(prio, "%s %d: "fmt, \ + drillbit->drv->name, \ + drillbit->device_id, \ + ##__VA_ARGS__); \ + } \ +} while (0) + +/* Request and response structs for firmware */ + +typedef struct { + uint16_t chip_id; + uint8_t midstate[32]; + uint8_t data[12]; +} WorkRequest; + +#define SZ_SERIALISED_WORKREQUEST 46 +static void serialise_work_request(char *buf, uint16_t chip_id, const struct work *wr); + +typedef struct { + uint16_t chip_id; + uint8_t num_nonces; + uint8_t is_idle; + uint32_t nonce[MAX_RESULTS]; +} WorkResult; + +#define SZ_SERIALISED_WORKRESULT (4+4*MAX_RESULTS) +static void deserialise_work_result(WorkResult *work_result, const char *buf); + +/* V4 config is the preferred one, used internally, non-ASIC-specific */ +typedef struct { + uint16_t core_voltage; // Millivolts + uint16_t clock_freq; // Clock frequency in MHz (or clock level 30-48 for Bitfury internal clock level) + uint8_t clock_div2; // Apply the /2 clock divider (both internal and external), where available + uint8_t use_ext_clock; // Flag. 
Ignored on boards without external clocks +} BoardConfig; + +typedef struct +{ + uint16_t chip_id; + uint8_t increase_clock; +} AutoTuneRequest; + +#define SZ_SERIALISED_AUTOTUNEREQUEST 3 +static void serialise_autotune_request(char *buf, AutoTuneRequest *aq); + +#define CONFIG_PW1 (1<<0) +#define CONFIG_PW2 (1<<1) + +// Possible core voltage settings on PW1 & PW2, used by legacy V3 config only +#define CONFIG_CORE_065V 0 +#define CONFIG_CORE_075V CONFIG_PW2 +#define CONFIG_CORE_085V CONFIG_PW1 +#define CONFIG_CORE_095V (CONFIG_PW1|CONFIG_PW2) + +/* V3 config is for backwards compatibility with older firmwares */ +typedef struct { + uint8_t core_voltage; // Set to flags defined above + uint8_t int_clock_level; // Clock level (30-48 without divider), see asic.c for details + uint8_t clock_div2; // Apply the /2 clock divider (both internal and external) + uint8_t use_ext_clock; // Ignored on boards without external clocks + uint16_t ext_clock_freq; + } BoardConfigV3; + +#define SZ_SERIALISED_BOARDCONFIG 6 +static void serialise_board_configV4(char *buf, BoardConfig *boardconfig); +static void serialise_board_configV3(char *buf, BoardConfigV3 *boardconfig); + +typedef struct { + uint8_t protocol_version; + char product[8]; + uint32_t serial; + uint8_t num_chips; + uint16_t capabilities; +} Identity; + +/* Capabilities flags known to bmminer */ +#define CAP_TEMP (1<<0) +#define CAP_EXT_CLOCK (1<<1) +#define CAP_IS_AVALON (1<<2) +#define CAP_LIMITER_REMOVED (1<<3) + +#define SZ_SERIALISED_IDENTITY 16 +static void deserialise_identity(Identity *identity, const char *buf); + +// Hashable structure of per-device config settings +typedef struct { + char key[9]; + BoardConfig config; + UT_hash_handle hh; +} config_setting; + +static config_setting *settings; + +static void drillbit_empty_buffer(struct cgpu_info *drillbit); + +/* Automatic tuning parameters */ +static uint32_t auto_every = 100; +static uint32_t auto_good = 1; +static uint32_t auto_bad = 3; +static uint32_t 
auto_max = 10; + +/* Return a pointer to the chip_info structure for a given chip id, or NULL otherwise */ +static struct drillbit_chip_info *find_chip(struct drillbit_info *info, uint16_t chip_id) { + int i; + + for (i = 0; i < info->num_chips; i++) { + if (info->chips[i].chip_id == chip_id) + return &info->chips[i]; + } + return NULL; +} + +/* Read a fixed size buffer back from USB, returns true on success */ +static bool usb_read_fixed_size(struct cgpu_info *drillbit, void *result, size_t result_size, int timeout, enum usb_cmds command_name) { + char *res = (char *)result; + int ms_left; + size_t count; + struct timeval tv_now, tv_start; + int amount; + + cgtime(&tv_start); + ms_left = timeout; + + amount = 1; + count = 0; + while (count < result_size && ms_left > 0) { + usb_read_timeout(drillbit, &res[count], result_size-count, &amount, ms_left, command_name); + count += amount; + cgtime(&tv_now); + ms_left = timeout - ms_tdiff(&tv_now, &tv_start); + } + if (count == result_size) { + return true; + } + drvlog(LOG_ERR, "Read incomplete fixed size packet - got %d bytes / %d (timeout %d)", + (int)count, (int)result_size, timeout); + drillbit_empty_buffer(drillbit); + return false; +} + +static bool usb_read_simple_response(struct cgpu_info *drillbit, char command, enum usb_cmds command_name); + +/* Write a simple one-byte command and expect a simple one-byte response + Returns true on success +*/ +static bool usb_send_simple_command(struct cgpu_info *drillbit, char command, enum usb_cmds command_name) { + int amount; + + usb_write_timeout(drillbit, &command, 1, &amount, TIMEOUT, C_BF_REQWORK); + if (amount != 1) { + drvlog(LOG_ERR, "Failed to write command %c", command); + return false; + } + return usb_read_simple_response(drillbit, command, command_name); +} + +/* Read a simple single-byte response and check it matches the correct command character + Return true on success +*/ +static bool usb_read_simple_response(struct cgpu_info *drillbit, char command, enum 
usb_cmds command_name) { + int amount; + char response; + /* Expect a single byte, matching the command, as acknowledgement */ + usb_read_timeout(drillbit, &response, 1, &amount, TIMEOUT, command_name); + if (amount != 1) { + drvlog(LOG_ERR, "Got no response to command %c", command); + return false; + } + if (response != command) { + drvlog(LOG_ERR, "Got unexpected response %c to command %c", response, command); + return false; + } + return true; +} + +#define EMPTY_TIMEOUT 5 + +static void drillbit_empty_buffer(struct cgpu_info *drillbit) +{ + char buf[512]; + int amount; + + do { + usb_read_timeout(drillbit, buf, sizeof(buf), &amount, EMPTY_TIMEOUT, C_BF_FLUSH); + } while (amount); +} + +static void drillbit_open(struct cgpu_info *drillbit) +{ + drillbit_empty_buffer(drillbit); +} + +static void drillbit_close(struct cgpu_info *drillbit) +{ + struct drillbit_info *info = drillbit->device_data; + drillbit_empty_buffer(drillbit); + if (info->chips) + free(info->chips); +} + +static void drillbit_identify(struct cgpu_info *drillbit) +{ + usb_send_simple_command(drillbit, 'L', C_BF_IDENTIFY); +} + +#define ID_TIMEOUT 1000 + +static bool drillbit_getinfo(struct cgpu_info *drillbit, struct drillbit_info *info) +{ + int err; + int amount; + char buf[SZ_SERIALISED_IDENTITY]; + Identity identity; + + drillbit_empty_buffer(drillbit); + err = usb_write_timeout(drillbit, "I", 1, &amount, TIMEOUT, C_BF_REQINFO); + if (err) { + drvlog(LOG_INFO, "Failed to write REQINFO"); + return false; + } + // can't call usb_read_fixed_size here as stats not initialised + err = usb_read_timeout(drillbit, buf, SZ_SERIALISED_IDENTITY, &amount, ID_TIMEOUT, C_BF_GETINFO); + if (err) { + drvlog(LOG_ERR, "Failed to read GETINFO"); + return false; + } + if (amount != SZ_SERIALISED_IDENTITY) { + drvlog(LOG_ERR, "Getinfo received %d bytes instead of %d", + amount, (int)sizeof(Identity)); + return false; + } + deserialise_identity(&identity, buf); + + // sanity checks on the identity buffer we get 
back + if (strlen(identity.product) == 0 || identity.serial == 0 || identity.num_chips == 0) { + drvlog(LOG_ERR, "Got invalid contents for GETINFO identity response"); + return false; + } + + const int MIN_VERSION = 2; + const int MAX_VERSION = 4; + if (identity.protocol_version < MIN_VERSION) { + drvlog(LOG_ERR, "Unknown device protocol version %d.", identity.protocol_version); + return false; + } + if (identity.protocol_version > MAX_VERSION) { + drvlog(LOG_ERR, "Device firmware uses newer Drillbit protocol %d. We only support up to %d. Find a newer cgminer!", identity.protocol_version, MAX_VERSION); + return false; + } + + if (identity.protocol_version == 2 && identity.num_chips == 1) { + // Production firmware Thumbs don't set any capability bits, so fill in the EXT_CLOCK one + identity.capabilities = CAP_EXT_CLOCK; + } + + // load identity data into device info structure + info->protocol_version = identity.protocol_version; + if (strncmp(identity.product, "DRILLBIT", sizeof(identity.product)) == 0) { + // Hack: first production firmwares all described themselves as DRILLBIT, so fill in the gaps + if (identity.num_chips == 1) + strcpy(info->product, "Thumb"); + else + strcpy(info->product, "Eight"); + } else { + memcpy(info->product, identity.product, sizeof(identity.product)); + } + info->serial = identity.serial; + info->num_chips = identity.num_chips; + info->capabilities = identity.capabilities; + + drvlog(LOG_INFO, "Getinfo returned version %d, product %s serial %08x num_chips %d", + info->protocol_version, info->product, info->serial, info->num_chips); + + drillbit_empty_buffer(drillbit); + return true; +} + +static bool drillbit_reset(struct cgpu_info *drillbit) +{ + struct drillbit_info *info = drillbit->device_data; + struct drillbit_chip_info *chip; + int i, k, res; + + res = usb_send_simple_command(drillbit, 'R', C_BF_REQRESET); + + for (i = 0; i < info->num_chips; i++) { + chip = &info->chips[i]; + chip->state = IDLE; + chip->work_sent_count = 0; + 
for (k = 0; k < WORK_HISTORY_LEN-1; k++) { + if (chip->current_work[k]) { + work_completed(drillbit, chip->current_work[k]); + chip->current_work[k] = NULL; + } + } + } + + drillbit_empty_buffer(drillbit); + return res; +} + +static config_setting *find_settings(struct cgpu_info *drillbit) +{ + struct drillbit_info *info = drillbit->device_data; + config_setting *setting; + char search_key[9]; + + if (!settings) { + drvlog(LOG_INFO, "Keeping onboard defaults for device %s (serial %08x)", + info->product, info->serial); + return NULL; + } + + // Search by serial + sprintf(search_key, "%08x", info->serial); + HASH_FIND_STR(settings, search_key, setting); + if (setting) { + drvlog(LOG_INFO, "Using serial specific settings for serial %s", search_key); + return setting; + } + + // Search by DRBxxx + snprintf(search_key, 9, "DRB%d", drillbit->device_id); + HASH_FIND_STR(settings, search_key, setting); + if (setting) { + drvlog(LOG_INFO, "Using device_id specific settings for device"); + return setting; + } + + // Failing that, search by product name + HASH_FIND_STR(settings, info->product, setting); + if (setting) { + drvlog(LOG_INFO, "Using product-specific settings for device %s", info->product); + return setting; + } + + // Search by "short" product name + snprintf(search_key, 9, "%c%d", info->product[0], info->num_chips); + HASH_FIND_STR(settings, search_key, setting); + if (setting) { + drvlog(LOG_INFO, "Using product-specific settings for device %s", info->product); + return setting; + } + + // Check for a generic/catchall drillbit-options argument (key set to NULL) + search_key[0] = 0; + HASH_FIND_STR(settings, search_key, setting); + if (setting) { + drvlog(LOG_INFO, "Using non-specific settings for device %s (serial %08x)", info->product, + info->serial); + return setting; + } + + drvlog(LOG_WARNING, "Keeping onboard defaults for device %s (serial %08x)", + info->product, info->serial); + return NULL; +} + +static void drillbit_send_config(struct cgpu_info 
*drillbit) +{ + struct drillbit_info *info = drillbit->device_data; + int amount; + char buf[SZ_SERIALISED_BOARDCONFIG+1]; + config_setting *setting; + BoardConfigV3 v3_config; + + // Find the relevant board config + setting = find_settings(drillbit); + if (!setting) + return; // Don't update board config from defaults + drvlog(LOG_NOTICE, "Config: %s:%d:%d:%d Serial: %08x", + setting->config.use_ext_clock ? "ext" : "int", + setting->config.clock_freq, + setting->config.clock_div2 ? 2 : 1, + setting->config.core_voltage, + info->serial); + + if (setting->config.use_ext_clock && !(info->capabilities & CAP_EXT_CLOCK)) { + drvlog(LOG_WARNING, "Chosen configuration specifies external clock but this device (serial %08x) has no external clock!", info->serial); + } + + if (info->protocol_version <= 3) { + /* Make up a backwards compatible V3 config structure to send to the miner */ + if (setting->config.core_voltage >= 950) + v3_config.core_voltage = CONFIG_CORE_095V; + else if (setting->config.core_voltage >= 850) + v3_config.core_voltage = CONFIG_CORE_085V; + else if (setting->config.core_voltage >= 750) + v3_config.core_voltage = CONFIG_CORE_075V; + else + v3_config.core_voltage = CONFIG_CORE_065V; + if (setting->config.clock_freq > 64) + v3_config.int_clock_level = setting->config.clock_freq / 5; + else + v3_config.int_clock_level = setting->config.clock_freq; + v3_config.clock_div2 = setting->config.clock_div2; + v3_config.use_ext_clock = setting->config.use_ext_clock; + v3_config.ext_clock_freq = setting->config.clock_freq; + serialise_board_configV3(&buf[1], &v3_config); + } else { + serialise_board_configV4(&buf[1], &setting->config); + } + buf[0] = 'C'; + usb_write_timeout(drillbit, buf, sizeof(buf), &amount, TIMEOUT, C_BF_CONFIG); + + /* Expect a single 'C' byte as acknowledgement */ + usb_read_simple_response(drillbit, 'C', C_BF_CONFIG); // TODO: verify response +} + +static void drillbit_updatetemps(struct thr_info *thr) +{ + struct cgpu_info *drillbit = 
thr->cgpu; + struct drillbit_info *info = drillbit->device_data; + char cmd; + int amount; + uint16_t temp; + struct timeval tv_now; + + if (!(info->capabilities & CAP_TEMP)) + return; + + cgtime(&tv_now); + if (ms_tdiff(&tv_now, &info->tv_lasttemp) < 1000) + return; // Only update temps once a second + info->tv_lasttemp = tv_now; + + cmd = 'T'; + usb_write_timeout(drillbit, &cmd, 1, &amount, TIMEOUT, C_BF_GETTEMP); + + if (!usb_read_fixed_size(drillbit, &temp, sizeof(temp), TIMEOUT, C_BF_GETTEMP)) { + drvlog(LOG_ERR, "Got no response to request for current temperature"); + return; + } + + drvlog(LOG_INFO, "Got temperature reading %d.%dC", temp/10, temp%10); + info->temp = temp; + if (temp > info->max_temp) + info->max_temp = temp; +} + +static void drillbit_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info *drillbit) +{ + struct drillbit_info *info = drillbit->device_data; + + if ((info->capabilities & CAP_TEMP) && info->temp != 0) { + tailsprintf(buf, bufsiz, "%c%2d %.1fC max%.1fC", + info->product[0], + info->num_chips, + (float)(info->temp/10.0), + (float)(info->max_temp/10.0)); + } else { + tailsprintf(buf, bufsiz, "%c%2d", + info->product[0], + info->num_chips); + } +} + + +static bool drillbit_parse_options(__maybe_unused struct cgpu_info *drillbit) +{ + /* Read configuration options (currently global not per-ASIC or per-board) */ + if (settings != NULL) + return true; // Already initialised + + char *next_opt = opt_drillbit_options; + while (next_opt && strlen(next_opt)) { + BoardConfig parsed_config; + config_setting *new_setting; + char key[9]; + int count, freq, clockdiv, voltage; + char clksrc[4]; + + // Try looking for an option tagged with a key, first + count = sscanf(next_opt, "%8[^:]:%3s:%d:%d:%d", key, + clksrc, &freq, &clockdiv, &voltage); + if (count < 5) { + key[0] = 0; + count = sscanf(next_opt, "%3s:%d:%d:%d", + clksrc, &freq, &clockdiv, &voltage); + if (count < 4) { + quithere(1, "Failed to parse drillbit-options. 
Invalid options string: '%s'", next_opt); + } + } + + if (clockdiv != 1 && clockdiv != 2) { + quithere(1, "Invalid clock divider value %d. Valid values are 1 & 2.", clockdiv); + } + parsed_config.clock_div2 = count > 2 && clockdiv == 2; + + if (!strcmp("int",clksrc)) { + parsed_config.use_ext_clock = 0; + } + else if (!strcmp("ext", clksrc)) { + parsed_config.use_ext_clock = 1; + } else + quithere(1, "Invalid clock source. Valid choices are int, ext."); + + parsed_config.clock_freq = freq; + parsed_config.core_voltage = voltage; + + // Add the new set of settings to the configuration choices hash table + new_setting = (config_setting *)calloc(sizeof(config_setting), 1); + memcpy(&new_setting->config, &parsed_config, sizeof(BoardConfig)); + memcpy(&new_setting->key, key, 8); + config_setting *ignore; + HASH_REPLACE_STR(settings, key, new_setting, ignore); + + // Look for next comma-delimited Drillbit option + next_opt = strstr(next_opt, ","); + if (next_opt) + next_opt++; + } + + if (opt_drillbit_auto) { + sscanf(opt_drillbit_auto, "%d:%d:%d:%d", + &auto_every, &auto_good, &auto_bad, &auto_max); + if (auto_max < auto_bad) { + quithere(1, "Bad drillbit-auto: MAX limit must be greater than BAD limit"); + } + if (auto_bad < auto_good) { + quithere(1, "Bad drillbit-auto: GOOD limit must be greater than BAD limit"); + } + } + + return true; +} + +static struct cgpu_info *drillbit_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct cgpu_info *drillbit; + struct drillbit_info *info; + int i; + + drillbit = usb_alloc_cgpu(&drillbit_drv, 1); + drillbit->device_id = -1; // so drvlog() prints dname + + if (!drillbit_parse_options(drillbit)) + goto out; + + if (!usb_init(drillbit, dev, found)) + goto out; + + drvlog(LOG_INFO, "Device found at %s", drillbit->device_path); + + info = calloc(sizeof(struct drillbit_info), 1); + if (!info) + quit(1, "Failed to calloc info in %s", __func__); + drillbit->device_data = info; + + drillbit_open(drillbit); + 
+ /* Send getinfo request */ + if (!drillbit_getinfo(drillbit, info)) + goto out_close; + + /* TODO: Add detection for actual chip ids based on command/response, + not prefill assumption about chip layout based on info structure */ + info->chips = calloc(sizeof(struct drillbit_chip_info), info->num_chips); + for (i = 0; i < info->num_chips; i++) { + info->chips[i].chip_id = i; + info->chips[i].auto_max = 999; + } + + /* Send reset request */ + if (!drillbit_reset(drillbit)) + goto out_close; + + drillbit_identify(drillbit); + drillbit_empty_buffer(drillbit); + + cgtime(&info->tv_lastchipinfo); + + if (!add_cgpu(drillbit)) + goto out_close; + + update_usb_stats(drillbit); + + if (info->capabilities & CAP_LIMITER_REMOVED) { + drvlog(LOG_WARNING, "Recommended limits have been disabled on this board, take care when changing settings."); + } + + drillbit_send_config(drillbit); + + drvlog(LOG_INFO, "Successfully initialised %s", + drillbit->device_path); + + return drillbit; +out_close: + drillbit_close(drillbit); + usb_uninit(drillbit); +out: + drillbit = usb_free_cgpu(drillbit); + return drillbit; +} + +static void drillbit_detect(bool __maybe_unused hotplug) +{ + usb_detect(&drillbit_drv, drillbit_detect_one); +} + +static uint32_t decnonce(uint32_t in) +{ + uint32_t out; + + /* First part load */ + out = (in & 0xFF) << 24; in >>= 8; + + /* Byte reversal */ + in = (((in & 0xaaaaaaaa) >> 1) | ((in & 0x55555555) << 1)); + in = (((in & 0xcccccccc) >> 2) | ((in & 0x33333333) << 2)); + in = (((in & 0xf0f0f0f0) >> 4) | ((in & 0x0f0f0f0f) << 4)); + + out |= (in >> 2)&0x3FFFFF; + + /* Extraction */ + if (in & 1) out |= (1 << 23); + if (in & 2) out |= (1 << 22); + + out -= 0x800004; + return out; +} + +#define BF_OFFSETS 3 +static const uint32_t bf_offsets[] = {-0x800000, 0, -0x400000}; + +static bool drillbit_checkresults(struct thr_info *thr, struct work *work, uint32_t nonce) +{ + struct cgpu_info *drillbit = thr->cgpu; + struct drillbit_info *info = drillbit->device_data; 
+ int i; + + if (info->capabilities & CAP_IS_AVALON) { + if (test_nonce(work, nonce)) { + submit_tested_work(thr, work); + return true; + } + } + else { /* Bitfury */ + nonce = decnonce(nonce); + for (i = 0; i < BF_OFFSETS; i++) { + if (test_nonce(work, nonce + bf_offsets[i])) { + submit_tested_work(thr, work); + return true; + } + } + } + return false; +} + +/* Check if this ASIC should be tweaked up or down in clock speed */ +static void drillbit_check_auto(struct thr_info *thr, struct drillbit_chip_info *chip) +{ + struct cgpu_info *drillbit = thr->cgpu; + AutoTuneRequest request; + char buf[SZ_SERIALISED_AUTOTUNEREQUEST+1]; + int amount; + bool tune_up, tune_down; + + /* + Only check automatic tuning every "auto_every" work units, + or if the error count exceeds the 'max' count + */ + if (chip->success_auto + chip->error_auto < auto_every && + (chip->error_auto < auto_max)) + return; + + tune_up = chip->error_auto < auto_good && chip->auto_delta < chip->auto_max; + tune_down = chip->error_auto > auto_bad; + + + drvlog(tune_up||tune_down ? LOG_NOTICE : LOG_DEBUG, + "Chip id %d has %d/%d error rate %s", chip->chip_id, chip->error_auto, + chip->error_auto + chip->success_auto, + tune_up ? " - tuning up" : tune_down ? 
" - tuning down" : " - no change"); + + if (tune_up || tune_down) { + /* Value should be tweaked */ + buf[0] = 'A'; + request.chip_id = chip->chip_id; + request.increase_clock = tune_up; + serialise_autotune_request(&buf[1], &request); + usb_write_timeout(drillbit, buf, sizeof(buf), &amount, TIMEOUT, C_BF_AUTOTUNE); + usb_read_simple_response(drillbit, 'A', C_BF_AUTOTUNE); + if (tune_up) { + chip->auto_delta++; + } else { + chip->auto_delta--; + if (chip->error_auto >= auto_max + && chip->success_count + chip->error_count > auto_every) { + drvlog(LOG_ERR, "Chip id %d capping auto delta at max %d",chip->chip_id, + chip->auto_delta); + chip->auto_max = chip->auto_delta; + } + } + } + + chip->success_auto = 0; + chip->error_auto = 0; +} + +// Check and submit back any pending work results from firmware, +// returns number of successful results found +static int check_for_results(struct thr_info *thr) +{ + struct cgpu_info *drillbit = thr->cgpu; + struct drillbit_info *info = drillbit->device_data; + struct drillbit_chip_info *chip; + char cmd; + int amount, i, k, found; + uint8_t j; + int successful_results = 0; + uint32_t result_count; + char buf[SZ_SERIALISED_WORKRESULT]; + WorkResult *responses = NULL; + WorkResult *response; + + if (unlikely(thr->work_restart)) + goto cleanup; + + // Send request for completed work + cmd = 'E'; + usb_write_timeout(drillbit, &cmd, 1, &amount, TIMEOUT, C_BF_GETRES); + + // Receive count for work results + if (!usb_read_fixed_size(drillbit, &result_count, sizeof(result_count), TIMEOUT, C_BF_GETRES)) { + drvlog(LOG_ERR, "Got no response to request for work results"); + goto cleanup; + } + if (unlikely(drillbit->usbinfo.nodev)) + goto cleanup; + if (result_count) + drvlog(LOG_DEBUG, "Result count %d",result_count); + + if (result_count > 1024) { + drvlog(LOG_ERR, "Got implausible result count %d - treating as error!", result_count); + goto cleanup; + } + + if (result_count == 0) { + // Short circuit reading any work results + return 0; 
+ } + + responses = calloc(result_count, sizeof(WorkResult)); + + // Receive work results (0 or more) into buffer + for (j = 0; j < result_count; j++) { + if (unlikely(drillbit->usbinfo.nodev)) + goto cleanup; + if (!usb_read_fixed_size(drillbit, buf, SZ_SERIALISED_WORKRESULT, TIMEOUT, C_BF_GETRES)) { + drvlog(LOG_ERR, "Failed to read response data packet idx %d count 0x%x", j, result_count); + drillbit_empty_buffer(drillbit); + goto cleanup; + } + deserialise_work_result(&responses[j], buf); + } + + for (j = 0; j < result_count; j++) { + if (unlikely(thr->work_restart)) + goto cleanup; + + response = &responses[j]; + drvlog(LOG_DEBUG, "Got response packet chip_id %d nonces %d is_idle %d", response->chip_id, response->num_nonces, response->is_idle); + chip = find_chip(info, response->chip_id); + if (!chip) { + drvlog(LOG_ERR, "Got work result for unknown chip id %d", response->chip_id); + drillbit_empty_buffer(drillbit); + continue; + } + if (chip->state == IDLE) { + drvlog(LOG_WARNING, "Got spurious work results for idle ASIC %d", response->chip_id); + } + if (response->num_nonces > MAX_RESULTS) { + drvlog(LOG_ERR, "Got invalid number of result nonces (%d) for chip id %d", response->num_nonces, response->chip_id); + drillbit_empty_buffer(drillbit); + goto cleanup; + } + + found = false; + for (i = 0; i < response->num_nonces; i++) { + if (unlikely(thr->work_restart)) + goto cleanup; + for (k = 0; k < WORK_HISTORY_LEN; k++) { + /* NB we deliberately check all results against all work because sometimes ASICs seem to give multiple "valid" nonces, + and this seems to avoid some result that would otherwise be rejected by the pool. + */ + if (chip->current_work[k] && drillbit_checkresults(thr, chip->current_work[k], response->nonce[i])) { + chip->success_count++; + chip->success_auto++; + successful_results++; + found = true; + } + } + } + drvlog(LOG_DEBUG, "%s nonce %08x", (found ? "Good":"Bad"), response->num_nonces ? 
response->nonce[0] : 0); + if (!found && chip->state != IDLE && response->num_nonces > 0) { + /* all nonces we got back from this chip were invalid */ + inc_hw_errors(thr); + chip->error_count++; + chip->error_auto++; + } + if (chip->state == WORKING_QUEUED && !response->is_idle) + chip->state = WORKING_NOQUEUED; // Time to queue up another piece of "next work" + else + chip->state = IDLE; // Uh-oh, we're totally out of work for this ASIC! + + if (opt_drillbit_auto && info->protocol_version >= 4) + drillbit_check_auto(thr, chip); + } + +cleanup: + if (responses) + free(responses); + return successful_results; +} + +static void drillbit_send_work_to_chip(struct thr_info *thr, struct drillbit_chip_info *chip) +{ + struct cgpu_info *drillbit = thr->cgpu; + struct work *work; + char buf[SZ_SERIALISED_WORKREQUEST+1]; + int amount, i; + + /* Get some new work for the chip */ + work = get_queue_work(thr, drillbit, thr->id); + if (unlikely(thr->work_restart)) { + work_completed(drillbit, work); + return; + } + + drvlog(LOG_DEBUG, "Sending work to chip_id %d", chip->chip_id); + serialise_work_request(&buf[1], chip->chip_id, work); + + /* Send work to cgminer */ + buf[0] = 'W'; + usb_write_timeout(drillbit, buf, sizeof(buf), &amount, TIMEOUT, C_BF_REQWORK); + + /* Expect a single 'W' byte as acknowledgement */ + usb_read_simple_response(drillbit, 'W', C_BF_REQWORK); + if (chip->state == WORKING_NOQUEUED) + chip->state = WORKING_QUEUED; + else + chip->state = WORKING_NOQUEUED; + + if (unlikely(thr->work_restart)) { + work_completed(drillbit, work); + return; + } + + // Read into work history + if (chip->current_work[0]) + work_completed(drillbit, chip->current_work[0]); + for (i = 0; i < WORK_HISTORY_LEN-1; i++) + chip->current_work[i] = chip->current_work[i+1]; + chip->current_work[WORK_HISTORY_LEN-1] = work; + cgtime(&chip->tv_start); + + chip->work_sent_count++; +} + +static int64_t drillbit_scanwork(struct thr_info *thr) +{ + struct cgpu_info *drillbit = thr->cgpu; + 
struct drillbit_info *info = drillbit->device_data; + struct drillbit_chip_info *chip; + struct timeval tv_now; + int amount, i, j, ms_diff, result_count = 0, sent_count = 0; + char buf[200]; + + /* send work to any chip without queued work */ + for (i = 0; i < info->num_chips && sent_count < 8; i++) { + if (info->chips[i].state != WORKING_QUEUED) { + drillbit_send_work_to_chip(thr, &info->chips[i]); + sent_count++; + } + if (unlikely(thr->work_restart) || unlikely(drillbit->usbinfo.nodev)) + goto cascade; + } + + /* check for any chips that have timed out on sending results */ + cgtime(&tv_now); + for (i = 0; i < info->num_chips; i++) { + if (info->chips[i].state == IDLE) + continue; + ms_diff = ms_tdiff(&tv_now, &info->chips[i].tv_start); + if (ms_diff > RESULT_TIMEOUT) { + if (info->chips[i].work_sent_count > 4) { + /* Only count ASIC timeouts after the pool has started to send work in earnest, + some pools can create unusual delays early on */ + drvlog(LOG_ERR, "Timing out unresponsive ASIC %d", info->chips[i].chip_id); + info->chips[i].timeout_count++; + info->chips[i].error_auto++; + } + info->chips[i].state = IDLE; + drillbit_send_work_to_chip(thr, &info->chips[i]); + } + if (unlikely(thr->work_restart) || unlikely(drillbit->usbinfo.nodev)) + goto cascade; + } + + /* Check for results */ + result_count = check_for_results(thr); + + /* Print a per-chip info line every 30 seconds */ + cgtime(&tv_now); + if (opt_log_level <= LOG_INFO && ms_tdiff(&tv_now, &info->tv_lastchipinfo) > 30000) { + /* TODO: this output line may get truncated (max debug is 256 bytes) once we get more + chips in a single device + */ + amount = sprintf(buf, "%s %d: S/E/T", drillbit->drv->name, drillbit->device_id); + if (amount > 0) { + for (i = 0; i < info->num_chips; i++) { + chip = &info->chips[i]; + j = snprintf(&buf[amount], sizeof(buf)-(size_t)amount, "%u:%u/%u/%u", + chip->chip_id, chip->success_count, chip->error_count, + chip->timeout_count); + if (j < 0) + break; + amount += 
j; + if ((size_t)amount >= sizeof(buf)) + break; + } + drvlog(LOG_INFO, "%s", buf); + cgtime(&info->tv_lastchipinfo); + } + } + + drillbit_updatetemps(thr); + +cascade: + if (unlikely(drillbit->usbinfo.nodev)) { + drvlog(LOG_WARNING, "Device disappeared, disabling thread"); + return -1; + } + + if (unlikely(thr->work_restart)) { + /* Issue an ASIC reset as we won't be coming back for any of these results */ + drvlog(LOG_DEBUG, "Received work restart, resetting ASIC"); + drillbit_reset(drillbit); + } + + return 0xffffffffULL * result_count; +} + +static struct api_data *drillbit_api_stats(struct cgpu_info *cgpu) +{ + struct drillbit_info *info = cgpu->device_data; + struct api_data *root = NULL; + char serial[16]; + int version; + + version = info->protocol_version; + root = api_add_int(root, "Protocol Version", &version, true); + root = api_add_string(root, "Product", info->product, false); + sprintf(serial, "%08x", info->serial); + root = api_add_string(root, "Serial", serial, true); + root = api_add_uint8(root, "ASIC Count", &info->num_chips, true); + if (info->capabilities & CAP_TEMP) { + float temp = (float)info->temp/10; + root = api_add_temp(root, "Temp", &temp, true); + temp = (float)info->max_temp/10; + root = api_add_temp(root, "Temp Max", &temp, true); + } + + return root; +} + +static void drillbit_reinit(struct cgpu_info *drillbit) +{ + drillbit_close(drillbit); + drillbit_open(drillbit); + drillbit_reset(drillbit); +} + +static void drillbit_shutdown(struct thr_info *thr) +{ + struct cgpu_info *drillbit = thr->cgpu; + + drillbit_close(drillbit); +} + +/* Currently hardcoded to BF1 devices */ +struct device_drv drillbit_drv = { + .drv_id = DRIVER_drillbit, + .dname = "Drillbit", + .name = "DRB", + .drv_detect = drillbit_detect, + .hash_work = &hash_driver_work, + .scanwork = drillbit_scanwork, + .get_api_stats = drillbit_api_stats, + .get_statline_before = drillbit_get_statline_before, + .reinit_device = drillbit_reinit, + .thread_shutdown = 
drillbit_shutdown, + .identify_device = drillbit_identify, +}; + + +/* Structure serialisation/deserialisation */ + +#define SERIALISE(FIELD) do { \ + memcpy(&buf[offset], &FIELD, sizeof(FIELD)); \ + offset += sizeof(FIELD); \ + } while (0) + +#define DESERIALISE(FIELD) do { \ + memcpy(&FIELD, &buf[offset], sizeof(FIELD)); \ + offset += sizeof(FIELD); \ + } while (0) + +static void serialise_work_request(char *buf, uint16_t chip_id, const struct work *work) +{ + size_t offset = 0; + SERIALISE(chip_id); + memcpy(&buf[offset], work->midstate, 32); + offset += 32; + memcpy(&buf[offset], work->data + 64, 12); + //offset += 12; +} + +static void deserialise_work_result(WorkResult *wr, const char *buf) +{ + int i; + size_t offset = 0; + DESERIALISE(wr->chip_id); + DESERIALISE(wr->num_nonces); + DESERIALISE(wr->is_idle); + for (i = 0; i < MAX_RESULTS; i++) + DESERIALISE(wr->nonce[i]); +} + +static void serialise_board_configV3(char *buf, BoardConfigV3 *bc) +{ + size_t offset = 0; + SERIALISE(bc->core_voltage); + SERIALISE(bc->int_clock_level); + SERIALISE(bc->clock_div2); + SERIALISE(bc->use_ext_clock); + SERIALISE(bc->ext_clock_freq); +} + +static void serialise_board_configV4(char *buf, BoardConfig *bc) +{ + size_t offset = 0; + SERIALISE(bc->core_voltage); + SERIALISE(bc->clock_freq); + SERIALISE(bc->clock_div2); + SERIALISE(bc->use_ext_clock); +} + +static void serialise_autotune_request(char *buf, AutoTuneRequest *aq) +{ + size_t offset = 0; + SERIALISE(aq->chip_id); + SERIALISE(aq->increase_clock); +} + +static void deserialise_identity(Identity *id, const char *buf) +{ + size_t offset = 0; + DESERIALISE(id->protocol_version); + DESERIALISE(id->product); + DESERIALISE(id->serial); + DESERIALISE(id->num_chips); + DESERIALISE(id->capabilities); +} diff --git a/driver-drillbit.h b/driver-drillbit.h new file mode 100644 index 0000000..b386107 --- /dev/null +++ b/driver-drillbit.h @@ -0,0 +1,56 @@ +/* + * Copyright 2013 Con Kolivas + * + * This program is free software; 
you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef DRILLBIT_H +#define DRILLBIT_H + +#include "miner.h" +#include "usbutils.h" + +#define WORK_HISTORY_LEN 4 + +struct drillbit_chip_info; + +/* drillbit_info structure applies to entire device */ +struct drillbit_info { + struct cgpu_info *base_cgpu; + uint8_t protocol_version; + uint8_t num_chips; + uint16_t capabilities; + char product[8]; + uint32_t serial; + struct drillbit_chip_info *chips; + struct timeval tv_lastchipinfo; + struct timeval tv_lasttemp; + uint16_t temp; + uint16_t max_temp; +}; + +enum drillbit_chip_state { + IDLE, /* Has no work */ + WORKING_NOQUEUED, /* Has current work but nothing queued as "next work" */ + WORKING_QUEUED /* Has current work and a piece of work queued for after that */ +}; + +struct drillbit_chip_info { + uint16_t chip_id; + struct work *current_work[WORK_HISTORY_LEN]; + enum drillbit_chip_state state; + struct timeval tv_start; + uint32_t success_count; + uint32_t error_count; + uint32_t timeout_count; + uint32_t work_sent_count; + uint32_t success_auto; + uint32_t error_auto; + int auto_delta; + int auto_max; +}; + +#endif /* DRILLBIT_H */ diff --git a/driver-hashfast.c b/driver-hashfast.c new file mode 100644 index 0000000..4175ade --- /dev/null +++ b/driver-hashfast.c @@ -0,0 +1,2066 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2013 Hashfast Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include + +#include "miner.h" +#include "usbutils.h" + +#include "driver-hashfast.h" + +int opt_hfa_ntime_roll = 1; +int opt_hfa_hash_clock = HFA_CLOCK_DEFAULT; +int opt_hfa_overheat = HFA_TEMP_OVERHEAT; +int opt_hfa_target = HFA_TEMP_TARGET; +bool opt_hfa_pll_bypass; +bool opt_hfa_dfu_boot; +int opt_hfa_fan_default = HFA_FAN_DEFAULT; +int opt_hfa_fan_max = HFA_FAN_MAX; +int opt_hfa_fan_min = HFA_FAN_MIN; +int opt_hfa_fail_drop = 10; +bool opt_hfa_noshed; + +char *opt_hfa_name; +char *opt_hfa_options; + +//////////////////////////////////////////////////////////////////////////////// +// Support for the CRC's used in header (CRC-8) and packet body (CRC-32) +//////////////////////////////////////////////////////////////////////////////// + +#define GP8 0x107 /* x^8 + x^2 + x + 1 */ +#define DI8 0x07 + +static bool hfa_crc8_set; + +char *set_hfa_fan(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to hfa-fan"; + if (ret == 1) + val2 = val1; + + if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100 || val2 < val1) + return "Invalid value passed to hfa-fan"; + + opt_hfa_fan_min = val1; + opt_hfa_fan_max = val2; + if (opt_hfa_fan_min > opt_hfa_fan_default) + opt_hfa_fan_default = opt_hfa_fan_min; + if (opt_hfa_fan_max < opt_hfa_fan_default) + opt_hfa_fan_default = opt_hfa_fan_max; + + return NULL; +} + +static unsigned char crc8_table[256]; /* CRC-8 table */ + +static void hfa_init_crc8(void) +{ + int i,j; + unsigned char crc; + + hfa_crc8_set = true; + for (i = 0; i < 256; i++) { + crc = i; + for (j = 0; j < 8; j++) + crc = (crc << 1) ^ ((crc & 0x80) ? 
DI8 : 0); + crc8_table[i] = crc & 0xFF; + } +} + +static unsigned char hfa_crc8(unsigned char *h) +{ + int i; + unsigned char crc; + + h++; // Preamble not included + for (i = 1, crc = 0xff; i < 7; i++) + crc = crc8_table[crc ^ *h++]; + + return crc; +} + +struct hfa_cmd { + uint8_t cmd; + char *cmd_name; + enum usb_cmds usb_cmd; +}; + +/* Entries in this array need to align with the actual op values specified + * in hf_protocol.h */ +#define C_NULL C_MAX +static const struct hfa_cmd hfa_cmds[] = { + {OP_NULL, "OP_NULL", C_NULL}, // 0 + {OP_ROOT, "OP_ROOT", C_NULL}, + {OP_RESET, "OP_RESET", C_HF_RESET}, + {OP_PLL_CONFIG, "OP_PLL_CONFIG", C_HF_PLL_CONFIG}, + {OP_ADDRESS, "OP_ADDRESS", C_HF_ADDRESS}, + {OP_READDRESS, "OP_READDRESS", C_NULL}, + {OP_HIGHEST, "OP_HIGHEST", C_NULL}, + {OP_BAUD, "OP_BAUD", C_HF_BAUD}, + {OP_UNROOT, "OP_UNROOT", C_NULL}, // 8 + {OP_HASH, "OP_HASH", C_HF_HASH}, + {OP_NONCE, "OP_NONCE", C_HF_NONCE}, + {OP_ABORT, "OP_ABORT", C_HF_ABORT}, + {OP_STATUS, "OP_STATUS", C_HF_STATUS}, + {OP_GPIO, "OP_GPIO", C_NULL}, + {OP_CONFIG, "OP_CONFIG", C_HF_CONFIG}, + {OP_STATISTICS, "OP_STATISTICS", C_HF_STATISTICS}, + {OP_GROUP, "OP_GROUP", C_NULL}, // 16 + {OP_CLOCKGATE, "OP_CLOCKGATE", C_HF_CLOCKGATE}, + + {OP_USB_INIT, "OP_USB_INIT", C_HF_USB_INIT}, // 18 + {OP_GET_TRACE, "OP_GET_TRACE", C_NULL}, + {OP_LOOPBACK_USB, "OP_LOOPBACK_USB", C_NULL}, + {OP_LOOPBACK_UART, "OP_LOOPBACK_UART", C_NULL}, + {OP_DFU, "OP_DFU", C_HF_DFU}, + {OP_USB_SHUTDOWN, "OP_USB_SHUTDOWN", C_NULL}, + {OP_DIE_STATUS, "OP_DIE_STATUS", C_HF_DIE_STATUS}, // 24 + {OP_GWQ_STATUS, "OP_GWQ_STATUS", C_HF_GWQ_STATUS}, + {OP_WORK_RESTART, "OP_WORK_RESTART", C_HF_WORK_RESTART}, + {OP_USB_STATS1, "OP_USB_STATS1", C_NULL}, + {OP_USB_GWQSTATS, "OP_USB_GWQSTATS", C_HF_GWQSTATS}, + {OP_USB_NOTICE, "OP_USB_NOTICE", C_HF_NOTICE}, + {OP_PING, "OP_PING", C_HF_PING}, + {OP_CORE_MAP, "OP_CORE_MAP", C_NULL}, + {OP_VERSION, "OP_VERSION", C_NULL}, // 32 + {OP_FAN, "OP_FAN", C_HF_FAN}, + {OP_NAME, "OP_NAME", 
C_OP_NAME} +}; + +#define HF_USB_CMD_OFFSET (128 - 18) +#define HF_USB_CMD(X) (X - HF_USB_CMD_OFFSET) + +/* Send an arbitrary frame, consisting of an 8 byte header and an optional + * packet body. */ +static bool __hfa_send_frame(struct cgpu_info *hashfast, uint8_t opcode, int tx_length, + uint8_t *packet) +{ + struct hashfast_info *info = hashfast->device_data; + int ret, amount; + bool retried = false; + + if (unlikely(hashfast->usbinfo.nodev)) + return false; + + info->last_send = time(NULL); + applog(LOG_DEBUG, "%s %s: Sending %s frame", hashfast->drv->name, hashfast->unique_id, hfa_cmds[opcode].cmd_name); +retry: + ret = usb_write(hashfast, (char *)packet, tx_length, &amount, + hfa_cmds[opcode].usb_cmd); + if (unlikely(ret < 0 || amount != tx_length)) { + if (hashfast->usbinfo.nodev) + return false; + if (!retried) { + applog(LOG_ERR, "%s %s: hfa_send_frame: USB Send error, ret %d amount %d vs. tx_length %d, retrying", + hashfast->drv->name, hashfast->unique_id, ret, amount, tx_length); + retried = true; + goto retry; + } + applog(LOG_ERR, "%s %s: hfa_send_frame: USB Send error, ret %d amount %d vs. 
tx_length %d", + hashfast->drv->name, hashfast->unique_id, ret, amount, tx_length); + return false; + } + + if (retried) + applog(LOG_WARNING, "%s %s: hfa_send_frame: recovered OK", hashfast->drv->name, hashfast->unique_id); + + return true; +} + +static bool hfa_send_generic_frame(struct cgpu_info *hashfast, uint8_t opcode, uint8_t chip_address, + uint8_t core_address, uint16_t hdata, uint8_t *data, int len) +{ + uint8_t packet[256]; + struct hf_header *p = (struct hf_header *)packet; + int tx_length, ret, amount; + + p->preamble = HF_PREAMBLE; + p->operation_code = opcode; + p->chip_address = chip_address; + p->core_address = core_address; + p->hdata = htole16(hdata); + p->data_length = len / 4; + p->crc8 = hfa_crc8(packet); + + if (len) + memcpy(&packet[sizeof(struct hf_header)], data, len); + tx_length = sizeof(struct hf_header) + len; + + ret = usb_write(hashfast, (char *)packet, tx_length, &amount, C_NULL); + + return ((ret >= 0) && (amount == tx_length)); +} + +static bool hfa_send_frame(struct cgpu_info *hashfast, uint8_t opcode, uint16_t hdata, + uint8_t *data, int len) +{ + uint8_t packet[256]; + struct hf_header *p = (struct hf_header *)packet; + int tx_length; + + p->preamble = HF_PREAMBLE; + p->operation_code = hfa_cmds[opcode].cmd; + p->chip_address = HF_GWQ_ADDRESS; + p->core_address = 0; + p->hdata = htole16(hdata); + p->data_length = len / 4; + p->crc8 = hfa_crc8(packet); + + if (len) + memcpy(&packet[sizeof(struct hf_header)], data, len); + tx_length = sizeof(struct hf_header) + len; + + return (__hfa_send_frame(hashfast, opcode, tx_length, packet)); +} + +/* Send an already assembled packet, consisting of an 8 byte header which may + * or may not be followed by a packet body. 
*/ + +static bool hfa_send_packet(struct cgpu_info *hashfast, struct hf_header *h, int cmd) +{ + int amount, ret, len; + + if (unlikely(hashfast->usbinfo.nodev)) + return false; + + len = sizeof(*h) + h->data_length * 4; + ret = usb_write(hashfast, (char *)h, len, &amount, hfa_cmds[cmd].usb_cmd); + if (ret < 0 || amount != len) { + applog(LOG_WARNING, "%s %s: send_packet: %s USB Send error, ret %d amount %d vs. length %d", + hashfast->drv->name, hashfast->unique_id, hfa_cmds[cmd].cmd_name, ret, amount, len); + return false; + } + return true; +} + +#define HFA_GET_HEADER_BUFSIZE 512 + +static bool hfa_get_header(struct cgpu_info *hashfast, struct hf_header *h, uint8_t *computed_crc) +{ + int amount, ret, orig_len, len, ofs = 0; + cgtimer_t ts_start; + char buf[HFA_GET_HEADER_BUFSIZE]; + char *header; + + if (unlikely(hashfast->usbinfo.nodev)) + return false; + + orig_len = len = sizeof(*h); + + /* Read for up to 500ms till we find the first occurrence of HF_PREAMBLE + * though it should be the first byte unless we get woefully out of + * sync. */ + cgtimer_time(&ts_start); + do { + cgtimer_t ts_now, ts_diff; + + cgtimer_time(&ts_now); + cgtimer_sub(&ts_now, &ts_start, &ts_diff); + if (cgtimer_to_ms(&ts_diff) > 500) + return false; + + if (unlikely(hashfast->usbinfo.nodev)) + return false; + if(ofs + len > HFA_GET_HEADER_BUFSIZE) { + // Not expected to happen. + applog(LOG_WARNING, "hfa_get_header() tried to overflow buf[]."); + return false; + } + ret = usb_read(hashfast, buf + ofs, len, &amount, C_HF_GETHEADER); + + if (unlikely(ret && ret != LIBUSB_ERROR_TIMEOUT)) + return false; + ofs += amount; + header = memchr(buf, HF_PREAMBLE, ofs); + if (header) { + /* Toss any leading data we can't use */ + if (header != buf) { + memmove(buf, header, ofs); + ofs -= header - buf; + } + len -= ofs; + } + else { + /* HF_PREAMBLE not found, toss all the useless leading data. 
*/ + ofs = 0; + len = sizeof(*h); + } + } while (len > 0); + + memcpy(h, header, orig_len); + *computed_crc = hfa_crc8((uint8_t *)h); + + return true; +} + +static bool hfa_get_data(struct cgpu_info *hashfast, char *buf, int len4) +{ + int amount, ret, len = len4 * 4; + + if (unlikely(hashfast->usbinfo.nodev)) + return false; + ret = usb_read(hashfast, buf, len, &amount, C_HF_GETDATA); + if (ret) + return false; + if (amount != len) { + applog(LOG_WARNING, "%s %s: get_data: Strange amount returned %d vs. expected %d", + hashfast->drv->name, hashfast->unique_id, amount, len); + return false; + } + return true; +} + +static const char *hf_usb_init_errors[] = { + "Success", + "Reset timeout", + "Address cycle timeout", + "Clockgate operation timeout", + "Configuration operation timeout", + "Excessive core failures", + "All cores failed diagnostics", + "Too many groups configured - increase ntime roll amount", + "Chaining connections detected but secondary board(s) did not respond", + "Secondary board communication error", + "Main board 12V power is bad", + "Secondary board(s) 12V power is bad", + "Main board FPGA programming error", + "Main board FPGA SPI read timeout", + "Main board FPGA Bad magic number", + "Main board FPGA SPI write timeout", + "Main board FPGA register read/write test failed", + "ASIC core power fault", + "Dynamic baud rate change timeout", + "Address failure", + "Regulator programming error", + "Address range inconsistent after mixed reconfiguration", + "Timeout after mixed reconfiguration" +}; + +static bool hfa_clear_readbuf(struct cgpu_info *hashfast); + +struct op_nameframe { + struct hf_header h; + char name[32]; +} __attribute__((packed)); + +static void hfa_write_opname(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + const uint8_t opcode = HF_USB_CMD(OP_NAME); + struct op_nameframe nameframe; + struct hf_header *h = (struct hf_header *)&nameframe; + const int tx_length = sizeof(struct op_nameframe); + + memset(&nameframe, 0, 
sizeof(nameframe)); + strncpy(nameframe.name, info->op_name, 30); + h->preamble = HF_PREAMBLE; + h->operation_code = hfa_cmds[opcode].cmd; + h->core_address = 1; + h->data_length = 32 / 4; + h->crc8 = hfa_crc8((unsigned char *)h); + applog(LOG_DEBUG, "%s %d: Opname being set to %s", hashfast->drv->name, + hashfast->device_id, info->op_name); + __hfa_send_frame(hashfast, opcode, tx_length, (uint8_t *)&nameframe); +} + +/* If no opname or an invalid opname is set, change it to the serial number if + * it exists, or a random name based on timestamp if not. */ +static void hfa_choose_opname(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + uint64_t usecs; + + if (info->serial_number) + sprintf(info->op_name, "%08x", info->serial_number); + else { + struct timeval tv_now; + + cgtime(&tv_now); + usecs = (uint64_t)(tv_now.tv_sec) * (uint64_t)1000000 + (uint64_t)tv_now.tv_usec; + sprintf(info->op_name, "%lx", (long unsigned int)usecs); + } + hfa_write_opname(hashfast, info); +} + +// Generic setting header +struct hf_settings_data { + uint8_t revision; + uint8_t ref_frequency; + uint16_t magic; + uint16_t frequency0; + uint16_t voltage0; + uint16_t frequency1; + uint16_t voltage1; + uint16_t frequency2; + uint16_t voltage2; + uint16_t frequency3; + uint16_t voltage3; +} __attribute__((packed,aligned(4))); + +static bool hfa_set_voltages(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + struct hf_settings_data op_settings_data; + + op_settings_data.revision = 1; + op_settings_data.ref_frequency = 25; + op_settings_data.magic = HFA_MAGIC_SETTINGS_VALUE; + + op_settings_data.frequency0 = info->hash_clock_rate; + op_settings_data.voltage0 = info->hash_voltage; + op_settings_data.frequency1 = info->hash_clock_rate; + op_settings_data.voltage1 = info->hash_voltage; + op_settings_data.frequency2 = info->hash_clock_rate; + op_settings_data.voltage2 = info->hash_voltage; + op_settings_data.frequency3 = info->hash_clock_rate; + op_settings_data.voltage3 = 
info->hash_voltage; + + hfa_send_generic_frame(hashfast, OP_SETTINGS, 0x00, 0x01, HFA_MAGIC_SETTINGS_VALUE, + (uint8_t *)&op_settings_data, sizeof(op_settings_data)); + // reset the board once to switch to new voltage settings + hfa_send_generic_frame(hashfast, OP_POWER, 0xff, 0x00, 0x1, NULL, 0); + hfa_send_generic_frame(hashfast, OP_POWER, 0xff, 0x00, 0x2, NULL, 0); + + return true; +} + +static bool hfa_send_shutdown(struct cgpu_info *hashfast); + +static bool hfa_reset(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + struct hf_usb_init_header usb_init[2], *hu = usb_init; + struct hf_usb_init_base *db; + struct hf_usb_init_options *ho; + int retries = 0, i; + bool ret = false; + char buf[1024]; + struct hf_header *h = (struct hf_header *)buf; + uint8_t hcrc; + + /* Hash clock rate in Mhz. Set to opt_hfa_hash_clock if it has not + * been inherited across a restart. */ + if (!info->hash_clock_rate) + info->hash_clock_rate = opt_hfa_hash_clock; + info->group_ntime_roll = opt_hfa_ntime_roll; + info->core_ntime_roll = 1; + + // Assemble the USB_INIT request + memset(hu, 0, sizeof(*hu)); + hu->preamble = HF_PREAMBLE; + hu->operation_code = OP_USB_INIT; + hu->protocol = PROTOCOL_GLOBAL_WORK_QUEUE; // Protocol to use + if (!opt_hfa_noshed) + hu->shed_supported = true; + // Force PLL bypass + hu->pll_bypass = opt_hfa_pll_bypass; + hu->hash_clock = info->hash_clock_rate; // Hash clock rate in Mhz + if (info->group_ntime_roll > 1 && info->core_ntime_roll) { + ho = (struct hf_usb_init_options *)(hu + 1); + memset(ho, 0, sizeof(*ho)); + ho->group_ntime_roll = info->group_ntime_roll; + ho->core_ntime_roll = info->core_ntime_roll; + hu->data_length = sizeof(*ho) / 4; + } + hu->crc8 = hfa_crc8((uint8_t *)hu); + applog(LOG_INFO, "%s %s: Sending OP_USB_INIT with GWQ protocol specified", + hashfast->drv->name, hashfast->unique_id); +resend: + if (unlikely(hashfast->usbinfo.nodev)) + goto out; + + if (!hfa_clear_readbuf(hashfast)) + goto out; + + if 
(!hfa_send_packet(hashfast, (struct hf_header *)hu, HF_USB_CMD(OP_USB_INIT))) + goto out; + + // Check for the correct response. + // We extend the normal timeout - a complete device initialization, including + // bringing power supplies up from standby, etc., can take over a second. +tryagain: + for (i = 0; i < 10; i++) { + ret = hfa_get_header(hashfast, h, &hcrc); + if (unlikely(hashfast->usbinfo.nodev)) + goto out; + if (ret) + break; + } + if (!ret) { + if (retries++ < 3) + goto resend; + applog(LOG_WARNING, "%s %s: OP_USB_INIT failed!", hashfast->drv->name, hashfast->unique_id); + goto out; + } + if (h->crc8 != hcrc) { + applog(LOG_WARNING, "%s %s: OP_USB_INIT failed! CRC mismatch", hashfast->drv->name, hashfast->unique_id); + ret = false; + goto out; + } + if (h->operation_code != OP_USB_INIT) { + // This can happen if valid packet(s) were in transit *before* the OP_USB_INIT arrived + // at the device, so we just toss the packets and keep looking for the response. + applog(LOG_WARNING, "%s %s: OP_USB_INIT: Tossing packet, valid but unexpected type %d", + hashfast->drv->name, hashfast->unique_id, h->operation_code); + hfa_get_data(hashfast, buf, h->data_length); + if (retries++ < 3) + goto tryagain; + ret = false; + goto out; + } + + applog(LOG_DEBUG, "%s %s: Good reply to OP_USB_INIT", hashfast->drv->name, hashfast->unique_id); + applog(LOG_DEBUG, "%s %s: OP_USB_INIT: %d die in chain, %d cores, device_type %d, refclk %d Mhz", + hashfast->drv->name, hashfast->unique_id, h->chip_address, h->core_address, h->hdata & 0xff, (h->hdata >> 8) & 0xff); + + // Save device configuration + info->asic_count = h->chip_address; + info->core_count = h->core_address; + info->device_type = (uint8_t)h->hdata; + info->ref_frequency = (uint8_t)(h->hdata >> 8); + info->hash_sequence_head = 0; + info->hash_sequence_tail = 0; + info->device_sequence_tail = 0; + + if (info->asic_count == 12) + hashfast->drv->name = "HFS"; + else if (info->asic_count == 4) + hashfast->drv->name = 
"HFB"; + + // Size in bytes of the core bitmap in bytes + info->core_bitmap_size = (((info->asic_count * info->core_count) + 31) / 32) * 4; + + // Get the usb_init_base structure + if (!hfa_get_data(hashfast, (char *)&info->usb_init_base, U32SIZE(info->usb_init_base))) { + applog(LOG_WARNING, "%s %s: OP_USB_INIT failed! Failure to get usb_init_base data", + hashfast->drv->name, hashfast->unique_id); + ret = false; + goto out; + } + db = &info->usb_init_base; + info->firmware_version = ((db->firmware_rev >> 8) & 0xff) + (double)(db->firmware_rev & 0xff) / 10.0; + info->hardware_version = ((db->hardware_rev >> 8) & 0xff) + (double)(db->hardware_rev & 0xff) / 10.0; + applog(LOG_INFO, "%s %s: firmware_rev: %.1f", hashfast->drv->name, hashfast->unique_id, + info->firmware_version); + applog(LOG_INFO, "%s %s: hardware_rev: %.1f", hashfast->drv->name, hashfast->unique_id, + info->hardware_version); + applog(LOG_INFO, "%s %s: serial number: %08x", hashfast->drv->name, hashfast->unique_id, + db->serial_number); + applog(LOG_INFO, "%s %s: hash clockrate: %d Mhz", hashfast->drv->name, hashfast->unique_id, + db->hash_clockrate); + applog(LOG_INFO, "%s %s: inflight_target: %d", hashfast->drv->name, hashfast->unique_id, + db->inflight_target); + applog(LOG_INFO, "%s %s: sequence_modulus: %d", hashfast->drv->name, hashfast->unique_id, + db->sequence_modulus); + + // Now a copy of the config data used + if (!hfa_get_data(hashfast, (char *)&info->config_data, U32SIZE(info->config_data))) { + applog(LOG_WARNING, "%s %s: OP_USB_INIT failed! Failure to get config_data", + hashfast->drv->name, hashfast->unique_id); + ret = false; + goto out; + } + + // Now the core bitmap + info->core_bitmap = malloc(info->core_bitmap_size); + if (!info->core_bitmap) + quit(1, "Failed to malloc info core bitmap in hfa_reset"); + if (!hfa_get_data(hashfast, (char *)info->core_bitmap, info->core_bitmap_size / 4)) { + applog(LOG_WARNING, "%s %s: OP_USB_INIT failed! 
Failure to get core_bitmap", hashfast->drv->name, hashfast->unique_id); + ret = false; + goto out; + } + + // See if the initialization succeeded + if (db->operation_status) { + applog(LOG_ERR, "%s %s: OP_USB_INIT failed! Operation status %d (%s)", + hashfast->drv->name, hashfast->unique_id, db->operation_status, + (db->operation_status < sizeof(hf_usb_init_errors)/sizeof(hf_usb_init_errors[0])) ? + hf_usb_init_errors[db->operation_status] : "Unknown error code"); + ret = false; + switch (db->operation_status) { + case E_CORE_POWER_FAULT: + for (i = 0; i < 4; i++) { + if (((db->extra_status_1 >> i) & 0x11) == 0x1) { + applog(LOG_ERR, "%s %s: OP_USB_INIT: Quadrant %d (of 4) regulator failure", + hashfast->drv->name, hashfast->unique_id, i + 1); + } + } + break; + default: + break; + } + goto out; + } + + if (!db->hash_clockrate) { + applog(LOG_INFO, "%s %s: OP_USB_INIT failed! Clockrate reported as zero", + hashfast->drv->name, hashfast->unique_id); + ret = false; + goto out; + } + info->num_sequence = db->sequence_modulus; + info->serial_number = db->serial_number; + info->base_clock = db->hash_clockrate; + + ret = hfa_clear_readbuf(hashfast); +out: + if (!ret) { + hfa_send_shutdown(hashfast); + usb_nodev(hashfast); + } + return ret; +} + +static bool hfa_clear_readbuf(struct cgpu_info *hashfast) +{ + int amount, ret = 0; + char buf[512]; + + do { + if (hashfast->usbinfo.nodev) { + ret = LIBUSB_ERROR_NO_DEVICE; + break; + } + ret = usb_read(hashfast, buf, 512, &amount, C_HF_CLEAR_READ); + } while (!ret && amount); + + if (ret && ret != LIBUSB_ERROR_TIMEOUT) + return false; + return true; +} + +static bool hfa_send_shutdown(struct cgpu_info *hashfast) +{ + bool ret = false; + + if (hashfast->usbinfo.nodev) + return ret; + /* Send a restart before the shutdown frame to tell the device to + * discard any work it thinks is in flight for a cleaner restart. 
*/ + if (!hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), 0, (uint8_t *)NULL, 0)) + return ret; + if (hfa_send_frame(hashfast, HF_USB_CMD(OP_USB_SHUTDOWN), 0, NULL, 0)) { + /* Wait to allow device to properly shut down. */ + cgsleep_ms(1000); + ret = true; + } + return ret; +} + +static struct cgpu_info *hfa_old_device(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + struct cgpu_info *cgpu, *found = NULL; + struct hashfast_info *cinfo = NULL; + int i; + + /* See if we can find a zombie instance of the same device */ + for (i = 0; i < mining_threads; i++) { + cgpu = mining_thr[i]->cgpu; + if (!cgpu) + continue; + if (cgpu == hashfast) + continue; + if (cgpu->drv->drv_id != DRIVER_hashfast) + continue; + if (!cgpu->usbinfo.nodev) + continue; + cinfo = cgpu->device_data; + if (!cinfo) + continue; + if (info->op_name[0] != '\0' && !strncmp(info->op_name, cinfo->op_name, 32)) { + found = cgpu; + break; + } + if (info->serial_number && info->serial_number == cinfo->serial_number) { + found = cgpu; + break; + } + } + return found; +} + +static void hfa_set_clock(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + uint16_t hdata; + int i; + + hdata = (WR_CLOCK_VALUE << WR_COMMAND_SHIFT) | info->hash_clock_rate; + + hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), hdata, (uint8_t *)NULL, 0); + /* We won't know what the real clock is in this case without a + * usb_init_base message so we have to assume it's what we asked. */ + info->base_clock = info->hash_clock_rate; + for (i = 0; i < info->asic_count; i++) + info->die_data[i].hash_clock = info->base_clock; +} + +/* Look for an op name match and apply any options to its first attempted + * init sequence. This function allows any arbitrary number of extra parameters + * to be added in the future. 
*/ +static void hfa_check_options(struct hashfast_info *info) +{ + char *p, *options, *found = NULL, *marker; + int maxlen, option = 0; + + if (!opt_hfa_options) + return; + + if (!info->op_name) + return; + + maxlen = strlen(info->op_name); + + options = strdup(opt_hfa_options); + for (p = strtok(options, ","); p; p = strtok(NULL, ",")) { + int cmplen = strlen(p); + + if (maxlen < cmplen) + cmplen = maxlen; + if (cmplen < maxlen) + continue; + if (!strncmp(info->op_name, p, cmplen)) { + found = strdup(p); + break; + } + } + free(options); + if (!found) + return; + + for (p = strtok(found, ":"); p; p = strtok(NULL, ":")) { + long lval; + + /* Parse each option in order, leaving room to add more */ + switch(option++) { + default: + break; + case 1: + lval = strtol(p, NULL, 10); + if (lval < HFA_CLOCK_MIN || lval > HFA_CLOCK_MAX) { + applog(LOG_ERR, "Invalid clock speed %ld set with hashfast option for %s", + lval, info->op_name); + break; + } + info->hash_clock_rate = lval; + marker = strchr(p,'@'); + if (marker != NULL) { + lval = strtol(marker+1, NULL, 10); + if (lval < HFA_VOLTAGE_MIN || lval > HFA_VOLTAGE_MAX) { + applog(LOG_ERR, "Invalid core voltage %ld set with hashfast option for %s", + lval, info->op_name); + break; + } + info->hash_voltage = lval; + } + break; + } + } + free(found); +} + +static bool hfa_detect_common(struct cgpu_info *hashfast) +{ + struct hashfast_info *info; + char buf[1024]; + struct hf_header *h = (struct hf_header *)buf; + uint8_t hcrc; + bool ret; + int i; + + info = calloc(sizeof(struct hashfast_info), 1); + if (!info) + quit(1, "Failed to calloc hashfast_info in hfa_detect_common"); + hashfast->device_data = info; + + /* Try sending and receiving an OP_NAME */ + ret = hfa_send_frame(hashfast, HF_USB_CMD(OP_NAME), 0, (uint8_t *)NULL, 0); + if (hashfast->usbinfo.nodev) { + ret = false; + goto out; + } + if (!ret) { + applog(LOG_WARNING, "%s %d: Failed to send OP_NAME!", hashfast->drv->name, + hashfast->device_id); + goto out; + } + 
ret = hfa_get_header(hashfast, h, &hcrc); + if (hashfast->usbinfo.nodev) { + ret = false; + goto out; + } + if (!ret) { + /* We should receive a valid header even if OP_NAME isn't + * supported by the firmware. */ + applog(LOG_NOTICE, "%s %d: No response to name query - failed init or firmware upgrade required.", + hashfast->drv->name, hashfast->device_id); + ret = true; + } else { + /* Only try to parse the name if the firmware supports OP_NAME */ + if (h->operation_code == OP_NAME) { + if (!hfa_get_data(hashfast, info->op_name, 32 / 4)) { + applog(LOG_WARNING, "%s %d: OP_NAME failed! Failure to get op_name data", + hashfast->drv->name, hashfast->device_id); + goto out; + } + info->has_opname = info->opname_valid = true; + applog(LOG_DEBUG, "%s: Returned an OP_NAME", hashfast->drv->name); + for (i = 0; i < 32; i++) { + if (i > 0 && info->op_name[i] == '\0') + break; + /* Make sure the op_name is valid ascii only */ + if (info->op_name[i] < 32 || info->op_name[i] > 126) { + info->opname_valid = false; + break; + } + } + } + } + + info->cgpu = hashfast; + /* Look for a matching zombie instance and inherit values from it if it + * exists. 
*/ + if (info->has_opname && info->opname_valid) { + info->old_cgpu = hfa_old_device(hashfast, info); + if (info->old_cgpu) { + struct hashfast_info *cinfo = info->old_cgpu->device_data; + + applog(LOG_NOTICE, "%s: Found old instance by op name %s at device %d", + hashfast->drv->name, info->op_name, info->old_cgpu->device_id); + info->resets = ++cinfo->resets; + info->hash_clock_rate = cinfo->hash_clock_rate; + } else { + applog(LOG_NOTICE, "%s: Found device with name %s", hashfast->drv->name, + info->op_name); + hfa_check_options(info); + } + } + +out: + if (!ret) { + if (!hashfast->usbinfo.nodev) + hfa_clear_readbuf(hashfast); + hashfast->device_data = NULL; + free(info); + } + return ret; +} + +static bool hfa_initialise(struct cgpu_info *hashfast) +{ + int err = 7; + + if (hashfast->usbinfo.nodev) + return false; + + if (!hfa_clear_readbuf(hashfast)) + return false; +#ifdef WIN32 + err = usb_transfer(hashfast, 0, 9, 1, 0, C_ATMEL_RESET); + if (!err) + err = usb_transfer(hashfast, 0x21, 0x22, 0, 0, C_ATMEL_OPEN); + if (!err) { + uint32_t buf[2]; + + /* Magic sequence to reset device only really needed for windows + * but harmless on linux. */ + buf[0] = 0x80250000; + buf[1] = 0x00000800; + err = usb_transfer_data(hashfast, 0x21, 0x20, 0x0000, 0, buf, + 7, C_ATMEL_INIT); + } + if (err < 0) { + applog(LOG_INFO, "%s %s: Failed to open with error %s", + hashfast->drv->name, hashfast->unique_id, libusb_error_name(err)); + } +#endif + /* Must have transmitted init sequence sized buffer */ + return (err == 7); +} + +static void hfa_dfu_boot(struct cgpu_info *hashfast) +{ + bool ret; + + if (unlikely(hashfast->usbinfo.nodev)) + return; + + ret = hfa_send_frame(hashfast, HF_USB_CMD(OP_DFU), 0, NULL, 0); + applog(LOG_WARNING, "%s %s: %03d:%03d DFU Boot %s", hashfast->drv->name, hashfast->unique_id, + hashfast->usbinfo.bus_number, hashfast->usbinfo.device_address, + ret ? 
"Succeeded" : "Failed"); +} + +static struct cgpu_info *hfa_detect_one(libusb_device *dev, struct usb_find_devices *found) +{ + struct cgpu_info *hashfast; + + hashfast = usb_alloc_cgpu(&hashfast_drv, HASHFAST_MINER_THREADS); + if (!hashfast) + quit(1, "Failed to usb_alloc_cgpu hashfast"); + hashfast->unique_id = ""; + + if (!usb_init(hashfast, dev, found)) { + hashfast = usb_free_cgpu(hashfast); + return NULL; + } + + hashfast->usbdev->usb_type = USB_TYPE_STD; + + if (!hfa_initialise(hashfast)) { + hashfast = usb_free_cgpu(hashfast); + return NULL; + } + if (opt_hfa_dfu_boot) { + hfa_dfu_boot(hashfast); + hashfast = usb_free_cgpu(hashfast); + opt_hfa_dfu_boot = false; + return NULL; + } + if (!hfa_detect_common(hashfast)) { + usb_uninit(hashfast); + hashfast = usb_free_cgpu(hashfast); + return NULL; + } + if (!add_cgpu(hashfast)) + return NULL; + + if (opt_hfa_name) { + struct hashfast_info *info = hashfast->device_data; + + strncpy(info->op_name, opt_hfa_name, 30); + applog(LOG_NOTICE, "%s %d %03d:%03d: Writing name %s", hashfast->drv->name, + hashfast->device_id, hashfast->usbinfo.bus_number, hashfast->usbinfo.device_address, + info->op_name); + hfa_write_opname(hashfast, info); + opt_hfa_name = NULL; + } + + return hashfast; +} + +static void hfa_detect(bool __maybe_unused hotplug) +{ + /* Set up the CRC tables only once. 
*/ + if (!hfa_crc8_set) + hfa_init_crc8(); + usb_detect(&hashfast_drv, hfa_detect_one); +} + +static bool hfa_get_packet(struct cgpu_info *hashfast, struct hf_header *h) +{ + uint8_t hcrc; + bool ret; + + if (unlikely(hashfast->usbinfo.nodev)) + return false; + + ret = hfa_get_header(hashfast, h, &hcrc); + if (unlikely(!ret)) + goto out; + if (unlikely(h->crc8 != hcrc)) { + applog(LOG_WARNING, "%s %s: Bad CRC %d vs %d, discarding packet", + hashfast->drv->name, hashfast->unique_id, h->crc8, hcrc); + ret = false; + goto out; + } + if (h->data_length > 0) + ret = hfa_get_data(hashfast, (char *)(h + 1), h->data_length); + if (unlikely(!ret)) { + applog(LOG_WARNING, "%s %s: Failed to get data associated with header", + hashfast->drv->name, hashfast->unique_id); + } + +out: + return ret; +} + +static void hfa_running_shutdown(struct cgpu_info *hashfast, struct hashfast_info *info); + +static void hfa_parse_gwq_status(struct cgpu_info *hashfast, struct hashfast_info *info, + struct hf_header *h) +{ + struct hf_gwq_data *g = (struct hf_gwq_data *)(h + 1); + struct work *work; + + applog(LOG_DEBUG, "%s %s: OP_GWQ_STATUS, device_head %4d tail %4d my tail %4d shed %3d inflight %4d", + hashfast->drv->name, hashfast->unique_id, g->sequence_head, g->sequence_tail, info->hash_sequence_tail, + g->shed_count, HF_SEQUENCE_DISTANCE(info->hash_sequence_head,g->sequence_tail)); + + /* This is a special flag that the thermal overload has been tripped */ + if (unlikely(h->core_address & 0x80)) { + applog(LOG_ERR, "%s %s: Thermal overload tripped! 
Shutting down device", + hashfast->drv->name, hashfast->unique_id); + hfa_running_shutdown(hashfast, info); + usb_nodev(hashfast); + return; + } + + mutex_lock(&info->lock); + info->raw_hashes += g->hash_count; + info->device_sequence_head = g->sequence_head; + info->device_sequence_tail = g->sequence_tail; + info->shed_count = g->shed_count; + /* Free any work that is no longer required */ + while (info->device_sequence_tail != info->hash_sequence_tail) { + if (++info->hash_sequence_tail >= info->num_sequence) + info->hash_sequence_tail = 0; + if (unlikely(!(work = info->works[info->hash_sequence_tail]))) { + applog(LOG_ERR, "%s %s: Bad work sequence tail %d head %d devhead %d devtail %d sequence %d", + hashfast->drv->name, hashfast->unique_id, info->hash_sequence_tail, + info->hash_sequence_head, info->device_sequence_head, + info->device_sequence_tail, info->num_sequence); + hashfast->shutdown = true; + usb_nodev(hashfast); + break; + } + applog(LOG_DEBUG, "%s %s: Completing work on hash_sequence_tail %d", + hashfast->drv->name, hashfast->unique_id, info->hash_sequence_tail); + free_work(work); + info->works[info->hash_sequence_tail] = NULL; + } + mutex_unlock(&info->lock); +} + +/* Board temperature conversion */ +static float board_temperature(uint16_t adc) +{ + float t, r, f, b; + + if (adc < 40 || adc > 650) + return((float) 0.0); // Bad count + + b = 3590.0; + f = (float)adc / 1023.0; + r = 1.0 / (1.0 / f - 1.0); + t = log(r) / b; + t += 1.0 / (25.0 + 273.15); + t = 1.0 / t - 273.15; + + return t; +} + +static void hfa_update_die_status(struct cgpu_info *hashfast, struct hashfast_info *info, + struct hf_header *h) +{ + struct hf_g1_die_data *d = (struct hf_g1_die_data *)(h + 1), *ds; + int num_included = (h->data_length * 4) / sizeof(struct hf_g1_die_data); + int i, j, die = h->chip_address; + + float die_temperature, board_temp; + float core_voltage[6]; + + // Copy in the data. 
They're numbered sequentially from the starting point + ds = info->die_status + h->chip_address; + for (i = 0; i < num_included; i++) + memcpy(ds++, d++, sizeof(struct hf_g1_die_data)); + + for (i = 0, d = &info->die_status[h->chip_address]; i < num_included; i++, d++) { + die += i; + die_temperature = GN_DIE_TEMPERATURE(d->die.die_temperature); + /* Sanity checking */ + if (unlikely(die_temperature > 255)) + die_temperature = info->die_data[die].temp; + else + info->die_data[die].temp = die_temperature; + board_temp = board_temperature(d->temperature); + if (unlikely(board_temp > 255)) + board_temp = info->die_data[die].board_temp; + else + info->die_data[die].board_temp = board_temp; + for (j = 0; j < 6; j++) + core_voltage[j] = GN_CORE_VOLTAGE(d->die.core_voltage[j]); + + applog(LOG_DEBUG, "%s %s: die %2d: OP_DIE_STATUS Temps die %.1fC board %.1fC vdd's %.2f %.2f %.2f %.2f %.2f %.2f", + hashfast->drv->name, hashfast->unique_id, die, die_temperature, board_temp, + core_voltage[0], core_voltage[1], core_voltage[2], + core_voltage[3], core_voltage[4], core_voltage[5]); + // XXX Convert board phase currents, voltage, temperature + } + if (die == info->asic_count - 1) { + /* We have a full set of die temperatures, find the highest + * current temperature. */ + float max_temp = 0; + + info->temp_updates++; + + for (die = 0; die < info->asic_count; die++) { + if (info->die_data[die].temp > max_temp) + max_temp = info->die_data[die].temp; + if (info->die_data[die].board_temp > max_temp) + max_temp = info->die_data[die].board_temp; + } + /* Exponentially change the max_temp to smooth out troughs. 
*/ + hashfast->temp = hashfast->temp * 0.63 + max_temp * 0.37; + } + + if (unlikely(hashfast->temp >= opt_hfa_overheat)) { + /* -1 means new overheat condition */ + if (!info->overheat) + info->overheat = -1; + } else if (unlikely(info->overheat && hashfast->temp < opt_hfa_overheat - HFA_TEMP_HYSTERESIS)) + info->overheat = 0; +} + +static void hfa_parse_nonce(struct thr_info *thr, struct cgpu_info *hashfast, + struct hashfast_info *info, struct hf_header *h) +{ + struct hf_candidate_nonce *n = (struct hf_candidate_nonce *)(h + 1); + int i, num_nonces = h->data_length / U32SIZE(sizeof(struct hf_candidate_nonce)); + + applog(LOG_DEBUG, "%s %s: OP_NONCE: %2d/%2d:, num_nonces %d hdata 0x%04x", + hashfast->drv->name, hashfast->unique_id, h->chip_address, h->core_address, num_nonces, h->hdata); + for (i = 0; i < num_nonces; i++, n++) { + struct work *work = NULL; + + applog(LOG_DEBUG, "%s %s: OP_NONCE: %2d: %2d: ntime %2d sequence %4d nonce 0x%08x", + hashfast->drv->name, hashfast->unique_id, h->chip_address, i, n->ntime & HF_NTIME_MASK, n->sequence, n->nonce); + + if (n->sequence < info->usb_init_base.sequence_modulus) { + // Find the job from the sequence number + mutex_lock(&info->lock); + work = info->works[n->sequence]; + mutex_unlock(&info->lock); + } else { + applog(LOG_INFO, "%s %s: OP_NONCE: Sequence out of range %4d max %4d", + hashfast->drv->name, hashfast->unique_id, n->sequence, info->usb_init_base.sequence_modulus); + } + + if (unlikely(!work)) { + info->no_matching_work++; + applog(LOG_INFO, "%s %s: No matching work!", hashfast->drv->name, hashfast->unique_id); + } else { + applog(LOG_DEBUG, "%s %s: OP_NONCE: sequence %d: submitting nonce 0x%08x ntime %d", + hashfast->drv->name, hashfast->unique_id, n->sequence, n->nonce, n->ntime & HF_NTIME_MASK); + if (submit_noffset_nonce(thr, work, n->nonce, n->ntime & HF_NTIME_MASK)) { + mutex_lock(&info->lock); + info->hash_count += 0xffffffffull * work->device_diff; + mutex_unlock(&info->lock); + } +#if 0 /* Not 
used */ + if (unlikely(n->ntime & HF_NONCE_SEARCH)) { + /* This tells us there is another share in the + * next 128 nonces */ + applog(LOG_DEBUG, "%s %s: OP_NONCE: SEARCH PROXIMITY EVENT FOUND", + hashfast->drv->name, hashfast->unique_id); + } +#endif + } + } +} + +static void hfa_update_die_statistics(struct hashfast_info *info, struct hf_header *h) +{ + struct hf_statistics *s = (struct hf_statistics *)(h + 1); + struct hf_long_statistics *l; + + // Accumulate the data + l = info->die_statistics + h->chip_address; + + l->rx_header_crc += s->rx_header_crc; + l->rx_body_crc += s->rx_body_crc; + l->rx_header_timeouts += s->rx_header_timeouts; + l->rx_body_timeouts += s->rx_body_timeouts; + l->core_nonce_fifo_full += s->core_nonce_fifo_full; + l->array_nonce_fifo_full += s->array_nonce_fifo_full; + l->stats_overrun += s->stats_overrun; +} + +static void hfa_update_stats1(struct cgpu_info *hashfast, struct hashfast_info *info, + struct hf_header *h) +{ + struct hf_long_usb_stats1 *s1 = &info->stats1; + struct hf_usb_stats1 *sd = (struct hf_usb_stats1 *)(h + 1); + + s1->usb_rx_preambles += sd->usb_rx_preambles; + s1->usb_rx_receive_byte_errors += sd->usb_rx_receive_byte_errors; + s1->usb_rx_bad_hcrc += sd->usb_rx_bad_hcrc; + + s1->usb_tx_attempts += sd->usb_tx_attempts; + s1->usb_tx_packets += sd->usb_tx_packets; + s1->usb_tx_timeouts += sd->usb_tx_timeouts; + s1->usb_tx_incompletes += sd->usb_tx_incompletes; + s1->usb_tx_endpointstalled += sd->usb_tx_endpointstalled; + s1->usb_tx_disconnected += sd->usb_tx_disconnected; + s1->usb_tx_suspended += sd->usb_tx_suspended; +#if 0 + /* We don't care about UART stats so they're not in our struct */ + s1->uart_tx_queue_dma += sd->uart_tx_queue_dma; + s1->uart_tx_interrupts += sd->uart_tx_interrupts; + + s1->uart_rx_preamble_ints += sd->uart_rx_preamble_ints; + s1->uart_rx_missed_preamble_ints += sd->uart_rx_missed_preamble_ints; + s1->uart_rx_header_done += sd->uart_rx_header_done; + s1->uart_rx_data_done += 
sd->uart_rx_data_done; + s1->uart_rx_bad_hcrc += sd->uart_rx_bad_hcrc; + s1->uart_rx_bad_dma += sd->uart_rx_bad_dma; + s1->uart_rx_short_dma += sd->uart_rx_short_dma; + s1->uart_rx_buffers_full += sd->uart_rx_buffers_full; +#endif + if (sd->max_tx_buffers > s1->max_tx_buffers) + s1->max_tx_buffers = sd->max_tx_buffers; + if (sd->max_rx_buffers > s1->max_rx_buffers) + s1->max_rx_buffers = sd->max_rx_buffers; + + applog(LOG_DEBUG, "%s %s: OP_USB_STATS1:", hashfast->drv->name, hashfast->unique_id); + applog(LOG_DEBUG, " usb_rx_preambles: %6d", sd->usb_rx_preambles); + applog(LOG_DEBUG, " usb_rx_receive_byte_errors: %6d", sd->usb_rx_receive_byte_errors); + applog(LOG_DEBUG, " usb_rx_bad_hcrc: %6d", sd->usb_rx_bad_hcrc); + + applog(LOG_DEBUG, " usb_tx_attempts: %6d", sd->usb_tx_attempts); + applog(LOG_DEBUG, " usb_tx_packets: %6d", sd->usb_tx_packets); + applog(LOG_DEBUG, " usb_tx_timeouts: %6d", sd->usb_tx_timeouts); + applog(LOG_DEBUG, " usb_tx_incompletes: %6d", sd->usb_tx_incompletes); + applog(LOG_DEBUG, " usb_tx_endpointstalled: %6d", sd->usb_tx_endpointstalled); + applog(LOG_DEBUG, " usb_tx_disconnected: %6d", sd->usb_tx_disconnected); + applog(LOG_DEBUG, " usb_tx_suspended: %6d", sd->usb_tx_suspended); +#if 0 + applog(LOG_DEBUG, " uart_tx_queue_dma: %6d", sd->uart_tx_queue_dma); + applog(LOG_DEBUG, " uart_tx_interrupts: %6d", sd->uart_tx_interrupts); + + applog(LOG_DEBUG, " uart_rx_preamble_ints: %6d", sd->uart_rx_preamble_ints); + applog(LOG_DEBUG, " uart_rx_missed_preamble_ints: %6d", sd->uart_rx_missed_preamble_ints); + applog(LOG_DEBUG, " uart_rx_header_done: %6d", sd->uart_rx_header_done); + applog(LOG_DEBUG, " uart_rx_data_done: %6d", sd->uart_rx_data_done); + applog(LOG_DEBUG, " uart_rx_bad_hcrc: %6d", sd->uart_rx_bad_hcrc); + applog(LOG_DEBUG, " uart_rx_bad_dma: %6d", sd->uart_rx_bad_dma); + applog(LOG_DEBUG, " uart_rx_short_dma: %6d", sd->uart_rx_short_dma); + applog(LOG_DEBUG, " uart_rx_buffers_full: %6d", sd->uart_rx_buffers_full); +#endif + 
applog(LOG_DEBUG, " max_tx_buffers: %6d", sd->max_tx_buffers); + applog(LOG_DEBUG, " max_rx_buffers: %6d", sd->max_rx_buffers); +} + +static void hfa_parse_notice(struct cgpu_info *hashfast, struct hf_header *h) +{ + struct hf_usb_notice_data *d; + + if (h->data_length == 0) { + applog(LOG_DEBUG, "%s %s: Received OP_USB_NOTICE with zero data length", + hashfast->drv->name, hashfast->unique_id); + return; + } + d = (struct hf_usb_notice_data *)(h + 1); + /* FIXME Do something with the notification code d->extra_data here */ + applog(LOG_NOTICE, "%s %s NOTICE: %s", hashfast->drv->name, hashfast->unique_id, d->message); +} + +static void hfa_parse_settings(struct cgpu_info *hashfast, struct hf_header *h) +{ + struct hashfast_info *info = hashfast->device_data; + struct hf_settings_data *op_settings_data = (struct hf_settings_data *)(h + 1); + + // Check if packet size, revision and magic are matching + if ((h->data_length * 4 == sizeof(struct hf_settings_data)) && + (h->core_address == 0) && + (op_settings_data->revision == 1) && + (op_settings_data->magic == HFA_MAGIC_SETTINGS_VALUE)) + { + applog(LOG_NOTICE, "%s: Device settings (%dMHz@%dmV,%dMHz@%dmV,%dMHz@%dmV,%dMHz@%dmV)", hashfast->drv->name, + op_settings_data->frequency0, op_settings_data->voltage0, + op_settings_data->frequency1, op_settings_data->voltage1, + op_settings_data->frequency2, op_settings_data->voltage2, + op_settings_data->frequency3, op_settings_data->voltage3); + // Set voltage only when current voltage values are different + if ((info->hash_voltage != 0) && + ((op_settings_data->voltage0 != info->hash_voltage) || + (op_settings_data->voltage1 != info->hash_voltage) || + (op_settings_data->voltage2 != info->hash_voltage) || + (op_settings_data->voltage3 != info->hash_voltage))) { + applog(LOG_NOTICE, "%s: Setting default clock and voltage to %dMHz@%dmV", + hashfast->drv->name, info->hash_clock_rate, info->hash_voltage); + hfa_set_voltages(hashfast, info); + } + } +} + +static void 
*hfa_read(void *arg) +{ + struct thr_info *thr = (struct thr_info *)arg; + struct cgpu_info *hashfast = thr->cgpu; + struct hashfast_info *info = hashfast->device_data; + char threadname[16]; + + snprintf(threadname, sizeof(threadname), "%d/%sRead", hashfast->device_id, hashfast->drv->name); + RenameThread(threadname); + + while (likely(!hashfast->shutdown)) { + char buf[512]; + struct hf_header *h = (struct hf_header *)buf; + bool ret; + + mutex_lock(&info->rlock); + ret = hfa_get_packet(hashfast, h); + mutex_unlock(&info->rlock); + + if (unlikely(hashfast->usbinfo.nodev)) + break; + + if (unlikely(!ret)) + continue; + + switch (h->operation_code) { + case OP_GWQ_STATUS: + hfa_parse_gwq_status(hashfast, info, h); + break; + case OP_DIE_STATUS: + hfa_update_die_status(hashfast, info, h); + break; + case OP_NONCE: + hfa_parse_nonce(thr, hashfast, info, h); + break; + case OP_STATISTICS: + hfa_update_die_statistics(info, h); + break; + case OP_USB_STATS1: + hfa_update_stats1(hashfast, info, h); + break; + case OP_USB_NOTICE: + hfa_parse_notice(hashfast, h); + break; + case OP_SETTINGS: + hfa_parse_settings(hashfast, h); + break; + case OP_POWER: + case OP_PING: + /* Do nothing */ + break; + default: + if (h->operation_code == OP_FAN) { + applog(LOG_NOTICE, "%s %s: Firmware upgrade required to support fan control", + hashfast->drv->name, hashfast->unique_id); + opt_hfa_target = 0; + break; + } + applog(LOG_WARNING, "%s %s: Unhandled operation code %d", + hashfast->drv->name, hashfast->unique_id, h->operation_code); + break; + } + /* Make sure we send something to the device at least every 5 + * seconds so it knows the driver is still alive for when we + * run out of work. The read thread never blocks so is the + * best place to do this. 
*/ + if (time(NULL) - info->last_send > 5) + hfa_send_frame(hashfast, HF_USB_CMD(OP_PING), 0, NULL, 0); + } + applog(LOG_DEBUG, "%s %s: Shutting down read thread", hashfast->drv->name, hashfast->unique_id); + + return NULL; +} + +static void hfa_set_fanspeed(struct cgpu_info *hashfast, struct hashfast_info *info, + int fanspeed); + +static bool hfa_init(struct thr_info *thr) +{ + struct cgpu_info *hashfast = thr->cgpu; + struct hashfast_info *info = hashfast->device_data; + struct timeval now; + bool ret; + int i; + + if (hashfast->usbinfo.nodev) + return false; + + /* hashfast_reset should fill in details for info */ + ret = hfa_reset(hashfast, info); + + // The per-die status array + info->die_status = calloc(info->asic_count, sizeof(struct hf_g1_die_data)); + if (unlikely(!(info->die_status))) + quit(1, "Failed to calloc die_status"); + + info->die_data = calloc(info->asic_count, sizeof(struct hf_die_data)); + if (unlikely(!(info->die_data))) + quit(1, "Failed to calloc die_data"); + for (i = 0; i < info->asic_count; i++) + info->die_data[i].hash_clock = info->base_clock; + + // The per-die statistics array + info->die_statistics = calloc(info->asic_count, sizeof(struct hf_long_statistics)); + if (unlikely(!(info->die_statistics))) + quit(1, "Failed to calloc die_statistics"); + + info->works = calloc(sizeof(struct work *), info->num_sequence); + if (!info->works) + quit(1, "Failed to calloc info works in hfa_detect_common"); + if (!ret) + goto out; + + /* We will have extracted the serial number by now */ + if (info->has_opname && !info->opname_valid) + hfa_choose_opname(hashfast, info); + + /* Use the opname as the displayed unique identifier */ + hashfast->unique_id = info->op_name; + + /* Inherit the old device id */ + if (info->old_cgpu) + hashfast->device_id = info->old_cgpu->device_id; + + /* If we haven't found a matching old instance, we might not have + * a valid op_name yet or lack support so try to match based on + * serial number. 
*/ + if (!info->old_cgpu) + info->old_cgpu = hfa_old_device(hashfast, info); + + if (!info->has_opname && info->old_cgpu) { + struct hashfast_info *cinfo = info->old_cgpu->device_data; + + applog(LOG_NOTICE, "%s: Found old instance by serial number %08x at device %d", + hashfast->drv->name, info->serial_number, info->old_cgpu->device_id); + info->resets = ++cinfo->resets; + /* Set the device with the last hash_clock_rate if it's + * different. */ + if (info->hash_clock_rate != cinfo->hash_clock_rate) { + info->hash_clock_rate = cinfo->hash_clock_rate; + hfa_set_clock(hashfast, info); + } + } + + // Read current device settings if voltage was set in options + if (info->hash_voltage != 0) + hfa_send_generic_frame(hashfast, OP_SETTINGS, 0x00, 0x00, HFA_MAGIC_SETTINGS_VALUE, NULL, 0); + + mutex_init(&info->lock); + mutex_init(&info->rlock); + if (pthread_create(&info->read_thr, NULL, hfa_read, (void *)thr)) + quit(1, "Failed to pthread_create read thr in hfa_prepare"); + + cgtime(&now); + get_datestamp(hashfast->init, sizeof(hashfast->init), &now); + hashfast->last_device_valid_work = time(NULL); + hfa_set_fanspeed(hashfast, info, opt_hfa_fan_default); +out: + if (hashfast->usbinfo.nodev) + ret = false; + + if (!ret) { + hfa_clear_readbuf(hashfast); + free(info); + hashfast->device_data = NULL; + usb_nodev(hashfast); + } + + return ret; +} + +/* If this ever returns 0 it means we have shed all the cores which will lead + * to no work being done which will trigger the watchdog. */ +static inline int hfa_basejobs(struct hashfast_info *info) +{ + return info->usb_init_base.inflight_target - info->shed_count; +} + +/* Figure out how many jobs to send. 
*/ +static int hfa_jobs(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + int ret = 0; + + if (unlikely(info->overheat)) { + /* Acknowledge and notify of new condition.*/ + if (info->overheat < 0) { + applog(LOG_WARNING, "%s %s: Hit overheat temp %.1f, throttling!", + hashfast->drv->name, hashfast->unique_id, hashfast->temp); + /* Value of 1 means acknowledged overheat */ + info->overheat = 1; + } + goto out; + } + + mutex_lock(&info->lock); + ret = hfa_basejobs(info) - HF_SEQUENCE_DISTANCE(info->hash_sequence_head, info->device_sequence_tail); + /* Place an upper limit on how many jobs to queue to prevent sending + * more work than the device can use after a period of outage. */ + if (ret > hfa_basejobs(info)) + ret = hfa_basejobs(info); + mutex_unlock(&info->lock); + + if (unlikely(ret < 0)) + ret = 0; + +out: + return ret; +} + +static void hfa_set_fanspeed(struct cgpu_info *hashfast, struct hashfast_info *info, + int fandiff) +{ + const uint8_t opcode = HF_USB_CMD(OP_FAN); + uint8_t packet[256]; + struct hf_header *p = (struct hf_header *)packet; + const int tx_length = sizeof(struct hf_header); + uint16_t hdata; + int fandata; + + info->fanspeed += fandiff; + if (info->fanspeed > opt_hfa_fan_max) + info->fanspeed = opt_hfa_fan_max; + else if (info->fanspeed < opt_hfa_fan_min) + info->fanspeed = opt_hfa_fan_min; + fandata = info->fanspeed * 255 / 100; // Fanspeed is in percent, hdata 0-255 + hdata = fandata; // Use an int first to avoid overflowing uint16_t + p->preamble = HF_PREAMBLE; + p->operation_code = hfa_cmds[opcode].cmd; + p->chip_address = 0xff; + p->core_address = 1; + p->hdata = htole16(hdata); + p->data_length = 0; + p->crc8 = hfa_crc8(packet); + + __hfa_send_frame(hashfast, opcode, tx_length, packet); +} + +static void hfa_increase_clock(struct cgpu_info *hashfast, struct hashfast_info *info, + int die) +{ + int i, high_clock = 0, low_clock = info->hash_clock_rate; + struct hf_die_data *hdd = &info->die_data[die]; + uint32_t diebit = 
0x00000001ul << die; + uint16_t hdata, increase = 10; + + if (hdd->hash_clock + increase > info->hash_clock_rate) + increase = info->hash_clock_rate - hdd->hash_clock; + hdd->hash_clock += increase; + hdata = (WR_MHZ_INCREASE << 12) | increase; + if (info->clock_offset) { + for (i = 0; i < info->asic_count; i++) { + if (info->die_data[i].hash_clock > high_clock) + high_clock = info->die_data[i].hash_clock; + if (info->die_data[i].hash_clock < low_clock) + low_clock = info->die_data[i].hash_clock; + } + if (info->firmware_version < 0.5 && low_clock + HFA_CLOCK_MAXDIFF > high_clock) { + /* We can increase all clocks again */ + for (i = 0; i < info->asic_count; i++) { + if (i == die) /* We've already added to this die */ + continue; + info->die_data[i].hash_clock += increase; + } + applog(LOG_INFO, "%s %s: Die %d temp below range %.1f, increasing ALL dies by %d", + hashfast->drv->name, hashfast->unique_id, die, info->die_data[die].temp, increase); + hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), hdata, (uint8_t *)NULL, 0); + info->clock_offset -= increase; + return; + } + } + applog(LOG_INFO, "%s %s: Die temp below range %.1f, increasing die %d clock to %d", + hashfast->drv->name, hashfast->unique_id, info->die_data[die].temp, die, hdd->hash_clock); + hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), hdata, (uint8_t *)&diebit, 4); +} + +static void hfa_decrease_clock(struct cgpu_info *hashfast, struct hashfast_info *info, + int die) +{ + struct hf_die_data *hdd = &info->die_data[die]; + uint32_t diebit = 0x00000001ul << die; + uint16_t hdata, decrease = 20; + int i, high_clock = 0; + + /* Find the fastest die for comparison */ + for (i = 0; i < info->asic_count; i++) { + if (info->die_data[i].hash_clock > high_clock) + high_clock = info->die_data[i].hash_clock; + } + if (hdd->hash_clock - decrease < HFA_CLOCK_MIN) + decrease = hdd->hash_clock - HFA_CLOCK_MIN; + hdata = (WR_MHZ_DECREASE << 12) | decrease; + if (info->firmware_version < 0.5 && high_clock >= 
hdd->hash_clock + HFA_CLOCK_MAXDIFF) { + /* We can't have huge differences in clocks as it will lead to + * starvation of the faster cores so we have no choice but to + * slow down all dies to tame this one. */ + for (i = 0; i < info->asic_count; i++) + info->die_data[i].hash_clock -= decrease; + applog(LOG_INFO, "%s %s: Die %d temp above range %.1f, decreasing ALL die clocks by %d", + hashfast->drv->name, hashfast->unique_id, die, info->die_data[die].temp, decrease); + hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), hdata, (uint8_t *)NULL, 0); + info->clock_offset += decrease; + return; + + } + hdd->hash_clock -= decrease; + applog(LOG_INFO, "%s %s: Die temp above range %.1f, decreasing die %d clock to %d", + hashfast->drv->name, hashfast->unique_id, info->die_data[die].temp, die, hdd->hash_clock); + hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), hdata, (uint8_t *)&diebit, 4); +} + +/* Adjust clock according to temperature if need be by changing the clock + * setting and issuing a work restart with the new clock speed. */ +static void hfa_temp_clock(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + int temp_change, i, low_clock; + time_t now_t = time(NULL); + bool throttled = false; + + if (!opt_hfa_target) + return; + + /* First find out if any dies are throttled before trying to optimise + * fanspeed, and find the slowest clock. 
*/ + low_clock = info->hash_clock_rate; + for (i = 0; i < info->asic_count ; i++) { + struct hf_die_data *hdd = &info->die_data[i]; + + if (hdd->hash_clock < info->hash_clock_rate) + throttled = true; + if (hdd->hash_clock < low_clock) + low_clock = hdd->hash_clock; + } + + /* Find the direction of temperature change since we last checked */ + if (info->temp_updates < 5) + goto dies_only; + info->temp_updates = 0; + temp_change = hashfast->temp - info->last_max_temp; + info->last_max_temp = hashfast->temp; + + /* Adjust fanspeeds first if possible before die speeds, increasing + * speed quickly and lowering speed slowly */ + if (hashfast->temp > opt_hfa_target || + (throttled && hashfast->temp >= opt_hfa_target - HFA_TEMP_HYSTERESIS)) { + /* We should be trying to decrease temperature, if it's not on + * its way down. */ + if (info->fanspeed < opt_hfa_fan_max) { + if (!temp_change) + hfa_set_fanspeed(hashfast, info, 5); + else if (temp_change > 0) + hfa_set_fanspeed(hashfast, info, 10); + } + } else if (hashfast->temp >= opt_hfa_target - HFA_TEMP_HYSTERESIS) { + /* In optimal range, try and maintain the same temp */ + if (temp_change > 0) { + /* Temp rising, tweak fanspeed up */ + if (info->fanspeed < opt_hfa_fan_max) + hfa_set_fanspeed(hashfast, info, 2); + } else if (temp_change < 0) { + /* Temp falling, tweak fanspeed down */ + if (info->fanspeed > opt_hfa_fan_min) + hfa_set_fanspeed(hashfast, info, -1); + } + } else { + /* Below optimal range, try and increase temp */ + if (temp_change <= 0 && !throttled) { + if (info->fanspeed > opt_hfa_fan_min) + hfa_set_fanspeed(hashfast, info, -1); + } + } + +dies_only: + /* Do no restarts at all if there has been one less than 15 seconds + * ago */ + if (now_t - info->last_restart < 15) + return; + + for (i = 1; i <= info->asic_count ; i++) { + int die = (info->last_die_adjusted + i) % info->asic_count; + struct hf_die_data *hdd = &info->die_data[die]; + + /* Sanity check */ + if (unlikely(hdd->temp == 0.0 || hdd->temp > 
255)) + continue; + + /* In target temperature */ + if (hdd->temp >= opt_hfa_target - HFA_TEMP_HYSTERESIS && hdd->temp <= opt_hfa_target) + continue; + + if (hdd->temp > opt_hfa_target) { + /* Temp above target range */ + + /* Already at min speed */ + if (hdd->hash_clock == HFA_CLOCK_MIN) + continue; + /* Have some leeway before throttling speed */ + if (hdd->temp < opt_hfa_target + HFA_TEMP_HYSTERESIS) + break; + hfa_decrease_clock(hashfast, info, die); + } else { + /* Temp below target range. Only send a restart to + * increase speed no more than every 60 seconds. */ + if (now_t - hdd->last_restart < 60) + continue; + + /* Already at max speed */ + if (hdd->hash_clock == info->hash_clock_rate) + continue; + /* Do not increase the clocks on any dies if we have + * a forced offset due to wild differences in clocks, + * unless this is the slowest one. */ + if (info->clock_offset && hdd->hash_clock > low_clock) + continue; + hfa_increase_clock(hashfast, info, die); + } + /* Keep track of the last die adjusted since we only adjust + * one at a time to ensure we end up iterating over all of + * them. */ + info->last_restart = hdd->last_restart = now_t; + info->last_die_adjusted = die; + break; + } +} + +static void hfa_running_shutdown(struct cgpu_info *hashfast, struct hashfast_info *info) +{ + int iruntime = cgpu_runtime(hashfast); + + /* If the device has already disapperaed, don't drop the clock in case + * it was just unplugged as opposed to a failure. */ + if (hashfast->usbinfo.nodev) + return; + + /* Only decrease the clock speed if the device has run at this speed + * for less than an hour before failing, otherwise the hashrate gains + * are worth the occasional restart which takes at most a minute. 
*/ + if (iruntime < 3600 && info->hash_clock_rate > HFA_CLOCK_DEFAULT && opt_hfa_fail_drop) { + info->hash_clock_rate -= opt_hfa_fail_drop; + if (info->hash_clock_rate < HFA_CLOCK_DEFAULT) + info->hash_clock_rate = HFA_CLOCK_DEFAULT; + if (info->old_cgpu && info->old_cgpu->device_data) { + struct hashfast_info *cinfo = info->old_cgpu->device_data; + + /* Set the master device's clock speed if this is a copy */ + cinfo->hash_clock_rate = info->hash_clock_rate; + } + applog(LOG_WARNING, "%s %s: Decreasing clock speed to %d with reset", + hashfast->drv->name, hashfast->unique_id, info->hash_clock_rate); + } + + if (!hfa_send_shutdown(hashfast)) + return; + + if (hashfast->usbinfo.nodev) + return; + + mutex_lock(&info->rlock); + hfa_clear_readbuf(hashfast); + mutex_unlock(&info->rlock); + + usb_nodev(hashfast); +} + +static int64_t hfa_scanwork(struct thr_info *thr) +{ + struct cgpu_info *hashfast = thr->cgpu; + struct hashfast_info *info = hashfast->device_data; + struct work *base_work = NULL; + int jobs, ret, cycles = 0; + double fail_time; + int64_t hashes; + + if (unlikely(hashfast->usbinfo.nodev)) { + applog(LOG_WARNING, "%s %s: device disappeared, disabling", + hashfast->drv->name, hashfast->unique_id); + return -1; + } + + /* Base the fail time on no valid nonces for 25 full nonce ranges at + * the current expected hashrate. 
*/ + fail_time = 25.0 * (double)hashfast->drv->max_diff * 0xffffffffull / + (double)(info->base_clock * 1000000) / hfa_basejobs(info); + if (unlikely(share_work_tdiff(hashfast) > fail_time)) { + applog(LOG_WARNING, "%s %s: No valid hashes for over %.0f seconds, shutting down thread", + hashfast->drv->name, hashfast->unique_id, fail_time); + hfa_running_shutdown(hashfast, info); + return -1; + } + + if (unlikely(thr->work_restart)) { +restart: + info->last_restart = time(NULL); + thr->work_restart = false; + ret = hfa_send_frame(hashfast, HF_USB_CMD(OP_WORK_RESTART), 0, (uint8_t *)NULL, 0); + if (unlikely(!ret)) { + hfa_running_shutdown(hashfast, info); + return -1; + } + /* Give a full allotment of jobs after a restart, not waiting + * for the status update telling us how much to give. */ + jobs = hfa_basejobs(info); + } else { + /* Only adjust die clocks if there's no restart since two + * restarts back to back get ignored. */ + hfa_temp_clock(hashfast, info); + jobs = hfa_jobs(hashfast, info); + } + + /* Wait on restart_wait for up to 0.5 seconds or submit jobs as soon as + * they're required. */ + while (!jobs && ++cycles < 5) { + ret = restart_wait(thr, 100); + if (unlikely(!ret)) + goto restart; + jobs = hfa_jobs(hashfast, info); + } + + if (jobs) { + applog(LOG_DEBUG, "%s %s: Sending %d new jobs", hashfast->drv->name, hashfast->unique_id, + jobs); + } + + while (jobs-- > 0) { + struct hf_hash_usb op_hash_data; + struct work *work; + uint64_t intdiff; + int i, sequence; + uint32_t *p; + + /* This is a blocking function if there's no work */ + if (!base_work) + base_work = get_work(thr, thr->id); + + /* HFA hardware actually had ntime rolling disabled so we + * can roll the work ourselves here to minimise the amount of + * work we need to generate. 
*/ + if (base_work->drv_rolllimit > jobs) { + base_work->drv_rolllimit--; + roll_work(base_work); + work = make_clone(base_work); + } else { + work = base_work; + base_work = NULL; + } + + /* Assemble the data frame and send the OP_HASH packet */ + memcpy(op_hash_data.midstate, work->midstate, sizeof(op_hash_data.midstate)); + memcpy(op_hash_data.merkle_residual, work->data + 64, 4); + p = (uint32_t *)(work->data + 64 + 4); + op_hash_data.timestamp = *p++; + op_hash_data.bits = *p++; + op_hash_data.starting_nonce = 0; + op_hash_data.nonce_loops = 0; + op_hash_data.ntime_loops = 0; + + /* Set the number of leading zeroes to look for based on diff. + * Diff 1 = 32, Diff 2 = 33, Diff 4 = 34 etc. */ + intdiff = (uint64_t)work->device_diff; + for (i = 31; intdiff; i++, intdiff >>= 1); + op_hash_data.search_difficulty = i; + op_hash_data.group = 0; + if ((sequence = info->hash_sequence_head + 1) >= info->num_sequence) + sequence = 0; + ret = hfa_send_frame(hashfast, OP_HASH, sequence, (uint8_t *)&op_hash_data, sizeof(op_hash_data)); + if (unlikely(!ret)) { + free_work(work); + if (base_work) + free_work(base_work); + hfa_running_shutdown(hashfast, info); + return -1; + } + + mutex_lock(&info->lock); + info->hash_sequence_head = sequence; + info->works[info->hash_sequence_head] = work; + mutex_unlock(&info->lock); + + applog(LOG_DEBUG, "%s %s: OP_HASH sequence %d search_difficulty %d work_difficulty %g", + hashfast->drv->name, hashfast->unique_id, info->hash_sequence_head, + op_hash_data.search_difficulty, work->work_difficulty); + } + + if (base_work) + free_work(base_work); + + /* Only count 2/3 of the hashes to smooth out the hashrate for cycles + * that have no hashes added. 
*/ + mutex_lock(&info->lock); + hashes = info->hash_count / 3 * 2; + info->calc_hashes += hashes; + info->hash_count -= hashes; + mutex_unlock(&info->lock); + + return hashes; +} + +static struct api_data *hfa_api_stats(struct cgpu_info *cgpu) +{ + struct hashfast_info *info; + struct hf_long_usb_stats1 *s1; + struct api_data *root = NULL; + struct hf_usb_init_base *db; + int varint, i; + char buf[64]; + + info = cgpu->device_data; + if (!info) + return NULL; + + root = api_add_int(root, "asic count", &info->asic_count, false); + root = api_add_int(root, "core count", &info->core_count, false); + + root = api_add_double(root, "firmware rev", &info->firmware_version, false); + root = api_add_double(root, "hardware rev", &info->hardware_version, false); + db = &info->usb_init_base; + root = api_add_hex32(root, "serial number", &db->serial_number, true); + varint = db->hash_clockrate; + root = api_add_int(root, "base clockrate", &varint, true); + varint = db->inflight_target; + root = api_add_int(root, "inflight target", &varint, true); + varint = db->sequence_modulus; + root = api_add_int(root, "sequence modulus", &varint, true); + root = api_add_int(root, "fan percent", &info->fanspeed, false); + if (info->op_name[0] != '\0') + root = api_add_string(root, "op name", info->op_name, false); + + s1 = &info->stats1; + root = api_add_uint64(root, "rx preambles", &s1->usb_rx_preambles, false); + root = api_add_uint64(root, "rx rcv byte err", &s1->usb_rx_receive_byte_errors, false); + root = api_add_uint64(root, "rx bad hcrc", &s1->usb_rx_bad_hcrc, false); + root = api_add_uint64(root, "tx attempts", &s1->usb_tx_attempts, false); + root = api_add_uint64(root, "tx packets", &s1->usb_tx_packets, false); + root = api_add_uint64(root, "tx incompletes", &s1->usb_tx_incompletes, false); + root = api_add_uint64(root, "tx ep stalled", &s1->usb_tx_endpointstalled, false); + root = api_add_uint64(root, "tx disconnect", &s1->usb_tx_disconnected, false); + root = api_add_uint64(root, 
"tx suspend", &s1->usb_tx_suspended, false); + varint = s1->max_tx_buffers; + root = api_add_int(root, "max tx buf", &varint, true); + varint = s1->max_rx_buffers; + root = api_add_int(root, "max rx buf", &varint, true); + + for (i = 0; i < info->asic_count; i++) { + struct hf_long_statistics *l; + struct hf_g1_die_data *d; + char which[16]; + double val; + int j; + + if (!info->die_statistics || !info->die_status) + continue; + l = &info->die_statistics[i]; + if (!l) + continue; + d = &info->die_status[i]; + if (!d) + continue; + snprintf(which, sizeof(which), "Asic%d", i); + + snprintf(buf, sizeof(buf), "%s hash clockrate", which); + root = api_add_int(root, buf, &(info->die_data[i].hash_clock), false); + snprintf(buf, sizeof(buf), "%s die temperature", which); + val = GN_DIE_TEMPERATURE(d->die.die_temperature); + root = api_add_double(root, buf, &val, true); + snprintf(buf, sizeof(buf), "%s board temperature", which); + val = board_temperature(d->temperature); + root = api_add_double(root, buf, &val, true); + for (j = 0; j < 6; j++) { + snprintf(buf, sizeof(buf), "%s voltage %d", which, j); + val = GN_CORE_VOLTAGE(d->die.core_voltage[j]); + root = api_add_utility(root, buf, &val, true); + } + snprintf(buf, sizeof(buf), "%s rx header crc", which); + root = api_add_uint64(root, buf, &l->rx_header_crc, false); + snprintf(buf, sizeof(buf), "%s rx body crc", which); + root = api_add_uint64(root, buf, &l->rx_body_crc, false); + snprintf(buf, sizeof(buf), "%s rx header to", which); + root = api_add_uint64(root, buf, &l->rx_header_timeouts, false); + snprintf(buf, sizeof(buf), "%s rx body to", which); + root = api_add_uint64(root, buf, &l->rx_body_timeouts, false); + snprintf(buf, sizeof(buf), "%s cn fifo full", which); + root = api_add_uint64(root, buf, &l->core_nonce_fifo_full, false); + snprintf(buf, sizeof(buf), "%s an fifo full", which); + root = api_add_uint64(root, buf, &l->array_nonce_fifo_full, false); + snprintf(buf, sizeof(buf), "%s stats overrun", which); + 
root = api_add_uint64(root, buf, &l->stats_overrun, false); + } + + root = api_add_uint64(root, "raw hashcount", &info->raw_hashes, false); + root = api_add_uint64(root, "calc hashcount", &info->calc_hashes, false); + root = api_add_int(root, "no matching work", &info->no_matching_work, false); + root = api_add_uint16(root, "shed count", &info->shed_count, false); + root = api_add_int(root, "resets", &info->resets, false); + + return root; +} + +static void hfa_statline_before(char *buf, size_t bufsiz, struct cgpu_info *hashfast) +{ + struct hashfast_info *info; + struct hf_g1_die_data *d; + double max_volt; + int i; + + if (!hashfast->device_data) + return; + info = hashfast->device_data; + /* Can happen during init sequence */ + if (!info->die_status) + return; + max_volt = 0.0; + + for (i = 0; i < info->asic_count; i++) { + int j; + + d = &info->die_status[i]; + for (j = 0; j < 6; j++) { + double volt = GN_CORE_VOLTAGE(d->die.core_voltage[j]); + + if (volt > max_volt) + max_volt = volt; + } + } + + tailsprintf(buf, bufsiz, "%3dMHz %3.0fC %3d%% %3.2fV", info->base_clock, + hashfast->temp, info->fanspeed, max_volt); +} + +/* We cannot re-initialise so just shut down the device for it to hotplug + * again. 
*/ +static void hfa_reinit(struct cgpu_info *hashfast) +{ + if (hashfast && hashfast->device_data) + hfa_running_shutdown(hashfast, hashfast->device_data); +} + +static void hfa_free_all_work(struct hashfast_info *info) +{ + while (info->device_sequence_tail != info->hash_sequence_head) { + struct work *work; + + if (++info->hash_sequence_tail >= info->num_sequence) + info->hash_sequence_tail = 0; + if (unlikely(!(work = info->works[info->hash_sequence_tail]))) + break; + free_work(work); + info->works[info->hash_sequence_tail] = NULL; + } +} + +static void hfa_shutdown(struct thr_info *thr) +{ + struct cgpu_info *hashfast = thr->cgpu; + struct hashfast_info *info = hashfast->device_data; + + hfa_send_shutdown(hashfast); + pthread_join(info->read_thr, NULL); + hfa_free_all_work(info); + hfa_clear_readbuf(hashfast); + free(info->works); + free(info->die_statistics); + free(info->die_status); + free(info->die_data); + /* Keep the device data intact to allow new instances to match old + * ones. */ +} + +struct device_drv hashfast_drv = { + .drv_id = DRIVER_hashfast, + .dname = "Hashfast", + .name = "HFA", + .max_diff = 32.0, // Limit max diff to get some nonces back regardless + .drv_detect = hfa_detect, + .thread_init = hfa_init, + .hash_work = &hash_driver_work, + .scanwork = hfa_scanwork, + .get_api_stats = hfa_api_stats, + .get_statline_before = hfa_statline_before, + .reinit_device = hfa_reinit, + .thread_shutdown = hfa_shutdown, +}; diff --git a/driver-hashfast.h b/driver-hashfast.h new file mode 100644 index 0000000..331bbd6 --- /dev/null +++ b/driver-hashfast.h @@ -0,0 +1,163 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2013 Hashfast + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#ifndef HASHFAST_H +#define HASHFAST_H + +#ifdef USE_HASHFAST +#include "miner.h" +#include "elist.h" +#include "hf_protocol.h" + +int opt_hfa_ntime_roll; +int opt_hfa_hash_clock; +int opt_hfa_overheat; +int opt_hfa_target; +bool opt_hfa_pll_bypass; +bool opt_hfa_dfu_boot; +int opt_hfa_fan_default; +int opt_hfa_fan_max; +int opt_hfa_fan_min; +int opt_hfa_fail_drop; +bool opt_hfa_noshed; + +char *set_hfa_fan(char *arg); +char *opt_hfa_name; +char *opt_hfa_options; + +#define HASHFAST_MINER_THREADS 1 +#define HFA_CLOCK_DEFAULT 550 +#define HFA_CLOCK_MIN 125 +#define HFA_CLOCK_MAX 1000 +#define HFA_CLOCK_MAXDIFF 100 +#define HFA_TEMP_OVERHEAT 95 +#define HFA_TEMP_TARGET 88 +#define HFA_TEMP_HYSTERESIS 3 +#define HFA_FAN_DEFAULT 33 +#define HFA_FAN_MAX 85 +#define HFA_FAN_MIN 5 +#define HFA_VOLTAGE_MAX 1000 +#define HFA_VOLTAGE_MIN 500 +#define HFA_MAGIC_SETTINGS_VALUE 0x42AA + +// # Factory Operation Codes +#define OP_SETTINGS 55 // Read or write settings +#define OP_POWER 57 + +// Matching fields for hf_statistics, but large #s for local accumulation, per-die +struct hf_long_statistics { + uint64_t rx_header_crc; // Header CRCs + uint64_t rx_body_crc; // Data CRCs + uint64_t rx_header_timeouts; // Header timeouts + uint64_t rx_body_timeouts; // Data timeouts + uint64_t core_nonce_fifo_full; // Core nonce Q overrun events + uint64_t array_nonce_fifo_full; // System nonce Q overrun events + uint64_t stats_overrun; // Overrun in statistics reporting +}; + +// Matching fields for hf_usb_stats1, but large #s for local accumulation, per device +struct hf_long_usb_stats1 { + // USB incoming + uint64_t usb_rx_preambles; + uint64_t usb_rx_receive_byte_errors; + uint64_t usb_rx_bad_hcrc; + + // USB outgoing + uint64_t usb_tx_attempts; + uint64_t usb_tx_packets; + uint64_t usb_tx_timeouts; + uint64_t usb_tx_incompletes; + uint64_t usb_tx_endpointstalled; + uint64_t usb_tx_disconnected; + uint64_t usb_tx_suspended; +#if 0 + /* We don't care about UART stats */ + // UART 
transmit + uint64_t uart_tx_queue_dma; + uint64_t uart_tx_interrupts; + + // UART receive + uint64_t uart_rx_preamble_ints; + uint64_t uart_rx_missed_preamble_ints; + uint64_t uart_rx_header_done; + uint64_t uart_rx_data_done; + uint64_t uart_rx_bad_hcrc; + uint64_t uart_rx_bad_dma; + uint64_t uart_rx_short_dma; + uint64_t uart_rx_buffers_full; +#endif + + uint8_t max_tx_buffers; + uint8_t max_rx_buffers; +}; + +/* Private per die data for dynamic clocking */ +struct hf_die_data { + int hash_clock; + double temp; + double board_temp; + time_t last_restart; +}; + +struct hashfast_info { + struct cgpu_info *cgpu; // Points back to parent structure + struct cgpu_info *old_cgpu ; // Points to old structure if hotplugged same device + int asic_count; // # of chips in the chain + int core_count; // # of cores per chip + int device_type; // What sort of device this is + int num_sequence; // A power of 2. What the sequence number range is. + int ref_frequency; // Reference clock rate + struct hf_g1_die_data *die_status; // Array of per-die voltage, current, temperature sensor data + struct hf_long_statistics *die_statistics; // Array of per-die error counters + struct hf_long_usb_stats1 stats1; + struct hf_die_data *die_data; + double firmware_version; + double hardware_version; + int hash_clock_rate; // Hash clock rate to use, in Mhz + int base_clock; // Clock rate we actually got + struct hf_usb_init_base usb_init_base; // USB Base information from USB_INIT + struct hf_config_data config_data; // Configuration data used from USB_INIT + int core_bitmap_size; // in bytes + uint32_t *core_bitmap; // Core OK bitmap test results, run with PLL Bypassed + int group_ntime_roll; // Total ntime roll amount per group + int core_ntime_roll; // Total core ntime roll amount + uint32_t serial_number; // db->serial_number if it exists + char op_name[36]; + bool has_opname; + bool opname_valid; + + pthread_mutex_t lock; + pthread_mutex_t rlock; + struct work **works; + uint16_t 
hash_sequence_head; // HOST: The next hash sequence # to be sent + uint16_t hash_sequence_tail; // HOST: Follows device_sequence_tail around to free work + uint16_t device_sequence_head; // DEVICE: The most recent sequence number the device dispatched + uint16_t device_sequence_tail; // DEVICE: The most recently completed job in the device + int64_t hash_count; + uint64_t raw_hashes; + uint64_t calc_hashes; + uint16_t shed_count; // Dynamic copy of #cores device has shed for thermal control + int no_matching_work; + int resets; + int overheat; + int last_max_temp; + int temp_updates; + int fanspeed; // Fanspeed in percent + int last_die_adjusted; + int clock_offset; + int hash_voltage; // Hash voltage to use, in mV + + pthread_t read_thr; + time_t last_restart; + time_t last_send; +}; + +#endif /* USE_HASHFAST */ +#endif /* HASHFAST_H */ diff --git a/driver-hashratio.c b/driver-hashratio.c new file mode 100644 index 0000000..a95f902 --- /dev/null +++ b/driver-hashratio.c @@ -0,0 +1,876 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2012-2014 Xiangfu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#ifndef WIN32 + #include + #include + #include + #ifndef O_CLOEXEC + #define O_CLOEXEC 0 + #endif +#else + #include + #include +#endif + +#include "elist.h" +#include "miner.h" +#include "driver-hashratio.h" +#include "crc.h" +#include "usbutils.h" + +static int opt_hashratio_fan_min = HRTO_DEFAULT_FAN_MIN; +static int opt_hashratio_fan_max = HRTO_DEFAULT_FAN_MAX; + +static int hashratio_freq = HRTO_DEFAULT_FREQUENCY; + +//static int get_fan_pwm(int temp) { +// int pwm; +// uint8_t fan_pwm_arr[] = {30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, +// 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, +// 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, +// 30, 37, 49, 61, 73, 85, 88, 91, 94, 97, 100, 100, 100, 100, 100, 100, +// 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, +// 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, +// 100, 100, 100, 100, 100, 100, 100}; +// if (temp < 0 || temp >= sizeof(fan_pwm_arr)/sizeof(fan_pwm_arr[0]) || +// fan_pwm_arr[temp] > opt_hashratio_fan_max) { +// return opt_hashratio_fan_max; +// } +// pwm = HRTO_PWM_MAX - fan_pwm_arr[temp] * HRTO_PWM_MAX / 100; +// +// if (pwm < opt_hashratio_fan_min) { +// return opt_hashratio_fan_min; +// } +// if (pwm > opt_hashratio_fan_max) { +// return opt_hashratio_fan_max; +// } +// return pwm; +//} + +char *set_hashratio_freq(char *arg) +{ + int val, ret; + + ret = sscanf(arg, "%d", &val); + if (ret != 1) + return "No values passed to hashratio-freq"; + + if (val < HRTO_DEFAULT_FREQUENCY_MIN || val > HRTO_DEFAULT_FREQUENCY_MAX) + return "Invalid value passed to hashratio-freq"; + + hashratio_freq = val; + + return NULL; +} + +static inline uint8_t rev8(uint8_t d) +{ + int i; + uint8_t out = 0; + + /* (from left to right) */ + for (i = 0; i < 8; i++) + if (d & (1 << i)) + out |= (1 << (7 - i)); + + return 
out; +} + +char *set_hashratio_fan(char *arg) +{ + int val1, val2, ret; + + ret = sscanf(arg, "%d-%d", &val1, &val2); + if (ret < 1) + return "No values passed to hashratio-fan"; + if (ret == 1) + val2 = val1; + + if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100 || val2 < val1) + return "Invalid value passed to hashratio-fan"; + + opt_hashratio_fan_min = val1 * HRTO_PWM_MAX / 100; + opt_hashratio_fan_max = val2 * HRTO_PWM_MAX / 100; + + return NULL; +} + +static int hashratio_init_pkg(struct hashratio_pkg *pkg, uint8_t type, + uint8_t idx, uint8_t cnt) +{ + unsigned short crc; + + pkg->head[0] = HRTO_H1; + pkg->head[1] = HRTO_H2; + + pkg->type = type; + pkg->idx = idx; + pkg->cnt = cnt; + + crc = crc16(pkg->data, HRTO_P_DATA_LEN); + + pkg->crc[0] = (crc & 0xff00) >> 8; + pkg->crc[1] = crc & 0x00ff; + return 0; +} + +static int job_idcmp(uint8_t *job_id, char *pool_job_id) +{ + int job_id_len; + unsigned short crc, crc_expect; + + if (!pool_job_id) + return 1; + + job_id_len = strlen(pool_job_id); + crc_expect = crc16((const unsigned char *)pool_job_id, job_id_len); + + crc = job_id[0] << 8 | job_id[1]; + + if (crc_expect == crc) + return 0; + + applog(LOG_DEBUG, "Hashratio: job_id not match! 
[%04x:%04x (%s)]", + crc, crc_expect, pool_job_id); + + return 1; +} + +static int decode_pkg(struct thr_info *thr, struct hashratio_ret *ar, uint8_t *pkg) +{ + struct cgpu_info *hashratio = thr->cgpu; + struct hashratio_info *info = hashratio->device_data; + struct pool *pool, *real_pool, *pool_stratum = &info->pool; + + unsigned int expected_crc; + unsigned int actual_crc; + uint32_t nonce, nonce2, miner; + int pool_no; + uint8_t job_id[4]; + int tmp; + + int type = HRTO_GETS_ERROR; + + memcpy((uint8_t *)ar, pkg, HRTO_READ_SIZE); + +// applog(LOG_DEBUG, "pkg.type, hex: %02x, dec: %d", ar->type, ar->type); + + if (ar->head[0] == HRTO_H1 && ar->head[1] == HRTO_H2) { + expected_crc = crc16(ar->data, HRTO_P_DATA_LEN); + actual_crc = (ar->crc[0] & 0xff) | + ((ar->crc[1] & 0xff) << 8); + + type = ar->type; + applog(LOG_DEBUG, "hashratio: %d: expected crc(%04x), actual_crc(%04x)", type, expected_crc, actual_crc); + if (expected_crc != actual_crc) + goto out; + + switch(type) { + case HRTO_P_NONCE: + applog(LOG_DEBUG, "Hashratio: HRTO_P_NONCE"); + memcpy(&miner, ar->data + 0, 4); + memcpy(&pool_no, ar->data + 4, 4); + memcpy(&nonce2, ar->data + 8, 4); + /* Calc time ar->data + 12 */ + memcpy(&nonce, ar->data + 12, 4); + memcpy(job_id, ar->data + 16, 4); + + miner = be32toh(miner); + pool_no = be32toh(pool_no); + if (miner >= HRTO_DEFAULT_MINERS || pool_no >= total_pools || pool_no < 0) { + applog(LOG_DEBUG, "hashratio: Wrong miner/pool/id no %d,%d", miner, pool_no); + break; + } else + info->matching_work[miner]++; + nonce2 = be32toh(nonce2); + nonce = be32toh(nonce); + + applog(LOG_DEBUG, "hashratio: Found! [%s] %d:(%08x) (%08x)", + job_id, pool_no, nonce2, nonce); + + real_pool = pool = pools[pool_no]; + if (job_idcmp(job_id, pool->swork.job_id)) { + if (!job_idcmp(job_id, pool_stratum->swork.job_id)) { + applog(LOG_DEBUG, "Hashratio: Match to previous stratum! 
(%s)", pool_stratum->swork.job_id); + pool = pool_stratum; + } else { + applog(LOG_DEBUG, "Hashratio Cannot match to any stratum! (%s)", pool->swork.job_id); + break; + } + } + submit_nonce2_nonce(thr, pool, real_pool, nonce2, nonce, 0); + break; + case HRTO_P_STATUS: + applog(LOG_DEBUG, "Hashratio: HRTO_P_STATUS"); + memcpy(&tmp, ar->data, 4); + tmp = be32toh(tmp); + info->temp = (tmp & 0x00f0) >> 8; + if (info->temp_max < info->temp) { + info->temp_max = info->temp; + } +// info->temp[1] = tmp & 0xffff; + + memcpy(&tmp, ar->data + 4, 4); + tmp = be32toh(tmp); + info->fan[0] = tmp >> 16; + info->fan[1] = tmp & 0xffff; + + // local_work + memcpy(&tmp, ar->data + 8, 4); + tmp = be32toh(tmp); + info->local_work = tmp; + info->local_works += tmp; + + // hw_work + memcpy(&tmp, ar->data + 12, 4); + tmp = be32toh(tmp); + info->hw_works += tmp; + + hashratio->temp = info->temp; + break; + case HRTO_P_ACKDETECT: + applog(LOG_DEBUG, "Hashratio: HRTO_P_ACKDETECT"); + break; + case HRTO_P_ACK: + applog(LOG_DEBUG, "Hashratio: HRTO_P_ACK"); + break; + case HRTO_P_NAK: + applog(LOG_DEBUG, "Hashratio: HRTO_P_NAK"); + break; + default: + applog(LOG_DEBUG, "Hashratio: HRTO_GETS_ERROR"); + type = HRTO_GETS_ERROR; + break; + } + } + +out: + return type; +} + +static inline int hashratio_gets(struct cgpu_info *hashratio, uint8_t *buf) +{ + int i; + int read_amount = HRTO_READ_SIZE; + uint8_t buf_tmp[HRTO_READ_SIZE]; + uint8_t buf_copy[2 * HRTO_READ_SIZE]; + uint8_t *buf_back = buf; + int ret = 0; + + while (true) { + int err; + + do { + memset(buf, 0, read_amount); + err = usb_read(hashratio, (char *)buf, read_amount, &ret, C_HRO_READ); + if (unlikely(err < 0 || ret != read_amount)) { + applog(LOG_ERR, "hashratio: Error on read in hashratio_gets got %d", ret); + return HRTO_GETS_ERROR; + } + if (likely(ret >= read_amount)) { + for (i = 1; i < read_amount; i++) { + if (buf_back[i - 1] == HRTO_H1 && buf_back[i] == HRTO_H2) + break; + } + i -= 1; + if (i) { + err = usb_read(hashratio, 
(char *)buf, read_amount, &ret, C_HRO_READ); + if (unlikely(err < 0 || ret != read_amount)) { + applog(LOG_ERR, "hashratio: Error on 2nd read in hashratio_gets got %d", ret); + return HRTO_GETS_ERROR; + } + memcpy(buf_copy, buf_back + i, HRTO_READ_SIZE - i); + memcpy(buf_copy + HRTO_READ_SIZE - i, buf_tmp, i); + memcpy(buf_back, buf_copy, HRTO_READ_SIZE); + } + return HRTO_GETS_OK; + } + buf += ret; + read_amount -= ret; + continue; + } while (ret > 0); + + return HRTO_GETS_TIMEOUT; + } +} + +static int hashratio_send_pkg(struct cgpu_info *hashratio, const struct hashratio_pkg *pkg) +{ + int err, amount; + uint8_t buf[HRTO_WRITE_SIZE]; + int nr_len = HRTO_WRITE_SIZE; + + memcpy(buf, pkg, HRTO_WRITE_SIZE); +// if (opt_debug) { +// applog(LOG_DEBUG, "hashratio: Sent(%d):", nr_len); +// hexdump((uint8_t *)buf, nr_len); +// } + + if (unlikely(hashratio->usbinfo.nodev)) + return HRTO_SEND_ERROR; + + err = usb_write(hashratio, (char *)buf, nr_len, &amount, C_HRO_WRITE); + if (err || amount != nr_len) { + applog(LOG_DEBUG, "hashratio: Send(%d)!", amount); + return HRTO_SEND_ERROR; + } + + return HRTO_SEND_OK; +} + +static int hashratio_send_pkgs(struct cgpu_info *hashratio, const struct hashratio_pkg *pkg) +{ + int ret; + + do { + if (unlikely(hashratio->usbinfo.nodev)) + return -1; + ret = hashratio_send_pkg(hashratio, pkg); + } while (ret != HRTO_SEND_OK); + return 0; +} + +static void hashratio_stratum_pkgs(struct cgpu_info *hashratio, struct pool *pool) +{ + const int merkle_offset = 36; + struct hashratio_pkg pkg; + int i, a, b, tmp; + unsigned char target[32]; + int job_id_len; + unsigned short crc; + + /* Send out the first stratum message STATIC */ + applog(LOG_DEBUG, "hashratio: Pool stratum message STATIC: %d, %d, %d, %d, %d, %d", + pool->coinbase_len, + pool->nonce2_offset, + pool->n2size, + merkle_offset, + pool->merkles, + pool->pool_no); + memset(pkg.data, 0, HRTO_P_DATA_LEN); + tmp = be32toh(pool->coinbase_len); + memcpy(pkg.data, &tmp, 4); + + tmp = 
be32toh(pool->nonce2_offset); + memcpy(pkg.data + 4, &tmp, 4); + + tmp = be32toh(pool->n2size); + memcpy(pkg.data + 8, &tmp, 4); + + tmp = be32toh(merkle_offset); + memcpy(pkg.data + 12, &tmp, 4); + + tmp = be32toh(pool->merkles); + memcpy(pkg.data + 16, &tmp, 4); + + tmp = be32toh((int)pool->sdiff); + memcpy(pkg.data + 20, &tmp, 4); + + tmp = be32toh((int)pool->pool_no); + memcpy(pkg.data + 24, &tmp, 4); + + hashratio_init_pkg(&pkg, HRTO_P_STATIC, 1, 1); + if (hashratio_send_pkgs(hashratio, &pkg)) + return; + + set_target(target, pool->sdiff); + memcpy(pkg.data, target, 32); + if (opt_debug) { + char *target_str; + target_str = bin2hex(target, 32); + applog(LOG_DEBUG, "hashratio: Pool stratum target: %s", target_str); + free(target_str); + } + hashratio_init_pkg(&pkg, HRTO_P_TARGET, 1, 1); + if (hashratio_send_pkgs(hashratio, &pkg)) + return; + + applog(LOG_DEBUG, "hashratio: Pool stratum message JOBS_ID: %s", + pool->swork.job_id); + memset(pkg.data, 0, HRTO_P_DATA_LEN); + + job_id_len = strlen(pool->swork.job_id); + crc = crc16((const unsigned char *)pool->swork.job_id, job_id_len); + pkg.data[0] = (crc & 0xff00) >> 8; + pkg.data[1] = crc & 0x00ff; + hashratio_init_pkg(&pkg, HRTO_P_JOB_ID, 1, 1); + if (hashratio_send_pkgs(hashratio, &pkg)) + return; + + a = pool->coinbase_len / HRTO_P_DATA_LEN; + b = pool->coinbase_len % HRTO_P_DATA_LEN; + applog(LOG_DEBUG, "pool->coinbase_len: %d", pool->coinbase_len); + applog(LOG_DEBUG, "hashratio: Pool stratum message COINBASE: %d %d", a, b); + for (i = 0; i < a; i++) { + memcpy(pkg.data, pool->coinbase + i * 32, 32); + hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, a + (b ? 
1 : 0)); + if (hashratio_send_pkgs(hashratio, &pkg)) + return; + if (i % 25 == 0) { + cgsleep_ms(2); + } + } + if (b) { + memset(pkg.data, 0, HRTO_P_DATA_LEN); + memcpy(pkg.data, pool->coinbase + i * 32, b); + hashratio_init_pkg(&pkg, HRTO_P_COINBASE, i + 1, i + 1); + if (hashratio_send_pkgs(hashratio, &pkg)) + return; + } + + b = pool->merkles; + applog(LOG_DEBUG, "hashratio: Pool stratum message MERKLES: %d", b); + for (i = 0; i < b; i++) { + memset(pkg.data, 0, HRTO_P_DATA_LEN); + memcpy(pkg.data, pool->swork.merkle_bin[i], 32); + hashratio_init_pkg(&pkg, HRTO_P_MERKLES, i + 1, b); + if (hashratio_send_pkgs(hashratio, &pkg)) + return; + } + + applog(LOG_DEBUG, "hashratio: Pool stratum message HEADER: 4"); + for (i = 0; i < 4; i++) { + memset(pkg.data, 0, HRTO_P_HEADER); + memcpy(pkg.data, pool->header_bin + i * 32, 32); + hashratio_init_pkg(&pkg, HRTO_P_HEADER, i + 1, 4); + if (hashratio_send_pkgs(hashratio, &pkg)) + return; + + } +} + +static int hashratio_get_result(struct thr_info *thr, struct hashratio_ret *ar) +{ + struct cgpu_info *hashratio = thr->cgpu; + uint8_t result[HRTO_READ_SIZE]; + int ret; + + memset(result, 0, HRTO_READ_SIZE); + + ret = hashratio_gets(hashratio, result); + if (ret != HRTO_GETS_OK) + return ret; + +// if (opt_debug) { +// applog(LOG_DEBUG, "hashratio: Get(ret = %d):", ret); +// hexdump((uint8_t *)result, HRTO_READ_SIZE); +// } + + return decode_pkg(thr, ar, result); +} + +#define HASHRATIO_LATENCY 5 + +static void hashratio_initialise(struct cgpu_info *hashratio) +{ + int err, interface; + + if (hashratio->usbinfo.nodev) + return; + + interface = usb_interface(hashratio); + // Reset + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, + FTDI_VALUE_RESET, interface, C_RESET); + + applog(LOG_DEBUG, "%s%i: reset got err %d", + hashratio->drv->name, hashratio->device_id, err); + + if (hashratio->usbinfo.nodev) + return; + + // Set latency + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_LATENCY, + 
HASHRATIO_LATENCY, interface, C_LATENCY); + + applog(LOG_DEBUG, "%s%i: latency got err %d", + hashratio->drv->name, hashratio->device_id, err); + + if (hashratio->usbinfo.nodev) + return; + + // Set data + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, + FTDI_VALUE_DATA_AVA, interface, C_SETDATA); + + applog(LOG_DEBUG, "%s%i: data got err %d", + hashratio->drv->name, hashratio->device_id, err); + + if (hashratio->usbinfo.nodev) + return; + + // Set the baud + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, FTDI_VALUE_BAUD_AVA, + (FTDI_INDEX_BAUD_AVA & 0xff00) | interface, + C_SETBAUD); + + applog(LOG_DEBUG, "%s%i: setbaud got err %d", + hashratio->drv->name, hashratio->device_id, err); + + if (hashratio->usbinfo.nodev) + return; + + // Set Modem Control + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); + + applog(LOG_DEBUG, "%s%i: setmodemctrl got err %d", + hashratio->drv->name, hashratio->device_id, err); + + if (hashratio->usbinfo.nodev) + return; + + // Set Flow Control + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + + applog(LOG_DEBUG, "%s%i: setflowctrl got err %d", + hashratio->drv->name, hashratio->device_id, err); + + if (hashratio->usbinfo.nodev) + return; + + /* hashratio repeats the following */ + // Set Modem Control + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, + FTDI_VALUE_MODEM, interface, C_SETMODEM); + + applog(LOG_DEBUG, "%s%i: setmodemctrl 2 got err %d", + hashratio->drv->name, hashratio->device_id, err); + + if (hashratio->usbinfo.nodev) + return; + + // Set Flow Control + err = usb_transfer(hashratio, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, + FTDI_VALUE_FLOW, interface, C_SETFLOW); + + applog(LOG_DEBUG, "%s%i: setflowctrl 2 got err %d", + hashratio->drv->name, hashratio->device_id, err); +} + +static struct cgpu_info *hashratio_detect_one(struct libusb_device *dev, struct 
usb_find_devices *found) +{ + struct hashratio_info *info; + int err, amount; + int ackdetect; + char mm_version[16]; + + struct cgpu_info *hashratio = usb_alloc_cgpu(&hashratio_drv, 1); + struct hashratio_pkg detect_pkg; + struct hashratio_ret ret_pkg; + + if (!usb_init(hashratio, dev, found)) { + applog(LOG_ERR, "Hashratio failed usb_init"); + hashratio = usb_free_cgpu(hashratio); + return NULL; + } + + hashratio_initialise(hashratio); + + strcpy(mm_version, "NONE"); + /* Send out detect pkg */ + memset(detect_pkg.data, 0, HRTO_P_DATA_LEN); + + hashratio_init_pkg(&detect_pkg, HRTO_P_DETECT, 1, 1); + hashratio_send_pkg(hashratio, &detect_pkg); + err = usb_read(hashratio, (char *)&ret_pkg, HRTO_READ_SIZE, &amount, C_HRO_READ); + if (err || amount != HRTO_READ_SIZE) { + applog(LOG_ERR, "%s %d: Hashratio failed usb_read with err %d amount %d", + hashratio->drv->name, hashratio->device_id, err, amount); + usb_uninit(hashratio); + usb_free_cgpu(hashratio); + return NULL; + } + + ackdetect = ret_pkg.type; + applog(LOG_DEBUG, "hashratio Detect ID: %d", ackdetect); + + if (ackdetect != HRTO_P_ACKDETECT) { + applog(LOG_DEBUG, "Not a hashratio device"); + usb_uninit(hashratio); + usb_free_cgpu(hashratio); + return NULL; + } + + memcpy(mm_version, ret_pkg.data, 15); + mm_version[15] = '\0'; + + /* We have a real Hashratio! 
*/ + hashratio->threads = HRTO_MINER_THREADS; + add_cgpu(hashratio); + + update_usb_stats(hashratio); + + applog(LOG_INFO, "%s%d: Found at %s", hashratio->drv->name, hashratio->device_id, + hashratio->device_path); + + hashratio->device_data = calloc(sizeof(struct hashratio_info), 1); + if (unlikely(!(hashratio->device_data))) + quit(1, "Failed to malloc hashratio_info"); + + info = hashratio->device_data; + + strcpy(info->mm_version, mm_version); + + info->fan_pwm = HRTO_DEFAULT_FAN / 100 * HRTO_PWM_MAX; + info->temp_max = 0; + info->temp_history_index = 0; + info->temp_sum = 0; + info->temp_old = 0; + info->default_freq = hashratio_freq; + + return hashratio; +} + +static inline void hashratio_detect(bool __maybe_unused hotplug) +{ + usb_detect(&hashratio_drv, hashratio_detect_one); +} + +static bool hashratio_prepare(struct thr_info *thr) +{ + struct cgpu_info *hashratio = thr->cgpu; + struct hashratio_info *info = hashratio->device_data; + + cglock_init(&info->pool.data_lock); + + return true; +} + +static void copy_pool_stratum(struct hashratio_info *info, struct pool *pool) +{ + int i; + int merkles = pool->merkles; + size_t coinbase_len = pool->coinbase_len; + struct pool *pool_stratum = &info->pool; + + if (!job_idcmp((uint8_t *)pool->swork.job_id, pool_stratum->swork.job_id)) + return; + + cg_wlock(&(pool_stratum->data_lock)); + free(pool_stratum->swork.job_id); + free(pool_stratum->nonce1); + free(pool_stratum->coinbase); + + align_len(&coinbase_len); + pool_stratum->coinbase = calloc(coinbase_len, 1); + if (unlikely(!pool_stratum->coinbase)) + quit(1, "Failed to calloc pool_stratum coinbase in hashratio"); + memcpy(pool_stratum->coinbase, pool->coinbase, coinbase_len); + + + for (i = 0; i < pool_stratum->merkles; i++) + free(pool_stratum->swork.merkle_bin[i]); + if (merkles) { + pool_stratum->swork.merkle_bin = realloc(pool_stratum->swork.merkle_bin, + sizeof(char *) * merkles + 1); + for (i = 0; i < merkles; i++) { + pool_stratum->swork.merkle_bin[i] = 
malloc(32); + if (unlikely(!pool_stratum->swork.merkle_bin[i])) + quit(1, "Failed to malloc pool_stratum swork merkle_bin"); + memcpy(pool_stratum->swork.merkle_bin[i], pool->swork.merkle_bin[i], 32); + } + } + + pool_stratum->sdiff = pool->sdiff; + pool_stratum->coinbase_len = pool->coinbase_len; + pool_stratum->nonce2_offset = pool->nonce2_offset; + pool_stratum->n2size = pool->n2size; + pool_stratum->merkles = pool->merkles; + + pool_stratum->swork.job_id = strdup(pool->swork.job_id); + pool_stratum->nonce1 = strdup(pool->nonce1); + + memcpy(pool_stratum->ntime, pool->ntime, sizeof(pool_stratum->ntime)); + memcpy(pool_stratum->header_bin, pool->header_bin, sizeof(pool_stratum->header_bin)); + cg_wunlock(&(pool_stratum->data_lock)); +} + +static void hashratio_update_work(struct cgpu_info *hashratio) +{ + struct hashratio_info *info = hashratio->device_data; + struct thr_info *thr = hashratio->thr[0]; + struct hashratio_pkg send_pkg; + uint32_t tmp, range, start; + struct work *work; + struct pool *pool; + + applog(LOG_DEBUG, "hashratio: New stratum: restart: %d, update: %d", + thr->work_restart, thr->work_update); + thr->work_update = false; + thr->work_restart = false; + + work = get_work(thr, thr->id); /* Make sure pool is ready */ + discard_work(work); /* Don't leak memory */ + + pool = current_pool(); + if (!pool->has_stratum) + quit(1, "hashratio: Miner Manager have to use stratum pool"); + if (pool->coinbase_len > HRTO_P_COINBASE_SIZE) + quit(1, "hashratio: Miner Manager pool coinbase length have to less then %d", HRTO_P_COINBASE_SIZE); + if (pool->merkles > HRTO_P_MERKLES_COUNT) + quit(1, "hashratio: Miner Manager merkles have to less then %d", HRTO_P_MERKLES_COUNT); + + info->pool_no = pool->pool_no; + + cgtime(&info->last_stratum); + cg_rlock(&pool->data_lock); + info->pool_no = pool->pool_no; + copy_pool_stratum(info, pool); + hashratio_stratum_pkgs(hashratio, pool); + cg_runlock(&pool->data_lock); + + /* Configure the parameter from outside */ + 
memset(send_pkg.data, 0, HRTO_P_DATA_LEN); + + // fan. We're not measuring temperature so set a safe but not max value + info->fan_pwm = HRTO_PWM_MAX * 2 / 3; + tmp = be32toh(info->fan_pwm); + memcpy(send_pkg.data, &tmp, 4); + + // freq + tmp = be32toh(info->default_freq); + memcpy(send_pkg.data + 4, &tmp, 4); + applog(LOG_DEBUG, "set freq: %d", info->default_freq); + + /* Configure the nonce2 offset and range */ + range = 0xffffffff / (total_devices + 1); + start = range * (hashratio->device_id + 1); + + tmp = be32toh(start); + memcpy(send_pkg.data + 8, &tmp, 4); + + tmp = be32toh(range); + memcpy(send_pkg.data + 12, &tmp, 4); + + /* Package the data */ + hashratio_init_pkg(&send_pkg, HRTO_P_SET, 1, 1); + hashratio_send_pkgs(hashratio, &send_pkg); +} + +static int64_t hashratio_scanhash(struct thr_info *thr) +{ + struct cgpu_info *hashratio = thr->cgpu; + struct hashratio_info *info = hashratio->device_data; + struct hashratio_pkg send_pkg; + struct hashratio_ret ar; + + memset(send_pkg.data, 0, HRTO_P_DATA_LEN); + hashratio_init_pkg(&send_pkg, HRTO_P_POLLING, 1, 1); + + if (unlikely(hashratio->usbinfo.nodev || hashratio_send_pkgs(hashratio, &send_pkg))) { + applog(LOG_ERR, "%s%d: Device disappeared, shutting down thread", + hashratio->drv->name, hashratio->device_id); + return -1; + } + hashratio_get_result(thr, &ar); + + return (int64_t)info->local_work * 64 * 0xffffffff; +} + +static struct api_data *hashratio_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + struct hashratio_info *info = cgpu->device_data; + char buf[24]; + char buf2[256]; + double hwp; + int i; + + // mm version + sprintf(buf, "MM Version"); + root = api_add_string(root, buf, info->mm_version, false); + + // asic freq + sprintf(buf, "Asic Freq (MHz)"); + root = api_add_int(root, buf, &(info->default_freq), false); + + // match work count + for (i = 0; i < HRTO_DEFAULT_MODULARS; i++) { + sprintf(buf, "Match work Modular %02d", i + 1); + memset(buf2, 0, sizeof(buf2)); + 
snprintf(buf2, sizeof(buf2), + "%02d:%08d %02d:%08d %02d:%08d %02d:%08d " + "%02d:%08d %02d:%08d %02d:%08d %02d:%08d " + "%02d:%08d %02d:%08d %02d:%08d %02d:%08d " + "%02d:%08d %02d:%08d %02d:%08d %02d:%08d", + i*16 + 1, info->matching_work[i*16 + 0], + i*16 + 2, info->matching_work[i*16 + 1], + i*16 + 3, info->matching_work[i*16 + 2], + i*16 + 4, info->matching_work[i*16 + 3], + i*16 + 5, info->matching_work[i*16 + 4], + i*16 + 6, info->matching_work[i*16 + 5], + i*16 + 7, info->matching_work[i*16 + 6], + i*16 + 8, info->matching_work[i*16 + 7], + i*16 + 9, info->matching_work[i*16 + 8], + i*16 + 10, info->matching_work[i*16 + 9], + i*16 + 11, info->matching_work[i*16 + 10], + i*16 + 12, info->matching_work[i*16 + 11], + i*16 + 13, info->matching_work[i*16 + 12], + i*16 + 14, info->matching_work[i*16 + 13], + i*16 + 15, info->matching_work[i*16 + 14], + i*16 + 16, info->matching_work[i*16 + 15]); + root = api_add_string(root, buf, buf2, true); + } + + // local works + sprintf(buf, "Local works"); + root = api_add_int(root, buf, &(info->local_works), false); + + // hardware error works + sprintf(buf, "Hardware error works"); + root = api_add_int(root, buf, &(info->hw_works), false); + + // device hardware error % + hwp = info->local_works ? 
((double)info->hw_works / (double)info->local_works) : 0; + sprintf(buf, "Device hardware error%%"); + root = api_add_percent(root, buf, &hwp, true); + + // Temperature + sprintf(buf, "Temperature"); + root = api_add_int(root, buf, &(info->temp), false); + + // Fan + for (i = 0; i < HRTO_FAN_COUNT; i++) { + sprintf(buf, "Fan%d", i+1); + root = api_add_int(root, buf, &(info->fan[i]), false); + } + + return root; +} + +static void hashratio_shutdown(struct thr_info __maybe_unused *thr) +{ +} + +struct device_drv hashratio_drv = { + .drv_id = DRIVER_hashratio, + .dname = "hashratio", + .name = "HRO", + .get_api_stats = hashratio_api_stats, + .drv_detect = hashratio_detect, + .thread_prepare = hashratio_prepare, + .hash_work = hash_driver_work, + .scanwork = hashratio_scanhash, + .flush_work = hashratio_update_work, + .update_work = hashratio_update_work, + .thread_shutdown = hashratio_shutdown, +}; diff --git a/driver-hashratio.h b/driver-hashratio.h new file mode 100644 index 0000000..e155b5a --- /dev/null +++ b/driver-hashratio.h @@ -0,0 +1,131 @@ +/* + * Copyright 2013-2014 Con Kolivas + * Copyright 2012-2014 Xiangfu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#ifndef _HASHRATIO_H_ +#define _HASHRATIO_H_ + +#include "miner.h" +#include "util.h" + +#ifdef USE_HASHRATIO +char *opt_hashratio_freq; + +#define HRTO_MINER_THREADS 1 + +#define HRTO_RESET_FAULT_DECISECONDS 10 +#define HRTO_IO_SPEED 115200 + +#define HRTO_DEFAULT_MODULARS 5 +#define HRTO_DEFAULT_MINERS_PER_MODULAR 16 +/* total chips number */ +#define HRTO_DEFAULT_MINERS (HRTO_DEFAULT_MODULARS * 16) + +#define HRTO_PWM_MAX 0x3FF +#define HRTO_DEFAULT_FAN 20 /* N% */ +#define HRTO_DEFAULT_FAN_MIN 50 /* N% */ +#define HRTO_DEFAULT_FAN_MAX 100 /* N% */ + +#define HRTO_DEFAULT_FREQUENCY 280 /* MHz */ +#define HRTO_DEFAULT_FREQUENCY_MIN 100 +#define HRTO_DEFAULT_FREQUENCY_MAX 750 + +#define HRTO_FAN_COUNT 2 +//#define HRTO_TEMP_COUNT 1 + +/* Hashratio protocol package type */ +#define HRTO_H1 'H' +#define HRTO_H2 'R' + +#define HRTO_P_COINBASE_SIZE (6 * 1024) +#define HRTO_P_MERKLES_COUNT 20 + +#define HRTO_P_COUNT 39 +#define HRTO_P_DATA_LEN (HRTO_P_COUNT - 7) + +#define HRTO_P_DETECT 10 // 0x0a +#define HRTO_P_STATIC 11 // 0x0b +#define HRTO_P_JOB_ID 12 // 0x0c +#define HRTO_P_COINBASE 13 // 0x0d +#define HRTO_P_MERKLES 14 // 0x0e +#define HRTO_P_HEADER 15 // 0x0f +#define HRTO_P_POLLING 16 // 0x10 +#define HRTO_P_TARGET 17 // 0x11 +#define HRTO_P_REQUIRE 18 // 0x12 +#define HRTO_P_SET 19 // 0x13 +#define HRTO_P_TEST 20 // 0x14 + +#define HRTO_P_ACK 51 // 0x33 +#define HRTO_P_NAK 52 // 0x34 +#define HRTO_P_NONCE 53 // 0x35 +#define HRTO_P_STATUS 54 // 0x36 +#define HRTO_P_ACKDETECT 55 // 0x37 +#define HRTO_P_TEST_RET 56 // 0x38 +/* Hashratio protocol package type */ + +struct hashratio_pkg { + uint8_t head[2]; + uint8_t type; + uint8_t idx; + uint8_t cnt; + uint8_t data[32]; + uint8_t crc[2]; +}; +#define hashratio_ret hashratio_pkg + +struct hashratio_info { + int default_freq; + + int fan_pwm; + + int temp; + int fan[HRTO_FAN_COUNT]; +// uint8_t freq[HRTO_DEFAULT_MINERS]; + uint8_t target_freq[HRTO_DEFAULT_MINERS]; + + int temp_max; + int 
temp_history_count; + int temp_history_index; + int temp_sum; + int temp_old; + + struct timeval last_stratum; + struct pool pool; + int pool_no; + + int local_works; + int hw_works; + int matching_work[HRTO_DEFAULT_MINERS]; + int local_work; + int hw_work; + +// uint32_t get_result_counter; + + char mm_version[16]; +}; + +#define HRTO_WRITE_SIZE (sizeof(struct hashratio_pkg)) +#define HRTO_READ_SIZE HRTO_WRITE_SIZE + +#define HRTO_GETS_OK 0 +#define HRTO_GETS_TIMEOUT -1 +#define HRTO_GETS_RESTART -2 +#define HRTO_GETS_ERROR -3 + +#define HRTO_SEND_OK 0 +#define HRTO_SEND_ERROR -1 + +#define hashratio_open(devpath, baud, purge) serial_open(devpath, baud, HRTO_RESET_FAULT_DECISECONDS, purge) +#define hashratio_close(fd) close(fd) + +extern char *set_hashratio_fan(char *arg); +extern char *set_hashratio_freq(char *arg); + +#endif /* USE_HASHRATIO */ +#endif /* _HASHRATIO_H_ */ diff --git a/driver-icarus.c b/driver-icarus.c new file mode 100644 index 0000000..2971121 --- /dev/null +++ b/driver-icarus.c @@ -0,0 +1,2462 @@ +/* + * Copyright 2012-2013 Andrew Smith + * Copyright 2012 Xiangfu + * Copyright 2013-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +/* + * Those code should be works fine with V2 and V3 bitstream of Icarus. + * Operation: + * No detection implement. + * Input: 64B = 32B midstate + 20B fill bytes + last 12 bytes of block head. + * Return: send back 32bits immediately when Icarus found a valid nonce. + * no query protocol implemented here, if no data send back in ~11.3 + * seconds (full cover time on 32bit nonce range by 380MH/s speed) + * just send another work. + * Notice: + * 1. Icarus will start calculate when you push a work to them, even they + * are busy. + * 2. 
The 2 FPGAs on Icarus will distribute the job, one will calculate the + * 0 ~ 7FFFFFFF, another one will cover the 80000000 ~ FFFFFFFF. + * 3. It's possible for 2 FPGAs both find valid nonce in the meantime, the 2 + * valid nonce will all be send back. + * 4. Icarus will stop work when: a valid nonce has been found or 32 bits + * nonce range is completely calculated. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#ifdef WIN32 +#include +#endif + +#include "compat.h" +#include "miner.h" +#include "usbutils.h" + +// The serial I/O speed - Linux uses a define 'B115200' in bits/termios.h +#define ICARUS_IO_SPEED 115200 + +#define ICARUS_BUF_SIZE 8 +// The size of a successful nonce read +#define ANT_READ_SIZE 5 +#define ICARUS_READ_SIZE 4 +#define ROCK_READ_SIZE 8 + +// Ensure the sizes are correct for the Serial read +#if (ICARUS_READ_SIZE != 4) +#error ICARUS_READ_SIZE must be 4 +#endif +#define ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1] +ASSERT1(sizeof(uint32_t) == 4); + +// TODO: USB? Different calculation? - see usbstats to work it out e.g. 1/2 of normal send time +// or even use that number? 1/2 +// #define ICARUS_READ_TIME(baud) ((double)ICARUS_READ_SIZE * (double)8.0 / (double)(baud)) +// maybe 1ms? +#define ICARUS_READ_TIME(baud) (0.001) + +// USB ms timeout to wait - user specified timeouts are multiples of this +#define ICA_WAIT_TIMEOUT 100 +#define ANT_WAIT_TIMEOUT 10 +#define AU3_WAIT_TIMEOUT 1 +#define ICARUS_WAIT_TIMEOUT (info->u3 ? AU3_WAIT_TIMEOUT : (info->ant ? 
ANT_WAIT_TIMEOUT : ICA_WAIT_TIMEOUT)) + +#define ICARUS_CMR2_TIMEOUT 1 + +// Defined in multiples of ICARUS_WAIT_TIMEOUT +// Must of course be greater than ICARUS_READ_COUNT_TIMING/ICARUS_WAIT_TIMEOUT +// There's no need to have this bigger, since the overhead/latency of extra work +// is pretty small once you get beyond a 10s nonce range time and 10s also +// means that nothing slower than 429MH/s can go idle so most icarus devices +// will always mine without idling +#define ICARUS_READ_TIME_LIMIT_MAX 100 + +// In timing mode: Default starting value until an estimate can be obtained +// 5000 ms allows for up to a ~840MH/s device +#define ICARUS_READ_COUNT_TIMING 5000 + +// Antminer USB is > 1GH/s so use a shorter limit +// 1000 ms allows for up to ~4GH/s device +#define ANTUSB_READ_COUNT_TIMING 1000 + +#define ANTU3_READ_COUNT_TIMING 100 + +#define ICARUS_READ_COUNT_MIN ICARUS_WAIT_TIMEOUT +#define SECTOMS(s) ((int)((s) * 1000)) +// How many ms below the expected completion time to abort work +// extra in case the last read is delayed +#define ICARUS_READ_REDUCE ((int)(ICARUS_WAIT_TIMEOUT * 1.5)) + +// For a standard Icarus REV3 (to 5 places) +// Since this rounds up a the last digit - it is a slight overestimate +// Thus the hash rate will be a VERY slight underestimate +// (by a lot less than the displayed accuracy) +// Minor inaccuracy of these numbers doesn't affect the work done, +// only the displayed MH/s +#define ICARUS_REV3_HASH_TIME 0.0000000026316 +#define LANCELOT_HASH_TIME 0.0000000025000 +#define ASICMINERUSB_HASH_TIME 0.0000000029761 +// TODO: What is it? 
+#define CAIRNSMORE1_HASH_TIME 0.0000000027000 +// Per FPGA +#define CAIRNSMORE2_HASH_TIME 0.0000000066600 +#define NANOSEC 1000000000.0 +#define ANTMINERUSB_HASH_MHZ 0.000000125 +#define ANTMINERUSB_HASH_TIME (ANTMINERUSB_HASH_MHZ / (double)(opt_anu_freq)) +#define ANTU3_HASH_MHZ 0.0000000032 +#define ANTU3_HASH_TIME (ANTU3_HASH_MHZ / (double)(opt_au3_freq)) + +#define CAIRNSMORE2_INTS 4 + +// Icarus Rev3 doesn't send a completion message when it finishes +// the full nonce range, so to avoid being idle we must abort the +// work (by starting a new work item) shortly before it finishes +// +// Thus we need to estimate 2 things: +// 1) How many hashes were done if the work was aborted +// 2) How high can the timeout be before the Icarus is idle, +// to minimise the number of work items started +// We set 2) to 'the calculated estimate' - ICARUS_READ_REDUCE +// to ensure the estimate ends before idle +// +// The simple calculation used is: +// Tn = Total time in seconds to calculate n hashes +// Hs = seconds per hash +// Xn = number of hashes +// W = code/usb overhead per work +// +// Rough but reasonable estimate: +// Tn = Hs * Xn + W (of the form y = mx + b) +// +// Thus: +// Line of best fit (using least squares) +// +// Hs = (n*Sum(XiTi)-Sum(Xi)*Sum(Ti))/(n*Sum(Xi^2)-Sum(Xi)^2) +// W = Sum(Ti)/n - (Hs*Sum(Xi))/n +// +// N.B. 
W is less when aborting work since we aren't waiting for the reply +// to be transferred back (ICARUS_READ_TIME) +// Calculating the hashes aborted at n seconds is thus just n/Hs +// (though this is still a slight overestimate due to code delays) +// + +// Both below must be exceeded to complete a set of data +// Minimum how long after the first, the last data point must be +#define HISTORY_SEC 60 +// Minimum how many points a single ICARUS_HISTORY should have +#define MIN_DATA_COUNT 5 +// The value MIN_DATA_COUNT used is doubled each history until it exceeds: +#define MAX_MIN_DATA_COUNT 100 + +static struct timeval history_sec = { HISTORY_SEC, 0 }; + +// Store the last INFO_HISTORY data sets +// [0] = current data, not yet ready to be included as an estimate +// Each new data set throws the last old set off the end thus +// keeping a ongoing average of recent data +#define INFO_HISTORY 10 + +struct ICARUS_HISTORY { + struct timeval finish; + double sumXiTi; + double sumXi; + double sumTi; + double sumXi2; + uint32_t values; + uint32_t hash_count_min; + uint32_t hash_count_max; +}; + +enum timing_mode { MODE_DEFAULT, MODE_SHORT, MODE_LONG, MODE_VALUE }; + +static const char *MODE_DEFAULT_STR = "default"; +static const char *MODE_SHORT_STR = "short"; +static const char *MODE_SHORT_STREQ = "short="; +static const char *MODE_LONG_STR = "long"; +static const char *MODE_LONG_STREQ = "long="; +static const char *MODE_VALUE_STR = "value"; +static const char *MODE_UNKNOWN_STR = "unknown"; + +#define MAX_DEVICE_NUM 100 +#define MAX_WORK_BUFFER_SIZE 2 +#define MAX_CHIP_NUM 24 +// Set it to 3, 5 or 9 +#define NONCE_CORRECTION_TIMES 5 +#define MAX_TRIES 4 +#define RM_CMD_MASK 0x0F +#define RM_STATUS_MASK 0xF0 +#define RM_CHIP_MASK 0x3F +#define RM_PRODUCT_MASK 0xC0 +#define RM_PRODUCT_RBOX 0x00 +#define RM_PRODUCT_T1 0x40 +#define RM_PRODUCT_T2 0x80 +#define RM_PRODUCT_TEST 0xC0 + +#if (NONCE_CORRECTION_TIMES == 5) +static int32_t rbox_corr_values[] = {0, 1, -1, -2, -4}; 
+#endif +#if (NONCE_CORRECTION_TIMES == 9) +static int32_t rbox_corr_values[] = {0, 1, -1, 2, -2, 3, -3, 4, -4}; +#endif +#if (NONCE_CORRECTION_TIMES == 3) +static int32_t rbox_corr_values[] = {0, 1, -1}; +#endif + +#define ANT_QUEUE_NUM 36 + +typedef enum { + NONCE_DATA1_OFFSET = 0, + NONCE_DATA2_OFFSET, + NONCE_DATA3_OFFSET, + NONCE_DATA4_OFFSET, + NONCE_TASK_CMD_OFFSET, + NONCE_CHIP_NO_OFFSET, + NONCE_TASK_NO_OFFSET, + NONCE_COMMAND_OFFSET, + NONCE_MAX_OFFSET +} NONCE_OFFSET; + +typedef enum { + NONCE_DATA_CMD = 0, + NONCE_TASK_COMPLETE_CMD, + NONCE_GET_TASK_CMD, +} NONCE_COMMAND; + +typedef struct nonce_data { + int chip_no; + unsigned int task_no ; + unsigned char work_state; + int cmd_value; +} NONCE_DATA; + +typedef enum { + ROCKMINER_RBOX = 0, + ROCKMINER_T1, + ROCKMINER_T2, + ROCKMINER_MAX +} ROCKMINER_PRODUCT_T; + +typedef struct rockminer_chip_info { + unsigned char freq; + int error_cnt; + time_t last_received_task_complete_time; +} ROCKMINER_CHIP_INFO; + +typedef struct rockminer_device_info { + unsigned char detect_chip_no; + unsigned char chip_max; + unsigned char product_id; + float min_frq; + float def_frq; + float max_frq; + ROCKMINER_CHIP_INFO chip[MAX_CHIP_NUM]; + time_t dev_detect_time; +} ROCKMINER_DEVICE_INFO; + +struct ICARUS_INFO { + enum sub_ident ident; + int intinfo; + + // time to calculate the golden_ob + uint64_t golden_hashes; + struct timeval golden_tv; + + struct ICARUS_HISTORY history[INFO_HISTORY+1]; + uint32_t min_data_count; + + int timeout; + + // seconds per Hash + double Hs; + // ms til we abort + int read_time; + // ms limit for (short=/long=) read_time + int read_time_limit; + // How long without hashes is considered a failed device + int fail_time; + + enum timing_mode timing_mode; + bool do_icarus_timing; + + double fullnonce; + int count; + double W; + uint32_t values; + uint64_t hash_count_range; + + // Determine the cost of history processing + // (which will only affect W) + uint64_t history_count; + struct timeval 
history_time; + + // icarus-options + int baud; + int work_division; + int fpga_count; + uint32_t nonce_mask; + + uint8_t cmr2_speed; + bool speed_next_work; + bool flash_next_work; + + int nonce_size; + + bool failing; + + pthread_mutex_t lock; + + ROCKMINER_DEVICE_INFO rmdev; + struct work *base_work; // For when we roll work + struct work *g_work[MAX_CHIP_NUM][MAX_WORK_BUFFER_SIZE]; + uint32_t last_nonce[MAX_CHIP_NUM][MAX_WORK_BUFFER_SIZE]; + char rock_init[64]; + uint64_t nonces_checked; + uint64_t nonces_correction_times; + uint64_t nonces_correction_tests; + uint64_t nonces_fail; + uint64_t nonces_correction[NONCE_CORRECTION_TIMES]; + + struct work **antworks; + int nonces; + int workid; + bool ant; + bool u3; +}; + +#define ICARUS_MIDSTATE_SIZE 32 +#define ICARUS_UNUSED_SIZE 16 +#define ICARUS_WORK_SIZE 12 + +#define ICARUS_WORK_DATA_OFFSET 64 + +#define ICARUS_CMR2_SPEED_FACTOR 2.5 +#define ICARUS_CMR2_SPEED_MIN_INT 100 +#define ICARUS_CMR2_SPEED_DEF_INT 180 +#define ICARUS_CMR2_SPEED_MAX_INT 220 +#define CMR2_INT_TO_SPEED(_speed) ((uint8_t)((float)_speed / ICARUS_CMR2_SPEED_FACTOR)) +#define ICARUS_CMR2_SPEED_MIN CMR2_INT_TO_SPEED(ICARUS_CMR2_SPEED_MIN_INT) +#define ICARUS_CMR2_SPEED_DEF CMR2_INT_TO_SPEED(ICARUS_CMR2_SPEED_DEF_INT) +#define ICARUS_CMR2_SPEED_MAX CMR2_INT_TO_SPEED(ICARUS_CMR2_SPEED_MAX_INT) +#define ICARUS_CMR2_SPEED_INC 1 +#define ICARUS_CMR2_SPEED_DEC -1 +#define ICARUS_CMR2_SPEED_FAIL -10 + +#define ICARUS_CMR2_PREFIX ((uint8_t)0xB7) +#define ICARUS_CMR2_CMD_SPEED ((uint8_t)0) +#define ICARUS_CMR2_CMD_FLASH ((uint8_t)1) +#define ICARUS_CMR2_DATA_FLASH_OFF ((uint8_t)0) +#define ICARUS_CMR2_DATA_FLASH_ON ((uint8_t)1) +#define ICARUS_CMR2_CHECK ((uint8_t)0x6D) + +#define ANT_UNUSED_SIZE 15 + +struct ICARUS_WORK { + uint8_t midstate[ICARUS_MIDSTATE_SIZE]; + // These 4 bytes are for CMR2 bitstreams that handle MHz adjustment + uint8_t check; + uint8_t data; + uint8_t cmd; + uint8_t prefix; + uint8_t unused[ANT_UNUSED_SIZE]; + uint8_t id; // 
Used only by ANT, otherwise unused by other icarus + uint8_t work[ICARUS_WORK_SIZE]; +}; + +#define ANT_U1_DEFFREQ 200 +#define ANT_U3_DEFFREQ 225 +#define ANT_U3_MAXFREQ 250 +struct { + float freq; + uint16_t hex; +} u3freqtable[] = { + { 100, 0x0783 }, + { 125, 0x0983 }, + { 150, 0x0b83 }, + { 175, 0x0d83 }, + { 193.75, 0x0f03 }, + { 196.88, 0x1f07 }, + { 200, 0x0782 }, + { 206.25, 0x1006 }, + { 212.5, 0x1086 }, + { 218.75, 0x1106 }, + { 225, 0x0882 }, + { 237.5, 0x1286 }, + { 243.75, 0x1306 }, + { 250, 0x0982 }, +}; + +#define END_CONDITION 0x0000ffff + +// Looking for options in --icarus-timing and --icarus-options: +// +// Code increments this each time we start to look at a device +// However, this means that if other devices are checked by +// the Icarus code (e.g. Avalon only as at 20130517) +// they will count in the option offset +// +// This, however, is deterministic so that's OK +// +// If we were to increment after successfully finding an Icarus +// that would be random since an Icarus may fail and thus we'd +// not be able to predict the option order +// +// Devices are checked in the order libusb finds them which is ? 
+// +static int option_offset = -1; + +/* +#define ICA_BUFSIZ (0x200) + +static void transfer_read(struct cgpu_info *icarus, uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, char *buf, int bufsiz, int *amount, enum usb_cmds cmd) +{ + int err; + + err = usb_transfer_read(icarus, request_type, bRequest, wValue, wIndex, buf, bufsiz, amount, cmd); + + applog(LOG_DEBUG, "%s: cgid %d %s got err %d", + icarus->drv->name, icarus->cgminer_id, + usb_cmdname(cmd), err); +} +*/ + +static void _transfer(struct cgpu_info *icarus, uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, uint32_t *data, int siz, enum usb_cmds cmd) +{ + int err; + + err = usb_transfer_data(icarus, request_type, bRequest, wValue, wIndex, data, siz, cmd); + + applog(LOG_DEBUG, "%s: cgid %d %s got err %d", + icarus->drv->name, icarus->cgminer_id, + usb_cmdname(cmd), err); +} + +#define transfer(icarus, request_type, bRequest, wValue, wIndex, cmd) \ + _transfer(icarus, request_type, bRequest, wValue, wIndex, NULL, 0, cmd) + +static void icarus_initialise(struct cgpu_info *icarus, int baud) +{ + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data); + uint16_t wValue, wIndex; + enum sub_ident ident; + int interface; + + if (icarus->usbinfo.nodev) + return; + + interface = _usb_interface(icarus, info->intinfo); + ident = usb_ident(icarus); + + switch (ident) { + case IDENT_BLT: + case IDENT_LLT: + case IDENT_CMR1: + case IDENT_CMR2: + // Reset + transfer(icarus, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, FTDI_VALUE_RESET, + interface, C_RESET); + + if (icarus->usbinfo.nodev) + return; + + // Latency + _usb_ftdi_set_latency(icarus, info->intinfo); + + if (icarus->usbinfo.nodev) + return; + + // Set data control + transfer(icarus, FTDI_TYPE_OUT, FTDI_REQUEST_DATA, FTDI_VALUE_DATA_BLT, + interface, C_SETDATA); + + if (icarus->usbinfo.nodev) + return; + + // default to BLT/LLT 115200 + wValue = FTDI_VALUE_BAUD_BLT; + wIndex = FTDI_INDEX_BAUD_BLT; + + if 
(ident == IDENT_CMR1 || ident == IDENT_CMR2) { + switch (baud) { + case 115200: + wValue = FTDI_VALUE_BAUD_CMR_115; + wIndex = FTDI_INDEX_BAUD_CMR_115; + break; + case 57600: + wValue = FTDI_VALUE_BAUD_CMR_57; + wIndex = FTDI_INDEX_BAUD_CMR_57; + break; + default: + quit(1, "icarus_intialise() invalid baud (%d) for Cairnsmore1", baud); + break; + } + } + + // Set the baud + transfer(icarus, FTDI_TYPE_OUT, FTDI_REQUEST_BAUD, wValue, + (wIndex & 0xff00) | interface, C_SETBAUD); + + if (icarus->usbinfo.nodev) + return; + + // Set Modem Control + transfer(icarus, FTDI_TYPE_OUT, FTDI_REQUEST_MODEM, FTDI_VALUE_MODEM, + interface, C_SETMODEM); + + if (icarus->usbinfo.nodev) + return; + + // Set Flow Control + transfer(icarus, FTDI_TYPE_OUT, FTDI_REQUEST_FLOW, FTDI_VALUE_FLOW, + interface, C_SETFLOW); + + if (icarus->usbinfo.nodev) + return; + + // Clear any sent data + transfer(icarus, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, FTDI_VALUE_PURGE_TX, + interface, C_PURGETX); + + if (icarus->usbinfo.nodev) + return; + + // Clear any received data + transfer(icarus, FTDI_TYPE_OUT, FTDI_REQUEST_RESET, FTDI_VALUE_PURGE_RX, + interface, C_PURGERX); + break; + case IDENT_ICA: + // Set Data Control + transfer(icarus, PL2303_CTRL_OUT, PL2303_REQUEST_CTRL, PL2303_VALUE_CTRL, + interface, C_SETDATA); + + if (icarus->usbinfo.nodev) + return; + + // Set Line Control + uint32_t ica_data[2] = { PL2303_VALUE_LINE0, PL2303_VALUE_LINE1 }; + _transfer(icarus, PL2303_CTRL_OUT, PL2303_REQUEST_LINE, PL2303_VALUE_LINE, + interface, &ica_data[0], PL2303_VALUE_LINE_SIZE, C_SETLINE); + + if (icarus->usbinfo.nodev) + return; + + // Vendor + transfer(icarus, PL2303_VENDOR_OUT, PL2303_REQUEST_VENDOR, PL2303_VALUE_VENDOR, + interface, C_VENDOR); + break; + case IDENT_AMU: + case IDENT_ANU: + case IDENT_AU3: + case IDENT_LIN: + // Enable the UART + transfer(icarus, CP210X_TYPE_OUT, CP210X_REQUEST_IFC_ENABLE, + CP210X_VALUE_UART_ENABLE, + interface, C_ENABLE_UART); + + if (icarus->usbinfo.nodev) + return; + + // 
Set data control + transfer(icarus, CP210X_TYPE_OUT, CP210X_REQUEST_DATA, CP210X_VALUE_DATA, + interface, C_SETDATA); + + if (icarus->usbinfo.nodev) + return; + + // Set the baud + uint32_t data = CP210X_DATA_BAUD; + _transfer(icarus, CP210X_TYPE_OUT, CP210X_REQUEST_BAUD, 0, + interface, &data, sizeof(data), C_SETBAUD); + break; + case IDENT_AVA: + break; + default: + quit(1, "icarus_intialise() called with invalid %s cgid %i ident=%d", + icarus->drv->name, icarus->cgminer_id, ident); + } +} + +static void rev(unsigned char *s, size_t l) +{ + size_t i, j; + unsigned char t; + + for (i = 0, j = l - 1; i < j; i++, j--) { + t = s[i]; + s[i] = s[j]; + s[j] = t; + } +} + +#define ICA_NONCE_ERROR -1 +#define ICA_NONCE_OK 0 +#define ICA_NONCE_RESTART 1 +#define ICA_NONCE_TIMEOUT 2 + +static int icarus_get_nonce(struct cgpu_info *icarus, unsigned char *buf, struct timeval *tv_start, + struct timeval *tv_finish, struct thr_info *thr, int read_time) +{ + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data); + int err, amt, rc; + + if (icarus->usbinfo.nodev) + return ICA_NONCE_ERROR; + + cgtime(tv_start); + err = usb_read_ii_timeout_cancellable(icarus, info->intinfo, (char *)buf, + info->nonce_size, &amt, read_time, + C_GETRESULTS); + cgtime(tv_finish); + + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s %i: Comms error (rerr=%d amt=%d)", icarus->drv->name, + icarus->device_id, err, amt); + dev_error(icarus, REASON_DEV_COMMS_ERROR); + return ICA_NONCE_ERROR; + } + + if (amt >= info->nonce_size) + return ICA_NONCE_OK; + + rc = SECTOMS(tdiff(tv_finish, tv_start)); + if (thr && thr->work_restart) { + applog(LOG_DEBUG, "Icarus Read: Work restart at %d ms", rc); + return ICA_NONCE_RESTART; + } + + if (amt > 0) + applog(LOG_DEBUG, "Icarus Read: Timeout reading for %d ms", rc); + else + applog(LOG_DEBUG, "Icarus Read: No data for %d ms", rc); + return ICA_NONCE_TIMEOUT; +} + + +static const char *timing_mode_str(enum timing_mode timing_mode) +{ 
	switch(timing_mode) {
	case MODE_DEFAULT:
		return MODE_DEFAULT_STR;
	case MODE_SHORT:
		return MODE_SHORT_STR;
	case MODE_LONG:
		return MODE_LONG_STR;
	case MODE_VALUE:
		return MODE_VALUE_STR;
	default:
		return MODE_UNKNOWN_STR;
	}
}

/* Parse the this_option_offset'th comma-separated field of the global
 * --icarus-timing option and initialise info->Hs (per-hash time),
 * info->read_time, info->read_time_limit and the timing mode for this
 * device.  Recognised field forms: "short[=limit]", "long[=limit]",
 * a numeric ns-per-hash value "ns[=read_time]", or anything else for
 * the per-ident defaults. */
static void set_timing_mode(int this_option_offset, struct cgpu_info *icarus)
{
	struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data);
	int read_count_timing = 0;
	enum sub_ident ident;
	double Hs, fail_time;
	char buf[BUFSIZ+1];
	char *ptr, *comma, *eq;
	size_t max;
	int i;

	if (opt_icarus_timing == NULL)
		buf[0] = '\0';
	else {
		// Skip to the field for this device instance
		ptr = opt_icarus_timing;
		for (i = 0; i < this_option_offset; i++) {
			comma = strchr(ptr, ',');
			if (comma == NULL)
				break;
			ptr = comma + 1;
		}

		comma = strchr(ptr, ',');
		if (comma == NULL)
			max = strlen(ptr);
		else
			max = comma - ptr;

		if (max > BUFSIZ)
			max = BUFSIZ;
		strncpy(buf, ptr, max);
		buf[max] = '\0';
	}

	// Per-hardware default hash time and read count
	ident = usb_ident(icarus);
	switch (ident) {
	case IDENT_ICA:
	case IDENT_AVA:
		info->Hs = ICARUS_REV3_HASH_TIME;
		read_count_timing = ICARUS_READ_COUNT_TIMING;
		break;
	case IDENT_BLT:
	case IDENT_LLT:
		info->Hs = LANCELOT_HASH_TIME;
		read_count_timing = ICARUS_READ_COUNT_TIMING;
		break;
	case IDENT_AMU:
		info->Hs = ASICMINERUSB_HASH_TIME;
		read_count_timing = ICARUS_READ_COUNT_TIMING;
		break;
	case IDENT_CMR1:
		info->Hs = CAIRNSMORE1_HASH_TIME;
		read_count_timing = ICARUS_READ_COUNT_TIMING;
		break;
	case IDENT_CMR2:
		info->Hs = CAIRNSMORE2_HASH_TIME;
		read_count_timing = ICARUS_READ_COUNT_TIMING;
		break;
	case IDENT_ANU:
		info->Hs = ANTMINERUSB_HASH_TIME;
		read_count_timing = ANTUSB_READ_COUNT_TIMING;
		break;
	case IDENT_AU3:
		info->Hs = ANTU3_HASH_TIME;
		read_count_timing = ANTU3_READ_COUNT_TIMING;
		break;
	default:
		quit(1, "Icarus get_options() called with invalid %s ident=%d",
		     icarus->drv->name, ident);
	}

	info->read_time = 0;
	info->read_time_limit = 0; // 0 = no limit

	if (strcasecmp(buf, MODE_SHORT_STR) == 0) {
		// short
		info->read_time = read_count_timing;

		info->timing_mode = MODE_SHORT;
		info->do_icarus_timing = true;
	} else if (strncasecmp(buf, MODE_SHORT_STREQ, strlen(MODE_SHORT_STREQ)) == 0) {
		// short=limit
		info->read_time = read_count_timing;

		info->timing_mode = MODE_SHORT;
		info->do_icarus_timing = true;

		info->read_time_limit = atoi(&buf[strlen(MODE_SHORT_STREQ)]);
		if (info->read_time_limit < 0)
			info->read_time_limit = 0;
		if (info->read_time_limit > ICARUS_READ_TIME_LIMIT_MAX)
			info->read_time_limit = ICARUS_READ_TIME_LIMIT_MAX;
	} else if (strcasecmp(buf, MODE_LONG_STR) == 0) {
		// long
		info->read_time = read_count_timing;

		info->timing_mode = MODE_LONG;
		info->do_icarus_timing = true;
	} else if (strncasecmp(buf, MODE_LONG_STREQ, strlen(MODE_LONG_STREQ)) == 0) {
		// long=limit
		info->read_time = read_count_timing;

		info->timing_mode = MODE_LONG;
		info->do_icarus_timing = true;

		info->read_time_limit = atoi(&buf[strlen(MODE_LONG_STREQ)]);
		if (info->read_time_limit < 0)
			info->read_time_limit = 0;
		if (info->read_time_limit > ICARUS_READ_TIME_LIMIT_MAX)
			info->read_time_limit = ICARUS_READ_TIME_LIMIT_MAX;
	} else if ((Hs = atof(buf)) != 0) {
		// ns[=read_time] : user-supplied ns-per-hash value
		info->Hs = Hs / NANOSEC;
		info->fullnonce = info->Hs * (((double)0xffffffff) + 1);

		if ((eq = strchr(buf, '=')) != NULL)
			info->read_time = atoi(eq+1) * ICARUS_WAIT_TIMEOUT;

		if (info->read_time < ICARUS_READ_COUNT_MIN)
			info->read_time = SECTOMS(info->fullnonce) - ICARUS_READ_REDUCE;

		if (unlikely(info->read_time < ICARUS_READ_COUNT_MIN))
			info->read_time = ICARUS_READ_COUNT_MIN;

		info->timing_mode = MODE_VALUE;
		info->do_icarus_timing = false;
	} else {
		// Anything else in buf just uses DEFAULT mode

		info->fullnonce = info->Hs * (((double)0xffffffff) + 1);

		if ((eq = strchr(buf, '=')) != NULL)
			info->read_time = atoi(eq+1) * ICARUS_WAIT_TIMEOUT;

		if (info->read_time < ICARUS_READ_COUNT_MIN)
			info->read_time = SECTOMS(info->fullnonce) - ICARUS_READ_REDUCE;

		if (unlikely(info->read_time < ICARUS_READ_COUNT_MIN))
			info->read_time = ICARUS_READ_COUNT_MIN;

		info->timing_mode = MODE_DEFAULT;
		info->do_icarus_timing = false;
	}

	info->min_data_count = MIN_DATA_COUNT;

	// All values are in multiples of ICARUS_WAIT_TIMEOUT
	info->read_time_limit *= ICARUS_WAIT_TIMEOUT;

	applog(LOG_DEBUG, "%s: cgid %d Init: mode=%s read_time=%dms limit=%dms Hs=%e",
	       icarus->drv->name, icarus->cgminer_id,
	       timing_mode_str(info->timing_mode),
	       info->read_time, info->read_time_limit, info->Hs);

	/* Set the time to detect a dead device to 30 full nonce ranges. */
	fail_time = info->Hs * 0xffffffffull * 30.0;
	/* Integer accuracy is definitely enough. */
	info->fail_time = fail_time;
}

/* Return the nonce mask matching a work_division of 1, 2, 4 or 8
 * (the top log2(work_division) bits select the FPGA). */
static uint32_t mask(int work_division)
{
	uint32_t nonce_mask = 0x7fffffff;

	// yes we can calculate these, but this way it's easy to see what they are
	switch (work_division) {
	case 1:
		nonce_mask = 0xffffffff;
		break;
	case 2:
		nonce_mask = 0x7fffffff;
		break;
	case 4:
		nonce_mask = 0x3fffffff;
		break;
	case 8:
		nonce_mask = 0x1fffffff;
		break;
	default:
		quit(1, "Invalid2 icarus-options for work_division (%d) must be 1, 2, 4 or 8", work_division);
	}

	return nonce_mask;
}

/* Parse the this_option_offset'th comma-separated field of the global
 * --icarus-options string ("baud[:work_division[:fpga_count]]") into the
 * output parameters, after setting per-ident defaults. */
static void get_options(int this_option_offset, struct cgpu_info *icarus, int *baud, int *work_division, int *fpga_count)
{
	char buf[BUFSIZ+1];
	char *ptr, *comma, *colon, *colon2;
	enum sub_ident ident;
	size_t max;
	int i, tmp;

	if (opt_icarus_options == NULL)
		buf[0] = '\0';
	else {
		// Skip to the field for this device instance
		ptr = opt_icarus_options;
		for (i = 0; i < this_option_offset; i++) {
			comma = strchr(ptr, ',');
			if (comma == NULL)
				break;
			ptr = comma + 1;
		}

		comma = strchr(ptr, ',');
		if (comma == NULL)
			max = strlen(ptr);
		else
			max = comma - ptr;

		if (max > BUFSIZ)
			max = BUFSIZ;
		strncpy(buf, ptr, max);
		buf[max] = '\0';
	}

	ident = 
usb_ident(icarus); + switch (ident) { + case IDENT_ICA: + case IDENT_BLT: + case IDENT_LLT: + case IDENT_AVA: + *baud = ICARUS_IO_SPEED; + *work_division = 2; + *fpga_count = 2; + break; + case IDENT_AMU: + case IDENT_ANU: + case IDENT_AU3: + *baud = ICARUS_IO_SPEED; + *work_division = 1; + *fpga_count = 1; + break; + case IDENT_CMR1: + *baud = ICARUS_IO_SPEED; + *work_division = 2; + *fpga_count = 2; + break; + case IDENT_CMR2: + *baud = ICARUS_IO_SPEED; + *work_division = 1; + *fpga_count = 1; + break; + default: + quit(1, "Icarus get_options() called with invalid %s ident=%d", + icarus->drv->name, ident); + } + + if (*buf) { + colon = strchr(buf, ':'); + if (colon) + *(colon++) = '\0'; + + if (*buf) { + tmp = atoi(buf); + switch (tmp) { + case 115200: + *baud = 115200; + break; + case 57600: + *baud = 57600; + break; + default: + quit(1, "Invalid icarus-options for baud (%s) must be 115200 or 57600", buf); + } + } + + if (colon && *colon) { + colon2 = strchr(colon, ':'); + if (colon2) + *(colon2++) = '\0'; + + if (*colon) { + tmp = atoi(colon); + if (tmp == 1 || tmp == 2 || tmp == 4 || tmp == 8) { + *work_division = tmp; + *fpga_count = tmp; // default to the same + } else { + quit(1, "Invalid icarus-options for work_division (%s) must be 1, 2, 4 or 8", colon); + } + } + + if (colon2 && *colon2) { + tmp = atoi(colon2); + if (tmp > 0 && tmp <= *work_division) + *fpga_count = tmp; + else { + quit(1, "Invalid icarus-options for fpga_count (%s) must be >0 and <=work_division (%d)", colon2, *work_division); + } + } + } + } +} + +unsigned char crc5(unsigned char *ptr, unsigned char len) +{ + unsigned char i, j, k; + unsigned char crc = 0x1f; + + unsigned char crcin[5] = {1, 1, 1, 1, 1}; + unsigned char crcout[5] = {1, 1, 1, 1, 1}; + unsigned char din = 0; + + j = 0x80; + k = 0; + for (i = 0; i < len; i++) { + if (*ptr & j) + din = 1; + else + din = 0; + crcout[0] = crcin[4] ^ din; + crcout[1] = crcin[0]; + crcout[2] = crcin[1] ^ crcin[4] ^ din; + crcout[3] = crcin[2]; 
+ crcout[4] = crcin[3]; + + j = j >> 1; + k++; + if (k == 8) { + j = 0x80; + k = 0; + ptr++; + } + memcpy(crcin, crcout, 5); + } + crc = 0; + if (crcin[4]) + crc |= 0x10; + if (crcin[3]) + crc |= 0x08; + if (crcin[2]) + crc |= 0x04; + if (crcin[1]) + crc |= 0x02; + if (crcin[0]) + crc |= 0x01; + return crc; +} + +static uint16_t anu_find_freqhex(void) +{ + float fout, best_fout = opt_anu_freq; + int od, nf, nr, no, n, m, bs; + uint16_t anu_freq_hex = 0; + float best_diff = 1000; + + if (!best_fout) + best_fout = ANT_U1_DEFFREQ; + + for (od = 0; od < 4; od++) { + no = 1 << od; + for (n = 0; n < 16; n++) { + nr = n + 1; + for (m = 0; m < 64; m++) { + nf = m + 1; + fout = 25 * (float)nf /((float)(nr) * (float)(no)); + if (fabsf(fout - opt_anu_freq) > best_diff) + continue; + if (500 <= (fout * no) && (fout * no) <= 1000) + bs = 1; + else + bs = 0; + best_diff = fabsf(fout - opt_anu_freq); + best_fout = fout; + anu_freq_hex = (bs << 14) | (m << 7) | (n << 2) | od; + if (fout == opt_anu_freq) { + applog(LOG_DEBUG, "ANU found exact frequency %.1f with hex %04x", + opt_anu_freq, anu_freq_hex); + goto out; + } + } + } + } + applog(LOG_NOTICE, "ANU found nearest frequency %.1f with hex %04x", best_fout, + anu_freq_hex); +out: + return anu_freq_hex; +} + +static uint16_t anu3_find_freqhex(void) +{ + int i = 0, freq = opt_au3_freq, u3freq; + uint16_t anu_freq_hex = 0x0882; + + if (!freq) + freq = ANT_U3_DEFFREQ; + + do { + u3freq = u3freqtable[i].freq; + if (u3freq <= freq) + anu_freq_hex = u3freqtable[i].hex; + i++; + } while (u3freq < ANT_U3_MAXFREQ); + + return anu_freq_hex; +} + +static bool set_anu_freq(struct cgpu_info *icarus, struct ICARUS_INFO *info, uint16_t anu_freq_hex) +{ + unsigned char cmd_buf[4], rdreg_buf[4]; + int amount, err; + char buf[512]; + + if (!anu_freq_hex) + anu_freq_hex = anu_find_freqhex(); + memset(cmd_buf, 0, 4); + memset(rdreg_buf, 0, 4); + cmd_buf[0] = 2 | 0x80; + cmd_buf[1] = (anu_freq_hex & 0xff00u) >> 8; + cmd_buf[2] = (anu_freq_hex & 
0x00ffu); + cmd_buf[3] = crc5(cmd_buf, 27); + + rdreg_buf[0] = 4 | 0x80; + rdreg_buf[1] = 0; //16-23 + rdreg_buf[2] = 0x04; //8-15 + rdreg_buf[3] = crc5(rdreg_buf, 27); + + applog(LOG_DEBUG, "%s %i: Send frequency %02x%02x%02x%02x", icarus->drv->name, icarus->device_id, + cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); + err = usb_write_ii(icarus, info->intinfo, (char *)cmd_buf, 4, &amount, C_ANU_SEND_CMD); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s %i: Write freq Comms error (werr=%d amount=%d)", + icarus->drv->name, icarus->device_id, err, amount); + return false; + } + err = usb_read_ii_timeout(icarus, info->intinfo, buf, 512, &amount, 100, C_GETRESULTS); + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s %i: Read freq Comms error (rerr=%d amount=%d)", + icarus->drv->name, icarus->device_id, err, amount); + return false; + } + + applog(LOG_DEBUG, "%s %i: Send freq getstatus %02x%02x%02x%02x", icarus->drv->name, icarus->device_id, + rdreg_buf[0], rdreg_buf[1], rdreg_buf[2], rdreg_buf[3]); + err = usb_write_ii(icarus, info->intinfo, (char *)cmd_buf, 4, &amount, C_ANU_SEND_RDREG); + if (err != LIBUSB_SUCCESS || amount != 4) { + applog(LOG_ERR, "%s %i: Write freq Comms error (werr=%d amount=%d)", + icarus->drv->name, icarus->device_id, err, amount); + return false; + } + err = usb_read_ii_timeout(icarus, info->intinfo, buf, 512, &amount, 100, C_GETRESULTS); + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_ERR, "%s %i: Read freq Comms error (rerr=%d amount=%d)", + icarus->drv->name, icarus->device_id, err, amount); + return false; + } + + return true; +} + +static void set_anu_volt(struct cgpu_info *icarus) +{ + unsigned char voltage_data[2], cmd_buf[4]; + char volt_buf[8]; + int err, amount; + + /* Allow a zero setting to imply not to try and set voltage */ + if (!opt_au3_volt) + return; + if (opt_au3_volt < 725 || opt_au3_volt > 850) { + applog(LOG_WARNING, "Invalid ANU voltage %d specified, must be 725-850", 
opt_au3_volt);
		return;
	}
	sprintf(volt_buf, "%04d", opt_au3_volt);
	hex2bin(voltage_data, volt_buf, 2);
	// Build the 4-byte voltage command; byte 3 carries CRC5 plus marker bits
	cmd_buf[0] = 0xaa;
	cmd_buf[1] = voltage_data[0];
	cmd_buf[1] &=0x0f;
	cmd_buf[1] |=0xb0;
	cmd_buf[2] = voltage_data[1];
	cmd_buf[3] = 0x00; //0-7
	cmd_buf[3] = crc5(cmd_buf, 4*8 - 5);
	cmd_buf[3] |= 0xc0;
	applog(LOG_INFO, "Send ANU voltage %02x%02x%02x%02x", cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]);
	cgsleep_ms(500);
	err = usb_write(icarus, (char * )cmd_buf, 4, &amount, C_ANU_SEND_VOLT);
	if (err != LIBUSB_SUCCESS || amount != 4)
		applog(LOG_ERR, "Write voltage Comms error (werr=%d amount=%d)", err, amount);
}

/* Reset Rockminer per-chip bookkeeping: clamp opt_rock_freq to the device
 * range and stamp every chip's last-complete time with "now". */
static void rock_init_last_received_task_complete_time(struct ICARUS_INFO *info)
{
	int i;

	if (opt_rock_freq < info->rmdev.min_frq ||
	    opt_rock_freq > info->rmdev.max_frq)
		opt_rock_freq = info->rmdev.def_frq;

	for (i = 0; i < MAX_CHIP_NUM; ++i) {
		info->rmdev.chip[i].last_received_task_complete_time = time(NULL);
		info->rmdev.chip[i].freq = opt_rock_freq/10 - 1;
		info->rmdev.chip[i].error_cnt = 0;
	}

	info->rmdev.dev_detect_time = time(NULL);
}


/* Drain any stale replies buffered on the device interface. */
static void icarus_clear(struct cgpu_info *icarus, struct ICARUS_INFO *info)
{
	char buf[512];
	int amt;

	do {
		usb_read_ii_timeout(icarus, info->intinfo, buf, 512, &amt, 100, C_GETRESULTS);
	} while (amt > 0);
}

/* Probe one USB device: send the known "golden" work and check that the
 * returned nonce matches.  Handles sub-ident quirks (Antminer frequency
 * setup, CMR2 multi-interface probing, deferring Rockminers to
 * rock_detect_one) and registers the cgpu on success.  Returns the new
 * cgpu or NULL. */
static struct cgpu_info *icarus_detect_one(struct libusb_device *dev, struct usb_find_devices *found)
{
	int this_option_offset = ++option_offset;
	struct ICARUS_INFO *info;
	struct timeval tv_start, tv_finish;

	// Block 171874 nonce = (0xa2870100) = 0x000187a2
	// N.B. golden_ob MUST take less time to calculate
	// than the timeout set in icarus_open()
	// This one takes ~0.53ms on Rev3 Icarus
	const char golden_ob[] =
		"4679ba4ec99876bf4bfe086082b40025"
		"4df6c356451471139a3afa71e48f544a"
		"00000000000000000000000000000000"
		"0000000087320b1a1426674f2fa722ce";

	const char golden_nonce[] = "000187a2";
	const uint32_t golden_nonce_val = 0x000187a2;
	unsigned char nonce_bin[ICARUS_READ_SIZE];
	struct ICARUS_WORK workdata;
	char *nonce_hex;
	int baud, uninitialised_var(work_division), uninitialised_var(fpga_count);
	bool anu_freqset = false;
	struct cgpu_info *icarus;
	int ret, err, amount, tries, i;
	bool ok;
	bool cmr2_ok[CAIRNSMORE2_INTS];
	int cmr2_count;

	if ((sizeof(workdata) << 1) != (sizeof(golden_ob) - 1))
		quithere(1, "Data and golden_ob sizes don't match");

	icarus = usb_alloc_cgpu(&icarus_drv, 1);

	if (!usb_init(icarus, dev, found))
		goto shin;

	get_options(this_option_offset, icarus, &baud, &work_division, &fpga_count);

	hex2bin((void *)(&workdata), golden_ob, sizeof(workdata));

	info = (struct ICARUS_INFO *)calloc(1, sizeof(struct ICARUS_INFO));
	if (unlikely(!info))
		quit(1, "Failed to malloc ICARUS_INFO");
	icarus->device_data = (void *)info;

	// Per-ident read timeout and CMR2 interface bookkeeping
	info->ident = usb_ident(icarus);
	switch (info->ident) {
	case IDENT_ICA:
	case IDENT_AVA:
	case IDENT_BLT:
	case IDENT_LLT:
	case IDENT_AMU:
	case IDENT_CMR1:
		info->timeout = ICARUS_WAIT_TIMEOUT;
		break;
	case IDENT_ANU:
	case IDENT_AU3:
		info->timeout = ANT_WAIT_TIMEOUT;
		break;
	case IDENT_CMR2:
		if (found->intinfo_count != CAIRNSMORE2_INTS) {
			quithere(1, "CMR2 Interface count (%d) isn't expected: %d",
				 found->intinfo_count,
				 CAIRNSMORE2_INTS);
		}
		info->timeout = ICARUS_CMR2_TIMEOUT;
		cmr2_count = 0;
		for (i = 0; i < CAIRNSMORE2_INTS; i++)
			cmr2_ok[i] = false;
		break;
	default:
		quit(1, "%s icarus_detect_one() invalid %s ident=%d",
		     icarus->drv->dname, icarus->drv->dname, info->ident);
	}

	info->nonce_size = ICARUS_READ_SIZE;
// For CMR2 test each USB Interface

retry:

	tries = 2;
	ok = false;
	while (!ok && tries-- > 0) {
		icarus_clear(icarus, info);
		icarus_initialise(icarus, baud);

		if (info->u3) {
			// Second pass: try treating the device as an Antminer U3
			uint16_t anu_freq_hex = anu3_find_freqhex();

			set_anu_volt(icarus);
			if (!set_anu_freq(icarus, info, anu_freq_hex)) {
				applog(LOG_WARNING, "%s %i: Failed to set frequency, too much overclock?",
				       icarus->drv->name, icarus->device_id);
				continue;
			}
			icarus->usbdev->ident = info->ident = IDENT_AU3;
			info->Hs = ANTU3_HASH_TIME;
			icarus->drv->name = "AU3";
			applog(LOG_DEBUG, "%s %i: Detected Antminer U3", icarus->drv->name,
			       icarus->device_id);
		} else if (info->ident == IDENT_ANU && !info->u3) {
			if (!set_anu_freq(icarus, info, 0)) {
				applog(LOG_WARNING, "%s %i: Failed to set frequency, too much overclock?",
				       icarus->drv->name, icarus->device_id);
				continue;
			}
		}

		err = usb_write_ii(icarus, info->intinfo,
				   (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK);

		if (err != LIBUSB_SUCCESS || amount != sizeof(workdata))
			continue;

		memset(nonce_bin, 0, sizeof(nonce_bin));
		ret = icarus_get_nonce(icarus, nonce_bin, &tv_start, &tv_finish, NULL, 300);
		if (ret != ICA_NONCE_OK)
			continue;

		// Extra buffered bytes distinguish Rockminer (4) and Antminer (1)
		if (info->nonce_size == ICARUS_READ_SIZE && usb_buffer_size(icarus) == 4) {
			applog(LOG_DEBUG, "%s %i: Detected Rockminer, deferring detection",
			       icarus->drv->name, icarus->device_id);
			usb_buffer_clear(icarus);
			break;

		}
		if (info->nonce_size == ICARUS_READ_SIZE && usb_buffer_size(icarus) == 1) {
			info->ant = true;
			usb_buffer_clear(icarus);
			icarus->usbdev->ident = info->ident = IDENT_ANU;
			info->nonce_size = ANT_READ_SIZE;
			info->Hs = ANTMINERUSB_HASH_TIME;
			icarus->drv->name = "ANU";
			applog(LOG_DEBUG, "%s %i: Detected Antminer U1/2/3, changing nonce size to %d",
			       icarus->drv->name, icarus->device_id, ANT_READ_SIZE);
		}

		nonce_hex = bin2hex(nonce_bin, sizeof(nonce_bin));
		if (strncmp(nonce_hex, golden_nonce, 8) == 0) {
			// An ANU needs one extra pass after the frequency is set
			if (info->ant && !anu_freqset)
				anu_freqset = true;
			else
				ok = true;
		} else {
			if (tries < 0 && info->ident != IDENT_CMR2) {
				applog(LOG_ERR,
				       "Icarus Detect: "
				       "Test failed at %s: get %s, should: %s",
				       icarus->device_path, nonce_hex, golden_nonce);
			}
		}
		free(nonce_hex);
	}

	if (!ok) {
		if (info->ident != IDENT_CMR2) {
			// Retry once assuming the device is a U3 before giving up
			if (info->u3)
				goto unshin;
			info->u3 = true;
			goto retry;
		}

		if (info->intinfo < CAIRNSMORE2_INTS-1) {
			info->intinfo++;
			goto retry;
		}
	} else {
		if (info->ident == IDENT_CMR2) {
			applog(LOG_DEBUG,
			       "Icarus Detect: "
			       "Test succeeded at %s i%d: got %s",
			       icarus->device_path, info->intinfo, golden_nonce);

			cmr2_ok[info->intinfo] = true;
			cmr2_count++;
			if (info->intinfo < CAIRNSMORE2_INTS-1) {
				info->intinfo++;
				goto retry;
			}
		}
	}

	if (info->ident == IDENT_CMR2) {
		if (cmr2_count == 0) {
			applog(LOG_ERR,
			       "Icarus Detect: Test failed at %s: for all %d CMR2 Interfaces",
			       icarus->device_path, CAIRNSMORE2_INTS);
			goto unshin;
		}

		// set the interface to the first one that succeeded
		for (i = 0; i < CAIRNSMORE2_INTS; i++)
			if (cmr2_ok[i]) {
				info->intinfo = i;
				break;
			}
	} else {
		applog(LOG_DEBUG,
		       "Icarus Detect: "
		       "Test succeeded at %s: got %s",
		       icarus->device_path, golden_nonce);
	}

	/* We have a real Icarus! */
	if (!add_cgpu(icarus))
		goto unshin;

	update_usb_stats(icarus);

	applog(LOG_INFO, "%s %d: Found at %s",
	       icarus->drv->name, icarus->device_id, icarus->device_path);

	if (info->ident == IDENT_CMR2) {
		applog(LOG_INFO, "%s %d: with %d Interface%s",
		       icarus->drv->name, icarus->device_id,
		       cmr2_count, cmr2_count > 1 ? "s" : "");

		// Assume 1 or 2 are running FPGA pairs
		if (cmr2_count < 3) {
			work_division = fpga_count = 2;
			info->Hs /= 2;
		}
	}

	applog(LOG_DEBUG, "%s %d: Init baud=%d work_division=%d fpga_count=%d",
	       icarus->drv->name, icarus->device_id, baud, work_division, fpga_count);

	info->baud = baud;
	info->work_division = work_division;
	info->fpga_count = fpga_count;
	info->nonce_mask = mask(work_division);

	info->golden_hashes = (golden_nonce_val & info->nonce_mask) * fpga_count;
	timersub(&tv_finish, &tv_start, &(info->golden_tv));

	set_timing_mode(this_option_offset, icarus);

	if (info->ident == IDENT_CMR2) {
		// Register a clone cgpu for every other interface that passed
		int i;
		for (i = info->intinfo + 1; i < icarus->usbdev->found->intinfo_count; i++) {
			struct cgpu_info *cgtmp;
			struct ICARUS_INFO *intmp;

			if (!cmr2_ok[i])
				continue;

			cgtmp = usb_copy_cgpu(icarus);
			if (!cgtmp) {
				applog(LOG_ERR, "%s %d: Init failed initinfo %d",
				       icarus->drv->name, icarus->device_id, i);
				continue;
			}

			cgtmp->usbinfo.usbstat = USB_NOSTAT;

			intmp = (struct ICARUS_INFO *)malloc(sizeof(struct ICARUS_INFO));
			if (unlikely(!intmp))
				quit(1, "Failed2 to malloc ICARUS_INFO");

			cgtmp->device_data = (void *)intmp;

			// Initialise everything to match
			memcpy(intmp, info, sizeof(struct ICARUS_INFO));

			intmp->intinfo = i;

			icarus_initialise(cgtmp, baud);

			if (!add_cgpu(cgtmp)) {
				usb_uninit(cgtmp);
				free(intmp);
				continue;
			}

			update_usb_stats(cgtmp);
		}
	}

	return icarus;

unshin:

	usb_uninit(icarus);
	free(info);
	icarus->device_data = NULL;

shin:

	icarus = usb_free_cgpu(icarus);

	return NULL;
}

static int64_t rock_scanwork(struct thr_info *thr);

/* Status-line prefix for Rockminers: frequency and (if known) temperature. */
static void rock_statline_before(char *buf, size_t bufsiz, struct cgpu_info *cgpu)
{
	if (cgpu->temp)
		tailsprintf(buf, bufsiz, "%3.0fMHz %3.0fC", opt_rock_freq, cgpu->temp);
	else
		tailsprintf(buf, bufsiz, "%.0fMHz", opt_rock_freq);
}

/* The only thing to do on flush_work is to remove the base work
to prevent us + * rolling what is now stale work */ +static void rock_flush(struct cgpu_info *icarus) +{ + struct ICARUS_INFO *info = icarus->device_data; + struct work *work; + + mutex_lock(&info->lock); + work = info->base_work; + info->base_work = NULL; + mutex_unlock(&info->lock); + + if (work) + free_work(work); +} + +static struct cgpu_info *rock_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct ICARUS_INFO *info; + struct timeval tv_start, tv_finish; + char *ob_hex = NULL; + + // Block 171874 nonce = (0xa2870100) = 0x000187a2 + // N.B. golden_ob MUST take less time to calculate + // than the timeout set in icarus_open() + // This one takes ~0.53ms on Rev3 Icarus + const char golden_ob[] = + "4679ba4ec99876bf4bfe086082b40025" + "4df6c356451471139a3afa71e48f544a" + "00000000000000000000000000000000" + "aa1ff05587320b1a1426674f2fa722ce"; + + const char golden_nonce[] = "000187a2"; + const uint32_t golden_nonce_val = 0x000187a2; + unsigned char nonce_bin[ROCK_READ_SIZE]; + struct ICARUS_WORK workdata; + char *nonce_hex; + struct cgpu_info *icarus; + int ret, err, amount, tries; + bool ok; + int correction_times = 0; + NONCE_DATA nonce_data; + uint32_t nonce; + char *newname = NULL; + + if ((sizeof(workdata) << 1) != (sizeof(golden_ob) - 1)) + quithere(1, "Data and golden_ob sizes don't match"); + + icarus = usb_alloc_cgpu(&icarus_drv, 1); + + if (!usb_init(icarus, dev, found)) + goto shin; + + hex2bin((void *)(&workdata), golden_ob, sizeof(workdata)); + rev((void *)(&(workdata.midstate)), ICARUS_MIDSTATE_SIZE); + rev((void *)(&(workdata.work)), ICARUS_WORK_SIZE); + if (opt_debug) { + ob_hex = bin2hex((void *)(&workdata), sizeof(workdata)); + applog(LOG_WARNING, "%s %d: send_gold_nonce %s", + icarus->drv->name, icarus->device_id, ob_hex); + free(ob_hex); + } + + info = (struct ICARUS_INFO *)calloc(1, sizeof(struct ICARUS_INFO)); + if (unlikely(!info)) + quit(1, "Failed to malloc ICARUS_INFO"); + (void)memset(info, 0, sizeof(struct 
ICARUS_INFO)); + icarus->device_data = (void *)info; + icarus->usbdev->ident = info->ident = IDENT_LIN; + info->nonce_size = ROCK_READ_SIZE; + info->fail_time = 10; + info->nonce_mask = 0xffffffff; + update_usb_stats(icarus); + + tries = MAX_TRIES; + ok = false; + while (!ok && tries-- > 0) { + icarus_initialise(icarus, info->baud); + + applog(LOG_DEBUG, "tries: %d", tries); + workdata.unused[ICARUS_UNUSED_SIZE - 3] = opt_rock_freq/10 - 1; + workdata.unused[ICARUS_UNUSED_SIZE - 2] = (MAX_TRIES-1-tries); + info->rmdev.detect_chip_no++; + if (info->rmdev.detect_chip_no >= MAX_TRIES) + info->rmdev.detect_chip_no = 0; + //g_detect_chip_no = (g_detect_chip_no + 1) & MAX_CHIP_NUM; + + usb_buffer_clear(icarus); + err = usb_write_ii(icarus, info->intinfo, + (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) + continue; + + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = icarus_get_nonce(icarus, nonce_bin, &tv_start, &tv_finish, NULL, 100); + + applog(LOG_DEBUG, "Rockminer nonce_bin: %02x %02x %02x %02x %02x %02x %02x %02x", + nonce_bin[0], nonce_bin[1], nonce_bin[2], nonce_bin[3], + nonce_bin[4], nonce_bin[5], nonce_bin[6], nonce_bin[7]); + if (ret != ICA_NONCE_OK) { + applog(LOG_DEBUG, "detect_one get_gold_nonce error, tries = %d", tries); + continue; + } + if (usb_buffer_size(icarus) == 1) { + applog(LOG_INFO, "Rock detect found an ANU, skipping"); + usb_buffer_clear(icarus); + break; + } + + newname = NULL; + switch (nonce_bin[NONCE_CHIP_NO_OFFSET] & RM_PRODUCT_MASK) { + case RM_PRODUCT_T1: + newname = "LIR"; // Rocketbox + info->rmdev.product_id = ROCKMINER_T1; + info->rmdev.chip_max = 12; + info->rmdev.min_frq = 200; + info->rmdev.def_frq = 330; + info->rmdev.max_frq = 400; + break; + case RM_PRODUCT_T2: // what's this? 
+ newname = "LIX"; + info->rmdev.product_id = ROCKMINER_T2; + info->rmdev.chip_max = 16; + info->rmdev.min_frq = 200; + info->rmdev.def_frq = 300; + info->rmdev.max_frq = 400; + break; + case RM_PRODUCT_RBOX: + newname = "LIN"; // R-Box + info->rmdev.product_id = ROCKMINER_RBOX; + info->rmdev.chip_max = 4; + info->rmdev.min_frq = 200; + info->rmdev.def_frq = 270; + info->rmdev.max_frq = 400; + break; + default: + continue; + } + + snprintf(info->rock_init, sizeof(info->rock_init), "%02x %02x %02x %02x", + nonce_bin[4], nonce_bin[5], nonce_bin[6], nonce_bin[7]); + + nonce_data.chip_no = nonce_bin[NONCE_CHIP_NO_OFFSET] & RM_CHIP_MASK; + if (nonce_data.chip_no >= info->rmdev.chip_max) + nonce_data.chip_no = 0; + + nonce_data.cmd_value = nonce_bin[NONCE_TASK_CMD_OFFSET] & RM_CMD_MASK; + if (nonce_data.cmd_value == NONCE_TASK_COMPLETE_CMD) { + applog(LOG_DEBUG, "complete g_detect_chip_no: %d", info->rmdev.detect_chip_no); + workdata.unused[ICARUS_UNUSED_SIZE - 3] = opt_rock_freq/10 - 1; + workdata.unused[ICARUS_UNUSED_SIZE - 2] = info->rmdev.detect_chip_no; + info->rmdev.detect_chip_no++; + if (info->rmdev.detect_chip_no >= MAX_TRIES) + info->rmdev.detect_chip_no = 0; + + err = usb_write_ii(icarus, info->intinfo, + (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err != LIBUSB_SUCCESS || amount != sizeof(workdata)) + continue; + applog(LOG_DEBUG, "send_gold_nonce usb_write_ii"); + continue; + } + + memcpy((char *)&nonce, nonce_bin, ICARUS_READ_SIZE); + nonce = htobe32(nonce); + applog(LOG_DEBUG, "Rockminer nonce: %08X", nonce); + correction_times = 0; + while (correction_times < NONCE_CORRECTION_TIMES) { + nonce_hex = bin2hex(nonce_bin, 4); + if (golden_nonce_val == nonce + rbox_corr_values[correction_times]) { + memset(&(info->g_work[0]), 0, sizeof(info->g_work)); + rock_init_last_received_task_complete_time(info); + + ok = true; + break; + } else { + applog(LOG_DEBUG, "detect_one gold_nonce compare error times = %d", + correction_times); + if (tries 
< 0 && info->ident != IDENT_CMR2) { + applog(LOG_WARNING, + "Icarus Detect: " + "Test failed at %s: get %s, should: %s", + icarus->device_path, nonce_hex, golden_nonce); + } + + if (nonce == 0) + break; + } + free(nonce_hex); + correction_times++; + } + } + + if (!ok) + goto unshin; + + if (newname) { + if (!icarus->drv->copy) + icarus->drv = copy_drv(icarus->drv); + icarus->drv->name = newname; + } + + applog(LOG_DEBUG, "Icarus Detect: Test succeeded at %s: got %s", + icarus->device_path, golden_nonce); + + /* We have a real Rockminer! */ + if (!add_cgpu(icarus)) + goto unshin; + + icarus->drv->scanwork = rock_scanwork; + icarus->drv->dname = "Rockminer"; + icarus->drv->get_statline_before = &rock_statline_before; + icarus->drv->flush_work = &rock_flush; + mutex_init(&info->lock); + + applog(LOG_INFO, "%s %d: Found at %s", + icarus->drv->name, icarus->device_id, + icarus->device_path); + + timersub(&tv_finish, &tv_start, &(info->golden_tv)); + + return icarus; + +unshin: + + usb_uninit(icarus); + free(info); + icarus->device_data = NULL; + +shin: + + icarus = usb_free_cgpu(icarus); + + return NULL; +} + +static void icarus_detect(bool __maybe_unused hotplug) +{ + usb_detect(&icarus_drv, rock_detect_one); + usb_detect(&icarus_drv, icarus_detect_one); +} + +static bool icarus_prepare(struct thr_info *thr) +{ + struct cgpu_info *icarus = thr->cgpu; + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data); + + if (info->ant) + info->antworks = calloc(sizeof(struct work *), ANT_QUEUE_NUM); + return true; +} + +static void cmr2_command(struct cgpu_info *icarus, uint8_t cmd, uint8_t data) +{ + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data); + struct ICARUS_WORK workdata; + int amount; + + memset((void *)(&workdata), 0, sizeof(workdata)); + + workdata.prefix = ICARUS_CMR2_PREFIX; + workdata.cmd = cmd; + workdata.data = data; + workdata.check = workdata.data ^ workdata.cmd ^ workdata.prefix ^ ICARUS_CMR2_CHECK; + + 
	usb_write_ii(icarus, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK);
}

/* Emit any queued CMR2 control commands (speed change, LED flash) before
 * the next work submission.  At most one command type is sent per call. */
static void cmr2_commands(struct cgpu_info *icarus)
{
	struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data);

	if (info->speed_next_work) {
		info->speed_next_work = false;
		cmr2_command(icarus, ICARUS_CMR2_CMD_SPEED, info->cmr2_speed);
		return;
	}

	if (info->flash_next_work) {
		info->flash_next_work = false;
		cmr2_command(icarus, ICARUS_CMR2_CMD_FLASH, ICARUS_CMR2_DATA_FLASH_ON);
		cgsleep_ms(250);
		cmr2_command(icarus, ICARUS_CMR2_CMD_FLASH, ICARUS_CMR2_DATA_FLASH_OFF);
		cgsleep_ms(250);
		cmr2_command(icarus, ICARUS_CMR2_CMD_FLASH, ICARUS_CMR2_DATA_FLASH_ON);
		cgsleep_ms(250);
		cmr2_command(icarus, ICARUS_CMR2_CMD_FLASH, ICARUS_CMR2_DATA_FLASH_OFF);
		return;
	}
}

/* Send (or resend) the work cached for (chip_no, current_task_id) to a
 * Rockminer chip.  New work is taken/rolled from info->base_work under
 * info->lock; on a USB write failure the cached work is dropped so it
 * will be refetched. */
void rock_send_task(unsigned char chip_no, unsigned int current_task_id, struct thr_info *thr)
{
	struct cgpu_info *icarus = thr->cgpu;
	struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data);
	int err, amount;
	struct ICARUS_WORK workdata;
	char *ob_hex;
	struct work *work = NULL;

	/* Only base_work needs locking since it can be asynchronously deleted
	 * by flush work */
	if (info->g_work[chip_no][current_task_id] == NULL) {
		mutex_lock(&info->lock);
		if (!info->base_work)
			info->base_work = get_work(thr, thr->id);
		if (info->base_work->drv_rolllimit > 0) {
			// Roll a clone off base_work while rolls remain
			info->base_work->drv_rolllimit--;
			roll_work(info->base_work);
			work = make_clone(info->base_work);
		} else {
			// Exhausted: hand over base_work itself
			work = info->base_work;
			info->base_work = NULL;
		}
		mutex_unlock(&info->lock);

		info->g_work[chip_no][current_task_id] = work;
	} else {
		work = info->g_work[chip_no][current_task_id];
		applog(LOG_DEBUG, "::resend work");
	}

	memset((void *)(&workdata), 0, sizeof(workdata));
	memcpy(&(workdata.midstate), work->midstate, ICARUS_MIDSTATE_SIZE);
	memcpy(&(workdata.work), work->data + ICARUS_WORK_DATA_OFFSET, ICARUS_WORK_SIZE);
	workdata.unused[ICARUS_UNUSED_SIZE - 4] = 0xaa;
	// Out-of-range chip frequency means state is stale; reinitialise it
	if (info->rmdev.chip[chip_no].freq > (info->rmdev.max_frq/10 - 1) ||
	    info->rmdev.chip[chip_no].freq < (info->rmdev.min_frq/10 - 1))
		rock_init_last_received_task_complete_time(info);

	workdata.unused[ICARUS_UNUSED_SIZE - 3] = info->rmdev.chip[chip_no].freq; //icarus->freq/10 - 1; ;
	workdata.unused[ICARUS_UNUSED_SIZE - 2] = chip_no ;
	workdata.id = 0x55;

	if (opt_debug) {
		ob_hex = bin2hex((void *)(work->data), 128);
		applog(LOG_WARNING, "%s %d: work->data %s",
		       icarus->drv->name, icarus->device_id, ob_hex);
		free(ob_hex);
	}

	// We only want results for the work we are about to send
	usb_buffer_clear(icarus);

	err = usb_write_ii(icarus, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK);

	if (err < 0 || amount != sizeof(workdata)) {
		applog(LOG_ERR, "%s %i: Comms error (werr=%d amt=%d)",
		       icarus->drv->name, icarus->device_id, err, amount);
		dev_error(icarus, REASON_DEV_COMMS_ERROR);
		icarus_initialise(icarus, info->baud);

		if (info->g_work[chip_no][current_task_id])
		{
			free_work(info->g_work[chip_no][current_task_id]);
			info->g_work[chip_no][current_task_id] = NULL;
		}

		return;
	}

	return;
}

/* Fold one (hash_count, elapsed) sample into the timing history and, once
 * enough samples have accumulated, re-estimate Hs (time per hash) and the
 * read_time via least-squares over the stored history sets. */
static void process_history(struct cgpu_info *icarus, struct ICARUS_INFO *info, uint32_t nonce,
			    uint64_t hash_count, struct timeval *elapsed, struct timeval *tv_start)
{
	struct ICARUS_HISTORY *history0, *history;
	struct timeval tv_history_start, tv_history_finish;
	int count;
	double Hs, W, fullnonce;
	int read_time, i;
	bool limited;
	uint32_t values;
	int64_t hash_count_range;
	double Ti, Xi;

	// Ignore possible end condition values ...
	// TODO: set limitations on calculated values depending on the device
	// to avoid crap values caused by CPU/Task Switching/Swapping/etc
	if ((nonce & info->nonce_mask) <= END_CONDITION ||
	    (nonce & info->nonce_mask) >= (info->nonce_mask & ~END_CONDITION))
		return;

	cgtime(&tv_history_start);

	history0 = &(info->history[0]);

	if (history0->values == 0)
		timeradd(tv_start, &history_sec, &(history0->finish));

	// Accumulate least-squares sums: Xi = hashes, Ti = elapsed minus
	// the (baud-dependent) USB read overhead
	Ti = (double)(elapsed->tv_sec)
		+ ((double)(elapsed->tv_usec))/((double)1000000)
		- ((double)ICARUS_READ_TIME(info->baud));
	Xi = (double)hash_count;
	history0->sumXiTi += Xi * Ti;
	history0->sumXi += Xi;
	history0->sumTi += Ti;
	history0->sumXi2 += Xi * Xi;

	history0->values++;

	if (history0->hash_count_max < hash_count)
		history0->hash_count_max = hash_count;
	if (history0->hash_count_min > hash_count || history0->hash_count_min == 0)
		history0->hash_count_min = hash_count;

	if (history0->values >= info->min_data_count
	&&  timercmp(tv_start, &(history0->finish), >)) {
		// Shift the history sets down; slot 0 becomes the accumulator
		for (i = INFO_HISTORY; i > 0; i--)
			memcpy(&(info->history[i]),
			       &(info->history[i-1]),
			       sizeof(struct ICARUS_HISTORY));

		// Initialise history0 to zero for summary calculation
		memset(history0, 0, sizeof(struct ICARUS_HISTORY));

		// We just completed a history data set
		// So now recalc read_time based on the whole history thus we will
		// initially get more accurate until it completes INFO_HISTORY
		// total data sets
		count = 0;
		for (i = 1 ; i <= INFO_HISTORY; i++) {
			history = &(info->history[i]);
			if (history->values >= MIN_DATA_COUNT) {
				count++;

				history0->sumXiTi += history->sumXiTi;
				history0->sumXi += history->sumXi;
				history0->sumTi += history->sumTi;
				history0->sumXi2 += history->sumXi2;
				history0->values += history->values;

				if (history0->hash_count_max < history->hash_count_max)
					history0->hash_count_max = history->hash_count_max;
				if (history0->hash_count_min > history->hash_count_min || history0->hash_count_min == 0)
					history0->hash_count_min = history->hash_count_min;
			}
		}

		// All history data: linear regression slope (Hs) and intercept (W)
		Hs = (history0->values*history0->sumXiTi - history0->sumXi*history0->sumTi)
			/ (history0->values*history0->sumXi2 - history0->sumXi*history0->sumXi);
		W = history0->sumTi/history0->values - Hs*history0->sumXi/history0->values;
		hash_count_range = history0->hash_count_max - history0->hash_count_min;
		values = history0->values;

		// Initialise history0 to zero for next data set
		memset(history0, 0, sizeof(struct ICARUS_HISTORY));

		fullnonce = W + Hs * (((double)0xffffffff) + 1);
		read_time = SECTOMS(fullnonce) - ICARUS_READ_REDUCE;
		if (info->read_time_limit > 0 && read_time > info->read_time_limit) {
			read_time = info->read_time_limit;
			limited = true;
		} else
			limited = false;

		info->Hs = Hs;
		info->read_time = read_time;

		info->fullnonce = fullnonce;
		info->count = count;
		info->W = W;
		info->values = values;
		info->hash_count_range = hash_count_range;

		// Require progressively more samples per set, then stop
		// re-estimating entirely in short mode
		if (info->min_data_count < MAX_MIN_DATA_COUNT)
			info->min_data_count *= 2;
		else if (info->timing_mode == MODE_SHORT)
			info->do_icarus_timing = false;

		applog(LOG_WARNING, "%s %d Re-estimate: Hs=%e W=%e read_time=%dms%s fullnonce=%.3fs",
		       icarus->drv->name, icarus->device_id, Hs, W, read_time,
		       limited ? " (limited)" : "", fullnonce);
	}
	info->history_count++;
	cgtime(&tv_history_finish);

	timersub(&tv_history_finish, &tv_history_start, &tv_history_finish);
	timeradd(&tv_history_finish, &(info->history_time), &(info->history_time));
}

/* Main per-thread hashing loop entry for Icarus-protocol devices: send one
 * unit of work, wait for a nonce, and credit the hashes.
 * NOTE(review): this function continues past the end of this hunk. */
static int64_t icarus_scanwork(struct thr_info *thr)
{
	struct cgpu_info *icarus = thr->cgpu;
	struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data);
	int ret, err, amount;
	unsigned char nonce_bin[ICARUS_BUF_SIZE];
	struct ICARUS_WORK workdata;
	char *ob_hex;
	uint32_t nonce;
	int64_t hash_count = 0;
	struct timeval tv_start, tv_finish, elapsed;
	int curr_hw_errors;
	bool was_hw_error;
	struct work *work;
	int64_t estimate_hashes;
	uint8_t workid = 0;

	// No valid hashes for too long: reset once, then drop the device
	if (unlikely(share_work_tdiff(icarus) > info->fail_time)) {
		if (info->failing) {
			if (share_work_tdiff(icarus) > info->fail_time + 60) {
				applog(LOG_ERR, "%s %d: Device failed to respond to restart",
				       icarus->drv->name, icarus->device_id);
				usb_nodev(icarus);
				return -1;
			}
		} else {
			applog(LOG_WARNING, "%s %d: No valid hashes for over %d secs, attempting to reset",
			       icarus->drv->name, icarus->device_id, info->fail_time);
			usb_reset(icarus);
			info->failing = true;
		}
	}

	// Device is gone
	if (icarus->usbinfo.nodev)
		return -1;

	elapsed.tv_sec = elapsed.tv_usec = 0;

	work = get_work(thr, thr->id);
	memset((void *)(&workdata), 0, sizeof(workdata));
	memcpy(&(workdata.midstate), work->midstate, ICARUS_MIDSTATE_SIZE);
	memcpy(&(workdata.work), work->data + ICARUS_WORK_DATA_OFFSET, ICARUS_WORK_SIZE);
	rev((void *)(&(workdata.midstate)), ICARUS_MIDSTATE_SIZE);
	rev((void *)(&(workdata.work)), ICARUS_WORK_SIZE);
	if (info->ant) {
		// Antminers tag each task with a rolling 5-bit work id
		workid = info->workid;
		if (++info->workid >= 0x1F)
			info->workid = 0;
		if (info->antworks[workid])
			free_work(info->antworks[workid]);
		info->antworks[workid] = work;
		workdata.id = workid;
	}

	if (info->speed_next_work || info->flash_next_work)
		cmr2_commands(icarus);

// We only want results for the work we are about to send + usb_buffer_clear(icarus); + + err = usb_write_ii(icarus, info->intinfo, (char *)(&workdata), sizeof(workdata), &amount, C_SENDWORK); + if (err < 0 || amount != sizeof(workdata)) { + applog(LOG_ERR, "%s %i: Comms error (werr=%d amt=%d)", + icarus->drv->name, icarus->device_id, err, amount); + dev_error(icarus, REASON_DEV_COMMS_ERROR); + icarus_initialise(icarus, info->baud); + goto out; + } + + if (opt_debug) { + ob_hex = bin2hex((void *)(&workdata), sizeof(workdata)); + applog(LOG_DEBUG, "%s %d: sent %s", + icarus->drv->name, icarus->device_id, ob_hex); + free(ob_hex); + } +more_nonces: + /* Icarus will return nonces or nothing. If we know we have enough data + * for a response in the buffer already, there will be no usb read + * performed. */ + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = icarus_get_nonce(icarus, nonce_bin, &tv_start, &tv_finish, thr, info->read_time); + if (ret == ICA_NONCE_ERROR) + goto out; + + // aborted before becoming idle, get new work + if (ret == ICA_NONCE_TIMEOUT || ret == ICA_NONCE_RESTART) { + if (info->ant) + goto out; + + timersub(&tv_finish, &tv_start, &elapsed); + + // ONLY up to just when it aborted + // We didn't read a reply so we don't subtract ICARUS_READ_TIME + estimate_hashes = ((double)(elapsed.tv_sec) + + ((double)(elapsed.tv_usec))/((double)1000000)) / info->Hs; + + // If some Serial-USB delay allowed the full nonce range to + // complete it can't have done more than a full nonce + if (unlikely(estimate_hashes > 0xffffffff)) + estimate_hashes = 0xffffffff; + + applog(LOG_DEBUG, "%s %d: no nonce = 0x%08lX hashes (%ld.%06lds)", + icarus->drv->name, icarus->device_id, + (long unsigned int)estimate_hashes, + (long)elapsed.tv_sec, (long)elapsed.tv_usec); + + hash_count = estimate_hashes; + goto out; + } + + if (info->ant) { + workid = nonce_bin[4] & 0x1F; + if (info->antworks[workid]) + work = info->antworks[workid]; + else + goto out; + } + + memcpy((char 
*)&nonce, nonce_bin, ICARUS_READ_SIZE); + nonce = htobe32(nonce); + curr_hw_errors = icarus->hw_errors; + if (submit_nonce(thr, work, nonce)) + info->failing = false; + was_hw_error = (curr_hw_errors < icarus->hw_errors); + + /* U3s return shares fast enough to use just that for hashrate + * calculation, otherwise the result is inaccurate instead. */ + if (info->ant) { + info->nonces++; + if (usb_buffer_size(icarus) >= ANT_READ_SIZE) + goto more_nonces; + } else { + hash_count = (nonce & info->nonce_mask); + hash_count++; + hash_count *= info->fpga_count; + } + +#if 0 + // This appears to only return zero nonce values + if (usb_buffer_size(icarus) > 3) { + memcpy((char *)&nonce, icarus->usbdev->buffer, sizeof(nonce_bin)); + nonce = htobe32(nonce); + applog(LOG_WARNING, "%s %d: attempting to submit 2nd nonce = 0x%08lX", + icarus->drv->name, icarus->device_id, + (long unsigned int)nonce); + curr_hw_errors = icarus->hw_errors; + submit_nonce(thr, work, nonce); + was_hw_error = (curr_hw_errors > icarus->hw_errors); + } +#endif + + if (opt_debug || info->do_icarus_timing) + timersub(&tv_finish, &tv_start, &elapsed); + + applog(LOG_DEBUG, "%s %d: nonce = 0x%08x = 0x%08lX hashes (%ld.%06lds)", + icarus->drv->name, icarus->device_id, + nonce, (long unsigned int)hash_count, + (long)elapsed.tv_sec, (long)elapsed.tv_usec); + + if (info->do_icarus_timing && !was_hw_error) + process_history(icarus, info, nonce, hash_count, &elapsed, &tv_start); +out: + if (!info->ant) + free_work(work); + else { + /* Ant USBs free the work themselves. 
Return only one full + * nonce worth on each pass to smooth out displayed hashrate */ + if (info->nonces) { + hash_count = 0xffffffff; + info->nonces--; + } + } + + return hash_count; +} + +static int64_t rock_scanwork(struct thr_info *thr) +{ + struct cgpu_info *icarus = thr->cgpu; + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(icarus->device_data); + int ret; + unsigned char nonce_bin[ICARUS_BUF_SIZE]; + uint32_t nonce; + int64_t hash_count = 0; + struct timeval tv_start, tv_finish, elapsed; + struct work *work = NULL; + int64_t estimate_hashes; + int correction_times = 0; + NONCE_DATA nonce_data; + double temp; + + int chip_no = 0; + time_t recv_time = 0; + + if (unlikely(share_work_tdiff(icarus) > info->fail_time)) { + if (info->failing) { + if (share_work_tdiff(icarus) > info->fail_time + 60) { + applog(LOG_ERR, "%s %d: Device failed to respond to restart", + icarus->drv->name, icarus->device_id); + usb_nodev(icarus); + return -1; + } + } else { + applog(LOG_WARNING, "%s %d: No valid hashes for over %d secs, attempting to reset", + icarus->drv->name, icarus->device_id, info->fail_time); + usb_reset(icarus); + info->failing = true; + } + } + + // Device is gone + if (icarus->usbinfo.nodev) + return -1; + + elapsed.tv_sec = elapsed.tv_usec = 0; + + for (chip_no = 0; chip_no < info->rmdev.chip_max; chip_no++) { + recv_time = time(NULL); + if (recv_time > info->rmdev.chip[chip_no].last_received_task_complete_time + 1) { + info->rmdev.chip[chip_no].last_received_task_complete_time = recv_time; + rock_send_task(chip_no, 0,thr); + break; + } + } + + memset(nonce_bin, 0, sizeof(nonce_bin)); + ret = icarus_get_nonce(icarus, nonce_bin, &tv_start, &tv_finish, thr, 3000);//info->read_time); + + nonce_data.chip_no = nonce_bin[NONCE_CHIP_NO_OFFSET] & RM_CHIP_MASK; + if (nonce_data.chip_no >= info->rmdev.chip_max) + nonce_data.chip_no = 0; + nonce_data.task_no = nonce_bin[NONCE_TASK_NO_OFFSET] & 0x1; + nonce_data.cmd_value = nonce_bin[NONCE_TASK_CMD_OFFSET] & 
RM_CMD_MASK; + nonce_data.work_state = nonce_bin[NONCE_TASK_CMD_OFFSET] & RM_STATUS_MASK; + + temp = (double)nonce_bin[NONCE_COMMAND_OFFSET]; + if (temp != 128) + icarus->temp = temp; + + if (nonce_data.cmd_value == NONCE_TASK_COMPLETE_CMD) { + info->rmdev.chip[nonce_data.chip_no].last_received_task_complete_time = time(NULL); + if (info->g_work[nonce_data.chip_no][nonce_data.task_no]) { + free_work(info->g_work[nonce_data.chip_no][nonce_data.task_no]); + info->g_work[nonce_data.chip_no][nonce_data.task_no] = NULL; + } + goto out; + } + + if (nonce_data.cmd_value == NONCE_GET_TASK_CMD) { + rock_send_task(nonce_data.chip_no, nonce_data.task_no, thr); + goto out; + } + + if (ret == ICA_NONCE_TIMEOUT) + rock_send_task(nonce_data.chip_no, nonce_data.task_no, thr); + + work = info->g_work[nonce_data.chip_no][nonce_data.task_no]; + if (work == NULL) + goto out; + + if (ret == ICA_NONCE_ERROR) + goto out; + + // aborted before becoming idle, get new work + if (ret == ICA_NONCE_TIMEOUT || ret == ICA_NONCE_RESTART) { + timersub(&tv_finish, &tv_start, &elapsed); + + // ONLY up to just when it aborted + // We didn't read a reply so we don't subtract ICARUS_READ_TIME + estimate_hashes = ((double)(elapsed.tv_sec) + + ((double)(elapsed.tv_usec))/((double)1000000)) / info->Hs; + + // If some Serial-USB delay allowed the full nonce range to + // complete it can't have done more than a full nonce + if (unlikely(estimate_hashes > 0xffffffff)) + estimate_hashes = 0xffffffff; + + applog(LOG_DEBUG, "%s %d: no nonce = 0x%08lX hashes (%ld.%06lds)", + icarus->drv->name, icarus->device_id, + (long unsigned int)estimate_hashes, + (long)elapsed.tv_sec, (long)elapsed.tv_usec); + + goto out; + } + + memcpy((char *)&nonce, nonce_bin, ICARUS_READ_SIZE); + nonce = htobe32(nonce); + recv_time = time(NULL); + if ((recv_time-info->rmdev.dev_detect_time) >= 60) { + unsigned char i; + info->rmdev.dev_detect_time = recv_time; + for (i = 0; i < info->rmdev.chip_max; i ++) { + if 
(info->rmdev.chip[i].error_cnt >= 12) { + if (info->rmdev.chip[i].freq > info->rmdev.min_frq) + info->rmdev.chip[i].freq--; + } else if (info->rmdev.chip[i].error_cnt <= 1) { + if (info->rmdev.chip[i].freq < (info->rmdev.def_frq / 10 - 1)) + info->rmdev.chip[i].freq++; + } + info->rmdev.chip[i].error_cnt = 0; + } + } + + correction_times = 0; + info->nonces_checked++; + while (correction_times < NONCE_CORRECTION_TIMES) { + uint32_t new_nonce; + + if (correction_times > 0) { + info->nonces_correction_tests++; + if (correction_times == 1) + info->nonces_correction_times++; + } + new_nonce = nonce + rbox_corr_values[correction_times]; + /* Basic dupe testing */ + if (new_nonce == info->last_nonce[nonce_data.chip_no][nonce_data.task_no]) + break; + if (test_nonce(work, new_nonce)) { + nonce = new_nonce; + submit_tested_work(thr, work); + info->last_nonce[nonce_data.chip_no][nonce_data.task_no] = nonce; + info->nonces_correction[correction_times]++; + hash_count++; + info->failing = false; + applog(LOG_DEBUG, "Rockminer nonce :::OK:::"); + break; + } else { + applog(LOG_DEBUG, "Rockminer nonce error times = %d", correction_times); + if (new_nonce == 0) + break; + } + correction_times++; + } + if (correction_times >= NONCE_CORRECTION_TIMES) { + inc_hw_errors(thr); + info->nonces_fail++; + } + + hash_count = (hash_count * info->nonce_mask); + + if (opt_debug || info->do_icarus_timing) + timersub(&tv_finish, &tv_start, &elapsed); + + applog(LOG_DEBUG, "%s %d: nonce = 0x%08x = 0x%08lX hashes (%ld.%06lds)", + icarus->drv->name, icarus->device_id, + nonce, (long unsigned int)hash_count, + (long)elapsed.tv_sec, (long)elapsed.tv_usec); + +out: + + return hash_count; +} + +static struct api_data *icarus_api_stats(struct cgpu_info *cgpu) +{ + struct api_data *root = NULL; + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(cgpu->device_data); + char data[4096]; + int i, off; + size_t len; + float avg; + + // Warning, access to these is not locked - but we don't really + // care 
since hashing performance is way more important than + // locking access to displaying API debug 'stats' + // If locking becomes an issue for any of them, use copy_data=true also + root = api_add_int(root, "read_time", &(info->read_time), false); + root = api_add_int(root, "read_time_limit", &(info->read_time_limit), false); + root = api_add_double(root, "fullnonce", &(info->fullnonce), false); + root = api_add_int(root, "count", &(info->count), false); + root = api_add_hs(root, "Hs", &(info->Hs), false); + root = api_add_double(root, "W", &(info->W), false); + root = api_add_uint(root, "total_values", &(info->values), false); + root = api_add_uint64(root, "range", &(info->hash_count_range), false); + root = api_add_uint64(root, "history_count", &(info->history_count), false); + root = api_add_timeval(root, "history_time", &(info->history_time), false); + root = api_add_uint(root, "min_data_count", &(info->min_data_count), false); + root = api_add_uint(root, "timing_values", &(info->history[0].values), false); + root = api_add_const(root, "timing_mode", timing_mode_str(info->timing_mode), false); + root = api_add_bool(root, "is_timing", &(info->do_icarus_timing), false); + root = api_add_int(root, "baud", &(info->baud), false); + root = api_add_int(root, "work_division", &(info->work_division), false); + root = api_add_int(root, "fpga_count", &(info->fpga_count), false); + + if (info->ident == IDENT_LIN) { + root = api_add_string(root, "rock_init", info->rock_init, false); + root = api_add_uint8(root, "rock_chips", &(info->rmdev.detect_chip_no), false); + root = api_add_uint8(root, "rock_chip_max", &(info->rmdev.chip_max), false); + root = api_add_uint8(root, "rock_prod_id", &(info->rmdev.product_id), false); + root = api_add_avg(root, "rock_min_freq", &(info->rmdev.min_frq), false); + root = api_add_avg(root, "rock_max_freq", &(info->rmdev.max_frq), false); + root = api_add_uint64(root, "rock_check", &(info->nonces_checked), false); + root = api_add_uint64(root, 
"rock_corr", &(info->nonces_correction_times), false); + root = api_add_uint64(root, "rock_corr_tests", &(info->nonces_correction_tests), false); + root = api_add_uint64(root, "rock_corr_fail", &(info->nonces_fail), false); + if (info->nonces_checked <= 0) + avg = 0; + else + avg = (float)(info->nonces_correction_tests) / (float)(info->nonces_checked); + root = api_add_avg(root, "rock_corr_avg", &avg, true); + data[0] = '\0'; + off = 0; + for (i = 0; i < NONCE_CORRECTION_TIMES; i++) { + len = snprintf(data+off, sizeof(data)-off, + "%s%"PRIu64, + i > 0 ? "/" : "", + info->nonces_correction[i]); + if (len >= (sizeof(data)-off)) + off = sizeof(data)-1; + else { + if (len > 0) + off += len; + } + } + root = api_add_string(root, "rock_corr_finds", data, true); + } + + return root; +} + +static void icarus_statline_before(char *buf, size_t bufsiz, struct cgpu_info *cgpu) +{ + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(cgpu->device_data); + + if (info->ant) { + if (info->u3) + tailsprintf(buf, bufsiz, "%3.0fMHz %3dmV", opt_au3_freq, opt_au3_volt); + else + tailsprintf(buf, bufsiz, "%3.0fMHz", opt_anu_freq); + } else if (info->ident == IDENT_CMR2 && info->cmr2_speed > 0) + tailsprintf(buf, bufsiz, "%5.1fMhz", (float)(info->cmr2_speed) * ICARUS_CMR2_SPEED_FACTOR); +} + +static void icarus_shutdown(__maybe_unused struct thr_info *thr) +{ + // TODO: ? 
+} + +static void icarus_identify(struct cgpu_info *cgpu) +{ + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(cgpu->device_data); + + if (info->ident == IDENT_CMR2) + info->flash_next_work = true; +} + +static char *icarus_set(struct cgpu_info *cgpu, char *option, char *setting, char *replybuf) +{ + struct ICARUS_INFO *info = (struct ICARUS_INFO *)(cgpu->device_data); + int val; + + if (info->ident != IDENT_CMR2) { + strcpy(replybuf, "no set options available"); + return replybuf; + } + + if (strcasecmp(option, "help") == 0) { + sprintf(replybuf, "clock: range %d-%d", + ICARUS_CMR2_SPEED_MIN_INT, ICARUS_CMR2_SPEED_MAX_INT); + return replybuf; + } + + if (strcasecmp(option, "clock") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing clock setting"); + return replybuf; + } + + val = atoi(setting); + if (val < ICARUS_CMR2_SPEED_MIN_INT || val > ICARUS_CMR2_SPEED_MAX_INT) { + sprintf(replybuf, "invalid clock: '%s' valid range %d-%d", + setting, + ICARUS_CMR2_SPEED_MIN_INT, + ICARUS_CMR2_SPEED_MAX_INT); + } + + info->cmr2_speed = CMR2_INT_TO_SPEED(val); + info->speed_next_work = true; + + return NULL; + } + + sprintf(replybuf, "Unknown option: %s", option); + return replybuf; +} + +struct device_drv icarus_drv = { + .drv_id = DRIVER_icarus, + .dname = "Icarus", + .name = "ICA", + .drv_detect = icarus_detect, + .hash_work = &hash_driver_work, + .get_api_stats = icarus_api_stats, + .get_statline_before = icarus_statline_before, + .set_device = icarus_set, + .identify_device = icarus_identify, + .thread_prepare = icarus_prepare, + .scanwork = icarus_scanwork, + .thread_shutdown = icarus_shutdown, +}; diff --git a/driver-klondike.c b/driver-klondike.c new file mode 100644 index 0000000..1429041 --- /dev/null +++ b/driver-klondike.c @@ -0,0 +1,1555 @@ +/* + * Copyright 2013 Andrew Smith + * Copyright 2013 Con Kolivas + * Copyright 2013 Chris Savery + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the 
GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#ifdef WIN32 +#include +#endif + +#include "compat.h" +#include "miner.h" +#include "usbutils.h" + +#define K1 "K1" +#define K16 "K16" +#define K64 "K64" + +static const char *msg_detect_send = "DSend"; +static const char *msg_detect_reply = "DReply"; +static const char *msg_send = "Send"; +static const char *msg_reply = "Reply"; + +#define KLN_CMD_ABORT 'A' +#define KLN_CMD_CONFIG 'C' +#define KLN_CMD_ENABLE 'E' +#define KLN_CMD_IDENT 'I' +#define KLN_CMD_NONCE '=' +#define KLN_CMD_STATUS 'S' +#define KLN_CMD_WORK 'W' + +#define KLN_CMD_ENABLE_OFF '0' +#define KLN_CMD_ENABLE_ON '1' + +#define MIDSTATE_BYTES 32 +#define MERKLE_OFFSET 64 +#define MERKLE_BYTES 12 + +#define REPLY_SIZE 15 // adequate for all types of replies +#define MAX_KLINES 1024 // unhandled reply limit +#define REPLY_WAIT_TIME 100 // poll interval for a cmd waiting it's reply +#define CMD_REPLY_RETRIES 8 // how many retries for cmds +#define MAX_WORK_COUNT 4 // for now, must be binary multiple and match firmware +#define TACH_FACTOR 87890 // fan rpm divisor + +#define KLN_KILLWORK_TEMP 53.5 +#define KLN_COOLED_DOWN 45.5 + +/* + * Work older than 5s will already be completed + * FYI it must not be possible to complete 256 work + * items this quickly on a single device - + * thus limited to 219.9GH/s per device + */ +#define OLD_WORK_MS ((int)(5 * 1000)) + +/* + * How many incorrect slave counts to ignore in a row + * 2 means it allows random grabage returned twice + * Until slaves are implemented, this should never occur + * so allowing 2 in a row should ignore random errros + */ +#define KLN_ISS_IGNORE 2 + +/* + * If the queue status hasn't been updated for this long then do it now + * 5GH/s = 
859ms per full nonce range + */ +#define LATE_UPDATE_MS ((int)(2.5 * 1000)) + +// If 5 late updates in a row, try to reset the device +#define LATE_UPDATE_LIMIT 5 + +// If the reset fails sleep for 1s +#define LATE_UPDATE_SLEEP_MS 1000 + +// However give up after 8s +#define LATE_UPDATE_NODEV_MS ((int)(8.0 * 1000)) + +struct device_drv klondike_drv; + +typedef struct klondike_header { + uint8_t cmd; + uint8_t dev; + uint8_t buf[REPLY_SIZE-2]; +} HEADER; + +#define K_2(_bytes) ((int)(_bytes[0]) + \ + ((int)(_bytes[1]) << 8)) + +#define K_4(_bytes) ((uint64_t)(_bytes[0]) + \ + ((uint64_t)(_bytes[1]) << 8) + \ + ((uint64_t)(_bytes[2]) << 16) + \ + ((uint64_t)(_bytes[3]) << 24)) + +#define K_SERIAL(_serial) K_4(_serial) +#define K_HASHCOUNT(_hashcount) K_2(_hashcount) +#define K_MAXCOUNT(_maxcount) K_2(_maxcount) +#define K_NONCE(_nonce) K_4(_nonce) +#define K_HASHCLOCK(_hashclock) K_2(_hashclock) + +#define SET_HASHCLOCK(_hashclock, _value) do { \ + (_hashclock)[0] = (uint8_t)((_value) & 0xff); \ + (_hashclock)[1] = (uint8_t)(((_value) >> 8) & 0xff); \ + } while(0) + +#define KSENDHD(_add) (sizeof(uint8_t) + sizeof(uint8_t) + _add) + +typedef struct klondike_id { + uint8_t cmd; + uint8_t dev; + uint8_t version; + uint8_t product[7]; + uint8_t serial[4]; +} IDENTITY; + +typedef struct klondike_status { + uint8_t cmd; + uint8_t dev; + uint8_t state; + uint8_t chipcount; + uint8_t slavecount; + uint8_t workqc; + uint8_t workid; + uint8_t temp; + uint8_t fanspeed; + uint8_t errorcount; + uint8_t hashcount[2]; + uint8_t maxcount[2]; + uint8_t noise; +} WORKSTATUS; + +typedef struct _worktask { + uint8_t cmd; + uint8_t dev; + uint8_t workid; + uint8_t midstate[32]; + uint8_t merkle[12]; +} WORKTASK; + +typedef struct _workresult { + uint8_t cmd; + uint8_t dev; + uint8_t workid; + uint8_t nonce[4]; +} WORKRESULT; + +typedef struct klondike_cfg { + uint8_t cmd; + uint8_t dev; + uint8_t hashclock[2]; + uint8_t temptarget; + uint8_t tempcritical; + uint8_t fantarget; + uint8_t 
pad2; +} WORKCFG; + +typedef struct kline { + union { + HEADER hd; + IDENTITY id; + WORKSTATUS ws; + WORKTASK wt; + WORKRESULT wr; + WORKCFG cfg; + }; +} KLINE; + +#define zero_kline(_kline) memset((void *)(_kline), 0, sizeof(KLINE)); + +typedef struct device_info { + uint32_t noncecount; + uint32_t nextworkid; + uint16_t lasthashcount; + uint64_t totalhashcount; + uint32_t rangesize; + uint32_t *chipstats; +} DEVINFO; + +typedef struct klist { + struct klist *prev; + struct klist *next; + KLINE kline; + struct timeval tv_when; + int block_seq; + bool ready; + bool working; +} KLIST; + +typedef struct jobque { + int workqc; + struct timeval last_update; + bool overheat; + bool flushed; + int late_update_count; + int late_update_sequential; +} JOBQUE; + +struct klondike_info { + pthread_rwlock_t stat_lock; + struct thr_info replies_thr; + cglock_t klist_lock; + KLIST *used; + KLIST *free; + int kline_count; + int used_count; + int block_seq; + KLIST *status; + DEVINFO *devinfo; + KLIST *cfg; + JOBQUE *jobque; + int noncecount; + uint64_t hashcount; + uint64_t errorcount; + uint64_t noisecount; + int incorrect_slave_sequential; + + // us Delay from USB reply to being processed + double delay_count; + double delay_total; + double delay_min; + double delay_max; + + struct timeval tv_last_nonce_received; + + // Time from receiving one nonce to the next + double nonce_count; + double nonce_total; + double nonce_min; + double nonce_max; + + int wque_size; + int wque_cleared; + + bool initialised; +}; + +static KLIST *new_klist_set(struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + KLIST *klist = NULL; + int i; + + klist = calloc(MAX_KLINES, sizeof(*klist)); + if (!klist) + quit(1, "Failed to calloc klist - when old count=%d", klninfo->kline_count); + + klninfo->kline_count += MAX_KLINES; + + klist[0].prev = NULL; + klist[0].next = &(klist[1]); + for (i = 1; i < MAX_KLINES-1; i++) { + klist[i].prev = 
&klist[i-1]; + klist[i].next = &klist[i+1]; + } + klist[MAX_KLINES-1].prev = &(klist[MAX_KLINES-2]); + klist[MAX_KLINES-1].next = NULL; + + return klist; +} + +static KLIST *allocate_kitem(struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + KLIST *kitem = NULL; + int ran_out = 0; + char errbuf[1024]; + + cg_wlock(&klninfo->klist_lock); + + if (klninfo->free == NULL) { + ran_out = klninfo->kline_count; + klninfo->free = new_klist_set(klncgpu); + snprintf(errbuf, sizeof(errbuf), + "%s%i: KLINE count exceeded %d, now %d", + klncgpu->drv->name, klncgpu->device_id, + ran_out, klninfo->kline_count); + } + + kitem = klninfo->free; + + klninfo->free = klninfo->free->next; + if (klninfo->free) + klninfo->free->prev = NULL; + + kitem->next = klninfo->used; + kitem->prev = NULL; + if (kitem->next) + kitem->next->prev = kitem; + klninfo->used = kitem; + + kitem->ready = false; + kitem->working = false; + + memset((void *)&(kitem->kline), 0, sizeof(kitem->kline)); + + klninfo->used_count++; + + cg_wunlock(&klninfo->klist_lock); + + if (ran_out > 0) + applog(LOG_WARNING, "%s", errbuf); + + return kitem; +} + +static KLIST *release_kitem(struct cgpu_info *klncgpu, KLIST *kitem) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + + cg_wlock(&klninfo->klist_lock); + + if (kitem == klninfo->used) + klninfo->used = kitem->next; + + if (kitem->next) + kitem->next->prev = kitem->prev; + if (kitem->prev) + kitem->prev->next = kitem->next; + + kitem->next = klninfo->free; + if (klninfo->free) + klninfo->free->prev = kitem; + + kitem->prev = NULL; + + klninfo->free = kitem; + + klninfo->used_count--; + + cg_wunlock(&klninfo->klist_lock); + + return NULL; +} + +static double cvtKlnToC(uint8_t temp) +{ + double Rt, stein, celsius; + + if (temp == 0) + return 0.0; + + Rt = 1000.0 * 255.0 / (double)temp - 1000.0; + + stein = log(Rt / 2200.0) / 3987.0; + + stein += 1.0 / (double)(25.0 + 273.15); + 
+ celsius = (1.0 / stein) - 273.15; + + // For display of bad data + if (celsius < 0.0) + celsius = 0.0; + if (celsius > 200.0) + celsius = 200.0; + + return celsius; +} + +static int cvtCToKln(double deg) +{ + double Rt, stein, temp; + + if (deg < 0.0) + deg = 0.0; + + stein = 1.0 / (deg + 273.15); + + stein -= 1.0 / (double)(25.0 + 273.15); + + Rt = exp(stein * 3987.0) * 2200.0; + + if (Rt == -1000.0) + Rt++; + + temp = 1000.0 * 256.0 / (Rt + 1000.0); + + if (temp > 255) + temp = 255; + if (temp < 0) + temp = 0; + + return (int)temp; +} + +// Change this to LOG_WARNING if you wish to always see the replies +#define READ_DEBUG LOG_DEBUG + +static void display_kline(struct cgpu_info *klncgpu, KLINE *kline, const char *msg) +{ + char *hexdata; + + switch (kline->hd.cmd) { + case KLN_CMD_NONCE: + applog(READ_DEBUG, + "%s%i:%d %s work [%c] dev=%d workid=%d" + " nonce=0x%08x", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->wr.dev), msg, kline->wr.cmd, + (int)(kline->wr.dev), + (int)(kline->wr.workid), + (unsigned int)K_NONCE(kline->wr.nonce) - 0xC0); + break; + case KLN_CMD_STATUS: + case KLN_CMD_WORK: + case KLN_CMD_ENABLE: + case KLN_CMD_ABORT: + applog(READ_DEBUG, + "%s%i:%d %s status [%c] dev=%d chips=%d" + " slaves=%d workcq=%d workid=%d temp=%d fan=%d" + " errors=%d hashes=%d max=%d noise=%d", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->ws.dev), msg, kline->ws.cmd, + (int)(kline->ws.dev), + (int)(kline->ws.chipcount), + (int)(kline->ws.slavecount), + (int)(kline->ws.workqc), + (int)(kline->ws.workid), + (int)(kline->ws.temp), + (int)(kline->ws.fanspeed), + (int)(kline->ws.errorcount), + K_HASHCOUNT(kline->ws.hashcount), + K_MAXCOUNT(kline->ws.maxcount), + (int)(kline->ws.noise)); + break; + case KLN_CMD_CONFIG: + applog(READ_DEBUG, + "%s%i:%d %s config [%c] dev=%d clock=%d" + " temptarget=%d tempcrit=%d fan=%d", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->cfg.dev), msg, kline->cfg.cmd, + (int)(kline->cfg.dev), + 
K_HASHCLOCK(kline->cfg.hashclock), + (int)(kline->cfg.temptarget), + (int)(kline->cfg.tempcritical), + (int)(kline->cfg.fantarget)); + break; + case KLN_CMD_IDENT: + applog(READ_DEBUG, + "%s%i:%d %s info [%c] version=0x%02x prod=%.7s" + " serial=0x%08x", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->hd.dev), msg, kline->hd.cmd, + (int)(kline->id.version), + kline->id.product, + (unsigned int)K_SERIAL(kline->id.serial)); + break; + default: + hexdata = bin2hex((unsigned char *)&(kline->hd.dev), REPLY_SIZE - 1); + applog(LOG_ERR, + "%s%i:%d %s [%c:%s] unknown and ignored", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->hd.dev), msg, kline->hd.cmd, + hexdata); + free(hexdata); + break; + } +} + +static void display_send_kline(struct cgpu_info *klncgpu, KLINE *kline, const char *msg) +{ + char *hexdata; + + switch (kline->hd.cmd) { + case KLN_CMD_WORK: + applog(READ_DEBUG, + "%s%i:%d %s work [%c] dev=%d workid=0x%02x ...", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->wt.dev), msg, kline->ws.cmd, + (int)(kline->wt.dev), + (int)(kline->wt.workid)); + break; + case KLN_CMD_CONFIG: + applog(READ_DEBUG, + "%s%i:%d %s config [%c] dev=%d clock=%d" + " temptarget=%d tempcrit=%d fan=%d", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->cfg.dev), msg, kline->cfg.cmd, + (int)(kline->cfg.dev), + K_HASHCLOCK(kline->cfg.hashclock), + (int)(kline->cfg.temptarget), + (int)(kline->cfg.tempcritical), + (int)(kline->cfg.fantarget)); + break; + case KLN_CMD_IDENT: + case KLN_CMD_STATUS: + case KLN_CMD_ABORT: + applog(READ_DEBUG, + "%s%i:%d %s cmd [%c]", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->hd.dev), msg, kline->hd.cmd); + break; + case KLN_CMD_ENABLE: + applog(READ_DEBUG, + "%s%i:%d %s enable [%c] enable=%c", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->hd.dev), msg, kline->hd.cmd, + (char)(kline->hd.buf[0])); + break; + case KLN_CMD_NONCE: + default: + hexdata = bin2hex((unsigned char *)&(kline->hd.dev), REPLY_SIZE - 
1); + applog(LOG_ERR, + "%s%i:%d %s [%c:%s] unknown/unexpected and ignored", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->hd.dev), msg, kline->hd.cmd, + hexdata); + free(hexdata); + break; + } +} + +static bool SendCmd(struct cgpu_info *klncgpu, KLINE *kline, int datalen) +{ + int err, amt, writ; + + if (klncgpu->usbinfo.nodev) + return false; + + display_send_kline(klncgpu, kline, msg_send); + writ = KSENDHD(datalen); + err = usb_write(klncgpu, (char *)kline, writ, &amt, C_REQUESTRESULTS); + if (err < 0 || amt != writ) { + applog(LOG_ERR, "%s%i:%d Cmd:%c Dev:%d, write failed (%d:%d:%d)", + klncgpu->drv->name, klncgpu->device_id, + (int)(kline->hd.dev), + kline->hd.cmd, (int)(kline->hd.dev), + writ, amt, err); + return false; + } + + return true; +} + +static KLIST *GetReply(struct cgpu_info *klncgpu, uint8_t cmd, uint8_t dev) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + KLIST *kitem; + int retries = CMD_REPLY_RETRIES; + + while (retries-- > 0 && klncgpu->shutdown == false) { + cgsleep_ms(REPLY_WAIT_TIME); + cg_rlock(&klninfo->klist_lock); + kitem = klninfo->used; + while (kitem) { + if (kitem->kline.hd.cmd == cmd && + kitem->kline.hd.dev == dev && + kitem->ready == true && kitem->working == false) { + kitem->working = true; + cg_runlock(&klninfo->klist_lock); + return kitem; + } + kitem = kitem->next; + } + cg_runlock(&klninfo->klist_lock); + } + return NULL; +} + +static KLIST *SendCmdGetReply(struct cgpu_info *klncgpu, KLINE *kline, int datalen) +{ + if (!SendCmd(klncgpu, kline, datalen)) + return NULL; + + return GetReply(klncgpu, kline->hd.cmd, kline->hd.dev); +} + +static bool klondike_get_stats(struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + KLIST *kitem; + KLINE kline; + int slaves, dev; + + if (klncgpu->usbinfo.nodev || klninfo->status == NULL) + return false; + + applog(LOG_DEBUG, "%s%i: getting status", + klncgpu->drv->name, 
klncgpu->device_id);
+
+	// slavecount is written by the reply listener; read it under the lock
+	rd_lock(&(klninfo->stat_lock));
+	slaves = klninfo->status[0].kline.ws.slavecount;
+	rd_unlock(&(klninfo->stat_lock));
+
+	// loop thru devices and get status for each
+	for (dev = 0; dev <= slaves; dev++) {
+		zero_kline(&kline);
+		kline.hd.cmd = KLN_CMD_STATUS;
+		kline.hd.dev = dev;
+		kitem = SendCmdGetReply(klncgpu, &kline, 0);
+		if (kitem != NULL) {
+			wr_lock(&(klninfo->stat_lock));
+			memcpy((void *)(&(klninfo->status[dev])),
+				(void *)kitem,
+				sizeof(klninfo->status[dev]));
+			wr_unlock(&(klninfo->stat_lock));
+			kitem = release_kitem(klncgpu, kitem);
+		} else {
+			applog(LOG_ERR, "%s%i:%d failed to update stats",
+			       klncgpu->drv->name, klncgpu->device_id, dev);
+		}
+	}
+	return true;
+}
+
+// TODO: this only enables the master (no slaves)
+// Send ENABLE(on) to the master device, retrying up to 2 times, then
+// allow a short settling delay on success.  Returns true on success.
+static bool kln_enable(struct cgpu_info *klncgpu)
+{
+	KLIST *kitem;
+	KLINE kline;
+	int tries = 2;
+	bool ok = false;
+
+	zero_kline(&kline);
+	kline.hd.cmd = KLN_CMD_ENABLE;
+	kline.hd.dev = 0;
+	kline.hd.buf[0] = KLN_CMD_ENABLE_ON;
+
+	while (tries-- > 0) {
+		kitem = SendCmdGetReply(klncgpu, &kline, 1);
+		if (kitem) {
+			kitem = release_kitem(klncgpu, kitem);
+			ok = true;
+			break;
+		}
+		cgsleep_ms(50);
+	}
+
+	if (ok)
+		cgsleep_ms(50);
+
+	return ok;
+}
+
+// Send ENABLE(off) to device 'dev', or to every device 0..dev when
+// 'all' is true.  Fire-and-forget: no reply is waited for.
+static void kln_disable(struct cgpu_info *klncgpu, int dev, bool all)
+{
+	KLINE kline;
+	int i;
+
+	zero_kline(&kline);
+	kline.hd.cmd = KLN_CMD_ENABLE;
+	kline.hd.buf[0] = KLN_CMD_ENABLE_OFF;
+	for (i = (all ? 
0 : dev); i <= dev; i++) { + kline.hd.dev = i; + SendCmd(klncgpu, &kline, KSENDHD(1)); + } +} + +static bool klondike_init(struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + KLIST *kitem; + KLINE kline; + int slaves, dev; + + klninfo->initialised = false; + + zero_kline(&kline); + kline.hd.cmd = KLN_CMD_STATUS; + kline.hd.dev = 0; + kitem = SendCmdGetReply(klncgpu, &kline, 0); + if (kitem == NULL) + return false; + + slaves = kitem->kline.ws.slavecount; + if (klninfo->status == NULL) { + applog(LOG_DEBUG, "%s%i: initializing data", + klncgpu->drv->name, klncgpu->device_id); + + // alloc space for status, devinfo, cfg and jobque for master and slaves + klninfo->status = calloc(slaves+1, sizeof(*(klninfo->status))); + if (unlikely(!klninfo->status)) + quit(1, "Failed to calloc status array in klondke_get_stats"); + klninfo->devinfo = calloc(slaves+1, sizeof(*(klninfo->devinfo))); + if (unlikely(!klninfo->devinfo)) + quit(1, "Failed to calloc devinfo array in klondke_get_stats"); + klninfo->cfg = calloc(slaves+1, sizeof(*(klninfo->cfg))); + if (unlikely(!klninfo->cfg)) + quit(1, "Failed to calloc cfg array in klondke_get_stats"); + klninfo->jobque = calloc(slaves+1, sizeof(*(klninfo->jobque))); + if (unlikely(!klninfo->jobque)) + quit(1, "Failed to calloc jobque array in klondke_get_stats"); + } + + memcpy((void *)(&(klninfo->status[0])), (void *)kitem, sizeof(klninfo->status[0])); + kitem = release_kitem(klncgpu, kitem); + + // zero init triggers read back only + zero_kline(&kline); + kline.cfg.cmd = KLN_CMD_CONFIG; + + int size = 2; + + // boundaries are checked by device, with valid values returned + if (opt_klondike_options != NULL) { + int hashclock; + double temptarget; + + sscanf(opt_klondike_options, "%d:%lf", &hashclock, &temptarget); + SET_HASHCLOCK(kline.cfg.hashclock, hashclock); + kline.cfg.temptarget = cvtCToKln(temptarget); + kline.cfg.tempcritical = 0; // hard code for old firmware + 
kline.cfg.fantarget = 0xff; // hard code for old firmware + size = sizeof(kline.cfg) - 2; + } + + for (dev = 0; dev <= slaves; dev++) { + kline.cfg.dev = dev; + kitem = SendCmdGetReply(klncgpu, &kline, size); + if (kitem != NULL) { + memcpy((void *)&(klninfo->cfg[dev]), kitem, sizeof(klninfo->cfg[dev])); + applog(LOG_WARNING, "%s%i:%d config (%d: Clk: %d, T:%.0lf, C:%.0lf, F:%d)", + klncgpu->drv->name, klncgpu->device_id, dev, + dev, K_HASHCLOCK(klninfo->cfg[dev].kline.cfg.hashclock), + cvtKlnToC(klninfo->cfg[dev].kline.cfg.temptarget), + cvtKlnToC(klninfo->cfg[dev].kline.cfg.tempcritical), + (int)100*klninfo->cfg[dev].kline.cfg.fantarget/256); + kitem = release_kitem(klncgpu, kitem); + } + } + klondike_get_stats(klncgpu); + klninfo->initialised = true; + for (dev = 0; dev <= slaves; dev++) { + klninfo->devinfo[dev].rangesize = ((uint64_t)1<<32) / klninfo->status[dev].kline.ws.chipcount; + klninfo->devinfo[dev].chipstats = calloc(klninfo->status[dev].kline.ws.chipcount*2 , sizeof(uint32_t)); + } + + bool ok = kln_enable(klncgpu); + + if (!ok) + applog(LOG_ERR, "%s%i: failed to enable", klncgpu->drv->name, klncgpu->device_id); + + return ok; +} + +static void control_init(struct cgpu_info *klncgpu) +{ + int err, interface; + + if (klncgpu->usbinfo.nodev) + return; + + interface = usb_interface(klncgpu); + + err = usb_transfer(klncgpu, 0, 9, 1, interface, C_RESET); + + applog(LOG_DEBUG, "%s%i: reset got err %d", + klncgpu->drv->name, klncgpu->device_id, err); +} + +static struct cgpu_info *klondike_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + struct cgpu_info *klncgpu = usb_alloc_cgpu(&klondike_drv, 1); + struct klondike_info *klninfo = NULL; + KLINE kline; + + if (unlikely(!klncgpu)) + quit(1, "Failed to calloc klncgpu in klondike_detect_one"); + + klninfo = calloc(1, sizeof(*klninfo)); + if (unlikely(!klninfo)) + quit(1, "Failed to calloc klninfo in klondke_detect_one"); + klncgpu->device_data = (void *)klninfo; + + klninfo->free = 
new_klist_set(klncgpu); + + if (usb_init(klncgpu, dev, found)) { + int sent, recd, err; + KLIST kitem; + int attempts = 0; + + control_init(klncgpu); + + while (attempts++ < 3) { + kline.hd.cmd = KLN_CMD_IDENT; + kline.hd.dev = 0; + display_send_kline(klncgpu, &kline, msg_detect_send); + err = usb_write(klncgpu, (char *)&(kline.hd), 2, &sent, C_REQUESTRESULTS); + if (err < 0 || sent != 2) { + applog(LOG_ERR, "%s (%s) detect write failed (%d:%d)", + klncgpu->drv->dname, + klncgpu->device_path, + sent, err); + } + cgsleep_ms(REPLY_WAIT_TIME*10); + err = usb_read(klncgpu, (char *)&(kitem.kline), REPLY_SIZE, &recd, C_GETRESULTS); + if (err < 0) { + applog(LOG_ERR, "%s (%s) detect read failed (%d:%d)", + klncgpu->drv->dname, + klncgpu->device_path, + recd, err); + } else if (recd < 1) { + applog(LOG_ERR, "%s (%s) detect empty reply (%d)", + klncgpu->drv->dname, + klncgpu->device_path, + recd); + } else if (kitem.kline.hd.cmd == KLN_CMD_IDENT && kitem.kline.hd.dev == 0) { + display_kline(klncgpu, &kitem.kline, msg_detect_reply); + applog(LOG_DEBUG, "%s (%s) detect successful (%d attempt%s)", + klncgpu->drv->dname, + klncgpu->device_path, + attempts, attempts == 1 ? 
"" : "s"); + if (!add_cgpu(klncgpu)) + break; + update_usb_stats(klncgpu); + applog(LOG_DEBUG, "Klondike cgpu added"); + rwlock_init(&klninfo->stat_lock); + cglock_init(&klninfo->klist_lock); + return klncgpu; + } + } + usb_uninit(klncgpu); + } + free(klninfo->free); + free(klninfo); + free(klncgpu); + return NULL; +} + +static void klondike_detect(bool __maybe_unused hotplug) +{ + usb_detect(&klondike_drv, klondike_detect_one); +} + +static void klondike_identify(__maybe_unused struct cgpu_info *klncgpu) +{ +/* + KLINE kline; + + zero_kline(&kline); + kline.hd.cmd = KLN_CMD_IDENT; + kline.hd.dev = 0; + SendCmdGetReply(klncgpu, &kline, KSENDHD(0)); +*/ +} + +static void klondike_check_nonce(struct cgpu_info *klncgpu, KLIST *kitem) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + struct work *work, *look, *tmp; + KLINE *kline = &(kitem->kline); + struct timeval tv_now; + double us_diff; + uint32_t nonce = K_NONCE(kline->wr.nonce) - 0xC0; + + applog(LOG_DEBUG, "%s%i:%d FOUND NONCE (%02x:%08x)", + klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), + kline->wr.workid, (unsigned int)nonce); + + work = NULL; + cgtime(&tv_now); + rd_lock(&(klncgpu->qlock)); + HASH_ITER(hh, klncgpu->queued_work, look, tmp) { + if (ms_tdiff(&tv_now, &(look->tv_stamp)) < OLD_WORK_MS && + (look->subid == (kline->wr.dev*256 + kline->wr.workid))) { + work = look; + break; + } + } + rd_unlock(&(klncgpu->qlock)); + + if (work) { + wr_lock(&(klninfo->stat_lock)); + klninfo->devinfo[kline->wr.dev].noncecount++; + klninfo->noncecount++; + wr_unlock(&(klninfo->stat_lock)); + + applog(LOG_DEBUG, "%s%i:%d SUBMIT NONCE (%02x:%08x)", + klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), + kline->wr.workid, (unsigned int)nonce); + + cgtime(&tv_now); + bool ok = submit_nonce(klncgpu->thr[0], work, nonce); + + applog(LOG_DEBUG, "%s%i:%d chip stats %d, %08x, %d, %d", + klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), + kline->wr.dev, 
(unsigned int)nonce, + klninfo->devinfo[kline->wr.dev].rangesize, + klninfo->status[kline->wr.dev].kline.ws.chipcount); + + klninfo->devinfo[kline->wr.dev].chipstats[(nonce / klninfo->devinfo[kline->wr.dev].rangesize) + (ok ? 0 : klninfo->status[kline->wr.dev].kline.ws.chipcount)]++; + + us_diff = us_tdiff(&tv_now, &(kitem->tv_when)); + if (klninfo->delay_count == 0) { + klninfo->delay_min = us_diff; + klninfo->delay_max = us_diff; + } else { + if (klninfo->delay_min > us_diff) + klninfo->delay_min = us_diff; + if (klninfo->delay_max < us_diff) + klninfo->delay_max = us_diff; + } + klninfo->delay_count++; + klninfo->delay_total += us_diff; + + if (klninfo->nonce_count > 0) { + us_diff = us_tdiff(&(kitem->tv_when), &(klninfo->tv_last_nonce_received)); + if (klninfo->nonce_count == 1) { + klninfo->nonce_min = us_diff; + klninfo->nonce_max = us_diff; + } else { + if (klninfo->nonce_min > us_diff) + klninfo->nonce_min = us_diff; + if (klninfo->nonce_max < us_diff) + klninfo->nonce_max = us_diff; + } + klninfo->nonce_total += us_diff; + } + klninfo->nonce_count++; + + memcpy(&(klninfo->tv_last_nonce_received), &(kitem->tv_when), + sizeof(klninfo->tv_last_nonce_received)); + + return; + } + + applog(LOG_ERR, "%s%i:%d unknown work (%02x:%08x) - ignored", + klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), + kline->wr.workid, (unsigned int)nonce); + + //inc_hw_errors(klncgpu->thr[0]); +} + +// thread to keep looking for replies +static void *klondike_get_replies(void *userdata) +{ + struct cgpu_info *klncgpu = (struct cgpu_info *)userdata; + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + KLIST *kitem = NULL; + char *hexdata; + int err, recd, slaves, dev, isc; + bool overheat, sent; + + applog(LOG_DEBUG, "%s%i: listening for replies", + klncgpu->drv->name, klncgpu->device_id); + + while (klncgpu->shutdown == false) { + if (klncgpu->usbinfo.nodev) + return NULL; + + if (kitem == NULL) + kitem = allocate_kitem(klncgpu); + else + 
memset((void *)&(kitem->kline), 0, sizeof(kitem->kline)); + + err = usb_read(klncgpu, (char *)&(kitem->kline), REPLY_SIZE, &recd, C_GETRESULTS); + if (err || recd != REPLY_SIZE) { + if (err != -7) + applog(LOG_ERR, "%s%i: reply err=%d amt=%d", + klncgpu->drv->name, klncgpu->device_id, + err, recd); + } + if (!err && recd == REPLY_SIZE) { + cgtime(&(kitem->tv_when)); + rd_lock(&(klninfo->stat_lock)); + kitem->block_seq = klninfo->block_seq; + rd_unlock(&(klninfo->stat_lock)); + if (opt_log_level <= READ_DEBUG) { + hexdata = bin2hex((unsigned char *)&(kitem->kline.hd.dev), recd-1); + applog(READ_DEBUG, "%s%i:%d reply [%c:%s]", + klncgpu->drv->name, klncgpu->device_id, + (int)(kitem->kline.hd.dev), + kitem->kline.hd.cmd, hexdata); + free(hexdata); + } + + // We can't check this until it's initialised + if (klninfo->initialised) { + rd_lock(&(klninfo->stat_lock)); + slaves = klninfo->status[0].kline.ws.slavecount; + rd_unlock(&(klninfo->stat_lock)); + + if (kitem->kline.hd.dev > slaves) { + applog(LOG_ERR, "%s%i: reply [%c] has invalid dev=%d (max=%d) using 0", + klncgpu->drv->name, klncgpu->device_id, + (char)(kitem->kline.hd.cmd), + (int)(kitem->kline.hd.dev), + slaves); + /* TODO: this is rather problematic if there are slaves + * however without slaves - it should always be zero */ + kitem->kline.hd.dev = 0; + } else { + wr_lock(&(klninfo->stat_lock)); + klninfo->jobque[kitem->kline.hd.dev].late_update_sequential = 0; + wr_unlock(&(klninfo->stat_lock)); + } + } + + switch (kitem->kline.hd.cmd) { + case KLN_CMD_NONCE: + klondike_check_nonce(klncgpu, kitem); + display_kline(klncgpu, &kitem->kline, msg_reply); + break; + case KLN_CMD_WORK: + // We can't do/check this until it's initialised + if (klninfo->initialised) { + dev = kitem->kline.ws.dev; + if (kitem->kline.ws.workqc == 0) { + bool idle = false; + rd_lock(&(klninfo->stat_lock)); + if (klninfo->jobque[dev].flushed == false) + idle = true; + slaves = klninfo->status[0].kline.ws.slavecount; + 
rd_unlock(&(klninfo->stat_lock)); + if (idle) + applog(LOG_WARNING, "%s%i:%d went idle before work was sent", + klncgpu->drv->name, + klncgpu->device_id, + dev); + } + wr_lock(&(klninfo->stat_lock)); + klninfo->jobque[dev].flushed = false; + wr_unlock(&(klninfo->stat_lock)); + } + case KLN_CMD_STATUS: + case KLN_CMD_ABORT: + // We can't do/check this until it's initialised + if (klninfo->initialised) { + isc = 0; + dev = kitem->kline.ws.dev; + wr_lock(&(klninfo->stat_lock)); + klninfo->jobque[dev].workqc = (int)(kitem->kline.ws.workqc); + cgtime(&(klninfo->jobque[dev].last_update)); + slaves = klninfo->status[0].kline.ws.slavecount; + overheat = klninfo->jobque[dev].overheat; + if (dev == 0) { + if (kitem->kline.ws.slavecount != slaves) + isc = ++klninfo->incorrect_slave_sequential; + else + isc = klninfo->incorrect_slave_sequential = 0; + } + wr_unlock(&(klninfo->stat_lock)); + + if (isc) { + applog(LOG_ERR, "%s%i:%d reply [%c] has a diff" + " # of slaves=%d (curr=%d)%s", + klncgpu->drv->name, + klncgpu->device_id, + dev, + (char)(kitem->kline.ws.cmd), + (int)(kitem->kline.ws.slavecount), + slaves, + isc <= KLN_ISS_IGNORE ? 
"" : + " disabling device"); + if (isc > KLN_ISS_IGNORE) + usb_nodev(klncgpu); + break; + } + + if (!overheat) { + double temp = cvtKlnToC(kitem->kline.ws.temp); + if (temp >= KLN_KILLWORK_TEMP) { + KLINE kline; + + wr_lock(&(klninfo->stat_lock)); + klninfo->jobque[dev].overheat = true; + wr_unlock(&(klninfo->stat_lock)); + + applog(LOG_WARNING, "%s%i:%d Critical overheat (%.0fC)", + klncgpu->drv->name, + klncgpu->device_id, + dev, temp); + + zero_kline(&kline); + kline.hd.cmd = KLN_CMD_ABORT; + kline.hd.dev = dev; + sent = SendCmd(klncgpu, &kline, KSENDHD(0)); + kln_disable(klncgpu, dev, false); + if (!sent) { + applog(LOG_ERR, "%s%i:%d overheat failed to" + " abort work - disabling device", + klncgpu->drv->name, + klncgpu->device_id, + dev); + usb_nodev(klncgpu); + } + } + } + } + case KLN_CMD_ENABLE: + wr_lock(&(klninfo->stat_lock)); + klninfo->errorcount += kitem->kline.ws.errorcount; + klninfo->noisecount += kitem->kline.ws.noise; + wr_unlock(&(klninfo->stat_lock)); + display_kline(klncgpu, &kitem->kline, msg_reply); + kitem->ready = true; + kitem = NULL; + break; + case KLN_CMD_CONFIG: + display_kline(klncgpu, &kitem->kline, msg_reply); + kitem->ready = true; + kitem = NULL; + break; + case KLN_CMD_IDENT: + display_kline(klncgpu, &kitem->kline, msg_reply); + kitem->ready = true; + kitem = NULL; + break; + default: + display_kline(klncgpu, &kitem->kline, msg_reply); + break; + } + } + } + return NULL; +} + +static void klondike_flush_work(struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + KLIST *kitem; + KLINE kline; + int slaves, dev; + + if (klninfo->initialised) { + wr_lock(&(klninfo->stat_lock)); + klninfo->block_seq++; + slaves = klninfo->status[0].kline.ws.slavecount; + wr_unlock(&(klninfo->stat_lock)); + + applog(LOG_DEBUG, "%s%i: flushing work", + klncgpu->drv->name, klncgpu->device_id); + zero_kline(&kline); + kline.hd.cmd = KLN_CMD_ABORT; + for (dev = 0; dev <= slaves; dev++) { + 
kline.hd.dev = dev; + kitem = SendCmdGetReply(klncgpu, &kline, KSENDHD(0)); + if (kitem != NULL) { + wr_lock(&(klninfo->stat_lock)); + memcpy((void *)&(klninfo->status[dev]), + kitem, + sizeof(klninfo->status[dev])); + klninfo->jobque[dev].flushed = true; + wr_unlock(&(klninfo->stat_lock)); + kitem = release_kitem(klncgpu, kitem); + } + } + } +} + +static bool klondike_thread_prepare(struct thr_info *thr) +{ + struct cgpu_info *klncgpu = thr->cgpu; + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + + if (thr_info_create(&(klninfo->replies_thr), NULL, klondike_get_replies, (void *)klncgpu)) { + applog(LOG_ERR, "%s%i: thread create failed", klncgpu->drv->name, klncgpu->device_id); + return false; + } + pthread_detach(klninfo->replies_thr.pth); + + // let the listening get started + cgsleep_ms(100); + + return klondike_init(klncgpu); +} + +static bool klondike_thread_init(struct thr_info *thr) +{ + struct cgpu_info *klncgpu = thr->cgpu; + + if (klncgpu->usbinfo.nodev) + return false; + + klondike_flush_work(klncgpu); + + return true; +} + +static void klondike_shutdown(struct thr_info *thr) +{ + struct cgpu_info *klncgpu = thr->cgpu; + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + + applog(LOG_DEBUG, "%s%i: shutting down work", + klncgpu->drv->name, klncgpu->device_id); + + kln_disable(klncgpu, klninfo->status[0].kline.ws.slavecount, true); + + klncgpu->shutdown = true; +} + +static void klondike_thread_enable(struct thr_info *thr) +{ + struct cgpu_info *klncgpu = thr->cgpu; + + if (klncgpu->usbinfo.nodev) + return; + +/* + KLINE kline; + + zero_kline(&kline); + kline.hd.cmd = KLN_CMD_ENABLE; + kline.hd.dev = dev; + kline.hd.buf[0] = KLN_CMD_ENABLE_OFF; + kitem = SendCmdGetReply(klncgpu, &kline, KSENDHD(1)); +*/ + +} + +static bool klondike_send_work(struct cgpu_info *klncgpu, int dev, struct work *work) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + struct 
work *look, *tmp; + KLINE kline; + struct timeval tv_old; + int wque_size, wque_cleared; + + if (klncgpu->usbinfo.nodev) + return false; + + zero_kline(&kline); + kline.wt.cmd = KLN_CMD_WORK; + kline.wt.dev = dev; + memcpy(kline.wt.midstate, work->midstate, MIDSTATE_BYTES); + memcpy(kline.wt.merkle, work->data + MERKLE_OFFSET, MERKLE_BYTES); + kline.wt.workid = (uint8_t)(klninfo->devinfo[dev].nextworkid++ & 0xFF); + work->subid = dev*256 + kline.wt.workid; + cgtime(&work->tv_stamp); + + if (opt_log_level <= LOG_DEBUG) { + char *hexdata = bin2hex((void *)&kline.wt, sizeof(kline.wt)); + applog(LOG_DEBUG, "WORKDATA: %s", hexdata); + free(hexdata); + } + + applog(LOG_DEBUG, "%s%i:%d sending work (%d:%02x)", + klncgpu->drv->name, klncgpu->device_id, dev, + dev, kline.wt.workid); + KLIST *kitem = SendCmdGetReply(klncgpu, &kline, sizeof(kline.wt)); + if (kitem != NULL) { + wr_lock(&(klninfo->stat_lock)); + memcpy((void *)&(klninfo->status[dev]), kitem, sizeof(klninfo->status[dev])); + wr_unlock(&(klninfo->stat_lock)); + kitem = release_kitem(klncgpu, kitem); + + // remove old work + wque_size = 0; + wque_cleared = 0; + cgtime(&tv_old); + wr_lock(&klncgpu->qlock); + HASH_ITER(hh, klncgpu->queued_work, look, tmp) { + if (ms_tdiff(&tv_old, &(look->tv_stamp)) > OLD_WORK_MS) { + __work_completed(klncgpu, look); + free_work(look); + wque_cleared++; + } else + wque_size++; + } + wr_unlock(&klncgpu->qlock); + + wr_lock(&(klninfo->stat_lock)); + klninfo->wque_size = wque_size; + klninfo->wque_cleared = wque_cleared; + wr_unlock(&(klninfo->stat_lock)); + return true; + } + return false; +} + +static bool klondike_queue_full(struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + struct work *work = NULL; + int dev, queued, slaves, seq, howlong; + struct timeval now; + bool nowork; + + if (klncgpu->shutdown == true) + return true; + + cgtime(&now); + rd_lock(&(klninfo->stat_lock)); + slaves = 
klninfo->status[0].kline.ws.slavecount; + for (dev = 0; dev <= slaves; dev++) + if (ms_tdiff(&now, &(klninfo->jobque[dev].last_update)) > LATE_UPDATE_MS) { + klninfo->jobque[dev].late_update_count++; + seq = ++klninfo->jobque[dev].late_update_sequential; + rd_unlock(&(klninfo->stat_lock)); + if (seq < LATE_UPDATE_LIMIT) { + applog(LOG_DEBUG, "%s%i:%d late update", + klncgpu->drv->name, klncgpu->device_id, dev); + klondike_get_stats(klncgpu); + goto que; + } else { + applog(LOG_WARNING, "%s%i:%d late update (%d) reached - attempting reset", + klncgpu->drv->name, klncgpu->device_id, + dev, LATE_UPDATE_LIMIT); + control_init(klncgpu); + kln_enable(klncgpu); + klondike_get_stats(klncgpu); + rd_lock(&(klninfo->stat_lock)); + howlong = ms_tdiff(&now, &(klninfo->jobque[dev].last_update)); + if (howlong > LATE_UPDATE_MS) { + rd_unlock(&(klninfo->stat_lock)); + if (howlong > LATE_UPDATE_NODEV_MS) { + applog(LOG_ERR, "%s%i:%d reset failed - dropping device", + klncgpu->drv->name, klncgpu->device_id, dev); + usb_nodev(klncgpu); + } else + cgsleep_ms(LATE_UPDATE_SLEEP_MS); + + return true; + } + break; + } + } + rd_unlock(&(klninfo->stat_lock)); + +que: + + nowork = true; + for (queued = 0; queued < MAX_WORK_COUNT-1; queued++) + for (dev = 0; dev <= slaves; dev++) { +tryagain: + rd_lock(&(klninfo->stat_lock)); + if (klninfo->jobque[dev].overheat) { + double temp = cvtKlnToC(klninfo->status[0].kline.ws.temp); + if ((queued == MAX_WORK_COUNT-2) && + ms_tdiff(&now, &(klninfo->jobque[dev].last_update)) > (LATE_UPDATE_MS/2)) { + rd_unlock(&(klninfo->stat_lock)); + klondike_get_stats(klncgpu); + goto tryagain; + } + if (temp <= KLN_COOLED_DOWN) { + klninfo->jobque[dev].overheat = false; + rd_unlock(&(klninfo->stat_lock)); + applog(LOG_WARNING, "%s%i:%d Overheat recovered (%.0fC)", + klncgpu->drv->name, klncgpu->device_id, + dev, temp); + kln_enable(klncgpu); + goto tryagain; + } else { + rd_unlock(&(klninfo->stat_lock)); + continue; + } + } + + if (klninfo->jobque[dev].workqc <= 
queued) { + rd_unlock(&(klninfo->stat_lock)); + if (!work) + work = get_queued(klncgpu); + if (unlikely(!work)) + return false; + nowork = false; + if (klondike_send_work(klncgpu, dev, work)) + return false; + } else + rd_unlock(&(klninfo->stat_lock)); + } + + if (nowork) + cgsleep_ms(10); // avoid a hard loop in case we have nothing to do + + return true; +} + +static int64_t klondike_scanwork(struct thr_info *thr) +{ + struct cgpu_info *klncgpu = thr->cgpu; + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + int64_t newhashcount = 0; + int dev, slaves; + + if (klncgpu->usbinfo.nodev) + return -1; + + restart_wait(thr, 200); + if (klninfo->status != NULL) { + rd_lock(&(klninfo->stat_lock)); + slaves = klninfo->status[0].kline.ws.slavecount; + for (dev = 0; dev <= slaves; dev++) { + uint64_t newhashdev = 0, hashcount; + int maxcount; + + hashcount = K_HASHCOUNT(klninfo->status[dev].kline.ws.hashcount); + maxcount = K_MAXCOUNT(klninfo->status[dev].kline.ws.maxcount); + // todo: chg this to check workid for wrapped instead + if (klninfo->devinfo[dev].lasthashcount > hashcount) + newhashdev += maxcount; // hash counter wrapped + newhashdev += hashcount - klninfo->devinfo[dev].lasthashcount; + klninfo->devinfo[dev].lasthashcount = hashcount; + if (maxcount != 0) + klninfo->hashcount += (newhashdev << 32) / maxcount; + } + newhashcount += 0xffffffffull * (uint64_t)klninfo->noncecount; + klninfo->noncecount = 0; + rd_unlock(&(klninfo->stat_lock)); + } + + return newhashcount; +} + + +static void get_klondike_statline_before(char *buf, size_t siz, struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + uint8_t temp = 0xFF; + uint16_t fan = 0; + uint16_t clock = 0; + int dev, slaves; + + if (klninfo->status == NULL) { + blank_get_statline_before(buf, siz, klncgpu); + return; + } + + rd_lock(&(klninfo->stat_lock)); + slaves = klninfo->status[0].kline.ws.slavecount; + for (dev = 0; dev 
<= slaves; dev++) { + if (klninfo->status[dev].kline.ws.temp < temp) + temp = klninfo->status[dev].kline.ws.temp; + fan += klninfo->cfg[dev].kline.cfg.fantarget; + clock += (uint16_t)K_HASHCLOCK(klninfo->cfg[dev].kline.cfg.hashclock); + } + rd_unlock(&(klninfo->stat_lock)); + fan /= slaves + 1; + //fan *= 100/255; // <-- You can't do this because int 100 / int 255 == 0 + fan = 100 * fan / 255; + if (fan > 100) + fan = 100; + clock /= slaves + 1; + if (clock > 999) // error - so truncate it + clock = 999; + + tailsprintf(buf, siz, "%3dMHz %3d%% %.1fC", (int)clock, (int)fan, cvtKlnToC(temp)); +} + +static struct api_data *klondike_api_stats(struct cgpu_info *klncgpu) +{ + struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); + struct api_data *root = NULL; + char buf[32]; + int dev, slaves; + + if (klninfo->status == NULL) + return NULL; + + rd_lock(&(klninfo->stat_lock)); + slaves = klninfo->status[0].kline.ws.slavecount; + for (dev = 0; dev <= slaves; dev++) { + + float fTemp = cvtKlnToC(klninfo->status[dev].kline.ws.temp); + sprintf(buf, "Temp %d", dev); + root = api_add_temp(root, buf, &fTemp, true); + + double dClk = (double)K_HASHCLOCK(klninfo->cfg[dev].kline.cfg.hashclock); + sprintf(buf, "Clock %d", dev); + root = api_add_freq(root, buf, &dClk, true); + + unsigned int iFan = (unsigned int)100 * klninfo->cfg[dev].kline.cfg.fantarget / 255; + sprintf(buf, "Fan Percent %d", dev); + root = api_add_int(root, buf, (int *)(&iFan), true); + + iFan = 0; + if (klninfo->status[dev].kline.ws.fanspeed > 0) + iFan = (unsigned int)TACH_FACTOR / klninfo->status[dev].kline.ws.fanspeed; + sprintf(buf, "Fan RPM %d", dev); + root = api_add_int(root, buf, (int *)(&iFan), true); + + if (klninfo->devinfo[dev].chipstats != NULL) { + char data[2048]; + char one[32]; + int n; + + sprintf(buf, "Nonces / Chip %d", dev); + data[0] = '\0'; + for (n = 0; n < klninfo->status[dev].kline.ws.chipcount; n++) { + snprintf(one, sizeof(one), "%07d ", 
klninfo->devinfo[dev].chipstats[n]); + strcat(data, one); + } + root = api_add_string(root, buf, data, true); + + sprintf(buf, "Errors / Chip %d", dev); + data[0] = '\0'; + for (n = 0; n < klninfo->status[dev].kline.ws.chipcount; n++) { + snprintf(one, sizeof(one), "%07d ", klninfo->devinfo[dev].chipstats[n + klninfo->status[dev].kline.ws.chipcount]); + strcat(data, one); + } + root = api_add_string(root, buf, data, true); + } + } + + root = api_add_uint64(root, "Hash Count", &(klninfo->hashcount), true); + root = api_add_uint64(root, "Error Count", &(klninfo->errorcount), true); + root = api_add_uint64(root, "Noise Count", &(klninfo->noisecount), true); + + root = api_add_int(root, "KLine Limit", &(klninfo->kline_count), true); + root = api_add_int(root, "KLine Used", &(klninfo->used_count), true); + + root = api_add_elapsed(root, "KQue Delay Count", &(klninfo->delay_count), true); + root = api_add_elapsed(root, "KQue Delay Total", &(klninfo->delay_total), true); + root = api_add_elapsed(root, "KQue Delay Min", &(klninfo->delay_min), true); + root = api_add_elapsed(root, "KQue Delay Max", &(klninfo->delay_max), true); + double avg; + if (klninfo->delay_count == 0) + avg = 0; + else + avg = klninfo->delay_total / klninfo->delay_count; + root = api_add_diff(root, "KQue Delay Avg", &avg, true); + + root = api_add_elapsed(root, "KQue Nonce Count", &(klninfo->nonce_count), true); + root = api_add_elapsed(root, "KQue Nonce Total", &(klninfo->nonce_total), true); + root = api_add_elapsed(root, "KQue Nonce Min", &(klninfo->nonce_min), true); + root = api_add_elapsed(root, "KQue Nonce Max", &(klninfo->nonce_max), true); + if (klninfo->nonce_count == 0) + avg = 0; + else + avg = klninfo->nonce_total / klninfo->nonce_count; + root = api_add_diff(root, "KQue Nonce Avg", &avg, true); + + root = api_add_int(root, "WQue Size", &(klninfo->wque_size), true); + root = api_add_int(root, "WQue Cleared", &(klninfo->wque_cleared), true); + + rd_unlock(&(klninfo->stat_lock)); + + return 
root; +} + +struct device_drv klondike_drv = { + .drv_id = DRIVER_klondike, + .dname = "Klondike", + .name = "KLN", + .drv_detect = klondike_detect, + .get_api_stats = klondike_api_stats, + .get_statline_before = get_klondike_statline_before, + .get_stats = klondike_get_stats, + .identify_device = klondike_identify, + .thread_prepare = klondike_thread_prepare, + .thread_init = klondike_thread_init, + .hash_work = hash_queued_work, + .scanwork = klondike_scanwork, + .queue_full = klondike_queue_full, + .flush_work = klondike_flush_work, + .thread_shutdown = klondike_shutdown, + .thread_enable = klondike_thread_enable +}; diff --git a/driver-knc.c b/driver-knc.c new file mode 100644 index 0000000..564d27d --- /dev/null +++ b/driver-knc.c @@ -0,0 +1,865 @@ +/* + * cgminer driver for KnCminer devices + * + * Copyright 2014 KnCminer + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "logging.h" +#include "miner.h" +#include "knc-transport.h" +#include "knc-asic.h" + +#define MAX_ASICS 6 +#define DIES_PER_ASIC 4 +#define MAX_CORES_PER_DIE 360 +#define WORKS_PER_CORE 3 + +#define CORE_ERROR_LIMIT 30 +#define CORE_ERROR_INTERVAL 30 +#define CORE_ERROR_DISABLE_TIME 5*60 +#define CORE_SUBMIT_MIN_TIME 2 +#define CORE_TIMEOUT 20 +#define SCAN_ADJUST_RANGE 32 + +static struct timeval now; +static const struct timeval core_check_interval = { + CORE_ERROR_INTERVAL, 0 +}; +static const struct timeval core_disable_interval = { + CORE_ERROR_DISABLE_TIME, 0 +}; +static const struct timeval core_submit_interval = { + CORE_SUBMIT_MIN_TIME, 0 +}; +static const struct timeval core_timeout_interval = { + CORE_TIMEOUT, 0 +}; + +struct knc_die; + +struct knc_core_state { + int generation; + int core; + int coreid; + struct knc_die *die; + struct { + int slot; + struct work *work; + } workslot[WORKS_PER_CORE]; /* active, next */ + int transfer_stamp; + struct knc_report report; + struct { + int slot; + uint32_t nonce; + } last_nonce; + uint32_t works; + uint32_t shares; + uint32_t errors; + uint32_t completed; + int last_slot; + uint32_t errors_now; + struct timeval disabled_until; + struct timeval hold_work_until; + struct timeval timeout; + bool inuse; +}; + +struct knc_state; + +struct knc_die { + int channel; + int die; + int version; + int cores; + struct knc_state *knc; + struct knc_core_state *core; +}; + +#define MAX_SPI_SIZE (4096) +#define MAX_SPI_RESPONSES (MAX_SPI_SIZE / (2 + 4 + 1 + 1 + 1 + 4)) +#define MAX_SPI_MESSAGE (128) +#define KNC_SPI_BUFFERS (3) + +struct knc_state { + struct cgpu_info *cgpu; + void *ctx; + int generation; /* work/block generation, incremented on each flush invalidating older works */ + int dies; + struct knc_die die[MAX_ASICS*DIES_PER_ASIC]; + int cores; + int scan_adjust; + int startup; + /* Statistics */ + 
uint64_t shares; /* diff1 shares reported by hardware */ + uint64_t works; /* Work units submitted */ + uint64_t completed; /* Work units completed */ + uint64_t errors; /* Hardware & communication errors */ + struct timeval next_error_interval; + /* End of statistics */ + /* SPI communications thread */ + pthread_mutex_t spi_qlock; /* SPI queue status lock */ + struct thr_info spi_thr; /* SPI I/O thread */ + pthread_cond_t spi_qcond; /* SPI queue change wakeup */ + struct knc_spi_buffer { + enum { + KNC_SPI_IDLE=0, + KNC_SPI_PENDING, + KNC_SPI_DONE + } state; + int size; + uint8_t txbuf[MAX_SPI_SIZE]; + uint8_t rxbuf[MAX_SPI_SIZE]; + int responses; + struct knc_spi_response { + int request_length; + int response_length; + enum { + KNC_UNKNOWN = 0, + KNC_NO_RESPONSE, + KNC_SETWORK, + KNC_REPORT, + KNC_INFO + } type; + struct knc_core_state *core; + uint32_t data; + int offset; + } response_info[MAX_SPI_RESPONSES]; + } spi_buffer[KNC_SPI_BUFFERS]; + int send_buffer; + int read_buffer; + int send_buffer_count; + int read_buffer_count; + /* end SPI thread */ + + /* Do not add anything below here!! 
core[] must be last */
+	struct knc_core_state core[];	/* flexible array member, one entry per detected core */
+};
+
+int opt_knc_device_idx = 0;
+int opt_knc_device_bus = -1;
+char *knc_log_file = NULL;
+
+/* Dedicated SPI I/O thread.  Walks spi_buffer[] in ring order, sleeping
+ * on spi_qcond until the next buffer is marked KNC_SPI_PENDING, performs
+ * the transfer with the queue lock dropped, then marks that buffer
+ * KNC_SPI_DONE and wakes any waiter.  Exits once cgpu->shutdown is set.
+ */
+static void *knc_spi(void *thr_data)
+{
+	struct cgpu_info *cgpu = thr_data;
+	struct knc_state *knc = cgpu->device_data;
+	int buffer = 0;
+
+	pthread_mutex_lock(&knc->spi_qlock);
+	while (!cgpu->shutdown) {
+		int this_buffer = buffer;
+		while (knc->spi_buffer[buffer].state != KNC_SPI_PENDING && !cgpu->shutdown)
+			pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
+		pthread_mutex_unlock(&knc->spi_qlock);
+		if (cgpu->shutdown)
+			return NULL;
+
+		/* the actual SPI transfer runs without the queue lock held */
+		knc_trnsp_transfer(knc->ctx, knc->spi_buffer[buffer].txbuf, knc->spi_buffer[buffer].rxbuf, knc->spi_buffer[buffer].size);
+
+		buffer += 1;
+		if (buffer >= KNC_SPI_BUFFERS)
+			buffer = 0;
+
+		pthread_mutex_lock(&knc->spi_qlock);
+		knc->spi_buffer[this_buffer].state = KNC_SPI_DONE;
+		pthread_cond_signal(&knc->spi_qcond);
+	}
+	pthread_mutex_unlock(&knc->spi_qlock);
+	return NULL;
+}
+
+static void knc_process_responses(struct thr_info *thr);
+
+/* Hand the current send buffer (if it holds data) to the SPI thread and
+ * advance to the next ring slot, blocking while that slot is still
+ * pending on the SPI thread; finally drain any completed responses.
+ * NOTE(review): buffer->state is read before the lock is taken - this
+ * assumes only this thread ever moves a buffer out of KNC_SPI_IDLE;
+ * confirm against the rest of the driver.
+ */
+static void knc_flush(struct thr_info *thr)
+{
+	struct cgpu_info *cgpu = thr->cgpu;
+	struct knc_state *knc = cgpu->device_data;
+	struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer];
+	if (buffer->state == KNC_SPI_IDLE && buffer->size > 0) {
+		pthread_mutex_lock(&knc->spi_qlock);
+		buffer->state = KNC_SPI_PENDING;
+		pthread_cond_signal(&knc->spi_qcond);
+		knc->send_buffer += 1;
+		knc->send_buffer_count += 1;
+		if (knc->send_buffer >= KNC_SPI_BUFFERS)
+			knc->send_buffer = 0;
+		buffer = &knc->spi_buffer[knc->send_buffer];
+		/* Block for SPI to finish a transfer if all buffers are busy */
+		while (buffer->state == KNC_SPI_PENDING) {
+			applog(LOG_DEBUG, "KnC: SPI buffer full (%d), waiting for SPI thread", buffer->responses);
+			pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock);
+		}
+		pthread_mutex_unlock(&knc->spi_qlock);
+	}
+	knc_process_responses(thr);
+}
+
+/* Like knc_flush(), but additionally waits for the most recently queued
+ * buffer to complete, so all previously queued work is known to have
+ * reached the hardware before returning.
+ */
+static void knc_sync(struct thr_info *thr)
+{
+	struct 
cgpu_info *cgpu = thr->cgpu; + struct knc_state *knc = cgpu->device_data; + struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer]; + int sent = 0; + pthread_mutex_lock(&knc->spi_qlock); + if (buffer->state == KNC_SPI_IDLE && buffer->size > 0) { + buffer->state = KNC_SPI_PENDING; + pthread_cond_signal(&knc->spi_qcond); + knc->send_buffer += 1; + knc->send_buffer_count += 1; + if (knc->send_buffer >= KNC_SPI_BUFFERS) + knc->send_buffer = 0; + sent = 1; + } + int prev_buffer = knc->send_buffer - 1; + if (prev_buffer < 0) + prev_buffer = KNC_SPI_BUFFERS - 1; + buffer = &knc->spi_buffer[prev_buffer]; + while (buffer->state == KNC_SPI_PENDING) + pthread_cond_wait(&knc->spi_qcond, &knc->spi_qlock); + pthread_mutex_unlock(&knc->spi_qlock); + + int pending = knc->send_buffer - knc->read_buffer; + if (pending <= 0) + pending += KNC_SPI_BUFFERS; + pending -= 1 - sent; + applog(LOG_INFO, "KnC: sync %d pending buffers", pending); + knc_process_responses(thr); +} + +static void knc_transfer(struct thr_info *thr, struct knc_core_state *core, int request_length, uint8_t *request, int response_length, int response_type, uint32_t data) +{ + struct cgpu_info *cgpu = thr->cgpu; + struct knc_state *knc = cgpu->device_data; + struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->send_buffer]; + /* FPGA control, request header, request body/response, CRC(4), ACK(1), EXTRA(3) */ + int msglen = 2 + MAX(request_length, 4 + response_length ) + 4 + 1 + 3; + if (buffer->size + msglen > MAX_SPI_SIZE || buffer->responses >= MAX_SPI_RESPONSES) { + applog(LOG_INFO, "KnC: SPI buffer sent, %d messages %d bytes", buffer->responses, buffer->size); + knc_flush(thr); + buffer = &knc->spi_buffer[knc->send_buffer]; + } + struct knc_spi_response *response_info = &buffer->response_info[buffer->responses]; + buffer->responses++; + response_info->offset = buffer->size; + response_info->type = response_type; + response_info->request_length = request_length; + response_info->response_length = 
response_length; + response_info->core = core; + response_info->data = data; + buffer->size = knc_prepare_transfer(buffer->txbuf, buffer->size, MAX_SPI_SIZE, core->die->channel, request_length, request, response_length); +} + +static int knc_transfer_stamp(struct knc_state *knc) +{ + return knc->send_buffer_count; +} + +static int knc_transfer_completed(struct knc_state *knc, int stamp) +{ + /* signed delta math, counter wrap OK */ + return (int)(knc->read_buffer_count - stamp) >= 1; +} + +static bool knc_detect_one(void *ctx) +{ + /* Scan device for ASICs */ + int channel, die, cores = 0, core; + struct cgpu_info *cgpu; + struct knc_state *knc; + struct knc_die_info die_info[MAX_ASICS][DIES_PER_ASIC]; + + memset(die_info, 0, sizeof(die_info)); + + /* Send GETINFO to each die to detect if it is usable */ + for (channel = 0; channel < MAX_ASICS; channel++) { + if (!knc_trnsp_asic_detect(ctx, channel)) + continue; + for (die = 0; die < DIES_PER_ASIC; die++) { + if (knc_detect_die(ctx, channel, die, &die_info[channel][die]) == 0) + cores += die_info[channel][die].cores; + } + } + + if (!cores) { + applog(LOG_NOTICE, "no KnCminer cores found"); + return false; + } + + applog(LOG_ERR, "Found a KnC miner with %d cores", cores); + + cgpu = calloc(1, sizeof(*cgpu)); + knc = calloc(1, sizeof(*knc) + cores * sizeof(struct knc_core_state)); + if (!cgpu || !knc) { + applog(LOG_ERR, "KnC miner detected, but failed to allocate memory"); + return false; + } + + knc->cgpu = cgpu; + knc->ctx = ctx; + knc->generation = 1; + + /* Index all cores */ + int dies = 0; + cores = 0; + struct knc_core_state *pcore = knc->core; + for (channel = 0; channel < MAX_ASICS; channel++) { + for (die = 0; die < DIES_PER_ASIC; die++) { + if (die_info[channel][die].cores) { + knc->die[dies].channel = channel; + knc->die[dies].die = die; + knc->die[dies].version = die_info[channel][die].version; + knc->die[dies].cores = die_info[channel][die].cores; + knc->die[dies].core = pcore; + knc->die[dies].knc = 
knc; + for (core = 0; core < knc->die[dies].cores; core++) { + knc->die[dies].core[core].die = &knc->die[dies]; + knc->die[dies].core[core].core = core; + } + cores += knc->die[dies].cores; + pcore += knc->die[dies].cores; + dies++; + } + } + } + for (core = 0; core < cores; core++) + knc->core[core].coreid = core; + knc->dies = dies; + knc->cores = cores; + knc->startup = 2; + + cgpu->drv = &knc_drv; + cgpu->name = "KnCminer"; + cgpu->threads = 1; + + cgpu->device_data = knc; + + pthread_mutex_init(&knc->spi_qlock, NULL); + pthread_cond_init(&knc->spi_qcond, NULL); + if (thr_info_create(&knc->spi_thr, NULL, knc_spi, (void *)cgpu)) { + applog(LOG_ERR, "%s%i: SPI thread create failed", + cgpu->drv->name, cgpu->device_id); + free(cgpu); + free(knc); + return false; + } + + add_cgpu(cgpu); + + return true; +} + +/* Probe devices and register with add_cgpu */ +void knc_detect(bool __maybe_unused hotplug) +{ + void *ctx = knc_trnsp_new(opt_knc_device_idx); + + if (ctx != NULL) { + if (!knc_detect_one(ctx)) + knc_trnsp_free(ctx); + } +} + +/* Core helper functions */ +static int knc_core_hold_work(struct knc_core_state *core) +{ + return timercmp(&core->hold_work_until, &now, >); +} + +static int knc_core_has_work(struct knc_core_state *core) +{ + int i; + for (i = 0; i < WORKS_PER_CORE; i++) { + if (core->workslot[i].slot > 0) + return true; + } + return false; +} + +static int knc_core_need_work(struct knc_core_state *core) +{ + return !knc_core_hold_work(core) && !core->workslot[1].work && !core->workslot[2].work; +} + +static int knc_core_disabled(struct knc_core_state *core) +{ + return timercmp(&core->disabled_until, &now, >); +} + +static int _knc_core_next_slot(struct knc_core_state *core) +{ + /* Avoid slot #0 and #15. 
#0 is "no work assigned" and #15 is seen on bad cores */ + int slot = core->last_slot + 1; + if (slot >= 15) + slot = 1; + core->last_slot = slot; + return slot; +} + +static bool knc_core_slot_busy(struct knc_core_state *core, int slot) +{ + if (slot == core->report.active_slot) + return true; + if (slot == core->report.next_slot) + return true; + int i; + for (i = 0; i < WORKS_PER_CORE; i++) { + if (slot == core->workslot[i].slot) + return true; + } + return false; +} + +static int knc_core_next_slot(struct knc_core_state *core) +{ + int slot; + do slot = _knc_core_next_slot(core); + while (knc_core_slot_busy(core, slot)); + return slot; +} + +static void knc_core_failure(struct knc_core_state *core) +{ + core->errors++; + core->errors_now++; + core->die->knc->errors++; + if (knc_core_disabled(core)) + return; + if (core->errors_now > CORE_ERROR_LIMIT) { + applog(LOG_ERR, "KnC: %d.%d.%d disabled for %d seconds due to repeated hardware errors", + core->die->channel, core->die->die, core->core, core_disable_interval.tv_sec); + timeradd(&now, &core_disable_interval, &core->disabled_until); + } +} + +static int knc_core_handle_nonce(struct thr_info *thr, struct knc_core_state *core, int slot, uint32_t nonce) +{ + int i; + if (!slot) + return; + core->last_nonce.slot = slot; + core->last_nonce.nonce = nonce; + if (core->die->knc->startup) + return; + for (i = 0; i < WORKS_PER_CORE; i++) { + if (slot == core->workslot[i].slot && core->workslot[i].work) { + applog(LOG_INFO, "KnC: %d.%d.%d found nonce %08x", core->die->channel, core->die->die, core->core, nonce); + if (submit_nonce(thr, core->workslot[i].work, nonce)) { + /* Good share */ + core->shares++; + core->die->knc->shares++; + /* This core is useful. 
Ignore any errors */ + core->errors_now = 0; + } else { + applog(LOG_INFO, "KnC: %d.%d.%d hwerror nonce %08x", core->die->channel, core->die->die, core->core, nonce); + /* Bad share */ + knc_core_failure(core); + } + } + } +} + +static int knc_core_process_report(struct thr_info *thr, struct knc_core_state *core, uint8_t *response) +{ + struct knc_report *report = &core->report; + knc_decode_report(response, report, core->die->version); + bool had_event = false; + + applog(LOG_DEBUG, "KnC %d.%d.%d: Process report %d %d(%d) / %d %d %d", core->die->channel, core->die->die, core->core, report->active_slot, report->next_slot, report->next_state, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot); + int n; + for (n = 0; n < KNC_NONCES_PER_REPORT; n++) { + if (report->nonce[n].slot < 0) + break; + if (core->last_nonce.slot == report->nonce[n].slot && core->last_nonce.nonce == report->nonce[n].nonce) + break; + } + while(n-- > 0) { + knc_core_handle_nonce(thr, core, report->nonce[n].slot, report->nonce[n].nonce); + } + + if (report->active_slot && core->workslot[0].slot != report->active_slot) { + had_event = true; + applog(LOG_INFO, "KnC: New work on %d.%d.%d, %d %d / %d %d %d", core->die->channel, core->die->die, core->core, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot); + /* Core switched to next work */ + if (core->workslot[0].work) { + core->die->knc->completed++; + core->completed++; + applog(LOG_INFO, "KnC: Work completed on core %d.%d.%d!", core->die->channel, core->die->die, core->core); + free_work(core->workslot[0].work); + } + core->workslot[0] = core->workslot[1]; + core->workslot[1].work = NULL; + core->workslot[1].slot = -1; + + /* or did it switch directly to pending work? 
*/ + if (report->active_slot == core->workslot[2].slot) { + applog(LOG_INFO, "KnC: New work on %d.%d.%d, %d %d %d %d (pending)", core->die->channel, core->die->die, core->core, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot); + if (core->workslot[0].work) + free_work(core->workslot[0].work); + core->workslot[0] = core->workslot[2]; + core->workslot[2].work = NULL; + core->workslot[2].slot = -1; + } + } + + if (report->next_state && core->workslot[2].slot > 0 && (core->workslot[2].slot == report->next_slot || report->next_slot == -1)) { + had_event = true; + applog(LOG_INFO, "KnC: Accepted work on %d.%d.%d, %d %d %d %d (pending)", core->die->channel, core->die->die, core->core, report->active_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot); + /* core accepted next work */ + if (core->workslot[1].work) + free_work(core->workslot[1].work); + core->workslot[1] = core->workslot[2]; + core->workslot[2].work = NULL; + core->workslot[2].slot = -1; + } + + if (core->workslot[2].work && knc_transfer_completed(core->die->knc, core->transfer_stamp)) { + had_event = true; + applog(LOG_INFO, "KnC: Setwork failed on core %d.%d.%d?", core->die->channel, core->die->die, core->core); + free_work(core->workslot[2].work); + core->workslot[2].slot = -1; + } + + if (had_event) + applog(LOG_INFO, "KnC: Exit report on %d.%d.%d, %d %d / %d %d %d", core->die->channel, core->die->die, core->core, report->active_slot, report->next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot); + + return 0; +} + +static void knc_process_responses(struct thr_info *thr) +{ + struct cgpu_info *cgpu = thr->cgpu; + struct knc_state *knc = cgpu->device_data; + struct knc_spi_buffer *buffer = &knc->spi_buffer[knc->read_buffer]; + while (buffer->state == KNC_SPI_DONE) { + int i; + for (i = 0; i < buffer->responses; i++) { + struct knc_spi_response *response_info = &buffer->response_info[i]; + uint8_t *rxbuf = 
&buffer->rxbuf[response_info->offset]; + struct knc_core_state *core = response_info->core; + int status = knc_decode_response(rxbuf, response_info->request_length, &rxbuf, response_info->response_length); + /* Invert KNC_ACCEPTED to simplify logics below */ + if (response_info->type == KNC_SETWORK && !KNC_IS_ERROR(status)) + status ^= KNC_ACCEPTED; + if (core->die->version != KNC_VERSION_JUPITER && status != 0) { + applog(LOG_ERR, "KnC %d.%d.%d: Communication error (%x / %d)", core->die->channel, core->die->die, core->core, status, i); + if (status == KNC_ACCEPTED) { + /* Core refused our work vector. Likely out of sync. Reset it */ + core->inuse = false; + } + knc_core_failure(core); + } + switch(response_info->type) { + case KNC_REPORT: + case KNC_SETWORK: + /* Should we care about failed SETWORK explicit? Or simply handle it by next state not loaded indication in reports? */ + knc_core_process_report(thr, core, rxbuf); + break; + } + } + + buffer->state = KNC_SPI_IDLE; + buffer->responses = 0; + buffer->size = 0; + knc->read_buffer += 1; + knc->read_buffer_count += 1; + if (knc->read_buffer >= KNC_SPI_BUFFERS) + knc->read_buffer = 0; + buffer = &knc->spi_buffer[knc->read_buffer]; + } +} + +static int knc_core_send_work(struct thr_info *thr, struct knc_core_state *core, struct work *work, bool clean) +{ + struct knc_state *knc = core->die->knc; + struct cgpu_info *cgpu = knc->cgpu; + int request_length = 4 + 1 + 6*4 + 3*4 + 8*4; + uint8_t request[request_length]; + int response_length = 1 + 1 + (1 + 4) * 5; + uint8_t response[response_length]; + + int slot = knc_core_next_slot(core); + if (slot < 0) + goto error; + + applog(LOG_INFO, "KnC setwork%s %d.%d.%d = %d, %d %d / %d %d %d", clean ? 
" CLEAN" : "", core->die->channel, core->die->die, core->core, slot, core->report.active_slot, core->report.next_slot, core->workslot[0].slot, core->workslot[1].slot, core->workslot[2].slot); + if (!clean && !knc_core_need_work(core)) + goto error; + + switch(core->die->version) { + case KNC_VERSION_JUPITER: + if (clean) { + /* Double halt to get rid of any previous queued work */ + request_length = knc_prepare_jupiter_halt(request, core->die->die, core->core); + knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0); + knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0); + } + request_length = knc_prepare_jupiter_setwork(request, core->die->die, core->core, slot, work); + knc_transfer(thr, core, request_length, request, 0, KNC_NO_RESPONSE, 0); + break; + case KNC_VERSION_NEPTUNE: + request_length = knc_prepare_neptune_setwork(request, core->die->die, core->core, slot, work, clean); + knc_transfer(thr, core, request_length, request, response_length, KNC_SETWORK, slot); + break; + default: + goto error; + } + + core->workslot[2].work = work; + core->workslot[2].slot = slot; + core->works++; + core->die->knc->works++; + core->transfer_stamp = knc_transfer_stamp(knc); + core->inuse = true; + + timeradd(&now, &core_submit_interval, &core->hold_work_until); + timeradd(&now, &core_timeout_interval, &core->timeout); + + return 0; + +error: + applog(LOG_INFO, "KnC: %d.%d.%d Failed to setwork (%d)", + core->die->channel, core->die->die, core->core, core->errors_now); + knc_core_failure(core); + free_work(work); + return -1; +} + +static int knc_core_request_report(struct thr_info *thr, struct knc_core_state *core) +{ + struct knc_state *knc = core->die->knc; + struct cgpu_info *cgpu = knc->cgpu; + int request_length = 4; + uint8_t request[request_length]; + int response_length = 1 + 1 + (1 + 4) * 5; + uint8_t response[response_length]; + + applog(LOG_DEBUG, "KnC: %d.%d.%d Request report", core->die->channel, core->die->die, core->core); 
+ + request_length = knc_prepare_report(request, core->die->die, core->core); + + switch(core->die->version) { + case KNC_VERSION_JUPITER: + response_length = 1 + 1 + (1 + 4); + knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0); + return 0; + case KNC_VERSION_NEPTUNE: + knc_transfer(thr, core, request_length, request, response_length, KNC_REPORT, 0); + return 0; + } + +error: + applog(LOG_INFO, "KnC: Failed to scan work report"); + knc_core_failure(core); + return -1; +} + +/* return value is number of nonces that have been checked since + * previous call + */ +static int64_t knc_scanwork(struct thr_info *thr) +{ +#define KNC_COUNT_UNIT shares + struct cgpu_info *cgpu = thr->cgpu; + struct knc_state *knc = cgpu->device_data; + int64_t ret = 0; + uint32_t last_count = knc->KNC_COUNT_UNIT; + + applog(LOG_DEBUG, "KnC running scanwork"); + + gettimeofday(&now, NULL); + + knc_trnsp_periodic_check(knc->ctx); + + int i; + + knc_process_responses(thr); + + if (timercmp(&knc->next_error_interval, &now, >)) { + /* Reset hw error limiter every check interval */ + timeradd(&now, &core_check_interval, &knc->next_error_interval); + for (i = 0; i < knc->cores; i++) { + struct knc_core_state *core = &knc->core[i]; + core->errors_now = 0; + } + } + + for (i = 0; i < knc->cores; i++) { + struct knc_core_state *core = &knc->core[i]; + bool clean = !core->inuse; + if (knc_core_disabled(core)) + continue; + if (core->generation != knc->generation) { + applog(LOG_INFO, "KnC %d.%d.%d flush gen=%d/%d", core->die->channel, core->die->die, core->core, core->generation, knc->generation); + /* clean set state, forget everything */ + int slot; + for (slot = 0; slot < WORKS_PER_CORE; slot ++) { + if (core->workslot[slot].work) + free_work(core->workslot[slot].work); + core->workslot[slot].slot = -1; + } + core->hold_work_until = now; + core->generation = knc->generation; + } else if (timercmp(&core->timeout, &now, <=) && (core->workslot[0].slot > 0 || 
core->workslot[1].slot > 0 || core->workslot[2].slot > 0)) { + applog(LOG_ERR, "KnC %d.%d.%d timeout", core->die->channel, core->die->die, core->core, core->generation, knc->generation); + clean = true; + } + if (!knc_core_has_work(core)) + clean = true; + if (core->workslot[0].slot < 0 && core->workslot[1].slot < 0 && core->workslot[2].slot < 0) + clean = true; + if (i % SCAN_ADJUST_RANGE == knc->scan_adjust) + clean = true; + if ((knc_core_need_work(core) || clean) && !knc->startup) { + struct work *work = get_work(thr, thr->id); + knc_core_send_work(thr, core, work, clean); + } else { + knc_core_request_report(thr, core); + } + } + /* knc->startup delays initial work submission until we have had chance to query all cores on their current status, to avoid slot number collisions with earlier run */ + if (knc->startup) + knc->startup--; + else if (knc->scan_adjust < SCAN_ADJUST_RANGE) + knc->scan_adjust++; + + knc_flush(thr); + + return (int64_t)(knc->KNC_COUNT_UNIT - last_count) * 0x100000000UL; +} + +static void knc_flush_work(struct cgpu_info *cgpu) +{ + struct knc_state *knc = cgpu->device_data; + + applog(LOG_INFO, "KnC running flushwork"); + + knc->generation++; + knc->scan_adjust=0; + if (!knc->generation) + knc->generation++; +} + +static void knc_zero_stats(struct cgpu_info *cgpu) +{ + int core; + struct knc_state *knc = cgpu->device_data; + for (core = 0; core < knc->cores; core++) { + knc->shares = 0; + knc->completed = 0; + knc->works = 0; + knc->errors = 0; + knc->core[core].works = 0; + knc->core[core].errors = 0; + knc->core[core].shares = 0; + knc->core[core].completed = 0; + } +} + +static struct api_data *knc_api_stats(struct cgpu_info *cgpu) +{ + struct knc_state *knc = cgpu->device_data; + struct api_data *root = NULL; + unsigned int cursize; + int asic, core, n; + char label[256]; + + root = api_add_int(root, "dies", &knc->dies, 1); + root = api_add_int(root, "cores", &knc->cores, 1); + root = api_add_uint64(root, "shares", &knc->shares, 1); + 
root = api_add_uint64(root, "works", &knc->works, 1); + root = api_add_uint64(root, "completed", &knc->completed, 1); + root = api_add_uint64(root, "errors", &knc->errors, 1); + + /* Active cores */ + int active = knc->cores; + for (core = 0; core < knc->cores; core++) { + if (knc_core_disabled(&knc->core[core])) + active -= 1; + } + root = api_add_int(root, "active", &active, 1); + + /* Per ASIC/die data */ + for (n = 0; n < knc->dies; n++) { + struct knc_die *die = &knc->die[n]; + +#define knc_api_die_string(name, value) do { \ + snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \ + root = api_add_string(root, label, value, 1); \ + } while(0) +#define knc_api_die_int(name, value) do { \ + snprintf(label, sizeof(label), "%d.%d.%s", die->channel, die->die, name); \ + uint64_t v = value; \ + root = api_add_uint64(root, label, &v, 1); \ + } while(0) + + /* Model */ + { + char *model = "?"; + switch(die->version) { + case KNC_VERSION_JUPITER: + model = "Jupiter"; + break; + case KNC_VERSION_NEPTUNE: + model = "Neptune"; + break; + } + knc_api_die_string("model", model); + knc_api_die_int("cores", die->cores); + } + + /* Core based stats */ + { + int active = 0; + uint64_t errors = 0; + uint64_t shares = 0; + uint64_t works = 0; + uint64_t completed = 0; + char coremap[die->cores+1]; + + for (core = 0; core < die->cores; core++) { + coremap[core] = knc_core_disabled(&die->core[core]) ? 
'0' : '1'; + works += die->core[core].works; + shares += die->core[core].shares; + errors += die->core[core].errors; + completed += die->core[core].completed; + } + coremap[die->cores] = '\0'; + knc_api_die_int("errors", errors); + knc_api_die_int("shares", shares); + knc_api_die_int("works", works); + knc_api_die_int("completed", completed); + knc_api_die_string("coremap", coremap); + } + } + + return root; +} + +struct device_drv knc_drv = { + .drv_id = DRIVER_knc, + .dname = "KnCminer Neptune", + .name = "KnC", + .drv_detect = knc_detect, + .hash_work = hash_driver_work, + .flush_work = knc_flush_work, + .scanwork = knc_scanwork, + .zero_stats = knc_zero_stats, + .get_api_stats = knc_api_stats, +}; diff --git a/driver-minion.c b/driver-minion.c new file mode 100644 index 0000000..52e3442 --- /dev/null +++ b/driver-minion.c @@ -0,0 +1,5380 @@ +/* + * Copyright 2013-2014 Andrew Smith - BlackArrow Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" +#include "compat.h" +#include "miner.h" +#include "klist.h" +#include +#include + +#ifndef LINUX +static void minion_detect(__maybe_unused bool hotplug) +{ +} +#else + +#include +#include +#include +#include +#include +#include +#include + +// Define this to 1 to enable interrupt code and enable no_nonce +#define ENABLE_INT_NONO 0 + +// Define this to 1 if compiling on RockChip and not on RPi +#define MINION_ROCKCHIP 0 + +// The code is always in - this just decides if it does it +static bool minreread = false; + +#if MINION_ROCKCHIP == 1 +#define MINION_POWERCYCLE_GPIO 173 +#define MINION_CHIP_OFF "1" +#define MINION_CHIP_ON "0" +#define MINION_CHIP_DELAY 100 +#endif + +// Power cycle if the xff_list is full and the tail is less than +// this long ago +#define MINION_POWER_TIME 60 + +/* + * Use pins for board selection + * If disabled, it will test chips just as 'pin 0' + * but never do any gpio - the equivalent of the previous 'no pins' code + */ +static bool usepins = false; + +#define MINION_PAGE_SIZE 4096 + +#define BCM2835_BASE 0x20000000 +#define BCM2835_GPIO_BASE (BCM2835_BASE + 0x200000) + +#define BCM2835_GPIO_SET0 0x001c // GPIO Pin Output Set 0 +#define BCM2835_GPIO_CLR0 0x0028 // GPIO Pin Output Clear 0 + +#define BCM2835_GPIO_FSEL0 0x0000 + +#define BCM2835_GPIO_FSEL_INPUT 0b000 +#define BCM2835_GPIO_FSEL_OUTPUT 0b001 +#define BCM2835_GPIO_FSEL_MASK 0b111 + +#define BCM2835_PIN_HIGH 0x1 +#define BCM2835_PIN_LOW 0x0 + +static const char *minion_memory = "/dev/mem"; +static int minion_memory_addr = BCM2835_GPIO_BASE; + +#define MINION_SPI_BUS 0 +#define MINION_SPI_CHIP 0 + +#if MINION_ROCKCHIP == 0 +#define MINION_SPI_SPEED 8000000 +#else +#define MINION_SPI_SPEED 500000 +#endif +#define MINION_SPI_BUFSIZ 1024 + +static struct minion_select_pins { + int pin; + int wpi; + char *name; + int bcm; // this is what we use +} minionPins[] = { + { 24, 10, "CE0", 8, }, + { 26, 11, "CE1", 7, }, + { 16, 4, "GPIO4", 23, }, + { 22, 6, 
"GPIO6", 25, }, + { 12, 1, "GPIO1", 18, }, + { 18, 5, "GPIO5", 24, }, + { 11, 0, "GPIO0", 17, }, + { 13, 2, "GPIO2", 27, }, + { 15, 3, "GPIO3", 22, }, + { 7, 7, "GPIO7", 4, } + +/* The rest on the RPi + { 3, 8, "SDA", 2, } + { 5, 9, "SCL", 3, } + { 19, 12, "MOSI", 10, } + { 21, 13, "MISO", 9, } + { 23, 14, "SCLK", 11, } + { 8, 15, "TxD", 14, } + { 10, 16, "RxD", 15, } +*/ +}; + +/* + * uS delays for GPIO pin access + */ +#define MINION_PIN_BEFORE cgsleep_us(33) +#define MINION_PIN_SLEEP cgsleep_us(133) +#define MINION_PIN_AFTER + +#define MINION_PIN_COUNT (sizeof(minionPins)/ \ + sizeof(struct minion_select_pins)) + +#define CHIP_PIN(_chip) (minioninfo->chip_pin[_chip]) + +#define MINION_MIN_CHIP 0 +#define MINION_MAX_CHIP 11 + +#define MINION_CHIP_PER_PIN (1 + MINION_MAX_CHIP - MINION_MIN_CHIP) + +#define MINION_CHIPS (MINION_PIN_COUNT * MINION_CHIP_PER_PIN) +#define MINION_CORES 99 +#define FAKE_CORE MINION_CORES + +/* + * TODO: These will need adjusting for final hardware + * Look them up and calculate them? 
+ */ +#define MINION_QUE_MAX 64 +#define MINION_QUE_HIGH 48 +#define MINION_QUE_SEND 16 +#define MINION_QUE_LOW 8 + +#define MINION_FFL " - from %s %s() line %d" +#define MINION_FFL_HERE __FILE__, __func__, __LINE__ +#define MINION_FFL_PASS file, func, line +#define MINION_FFL_ARGS __maybe_unused const char *file, \ + __maybe_unused const char *func, \ + __maybe_unused const int line + +#define minion_txrx(_task) _minion_txrx(minioncgpu, minioninfo, _task, MINION_FFL_HERE) + +#define MINION_SYS_REGS 0x00 +#define MINION_CORE_REGS 0x10 +#define MINION_RES_BUF 0x20 +#define MINION_CMD_QUE 0x30 +#define MINION_NONCE_RANGES 0x70 + +#define DATA_SIZ (sizeof(uint32_t)) + +// All SYS data sizes are DATA_SIZ +#define MINION_SYS_CHIP_SIG 0x00 +#define MINION_SYS_CHIP_STA 0x01 +#define MINION_SYS_SPI_LED 0x02 +#define MINION_SYS_TEMP_CTL 0x03 +#define MINION_SYS_FREQ_CTL 0x04 +#define MINION_SYS_NONCE_LED 0x05 +#define MINION_SYS_MISC_CTL 0x06 +#define MINION_SYS_RSTN_CTL 0x07 +#define MINION_SYS_INT_ENA 0x08 +#define MINION_SYS_INT_CLR 0x09 +#define MINION_SYS_INT_STA 0x0a +#define MINION_SYS_FIFO_STA 0x0b +#define MINION_SYS_QUE_TRIG 0x0c +#define MINION_SYS_BUF_TRIG 0x0d +#define MINION_SYS_IDLE_CNT 0x0e + +// How many 32 bit reports make up all the cores - 99 cores = 4 reps +#define MINION_CORE_REPS (int)((((MINION_CORES-1) >> 5) & 0xff) + 1) + +// All SYS data sizes are DATA_SIZ +#define MINION_SYS_SIZ DATA_SIZ + +// Header Pin 18 = GPIO5 = BCM 24 +#define MINION_GPIO_RESULT_INT_PIN 24 +// RockChip is pin 172 ... 
+ +#define MINION_GPIO_SYS "/sys/class/gpio" +#define MINION_GPIO_ENA "/export" +#define MINION_GPIO_ENA_VAL "%d" +#define MINION_GPIO_DIS "/unexport" +#define MINION_GPIO_PIN "/gpio%d" +#define MINION_GPIO_DIR "/direction" +#define MINION_GPIO_DIR_READ "in" +#define MINION_GPIO_DIR_WRITE "out" +#define MINION_GPIO_EDGE "/edge" +#define MINION_GPIO_EDGE_NONE "none" +#define MINION_GPIO_EDGE_RISING "rising" +#define MINION_GPIO_EDGE_FALLING "falling" +#define MINION_GPIO_EDGE_BOTH "both" +#define MINION_GPIO_ACT "/active_low" +#define MINION_GPIO_ACT_LO "1" +#define MINION_GPIO_ACT_HI "0" +#define MINION_GPIO_VALUE "/value" + +#define MINION_RESULT_INT 0x01 +#define MINION_RESULT_FULL_INT 0x02 +#define MINION_CMD_INT 0x04 +#define MINION_CMD_FULL_INT 0x08 +#define MINION_TEMP_LOW_INT 0x10 +#define MINION_TEMP_HI_INT 0x20 +#define MINION_ALL_INT MINION_RESULT_INT | \ + MINION_RESULT_FULL_INT | \ + MINION_CMD_INT | \ + MINION_CMD_FULL_INT | \ + MINION_TEMP_LOW_INT | \ + MINION_TEMP_HI_INT + +#define RSTN_CTL_RESET_CORES 0x01 +#define RSTN_CTL_FLUSH_RESULTS 0x02 +#define RSTN_CTL_FLUSH_CMD_QUEUE 0x04 +#define RSTN_CTL_SPI_SW_RSTN 0x08 +#define RSTN_CTL_SHA_MGR_RESET 0x10 + +// Init +#define SYS_RSTN_CTL_INIT (RSTN_CTL_RESET_CORES | \ + RSTN_CTL_FLUSH_RESULTS | \ + RSTN_CTL_FLUSH_CMD_QUEUE | \ + RSTN_CTL_SPI_SW_RSTN | \ + RSTN_CTL_SHA_MGR_RESET) + +// Block change +#define SYS_RSTN_CTL_FLUSH (RSTN_CTL_RESET_CORES | \ + RSTN_CTL_SPI_SW_RSTN | \ + RSTN_CTL_FLUSH_CMD_QUEUE) + +#if ENABLE_INT_NONO +// enable 'no nonce' report +#define SYS_MISC_CTL_DEFAULT 0x04 +#else +#define SYS_MISC_CTL_DEFAULT 0x00 +#endif + +// Temperature returned by MINION_SYS_CHIP_STA 0x01 STA_TEMP() +#define MINION_TEMP_40 0 +#define MINION_TEMP_60 1 +#define MINION_TEMP_80 3 +#define MINION_TEMP_100 7 +#define MINION_TEMP_OVER 15 + +static const char *min_temp_40 = "<40"; +static const char *min_temp_60 = "40-60"; +static const char *min_temp_80 = "60-80"; +static const char *min_temp_100 = 
"80-100"; +static const char *min_temp_over = ">100"; +static const char *min_temp_invalid = "?"; + +/* + * Temperature for MINION_SYS_TEMP_CTL 0x03 temp_thres [0:3] + * i.e. it starts at 120 and goes up in steps of 5 to 160 + */ +#define MINION_TEMP_CTL_MIN 1 +#define MINION_TEMP_CTL_MAX 9 +#define MINION_TEMP_CTL_BITS 0x0f +#define MINION_TEMP_CTL_DEF 135 +#define MINION_TEMP_CTL_STEP 5 +#define MINION_TEMP_CTL_MIN_VALUE 120 +#define MINION_TEMP_CTL_MAX_VALUE (MINION_TEMP_CTL_MIN_VALUE + \ + (MINION_TEMP_CTL_STEP * \ + (MINION_TEMP_CTL_MAX - MINION_TEMP_CTL_MIN))) +#define MINION_TEMP_DISABLE "disable" +#define MINION_TEMP_CTL_DISABLE -1 +#define MINION_TEMP_CTL_DISABLE_VALUE 0x20 + +// CORE data size is DATA_SIZ +#define MINION_CORE_ENA0_31 0x10 +#define MINION_CORE_ENA32_63 0x11 +#define MINION_CORE_ENA64_95 0x12 +#define MINION_CORE_ENA96_98 0x13 +#define MINION_CORE_ACT0_31 0x14 +#define MINION_CORE_ACT32_63 0x15 +#define MINION_CORE_ACT64_95 0x16 +#define MINION_CORE_ACT96_98 0x17 + +// All CORE data sizes are DATA_SIZ +#define MINION_CORE_SIZ DATA_SIZ + +#define MINION_CORE_ALL "all" + +// RES data size is minion_result +#define MINION_RES_DATA 0x20 +#define MINION_RES_PEEK 0x21 + +// QUE data size is minion_que +#define MINION_QUE_0 0x30 +#define MINION_QUE_R 0x31 + +// RANGE data sizes are DATA_SIZ +#define MINION_NONCE_START 0x70 +#define MINION_NONCE_RANGE 0x71 + +// This must be >= max txsiz + max rxsiz +#define MINION_BUFSIZ 1024 + +#define u8tou32(_c, _off) (((uint8_t *)(_c))[(_off)+0] + \ + ((uint8_t *)(_c))[(_off)+1] * 0x100 + \ + ((uint8_t *)(_c))[(_off)+2] * 0x10000 + \ + ((uint8_t *)(_c))[(_off)+3] * 0x1000000 ) + +#define MINION_ADDR_WRITE 0x7f +#define MINION_ADDR_READ 0x80 + +#define READ_ADDR(_reg) ((_reg) | MINION_ADDR_READ) +#define WRITE_ADDR(_reg) ((_reg) & MINION_ADDR_WRITE) + +#define IS_ADDR_READ(_reg) (((_reg) & MINION_ADDR_READ) == MINION_ADDR_READ) +#define IS_ADDR_WRITE(_reg) (((_reg) & MINION_ADDR_READ) == 0) + +#define 
SET_HEAD_WRITE(_h, _reg) ((_h)->reg) = WRITE_ADDR(_reg) +#define SET_HEAD_READ(_h, _reg) ((_h)->reg) = READ_ADDR(_reg) +#define SET_HEAD_SIZ(_h, _siz) \ + do { \ + ((_h)->siz)[0] = (uint8_t)((_siz) & 0xff); \ + ((_h)->siz)[1] = (uint8_t)(((_siz) & 0xff00) >> 8); \ + } while (0) + +struct minion_header { + uint8_t chipid; + uint8_t reg; + uint8_t siz[2]; + uint8_t data[4]; // placeholder +}; + +#define HSIZE() (sizeof(struct minion_header) - 4) + +#define MINION_NOCHIP_SIG 0x00000000 +#define MINION_NOCHIP_SIG2 0xffffffff +#define MINION_CHIP_SIG 0xb1ac8a44 + +/* + * Number of times to try and get the SIG with each chip, + * if the chip returns neither of the above values + * TODO: maybe need some reset between tries, to handle a shift value? + */ +#define MINION_SIG_TRIES 3 + +/* + * TODO: Finding these means the chip is there - but how to fix it? + * The extra &'s are to ensure there is no sign bit issue since + * the sign bit carry in a C bit-shift is compiler dependent + */ +#define MINION_CHIP_SIG_SHIFT1 (((MINION_CHIP_SIG & 0x0000ffff) << 16) & 0xffff0000) +#define MINION_CHIP_SIG_SHIFT2 (((MINION_CHIP_SIG & 0x00ffffff) << 8) & 0xffffff00) +#define MINION_CHIP_SIG_SHIFT3 (((MINION_CHIP_SIG & 0xffffff00) >> 8) & 0x00ffffff) +#define MINION_CHIP_SIG_SHIFT4 (((MINION_CHIP_SIG & 0xffff0000) >> 16) & 0x0000ffff) + +#define MINION_SPI_LED_ON 0xa5a5 +#define MINION_SPI_LED_OFF 0x0 + +// Time since first nonce/last reset before turning on the LED +#define MINION_LED_TEST_TIME 600 + +#define MINION_FREQ_MIN 100 +#define MINION_FREQ_DEF 1200 +#define MINION_FREQ_MAX 1400 +#define MINION_FREQ_FACTOR 100 +#define MINION_FREQ_RESET_STEP MINION_FREQ_FACTOR +#define MINION_FREQ_FACTOR_MIN 1 +#define MINION_FREQ_FACTOR_MAX 14 + +static uint32_t minion_freq[] = { + 0x0, + 0x205032, // 1 = 100Mhz + 0x203042, // 2 = 200Mhz + 0x20204B, // 3 = 300Mhz + 0x201042, // 4 = 400Mhz + 0x201053, // 5 = 500Mhz + 0x200032, // 6 = 600Mhz + 0x20003A, // 7 = 700Mhz + 0x200042, // 8 = 800Mhz + 
0x20004B, // 9 = 900Mhz + 0x200053, // 10 = 1000Mhz + 0x21005B, // 11 = 1100Mhz + 0x210064, // 12 = 1200Mhz + 0x21006C, // 13 = 1300Mhz + 0x210074 // 14 = 1400Mhz +}; + +// When hash rate falls below this in the history hash rate, reset it +#define MINION_RESET_PERCENT 75.0 +// When hash rate falls below this after the longer test time +#define MINION_RESET2_PERCENT 85.0 + +// After the above resets, delay sending work for: +#define MINION_RESET_DELAY_s 0.088 + +#define STA_TEMP(_sta) ((uint16_t)((_sta)[3] & 0x1f)) +#define STA_CORES(_sta) ((uint16_t)((_sta)[2])) +#define STA_FREQ(_sta) ((uint32_t)((_sta)[1]) * 0x100 + (uint32_t)((_sta)[0])) + +// Randomly between 1s and 2s per chip +#define MINION_STATS_UPDATE_TIME_mS 1000 +#define MINION_STATS_UPDATE_RAND_mS 1000 + +// Don't report it more than once every ... 5s +#define MINION_IDLE_MESSAGE_ms 5000 + +struct minion_status { + uint16_t temp; + uint16_t cores; + uint32_t freq; + uint32_t quework; + uint32_t chipwork; + uint32_t realwork; // FIFO_STA + struct timeval last; + bool overheat; + bool islow; + bool tohigh; + int lowcount; + uint32_t overheats; + struct timeval lastoverheat; + struct timeval lastrecover; + double overheattime; + uint32_t tempsent; + uint32_t idle; + uint32_t last_rpt_idle; + struct timeval idle_rpt; + struct timeval first_nonce; + uint64_t from_first_good; +}; + +#define ENABLE_CORE(_core, _n) ((_core[_n >> 3]) |= (1 << (_n % 8))) +#define CORE_IDLE(_core, _n) ((_core[_n >> 3]) & (1 << (_n % 8))) + +#define FIFO_RES(_fifo, _off) ((_fifo)[(_off) + 0]) +#define FIFO_CMD(_fifo, _off) ((_fifo)[(_off) + 1]) + +#define RES_GOLD(_res) ((((_res)->status[3]) & 0x80) == 0) +#define RES_CHIPID(_res) (((_res)->status[3]) & 0x1f) +#define RES_CORE(_res) ((_res)->status[2]) +#define RES_TASK(_res) ((int)((_res)->status[1]) * 0x100 + (int)((_res)->status[0])) +#define RES_NONCE(_res) u8tou32((_res)->nonce, 0) + +/* + * This is only valid since we avoid using task_id 0 for work + * However, it isn't 
really necessary since we only request + * the number of results the result buffer says it has + * However, it is a simple failsafe + */ +#define IS_RESULT(_res) ((_res)->status[1] || (_res)->status[0]) + +struct minion_result { + uint8_t status[DATA_SIZ]; + uint8_t nonce[DATA_SIZ]; +}; + +#define MINION_RES_DATA_SIZ sizeof(struct minion_result) + +/* + * (MINION_SPI_BUFSIZ - HSIZE()) / MINION_RES_DATA_SIZ + * less a little bit to round it out + */ +#define MINION_MAX_RES 120 + +#define MIDSTATE_BYTES 32 +#define MERKLE7_OFFSET 64 +#define MERKLE_BYTES 12 + +#define MINION_MAX_TASK_ID 0xffff + +struct minion_que { + uint8_t task_id[2]; + uint8_t reserved[2]; + uint8_t midstate[MIDSTATE_BYTES]; + uint8_t merkle7[DATA_SIZ]; + uint8_t ntime[DATA_SIZ]; + uint8_t bits[DATA_SIZ]; +}; + +/* + * Max time to wait before checking the task list + * Required, since only urgent tasks trigger an immediate check + * TODO: ? for 2TH/s + */ +#define MINION_TASK_mS 8 + +/* + * Max time to wait before checking the result list for nonces + * This can be long since it's only a failsafe + * cgsem_post is always sent if there are nonces ready to check + */ +#define MINION_NONCE_mS 888 + +// Number of results to make a GPIO interrupt +//#define MINION_RESULT_INT_SIZE 1 +#define MINION_RESULT_INT_SIZE 2 + +/* + * Max time to wait before checking for results + * The interrupt doesn't occur until MINION_RESULT_INT_SIZE results are found + * See comment in minion_spi_reply() at poll() + */ +#define MINION_REPLY_mS 88 + +/* + * Max time to wait before returning the amount of work done + * A result interrupt will send a trigger for this also + * See comment in minion_scanwork() + * This avoids the cgminer master work loop spinning doing nothing + */ +#define MINION_SCAN_mS 88 + +// *** Work lists: generated, queued for a chip, sent to chip +typedef struct work_item { + struct work *work; + uint32_t task_id; + struct timeval sent; + int nonces; + bool urgent; + bool stale; // if stale, don't 
decrement que/chipwork when discarded + bool rolled; + int errors; // uncertain since the error could mean task_id is wrong + struct timeval created; // when work was generated + uint64_t ioseq; +} WORK_ITEM; + +#define ALLOC_WORK_ITEMS 4096 +#define LIMIT_WORK_ITEMS 0 + +// *** Task queue ready to be sent +typedef struct task_item { + uint64_t tid; + uint8_t chip; + bool write; + uint8_t address; + uint32_t task_id; + uint32_t wsiz; + uint32_t osiz; + uint32_t rsiz; + uint8_t wbuf[MINION_BUFSIZ]; + uint8_t obuf[MINION_BUFSIZ]; + uint8_t rbuf[MINION_BUFSIZ]; + int reply; + bool urgent; + uint8_t work_state; + struct work *work; + K_ITEM *witem; + uint64_t ioseq; +} TASK_ITEM; + +#define ALLOC_TASK_ITEMS 256 +#define LIMIT_TASK_ITEMS 0 + +// *** Results queue ready to be checked +typedef struct res_item { + int chip; + int core; + uint32_t task_id; + uint32_t nonce; + struct timeval when; + /* + * Only once per task_id if no nonces were found + * Sent with core = 0 + * However, currently it always sends it at the end of every task + * TODO: code assumes it doesn't - change later when we + * see what the final hardware does (minor code performance gain) + */ + bool no_nonce; + // If we requested the result twice: + bool another; + uint32_t task_id2; + uint32_t nonce2; +} RES_ITEM; + +#define ALLOC_RES_ITEMS 256 +#define LIMIT_RES_ITEMS 0 + +// *** Per chip nonce history +typedef struct hist_item { + struct timeval when; +} HIST_ITEM; + +#define ALLOC_HIST_ITEMS 4096 +#define LIMIT_HIST_ITEMS 0 + +// How much history to keep (5min) +#define MINION_HISTORY_s 300 +// History required to decide a reset at MINION_FREQ_DEF Mhz +#define MINION_RESET_s 10 +// How many times to reset before changing Freq +// This doesn't include the secondary higher % check +#define MINION_RESET_COUNT 6 + +// To enable the 2nd check +static bool second_check = true; +// Longer time lapse to expect the higher % +// This intercepts a slow GHs drop earlier +#define MINION_RESET2_s 60 + +#if 
(MINION_RESET_s > MINION_HISTORY_s) +#error "MINION_RESET_s can't be greater than MINION_HISTORY_s" +#endif + +#define FREQ_DELAY(freq) ((float)(MINION_RESET_s * MINION_FREQ_DEF) / (freq)) + +#if (MINION_RESET2_s > MINION_HISTORY_s) +#error "MINION_RESET2_s can't be greater than MINION_HISTORY_s" +#endif + +// FREQ2_DELAY(MINION_FREQ_MIN) = FREQ2_FACTOR * MINION_RESET2_s +#define FREQ2_FACTOR 1.5 + +#define FREQ2_DELAY(freq) ((1.0 + (float)((freq - MINION_FREQ_DEF) * (1 - FREQ2_FACTOR)) / \ + (float)(MINION_FREQ_DEF - MINION_FREQ_MIN)) * MINION_RESET2_s) + +#if (MINION_RESET2_s <= MINION_RESET_s) +#error "MINION_RESET2_s must be greater than MINION_RESET_s" +#endif + +/* If there was no reset for this long, clear the reset history + * (except the last one) since this means the current clock is ok + * with rare resets */ +#define MINION_CLR_s 300 + +#if (MINION_CLR_s <= MINION_RESET2_s) +#error "MINION_CLR_s must be greater than MINION_RESET2_s" +#endif + +// History must be always generated for the reset check +#define MINION_MAX_RESET_CHECK 2 + +/* Floating point reset settings required for the code to work properly + * Basically: RESET2 must be after RESET and CLR must be after RESET2 */ +static void define_test() +{ + float test; + + if (MINION_RESET2_PERCENT <= MINION_RESET_PERCENT) { + quithere(1, "MINION_RESET2_PERCENT=%f must be " + "> MINION_RESET_PERCENT=%f", + MINION_RESET2_PERCENT, MINION_RESET_PERCENT); + } + + test = FREQ_DELAY(MINION_FREQ_MIN); + if (test >= MINION_HISTORY_s) { + quithere(1, "FREQ_DELAY(MINION_FREQ_MIN)=%f must be " + "< MINION_HISTORY_s=%d", + test, MINION_HISTORY_s); + } + + if (MINION_CLR_s <= test) { + quithere(1, "MINION_CLR_s=%d must be > " + "FREQ_DELAY(MINION_FREQ_MIN)=%f", + MINION_CLR_s, test); + } + + if (FREQ2_FACTOR <= 1.0) + quithere(1, "FREQ2_FACTOR=%f must be > 1.0", FREQ2_FACTOR); + + + test = FREQ2_DELAY(MINION_FREQ_MIN); + if (test >= MINION_HISTORY_s) { + quithere(1, "FREQ2_DELAY(MINION_FREQ_MIN)=%f must be " + "< 
MINION_HISTORY_s=%d", + test, MINION_HISTORY_s); + } + + if (MINION_CLR_s <= test) { + quithere(1, "MINION_CLR_s=%d must be > " + "FREQ2_DELAY(MINION_FREQ_MIN)=%f", + MINION_CLR_s, test); + } +} + +// *** Chip freq/MHs performance history +typedef struct perf_item { + double elapsed; + uint64_t nonces; + uint32_t freq; + double ghs; + struct timeval when; +} PERF_ITEM; + +#define ALLOC_PERF_ITEMS 128 +#define LIMIT_PERF_ITEMS 0 + +// *** 0xff error history +typedef struct xff_item { + time_t when; +} XFF_ITEM; + +#define ALLOC_XFF_ITEMS 100 +#define LIMIT_XFF_ITEMS 100 + +#define DATA_WORK(_item) ((WORK_ITEM *)(_item->data)) +#define DATA_TASK(_item) ((TASK_ITEM *)(_item->data)) +#define DATA_RES(_item) ((RES_ITEM *)(_item->data)) +#define DATA_HIST(_item) ((HIST_ITEM *)(_item->data)) +#define DATA_PERF(_item) ((PERF_ITEM *)(_item->data)) +#define DATA_XFF(_item) ((XFF_ITEM *)(_item->data)) + +// Set this to 1 to enable iostats processing +// N.B. it slows down mining +#define DO_IO_STATS 0 + +#if DO_IO_STATS +#define IO_STAT_NOW(_tv) cgtime(_tv) +#define IO_STAT_STORE(_sta, _fin, _lsta, _lfin, _tsd, _buf, _siz, _reply, _ioc) \ + do { \ + double _diff, _ldiff, _lwdiff, _1time; \ + int _off; \ + _diff = us_tdiff(_fin, _sta); \ + _ldiff = us_tdiff(_lfin, _lsta); \ + _lwdiff = us_tdiff(_sta, _lsta); \ + _1time = us_tdiff(_tsd, _lfin); \ + _off = (int)(_buf[1]) + (_reply >= 0 ? 
0 : 0x100); \ + minioninfo->summary.count++; \ + minioninfo->summary.tsd += _1time; \ + minioninfo->iostats[_off].count++; \ + minioninfo->iostats[_off].tsd += _1time; \ + if (_diff <= 0) { \ + minioninfo->summary.zero_delay++; \ + minioninfo->iostats[_off].zero_delay++; \ + } else { \ + minioninfo->summary.total_delay += _diff; \ + if (minioninfo->summary.max_delay < _diff) \ + minioninfo->summary.max_delay = _diff; \ + if (minioninfo->summary.min_delay == 0 || \ + minioninfo->summary.min_delay > _diff) \ + minioninfo->summary.min_delay = _diff; \ + minioninfo->iostats[_off].total_delay += _diff; \ + if (minioninfo->iostats[_off].max_delay < _diff) \ + minioninfo->iostats[_off].max_delay = _diff; \ + if (minioninfo->iostats[_off].min_delay == 0 || \ + minioninfo->iostats[_off].min_delay > _diff) \ + minioninfo->iostats[_off].min_delay = _diff; \ + } \ + if (_ldiff <= 0) { \ + minioninfo->summary.zero_dlock++; \ + minioninfo->iostats[_off].zero_dlock++; \ + } else { \ + minioninfo->summary.total_dlock += _ldiff; \ + if (minioninfo->summary.max_dlock < _ldiff) \ + minioninfo->summary.max_dlock = _ldiff; \ + if (minioninfo->summary.min_dlock == 0 || \ + minioninfo->summary.min_dlock > _ldiff) \ + minioninfo->summary.min_dlock = _ldiff; \ + minioninfo->iostats[_off].total_dlock += _ldiff; \ + if (minioninfo->iostats[_off].max_dlock < _ldiff) \ + minioninfo->iostats[_off].max_dlock = _ldiff; \ + if (minioninfo->iostats[_off].min_dlock == 0 || \ + minioninfo->iostats[_off].min_dlock > _ldiff) \ + minioninfo->iostats[_off].min_dlock = _ldiff; \ + } \ + minioninfo->summary.total_dlwait += _lwdiff; \ + minioninfo->iostats[_off].total_dlwait += _lwdiff; \ + if (_siz == 0) { \ + minioninfo->summary.zero_bytes++; \ + minioninfo->iostats[_off].zero_bytes++; \ + } else { \ + minioninfo->summary.total_bytes += _siz; \ + if (minioninfo->summary.max_bytes < _siz) \ + minioninfo->summary.max_bytes = _siz; \ + if (minioninfo->summary.min_bytes == 0 || \ + 
minioninfo->summary.min_bytes > _siz) \ + minioninfo->summary.min_bytes = _siz; \ + minioninfo->iostats[_off].total_bytes += _siz; \ + if (minioninfo->iostats[_off].max_bytes < _siz) \ + minioninfo->iostats[_off].max_bytes = _siz; \ + if (minioninfo->iostats[_off].min_bytes == 0 || \ + minioninfo->iostats[_off].min_bytes > _siz) \ + minioninfo->iostats[_off].min_bytes = _siz; \ + } \ + } while (0); + +typedef struct iostat { + uint64_t count; // total ioctl() + + double total_delay; // total elapsed ioctl() + double min_delay; + double max_delay; + uint64_t zero_delay; // how many had <= 0 delay + + // Above but including locking + double total_dlock; + double min_dlock; + double max_dlock; + uint64_t zero_dlock; + + // Total time waiting to get lock + double total_dlwait; + + // these 3 fields are ignored for now since all are '1' + uint64_t total_ioc; // SPI_IOC_MESSAGE(x) + uint64_t min_ioc; + uint64_t max_ioc; + + uint64_t total_bytes; // ioctl() bytes + uint64_t min_bytes; + uint64_t max_bytes; + uint64_t zero_bytes; // how many had siz == 0 + + double tsd; // total doing one extra cgtime() each time +} IOSTAT; +#else +#define IO_STAT_NOW(_tv) +#define IO_STAT_STORE(_sta, _fin, _lsta, _lfin, _tsd, _buf, _siz, _reply, _ioc) +#endif + +static double time_bands[] = { 0.1, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0 }; +#define TIME_BANDS ((int)(sizeof(time_bands)/sizeof(double))) + +struct minion_info { + struct thr_info *thr; + struct thr_info spiw_thr; + struct thr_info spir_thr; + struct thr_info res_thr; + + pthread_mutex_t spi_lock; + pthread_mutex_t sta_lock; + + cgsem_t task_ready; + cgsem_t nonce_ready; + cgsem_t scan_work; + + volatile unsigned *gpio; + + int spifd; + char gpiointvalue[64]; + int gpiointfd; + + // I/O or seconds + bool spi_reset_io; + int spi_reset_count; + time_t last_spi_reset; + uint64_t spi_resets; + + // TODO: need to track disabled chips - done? 
+ int chips; + bool has_chip[MINION_CHIPS]; + int init_temp[MINION_CHIPS]; + uint8_t init_cores[MINION_CHIPS][DATA_SIZ*MINION_CORE_REPS]; + + uint8_t chipid[MINION_CHIPS]; // Chip Number + int chip_pin[MINION_CHIPS]; + + uint64_t ioseq; + uint32_t next_task_id; + + // Stats + uint64_t chip_nonces[MINION_CHIPS]; + uint64_t chip_nononces[MINION_CHIPS]; + uint64_t chip_good[MINION_CHIPS]; + uint64_t chip_bad[MINION_CHIPS]; + uint64_t chip_err[MINION_CHIPS]; + uint64_t chip_dup[MINION_CHIPS]; + uint64_t core_good[MINION_CHIPS][MINION_CORES+1]; + uint64_t core_bad[MINION_CHIPS][MINION_CORES+1]; + + uint32_t chip_core_ena[MINION_CORE_REPS][MINION_CHIPS]; + uint32_t chip_core_act[MINION_CORE_REPS][MINION_CHIPS]; + + struct minion_status chip_status[MINION_CHIPS]; + + uint64_t interrupts; + uint64_t result_interrupts; + uint64_t command_interrupts; + char last_interrupt[64]; + + pthread_mutex_t nonce_lock; + uint64_t new_nonces; + + uint64_t ok_nonces; + uint64_t untested_nonces; + uint64_t tested_nonces; + + uint64_t work_unrolled; + uint64_t work_rolled; + + uint64_t spi_errors; + uint64_t fifo_spi_errors[MINION_CHIPS]; + uint64_t res_spi_errors[MINION_CHIPS]; + uint64_t use_res2[MINION_CHIPS]; + + uint64_t tasks_failed[MINION_CHIPS]; + uint64_t tasks_recovered[MINION_CHIPS]; + uint64_t nonces_failed[MINION_CHIPS]; + uint64_t nonces_recovered[MINION_CHIPS]; + struct timeval last_reset[MINION_CHIPS]; + double do_reset[MINION_CHIPS]; + bool flag_reset[MINION_CHIPS]; + + // Work items + K_LIST *wfree_list; + K_STORE *wwork_list; + K_STORE *wstale_list; + K_STORE *wque_list[MINION_CHIPS]; + K_STORE *wchip_list[MINION_CHIPS]; + uint64_t wwork_flushed; + uint64_t wque_flushed; + uint64_t wchip_staled; + + // Task list + K_LIST *tfree_list; + K_STORE *task_list; + K_STORE *treply_list; + + uint64_t next_tid; + + // Nonce replies + K_LIST *rfree_list; + K_STORE *rnonce_list; + + struct timeval last_did; + + // Nonce history + K_LIST *hfree_list; + K_STORE 
*hchip_list[MINION_CHIPS]; + + int history_gen; + struct timeval chip_chk; + struct timeval chip_rpt; + double history_ghs[MINION_CHIPS]; + // Point in history for MINION_RESET_s + int reset_time[MINION_CHIPS]; + K_ITEM *reset_mark[MINION_CHIPS]; + int reset_count[MINION_CHIPS]; + // Point in history for MINION_RESET2_s + int reset2_time[MINION_CHIPS]; + K_ITEM *reset2_mark[MINION_CHIPS]; + int reset2_count[MINION_CHIPS]; + + // Performance history + K_LIST *pfree_list; + K_STORE *p_list[MINION_CHIPS]; + + // 0xff history + K_LIST *xfree_list; + K_STORE *xff_list; + time_t last_power_cycle; + uint64_t power_cycles; + time_t last_xff; + uint64_t xffs; + uint64_t last_displayed_xff; + + // Gets reset to zero each time it is used in reporting + int res_err_count[MINION_CHIPS]; + +#if DO_IO_STATS + // Total + IOSTAT summary; + + // Two for each command plus wasted extras i.e. direct/fast lookup + // No error uses 0x0 to 0xff, error uses 0x100 to 0x1ff + IOSTAT iostats[0x200]; +#endif + + // Stats on how long work is waiting to move from wwork_list to wque_list + uint64_t que_work; + double que_time; + double que_min; + double que_max; + uint64_t que_bands[TIME_BANDS+1]; + + // From wwork_list to txrx + uint64_t wt_work; + double wt_time; + double wt_min; + double wt_max; + uint64_t wt_bands[TIME_BANDS+1]; + + bool lednow[MINION_CHIPS]; + bool setled[MINION_CHIPS]; + + // When changing the frequency don't modify 'anything' + bool changing[MINION_CHIPS]; + int init_freq[MINION_CHIPS]; + int want_freq[MINION_CHIPS]; + uint32_t freqsent[MINION_CHIPS]; + struct timeval lastfreq[MINION_CHIPS]; + int freqms[MINION_CHIPS]; + + bool initialised; +}; + +#if MINION_ROCKCHIP == 1 +static bool minion_toggle_gpio(struct cgpu_info *minioncgpu, int gpionum) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + char pindir[64], ena[64], pin[8], dir[64]; + char gpiointvalue[64]; + struct stat st; + int file, err, chip; + ssize_t ret; + + 
snprintf(pindir, sizeof(pindir), MINION_GPIO_SYS MINION_GPIO_PIN, gpionum); + memset(&st, 0, sizeof(st)); + + if (stat(pindir, &st) == 0) { // already exists + if (!S_ISDIR(st.st_mode)) { + applog(LOG_ERR, "%s: failed1 to enable GPIO pin %d" + " - not a directory", + minioncgpu->drv->dname, gpionum); + return false; + } + } else { + snprintf(ena, sizeof(ena), MINION_GPIO_SYS MINION_GPIO_ENA); + file = open(ena, O_WRONLY | O_SYNC); + if (file == -1) { + applog(LOG_ERR, "%s: failed2 to export GPIO pin %d (%d)" + " - you need to be root?", + minioncgpu->drv->dname, + gpionum, errno); + return false; + } + snprintf(pin, sizeof(pin), MINION_GPIO_ENA_VAL, gpionum); + ret = write(file, pin, (size_t)strlen(pin)); + if (ret != (ssize_t)strlen(pin)) { + if (ret < 0) + err = errno; + else + err = (int)ret; + close(file); + applog(LOG_ERR, "%s: failed3 to export GPIO pin %d (%d:%d)", + minioncgpu->drv->dname, + gpionum, err, (int)strlen(pin)); + return false; + } + close(file); + + // Check again if it exists + memset(&st, 0, sizeof(st)); + if (stat(pindir, &st) != 0) { + applog(LOG_ERR, "%s: failed4 to export GPIO pin %d (%d)", + minioncgpu->drv->dname, + gpionum, errno); + return false; + } + } + + // Set the pin attributes + // Direction + snprintf(dir, sizeof(dir), MINION_GPIO_SYS MINION_GPIO_PIN MINION_GPIO_DIR, gpionum); + file = open(dir, O_WRONLY | O_SYNC); + if (file == -1) { + applog(LOG_ERR, "%s: failed5 to configure GPIO pin %d (%d)" + " - you need to be root?", + minioncgpu->drv->dname, + gpionum, errno); + return false; + } + ret = write(file, MINION_GPIO_DIR_WRITE, sizeof(MINION_GPIO_DIR_WRITE)-1); + if (ret != sizeof(MINION_GPIO_DIR_WRITE)-1) { + if (ret < 0) + err = errno; + else + err = (int)ret; + close(file); + applog(LOG_ERR, "%s: failed6 to configure GPIO pin %d (%d:%d)", + minioncgpu->drv->dname, gpionum, + err, (int)sizeof(MINION_GPIO_DIR_WRITE)-1); + return false; + } + close(file); + + // Open it + snprintf(gpiointvalue, sizeof(gpiointvalue), + 
MINION_GPIO_SYS MINION_GPIO_PIN MINION_GPIO_VALUE, + gpionum); + int fd = open(gpiointvalue, O_WRONLY); + if (fd == -1) { + applog(LOG_ERR, "%s: failed7 to access GPIO pin %d (%d)", + minioncgpu->drv->dname, + gpionum, errno); + return false; + } + + ret = write(fd, MINION_CHIP_OFF, sizeof(MINION_CHIP_OFF)-1); + if (ret != sizeof(MINION_CHIP_OFF)-1) { + close(fd); + applog(LOG_ERR, "%s: failed8 to toggle off GPIO pin %d (%d:%d)", + minioncgpu->drv->dname, + gpionum, (int)ret, errno); + return false; + } + + cgsleep_ms(MINION_CHIP_DELAY); + + ret = write(fd, MINION_CHIP_ON, sizeof(MINION_CHIP_ON)-1); + if (ret != sizeof(MINION_CHIP_ON)-1) { /* was sizeof(MINION_CHIP_OFF)-1: copy/paste; compare against the string actually written */ + close(fd); + applog(LOG_ERR, "%s: failed9 to toggle on GPIO pin %d (%d:%d)", + minioncgpu->drv->dname, + gpionum, (int)ret, errno); + return false; + } + + close(fd); + minioninfo->last_power_cycle = time(NULL); + minioninfo->power_cycles++; + // Reset all chip led counters + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) + minioninfo->chip_status[chip].first_nonce.tv_sec = 0L; + } + return true; +} +#endif + +static void ready_work(struct cgpu_info *minioncgpu, struct work *work, bool rolled) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + K_ITEM *item = NULL; + + K_WLOCK(minioninfo->wfree_list); + + item = k_unlink_head(minioninfo->wfree_list); + + DATA_WORK(item)->work = work; + DATA_WORK(item)->task_id = 0; + memset(&(DATA_WORK(item)->sent), 0, sizeof(DATA_WORK(item)->sent)); + DATA_WORK(item)->nonces = 0; + DATA_WORK(item)->urgent = false; + DATA_WORK(item)->rolled = rolled; + DATA_WORK(item)->errors = 0; + cgtime(&(DATA_WORK(item)->created)); + + k_add_head(minioninfo->wwork_list, item); + + K_WUNLOCK(minioninfo->wfree_list); +} + +static bool oldest_nonce(struct cgpu_info *minioncgpu, int *chip, int *core, uint32_t *task_id, + uint32_t *nonce, bool *no_nonce, struct timeval *when, + bool *another, uint32_t *task_id2, uint32_t *nonce2) +{ + 
struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + K_ITEM *item = NULL; + bool found = false; + + K_WLOCK(minioninfo->rnonce_list); + + item = k_unlink_tail(minioninfo->rnonce_list); + if (item) { + found = true; + *chip = DATA_RES(item)->chip; + *core = DATA_RES(item)->core; + *task_id = DATA_RES(item)->task_id; + *nonce = DATA_RES(item)->nonce; + *no_nonce = DATA_RES(item)->no_nonce; + memcpy(when, &(DATA_RES(item)->when), sizeof(*when)); + *another = DATA_RES(item)->another; + *task_id2 = DATA_RES(item)->task_id2; + *nonce2 = DATA_RES(item)->nonce2; + + k_free_head(minioninfo->rfree_list, item); + } + + K_WUNLOCK(minioninfo->rnonce_list); + + return found; +} + +static const char *addr2txt(uint8_t addr) +{ + switch (addr) { + case READ_ADDR(MINION_SYS_CHIP_SIG): + return "RChipSig"; + case READ_ADDR(MINION_SYS_CHIP_STA): + return "RChipSta"; + case WRITE_ADDR(MINION_SYS_SPI_LED): + return "WLed"; + case WRITE_ADDR(MINION_SYS_MISC_CTL): + return "WMiscCtrl"; + case WRITE_ADDR(MINION_SYS_RSTN_CTL): + return "WResetCtrl"; + case READ_ADDR(MINION_SYS_FIFO_STA): + return "RFifoSta"; + case READ_ADDR(MINION_CORE_ENA0_31): + return "RCoreEna0-31"; + case WRITE_ADDR(MINION_CORE_ENA0_31): + return "WCoreEna0-31"; + case READ_ADDR(MINION_CORE_ENA32_63): + return "RCoreEna32-63"; + case WRITE_ADDR(MINION_CORE_ENA32_63): + return "WCoreEna32-63"; + case READ_ADDR(MINION_CORE_ENA64_95): + return "RCoreEna64-95"; + case WRITE_ADDR(MINION_CORE_ENA64_95): + return "WCoreEna64-95"; + case READ_ADDR(MINION_CORE_ENA96_98): + return "RCoreEna96-98"; + case WRITE_ADDR(MINION_CORE_ENA96_98): + return "WCoreEna96-98"; + case READ_ADDR(MINION_CORE_ACT0_31): + return "RCoreAct0-31"; + case READ_ADDR(MINION_CORE_ACT32_63): + return "RCoreAct32-63"; + case READ_ADDR(MINION_CORE_ACT64_95): + return "RCoreAct64-95"; + case READ_ADDR(MINION_CORE_ACT96_98): + return "RCoreAct96-98"; + case READ_ADDR(MINION_RES_DATA): + return "RResData"; + case 
READ_ADDR(MINION_RES_PEEK): + return "RResPeek"; + case WRITE_ADDR(MINION_QUE_0): + return "WQueWork"; + case READ_ADDR(MINION_NONCE_START): + return "RNonceStart"; + case WRITE_ADDR(MINION_NONCE_START): + return "WNonceStart"; + case READ_ADDR(MINION_NONCE_RANGE): + return "RNonceRange"; + case WRITE_ADDR(MINION_NONCE_RANGE): + return "WNonceRange"; + case READ_ADDR(MINION_SYS_INT_STA): + return "RIntSta"; + case WRITE_ADDR(MINION_SYS_INT_ENA): + return "WIntEna"; + case WRITE_ADDR(MINION_SYS_INT_CLR): + return "WIntClear"; + case WRITE_ADDR(MINION_SYS_BUF_TRIG): + return "WResTrigger"; + case WRITE_ADDR(MINION_SYS_QUE_TRIG): + return "WCmdTrigger"; + case READ_ADDR(MINION_SYS_TEMP_CTL): + return "RTempCtrl"; + case WRITE_ADDR(MINION_SYS_TEMP_CTL): + return "WTempCtrl"; + case READ_ADDR(MINION_SYS_FREQ_CTL): + return "RFreqCtrl"; + case WRITE_ADDR(MINION_SYS_FREQ_CTL): + return "WFreqCtrl"; + case READ_ADDR(MINION_SYS_IDLE_CNT): + return "RIdleCnt"; + } + + // gcc warning if this is in default: + if (IS_ADDR_READ(addr)) + return "RUnhandled"; + else + return "WUnhandled"; +} + +// For display_ioctl() +#define IOCTRL_LOG LOG_WARNING + +// For all other debug so it can easily be switched always on +#define MINION_LOG LOG_DEBUG + +// For task corruption logging +#define MINTASK_LOG LOG_DEBUG + +// Set to 1 for debug +#define MINION_SHOW_IO 0 + +#define DATA_ALL 2048 +#define DATA_OFF 512 + +#if MINION_SHOW_IO +static void display_ioctl(int reply, uint32_t osiz, uint8_t *obuf, uint32_t rsiz, uint8_t *rbuf) +{ + struct minion_result *res; + const char *name, *dir, *ex; + char buf[4096]; + int i, rescount; + + name = addr2txt(obuf[1]); + + if (IS_ADDR_READ(obuf[1])) + dir = "from"; + else + dir = "to"; + + buf[0] = '\0'; + ex = ""; + + switch (obuf[1]) { + case READ_ADDR(MINION_SYS_CHIP_SIG): + case READ_ADDR(MINION_SYS_CHIP_STA): + break; + case WRITE_ADDR(MINION_SYS_SPI_LED): + case WRITE_ADDR(MINION_SYS_MISC_CTL): + case WRITE_ADDR(MINION_SYS_RSTN_CTL): + if (osiz > 
HSIZE()) { + ex = " wrote "; + __bin2hex(buf, obuf + HSIZE(), osiz - HSIZE()); + } else + ex = " wrote nothing"; + break; + default: + if (IS_ADDR_WRITE(obuf[1])) { + if (osiz > HSIZE()) { + ex = " wrote "; + __bin2hex(buf, obuf + HSIZE(), osiz - HSIZE()); + } else + ex = " wrote nothing"; + } + break; + } + + if (reply < 0) { + applog(IOCTRL_LOG, "%s %s chipid %d osiz %d%s%s", + name, dir, (int)obuf[0], (int)osiz, ex, buf); + applog(IOCTRL_LOG, " reply was error %d", reply); + } else { + if (IS_ADDR_WRITE(obuf[1])) { + applog(IOCTRL_LOG, "%s %s chipid %d osiz %d%s%s", + name, dir, (int)obuf[0], (int)osiz, ex, buf); + applog(IOCTRL_LOG, " write ret was %d", reply); + } else { + switch (obuf[1]) { + case READ_ADDR(MINION_RES_DATA): + rescount = (int)((float)rsiz / (float)MINION_RES_DATA_SIZ); + applog(IOCTRL_LOG, "%s %s chipid %d osiz %d%s%s", + name, dir, (int)obuf[0], (int)osiz, ex, buf); + for (i = 0; i < rescount; i++) { + res = (struct minion_result *)(rbuf + osiz - rsiz + (i * MINION_RES_DATA_SIZ)); + if (!IS_RESULT(res)) { + applog(IOCTRL_LOG, " %s reply %d of %d - none", name, i+1, rescount); + } else { + __bin2hex(buf, res->nonce, DATA_SIZ); + applog(IOCTRL_LOG, " %s reply %d of %d %d(%d) was task 0x%04x" + " chipid %d core %d gold %s nonce 0x%s", + name, i+1, rescount, reply, rsiz, + RES_TASK(res), + (int)RES_CHIPID(res), + (int)RES_CORE(res), + (int)RES_GOLD(res) ? 
"Y" : "N", + buf); + } + } + break; + case READ_ADDR(MINION_SYS_CHIP_SIG): + case READ_ADDR(MINION_SYS_CHIP_STA): + default: + applog(IOCTRL_LOG, "%s %s chipid %d osiz %d%s%s", + name, dir, (int)obuf[0], (int)osiz, ex, buf); + __bin2hex(buf, rbuf + osiz - rsiz, rsiz); + applog(IOCTRL_LOG, " %s reply %d(%d) was %s", name, reply, rsiz, buf); + break; + } + } + } +} +#endif + +#define MINION_UNEXPECTED_TASK -999 +#define MINION_OVERSIZE_TASK -998 + +static void set_pin(struct minion_info *minioninfo, int pin, bool on) +{ + volatile uint32_t *paddr; + uint32_t value; + int bcm; + + bcm = minionPins[pin].bcm; + + paddr = minioninfo->gpio + ((on ? BCM2835_GPIO_SET0 : BCM2835_GPIO_CLR0) / 4) + (bcm / 10); + + value = 1 << (bcm % 32); + + *paddr = value; + *paddr = value; +} + +static void init_pins(struct minion_info *minioninfo) +{ + int pin; + + // Initialise all pins high as required + MINION_PIN_BEFORE; + for (pin = 0; pin < (int)MINION_PIN_COUNT; pin++) { + set_pin(minioninfo, pin, true); + MINION_PIN_SLEEP; + } +} + +#define EXTRA_LOG_IO 0 + +static bool minion_init_spi(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int bus, int chip, bool reset); + +static int __do_ioctl(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, + int pin, uint8_t *obuf, uint32_t osiz, uint8_t *rbuf, + uint32_t rsiz, uint64_t *ioseq, MINION_FFL_ARGS) +{ + struct spi_ioc_transfer tran; + bool fail = false, powercycle = false, show = false; + double lastshow, total; + K_ITEM *xitem; + time_t now; + int ret; +#if MINION_SHOW_IO + char dataw[DATA_ALL], datar[DATA_ALL]; +#endif + +#if DO_IO_STATS + struct timeval sta, fin, lsta, lfin, tsd; +#endif + + if ((int)osiz > MINION_BUFSIZ) + quitfrom(1, file, func, line, "%s() invalid osiz %u > %d (chip=%d reg=0x%02x)", + __func__, osiz, MINION_BUFSIZ, (int)(obuf[0]), obuf[1]); + + if (rsiz >= osiz) + quitfrom(1, file, func, line, "%s() invalid rsiz %u >= osiz %u (chip=%u reg=0x%02x)", + __func__, rsiz, osiz, (int)(obuf[0]), 
obuf[1]); + + memset(&obuf[0] + osiz - rsiz, 0xff, rsiz); + +#if MINION_SHOW_IO + // if the a5/5a outside the data change, it means data overrun or corruption + memset(dataw, 0xa5, sizeof(dataw)); + memset(datar, 0x5a, sizeof(datar)); + memcpy(&dataw[DATA_OFF], &obuf[0], osiz); + + char *buf = bin2hex((unsigned char *)&(dataw[DATA_OFF]), osiz); + applog(IOCTRL_LOG, "*** %s() pin %d cid %d sending %02x %02x %s %02x %02x", + __func__, pin, (int)(dataw[DATA_OFF]), + dataw[0], dataw[DATA_OFF-1], buf, + dataw[DATA_OFF+osiz], dataw[DATA_ALL-1]); + free(buf); +#endif + + memset((char *)rbuf, 0x00, osiz); + +// cgsleep_ms(5); // TODO: a delay ... based on the last command? But subtract elapsed + // i.e. do any commands need a delay after the I/O has completed before the next I/O? + + memset(&tran, 0, sizeof(tran)); + if (osiz < MINION_SPI_BUFSIZ) + tran.len = osiz; + else + return MINION_OVERSIZE_TASK; + + tran.delay_usecs = opt_minion_spiusec; + tran.speed_hz = MINION_SPI_SPEED; + +#if MINION_SHOW_IO + tran.tx_buf = (uintptr_t)&(dataw[DATA_OFF]); + tran.rx_buf = (uintptr_t)&(datar[DATA_OFF]); +#else + tran.tx_buf = (uintptr_t)obuf; + tran.rx_buf = (uintptr_t)rbuf; +#endif + + IO_STAT_NOW(&lsta); + mutex_lock(&(minioninfo->spi_lock)); + if (usepins) { + // Pin low for I/O + MINION_PIN_BEFORE; + set_pin(minioninfo, pin, false); + MINION_PIN_SLEEP; + } + IO_STAT_NOW(&sta); + ret = ioctl(minioninfo->spifd, SPI_IOC_MESSAGE(1), (void *)&tran); + *ioseq = minioninfo->ioseq++; + IO_STAT_NOW(&fin); + if (usepins) { + MINION_PIN_AFTER; + // Pin back high after I/O + set_pin(minioninfo, pin, true); + } + now = time(NULL); + if (ret >= 0 && rbuf[0] == 0xff && rbuf[ret-1] == 0xff && + (obuf[1] == READ_ADDR(MINION_RES_DATA) || obuf[1] == READ_ADDR(MINION_SYS_FIFO_STA))) { + int i; + fail = true; + for (i = 1; i < ret-2; i++) { + if (rbuf[i] != 0xff) { + fail = false; + break; + } + } + if (fail) { + powercycle = show = false; + minioninfo->xffs++; + minioninfo->last_xff = now; + + if 
(minioninfo->xfree_list->count > 0) + xitem = k_unlink_head(minioninfo->xfree_list); + else + xitem = k_unlink_tail(minioninfo->xff_list); + DATA_XFF(xitem)->when = now; + if (!minioninfo->xff_list->head) + show = true; + else { + // if !changing and xff_list is full + if (!minioninfo->changing[obuf[0]] && + minioninfo->xfree_list->count == 0) { + total = DATA_XFF(xitem)->when - + DATA_XFF(minioninfo->xff_list->tail)->when; + if (total <= MINION_POWER_TIME) { + powercycle = true; + // Discard the history + k_list_transfer_to_head(minioninfo->xff_list, + minioninfo->xfree_list); + k_add_head(minioninfo->xfree_list, xitem); + xitem = NULL; + } + } + + if (!powercycle) { + lastshow = DATA_XFF(xitem)->when - + DATA_XFF(minioninfo->xff_list->head)->when; + show = (lastshow >= 5); + } + } + if (xitem) + k_add_head(minioninfo->xff_list, xitem); + +#if MINION_ROCKCHIP == 1 + if (powercycle) + minion_toggle_gpio(minioncgpu, MINION_POWERCYCLE_GPIO); +#endif + minion_init_spi(minioncgpu, minioninfo, 0, 0, true); + } + } else if (minioninfo->spi_reset_count) { + if (minioninfo->spi_reset_io) { + if (*ioseq > 0 && (*ioseq % minioninfo->spi_reset_count) == 0) + minion_init_spi(minioncgpu, minioninfo, 0, 0, true); + } else { + if (minioninfo->last_spi_reset == 0) + minioninfo->last_spi_reset = now; + else { + if ((now - minioninfo->last_spi_reset) >= minioninfo->spi_reset_count) + minion_init_spi(minioncgpu, minioninfo, 0, 0, true); + minioninfo->last_spi_reset = now; + } + } + } + if (opt_minion_spidelay) + cgsleep_ms(opt_minion_spidelay); + mutex_unlock(&(minioninfo->spi_lock)); + IO_STAT_NOW(&lfin); + IO_STAT_NOW(&tsd); + + IO_STAT_STORE(&sta, &fin, &lsta, &lfin, &tsd, obuf, osiz, ret, 1); + + if (fail) { + if (powercycle) { + applog(LOG_ERR, "%s%d: power cycle ioctl %"PRIu64" (%"PRIu64")", + minioncgpu->drv->name, minioncgpu->device_id, *ioseq, + minioninfo->xffs - minioninfo->last_displayed_xff); + minioninfo->last_displayed_xff = minioninfo->xffs; + } else if (show) { + 
char *what = "unk"; + switch (obuf[1]) { + case READ_ADDR(MINION_RES_DATA): + what = "nonce"; + break; + case READ_ADDR(MINION_SYS_FIFO_STA): + what = "fifo"; + break; + } + applog(LOG_ERR, "%s%d: reset ioctl %"PRIu64" %s all 0xff (%"PRIu64")", + minioncgpu->drv->name, minioncgpu->device_id, + *ioseq, what, minioninfo->xffs - minioninfo->last_displayed_xff); + minioninfo->last_displayed_xff = minioninfo->xffs; + } + } + +#if MINION_SHOW_IO + if (ret > 0) { + buf = bin2hex((unsigned char *)&(datar[DATA_OFF]), ret); + applog(IOCTRL_LOG, "*** %s() reply %d = pin %d cid %d %02x %02x %s %02x %02x", + __func__, ret, pin, (int)(dataw[DATA_OFF]), + datar[0], datar[DATA_OFF-1], buf, + datar[DATA_OFF+osiz], datar[DATA_ALL-1]); + free(buf); + } else + applog(LOG_ERR, "*** %s() reply = %d", __func__, ret); + + memcpy(&rbuf[0], &datar[DATA_OFF], osiz); + + display_ioctl(ret, osiz, (uint8_t *)(&dataw[DATA_OFF]), rsiz, (uint8_t *)(&datar[DATA_OFF])); +#endif +#if EXTRA_LOG_IO + if (obuf[1] == READ_ADDR(MINION_RES_PEEK) || + obuf[1] == READ_ADDR(MINION_RES_DATA) || + obuf[1] == READ_ADDR(MINION_SYS_FIFO_STA)) { + char *uf1, *uf2, c; + uf1 = bin2hex(obuf, DATA_SIZ); + uf2 = bin2hex(rbuf, (size_t)ret); + switch (obuf[1]) { + case READ_ADDR(MINION_RES_PEEK): + c = 'P'; + break; + case READ_ADDR(MINION_RES_DATA): + c = 'D'; + break; + case READ_ADDR(MINION_SYS_FIFO_STA): + c = 'F'; + break; + } + applog(LOG_WARNING, "*** ioseq %"PRIu64" cmd %c %s rep %.8s %s", + *ioseq, c, uf1, uf2, uf2+8); + free(uf2); + free(uf1); + } + if (obuf[1] == WRITE_ADDR(MINION_QUE_0)) { + char *uf; + uf = bin2hex(obuf, osiz); + applog(LOG_WARNING, "*** ioseq %"PRIu64" work %s", + *ioseq, uf); + free(uf); + } +#endif + return ret; +} + +#if 1 +#define do_ioctl(_pin, _obuf, _osiz, _rbuf, _rsiz, _ioseq) \ + __do_ioctl(minioncgpu, minioninfo, _pin, _obuf, _osiz, _rbuf, \ + _rsiz, _ioseq, MINION_FFL_HERE) +#else +#define do_ioctl(_pin, _obuf, _osiz, _rbuf, _rsiz, _ioseq) \ + _do_ioctl(minioninfo, _pin, _obuf, 
_osiz, _rbuf, \ + _rsiz, _ioseq, MINION_FFL_HERE) +// This sends an expected to work, SPI command before each SPI command +static int _do_ioctl(struct minion_info *minioninfo, int pin, uint8_t *obuf, uint32_t osiz, uint8_t *rbuf, uint32_t rsiz, uint64_t *ioseq, MINION_FFL_ARGS) +{ + struct minion_header *head; + uint8_t buf1[MINION_BUFSIZ]; + uint8_t buf2[MINION_BUFSIZ]; + uint32_t siz; + + head = (struct minion_header *)buf1; + head->chipid = 1; // Needs to be set to a valid chip + head->reg = READ_ADDR(MINION_SYS_FIFO_STA); + SET_HEAD_SIZ(head, DATA_SIZ); + siz = HSIZE() + DATA_SIZ; + __do_ioctl(minioncgpu, minioninfo, pin, buf1, siz, buf2, MINION_CORE_SIZ, ioseq, MINION_FFL_PASS); + + return __do_ioctl(minioncgpu, minioninfo, pin, obuf, osiz, rbuf, rsiz, ioseq, MINION_FFL_PASS); +} +#endif + +static bool _minion_txrx(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, TASK_ITEM *task, MINION_FFL_ARGS) +{ + struct minion_header *head; + + head = (struct minion_header *)(task->obuf); + head->chipid = minioninfo->chipid[task->chip]; + if (task->write) + SET_HEAD_WRITE(head, task->address); + else + SET_HEAD_READ(head, task->address); + + SET_HEAD_SIZ(head, task->wsiz + task->rsiz); + + if (task->wsiz) + memcpy(&(head->data[0]), task->wbuf, task->wsiz); + task->osiz = HSIZE() + task->wsiz + task->rsiz; + + task->reply = do_ioctl(CHIP_PIN(task->chip), task->obuf, task->osiz, task->rbuf, task->rsiz, + &(task->ioseq)); + if (task->reply < 0) { + applog(LOG_ERR, "%s%d: chip=%d ioctl failed reply=%d err=%d" MINION_FFL, + minioncgpu->drv->name, minioncgpu->device_id, + task->chip, task->reply, errno, MINION_FFL_PASS); + } else if (task->reply < (int)(task->osiz)) { + applog(LOG_ERR, "%s%d: chip=%d ioctl failed to write %d only wrote %d (err=%d)" MINION_FFL, + minioncgpu->drv->name, minioncgpu->device_id, + task->chip, (int)(task->osiz), task->reply, errno, MINION_FFL_PASS); + } + + return (task->reply >= (int)(task->osiz)); +} + +// Only for DATA_SIZ commands 
+static int build_cmd(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int chip, uint8_t reg, uint8_t *rbuf, uint32_t rsiz, uint8_t *data) +{ + struct minion_header *head; + uint8_t wbuf[MINION_BUFSIZ]; + uint32_t wsiz; + uint64_t ioseq; + int reply; + + head = (struct minion_header *)wbuf; + head->chipid = minioninfo->chipid[chip]; + head->reg = reg; + SET_HEAD_SIZ(head, DATA_SIZ); + + head->data[0] = data[0]; + head->data[1] = data[1]; + head->data[2] = data[2]; + head->data[3] = data[3]; + + wsiz = HSIZE() + DATA_SIZ; + reply = do_ioctl(CHIP_PIN(chip), wbuf, wsiz, rbuf, rsiz, &ioseq); + + if (reply != (int)wsiz) { + applog(LOG_ERR, "%s: chip %d %s returned %d (should be %d)", + minioncgpu->drv->dname, chip, + addr2txt(head->reg), + reply, (int)wsiz); + } + + return reply; +} + +static void set_freq(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int chip, int freq) +{ + uint8_t rbuf[MINION_BUFSIZ]; + uint8_t data[4]; + uint32_t value; + __maybe_unused int reply; + + freq /= MINION_FREQ_FACTOR; + if (freq < MINION_FREQ_FACTOR_MIN) + freq = MINION_FREQ_FACTOR_MIN; + if (freq > MINION_FREQ_FACTOR_MAX) + freq = MINION_FREQ_FACTOR_MAX; + value = minion_freq[freq]; + data[0] = (uint8_t)(value & 0xff); + data[1] = (uint8_t)(((value & 0xff00) >> 8) & 0xff); + data[2] = (uint8_t)(((value & 0xff0000) >> 16) & 0xff); + data[3] = (uint8_t)(((value & 0xff000000) >> 24) & 0xff); + + minioninfo->freqsent[chip] = value; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_FREQ_CTL), + rbuf, 0, data); + + cgtime(&(minioninfo->lastfreq[chip])); + applog(LOG_DEBUG, "%s%i: chip %d freq %d sec %d usec %d", + minioncgpu->drv->name, minioncgpu->device_id, + chip, freq, + (int)(minioninfo->lastfreq[chip].tv_sec) % 10, + (int)(minioninfo->lastfreq[chip].tv_usec)); + + // Reset all this info on chip reset or freq change + minioninfo->reset_time[chip] = (int)FREQ_DELAY(minioninfo->init_freq[chip]); + if (second_check) + 
minioninfo->reset2_time[chip] = (int)FREQ2_DELAY(minioninfo->init_freq[chip]); + + minioninfo->chip_status[chip].first_nonce.tv_sec = 0L; + + // Discard chip history (if there is any) + if (minioninfo->hfree_list) { + K_WLOCK(minioninfo->hfree_list); + k_list_transfer_to_head(minioninfo->hchip_list[chip], minioninfo->hfree_list); + minioninfo->reset_mark[chip] = NULL; + minioninfo->reset_count[chip] = 0; + K_WUNLOCK(minioninfo->hfree_list); + } +} + +static void init_chip(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int chip) +{ + uint8_t rbuf[MINION_BUFSIZ]; + uint8_t data[4]; + __maybe_unused int reply; + int choice; + + // Complete chip reset + data[0] = 0x00; + data[1] = 0x00; + data[2] = 0xa5; + data[3] = 0xf5; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_RSTN_CTL), + rbuf, 0, data); + + // Default reset + data[0] = SYS_RSTN_CTL_INIT; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_RSTN_CTL), + rbuf, 0, data); + + // Default initialisation + data[0] = SYS_MISC_CTL_DEFAULT; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_MISC_CTL), + rbuf, 0, data); + + // Set chip frequency + choice = minioninfo->init_freq[chip]; + if (choice < MINION_FREQ_MIN) + choice = MINION_FREQ_MIN; + if (choice > MINION_FREQ_MAX) + choice = MINION_FREQ_MAX; + minioninfo->init_freq[chip] = choice; + set_freq(minioncgpu, minioninfo, chip, choice); + + // Set temp threshold + choice = minioninfo->init_temp[chip]; + if (choice == MINION_TEMP_CTL_DISABLE) + choice = MINION_TEMP_CTL_DISABLE_VALUE; + else { + if (choice < MINION_TEMP_CTL_MIN_VALUE || choice > MINION_TEMP_CTL_MAX_VALUE) + choice = MINION_TEMP_CTL_DEF; + choice -= MINION_TEMP_CTL_MIN_VALUE; + choice /= MINION_TEMP_CTL_STEP; + choice += MINION_TEMP_CTL_MIN; + if (choice < MINION_TEMP_CTL_MIN) + choice = 
MINION_TEMP_CTL_MIN; + if (choice > MINION_TEMP_CTL_MAX) + choice = MINION_TEMP_CTL_MAX; + } + data[0] = (uint8_t)choice; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + minioninfo->chip_status[chip].tempsent = choice; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_TEMP_CTL), + rbuf, 0, data); +} + +static void enable_chip_cores(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int chip) +{ + uint8_t rbuf[MINION_BUFSIZ]; + uint8_t data[4]; + __maybe_unused int reply; + int rep, i; + + for (i = 0; i < 4; i++) + data[i] = minioninfo->init_cores[chip][i]; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_CORE_ENA0_31), + rbuf, 0, data); + + for (i = 0; i < 4; i++) + data[i] = minioninfo->init_cores[chip][i+4]; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_CORE_ENA32_63), + rbuf, 0, data); + + for (i = 0; i < 4; i++) + data[i] = minioninfo->init_cores[chip][i+8]; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_CORE_ENA64_95), + rbuf, 0, data); + + for (i = 0; i < 4; i++) + data[i] = minioninfo->init_cores[chip][i+12]; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_CORE_ENA96_98), + rbuf, 0, data); + +/* Below is for testing - disabled/use default + // 1/3 range for each of the 3 cores +// data[0] = 0x55; +// data[1] = 0x55; +// data[2] = 0x55; +// data[3] = 0x55; + + // quicker replies +// data[0] = 0x05; +// data[1] = 0x05; +// data[2] = 0x05; +// data[3] = 0x05; + + // 0x00000100 at 20MH/s per core = 336TH/s if 1 nonce per work item + // 0x00001000 = 21.0TH/s - so well above 2TH/s + // 0x00002000 = 10.5TH/s - above 2TH/s + // speed test + data[0] = 0x00; + data[1] = 0x01; + data[2] = 0x00; + data[3] = 0x00; +// data[3] = 0x20; // slow it down for other testing + + // 2 cores +// data[0] = 0xff; +// data[1] = 0xff; +// data[2] = 0xff; +// data[3] = 0x7f; + + reply = build_cmd(minioncgpu, minioninfo, + chip, 
WRITE_ADDR(MINION_NONCE_RANGE), + rbuf, 0, data); + + // find lots more nonces in a short time on my test data + // i.e. emulate a MUCH higher hash rate on SPI and work + // generation/testing + // Current test data (same repeated 10 times) has nonce 0x05e0ed6d + data[0] = 0x00; + data[1] = 0xed; + data[2] = 0xe0; + data[3] = 0x05; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_NONCE_START), + rbuf, 0, data); +*/ + + // store the core ena state + for (rep = 0; rep < MINION_CORE_REPS; rep++) { + data[0] = 0x0; + data[1] = 0x0; + data[2] = 0x0; + data[3] = 0x0; + + reply = build_cmd(minioncgpu, minioninfo, + chip, READ_ADDR(MINION_CORE_ENA0_31 + rep), + rbuf, MINION_CORE_SIZ, data); + + minioninfo->chip_core_ena[rep][chip] = *((uint32_t *)&(rbuf[HSIZE()])); + } + + // store the core active state + for (rep = 0; rep < MINION_CORE_REPS; rep++) { + data[0] = 0x0; + data[1] = 0x0; + data[2] = 0x0; + data[3] = 0x0; + + reply = build_cmd(minioncgpu, minioninfo, + chip, READ_ADDR(MINION_CORE_ACT0_31 + rep), + rbuf, MINION_CORE_SIZ, data); + + minioninfo->chip_core_act[rep][chip] = *((uint32_t *)&(rbuf[HSIZE()])); + } +} + +#if ENABLE_INT_NONO +static void enable_interrupt(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int chip) +{ + uint8_t rbuf[MINION_BUFSIZ]; + uint8_t data[4]; + __maybe_unused int reply; + + data[0] = MINION_RESULT_INT_SIZE; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_BUF_TRIG), + rbuf, 0, data); + +// data[0] = MINION_QUE_MAX; // spaces available ... i.e. 
empty +// data[0] = MINION_QUE_LOW; // spaces in use + data[0] = MINION_QUE_MAX - MINION_QUE_LOW; // spaces available + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_QUE_TRIG), + rbuf, 0, data); + +// data[0] = MINION_RESULT_INT; + data[0] = MINION_RESULT_INT | MINION_CMD_INT; + data[1] = 0x00; + data[2] = 0x00; + data[3] = 0x00; + + reply = build_cmd(minioncgpu, minioninfo, + chip, WRITE_ADDR(MINION_SYS_INT_ENA), + rbuf, 0, data); +} +#endif + +static void minion_detect_one(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int pin, int chipid) +{ + struct minion_header *head; + uint8_t wbuf[MINION_BUFSIZ]; + uint8_t rbuf[MINION_BUFSIZ]; + uint32_t wsiz, rsiz; + int reply, tries, newchip; + uint64_t ioseq; + bool ok; + + head = (struct minion_header *)wbuf; + head->chipid = chipid; + rsiz = MINION_SYS_SIZ; + SET_HEAD_READ(head, MINION_SYS_CHIP_SIG); + SET_HEAD_SIZ(head, rsiz); + wsiz = HSIZE() + rsiz; + + tries = 0; + ok = false; + do { + reply = do_ioctl(pin, wbuf, wsiz, rbuf, rsiz, &ioseq); + + if (reply == (int)(wsiz)) { + uint32_t sig = u8tou32(rbuf, wsiz - rsiz); + + if (sig == MINION_CHIP_SIG) { + newchip = (minioninfo->chips)++; + minioninfo->has_chip[newchip] = true; + minioninfo->chipid[newchip] = chipid; + minioninfo->chip_pin[newchip] = pin; + ok = true; + } else { + if (sig == MINION_CHIP_SIG_SHIFT1 || + sig == MINION_CHIP_SIG_SHIFT2 || + sig == MINION_CHIP_SIG_SHIFT3 || + sig == MINION_CHIP_SIG_SHIFT4) { + applog(LOG_WARNING, "%s: pin %d chipid %d detect offset got" + " 0x%08x wanted 0x%08x", + minioncgpu->drv->dname, pin, chipid, + sig, MINION_CHIP_SIG); + } else { + if (sig == MINION_NOCHIP_SIG || + sig == MINION_NOCHIP_SIG2) // Assume no chip + ok = true; + else { + applog(LOG_ERR, "%s: pin %d chipid %d detect failed" + " got 0x%08x wanted 0x%08x", + minioncgpu->drv->dname, pin, + chipid, sig, MINION_CHIP_SIG); + } + } + } + } else { + applog(LOG_ERR, "%s: 
pin %d chipid %d reply %d ignored should be %d", + minioncgpu->drv->dname, pin, chipid, reply, (int)(wsiz)); + } + } while (!ok && ++tries <= MINION_SIG_TRIES); + + if (!ok) { + applog(LOG_ERR, "%s: pin %d chipid %d - detect failure status", + minioncgpu->drv->dname, pin, chipid); + } +} + +// Simple detect - just check each chip for the signature +static void minion_detect_chips(struct cgpu_info *minioncgpu, struct minion_info *minioninfo) +{ + int pin, chipid, chip; + int pinend, start_freq, want_freq, freqms; + +#if MINION_ROCKCHIP == 1 + minion_toggle_gpio(minioncgpu, MINION_POWERCYCLE_GPIO); + cgsleep_ms(100); +#endif + + if (usepins) { + init_pins(minioninfo); + pinend = (int)MINION_PIN_COUNT; + } else + pinend = 1; + + for (pin = 0; pin < pinend; pin++) { + for (chipid = MINION_MIN_CHIP; chipid <= MINION_MAX_CHIP; chipid++) { + minion_detect_one(minioncgpu, minioninfo, pin, chipid); + } + } + + if (minioninfo->chips) { + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) { + want_freq = minioninfo->init_freq[chip]; + start_freq = want_freq * opt_minion_freqpercent / 100; + start_freq -= (start_freq % MINION_FREQ_FACTOR); + if (start_freq < MINION_FREQ_MIN) + start_freq = MINION_FREQ_MIN; + minioninfo->want_freq[chip] = want_freq; + minioninfo->init_freq[chip] = start_freq; + if (start_freq != want_freq) { + freqms = opt_minion_freqchange; + freqms /= ((want_freq - start_freq) / MINION_FREQ_FACTOR); + if (freqms < 0) + freqms = -freqms; + minioninfo->freqms[chip] = freqms; + minioninfo->changing[chip] = true; + } + init_chip(minioncgpu, minioninfo, chip); + enable_chip_cores(minioncgpu, minioninfo, chip); + } + } + +#if ENABLE_INT_NONO + // After everything is ready + for (chip = 0; chip < MINION_CHIPS; chip++) + if (minioninfo->has_chip[chip]) + enable_interrupt(minioncgpu, minioninfo, chip); +#endif + } +} + +static const char *minion_modules[] = { +#if MINION_ROCKCHIP == 0 + "i2c-dev", + "i2c-bcm2708", + "spidev", + 
"spi-bcm2708", +#endif + NULL +}; + +static struct { + int request; + int value; +} minion_ioc[] = { + { SPI_IOC_RD_MODE, 0 }, + { SPI_IOC_WR_MODE, 0 }, + { SPI_IOC_RD_BITS_PER_WORD, 8 }, + { SPI_IOC_WR_BITS_PER_WORD, 8 }, + { SPI_IOC_RD_MAX_SPEED_HZ, MINION_SPI_SPEED }, + { SPI_IOC_WR_MAX_SPEED_HZ, MINION_SPI_SPEED }, + { -1, -1 } +}; + +static bool minion_init_spi(struct cgpu_info *minioncgpu, struct minion_info *minioninfo, int bus, int chip, bool reset) +{ + int i, err, data; + char buf[64]; + + if (reset) { + // TODO: maybe slow it down? + close(minioninfo->spifd); + if (opt_minion_spisleep) + cgsleep_ms(opt_minion_spisleep); + minioninfo->spifd = open(minioncgpu->device_path, O_RDWR); + if (minioninfo->spifd < 0) + goto bad_out; + minioninfo->spi_resets++; +// minioninfo->chip_status[chip].first_nonce.tv_sec = 0L; + } else { + for (i = 0; minion_modules[i]; i++) { + snprintf(buf, sizeof(buf), "modprobe %s", minion_modules[i]); + err = system(buf); + if (err) { + applog(LOG_ERR, "%s: failed to modprobe %s (%d) - you need to be root?", + minioncgpu->drv->dname, + minion_modules[i], err); + goto bad_out; + } + } + + snprintf(buf, sizeof(buf), "/dev/spidev%d.%d", bus, chip); + minioninfo->spifd = open(buf, O_RDWR); + if (minioninfo->spifd < 0) { + applog(LOG_ERR, "%s: failed to open spidev (%d)", + minioncgpu->drv->dname, + errno); + goto bad_out; + } + + minioncgpu->device_path = strdup(buf); + } + + for (i = 0; minion_ioc[i].value != -1; i++) { + data = minion_ioc[i].value; + err = ioctl(minioninfo->spifd, minion_ioc[i].request, (void *)&data); + if (err < 0) { + applog(LOG_ERR, "%s: failed ioctl configuration (%d) (%d)", + minioncgpu->drv->dname, + i, errno); + goto close_out; + } + } + + return true; + +close_out: + close(minioninfo->spifd); + minioninfo->spifd = 0; + free(minioncgpu->device_path); + minioncgpu->device_path = NULL; + +bad_out: + return false; +} + +static bool minion_setup_chip_select(struct cgpu_info *minioncgpu, struct minion_info 
*minioninfo) +{ + volatile uint32_t *paddr; + uint32_t mask, value, mem; + int count, memfd, pin, bcm; + + memfd = open(minion_memory, O_RDWR | O_SYNC); + if (memfd < 0) { + applog(LOG_ERR, "%s: failed open %s (%d)", + minioncgpu->drv->dname, + minion_memory, errno); + return false; + } + + minioninfo->gpio = (volatile unsigned *)mmap(NULL, MINION_PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED, memfd, + minion_memory_addr); + if (minioninfo->gpio == MAP_FAILED) { + close(memfd); + applog(LOG_ERR, "%s: failed mmap gpio (%d)", + minioncgpu->drv->dname, + errno); + return false; + } + + close(memfd); + + for (pin = 0; pin < (int)MINION_PIN_COUNT; pin++) { + bcm = minionPins[pin].bcm; + + paddr = minioninfo->gpio + (BCM2835_GPIO_FSEL0 / 4) + (bcm / 10); + + // Set each pin to be an output pin + mask = BCM2835_GPIO_FSEL_MASK << ((bcm % 10) * 3); + value = BCM2835_GPIO_FSEL_OUTPUT << ((bcm % 10) * 3); + + // Read settings + mem = *paddr; + *paddr; + + mem = (mem & ~mask) | (value & mask); + + // Write appended setting + *paddr = mem; + *paddr = mem; + + count++; + } + + if (count == 0) + return false; + else + return true; +} + +#if ENABLE_INT_NONO +static bool minion_init_gpio_interrupt(struct cgpu_info *minioncgpu, struct minion_info *minioninfo) +{ + char pindir[64], ena[64], pin[8], dir[64], edge[64], act[64]; + struct stat st; + int file, err; + ssize_t ret; + + snprintf(pindir, sizeof(pindir), MINION_GPIO_SYS MINION_GPIO_PIN, + MINION_GPIO_RESULT_INT_PIN); + memset(&st, 0, sizeof(st)); + + if (stat(pindir, &st) == 0) { // already exists + if (!S_ISDIR(st.st_mode)) { + applog(LOG_ERR, "%s: failed1 to enable GPIO pin %d interrupt" + " - not a directory", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN); + return false; + } + } else { + snprintf(ena, sizeof(ena), MINION_GPIO_SYS MINION_GPIO_ENA); + file = open(ena, O_WRONLY | O_SYNC); + if (file == -1) { + applog(LOG_ERR, "%s: failed2 to enable GPIO pin %d interrupt (%d)" + " - you need to be root?", + 
minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + errno); + return false; + } + snprintf(pin, sizeof(pin), MINION_GPIO_ENA_VAL, MINION_GPIO_RESULT_INT_PIN); + ret = write(file, pin, (size_t)strlen(pin)); + if (ret != (ssize_t)strlen(pin)) { + if (ret < 0) + err = errno; + else + err = (int)ret; + close(file); + applog(LOG_ERR, "%s: failed3 to enable GPIO pin %d interrupt (%d:%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + err, (int)strlen(pin)); + return false; + } + close(file); + + // Check again if it exists + memset(&st, 0, sizeof(st)); + if (stat(pindir, &st) != 0) { + applog(LOG_ERR, "%s: failed4 to enable GPIO pin %d interrupt (%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + errno); + return false; + } + } + + // Set the pin attributes + // Direction + snprintf(dir, sizeof(dir), MINION_GPIO_SYS MINION_GPIO_PIN MINION_GPIO_DIR, + MINION_GPIO_RESULT_INT_PIN); + file = open(dir, O_WRONLY | O_SYNC); + if (file == -1) { + applog(LOG_ERR, "%s: failed5 to enable GPIO pin %d interrupt (%d)" + " - you need to be root?", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + errno); + return false; + } + ret = write(file, MINION_GPIO_DIR_READ, (size_t)strlen(MINION_GPIO_DIR_READ)); + if (ret != (ssize_t)strlen(MINION_GPIO_DIR_READ)) { + if (ret < 0) + err = errno; + else + err = (int)ret; + close(file); + applog(LOG_ERR, "%s: failed6 to enable GPIO pin %d interrupt (%d:%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + err, (int)strlen(MINION_GPIO_DIR_READ)); + return false; + } + close(file); + + // Edge + snprintf(edge, sizeof(edge), MINION_GPIO_SYS MINION_GPIO_PIN MINION_GPIO_EDGE, + MINION_GPIO_RESULT_INT_PIN); + file = open(edge, O_WRONLY | O_SYNC); + if (file == -1) { + applog(LOG_ERR, "%s: failed7 to enable GPIO pin %d interrupt (%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + errno); + return false; + } + ret = write(file, MINION_GPIO_EDGE_RISING, (size_t)strlen(MINION_GPIO_EDGE_RISING)); + 
if (ret != (ssize_t)strlen(MINION_GPIO_EDGE_RISING)) { + if (ret < 0) + err = errno; + else + err = (int)ret; + close(file); + applog(LOG_ERR, "%s: failed8 to enable GPIO pin %d interrupt (%d:%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + err, (int)strlen(MINION_GPIO_EDGE_RISING)); + return false; + } + close(file); + + // Active + snprintf(act, sizeof(act), MINION_GPIO_SYS MINION_GPIO_PIN MINION_GPIO_ACT, + MINION_GPIO_RESULT_INT_PIN); + file = open(act, O_WRONLY | O_SYNC); + if (file == -1) { + applog(LOG_ERR, "%s: failed9 to enable GPIO pin %d interrupt (%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + errno); + return false; + } + ret = write(file, MINION_GPIO_ACT_HI, (size_t)strlen(MINION_GPIO_ACT_HI)); + if (ret != (ssize_t)strlen(MINION_GPIO_ACT_HI)) { + if (ret < 0) + err = errno; + else + err = (int)ret; + close(file); + applog(LOG_ERR, "%s: failed10 to enable GPIO pin %d interrupt (%d:%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + err, (int)strlen(MINION_GPIO_ACT_HI)); + return false; + } + close(file); + + // Setup fd access to Value + snprintf(minioninfo->gpiointvalue, sizeof(minioninfo->gpiointvalue), + MINION_GPIO_SYS MINION_GPIO_PIN MINION_GPIO_VALUE, + MINION_GPIO_RESULT_INT_PIN); + minioninfo->gpiointfd = open(minioninfo->gpiointvalue, O_RDONLY); + if (minioninfo->gpiointfd == -1) { + applog(LOG_ERR, "%s: failed11 to enable GPIO pin %d interrupt (%d)", + minioncgpu->drv->dname, + MINION_GPIO_RESULT_INT_PIN, + errno); + return false; + } + + return true; +} +#endif + +// Default meaning all cores +static void default_all_cores(uint8_t *cores) +{ + int i; + + // clear all bits + for (i = 0; i < (int)(DATA_SIZ * MINION_CORE_REPS); i++) + cores[i] = 0x00; + + // enable (only) all cores + for (i = 0; i < MINION_CORES; i++) + ENABLE_CORE(cores, i); +} + +static void minion_process_options(struct minion_info *minioninfo) +{ + int last_freq, last_temp; + char *freq, *temp, *core, *comma, *buf, *plus, *minus; 
+ uint8_t last_cores[DATA_SIZ*MINION_CORE_REPS]; + int i, core1, core2; + bool cleared; + + if (opt_minion_spireset && *opt_minion_spireset) { + bool is_io = true; + int val; + + switch (tolower(*opt_minion_spireset)) { + case 'i': + is_io = true; + break; + case 's': + is_io = false; + break; + default: + applog(LOG_WARNING, "ERR: Invalid SPI reset '%s'", + opt_minion_spireset); + goto skip; + } + val = atoi(opt_minion_spireset+1); + if (val < 0 || val > 9999) { + applog(LOG_WARNING, "ERR: Invalid SPI reset '%s'", + opt_minion_spireset); + } else { + minioninfo->spi_reset_io = is_io; + minioninfo->spi_reset_count = val; + minioninfo->last_spi_reset = time(NULL); + } + } +skip: + last_freq = MINION_FREQ_DEF; + if (opt_minion_freq && *opt_minion_freq) { + buf = freq = strdup(opt_minion_freq); + comma = strchr(freq, ','); + if (comma) + *(comma++) = '\0'; + + for (i = 0; i < (int)MINION_CHIPS; i++) { + if (freq && isdigit(*freq)) { + last_freq = (int)(round((double)atoi(freq) / (double)MINION_FREQ_FACTOR)) * MINION_FREQ_FACTOR; + if (last_freq < MINION_FREQ_MIN) + last_freq = MINION_FREQ_MIN; + if (last_freq > MINION_FREQ_MAX) + last_freq = MINION_FREQ_MAX; + + freq = comma; + if (comma) { + comma = strchr(freq, ','); + if (comma) + *(comma++) = '\0'; + } + } + minioninfo->init_freq[i] = last_freq; + } + free(buf); + } + + last_temp = MINION_TEMP_CTL_DEF; + if (opt_minion_temp && *opt_minion_temp) { + buf = temp = strdup(opt_minion_temp); + comma = strchr(temp, ','); + if (comma) + *(comma++) = '\0'; + + for (i = 0; i < (int)MINION_CHIPS; i++) { + if (temp) { + if (isdigit(*temp)) { + last_temp = atoi(temp); + last_temp -= (last_temp % MINION_TEMP_CTL_STEP); + if (last_temp < MINION_TEMP_CTL_MIN_VALUE) + last_temp = MINION_TEMP_CTL_MIN_VALUE; + if (last_temp > MINION_TEMP_CTL_MAX_VALUE) + last_temp = MINION_TEMP_CTL_MAX_VALUE; + } else { + if (strcasecmp(temp, MINION_TEMP_DISABLE) == 0) + last_temp = MINION_TEMP_CTL_DISABLE; + } + + temp = comma; + if (comma) { + 
comma = strchr(temp, ','); + if (comma) + *(comma++) = '\0'; + } + } + minioninfo->init_temp[i] = last_temp; + } + free(buf); + } + + default_all_cores(&(last_cores[0])); + // default to all cores until we find valid data + cleared = false; + if (opt_minion_cores && *opt_minion_cores) { + buf = core = strdup(opt_minion_cores); + comma = strchr(core, ','); + if (comma) + *(comma++) = '\0'; + + for (i = 0; i < (int)MINION_CHIPS; i++) { + // default to previous until we find valid data + cleared = false; + if (core) { + plus = strchr(core, '+'); + if (plus) + *(plus++) = '\0'; + while (core) { + minus = strchr(core, '-'); + if (minus) + *(minus++) = '\0'; + if (isdigit(*core)) { + core1 = atoi(core); + if (core1 >= 0 && core1 < MINION_CORES) { + if (!minus) { + if (!cleared) { + memset(last_cores, 0, sizeof(last_cores)); + cleared = true; + } + ENABLE_CORE(last_cores, core1); + } else { + core2 = atoi(minus); + if (core2 >= core1) { + if (core2 >= MINION_CORES) + core2 = MINION_CORES - 1; + while (core1 <= core2) { + if (!cleared) { + memset(last_cores, 0, + sizeof(last_cores)); + cleared = true; + } + ENABLE_CORE(last_cores, core1); + core1++; + } + } + } + } + } else { + if (strcasecmp(core, MINION_CORE_ALL) == 0) + default_all_cores(&(last_cores[0])); + } + core = plus; + if (plus) { + plus = strchr(core, '+'); + if (plus) + *(plus++) = '\0'; + } + } + core = comma; + if (comma) { + comma = strchr(core, ','); + if (comma) + *(comma++) = '\0'; + } + } + memcpy(&(minioninfo->init_cores[i][0]), &(last_cores[0]), sizeof(last_cores)); + } + free(buf); + } +} + +static void minion_detect(bool hotplug) +{ + struct cgpu_info *minioncgpu = NULL; + struct minion_info *minioninfo = NULL; + char buf[512]; + size_t off; + int i; + + if (hotplug) + return; + + define_test(); + + minioncgpu = calloc(1, sizeof(*minioncgpu)); + if (unlikely(!minioncgpu)) + quithere(1, "Failed to calloc minioncgpu"); + + minioncgpu->drv = &minion_drv; + minioncgpu->deven = DEV_ENABLED; + 
minioncgpu->threads = 1; + + minioninfo = calloc(1, sizeof(*minioninfo)); // everything '0' + if (unlikely(!minioninfo)) + quithere(1, "Failed to calloc minioninfo"); + minioncgpu->device_data = (void *)minioninfo; + + if (!minion_init_spi(minioncgpu, minioninfo, MINION_SPI_BUS, MINION_SPI_CHIP, false)) + goto unalloc; + +#if ENABLE_INT_NONO + if (!minion_init_gpio_interrupt(minioncgpu, minioninfo)) + goto unalloc; +#endif + + + if (usepins) { + if (!minion_setup_chip_select(minioncgpu, minioninfo)) + goto unalloc; + } + + mutex_init(&(minioninfo->spi_lock)); + mutex_init(&(minioninfo->sta_lock)); + + for (i = 0; i < (int)MINION_CHIPS; i++) { + minioninfo->init_freq[i] = MINION_FREQ_DEF; + minioninfo->init_temp[i] = MINION_TEMP_CTL_DEF; + default_all_cores(&(minioninfo->init_cores[i][0])); + } + + minion_process_options(minioninfo); + + applog(LOG_WARNING, "%s: checking for chips ...", minioncgpu->drv->dname); + + minion_detect_chips(minioncgpu, minioninfo); + + buf[0] = '\0'; + for (i = 0; i < (int)MINION_CHIPS; i++) { + if (minioninfo->has_chip[i]) { + off = strlen(buf); + snprintf(buf + off, sizeof(buf) - off, " %d:%d/%d", + i, minioninfo->chip_pin[i], (int)(minioninfo->chipid[i])); + } + } + + applog(LOG_WARNING, "%s: found %d chip%s:%s", + minioncgpu->drv->dname, minioninfo->chips, + (minioninfo->chips == 1) ? 
"" : "s", buf); + + if (minioninfo->chips == 0) + goto cleanup; + + if (!add_cgpu(minioncgpu)) + goto cleanup; + + mutex_init(&(minioninfo->nonce_lock)); + + minioninfo->wfree_list = k_new_list("Work", sizeof(WORK_ITEM), + ALLOC_WORK_ITEMS, LIMIT_WORK_ITEMS, true); + minioninfo->wwork_list = k_new_store(minioninfo->wfree_list); + minioninfo->wstale_list = k_new_store(minioninfo->wfree_list); + // Initialise them all in case we later decide to enable chips + for (i = 0; i < (int)MINION_CHIPS; i++) { + minioninfo->wque_list[i] = k_new_store(minioninfo->wfree_list); + minioninfo->wchip_list[i] = k_new_store(minioninfo->wfree_list); + } + + minioninfo->tfree_list = k_new_list("Task", sizeof(TASK_ITEM), + ALLOC_TASK_ITEMS, LIMIT_TASK_ITEMS, true); + minioninfo->task_list = k_new_store(minioninfo->tfree_list); + minioninfo->treply_list = k_new_store(minioninfo->tfree_list); + + minioninfo->rfree_list = k_new_list("Reply", sizeof(RES_ITEM), + ALLOC_RES_ITEMS, LIMIT_RES_ITEMS, true); + minioninfo->rnonce_list = k_new_store(minioninfo->rfree_list); + + minioninfo->history_gen = MINION_MAX_RESET_CHECK; + minioninfo->hfree_list = k_new_list("History", sizeof(HIST_ITEM), + ALLOC_HIST_ITEMS, LIMIT_HIST_ITEMS, true); + for (i = 0; i < (int)MINION_CHIPS; i++) + minioninfo->hchip_list[i] = k_new_store(minioninfo->hfree_list); + + minioninfo->pfree_list = k_new_list("Performance", sizeof(PERF_ITEM), + ALLOC_PERF_ITEMS, LIMIT_PERF_ITEMS, true); + for (i = 0; i < (int)MINION_CHIPS; i++) + minioninfo->p_list[i] = k_new_store(minioninfo->pfree_list); + + minioninfo->xfree_list = k_new_list("0xff", sizeof(XFF_ITEM), + ALLOC_XFF_ITEMS, LIMIT_XFF_ITEMS, true); + minioninfo->xff_list = k_new_store(minioninfo->xfree_list); + + cgsem_init(&(minioninfo->task_ready)); + cgsem_init(&(minioninfo->nonce_ready)); + cgsem_init(&(minioninfo->scan_work)); + + minioninfo->initialised = true; + + dupalloc(minioncgpu, 10); + + return; + +cleanup: + close(minioninfo->gpiointfd); + 
close(minioninfo->spifd); + mutex_destroy(&(minioninfo->sta_lock)); + mutex_destroy(&(minioninfo->spi_lock)); +unalloc: + free(minioninfo); + free(minioncgpu); +} + +static char *minion_api_set(struct cgpu_info *minioncgpu, char *option, char *setting, char *replybuf) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + int chip, val; + char *colon; + + if (strcasecmp(option, "help") == 0) { + sprintf(replybuf, "reset: chip 0-%d freq: 0-%d:%d-%d " + "ledcount: 0-100 ledlimit: 0-200 " + "spidelay: 0-9999 spireset i|s0-9999 " + "spisleep: 0-9999", + minioninfo->chips - 1, + minioninfo->chips - 1, + MINION_FREQ_MIN, MINION_FREQ_MAX); + return replybuf; + } + + if (strcasecmp(option, "reset") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing chip to reset"); + return replybuf; + } + + chip = atoi(setting); + if (chip < 0 || chip >= minioninfo->chips) { + sprintf(replybuf, "invalid reset: chip '%s' valid range 0-%d", + setting, + minioninfo->chips); + return replybuf; + } + + if (!minioninfo->has_chip[chip]) { + sprintf(replybuf, "unable to reset chip %d - chip disabled", + chip); + return replybuf; + } + minioninfo->flag_reset[chip] = true; + return NULL; + } + + // This sets up a freq step up/down to the given freq without a reset + if (strcasecmp(option, "freq") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing chip:freq"); + return replybuf; + } + + colon = strchr(setting, ':'); + if (!colon) { + sprintf(replybuf, "missing ':' for chip:freq"); + return replybuf; + } + + *(colon++) = '\0'; + if (!*colon) { + sprintf(replybuf, "missing freq in chip:freq"); + return replybuf; + } + + chip = atoi(setting); + if (chip < 0 || chip >= minioninfo->chips) { + sprintf(replybuf, "invalid freq: chip '%s' valid range 0-%d", + setting, + minioninfo->chips); + return replybuf; + } + + if (!minioninfo->has_chip[chip]) { + sprintf(replybuf, "unable to modify chip %d - chip not enabled", + chip); + return 
replybuf; + } + + val = atoi(colon); + if (val < MINION_FREQ_MIN || val > MINION_FREQ_MAX) { + sprintf(replybuf, "invalid freq: '%s' valid range %d-%d", + setting, + MINION_FREQ_MIN, MINION_FREQ_MAX); + return replybuf; + } + + int want_freq = val - (val % MINION_FREQ_FACTOR); + int start_freq = minioninfo->init_freq[chip]; + int freqms; + + if (want_freq != start_freq) { + minioninfo->changing[chip] = false; + freqms = opt_minion_freqchange; + freqms /= ((want_freq - start_freq) / MINION_FREQ_FACTOR); + if (freqms < 0) + freqms = -freqms; + minioninfo->freqms[chip] = freqms; + minioninfo->want_freq[chip] = want_freq; + cgtime(&(minioninfo->lastfreq[chip])); + minioninfo->changing[chip] = true; + } + + return NULL; + } + + if (strcasecmp(option, "ledcount") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing ledcount value"); + return replybuf; + } + + val = atoi(setting); + if (val < 0 || val > 100) { + sprintf(replybuf, "invalid ledcount: '%s' valid range 0-100", + setting); + return replybuf; + } + + opt_minion_ledcount = val; + return NULL; + } + + if (strcasecmp(option, "ledlimit") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing ledlimit value"); + return replybuf; + } + + val = atoi(setting); + if (val < 0 || val > 200) { + sprintf(replybuf, "invalid ledlimit: GHs '%s' valid range 0-200", + setting); + return replybuf; + } + + opt_minion_ledlimit = val; + return NULL; + } + + if (strcasecmp(option, "spidelay") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing spidelay value"); + return replybuf; + } + + val = atoi(setting); + if (val < 0 || val > 9999) { + sprintf(replybuf, "invalid spidelay: ms '%s' valid range 0-9999", + setting); + return replybuf; + } + + opt_minion_spidelay = val; + return NULL; + } + + if (strcasecmp(option, "spireset") == 0) { + bool is_io = true; + + if (!setting || !*setting) { + sprintf(replybuf, "missing spireset value"); + return replybuf; + } + + switch (tolower(*setting)) { + 
case 'i': + is_io = true; + break; + case 's': + is_io = false; + break; + default: + sprintf(replybuf, "invalid spireset: '%s' must start with i or s", + setting); + return replybuf; + } + val = atoi(setting+1); + if (val < 0 || val > 9999) { + sprintf(replybuf, "invalid spireset: %c '%s' valid range 0-9999", + *setting, setting+1); + return replybuf; + } + + minioninfo->spi_reset_io = is_io; + minioninfo->spi_reset_count = val; + minioninfo->last_spi_reset = time(NULL); + + return NULL; + } + + if (strcasecmp(option, "spisleep") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing spisleep value"); + return replybuf; + } + + val = atoi(setting); + if (val < 0 || val > 9999) { + sprintf(replybuf, "invalid spisleep: ms '%s' valid range 0-9999", + setting); + return replybuf; + } + + opt_minion_spisleep = val; + return NULL; + } + + if (strcasecmp(option, "spiusec") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing spiusec value"); + return replybuf; + } + + val = atoi(setting); + if (val < 0 || val > 9999) { + sprintf(replybuf, "invalid spiusec: '%s' valid range 0-9999", + setting); + return replybuf; + } + + opt_minion_spiusec = val; + return NULL; + } + + sprintf(replybuf, "Unknown option: %s", option); + return replybuf; +} + +static void minion_identify(__maybe_unused struct cgpu_info *minioncgpu) +{ + // flash a led +} + +/* + * SPI/ioctl write thread + * Non urgent work is to keep the queue full + * Urgent work is when an LP occurs (or the queue is empty/low) + */ +static void *minion_spi_write(void *userdata) +{ + struct cgpu_info *minioncgpu = (struct cgpu_info *)userdata; + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + K_ITEM *item, *tail, *task, *work; + TASK_ITEM *titem; + + applog(MINION_LOG, "%s%i: SPI writing...", + minioncgpu->drv->name, minioncgpu->device_id); + + // Wait until we're ready + while (minioncgpu->shutdown == false) { + if (minioninfo->initialised) { + break; + } + 
		cgsleep_ms(1); // asap to start mining
	}

	// TODO: combine all urgent into a single I/O?
	// Then combine all state 1 for the same chip into a single I/O ?
	// (then again for state 2?)
	while (minioncgpu->shutdown == false) {
		item = NULL;
		// Prefer the oldest urgent task, otherwise take the oldest (tail) task
		K_WLOCK(minioninfo->task_list);
		tail = minioninfo->task_list->tail;
		if (tail) {
			// Find first urgent item
			item = tail;
			while (item && !(DATA_TASK(item)->urgent))
				item = item->prev;

			// No urgent items, just do the tail
			if (!item)
				item = tail;

			k_unlink_item(minioninfo->task_list, item);
		}
		K_WUNLOCK(minioninfo->task_list);

		if (item) {
			bool do_txrx = true;
			bool store_reply = true;
			struct timeval now;
			double howlong;
			int i;

			titem = DATA_TASK(item);

			// Only known register addresses get sent; anything else is a bug
			switch (titem->address) {
				// TODO: case MINION_SYS_TEMP_CTL:
				// TODO: case MINION_SYS_FREQ_CTL:
				case READ_ADDR(MINION_SYS_CHIP_STA):
				case WRITE_ADDR(MINION_SYS_SPI_LED):
				case WRITE_ADDR(MINION_SYS_RSTN_CTL):
				case WRITE_ADDR(MINION_SYS_INT_CLR):
				case READ_ADDR(MINION_SYS_IDLE_CNT):
				case READ_ADDR(MINION_CORE_ENA0_31):
				case READ_ADDR(MINION_CORE_ENA32_63):
				case READ_ADDR(MINION_CORE_ENA64_95):
				case READ_ADDR(MINION_CORE_ENA96_98):
				case READ_ADDR(MINION_CORE_ACT0_31):
				case READ_ADDR(MINION_CORE_ACT32_63):
				case READ_ADDR(MINION_CORE_ACT64_95):
				case READ_ADDR(MINION_CORE_ACT96_98):
					store_reply = false;
					break;
				case WRITE_ADDR(MINION_QUE_0):
//applog(LOG_ERR, "%s%i: ZZZ send task_id 0x%04x - chip %d", minioncgpu->drv->name, minioncgpu->device_id, titem->task_id, titem->chip);
					store_reply = false;
					break;
				default:
					// Unknown address: don't touch the hardware, flag the task
					do_txrx = false;
					titem->reply = MINION_UNEXPECTED_TASK;
					applog(LOG_ERR, "%s%i: Unexpected task address 0x%02x (%s)",
							minioncgpu->drv->name, minioncgpu->device_id,
							(unsigned int)(titem->address),
							addr2txt(titem->address));

					break;
			}

			if (do_txrx) {
				// Track how long work items waited before being sent (stats only)
				if (titem->witem) {
					cgtime(&now);
					howlong = tdiff(&now, &(DATA_WORK(titem->witem)->created));
					minioninfo->wt_work++;
					minioninfo->wt_time += howlong;
					if (minioninfo->wt_min == 0 || minioninfo->wt_min > howlong)
						minioninfo->wt_min = howlong;
					else if (minioninfo->wt_max < howlong)
						minioninfo->wt_max = howlong;
					for (i = 0; i < TIME_BANDS; i++) {
						if (howlong < time_bands[i]) {
							minioninfo->wt_bands[i]++;
							break;
						}
					}
					// Fell past the last band boundary
					if (i >= TIME_BANDS)
						minioninfo->wt_bands[TIME_BANDS]++;
				}

				// The actual SPI transfer
				minion_txrx(titem);

				int chip = titem->chip;
				// Post-I/O handling per register address
				switch (titem->address) {
					case READ_ADDR(MINION_SYS_CHIP_STA):
						if (titem->reply >= (int)(titem->osiz)) {
							uint8_t *rep = &(titem->rbuf[titem->osiz - titem->rsiz]);
							mutex_lock(&(minioninfo->sta_lock));
							minioninfo->chip_status[chip].temp = STA_TEMP(rep);
							minioninfo->chip_status[chip].cores = STA_CORES(rep);
							minioninfo->chip_status[chip].freq = STA_FREQ(rep);
							mutex_unlock(&(minioninfo->sta_lock));

							if (minioninfo->chip_status[chip].overheat) {
								// Overheated chip has cooled back into range?
								switch (STA_TEMP(rep)) {
									case MINION_TEMP_40:
									case MINION_TEMP_60:
									case MINION_TEMP_80:
										cgtime(&(minioninfo->chip_status[chip].lastrecover));
										minioninfo->chip_status[chip].overheat = false;
										applog(LOG_WARNING, "%s%d: chip %d cooled, restarting",
												    minioncgpu->drv->name,
												    minioncgpu->device_id,
												    chip);
										// NOTE(review): lastrecover is set twice here - looks
										// redundant but harmless; confirm before removing
										cgtime(&(minioninfo->chip_status[chip].lastrecover));
										minioninfo->chip_status[chip].overheattime +=
											tdiff(&(minioninfo->chip_status[chip].lastrecover),
												&(minioninfo->chip_status[chip].lastoverheat));
										break;
									default:
										break;
								}
							} else {
								if (opt_minion_overheat && STA_TEMP(rep) == MINION_TEMP_OVER) {
									cgtime(&(minioninfo->chip_status[chip].lastoverheat));
									minioninfo->chip_status[chip].overheat = true;
									applog(LOG_WARNING, "%s%d: chip %d overheated! idling",
											    minioncgpu->drv->name,
											    minioncgpu->device_id,
											    chip);
									// Queue an urgent flush to idle the chip
									// (task_list shares tfree_list's lock)
									K_WLOCK(minioninfo->tfree_list);
									task = k_unlink_head(minioninfo->tfree_list);
									DATA_TASK(task)->tid = ++(minioninfo->next_tid);
									DATA_TASK(task)->chip = chip;
									DATA_TASK(task)->write = true;
									DATA_TASK(task)->address = MINION_SYS_RSTN_CTL;
									DATA_TASK(task)->task_id = 0; // ignored
									DATA_TASK(task)->wsiz = MINION_SYS_SIZ;
									DATA_TASK(task)->rsiz = 0;
									DATA_TASK(task)->wbuf[0] = SYS_RSTN_CTL_FLUSH;
									DATA_TASK(task)->wbuf[1] = 0;
									DATA_TASK(task)->wbuf[2] = 0;
									DATA_TASK(task)->wbuf[3] = 0;
									DATA_TASK(task)->urgent = true;
									k_add_head(minioninfo->task_list, task);
									K_WUNLOCK(minioninfo->tfree_list);
									minioninfo->chip_status[chip].overheats++;
								}
							}
						}
						break;
					case READ_ADDR(MINION_SYS_IDLE_CNT):
						{
							uint32_t *cnt = (uint32_t *)&(titem->rbuf[titem->osiz - titem->rsiz]);
							minioninfo->chip_status[chip].idle = *cnt;
						}
						break;
					case WRITE_ADDR(MINION_SYS_RSTN_CTL):
						// Do this here after it has actually been flushed
						if ((titem->wbuf[0] & SYS_RSTN_CTL_FLUSH) == SYS_RSTN_CTL_FLUSH) {
							int cnt = 0;
							// Mark all work queued on this chip as stale
							K_WLOCK(minioninfo->wwork_list);
							work = minioninfo->wchip_list[chip]->head;
							while (work) {
								cnt++;
								DATA_WORK(work)->stale = true;
								work = work->next;
							}
							minioninfo->chip_status[chip].chipwork = 0;
							minioninfo->chip_status[chip].realwork = 0;
							minioninfo->wchip_staled += cnt;
#if MINION_SHOW_IO
							applog(IOCTRL_LOG, "RSTN chip %d (cnt=%d) cw0=%u rw0=%u qw=%u",
									   chip, cnt,
									   minioninfo->chip_status[chip].chipwork,
									   minioninfo->chip_status[chip].realwork,
									   minioninfo->chip_status[chip].quework);
#endif
							K_WUNLOCK(minioninfo->wwork_list);
						}
						break;
					case WRITE_ADDR(MINION_QUE_0):
						// Work is now on-chip: move it from the que list to the chip list
						K_WLOCK(minioninfo->wchip_list[chip]);
						k_unlink_item(minioninfo->wque_list[chip], titem->witem);
						k_add_head(minioninfo->wchip_list[chip], titem->witem);
						DATA_WORK(titem->witem)->ioseq = titem->ioseq;
						minioninfo->chip_status[chip].quework--;
minioninfo->chip_status[chip].chipwork++; +#if MINION_SHOW_IO + applog(IOCTRL_LOG, "QUE_0 chip %d cw+1=%u rw=%u qw-1=%u", + chip, + minioninfo->chip_status[chip].chipwork, + minioninfo->chip_status[chip].realwork, + minioninfo->chip_status[chip].quework); +#endif + K_WUNLOCK(minioninfo->wchip_list[chip]); + applog(LOG_DEBUG, "%s%d: task 0x%04x sent to chip %d", + minioncgpu->drv->name, minioncgpu->device_id, + titem->task_id, chip); + break; + case READ_ADDR(MINION_CORE_ENA0_31): + case READ_ADDR(MINION_CORE_ENA32_63): + case READ_ADDR(MINION_CORE_ENA64_95): + case READ_ADDR(MINION_CORE_ENA96_98): + { + uint32_t *rep = (uint32_t *)&(titem->rbuf[titem->osiz - titem->rsiz]); + int off = titem->address - READ_ADDR(MINION_CORE_ENA0_31); + minioninfo->chip_core_ena[off][chip] = *rep; + } + break; + case READ_ADDR(MINION_CORE_ACT0_31): + case READ_ADDR(MINION_CORE_ACT32_63): + case READ_ADDR(MINION_CORE_ACT64_95): + case READ_ADDR(MINION_CORE_ACT96_98): + { + uint32_t *rep = (uint32_t *)&(titem->rbuf[titem->osiz - titem->rsiz]); + int off = titem->address - READ_ADDR(MINION_CORE_ACT0_31); + minioninfo->chip_core_act[off][chip] = *rep; + } + break; + case WRITE_ADDR(MINION_SYS_INT_CLR): + case WRITE_ADDR(MINION_SYS_SPI_LED): + break; + default: + break; + } + } + + K_WLOCK(minioninfo->treply_list); + if (store_reply) + k_add_head(minioninfo->treply_list, item); + else + k_free_head(minioninfo->tfree_list, item); + K_WUNLOCK(minioninfo->treply_list); + + /* + * Always check for the next task immediately if we just did one + * i.e. 
empty the task queue + */ + continue; + } + cgsem_mswait(&(minioninfo->task_ready), MINION_TASK_mS); + } + return NULL; +} + +/* + * SPI/ioctl reply thread + * ioctl done every interrupt or MINION_REPLY_mS checking for results + */ +static void *minion_spi_reply(void *userdata) +{ + struct cgpu_info *minioncgpu = (struct cgpu_info *)userdata; + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct minion_result *result1, *result2, *use1, *use2; + K_ITEM *item; + TASK_ITEM fifo_task, res1_task, res2_task; + int chip, resoff; + bool somelow; + struct timeval now; + +#if ENABLE_INT_NONO + uint64_t ioseq; + TASK_ITEM clr_task; + struct pollfd pfd; + struct minion_header *head; + uint8_t rbuf[MINION_BUFSIZ]; + uint8_t wbuf[MINION_BUFSIZ]; + uint32_t wsiz, rsiz; + int ret, reply; + bool gotreplies = false; +#endif + + applog(MINION_LOG, "%s%i: SPI replying...", + minioncgpu->drv->name, minioncgpu->device_id); + + // Wait until we're ready + while (minioncgpu->shutdown == false) { + if (minioninfo->initialised) { + break; + } + cgsleep_ms(2); + } + + fifo_task.chip = 0; + fifo_task.write = false; + fifo_task.address = MINION_SYS_FIFO_STA; + fifo_task.wsiz = 0; + fifo_task.rsiz = MINION_SYS_SIZ; + + res1_task.chip = 0; + res1_task.write = false; + if (minreread) + res1_task.address = MINION_RES_PEEK; + else + res1_task.address = MINION_RES_DATA; + res1_task.wsiz = 0; + res1_task.rsiz = MINION_RES_DATA_SIZ; + + res2_task.chip = 0; + res2_task.write = false; + res2_task.address = MINION_RES_DATA; + res2_task.wsiz = 0; + res2_task.rsiz = MINION_RES_DATA_SIZ; + +#if ENABLE_INT_NONO + // Clear RESULT_INT after reading all results + clr_task.chip = 0; + clr_task.write = true; + clr_task.address = MINION_SYS_INT_CLR; + clr_task.wsiz = MINION_SYS_SIZ; + clr_task.rsiz = 0; + clr_task.wbuf[0] = MINION_RESULT_INT; + clr_task.wbuf[1] = 0; + clr_task.wbuf[2] = 0; + clr_task.wbuf[3] = 0; + + memset(&pfd, 0, sizeof(pfd)); + pfd.fd = 
minioninfo->gpiointfd; + pfd.events = POLLPRI; + + head = (struct minion_header *)wbuf; + SET_HEAD_SIZ(head, MINION_SYS_SIZ); + wsiz = HSIZE() + MINION_SYS_SIZ; + rsiz = MINION_SYS_SIZ; // for READ, use 0 for WRITE +#endif + + somelow = false; + while (minioncgpu->shutdown == false) { + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) { + int tries = 0; + uint8_t res, cmd; + + if (minioninfo->changing[chip] && + ms_tdiff(&now, &minioninfo->lastfreq[chip]) > + minioninfo->freqms[chip]) { + int want_freq = minioninfo->want_freq[chip]; + int init_freq = minioninfo->init_freq[chip]; + + if (want_freq > init_freq) { + minioninfo->init_freq[chip] += MINION_FREQ_FACTOR; + init_freq += MINION_FREQ_FACTOR; + + set_freq(minioncgpu, minioninfo, chip, init_freq); + } else if (want_freq < init_freq) { + minioninfo->init_freq[chip] -= MINION_FREQ_FACTOR; + init_freq -= MINION_FREQ_FACTOR; + + set_freq(minioncgpu, minioninfo, chip, init_freq); + } + + if (init_freq == want_freq) + minioninfo->changing[chip] = false; + } + + while (++tries < 4) { + res = cmd = 0; + fifo_task.chip = chip; + fifo_task.reply = 0; + minion_txrx(&fifo_task); + if (fifo_task.reply <= 0) { + minioninfo->spi_errors++; + minioninfo->fifo_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + break; + } else { + if (fifo_task.reply < (int)(fifo_task.osiz)) { + char *buf = bin2hex((unsigned char *)(&(fifo_task.rbuf[fifo_task.osiz - fifo_task.rsiz])), + (int)(fifo_task.rsiz)); + applog(LOG_DEBUG, "%s%i: Chip %d Bad fifo reply (%s) size %d, should be %d", + minioncgpu->drv->name, minioncgpu->device_id, + chip, buf, + fifo_task.reply, (int)(fifo_task.osiz)); + free(buf); + minioninfo->spi_errors++; + minioninfo->fifo_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + } else { + if (fifo_task.reply > (int)(fifo_task.osiz)) { + applog(LOG_DEBUG, "%s%i: Chip %d Unexpected fifo reply size %d, " + "expected only %d", + minioncgpu->drv->name, minioncgpu->device_id, + 
chip, fifo_task.reply, (int)(fifo_task.osiz)); + } + res = FIFO_RES(fifo_task.rbuf, fifo_task.osiz - fifo_task.rsiz); + cmd = FIFO_CMD(fifo_task.rbuf, fifo_task.osiz - fifo_task.rsiz); + // valid reply? + if (res <= MINION_QUE_MAX && cmd <= MINION_QUE_MAX) + break; + + applog(LOG_DEBUG, "%s%i: Chip %d Bad fifo reply res %d (max is %d) " + "cmd %d (max is %d)", + minioncgpu->drv->name, minioncgpu->device_id, + chip, (int)res, MINION_QUE_MAX, + (int)cmd, MINION_QUE_MAX); + minioninfo->spi_errors++; + minioninfo->fifo_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + } + } + } + + // Give up on this chip this round + if (tries >= 4) + continue; + + K_WLOCK(minioninfo->wwork_list); + // have to just assume it's always correct since we can't verify it + minioninfo->chip_status[chip].realwork = (uint32_t)cmd; +#if MINION_SHOW_IO + applog(IOCTRL_LOG, "SetReal chip %d cw=%u rw==%u qw=%u", + chip, + minioninfo->chip_status[chip].chipwork, + minioninfo->chip_status[chip].realwork, + minioninfo->chip_status[chip].quework); +#endif + K_WUNLOCK(minioninfo->wwork_list); + + if (cmd < MINION_QUE_LOW) { + somelow = true; + // Flag it in case the count is wrong + K_WLOCK(minioninfo->wwork_list); + minioninfo->chip_status[chip].islow = true; + minioninfo->chip_status[chip].lowcount = (int)cmd; + K_WUNLOCK(minioninfo->wwork_list); + } + + /* + * Chip has results? + * You can't request results unless it says it has some. + * We don't ever directly flush the output queue while processing + * (except at startup) so the answer is always valid + * i.e. there could be more, but never less ... 
				 * unless the reply was corrupt
				 */
				if (res > MINION_MAX_RES) {
					// Corrupt count - read a single result and rely on the next pass
					applog(LOG_ERR, "%s%i: Large work reply chip %d res %d",
							minioncgpu->drv->name, minioncgpu->device_id, chip, res);
					minioninfo->spi_errors++;
					minioninfo->fifo_spi_errors[chip]++;
					minioninfo->res_err_count[chip]++;
					res = 1; // Just read one result
				}
//else
//applog(LOG_ERR, "%s%i: work reply res %d", minioncgpu->drv->name, minioncgpu->device_id, res);
				// Read the reported results in batches of at most MINION_MAX_RES
				uint8_t left = res;
				int peeks = 0;
				while (left > 0) {
					res = left;
					if (res > MINION_MAX_RES)
						res = MINION_MAX_RES;
					left -= res;
repeek:
					res1_task.chip = chip;
					res1_task.reply = 0;
					res1_task.rsiz = res * MINION_RES_DATA_SIZ;
					minion_txrx(&res1_task);
					if (res1_task.reply <= 0)
						break;
					else {
						cgtime(&now);
						if (res1_task.reply < (int)MINION_RES_DATA_SIZ) {
							char *buf = bin2hex((unsigned char *)(&(res1_task.rbuf[res1_task.osiz - res1_task.rsiz])), (int)(res1_task.rsiz));
							applog(LOG_ERR, "%s%i: Chip %d Bad work reply (%s) size %d, should be at least %d",
									minioncgpu->drv->name, minioncgpu->device_id,
									chip, buf,
									res1_task.reply, (int)MINION_RES_DATA_SIZ);
							free(buf);
							minioninfo->spi_errors++;
							minioninfo->res_spi_errors[chip]++;
							minioninfo->res_err_count[chip]++;
						} else {
							if (res1_task.reply != (int)(res1_task.osiz)) {
								applog(LOG_ERR, "%s%i: Chip %d Unexpected work reply size %d, expected %d",
										minioncgpu->drv->name, minioncgpu->device_id,
										chip, res1_task.reply, (int)(res1_task.osiz));
								minioninfo->spi_errors++;
								minioninfo->res_spi_errors[chip]++;
								minioninfo->res_err_count[chip]++;
								// Can retry a PEEK without losing data
								if (minreread) {
									if (++peeks < 4)
										goto repeek;
									break;
								}
							}

							// When rereading, do the destructive DATA read to
							// confirm (and consume) what PEEK returned
							if (minreread) {
								res2_task.chip = chip;
								res2_task.reply = 0;
								res2_task.rsiz = res * MINION_RES_DATA_SIZ;
								minion_txrx(&res2_task);
								if (res2_task.reply <= 0) {
									minioninfo->spi_errors++;
									minioninfo->res_spi_errors[chip]++;
									minioninfo->res_err_count[chip]++;
								}
							}

							// Walk each result record in the reply buffer
							for (resoff = res1_task.osiz - res1_task.rsiz; resoff < (int)res1_task.osiz; resoff += MINION_RES_DATA_SIZ) {
								result1 = (struct minion_result *)&(res1_task.rbuf[resoff]);
								if (minreread && resoff < (int)res2_task.osiz)
									result2 = (struct minion_result *)&(res2_task.rbuf[resoff]);
								else
									result2 = NULL;

								if (IS_RESULT(result1) || (minreread && result2 && IS_RESULT(result2))) {
									K_WLOCK(minioninfo->rfree_list);
									item = k_unlink_head(minioninfo->rfree_list);
									K_WUNLOCK(minioninfo->rfree_list);

									// Prefer the PEEK copy; fall back to the DATA copy
									if (IS_RESULT(result1)) {
										use1 = result1;
										if (minreread && result2 && IS_RESULT(result2))
											use2 = result2;
										else
											use2 = NULL;
									} else {
										use1 = result2;
										use2 = NULL;
										minioninfo->use_res2[chip]++;
									}

									//DATA_RES(item)->chip = RES_CHIPID(use1);
									// We can avoid any SPI transmission error of the chip number
									DATA_RES(item)->chip = (uint8_t)chip;
									if (minioninfo->chipid[chip] != RES_CHIPID(use1)) {
										minioninfo->spi_errors++;
										minioninfo->res_spi_errors[chip]++;
										minioninfo->res_err_count[chip]++;
									}
									if (use2 && minioninfo->chipid[chip] != RES_CHIPID(use2)) {
										minioninfo->spi_errors++;
										minioninfo->res_spi_errors[chip]++;
										minioninfo->res_err_count[chip]++;
									}
									DATA_RES(item)->core = RES_CORE(use1);
									DATA_RES(item)->task_id = RES_TASK(use1);
									DATA_RES(item)->nonce = RES_NONCE(use1);
									DATA_RES(item)->no_nonce = !RES_GOLD(use1);
									memcpy(&(DATA_RES(item)->when), &now, sizeof(now));
									applog(LOG_DEBUG, "%s%i: reply task_id 0x%04x"
											  " - chip %d - gold %d",
											  minioncgpu->drv->name,
											  minioncgpu->device_id,
											  RES_TASK(use1),
											  (int)RES_CHIPID(use1),
											  (int)RES_GOLD(use1));

									// Carry the second (DATA) copy so nonce checking
									// can fall back to it on a mismatch
									if (!use2)
										DATA_RES(item)->another = false;
									else {
										DATA_RES(item)->another = true;
										DATA_RES(item)->task_id2 = RES_TASK(use2);
										DATA_RES(item)->nonce2 = RES_NONCE(use2);
									}
//if (RES_GOLD(use1))
//applog(MINTASK_LOG, "%s%i: found a result chip %d core %d task 0x%04x nonce 0x%08x gold=%d", minioncgpu->drv->name, minioncgpu->device_id, DATA_RES(item)->chip, DATA_RES(item)->core, DATA_RES(item)->task_id, DATA_RES(item)->nonce, (int)RES_GOLD(use1));

									// Hand the result to the nonce checking thread
									K_WLOCK(minioninfo->rnonce_list);
									k_add_head(minioninfo->rnonce_list, item);
									K_WUNLOCK(minioninfo->rnonce_list);

									if (!(minioninfo->chip_status[chip].first_nonce.tv_sec)) {
										cgtime(&(minioninfo->chip_status[chip].first_nonce));
										minioninfo->chip_status[chip].from_first_good = 0;
									}

									cgsem_post(&(minioninfo->nonce_ready));
								} else {
									minioninfo->res_err_count[chip]++;
									applog(MINTASK_LOG, "%s%i: Invalid res0 task_id 0x%04x - chip %d",
											    minioncgpu->drv->name, minioncgpu->device_id,
											    RES_TASK(result1), chip);
									if (minreread && result2) {
										applog(MINTASK_LOG, "%s%i: Invalid res1 task_id 0x%04x - chip %d",
												    minioncgpu->drv->name, minioncgpu->device_id,
												    RES_TASK(result2), chip);
									}
								}
							}
						}
					}
				}
			}
		}

		// Wake the work scanner if any chip's queue got low
		if (somelow)
			cgsem_post(&(minioninfo->scan_work));

#if ENABLE_INT_NONO
		if (gotreplies)
			minion_txrx(&clr_task);
#endif

#if !ENABLE_INT_NONO
		cgsleep_ms(MINION_REPLY_mS);
#else
		// TODO: this is going to require a bit of tuning with 2TH/s mining:
		// The interrupt size MINION_RESULT_INT_SIZE should be high enough to expect
		// most chips to have some results but low enough to cause negligible latency
		// If all chips don't have some results when an interrupt occurs, then it is a waste
		// since we have to check all chips for results anyway since we don't know which one
		// caused the interrupt
		// MINION_REPLY_mS needs to be low enough in the case of bad luck where no chip
		// finds MINION_RESULT_INT_SIZE results in a short amount of time, so we go check
		// them all anyway - to avoid high latency when there are only a few results due to low luck
		ret = poll(&pfd, 1, MINION_REPLY_mS);
		if (ret > 0) {
			bool gotres;
			int c;

			minioninfo->interrupts++;

			// NOTE(review): return value of read() is ignored here - presumably
			// just draining the GPIO edge; confirm this cannot fail in a way
			// that should be handled
			read(minioninfo->gpiointfd, &c, 1);

//			applog(LOG_ERR, "%s%i: Interrupt2",
//					minioncgpu->drv->name,
//					minioncgpu->device_id);

			// Ask every chip which interrupt(s) it raised
			gotres = false;
			for (chip = 0; chip < (int)MINION_CHIPS; chip++) {
				if (minioninfo->has_chip[chip]) {
					SET_HEAD_READ(head, MINION_SYS_INT_STA);
					head->chipid = minioninfo->chipid[chip];
					reply = do_ioctl(CHIP_PIN(chip), wbuf, wsiz, rbuf, rsiz, &ioseq);
					if (reply != (int)wsiz) {
						applog(LOG_ERR, "%s: chip %d int status returned %d"
								" (should be %d)",
								minioncgpu->drv->dname,
								chip, reply, (int)wsiz);
					}

					// Record the raw interrupt status for the API/stats
					snprintf(minioninfo->last_interrupt,
						 sizeof(minioninfo->last_interrupt),
						 "%d %d 0x%02x%02x%02x%02x%02x%02x%02x%02x %d %d 0x%02x %d %d",
						 (int)(minioninfo->interrupts), chip,
						 rbuf[0], rbuf[1], rbuf[2], rbuf[3],
						 rbuf[4], rbuf[5], rbuf[6], rbuf[7],
						 (int)wsiz, (int)rsiz, rbuf[wsiz - rsiz],
						 rbuf[wsiz - rsiz] & MINION_RESULT_INT,
						 rbuf[wsiz - rsiz] & MINION_CMD_INT);

					if ((rbuf[wsiz - rsiz] & MINION_RESULT_INT) != 0) {
						gotres = true;
						(minioninfo->result_interrupts)++;
//						applog(LOG_ERR, "%s%i: chip %d got RES interrupt",
//								minioncgpu->drv->name,
//								minioncgpu->device_id,
//								chip);
					}

					if ((rbuf[wsiz - rsiz] & MINION_CMD_INT) != 0) {
						// Work queue is empty
						(minioninfo->command_interrupts)++;
//						applog(LOG_ERR, "%s%i: chip %d got CMD interrupt",
//								minioncgpu->drv->name,
//								minioncgpu->device_id,
//								chip);
					}

//					char *tmp;
//					tmp = bin2hex(rbuf, wsiz);
//					applog(LOG_ERR, "%s%i: chip %d interrupt: %s",
//							minioncgpu->drv->name,
//							minioncgpu->device_id,
//							chip, tmp);
//					free(tmp);

					// Don't clear either interrupt until after send/recv
				}
			}

			// Doing this last means we can't miss an interrupt
			if (gotres)
				cgsem_post(&(minioninfo->scan_work));
		}
#endif
	}

	return NULL;
}

/*
 * Find the matching work item for this chip
 * Discard any older work items for this chip
 */

// Classification of an incoming result from a chip
enum nonce_state {
	NONCE_GOOD_NONCE,
	NONCE_NO_NONCE,
	NONCE_DUP_NONCE,
	NONCE_BAD_NONCE,
	NONCE_BAD_WORK,
	NONCE_NO_WORK,
	NONCE_SPI_ERR
};

// Complete and discard all work on 'chip' older (by ioseq) than 'item';
// with no_nonce, 'item' itself is also completed.
// Caller must hold the wchip_list lock.
static void cleanup_older(struct cgpu_info *minioncgpu, int chip, K_ITEM *item, bool no_nonce)
{
+ struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + K_ITEM *tail; +// bool errs; + + /* remove older ioseq work items + no_nonce means this 'item' has finished also */ + tail = minioninfo->wchip_list[chip]->tail; + while (tail && (DATA_WORK(tail)->ioseq < DATA_WORK(item)->ioseq)) { + k_unlink_item(minioninfo->wchip_list[chip], tail); + if (!(DATA_WORK(tail)->stale)) { + minioninfo->chip_status[chip].chipwork--; +#if MINION_SHOW_IO + applog(IOCTRL_LOG, "COld chip %d cw-1=%u rw=%u qw=%u", + chip, + minioninfo->chip_status[chip].chipwork, + minioninfo->chip_status[chip].realwork, + minioninfo->chip_status[chip].quework); +#endif +/* + // If it had no valid work (only errors) then it won't have been cleaned up + errs = (DATA_WORK(tail)->errors > 0); + applog(errs ? LOG_DEBUG : LOG_ERR, + applog(LOG_ERR, + "%s%i: discarded old task 0x%04x chip %d no reply errs=%d", + minioncgpu->drv->name, minioncgpu->device_id, + DATA_WORK(tail)->task_id, chip, DATA_WORK(tail)->errors); +*/ + } + applog(MINION_LOG, "%s%i: marking complete - old task 0x%04x chip %d", + minioncgpu->drv->name, minioncgpu->device_id, + DATA_WORK(tail)->task_id, chip); + if (DATA_WORK(tail)->rolled) + free_work(DATA_WORK(tail)->work); + else + work_completed(minioncgpu, DATA_WORK(tail)->work); + k_free_head(minioninfo->wfree_list, tail); + tail = minioninfo->wchip_list[chip]->tail; + } + if (no_nonce) { + if (!(DATA_WORK(item)->stale)) { + minioninfo->chip_status[chip].chipwork--; +#if MINION_SHOW_IO + applog(IOCTRL_LOG, "CONoN chip %d cw-1=%u rw=%u qw=%u", + chip, + minioninfo->chip_status[chip].chipwork, + minioninfo->chip_status[chip].realwork, + minioninfo->chip_status[chip].quework); +#endif + } + applog(MINION_LOG, "%s%i: marking complete - no_nonce task 0x%04x chip %d", + minioncgpu->drv->name, minioncgpu->device_id, + DATA_WORK(item)->task_id, chip); + if (DATA_WORK(item)->rolled) + free_work(DATA_WORK(item)->work); + else + work_completed(minioncgpu, 
DATA_WORK(item)->work); + } +} + +// Need to put it back in the list where it was - according to ioseq +static void restorework(struct minion_info *minioninfo, int chip, K_ITEM *item) +{ + K_ITEM *look; + + look = minioninfo->wchip_list[chip]->tail; + while (look && DATA_WORK(look)->ioseq < DATA_WORK(item)->ioseq) + look = look->prev; + if (!look) + k_add_head(minioninfo->wchip_list[chip], item); + else + k_insert_after(minioninfo->wchip_list[chip], item, look); +} + +static enum nonce_state oknonce(struct thr_info *thr, struct cgpu_info *minioncgpu, int chip, int core, + uint32_t task_id, uint32_t nonce, bool no_nonce, struct timeval *when, + bool another, uint32_t task_id2, uint32_t nonce2) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct timeval now; + K_ITEM *item, *tail; + uint32_t min_task_id, max_task_id; +// uint64_t chip_good; + bool redo; + + // if the chip has been disabled - but we don't do that - so not possible (yet) + if (!(minioninfo->has_chip[chip])) { + minioninfo->spi_errors++; + applog(MINTASK_LOG, "%s%i: nonce error chip %d not present", + minioncgpu->drv->name, minioncgpu->device_id, chip); + return NONCE_NO_WORK; + } + + if (core < 0 || core >= MINION_CORES) { + minioninfo->spi_errors++; + minioninfo->res_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + applog(MINTASK_LOG, "%s%i: SPI nonce error invalid core %d (chip %d)", + minioncgpu->drv->name, minioncgpu->device_id, core, chip); + + // use the fake core number so we don't discard the result + core = FAKE_CORE; + } + + if (no_nonce) + minioninfo->chip_nononces[chip]++; + else + minioninfo->chip_nonces[chip]++; + + redo = false; +retry: + K_WLOCK(minioninfo->wchip_list[chip]); + item = minioninfo->wchip_list[chip]->tail; + + if (!item) { + K_WUNLOCK(minioninfo->wchip_list[chip]); + minioninfo->spi_errors++; + minioninfo->res_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + applog(MINTASK_LOG, "%s%i: chip %d has no tasks 
(core %d task 0x%04x)", + minioncgpu->drv->name, minioncgpu->device_id, + chip, core, (int)task_id); + if (!no_nonce) { + minioninfo->untested_nonces++; + minioninfo->chip_err[chip]++; + } + return NONCE_NO_WORK; + } + + min_task_id = DATA_WORK(item)->task_id; + while (item) { + if (DATA_WORK(item)->task_id == task_id) + break; + + item = item->prev; + } + max_task_id = DATA_WORK(minioninfo->wchip_list[chip]->head)->task_id; + + if (!item) { + K_WUNLOCK(minioninfo->wchip_list[chip]); + if (another && task_id != task_id2) { + minioninfo->tasks_failed[chip]++; + task_id = task_id2; + redo = true; + goto retry; + } + + minioninfo->spi_errors++; + minioninfo->res_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + applog(MINTASK_LOG, "%s%i: chip %d core %d unknown task 0x%04x " + "(min=0x%04x max=0x%04x no_nonce=%d)", + minioncgpu->drv->name, minioncgpu->device_id, + chip, core, (int)task_id, (int)min_task_id, + (int)max_task_id, no_nonce); + if (!no_nonce) { + minioninfo->untested_nonces++; + minioninfo->chip_err[chip]++; + } + return NONCE_BAD_WORK; + } + if (redo) + minioninfo->tasks_recovered[chip]++; + + k_unlink_item(minioninfo->wchip_list[chip], item); + if (no_nonce) { + cleanup_older(minioncgpu, chip, item, no_nonce); + k_free_head(minioninfo->wfree_list, item); + K_WUNLOCK(minioninfo->wchip_list[chip]); + return NONCE_NO_NONCE; + } + K_WUNLOCK(minioninfo->wchip_list[chip]); + + minioninfo->tested_nonces++; + + redo = false; +retest: + if (test_nonce(DATA_WORK(item)->work, nonce)) { +/* + if (isdupnonce(minioncgpu, DATA_WORK(item)->work, nonce)) { + minioninfo->chip_dup[chip]++; + applog(LOG_WARNING, " ... 
nonce %02x%02x%02x%02x chip %d core %d task 0x%04x", + (nonce & 0xff), ((nonce >> 8) & 0xff), + ((nonce >> 16) & 0xff), ((nonce >> 24) & 0xff), + chip, core, task_id); + K_WLOCK(minioninfo->wchip_list[chip]); + restorework(minioninfo, chip, item); + K_WUNLOCK(minioninfo->wchip_list[chip]); + return NONCE_DUP_NONCE; + } +*/ +//applog(MINTASK_LOG, "%s%i: Valid Nonce chip %d core %d task 0x%04x nonce 0x%08x", minioncgpu->drv->name, minioncgpu->device_id, chip, core, task_id, nonce); +// + submit_tested_work(thr, DATA_WORK(item)->work); + + if (redo) + minioninfo->nonces_recovered[chip]++; + + /* chip_good = */ ++(minioninfo->chip_good[chip]); + minioninfo->chip_status[chip].from_first_good++; + minioninfo->core_good[chip][core]++; + DATA_WORK(item)->nonces++; + + mutex_lock(&(minioninfo->nonce_lock)); + minioninfo->new_nonces++; + mutex_unlock(&(minioninfo->nonce_lock)); + minioninfo->ok_nonces++; + + K_WLOCK(minioninfo->wchip_list[chip]); + cleanup_older(minioncgpu, chip, item, no_nonce); + restorework(minioninfo, chip, item); + K_WUNLOCK(minioninfo->wchip_list[chip]); + + // add to history and remove old history and keep track of the 2 reset marks + int chip_tmp; + cgtime(&now); + K_WLOCK(minioninfo->hfree_list); + item = k_unlink_head(minioninfo->hfree_list); + memcpy(&(DATA_HIST(item)->when), when, sizeof(*when)); + k_add_head(minioninfo->hchip_list[chip], item); + if (minioninfo->reset_mark[chip]) + minioninfo->reset_count[chip]++; + if (second_check && minioninfo->reset2_mark[chip]) + minioninfo->reset2_count[chip]++; + + // N.B. 
this also corrects each reset_mark/reset_count within each hchip_list + for (chip_tmp = 0; chip_tmp < (int)MINION_CHIPS; chip_tmp++) { + tail = minioninfo->hchip_list[chip_tmp]->tail; + while (tail && tdiff(&(DATA_HIST(tail)->when), &now) > MINION_HISTORY_s) { + if (minioninfo->reset_mark[chip] == tail) { + minioninfo->reset_mark[chip] = tail->prev; + minioninfo->reset_count[chip]--; + } + if (second_check && minioninfo->reset2_mark[chip] == tail) { + minioninfo->reset2_mark[chip] = tail->prev; + minioninfo->reset2_count[chip]--; + } + tail = k_unlink_tail(minioninfo->hchip_list[chip_tmp]); + k_add_head(minioninfo->hfree_list, item); + tail = minioninfo->hchip_list[chip_tmp]->tail; + } + if (!(minioninfo->reset_mark[chip])) { + minioninfo->reset_mark[chip] = minioninfo->hchip_list[chip]->tail; + minioninfo->reset_count[chip] = minioninfo->hchip_list[chip]->count; + } + if (second_check && !(minioninfo->reset2_mark[chip])) { + minioninfo->reset2_mark[chip] = minioninfo->hchip_list[chip]->tail; + minioninfo->reset2_count[chip] = minioninfo->hchip_list[chip]->count; + } + tail = minioninfo->reset_mark[chip]; + while (tail && tdiff(&(DATA_HIST(tail)->when), &now) > minioninfo->reset_time[chip]) { + tail = minioninfo->reset_mark[chip] = tail->prev; + minioninfo->reset_count[chip]--; + } + if (second_check) { + tail = minioninfo->reset2_mark[chip]; + while (tail && tdiff(&(DATA_HIST(tail)->when), &now) > minioninfo->reset2_time[chip]) { + tail = minioninfo->reset2_mark[chip] = tail->prev; + minioninfo->reset2_count[chip]--; + } + } + } + K_WUNLOCK(minioninfo->hfree_list); + +/* + // Reset the chip after 8 nonces found + if (chip_good == 8) { + memcpy(&(minioninfo->last_reset[chip]), &now, sizeof(now)); + init_chip(minioncgpu, minioninfo, chip); + } +*/ + + return NONCE_GOOD_NONCE; + } + + if (another && nonce != nonce2) { + minioninfo->nonces_failed[chip]++; + nonce = nonce2; + redo = true; + goto retest; + } + + DATA_WORK(item)->errors++; + 
K_WLOCK(minioninfo->wchip_list[chip]); + restorework(minioninfo, chip, item); + K_WUNLOCK(minioninfo->wchip_list[chip]); + + minioninfo->chip_bad[chip]++; + minioninfo->core_bad[chip][core]++; + inc_hw_errors(thr); +//applog(MINTASK_LOG, "%s%i: HW ERROR chip %d core %d task 0x%04x nonce 0x%08x", minioncgpu->drv->name, minioncgpu->device_id, chip, core, task_id, nonce); + + return NONCE_BAD_NONCE; +} + +/* Check each chip how long since the last nonce + * Should normally be a fraction of a second + * so (MINION_RESET_s * 1.5) will certainly be long enough, + * but also will avoid lots of resets if there is trouble getting work + * Should be longer than MINION_RESET_s to avoid interfering with normal resets */ +static void check_last_nonce(struct cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct timeval now; + K_ITEM *head; + double howlong; + int chip; + + cgtime(&now); + K_RLOCK(minioninfo->hfree_list); + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip] && !(minioninfo->changing[chip])) { + head = minioninfo->hchip_list[chip]->head; + if (head) { + howlong = tdiff(&now, &(DATA_HIST(head)->when)); + if (howlong > ((double)MINION_RESET_s * 1.5)) { + // Setup a reset + minioninfo->flag_reset[chip] = true; + minioninfo->do_reset[chip] = 0.0; + } + } + } + } + K_RUNLOCK(minioninfo->hfree_list); +} + +// Results checking thread +static void *minion_results(void *userdata) +{ + struct cgpu_info *minioncgpu = (struct cgpu_info *)userdata; + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct thr_info *thr; + int chip = 0, core = 0; + uint32_t task_id = 0; + uint32_t nonce = 0; + bool no_nonce = false; + struct timeval when; + bool another; + uint32_t task_id2 = 0; + uint32_t nonce2 = 0; + int last_check; + + applog(MINION_LOG, "%s%i: Results...", + minioncgpu->drv->name, minioncgpu->device_id); + + // Wait until we're ready + while 
(minioncgpu->shutdown == false) { + if (minioninfo->initialised) { + break; + } + cgsleep_ms(3); + } + + thr = minioninfo->thr; + + last_check = 0; + while (minioncgpu->shutdown == false) { + if (!oldest_nonce(minioncgpu, &chip, &core, &task_id, &nonce, + &no_nonce, &when, &another, &task_id2, &nonce2)) { + check_last_nonce(minioncgpu); + last_check = 0; + cgsem_mswait(&(minioninfo->nonce_ready), MINION_NONCE_mS); + continue; + } + + oknonce(thr, minioncgpu, chip, core, task_id, nonce, no_nonce, &when, + another, task_id2, nonce2); + + // Interrupt nonce checking if low CPU and oldest_nonce() is always true + if (++last_check > 100) { + check_last_nonce(minioncgpu); + last_check = 0; + } + } + + return NULL; +} + +static void minion_flush_work(struct cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + K_ITEM *prev_unused, *task, *prev_task, *witem; + int i; + + if (minioninfo->initialised == false) + return; + + applog(MINION_LOG, "%s%i: flushing work", + minioncgpu->drv->name, minioncgpu->device_id); + + // TODO: N.B. scanwork also gets work locks - which master thread calls flush? + K_WLOCK(minioninfo->wwork_list); + + // Simply remove the whole unused wwork_list + k_list_transfer_to_head(minioninfo->wwork_list, minioninfo->wstale_list); + minioninfo->wwork_flushed += minioninfo->wstale_list->count; + + // TODO: flush/work tasks should have a block sequence number so this task removal code + // might be better implemented in minion_spi_write where each work task would + // update the block sequence number and any work tasks with an old block sequence + // number would be discarded rather than sent - minion_spi_write will also need to + // prioritise flush urgent tasks above work urgent tasks - have 3 urgent states? 
+ // They should however be 2 seperate variables in minioninfo to reduce locking + // - flush will increment one and put it in the flush task, (and work will use that) + // minion_spi_write will check/update the other and thus not need a lock + + // No deadlock since this is the only code to get 2 locks + K_WLOCK(minioninfo->tfree_list); + task = minioninfo->task_list->tail; + while (task) { + prev_task = task->prev; + if (DATA_TASK(task)->address == WRITE_ADDR(MINION_QUE_0)) { + minioninfo->chip_status[DATA_TASK(task)->chip].quework--; +#if MINION_SHOW_IO + applog(IOCTRL_LOG, "QueFlush chip %d cw=%u rw=%u qw-1=%u", + (int)DATA_TASK(task)->chip, + minioninfo->chip_status[DATA_TASK(task)->chip].chipwork, + minioninfo->chip_status[DATA_TASK(task)->chip].realwork, + minioninfo->chip_status[DATA_TASK(task)->chip].quework); +#endif + witem = DATA_TASK(task)->witem; + k_unlink_item(minioninfo->wque_list[DATA_TASK(task)->chip], witem); + minioninfo->wque_flushed++; + if (DATA_WORK(witem)->rolled) + free_work(DATA_WORK(witem)->work); + else + work_completed(minioncgpu, DATA_WORK(witem)->work); + k_free_head(minioninfo->wfree_list, witem); + k_unlink_item(minioninfo->task_list, task); + k_free_head(minioninfo->tfree_list, task); + } + task = prev_task; + } + for (i = 0; i < (int)MINION_CHIPS; i++) { + if (minioninfo->has_chip[i]) { + // TODO: consider sending it now rather than adding to the task list? 
+ task = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(task)->tid = ++(minioninfo->next_tid); + DATA_TASK(task)->chip = i; + DATA_TASK(task)->write = true; + DATA_TASK(task)->address = MINION_SYS_RSTN_CTL; + DATA_TASK(task)->task_id = 0; // ignored + DATA_TASK(task)->wsiz = MINION_SYS_SIZ; + DATA_TASK(task)->rsiz = 0; + DATA_TASK(task)->wbuf[0] = SYS_RSTN_CTL_FLUSH; + DATA_TASK(task)->wbuf[1] = 0; + DATA_TASK(task)->wbuf[2] = 0; + DATA_TASK(task)->wbuf[3] = 0; + DATA_TASK(task)->urgent = true; + k_add_head(minioninfo->task_list, task); + } + } + K_WUNLOCK(minioninfo->tfree_list); + + K_WUNLOCK(minioninfo->wwork_list); + + // TODO: send a signal to force getting and sending new work - needs cgsem_wait in the sending thread + + // TODO: should we use this thread to do the following work? + if (minioninfo->wstale_list->count) { + // mark complete all stale unused work (oldest first) + prev_unused = minioninfo->wstale_list->tail; + while (prev_unused) { + if (DATA_WORK(prev_unused)->rolled) + free_work(DATA_WORK(prev_unused)->work); + else + work_completed(minioncgpu, DATA_WORK(prev_unused)->work); + prev_unused = prev_unused->prev; + } + + // put them back in the wfree_list + K_WLOCK(minioninfo->wfree_list); + k_list_transfer_to_head(minioninfo->wstale_list, minioninfo->wfree_list); + K_WUNLOCK(minioninfo->wfree_list); + } +} + +static void sys_chip_sta(struct cgpu_info *minioncgpu, int chip) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct timeval now; + K_ITEM *item; + int limit, rep; + + cgtime(&now); + // No lock required since 'last' is only accessed here + if (minioninfo->chip_status[chip].last.tv_sec == 0) { + memcpy(&(minioninfo->chip_status[chip].last), &now, sizeof(now)); + } else { + limit = MINION_STATS_UPDATE_TIME_mS + + (int)(random() % MINION_STATS_UPDATE_RAND_mS); + if (ms_tdiff(&now, &(minioninfo->chip_status[chip].last)) > limit) { + memcpy(&(minioninfo->chip_status[chip].last), &now, sizeof(now)); 
+ + K_WLOCK(minioninfo->tfree_list); + item = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(item)->tid = ++(minioninfo->next_tid); + K_WUNLOCK(minioninfo->tfree_list); + + DATA_TASK(item)->chip = chip; + DATA_TASK(item)->write = false; + DATA_TASK(item)->address = READ_ADDR(MINION_SYS_CHIP_STA); + DATA_TASK(item)->task_id = 0; + DATA_TASK(item)->wsiz = 0; + DATA_TASK(item)->rsiz = MINION_SYS_SIZ; + DATA_TASK(item)->urgent = false; + + K_WLOCK(minioninfo->task_list); + k_add_head(minioninfo->task_list, item); + item = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(item)->tid = ++(minioninfo->next_tid); + K_WUNLOCK(minioninfo->task_list); + + DATA_TASK(item)->chip = chip; + DATA_TASK(item)->write = false; + DATA_TASK(item)->address = READ_ADDR(MINION_SYS_IDLE_CNT); + DATA_TASK(item)->task_id = 0; + DATA_TASK(item)->wsiz = 0; + DATA_TASK(item)->rsiz = MINION_SYS_SIZ; + DATA_TASK(item)->urgent = false; + + K_WLOCK(minioninfo->task_list); + k_add_head(minioninfo->task_list, item); + K_WUNLOCK(minioninfo->task_list); + + // Get the core ena and act state + for (rep = 0; rep < MINION_CORE_REPS; rep++) { + // Ena + K_WLOCK(minioninfo->tfree_list); + item = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(item)->tid = ++(minioninfo->next_tid); + K_WUNLOCK(minioninfo->tfree_list); + + DATA_TASK(item)->chip = chip; + DATA_TASK(item)->write = false; + DATA_TASK(item)->address = READ_ADDR(MINION_CORE_ENA0_31 + rep); + DATA_TASK(item)->task_id = 0; + DATA_TASK(item)->wsiz = 0; + DATA_TASK(item)->rsiz = MINION_SYS_SIZ; + DATA_TASK(item)->urgent = false; + + K_WLOCK(minioninfo->task_list); + k_add_head(minioninfo->task_list, item); + // Act + item = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(item)->tid = ++(minioninfo->next_tid); + K_WUNLOCK(minioninfo->task_list); + + DATA_TASK(item)->chip = chip; + DATA_TASK(item)->write = false; + DATA_TASK(item)->address = READ_ADDR(MINION_CORE_ACT0_31 + rep); + DATA_TASK(item)->task_id = 0; + DATA_TASK(item)->wsiz = 0; + 
DATA_TASK(item)->rsiz = MINION_SYS_SIZ; + DATA_TASK(item)->urgent = false; + + K_WLOCK(minioninfo->task_list); + k_add_head(minioninfo->task_list, item); + K_WUNLOCK(minioninfo->task_list); + } + + if (minioninfo->lednow[chip] != minioninfo->setled[chip]) { + uint32_t led; + + minioninfo->lednow[chip] = minioninfo->setled[chip]; + if (minioninfo->lednow[chip]) + led = MINION_SPI_LED_ON; + else + led = MINION_SPI_LED_OFF; + + K_WLOCK(minioninfo->tfree_list); + item = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(item)->tid = ++(minioninfo->next_tid); + K_WUNLOCK(minioninfo->tfree_list); + + DATA_TASK(item)->chip = chip; + DATA_TASK(item)->write = true; + DATA_TASK(item)->address = MINION_SYS_SPI_LED; + DATA_TASK(item)->task_id = 0; + DATA_TASK(item)->wsiz = MINION_SYS_SIZ; + DATA_TASK(item)->rsiz = 0; + DATA_TASK(item)->wbuf[0] = led & 0xff; + DATA_TASK(item)->wbuf[1] = (led >> 8) & 0xff; + DATA_TASK(item)->wbuf[2] = (led >> 16) & 0xff; + DATA_TASK(item)->wbuf[3] = (led >> 24) & 0xff; + DATA_TASK(item)->urgent = false; + + K_WLOCK(minioninfo->task_list); + k_add_head(minioninfo->task_list, item); + K_WUNLOCK(minioninfo->task_list); + } + } + } +} + +static void new_work_task(struct cgpu_info *minioncgpu, K_ITEM *witem, int chip, bool urgent, uint8_t state) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct minion_que *que; + K_ITEM *item; + + K_WLOCK(minioninfo->tfree_list); + item = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(item)->tid = ++(minioninfo->next_tid); + K_WUNLOCK(minioninfo->tfree_list); + + DATA_TASK(item)->chip = chip; + DATA_TASK(item)->write = true; + DATA_TASK(item)->address = MINION_QUE_0; + + // if threaded access to new_work_task() is added, this will need locking + // Don't use task_id 0 so that we can ignore all '0' work replies + // ... 
and report them as errors + if (minioninfo->next_task_id == 0) + minioninfo->next_task_id = 1; + DATA_TASK(item)->task_id = minioninfo->next_task_id; + DATA_WORK(witem)->task_id = minioninfo->next_task_id; + minioninfo->next_task_id = (minioninfo->next_task_id + 1) & MINION_MAX_TASK_ID; + + DATA_TASK(item)->urgent = urgent; + DATA_TASK(item)->work_state = state; + DATA_TASK(item)->work = DATA_WORK(witem)->work; + DATA_TASK(item)->witem = witem; + + que = (struct minion_que *)&(DATA_TASK(item)->wbuf[0]); + que->task_id[0] = DATA_TASK(item)->task_id & 0xff; + que->task_id[1] = (DATA_TASK(item)->task_id & 0xff00) >> 8; + + memcpy(&(que->midstate[0]), &(DATA_WORK(witem)->work->midstate[0]), MIDSTATE_BYTES); + memcpy(&(que->merkle7[0]), &(DATA_WORK(witem)->work->data[MERKLE7_OFFSET]), MERKLE_BYTES); + + DATA_TASK(item)->wsiz = (int)sizeof(*que); + DATA_TASK(item)->rsiz = 0; + + K_WLOCK(minioninfo->wque_list[chip]); + k_add_head(minioninfo->wque_list[chip], witem); + minioninfo->chip_status[chip].quework++; +#if MINION_SHOW_IO + applog(IOCTRL_LOG, "Que chip %d cw=%u rw=%u qw+1=%u", + chip, + minioninfo->chip_status[chip].chipwork, + minioninfo->chip_status[chip].realwork, + minioninfo->chip_status[chip].quework); +#endif + K_WUNLOCK(minioninfo->wque_list[chip]); + + K_WLOCK(minioninfo->task_list); + k_add_head(minioninfo->task_list, item); + K_WUNLOCK(minioninfo->task_list); + + if (urgent) + cgsem_post(&(minioninfo->task_ready)); + + // N.B. this will only update often enough if a chip is > ~2GH/s + if (!urgent) + sys_chip_sta(minioncgpu, chip); +} + +// TODO: stale work ... 
+static K_ITEM *next_work(struct minion_info *minioninfo) +{ + K_ITEM *item; + struct timeval now; + double howlong; + int i; + + K_WLOCK(minioninfo->wwork_list); + item = k_unlink_tail(minioninfo->wwork_list); + K_WUNLOCK(minioninfo->wwork_list); + if (item) { + cgtime(&now); + howlong = tdiff(&now, &(DATA_WORK(item)->created)); + minioninfo->que_work++; + minioninfo->que_time += howlong; + if (minioninfo->que_min == 0 || minioninfo->que_min > howlong) + minioninfo->que_min = howlong; + else if (minioninfo->que_max < howlong) + minioninfo->que_max = howlong; + for (i = 0; i < TIME_BANDS; i++) { + if (howlong < time_bands[i]) { + minioninfo->que_bands[i]++; + break; + } + } + if (i >= TIME_BANDS) + minioninfo->que_bands[TIME_BANDS]++; + } + + return item; +} + +static void minion_do_work(struct cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + int count, chip, j, lowcount; + TASK_ITEM fifo_task; + uint8_t state, cmd; + K_ITEM *item; +#if ENABLE_INT_NONO + K_ITEM *task; +#endif + bool islow, sentwork; + + fifo_task.chip = 0; + fifo_task.write = false; + fifo_task.address = MINION_SYS_FIFO_STA; + fifo_task.wsiz = 0; + fifo_task.rsiz = MINION_SYS_SIZ; + + // TODO: (remove this) Fake starved of work to test CMD Interrupt +// if (total_secs > 120) { +// cgsleep_ms(888); +// return; +// } + + /* + * Fill the queues as follows: + * 1) put at least 1 in each queue or if islow then add 1 + * 2) push each queue up to LOW or if count is high but islow, then add LOW-1 + * 3) push each LOW queue up to HIGH + */ + + sentwork = false; + for (state = 0; state < 3; state++) { +#define CHP 0 +//applog(LOG_ERR, "%s%i: chip %d presta %d: quew %d chw %d", minioncgpu->drv->name, minioncgpu->device_id, CHP, state, minioninfo->chip_status[CHP].quework, minioninfo->chip_status[CHP].chipwork); + for (chip = 0; chip < (int)MINION_CHIPS; chip++) + minioninfo->chip_status[chip].tohigh = false; + + for (chip = 0; chip < 
(int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip] && !minioninfo->chip_status[chip].overheat) { + struct timeval now; + double howlong; + cgtime(&now); + howlong = tdiff(&now, &(minioninfo->last_reset[chip])); + if (howlong < MINION_RESET_DELAY_s) + continue; + + int tries = 0; + while (tries++ < 4) { + cmd = 0; + fifo_task.chip = chip; + fifo_task.reply = 0; + minion_txrx(&fifo_task); + if (fifo_task.reply <= 0) { + if (fifo_task.reply < (int)(fifo_task.osiz)) { + char *buf = bin2hex((unsigned char *)(&(fifo_task.rbuf[fifo_task.osiz - fifo_task.rsiz])), + (int)(fifo_task.rsiz)); + applog(LOG_ERR, "%s%i: Chip %d Bad fifo reply (%s) size %d, should be %d", + minioncgpu->drv->name, minioncgpu->device_id, + chip, buf, + fifo_task.reply, (int)(fifo_task.osiz)); + free(buf); + minioninfo->spi_errors++; + minioninfo->fifo_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + } else { + if (fifo_task.reply > (int)(fifo_task.osiz)) { + applog(LOG_ERR, "%s%i: Chip %d Unexpected fifo reply size %d, expected only %d", + minioncgpu->drv->name, minioncgpu->device_id, + chip, fifo_task.reply, (int)(fifo_task.osiz)); + } + cmd = FIFO_CMD(fifo_task.rbuf, fifo_task.osiz - fifo_task.rsiz); + // valid reply? 
+ if (cmd < MINION_QUE_MAX) { + K_WLOCK(minioninfo->wchip_list[chip]); + minioninfo->chip_status[chip].realwork = cmd; + K_WUNLOCK(minioninfo->wchip_list[chip]); + if (cmd <= MINION_QUE_LOW || cmd >= MINION_QUE_HIGH) { + applog(LOG_DEBUG, "%s%i: Chip %d fifo cmd %d", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, (int)cmd); + } + break; + } + + applog(LOG_ERR, "%s%i: Chip %d Bad fifo reply cmd %d (max is %d)", + minioncgpu->drv->name, minioncgpu->device_id, + chip, (int)cmd, MINION_QUE_MAX); + minioninfo->spi_errors++; + minioninfo->fifo_spi_errors[chip]++; + minioninfo->res_err_count[chip]++; + } + } + } + + K_WLOCK(minioninfo->wchip_list[chip]); + count = minioninfo->chip_status[chip].quework + + minioninfo->chip_status[chip].realwork; + islow = minioninfo->chip_status[chip].islow; + minioninfo->chip_status[chip].islow = false; + lowcount = minioninfo->chip_status[chip].lowcount; + K_WUNLOCK(minioninfo->wchip_list[chip]); + + switch (state) { + case 0: + if (count == 0 || islow) { + item = next_work(minioninfo); + if (item) { + new_work_task(minioncgpu, item, chip, true, state); + sentwork = true; + applog(MINION_LOG, "%s%i: 0 task 0x%04x in chip %d list", + minioncgpu->drv->name, + minioncgpu->device_id, + DATA_WORK(item)->task_id, chip); + } else { + applog(LOG_ERR, "%s%i: chip %d urgent empty work list", + minioncgpu->drv->name, + minioncgpu->device_id, + chip); + } + } + break; + case 1: + if (count < MINION_QUE_LOW || islow) { + // do case 2: after we've done other chips + minioninfo->chip_status[chip].tohigh = true; + j = count; + if (count >= MINION_QUE_LOW) { + // islow means run a full case 1 + j = 1; + applog(LOG_ERR, "%s%i: chip %d low que (%d) with high count %d", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, lowcount, count); + } + for (; j < MINION_QUE_LOW; j++) { + item = next_work(minioninfo); + if (item) { + new_work_task(minioncgpu, item, chip, false, state); + sentwork = true; + applog(MINION_LOG, "%s%i: 1 task 0x%04x in 
chip %d list", + minioncgpu->drv->name, + minioncgpu->device_id, + DATA_WORK(item)->task_id, chip); + } else { + applog(LOG_ERR, "%s%i: chip %d non-urgent lo " + "empty work list (count=%d)", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, j); + } + } + } + break; + case 2: + if (count <= MINION_QUE_LOW || minioninfo->chip_status[chip].tohigh) { + for (j = count; j < MINION_QUE_HIGH; j++) { + item = next_work(minioninfo); + if (item) { + new_work_task(minioncgpu, item, chip, false, state); + sentwork = true; + applog(MINION_LOG, "%s%i: 2 task 0x%04x in chip %d list", + minioncgpu->drv->name, + minioncgpu->device_id, + DATA_WORK(item)->task_id, chip); + } else { + applog(LOG_DEBUG, "%s%i: chip %d non-urgent hi " + "empty work list (count=%d)", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, j); + } + } + } + break; + } + } else + if (minioninfo->has_chip[chip] && minioninfo->chip_status[chip].overheat && state == 2) + sys_chip_sta(minioncgpu, chip); + } + } + + sentwork = sentwork; +#if ENABLE_INT_NONO + if (sentwork) { + // Clear CMD interrupt since we've now sent more + K_WLOCK(minioninfo->tfree_list); + task = k_unlink_head(minioninfo->tfree_list); + DATA_TASK(task)->tid = ++(minioninfo->next_tid); + DATA_TASK(task)->chip = 0; // ignored + DATA_TASK(task)->write = true; + DATA_TASK(task)->address = MINION_SYS_INT_CLR; + DATA_TASK(task)->task_id = 0; // ignored + DATA_TASK(task)->wsiz = MINION_SYS_SIZ; + DATA_TASK(task)->rsiz = 0; + DATA_TASK(task)->wbuf[0] = MINION_CMD_INT; + DATA_TASK(task)->wbuf[1] = 0; + DATA_TASK(task)->wbuf[2] = 0; + DATA_TASK(task)->wbuf[3] = 0; + DATA_TASK(task)->urgent = false; + k_add_head(minioninfo->task_list, task); + K_WUNLOCK(minioninfo->tfree_list); + } +#endif + +//applog(LOG_ERR, "%s%i: chip %d fin: quew %d chw %d", minioncgpu->drv->name, minioncgpu->device_id, CHP, minioninfo->chip_status[CHP].quework, minioninfo->chip_status[CHP].chipwork); +} + +static bool minion_thread_prepare(struct thr_info *thr) +{ + 
struct cgpu_info *minioncgpu = thr->cgpu; + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + + minioninfo->thr = thr; + /* + * SPI/ioctl write thread + */ + if (thr_info_create(&(minioninfo->spiw_thr), NULL, minion_spi_write, (void *)minioncgpu)) { + applog(LOG_ERR, "%s%i: SPI write thread create failed", + minioncgpu->drv->name, minioncgpu->device_id); + return false; + } + pthread_detach(minioninfo->spiw_thr.pth); + + /* + * SPI/ioctl results thread + */ + if (thr_info_create(&(minioninfo->spir_thr), NULL, minion_spi_reply, (void *)minioncgpu)) { + applog(LOG_ERR, "%s%i: SPI reply thread create failed", + minioncgpu->drv->name, minioncgpu->device_id); + return false; + } + pthread_detach(minioninfo->spir_thr.pth); + + /* + * Seperate results checking thread so ioctl timing can ignore the results checking + */ + if (thr_info_create(&(minioninfo->res_thr), NULL, minion_results, (void *)minioncgpu)) { + applog(LOG_ERR, "%s%i: Results thread create failed", + minioncgpu->drv->name, minioncgpu->device_id); + return false; + } + pthread_detach(minioninfo->res_thr.pth); + + return true; +} + +static void minion_shutdown(struct thr_info *thr) +{ + struct cgpu_info *minioncgpu = thr->cgpu; + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + int i; + + applog(MINION_LOG, "%s%i: shutting down", + minioncgpu->drv->name, minioncgpu->device_id); + + for (i = 0; i < (int)MINION_CHIPS; i++) + if (minioninfo->has_chip[i]) +// TODO: minion_shutdown(minioncgpu, minioninfo, i); + i = i; + + minioncgpu->shutdown = true; +} + +static bool minion_queue_full(struct cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct work *work, *usework; + int count, totneed, need, roll, roll_limit, chip; + bool ret, rolled; + + if (minioninfo->initialised == false) { + cgsleep_us(42); + return true; + } + + K_RLOCK(minioninfo->wwork_list); + count = 
minioninfo->wwork_list->count; + totneed = 0; + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip] && + !minioninfo->chip_status[chip].overheat) { + totneed += MINION_QUE_HIGH; + totneed -= minioninfo->chip_status[chip].quework; + totneed -= minioninfo->chip_status[chip].realwork; + // One for the pot :) + totneed++; + } + } + K_RUNLOCK(minioninfo->wwork_list); + + if (count >= totneed) + ret = true; + else { + need = totneed - count; + /* Ensure we do enough rolling to reduce CPU + but dont roll too much to have them end up stale */ + if (need < 16) + need = 16; + work = get_queued(minioncgpu); + if (work) { + roll_limit = work->drv_rolllimit; + roll = 0; + do { + if (roll == 0) { + usework = work; + minioninfo->work_unrolled++; + rolled = false; + } else { + usework = copy_work_noffset(work, roll); + minioninfo->work_rolled++; + rolled = true; + } + ready_work(minioncgpu, usework, rolled); + } while (--need > 0 && ++roll <= roll_limit); + } else { + // Avoid a hard loop when we can't get work fast enough + cgsleep_us(42); + } + + if (need > 0) + ret = false; + else + ret = true; + } + + return ret; +} + +static void idle_report(struct cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct timeval now; + uint32_t idle; + int msdiff; + int chip; + + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) { + idle = minioninfo->chip_status[chip].idle; + if (idle != minioninfo->chip_status[chip].last_rpt_idle) { + cgtime(&now); + msdiff = ms_tdiff(&now, &(minioninfo->chip_status[chip].idle_rpt)); + if (msdiff >= MINION_IDLE_MESSAGE_ms) { + memcpy(&(minioninfo->chip_status[chip].idle_rpt), &now, sizeof(now)); + applog(LOG_WARNING, + "%s%d: chip %d internal idle changed %08x", + minioncgpu->drv->name, minioncgpu->device_id, + chip, idle); + minioninfo->chip_status[chip].last_rpt_idle = idle; + } + } + } + } +} + +static void chip_report(struct 
cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct timeval now; + char buf[512]; + char res_err_msg[2]; + size_t len; + double elapsed, ghs, ghs2, expect, howlong; + char ghs2_display[64]; + K_ITEM *pitem; + int msdiff, chip; + int res_err_count; + + cgtime(&now); + if (!(minioninfo->chip_chk.tv_sec)) { + memcpy(&(minioninfo->chip_chk), &now, sizeof(now)); + memcpy(&(minioninfo->chip_rpt), &now, sizeof(now)); + return; + } + + // Always run the calculations to check chip GHs for the LED + buf[0] = '\0'; + res_err_msg[0] = '\0'; + res_err_msg[1] = '\0'; + K_RLOCK(minioninfo->hfree_list); + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) { + len = strlen(buf); + if (minioninfo->hchip_list[chip]->count < 2) + ghs = 0.0; + else { + ghs = 0xffffffffull * (minioninfo->hchip_list[chip]->count - 1); + ghs /= 1000000000.0; + ghs /= tdiff(&now, &(DATA_HIST(minioninfo->hchip_list[chip]->tail)->when)); + } + if (minioninfo->chip_status[chip].first_nonce.tv_sec == 0L || + tdiff(&now, &minioninfo->chip_status[chip].first_nonce) < MINION_LED_TEST_TIME) { + ghs2_display[0] = '\0'; + minioninfo->setled[chip] = false; + } else { + ghs2 = 0xffffffffull * (minioninfo->chip_status[chip].from_first_good - 1); + ghs2 /= 1000000000.0; + ghs2 /= tdiff(&now, &minioninfo->chip_status[chip].first_nonce); + minioninfo->setled[chip] = (ghs2 >= opt_minion_ledlimit); + snprintf(ghs2_display, sizeof(ghs2_display), "[%.2f]", ghs2); + } + + res_err_count = minioninfo->res_err_count[chip]; + minioninfo->res_err_count[chip] = 0; + if (res_err_count > 100) + res_err_msg[0] = '!'; + else if (res_err_count > 50) + res_err_msg[0] = '*'; + else if (res_err_count > 0) + res_err_msg[0] = '\''; + else + res_err_msg[0] = '\0'; + snprintf(buf + len, sizeof(buf) - len, + " %d=%s%.2f%s", chip, res_err_msg, ghs, ghs2_display); + minioninfo->history_ghs[chip] = ghs; + } + } + K_RUNLOCK(minioninfo->hfree_list); + 
+ // But only display it if required + if (opt_minion_chipreport > 0) { + msdiff = ms_tdiff(&now, &(minioninfo->chip_rpt)); + if (msdiff >= (opt_minion_chipreport * 1000)) { + memcpy(&(minioninfo->chip_chk), &now, sizeof(now)); + applogsiz(LOG_WARNING, 512, + "%s%d: Chip GHs%s", + minioncgpu->drv->name, minioncgpu->device_id, buf); + memcpy(&(minioninfo->chip_rpt), &now, sizeof(now)); + } + } + + msdiff = ms_tdiff(&now, &(minioninfo->chip_chk)); + if (total_secs >= MINION_RESET_s && msdiff >= (minioninfo->history_gen * 1000)) { + K_RLOCK(minioninfo->hfree_list); + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) { + // Don't reset the chip while 'changing' + if (minioninfo->changing[chip]) + continue; + + if (!minioninfo->reset_mark[chip] || + minioninfo->reset_count[chip] < 2) { + elapsed = 0.0; + ghs = 0.0; + } else { + // 'now' includes that it may have stopped getting nonces + elapsed = tdiff(&now, &(DATA_HIST(minioninfo->reset_mark[chip])->when)); + ghs = 0xffffffffull * (minioninfo->reset_count[chip] - 1); + ghs /= 1000000000.0; + ghs /= elapsed; + } + expect = (double)(minioninfo->init_freq[chip]) * + MINION_RESET_PERCENT / 1000.0; + howlong = tdiff(&now, &(minioninfo->last_reset[chip])); + if (ghs <= expect && howlong >= minioninfo->reset_time[chip]) { + minioninfo->do_reset[chip] = expect; + + // For now - no lock required since no other code accesses it + pitem = k_unlink_head(minioninfo->pfree_list); + DATA_PERF(pitem)->elapsed = elapsed; + DATA_PERF(pitem)->nonces = minioninfo->reset_count[chip] - 1; + DATA_PERF(pitem)->freq = minioninfo->init_freq[chip]; + DATA_PERF(pitem)->ghs = ghs; + memcpy(&(DATA_PERF(pitem)->when), &now, sizeof(now)); + k_add_head(minioninfo->p_list[chip], pitem); + } else if (second_check) { + expect = (double)(minioninfo->init_freq[chip]) * + MINION_RESET2_PERCENT / 1000.0; + if (ghs < expect && howlong >= minioninfo->reset2_time[chip]) { + /* Only do a reset, don't record it, since the ghs + 
is still above MINION_RESET_PERCENT */ + minioninfo->do_reset[chip] = expect; + } + } + minioninfo->history_ghs[chip] = ghs; + // Expire old perf items to stop clockdown + if (minioninfo->do_reset[chip] <= 1.0 && howlong > MINION_CLR_s) { + // Always remember the last reset + while (minioninfo->p_list[chip]->count > 1) { + pitem = k_unlink_tail(minioninfo->p_list[chip]); + k_add_head(minioninfo->pfree_list, pitem); + } + } + } + } + K_RUNLOCK(minioninfo->hfree_list); + + memcpy(&(minioninfo->chip_chk), &now, sizeof(now)); + } + + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) { + // Don't reset the chip while 'changing' + if (minioninfo->changing[chip]) + continue; + + if (minioninfo->do_reset[chip] > 1.0 || + minioninfo->flag_reset[chip]) { + bool std_reset = true; + int curr_freq = minioninfo->init_freq[chip]; + int new_freq = 0.0; + int count; + + // Adjust frequency down? + if (!opt_minion_noautofreq && + minioninfo->p_list[chip]->count >= MINION_RESET_COUNT) { + pitem = minioninfo->p_list[chip]->head; + count = 1; + while (pitem && pitem->next && count++ < MINION_RESET_COUNT) { + if (DATA_PERF(pitem)->freq != DATA_PERF(pitem->next)->freq) + break; + if (count >= MINION_RESET_COUNT) { + new_freq = minioninfo->init_freq[chip] - + MINION_FREQ_RESET_STEP; + if (new_freq < MINION_FREQ_MIN) + new_freq = MINION_FREQ_MIN; + if (minioninfo->init_freq[chip] != new_freq) { + minioninfo->init_freq[chip] = new_freq; + std_reset = false; + } + break; + } else + pitem = pitem->next; + } + } + + if (std_reset) { + if (minioninfo->do_reset[chip] > 1.0) { + applog(LOG_WARNING, "%s%d: Chip %d %dMHz threshold " + "%.2fGHs - resetting", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, curr_freq, + minioninfo->do_reset[chip]); + } else { + applog(LOG_WARNING, "%s%d: Chip %d %dMhz flagged - " + "resetting", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, curr_freq); + } + } else { + if (minioninfo->do_reset[chip] > 1.0) { + 
applog(LOG_WARNING, "%s%d: Chip %d %dMHz threshold " + "%.2fGHs - resetting to %dMhz", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, curr_freq, + minioninfo->do_reset[chip], + new_freq); + } else { + applog(LOG_WARNING, "%s%d: Chip %d %dMhz flagged - " + "resetting to %dMHz", + minioncgpu->drv->name, + minioncgpu->device_id, + chip, curr_freq, new_freq); + } + } + minioninfo->do_reset[chip] = 0.0; + memcpy(&(minioninfo->last_reset[chip]), &now, sizeof(now)); + init_chip(minioncgpu, minioninfo, chip); + minioninfo->flag_reset[chip] = false; + } + } + } +} + +static int64_t minion_scanwork(__maybe_unused struct thr_info *thr) +{ + struct cgpu_info *minioncgpu = thr->cgpu; + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + int64_t hashcount = 0; + + if (minioninfo->initialised == false) + return hashcount; + + minion_do_work(minioncgpu); + + mutex_lock(&(minioninfo->nonce_lock)); + if (minioninfo->new_nonces) { + hashcount += 0xffffffffull * minioninfo->new_nonces; + minioninfo->new_nonces = 0; + } + mutex_unlock(&(minioninfo->nonce_lock)); + + if (opt_minion_idlecount) + idle_report(minioncgpu); + + // Must always generate data to check/allow for chip reset + chip_report(minioncgpu); + + /* + * To avoid wasting CPU, wait until we get an interrupt + * before returning back to the main cgminer work loop + * i.e. 
we then know we'll need more work + */ + cgsem_mswait(&(minioninfo->scan_work), MINION_SCAN_mS); + + return hashcount; +} + +static const char *temp_str(uint16_t temp) +{ + switch (temp) { + case MINION_TEMP_40: + return min_temp_40; + case MINION_TEMP_60: + return min_temp_60; + case MINION_TEMP_80: + return min_temp_80; + case MINION_TEMP_100: + return min_temp_100; + case MINION_TEMP_OVER: + return min_temp_over; + } + return min_temp_invalid; +} + +static void minion_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + uint16_t max_temp, cores; + int chip, core; + + max_temp = 0; + cores = 0; + mutex_lock(&(minioninfo->sta_lock)); + for (chip = 0; chip < (int)MINION_CHIPS; chip++) { + if (minioninfo->has_chip[chip]) { + if (max_temp < minioninfo->chip_status[chip].temp) + max_temp = minioninfo->chip_status[chip].temp; + for (core = 0; core < MINION_CORES; core++) { + if (minioninfo->chip_core_ena[core >> 5][chip] & (0x1 << (core % 32))) + cores++; + } + } + } + mutex_unlock(&(minioninfo->sta_lock)); + + tailsprintf(buf, bufsiz, "max%sC Ch:%d Co:%d", + temp_str(max_temp), minioninfo->chips, (int)cores); +} + +#define CHIPS_PER_STAT 5 + +static struct api_data *minion_api_stats(struct cgpu_info *minioncgpu) +{ + struct minion_info *minioninfo = (struct minion_info *)(minioncgpu->device_data); + struct api_data *root = NULL; + char cores[MINION_CORES+1]; + char data[2048]; + char buf[32]; + int i, to, j; + size_t datalen, nlen; + int chip, max_chip, que_work, chip_work, temp; + + if (minioninfo->initialised == false) + return NULL; + + root = api_add_uint64(root, "OK Nonces", &(minioninfo->ok_nonces), true); + root = api_add_uint64(root, "New Nonces", &(minioninfo->new_nonces), true); + root = api_add_uint64(root, "Tested Nonces", &(minioninfo->tested_nonces), true); + root = api_add_uint64(root, "Untested Nonces", &(minioninfo->untested_nonces), true); 
+ + root = api_add_int(root, "Chips", &(minioninfo->chips), true); + i = MINION_PIN_COUNT; + root = api_add_int(root, "GPIO Pins", &i, true); + + max_chip = 0; + for (chip = 0; chip < (int)MINION_CHIPS; chip++) + if (minioninfo->has_chip[chip]) { + max_chip = chip; + + snprintf(buf, sizeof(buf), "Chip %d Pin", chip); + root = api_add_int(root, buf, &(minioninfo->chip_pin[chip]), true); + snprintf(buf, sizeof(buf), "Chip %d ChipID", chip); + i = (int)(minioninfo->chipid[chip]); + root = api_add_int(root, buf, &i, true); + snprintf(buf, sizeof(buf), "Chip %d Temperature", chip); + root = api_add_const(root, buf, temp_str(minioninfo->chip_status[chip].temp), false); + snprintf(buf, sizeof(buf), "Chip %d Cores", chip); + root = api_add_uint16(root, buf, &(minioninfo->chip_status[chip].cores), true); + snprintf(buf, sizeof(buf), "Chip %d Frequency", chip); + root = api_add_uint32(root, buf, &(minioninfo->chip_status[chip].freq), true); + snprintf(buf, sizeof(buf), "Chip %d InitFreq", chip); + root = api_add_int(root, buf, &(minioninfo->init_freq[chip]), true); + snprintf(buf, sizeof(buf), "Chip %d FreqSent", chip); + root = api_add_hex32(root, buf, &(minioninfo->freqsent[chip]), true); + snprintf(buf, sizeof(buf), "Chip %d InitTemp", chip); + temp = minioninfo->init_temp[chip]; + if (temp == MINION_TEMP_CTL_DISABLE) + root = api_add_string(root, buf, MINION_TEMP_DISABLE, true); + else { + snprintf(data, sizeof(data), "%d", temp); + root = api_add_string(root, buf, data, true); + } + snprintf(buf, sizeof(buf), "Chip %d TempSent", chip); + root = api_add_hex32(root, buf, &(minioninfo->chip_status[chip].tempsent), true); + __bin2hex(data, (unsigned char *)(&(minioninfo->init_cores[chip][0])), + sizeof(minioninfo->init_cores[chip])); + snprintf(buf, sizeof(buf), "Chip %d InitCores", chip); + root = api_add_string(root, buf, data, true); + snprintf(buf, sizeof(buf), "Chip %d IdleCount", chip); + root = api_add_hex32(root, buf, &(minioninfo->chip_status[chip].idle), true); + 
snprintf(buf, sizeof(buf), "Chip %d QueWork", chip); + root = api_add_uint32(root, buf, &(minioninfo->chip_status[chip].quework), true); + snprintf(buf, sizeof(buf), "Chip %d ChipWork", chip); + root = api_add_uint32(root, buf, &(minioninfo->chip_status[chip].chipwork), true); + snprintf(buf, sizeof(buf), "Chip %d RealWork", chip); + root = api_add_uint32(root, buf, &(minioninfo->chip_status[chip].realwork), true); + snprintf(buf, sizeof(buf), "Chip %d QueListCount", chip); + root = api_add_int(root, buf, &(minioninfo->wque_list[chip]->count), true); + snprintf(buf, sizeof(buf), "Chip %d WorkListCount", chip); + root = api_add_int(root, buf, &(minioninfo->wchip_list[chip]->count), true); + snprintf(buf, sizeof(buf), "Chip %d Overheat", chip); + root = api_add_bool(root, buf, &(minioninfo->chip_status[chip].overheat), true); + snprintf(buf, sizeof(buf), "Chip %d Overheats", chip); + root = api_add_uint32(root, buf, &(minioninfo->chip_status[chip].overheats), true); + snprintf(buf, sizeof(buf), "Chip %d LastOverheat", chip); + root = api_add_timeval(root, buf, &(minioninfo->chip_status[chip].lastoverheat), true); + snprintf(buf, sizeof(buf), "Chip %d LastRecover", chip); + root = api_add_timeval(root, buf, &(minioninfo->chip_status[chip].lastrecover), true); + snprintf(buf, sizeof(buf), "Chip %d OverheatIdle", chip); + root = api_add_double(root, buf, &(minioninfo->chip_status[chip].overheattime), true); + for (i = 0; i < MINION_CORES; i++) { + if (minioninfo->chip_core_ena[i >> 5][chip] & (0x1 << (i % 32))) + cores[i] = 'o'; + else + cores[i] = 'x'; + } + cores[MINION_CORES] = '\0'; + snprintf(buf, sizeof(buf), "Chip %d CoresEna", chip); + root = api_add_string(root, buf, cores, true); + for (i = 0; i < MINION_CORES; i++) { + if (minioninfo->chip_core_act[i >> 5][chip] & (0x1 << (i % 32))) + cores[i] = '-'; + else + cores[i] = 'o'; + } + cores[MINION_CORES] = '\0'; + snprintf(buf, sizeof(buf), "Chip %d CoresAct", chip); + root = api_add_string(root, buf, cores, 
true); + + if (opt_minion_extra) { + data[0] = '\0'; + datalen = 0; + for (i = 0; i < MINION_CORES; i++) { + if (datalen < sizeof(data)) { + nlen = snprintf(data+datalen, sizeof(data)-datalen, + "%s%"PRIu64"-%s%"PRIu64, + i == 0 ? "" : "/", + minioninfo->core_good[chip][i], + minioninfo->core_bad[chip][i] ? "'" : "", + minioninfo->core_bad[chip][i]); + if (nlen < 1) + break; + datalen += nlen; + } + } + snprintf(buf, sizeof(buf), "Chip %d Cores Good-Bad", chip); + root = api_add_string(root, buf, data, true); + } + + snprintf(buf, sizeof(buf), "Chip %d History GHs", chip); + root = api_add_mhs(root, buf, &(minioninfo->history_ghs[chip]), true); + } + + double his = MINION_HISTORY_s; + root = api_add_double(root, "History length", &his, true); + his = MINION_RESET_s; + root = api_add_double(root, "Default reset length", &his, true); + his = MINION_RESET2_s; + root = api_add_double(root, "Default reset2 length", &his, true); + root = api_add_bool(root, "Reset2 enabled", &second_check, true); + + for (i = 0; i <= max_chip; i += CHIPS_PER_STAT) { + to = i + CHIPS_PER_STAT - 1; + if (to > max_chip) + to = max_chip; + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%d", + j == i ? "" : " ", + minioninfo->has_chip[j] ? 1 : 0); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Detected %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%8"PRIu64, + j == i ? "" : " ", + minioninfo->chip_nonces[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Nonces %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%8"PRIu64, + j == i ? 
"" : " ", + minioninfo->chip_nononces[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "NoNonces %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%8"PRIu64, + j == i ? "" : " ", + minioninfo->chip_good[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Good %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%8"PRIu64, + j == i ? "" : " ", + minioninfo->chip_bad[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Bad %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%8"PRIu64, + j == i ? "" : " ", + minioninfo->chip_err[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Err %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%8"PRIu64, + j == i ? "" : " ", + minioninfo->fifo_spi_errors[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "FifoSpiErr %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%8"PRIu64, + j == i ? "" : " ", + minioninfo->res_spi_errors[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "ResSpiErr %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + + data[0] = '\0'; + for (j = i; j <= to; j++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64"/%"PRIu64"/%"PRIu64"/%"PRIu64"/%"PRIu64, + j == i ? 
"" : " ", + minioninfo->use_res2[j], + minioninfo->tasks_failed[j], + minioninfo->tasks_recovered[j], + minioninfo->nonces_failed[j], + minioninfo->nonces_recovered[j]); + strcat(data, buf); + } + snprintf(buf, sizeof(buf), "Redo %02d - %02d", i, to); + root = api_add_string(root, buf, data, true); + } + + que_work = chip_work = 0; + for (chip = 0; chip <= max_chip; chip++) { + if (minioninfo->has_chip[chip]) { + que_work += minioninfo->wque_list[chip]->count; + chip_work += minioninfo->wchip_list[chip]->count; + } + } + + root = api_add_int(root, "WFree Total", &(minioninfo->wfree_list->total), true); + root = api_add_int(root, "WFree Count", &(minioninfo->wfree_list->count), true); + root = api_add_int(root, "WWork Count", &(minioninfo->wwork_list->count), true); + root = api_add_uint64(root, "WWork Flushed", &(minioninfo->wwork_flushed), true); + root = api_add_int(root, "WQue Count", &que_work, true); + root = api_add_uint64(root, "WQue Flushed", &(minioninfo->wque_flushed), true); + root = api_add_int(root, "WChip Count", &chip_work, true); + root = api_add_uint64(root, "WChip Stale", &(minioninfo->wchip_staled), true); + + root = api_add_int(root, "TFree Total", &(minioninfo->tfree_list->total), true); + root = api_add_int(root, "TFree Count", &(minioninfo->tfree_list->count), true); + root = api_add_int(root, "Task Count", &(minioninfo->task_list->count), true); + root = api_add_int(root, "Reply Count", &(minioninfo->treply_list->count), true); + + root = api_add_int(root, "RFree Total", &(minioninfo->rfree_list->total), true); + root = api_add_int(root, "RFree Count", &(minioninfo->rfree_list->count), true); + root = api_add_int(root, "RNonce Count", &(minioninfo->rnonce_list->count), true); + + root = api_add_int(root, "XFree Count", &(minioninfo->xfree_list->count), true); + root = api_add_int(root, "XFF Count", &(minioninfo->xff_list->count), true); + root = api_add_uint64(root, "XFFs", &(minioninfo->xffs), true); + root = api_add_uint64(root, "SPI 
Resets", &(minioninfo->spi_resets), true); + root = api_add_uint64(root, "Power Cycles", &(minioninfo->power_cycles), true); + + root = api_add_int(root, "Chip Report", &opt_minion_chipreport, true); + root = api_add_int(root, "LED Count", &opt_minion_ledcount, true); + root = api_add_int(root, "LED Limit", &opt_minion_ledlimit, true); + bool b = !opt_minion_noautofreq; + root = api_add_bool(root, "Auto Freq", &b, true); + root = api_add_int(root, "SPI Delay", &opt_minion_spidelay, true); + root = api_add_bool(root, "SPI Reset I/O", &(minioninfo->spi_reset_io), true); + root = api_add_int(root, "SPI Reset", &(minioninfo->spi_reset_count), true); + root = api_add_int(root, "SPI Reset Sleep", &opt_minion_spisleep, true); + +#if DO_IO_STATS +#define sta_api(_name, _iostat) \ + do { \ + if ((_iostat).count) { \ + float _davg = (float)((_iostat).total_delay) / (float)((_iostat).count); \ + float _dlavg = (float)((_iostat).total_dlock) / (float)((_iostat).count); \ + float _dlwavg = (float)((_iostat).total_dlwait) / (float)((_iostat).count); \ + float _bavg = (float)((_iostat).total_bytes) / (float)((_iostat).count); \ + float _tavg = (float)((_iostat).tsd) / (float)((_iostat).count); \ + snprintf(data, sizeof(data), "%s Count=%"PRIu64 \ + " Delay=%.0fus DAvg=%.3f" \ + " DMin=%.0f DMax=%.0f DZ=%"PRIu64 \ + " DLock=%.0fus DLAvg=%.3f" \ + " DLMin=%.0f DLMax=%.0f DZ=%"PRIu64 \ + " DLWait=%.0fus DLWAvg=%.3f" \ + " Bytes=%"PRIu64" BAvg=%.3f" \ + " BMin=%"PRIu64" BMax=%"PRIu64" BZ=%"PRIu64 \ + " TSD=%.0fus TAvg=%.03f", \ + _name, (_iostat).count, \ + (_iostat).total_delay, _davg, (_iostat).min_delay, \ + (_iostat).max_delay, (_iostat).zero_delay, \ + (_iostat).total_dlock, _dlavg, (_iostat).min_dlock, \ + (_iostat).max_dlock, (_iostat).zero_dlock, \ + (_iostat).total_dlwait, _dlwavg, \ + (_iostat).total_bytes, _bavg, (_iostat).min_bytes, \ + (_iostat).max_bytes, (_iostat).zero_bytes, \ + (_iostat).tsd, _tavg); \ + root = api_add_string(root, buf, data, true); \ + } \ + } 
while(0); + + for (i = 0; i < 0x200; i++) { + snprintf(buf, sizeof(buf), "Stat-0x%02x", i); + sta_api(addr2txt((uint8_t)(i & 0xff)), minioninfo->iostats[i]); + } + + // Test to avoid showing applog + if (minioninfo->summary.count) { + snprintf(buf, sizeof(buf), "Stat-S"); + sta_api("Summary", minioninfo->summary); + applog(LOG_WARNING, "%s %d: (%.0f) %s - %s", + minioncgpu->drv->name, minioncgpu->device_id, + total_secs, buf, data); + } +#endif + + root = api_add_uint64(root, "Total SPI Errors", &(minioninfo->spi_errors), true); + root = api_add_uint64(root, "Work Unrolled", &(minioninfo->work_unrolled), true); + root = api_add_uint64(root, "Work Rolled", &(minioninfo->work_rolled), true); + root = api_add_uint64(root, "Ints", &(minioninfo->interrupts), true); + root = api_add_uint64(root, "Res Ints", &(minioninfo->result_interrupts), true); + root = api_add_uint64(root, "Cmd Ints", &(minioninfo->command_interrupts), true); + root = api_add_string(root, "Last Int", minioninfo->last_interrupt, true); + root = api_add_hex32(root, "Next TaskID", &(minioninfo->next_task_id), true); + + double avg; + root = api_add_uint64(root, "ToQue", &(minioninfo->que_work), true); + if (minioninfo->que_work) + avg = minioninfo->que_time / (double)(minioninfo->que_work); + else + avg = 0; + root = api_add_double(root, "Que Avg", &avg, true); + root = api_add_double(root, "Que Min", &(minioninfo->que_min), true); + root = api_add_double(root, "Que Max", &(minioninfo->que_max), true); + data[0] = '\0'; + for (i = 0; i <= TIME_BANDS; i++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + i == 0 ? 
"" : "/", + minioninfo->que_bands[i]); + strcat(data, buf); + } + root = api_add_string(root, "Que Bands", data, true); + + root = api_add_uint64(root, "ToTxRx", &(minioninfo->wt_work), true); + if (minioninfo->wt_work) + avg = minioninfo->wt_time / (double)(minioninfo->wt_work); + else + avg = 0; + root = api_add_double(root, "TxRx Avg", &avg, true); + root = api_add_double(root, "TxRx Min", &(minioninfo->wt_min), true); + root = api_add_double(root, "TxRx Max", &(minioninfo->wt_max), true); + data[0] = '\0'; + for (i = 0; i <= TIME_BANDS; i++) { + snprintf(buf, sizeof(buf), + "%s%"PRIu64, + i == 0 ? "" : "/", + minioninfo->wt_bands[i]); + strcat(data, buf); + } + root = api_add_string(root, "TxRx Bands", data, true); + + uint64_t checked, dups; + dupcounters(minioncgpu, &checked, &dups); + root = api_add_uint64(root, "Dups", &dups, true); + + return root; +} +#endif + +struct device_drv minion_drv = { + .drv_id = DRIVER_minion, + .dname = "Minion BlackArrow", + .name = "MBA", + .drv_detect = minion_detect, +#ifdef LINUX + .get_api_stats = minion_api_stats, + .get_statline_before = minion_get_statline_before, + .set_device = minion_api_set, + .identify_device = minion_identify, + .thread_prepare = minion_thread_prepare, + .hash_work = hash_queued_work, + .scanwork = minion_scanwork, + .queue_full = minion_queue_full, + .flush_work = minion_flush_work, + .thread_shutdown = minion_shutdown +#endif +}; diff --git a/driver-modminer.c b/driver-modminer.c new file mode 100644 index 0000000..a7fb856 --- /dev/null +++ b/driver-modminer.c @@ -0,0 +1,1141 @@ +/* + * Copyright 2012-2013 Andrew Smith + * Copyright 2012 Luke Dashjr + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include +#include + +#include "logging.h" +#include "miner.h" +#include "usbutils.h" +#include "fpgautils.h" +#include "util.h" + +#define BITSTREAM_FILENAME "fpgaminer_top_fixed7_197MHz.ncd" +#define BISTREAM_USER_ID "\2\4$B" + +#define BITSTREAM_MAGIC_0 0 +#define BITSTREAM_MAGIC_1 9 + +#define MODMINER_CUTOFF_TEMP 60.0 +#define MODMINER_OVERHEAT_TEMP 50.0 +#define MODMINER_RECOVER_TEMP 46.5 +#define MODMINER_TEMP_UP_LIMIT 47.0 + +#define MODMINER_HW_ERROR_PERCENT 0.75 + +// How many seconds of no nonces means there's something wrong +// First time - drop the clock and see if it revives +// Second time - (and it didn't revive) disable it +#define ITS_DEAD_JIM 300 + +// N.B. in the latest firmware the limit is 250 +// however the voltage/temperature risks preclude that +#define MODMINER_MAX_CLOCK 230 +#define MODMINER_DEF_CLOCK 200 +#define MODMINER_MIN_CLOCK 160 + +#define MODMINER_CLOCK_UP 2 +#define MODMINER_CLOCK_SET 0 +#define MODMINER_CLOCK_DOWN -2 +// = 0 means OVERHEAT doesn't affect the clock +#define MODMINER_CLOCK_OVERHEAT 0 +#define MODMINER_CLOCK_DEAD -6 +#define MODMINER_CLOCK_CUTOFF -10 + +// Commands +#define MODMINER_PING "\x00" +#define MODMINER_GET_VERSION "\x01" +#define MODMINER_FPGA_COUNT "\x02" +// Commands + require FPGAid +#define MODMINER_GET_IDCODE '\x03' +#define MODMINER_GET_USERCODE '\x04' +#define MODMINER_PROGRAM '\x05' +#define MODMINER_SET_CLOCK '\x06' +#define MODMINER_READ_CLOCK '\x07' +#define MODMINER_SEND_WORK '\x08' +#define MODMINER_CHECK_WORK '\x09' +// One byte temperature reply +#define MODMINER_TEMP1 '\x0a' +// Two byte temperature reply +#define MODMINER_TEMP2 '\x0d' + +// +6 bytes +#define MODMINER_SET_REG '\x0b' +// +2 bytes +#define MODMINER_GET_REG '\x0c' + +#define FPGAID_ALL 4 + +// Maximum how many good shares in a row means clock up +// 96 is ~34m22s at 200MH/s +#define MODMINER_TRY_UP 96 +// Initially how many good shares in a row means clock up +// This is 
doubled each down clock until it reaches MODMINER_TRY_UP +// 6 is ~2m9s at 200MH/s +#define MODMINER_EARLY_UP 6 +// Limit when reducing shares_to_good +#define MODMINER_MIN_BACK 12 + +// 45 noops sent when detecting, in case the device was left in "start job" reading +static const char NOOP[] = MODMINER_PING "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"; + +static void do_ping(struct cgpu_info *modminer) +{ + char buf[0x100+1]; + int err, amount; + + // Don't care if it fails + err = usb_write(modminer, (char *)NOOP, sizeof(NOOP)-1, &amount, C_PING); + applog(LOG_DEBUG, "%s%u: flush noop got %d err %d", + modminer->drv->name, modminer->fpgaid, amount, err); + + // Clear any outstanding data + while ((err = usb_read_once(modminer, buf, sizeof(buf)-1, &amount, C_CLEAR)) == 0 && amount > 0) + applog(LOG_DEBUG, "%s%u: clear got %d", + modminer->drv->name, modminer->fpgaid, amount); + + applog(LOG_DEBUG, "%s%u: final clear got %d err %d", + modminer->drv->name, modminer->fpgaid, amount, err); +} + +static struct cgpu_info *modminer_detect_one(struct libusb_device *dev, struct usb_find_devices *found) +{ + char buf[0x100+1]; + char *devname = NULL; + char devpath[32]; + int err, i, amount; + bool added = false; + + struct cgpu_info *modminer = usb_alloc_cgpu(&modminer_drv, 1); + + modminer->modminer_mutex = calloc(1, sizeof(*(modminer->modminer_mutex))); + mutex_init(modminer->modminer_mutex); + modminer->fpgaid = (char)0; + + if (!usb_init(modminer, dev, found)) + goto shin; + + do_ping(modminer); + + if ((err = usb_write(modminer, MODMINER_GET_VERSION, 1, &amount, C_REQUESTVERSION)) < 0 || amount != 1) { + applog(LOG_ERR, "%s detect (%s) send version request failed (%d:%d)", + modminer->drv->dname, modminer->device_path, amount, err); + goto unshin; + } + + if ((err = usb_read_once(modminer, buf, sizeof(buf)-1, &amount, C_GETVERSION)) 
< 0 || amount < 1) { + if (err < 0) + applog(LOG_ERR, "%s detect (%s) no version reply (%d)", + modminer->drv->dname, modminer->device_path, err); + else + applog(LOG_ERR, "%s detect (%s) empty version reply (%d)", + modminer->drv->dname, modminer->device_path, amount); + + applog(LOG_DEBUG, "%s detect (%s) check the firmware", + modminer->drv->dname, modminer->device_path); + + goto unshin; + } + buf[amount] = '\0'; + devname = strdup(buf); + applog(LOG_DEBUG, "%s (%s) identified as: %s", modminer->drv->dname, modminer->device_path, devname); + + if ((err = usb_write(modminer, MODMINER_FPGA_COUNT, 1, &amount, C_REQUESTFPGACOUNT) < 0 || amount != 1)) { + applog(LOG_ERR, "%s detect (%s) FPGA count request failed (%d:%d)", + modminer->drv->dname, modminer->device_path, amount, err); + goto unshin; + } + + if ((err = usb_read(modminer, buf, 1, &amount, C_GETFPGACOUNT)) < 0 || amount != 1) { + applog(LOG_ERR, "%s detect (%s) no FPGA count reply (%d:%d)", + modminer->drv->dname, modminer->device_path, amount, err); + goto unshin; + } + + // TODO: flag it use 1 byte temp if it is an old firmware + // can detect with modminer->cgusb->serial ? 
+ + if (buf[0] == 0) { + applog(LOG_ERR, "%s detect (%s) zero FPGA count from %s", + modminer->drv->dname, modminer->device_path, devname); + goto unshin; + } + + if (buf[0] < 1 || buf[0] > 4) { + applog(LOG_ERR, "%s detect (%s) invalid FPGA count (%u) from %s", + modminer->drv->dname, modminer->device_path, buf[0], devname); + goto unshin; + } + + applog(LOG_DEBUG, "%s (%s) %s has %u FPGAs", + modminer->drv->dname, modminer->device_path, devname, buf[0]); + + modminer->name = devname; + + // TODO: test with 1 board missing in the middle and each end + // to see how that affects the sequence numbers + for (i = 0; i < buf[0]; i++) { + struct cgpu_info *tmp = usb_copy_cgpu(modminer); + + sprintf(devpath, "%d:%d:%d", + (int)(modminer->usbinfo.bus_number), + (int)(modminer->usbinfo.device_address), + i); + + tmp->device_path = strdup(devpath); + + // Only the first copy gets the already used stats + if (added) + tmp->usbinfo.usbstat = USB_NOSTAT; + + tmp->fpgaid = (char)i; + tmp->modminer_mutex = modminer->modminer_mutex; + + if (!add_cgpu(tmp)) { + tmp = usb_free_cgpu(tmp); + goto unshin; + } + + update_usb_stats(tmp); + + added = true; + } + + modminer = usb_free_cgpu(modminer); + + return modminer; + +unshin: + if (!added) + usb_uninit(modminer); + +shin: + if (!added) { + free(modminer->modminer_mutex); + modminer->modminer_mutex = NULL; + } + + modminer = usb_free_cgpu(modminer); + + if (added) + return modminer; + else + return NULL; +} + +static void modminer_detect(bool __maybe_unused hotplug) +{ + usb_detect(&modminer_drv, modminer_detect_one); +} + +static bool get_expect(struct cgpu_info *modminer, FILE *f, char c) +{ + char buf; + + if (fread(&buf, 1, 1, f) != 1) { + applog(LOG_ERR, "%s%u: Error (%d) reading bitstream (%c)", + modminer->drv->name, modminer->device_id, errno, c); + return false; + } + + if (buf != c) { + applog(LOG_ERR, "%s%u: bitstream code mismatch (%c)", + modminer->drv->name, modminer->device_id, c); + return false; + } + + return true; 
+} + +static bool get_info(struct cgpu_info *modminer, FILE *f, char *buf, int bufsiz, const char *name) +{ + unsigned char siz[2]; + int len; + + if (fread(siz, 2, 1, f) != 1) { + applog(LOG_ERR, "%s%u: Error (%d) reading bitstream '%s' len", + modminer->drv->name, modminer->device_id, errno, name); + return false; + } + + len = siz[0] * 256 + siz[1]; + + if (len >= bufsiz) { + applog(LOG_ERR, "%s%u: Bitstream '%s' len too large (%d)", + modminer->drv->name, modminer->device_id, name, len); + return false; + } + + if (fread(buf, len, 1, f) != 1) { + applog(LOG_ERR, "%s%u: Error (%d) reading bitstream '%s'", + modminer->drv->name, modminer->device_id, errno, name); + return false; + } + + buf[len] = '\0'; + + return true; +} + +#define USE_DEFAULT_TIMEOUT 0 + +// mutex must always be locked before calling +static bool get_status_timeout(struct cgpu_info *modminer, char *msg, unsigned int timeout, enum usb_cmds cmd) +{ + int err, amount; + char buf[1]; + + if (timeout == USE_DEFAULT_TIMEOUT) + err = usb_read(modminer, buf, 1, &amount, cmd); + else + err = usb_read_timeout(modminer, buf, 1, &amount, timeout, cmd); + + if (err < 0 || amount != 1) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Error (%d:%d) getting %s reply", + modminer->drv->name, modminer->device_id, amount, err, msg); + + return false; + } + + if (buf[0] != 1) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Error, invalid %s reply (was %d should be 1)", + modminer->drv->name, modminer->device_id, msg, buf[0]); + + return false; + } + + return true; +} + +// mutex must always be locked before calling +static bool get_status(struct cgpu_info *modminer, char *msg, enum usb_cmds cmd) +{ + return get_status_timeout(modminer, msg, USE_DEFAULT_TIMEOUT, cmd); +} + +static bool modminer_fpga_upload_bitstream(struct cgpu_info *modminer) +{ + const char *bsfile = BITSTREAM_FILENAME; + char buf[0x100], *p; + char devmsg[64]; + unsigned char *ubuf = (unsigned char 
*)buf; + unsigned long totlen, len; + size_t buflen, remaining; + float nextmsg, upto; + char fpgaid = FPGAID_ALL; + int err, amount, tries; + char *ptr; + + FILE *f = open_bitstream("modminer", bsfile); + if (!f) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Error (%d) opening bitstream file %s", + modminer->drv->name, modminer->device_id, errno, bsfile); + + return false; + } + + if (fread(buf, 2, 1, f) != 1) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Error (%d) reading bitstream magic", + modminer->drv->name, modminer->device_id, errno); + + goto dame; + } + + if (buf[0] != BITSTREAM_MAGIC_0 || buf[1] != BITSTREAM_MAGIC_1) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: bitstream has incorrect magic (%u,%u) instead of (%u,%u)", + modminer->drv->name, modminer->device_id, + buf[0], buf[1], + BITSTREAM_MAGIC_0, BITSTREAM_MAGIC_1); + + goto dame; + } + + if (fseek(f, 11L, SEEK_CUR)) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Error (%d) bitstream seek failed", + modminer->drv->name, modminer->device_id, errno); + + goto dame; + } + + if (!get_expect(modminer, f, 'a')) + goto undame; + + if (!get_info(modminer, f, buf, sizeof(buf), "Design name")) + goto undame; + + applog(LOG_DEBUG, "%s%u: bitstream file '%s' info:", + modminer->drv->name, modminer->device_id, bsfile); + + applog(LOG_DEBUG, " Design name: '%s'", buf); + + p = strrchr(buf, ';') ? : buf; + p = strrchr(buf, '=') ? 
: p; + if (p[0] == '=') + p++; + + unsigned long fwusercode = (unsigned long)strtoll(p, &p, 16); + + if (p[0] != '\0') { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Bad usercode in bitstream file", + modminer->drv->name, modminer->device_id); + + goto dame; + } + + if (fwusercode == 0xffffffff) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: bitstream doesn't support user code", + modminer->drv->name, modminer->device_id); + + goto dame; + } + + applog(LOG_DEBUG, " Version: %lu, build %lu", (fwusercode >> 8) & 0xff, fwusercode & 0xff); + + if (!get_expect(modminer, f, 'b')) + goto undame; + + if (!get_info(modminer, f, buf, sizeof(buf), "Part number")) + goto undame; + + applog(LOG_DEBUG, " Part number: '%s'", buf); + + if (!get_expect(modminer, f, 'c')) + goto undame; + + if (!get_info(modminer, f, buf, sizeof(buf), "Build date")) + goto undame; + + applog(LOG_DEBUG, " Build date: '%s'", buf); + + if (!get_expect(modminer, f, 'd')) + goto undame; + + if (!get_info(modminer, f, buf, sizeof(buf), "Build time")) + goto undame; + + applog(LOG_DEBUG, " Build time: '%s'", buf); + + if (!get_expect(modminer, f, 'e')) + goto undame; + + if (fread(buf, 4, 1, f) != 1) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Error (%d) reading bitstream data len", + modminer->drv->name, modminer->device_id, errno); + + goto dame; + } + + len = ((unsigned long)ubuf[0] << 24) | ((unsigned long)ubuf[1] << 16) | (ubuf[2] << 8) | ubuf[3]; + applog(LOG_DEBUG, " Bitstream size: %lu", len); + + strcpy(devmsg, modminer->device_path); + ptr = strrchr(devmsg, ':'); + if (ptr) + *ptr = '\0'; + + applog(LOG_WARNING, "%s%u: Programming all FPGA on %s ... 
Mining will not start until complete", + modminer->drv->name, modminer->device_id, devmsg); + + buf[0] = MODMINER_PROGRAM; + buf[1] = fpgaid; + buf[2] = (len >> 0) & 0xff; + buf[3] = (len >> 8) & 0xff; + buf[4] = (len >> 16) & 0xff; + buf[5] = (len >> 24) & 0xff; + + if ((err = usb_write(modminer, buf, 6, &amount, C_STARTPROGRAM)) < 0 || amount != 6) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Program init failed (%d:%d)", + modminer->drv->name, modminer->device_id, amount, err); + + goto dame; + } + + if (!get_status(modminer, "initialise", C_STARTPROGRAMSTATUS)) + goto undame; + +// It must be 32 bytes according to MCU legacy.c +#define WRITE_SIZE 32 + + totlen = len; + nextmsg = 0.1; + while (len > 0) { + buflen = len < WRITE_SIZE ? len : WRITE_SIZE; + if (fread(buf, buflen, 1, f) != 1) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: bitstream file read error %d (%lu bytes left)", + modminer->drv->name, modminer->device_id, errno, len); + + goto dame; + } + + tries = 0; + ptr = buf; + remaining = buflen; + while ((err = usb_write(modminer, ptr, remaining, &amount, C_PROGRAM)) < 0 || amount != (int)remaining) { + if (err == LIBUSB_ERROR_TIMEOUT && amount > 0 && ++tries < 4) { + remaining -= amount; + ptr += amount; + + if (opt_debug) + applog(LOG_DEBUG, "%s%u: Program timeout (%d:%d) sent %d tries %d", + modminer->drv->name, modminer->device_id, + amount, err, (int)remaining, tries); + + if (!get_status(modminer, "write status", C_PROGRAMSTATUS2)) + goto dame; + + } else { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Program failed (%d:%d) sent %d", + modminer->drv->name, modminer->device_id, amount, err, (int)remaining); + + goto dame; + } + } + + if (!get_status(modminer, "write status", C_PROGRAMSTATUS)) + goto dame; + + len -= buflen; + + upto = (float)(totlen - len) / (float)(totlen); + if (upto >= nextmsg) { + applog(LOG_WARNING, + "%s%u: Programming %.1f%% (%lu out of %lu)", + 
modminer->drv->name, modminer->device_id, upto*100, (totlen - len), totlen); + + nextmsg += 0.1; + } + } + + if (!get_status(modminer, "final status", C_FINALPROGRAMSTATUS)) + goto undame; + + applog(LOG_WARNING, "%s%u: Programming completed for all FPGA on %s", + modminer->drv->name, modminer->device_id, devmsg); + + // Give it a 2/3s delay after programming + cgsleep_ms(666); + + usb_set_dev_start(modminer); + + return true; +undame: + ; + mutex_unlock(modminer->modminer_mutex); + ; +dame: + fclose(f); + return false; +} + +static bool modminer_fpga_prepare(struct thr_info *thr) +{ +// struct cgpu_info *modminer = thr->cgpu; + struct modminer_fpga_state *state; + + state = thr->cgpu_data = calloc(1, sizeof(struct modminer_fpga_state)); + state->shares_to_good = MODMINER_EARLY_UP; + state->overheated = false; + + return true; +} + +/* + * Clocking rules: + * If device exceeds cutoff or overheat temp - stop sending work until it cools + * decrease the clock by MODMINER_CLOCK_CUTOFF/MODMINER_CLOCK_OVERHEAT + * for when it restarts + * with MODMINER_CLOCK_OVERHEAT=0 basically says that temp shouldn't + * affect the clock unless we reach CUTOFF + * + * If device overheats + * set shares_to_good back to MODMINER_MIN_BACK + * to speed up clock recovery if temp drop doesnt help + * + * When to clock down: + * If device gets MODMINER_HW_ERROR_PERCENT errors since last clock up or down + * if clock is <= default it requires 2 HW to do this test + * if clock is > default it only requires 1 HW to do this test + * also double shares_to_good + * + * When to clock up: + * If device gets shares_to_good good shares in a row + * and temp < MODMINER_TEMP_UP_LIMIT + * + * N.B. 
/* Static reason strings returned by modminer_delta_clock() on failure
 * (NULL return means the clock change succeeded). */
static const char *clocknodev = "clock failed - no device";
static const char *clockoldwork = "clock already changed for this work";
static const char *clocktoolow = "clock too low";
static const char *clocktoohi = "clock too high";
static const char *clocksetfail = "clock set command failed";
static const char *clockreplyfail = "clock reply failed";

/*
 * Adjust the FPGA clock by 'delta' MHz and send the new speed to the device.
 *
 * delta: signed change in MHz (always a multiple of 2 per the rules above;
 *        0 re-sends the current clock, as done from modminer_fpga_init)
 * temp:  true when the change is temperature-driven (resets shares_to_good
 *        to MODMINER_MIN_BACK instead of doubling it)
 * force: bypass the once-per-work guard
 *
 * Returns NULL on success, or one of the static reason strings above.
 * Note: the per-work share/error counters are reset even when the change
 * is then rejected for being out of range — presumably intentional, since
 * the "once per work" decision has already been consumed.
 */
static const char *modminer_delta_clock(struct thr_info *thr, int delta, bool temp, bool force)
{
	struct cgpu_info *modminer = thr->cgpu;
	struct modminer_fpga_state *state = thr->cgpu_data;
	unsigned char cmd[6], buf[1];
	int err, amount;

	// Device is gone
	if (modminer->usbinfo.nodev)
		return clocknodev;

	// Only do once if multiple shares per work or multiple reasons
	if (!state->new_work && !force)
		return clockoldwork;

	state->new_work = false;

	state->shares = 0;
	state->shares_last_hw = 0;
	state->hw_errors = 0;

	// FYI clock drop has little effect on temp
	if (delta < 0 && (modminer->clock + delta) < MODMINER_MIN_CLOCK)
		return clocktoolow;

	if (delta > 0 && (modminer->clock + delta) > MODMINER_MAX_CLOCK)
		return clocktoohi;

	// Clocking down: back off the clock-up threshold (or pin it low when
	// the downclock was temperature-driven) so recovery pacing adapts
	if (delta < 0) {
		if (temp)
			state->shares_to_good = MODMINER_MIN_BACK;
		else {
			if ((state->shares_to_good * 2) < MODMINER_TRY_UP)
				state->shares_to_good *= 2;
			else
				state->shares_to_good = MODMINER_TRY_UP;
		}
	}

	modminer->clock += delta;

	// 6-byte command: opcode, FPGA id, clock (fits in a byte: max is
	// MODMINER_MAX_CLOCK = 230), 3 zero pad bytes
	cmd[0] = MODMINER_SET_CLOCK;
	cmd[1] = modminer->fpgaid;
	cmd[2] = modminer->clock;
	cmd[3] = cmd[4] = cmd[5] = '\0';

	mutex_lock(modminer->modminer_mutex);

	if ((err = usb_write(modminer, (char *)cmd, 6, &amount, C_SETCLOCK)) < 0 || amount != 6) {
		mutex_unlock(modminer->modminer_mutex);

		applog(LOG_ERR, "%s%u: Error writing set clock speed (%d:%d)",
				modminer->drv->name, modminer->device_id, amount, err);

		return clocksetfail;
	}

	if ((err = usb_read(modminer, (char *)(&buf), 1, &amount, C_REPLYSETCLOCK)) < 0 || amount != 1) {
		mutex_unlock(modminer->modminer_mutex);

		applog(LOG_ERR, "%s%u: Error reading set clock speed (%d:%d)",
				modminer->drv->name, modminer->device_id, amount, err);

		return clockreplyfail;
	}

	mutex_unlock(modminer->modminer_mutex);

	applog(LOG_WARNING, "%s%u: Set clock speed %sto %u",
			modminer->drv->name, modminer->device_id,
			(delta < 0) ? "down " : (delta > 0 ? "up " : ""),
			modminer->clock);

	return NULL;
}

/*
 * Per-thread init: check the FPGA's USER code and upload the bitstream if
 * it is not programmed, then set the default clock.
 *
 * Mutex hand-off: modminer_fpga_upload_bitstream() is entered with the
 * mutex held; it unlocks on its own failure paths (hence the bare
 * 'return false' here) and returns with the mutex still held on success,
 * which is why this function unlocks after the call.
 */
static bool modminer_fpga_init(struct thr_info *thr)
{
	struct cgpu_info *modminer = thr->cgpu;
	unsigned char cmd[2], buf[4];
	int err, amount;

	mutex_lock(modminer->modminer_mutex);

	cmd[0] = MODMINER_GET_USERCODE;
	cmd[1] = modminer->fpgaid;
	if ((err = usb_write(modminer, (char *)cmd, 2, &amount, C_REQUESTUSERCODE)) < 0 || amount != 2) {
		mutex_unlock(modminer->modminer_mutex);

		applog(LOG_ERR, "%s%u: Error requesting USER code (%d:%d)",
				modminer->drv->name, modminer->device_id, amount, err);

		return false;
	}

	if ((err = usb_read(modminer, (char *)buf, 4, &amount, C_GETUSERCODE)) < 0 || amount != 4) {
		mutex_unlock(modminer->modminer_mutex);

		applog(LOG_ERR, "%s%u: Error reading USER code (%d:%d)",
				modminer->drv->name, modminer->device_id, amount, err);

		return false;
	}

	if (memcmp(buf, BISTREAM_USER_ID, 4)) {
		applog(LOG_ERR, "%s%u: FPGA not programmed",
				modminer->drv->name, modminer->device_id);

		// on failure the mutex has already been released inside
		if (!modminer_fpga_upload_bitstream(modminer))
			return false;

		mutex_unlock(modminer->modminer_mutex);
	} else {
		mutex_unlock(modminer->modminer_mutex);

		applog(LOG_DEBUG, "%s%u: FPGA is already programmed :)",
				modminer->drv->name, modminer->device_id);
	}

	modminer->clock = MODMINER_DEF_CLOCK;
	modminer_delta_clock(thr, MODMINER_CLOCK_SET, false, false);

	thr->primary_thread = true;

	return true;
}

static void get_modminer_statline_before(char *buf, size_t bufsiz, struct cgpu_info *modminer)
{
	tailsprintf(buf, bufsiz, "%s%.1fC %3uMHz",
			(modminer->temp < 10) ?
" " : "", + modminer->temp, + (unsigned int)(modminer->clock)); +} + +static bool modminer_start_work(struct thr_info *thr, struct work *work) +{ + struct cgpu_info *modminer = thr->cgpu; + struct modminer_fpga_state *state = thr->cgpu_data; + int err, amount; + char cmd[48]; + bool sta; + + cmd[0] = MODMINER_SEND_WORK; + cmd[1] = modminer->fpgaid; + memcpy(&cmd[2], work->midstate, 32); + memcpy(&cmd[34], work->data + 64, 12); + + if (state->first_work.tv_sec == 0) + cgtime(&state->first_work); + + if (state->last_nonce.tv_sec == 0) + cgtime(&state->last_nonce); + + mutex_lock(modminer->modminer_mutex); + + if ((err = usb_write(modminer, cmd, 46, &amount, C_SENDWORK)) < 0 || amount != 46) { + mutex_unlock(modminer->modminer_mutex); + + applog(LOG_ERR, "%s%u: Start work failed (%d:%d)", + modminer->drv->name, modminer->device_id, amount, err); + + return false; + } + + cgtime(&state->tv_workstart); + + sta = get_status(modminer, "start work", C_SENDWORKSTATUS); + + if (sta) { + mutex_unlock(modminer->modminer_mutex); + state->new_work = true; + } + + return sta; +} + +static void check_temperature(struct thr_info *thr) +{ + struct cgpu_info *modminer = thr->cgpu; + struct modminer_fpga_state *state = thr->cgpu_data; + char cmd[2], temperature[2]; + int tbytes, tamount; + int amount; + + // Device is gone + if (modminer->usbinfo.nodev) + return; + + if (state->one_byte_temp) { + cmd[0] = MODMINER_TEMP1; + tbytes = 1; + } else { + cmd[0] = MODMINER_TEMP2; + tbytes = 2; + } + + cmd[1] = modminer->fpgaid; + + mutex_lock(modminer->modminer_mutex); + if (usb_write(modminer, (char *)cmd, 2, &amount, C_REQUESTTEMPERATURE) == 0 && amount == 2 && + usb_read(modminer, (char *)(&temperature), tbytes, &tamount, C_GETTEMPERATURE) == 0 && tamount == tbytes) { + mutex_unlock(modminer->modminer_mutex); + if (state->one_byte_temp) + modminer->temp = temperature[0]; + else { + // Only accurate to 2 and a bit places + modminer->temp = roundf((temperature[1] * 256.0 + temperature[0]) / 
0.128) / 1000.0; + + state->tried_two_byte_temp = true; + } + + if (state->overheated) { + // Limit recovery to lower than OVERHEAT so it doesn't just go straight over again + if (modminer->temp < MODMINER_RECOVER_TEMP) { + state->overheated = false; + applog(LOG_WARNING, "%s%u: Recovered, temp less than (%.1f) now %.3f", + modminer->drv->name, modminer->device_id, + MODMINER_RECOVER_TEMP, modminer->temp); + } + } + else if (modminer->temp >= MODMINER_OVERHEAT_TEMP) { + if (modminer->temp >= MODMINER_CUTOFF_TEMP) { + applog(LOG_WARNING, "%s%u: Hit thermal cutoff limit! (%.1f) at %.3f", + modminer->drv->name, modminer->device_id, + MODMINER_CUTOFF_TEMP, modminer->temp); + + modminer_delta_clock(thr, MODMINER_CLOCK_CUTOFF, true, false); + state->overheated = true; + dev_error(modminer, REASON_DEV_THERMAL_CUTOFF); + } else { + applog(LOG_WARNING, "%s%u: Overheat limit (%.1f) reached %.3f", + modminer->drv->name, modminer->device_id, + MODMINER_OVERHEAT_TEMP, modminer->temp); + + // If it's defined to be 0 then don't call modminer_delta_clock() + if (MODMINER_CLOCK_OVERHEAT != 0) + modminer_delta_clock(thr, MODMINER_CLOCK_OVERHEAT, true, false); + state->overheated = true; + dev_error(modminer, REASON_DEV_OVER_HEAT); + } + } + } else { + mutex_unlock(modminer->modminer_mutex); + + if (!state->tried_two_byte_temp) { + state->tried_two_byte_temp = true; + state->one_byte_temp = true; + } + } +} + +#define work_restart(thr) thr->work_restart + +// 250Mhz is 17.17s - ensure we don't go idle +static const double processtime = 17.0; +// 160Mhz is 26.84 - when overheated ensure we don't throw away shares +static const double overheattime = 26.9; + +static uint64_t modminer_process_results(struct thr_info *thr, struct work *work) +{ + struct cgpu_info *modminer = thr->cgpu; + struct modminer_fpga_state *state = thr->cgpu_data; + struct timeval now; + char cmd[2]; + uint32_t nonce; + uint32_t curr_hw_errors; + int err, amount, amount2; + int timeoutloop; + double timeout; + int 
temploop; + + // Device is gone + if (modminer->usbinfo.nodev) + return -1; + + // If we are overheated it will just keep checking for results + // since we can't stop the work + // The next work will not start until the temp drops + check_temperature(thr); + + cmd[0] = MODMINER_CHECK_WORK; + cmd[1] = modminer->fpgaid; + + timeoutloop = 0; + temploop = 0; + while (0x80085) { + mutex_lock(modminer->modminer_mutex); + if ((err = usb_write(modminer, cmd, 2, &amount, C_REQUESTWORKSTATUS)) < 0 || amount != 2) { + mutex_unlock(modminer->modminer_mutex); + + // timeoutloop never resets so the timeouts can't + // accumulate much during a single item of work + if (err == LIBUSB_ERROR_TIMEOUT && ++timeoutloop < 5) { + state->timeout_fail++; + goto tryagain; + } + + applog(LOG_ERR, "%s%u: Error sending (get nonce) (%d:%d)", + modminer->drv->name, modminer->device_id, amount, err); + + return -1; + } + + err = usb_read(modminer, (char *)(&nonce), 4, &amount, C_GETWORKSTATUS); + while (err == LIBUSB_SUCCESS && amount < 4) { + size_t remain = 4 - amount; + char *pos = ((char *)(&nonce)) + amount; + + state->success_more++; + + err = usb_read(modminer, pos, remain, &amount2, C_GETWORKSTATUS); + + amount += amount2; + } + mutex_unlock(modminer->modminer_mutex); + + if (err < 0 || amount < 4) { + // timeoutloop never resets so the timeouts can't + // accumulate much during a single item of work + if (err == LIBUSB_ERROR_TIMEOUT && ++timeoutloop < 10) { + state->timeout_fail++; + goto tryagain; + } + + applog(LOG_ERR, "%s%u: Error reading (get nonce) (%d:%d)", + modminer->drv->name, modminer->device_id, amount+amount2, err); + } + + if (memcmp(&nonce, "\xff\xff\xff\xff", 4)) { + // found 'something' ... 
+ state->shares++; + curr_hw_errors = state->hw_errors; + submit_nonce(thr, work, nonce); + if (state->hw_errors > curr_hw_errors) { + cgtime(&now); + // Ignore initial errors that often happen + if (tdiff(&now, &state->first_work) < 2.0) { + state->shares = 0; + state->shares_last_hw = 0; + state->hw_errors = 0; + } else { + state->shares_last_hw = state->shares; + if (modminer->clock > MODMINER_DEF_CLOCK || state->hw_errors > 1) { + float pct = (state->hw_errors * 100.0 / (state->shares ? : 1.0)); + if (pct >= MODMINER_HW_ERROR_PERCENT) + modminer_delta_clock(thr, MODMINER_CLOCK_DOWN, false, false); + } + } + } else { + cgtime(&state->last_nonce); + state->death_stage_one = false; + // If we've reached the required good shares in a row then clock up + if (((state->shares - state->shares_last_hw) >= state->shares_to_good) && + modminer->temp < MODMINER_TEMP_UP_LIMIT) + modminer_delta_clock(thr, MODMINER_CLOCK_UP, false, false); + } + } else { + // on rare occasions - the MMQ can just stop returning valid nonces + double death = ITS_DEAD_JIM * (state->death_stage_one ? 
2.0 : 1.0); + cgtime(&now); + if (tdiff(&now, &state->last_nonce) >= death) { + if (state->death_stage_one) { + modminer_delta_clock(thr, MODMINER_CLOCK_DEAD, false, true); + applog(LOG_ERR, "%s%u: DEATH clock down", + modminer->drv->name, modminer->device_id); + + // reset the death info and DISABLE it + state->last_nonce.tv_sec = 0; + state->last_nonce.tv_usec = 0; + state->death_stage_one = false; + return -1; + } else { + modminer_delta_clock(thr, MODMINER_CLOCK_DEAD, false, true); + applog(LOG_ERR, "%s%u: death clock down", + modminer->drv->name, modminer->device_id); + + state->death_stage_one = true; + } + } + } + +tryagain: + + if (work_restart(thr)) + break; + + if (state->overheated == true) { + // don't check every time (every ~1/2 sec) + if (++temploop > 4) { + check_temperature(thr); + temploop = 0; + } + + } + + if (state->overheated == true) + timeout = overheattime; + else + timeout = processtime; + + cgtime(&now); + if (tdiff(&now, &state->tv_workstart) > timeout) + break; + + // 1/10th sec to lower CPU usage + cgsleep_ms(100); + if (work_restart(thr)) + break; + } + + struct timeval tv_workend, elapsed; + cgtime(&tv_workend); + timersub(&tv_workend, &state->tv_workstart, &elapsed); + + // Not exact since the clock may have changed ... 
but close enough I guess + uint64_t hashes = (uint64_t)modminer->clock * (((uint64_t)elapsed.tv_sec * 1000000) + elapsed.tv_usec); + // Overheat will complete the nonce range + if (hashes > 0xffffffff) + hashes = 0xffffffff; + + work->nonce = 0xffffffff; + + return hashes; +} + +static int64_t modminer_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce) +{ + struct modminer_fpga_state *state = thr->cgpu_data; + struct timeval tv1, tv2; + int64_t hashes; + + // Device is gone + if (thr->cgpu->usbinfo.nodev) + return -1; + + // Don't start new work if overheated + if (state->overheated == true) { + cgtime(&tv1); + + while (state->overheated == true) { + check_temperature(thr); + + // Device is gone + if (thr->cgpu->usbinfo.nodev) + return -1; + + if (state->overheated == true) { + cgtime(&tv2); + + // give up on this work item after 30s + if (work_restart(thr) || tdiff(&tv2, &tv1) > 30) + return 0; + + // Give it 1s rest then check again + cgsleep_ms(1000); + } + } + } + + if (!modminer_start_work(thr, work)) + return -1; + + hashes = modminer_process_results(thr, work); + if (hashes == -1) + return hashes; + + return hashes; +} + +static void modminer_hw_error(struct thr_info *thr) +{ + struct modminer_fpga_state *state = thr->cgpu_data; + + state->hw_errors++; +} + +static void modminer_fpga_shutdown(struct thr_info *thr) +{ + free(thr->cgpu_data); + thr->cgpu_data = NULL; +} + +static char *modminer_set_device(struct cgpu_info *modminer, char *option, char *setting, char *replybuf) +{ + const char *ret; + int val; + + if (strcasecmp(option, "help") == 0) { + sprintf(replybuf, "clock: range %d-%d and a multiple of 2", + MODMINER_MIN_CLOCK, MODMINER_MAX_CLOCK); + return replybuf; + } + + if (strcasecmp(option, "clock") == 0) { + if (!setting || !*setting) { + sprintf(replybuf, "missing clock setting"); + return replybuf; + } + + val = atoi(setting); + if (val < MODMINER_MIN_CLOCK || val > MODMINER_MAX_CLOCK || (val & 1) != 0) { + 
sprintf(replybuf, "invalid clock: '%s' valid range %d-%d and a multiple of 2", + setting, MODMINER_MIN_CLOCK, MODMINER_MAX_CLOCK); + return replybuf; + } + + val -= (int)(modminer->clock); + + ret = modminer_delta_clock(modminer->thr[0], val, false, true); + if (ret) { + sprintf(replybuf, "Set clock failed: %s", ret); + return replybuf; + } else + return NULL; + } + + sprintf(replybuf, "Unknown option: %s", option); + return replybuf; +} + +struct device_drv modminer_drv = { + .drv_id = DRIVER_modminer, + .dname = "ModMiner", + .name = "MMQ", + .drv_detect = modminer_detect, + .get_statline_before = get_modminer_statline_before, + .set_device = modminer_set_device, + .thread_prepare = modminer_fpga_prepare, + .thread_init = modminer_fpga_init, + .scanhash = modminer_scanhash, + .hw_error = modminer_hw_error, + .thread_shutdown = modminer_fpga_shutdown, +}; diff --git a/driver-spondoolies-sp10-p.c b/driver-spondoolies-sp10-p.c new file mode 100644 index 0000000..c37e171 --- /dev/null +++ b/driver-spondoolies-sp10-p.c @@ -0,0 +1,44 @@ +/* + * Copyright 2014 Con Kolivas + * Copyright 2014 Zvi (Zvisha) Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ * + * Note that changing this SW will void your miners guaranty + */ + +/* + This file holds functions needed for minergate packet parsing/creation + by Zvisha Shteingart +*/ + +#include "driver-spondoolies-sp10-p.h" +#include "assert.h" +//#include "spond_debug.h" + +minergate_req_packet *allocate_minergate_packet_req(uint8_t requester_id, uint8_t request_id) +{ + minergate_req_packet *p = (minergate_req_packet*)malloc(sizeof(minergate_req_packet)); + p->requester_id = requester_id; + p->req_count = 0; + p->protocol_version = MINERGATE_PROTOCOL_VERSION; + p->request_id = request_id; + p->magic = 0xcaf4; + p->mask |= 0x01; // first packet + return p; +} + +minergate_rsp_packet *allocate_minergate_packet_rsp(uint8_t requester_id, uint8_t request_id) +{ + minergate_rsp_packet *p = (minergate_rsp_packet*)malloc(sizeof(minergate_rsp_packet)); + p->requester_id = requester_id; + p->rsp_count = 0; + p->protocol_version = MINERGATE_PROTOCOL_VERSION; + p->request_id = request_id; + p->magic = 0xcaf4; + p->gh_div_10_rate = 0; + return p; +} diff --git a/driver-spondoolies-sp10-p.h b/driver-spondoolies-sp10-p.h new file mode 100644 index 0000000..1476bc2 --- /dev/null +++ b/driver-spondoolies-sp10-p.h @@ -0,0 +1,92 @@ +/* + * Copyright 2014 Con Kolivas + * Copyright 2014 Zvi (Zvisha) Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ * + * Note that changing this SW will void your miners guaranty + */ + +#ifndef ____MINERGATE_LIB_H___ +#define ____MINERGATE_LIB_H___ + +//#include "squid.h" +#include +#include +#include +#include +#include +#include +#include +#include +//#include "queue.h" +//#include "spond_debug.h" + +#ifndef passert +#define passert assert +#endif + +#define MINERGATE_PROTOCOL_VERSION 6 +#define MINERGATE_SOCKET_FILE "/tmp/connection_pipe" + +typedef enum { + //MINERGATE_DATA_ID_CONNECT = 1, + MINERGATE_DATA_ID_DO_JOB_REQ = 2, + MINERGATE_DATA_ID_DO_JOB_RSP = 3, + +} MINERGATE_DATA_ID; + +typedef struct { + uint32_t work_id_in_sw; + uint32_t difficulty; + uint32_t timestamp; + uint32_t mrkle_root; + uint32_t midstate[8]; + uint8_t leading_zeroes; + uint8_t ntime_limit; + uint8_t ntime_offset; + uint8_t resr1; +} minergate_do_job_req; + +#define MAX_REQUESTS 100 +#define MAX_RESPONDS 300 +#define MINERGATE_TOTAL_QUEUE 300 + +typedef struct { + uint32_t work_id_in_sw; + uint32_t mrkle_root; // to validate + uint32_t winner_nonce[2]; + uint8_t ntime_offset; + uint8_t res; // 0 = done, 1 = overflow, 2 = dropped bist + uint8_t resrv1; + uint8_t resrv2; +} minergate_do_job_rsp; + + +typedef struct { + uint8_t requester_id; + uint8_t request_id; + uint8_t protocol_version; + uint8_t mask; // 0x01 = first request, 0x2 = drop old work + uint16_t magic; // 0xcafe + uint16_t req_count; + minergate_do_job_req req[MAX_REQUESTS]; // array of requests +} minergate_req_packet; + +typedef struct { + uint8_t requester_id; + uint8_t request_id; + uint8_t protocol_version; + uint8_t gh_div_10_rate; // == + uint16_t magic; // 0xcafe + uint16_t rsp_count; + minergate_do_job_rsp rsp[MAX_RESPONDS]; // array of responce +} minergate_rsp_packet; + +minergate_req_packet *allocate_minergate_packet_req(uint8_t requester_id, uint8_t request_id); +minergate_rsp_packet *allocate_minergate_packet_rsp(uint8_t requester_id, uint8_t request_id); + +#endif diff --git a/driver-spondoolies-sp10.c 
b/driver-spondoolies-sp10.c new file mode 100644 index 0000000..3ffab8d --- /dev/null +++ b/driver-spondoolies-sp10.c @@ -0,0 +1,446 @@ +/* + * Copyright 2014 Con Kolivas + * Copyright 2014 Zvi (Zvisha) Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +/* + This driver communicates the job requests via Unix socket to the minergate + process, that is responsible for controlling the Spondoolies Dawson SP10 miner. + + The jobs sent each with unique ID and returned asynchronously in one of the next + transactions. REQUEST_PERIOD and REQUEST_SIZE define the communication rate with minergate. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#ifdef WIN32 +#include +#endif + +#include "compat.h" +#include "miner.h" +#include "driver-spondoolies-sp10-p.h" +#include "driver-spondoolies-sp10.h" + +#ifdef WORDS_BIGENDIAN +# define swap32tobe(out, in, sz) ((out == in) ? (void)0 : memmove(out, in, sz)) +# define LOCAL_swap32be(type, var, sz) ; +# define swap32tole(out, in, sz) swap32yes(out, in, sz) +# define LOCAL_swap32le(type, var, sz) LOCAL_swap32(type, var, sz) +#else +# define swap32tobe(out, in, sz) swap32yes(out, in, sz) +# define LOCAL_swap32be(type, var, sz) LOCAL_swap32(type, var, sz) +# define swap32tole(out, in, sz) ((out == in) ? 
(void)0 : memmove(out, in, sz))
+# define LOCAL_swap32le(type, var, sz) ;
+#endif
+
+/* Byte-swap sz 32-bit words from in into out. */
+static inline void swap32yes(void *out, const void *in, size_t sz)
+{
+	size_t swapcounter;
+
+	for (swapcounter = 0; swapcounter < sz; ++swapcounter)
+		(((uint32_t*)out)[swapcounter]) = swab32(((uint32_t*)in)[swapcounter]);
+}
+
+/*
+ * Exchange one request/response transaction with the minergate process.
+ * A short read/write or a bad reply magic is unrecoverable, so abort.
+ */
+static void send_minergate_pkt(const minergate_req_packet* mp_req, minergate_rsp_packet* mp_rsp,
+			       int socket_fd)
+{
+	int nbytes, nwrote, nread;
+
+	nbytes = sizeof(minergate_req_packet);
+	nwrote = write(socket_fd, (const void *)mp_req, nbytes);
+	if (unlikely(nwrote != nbytes))
+		_quit(-1);
+	nbytes = sizeof(minergate_rsp_packet);
+	nread = read(socket_fd, (void *)mp_rsp, nbytes);
+	if (unlikely(nread != nbytes))
+		_quit(-1);
+	passert(mp_rsp->magic == 0xcaf4);
+}
+
+static bool spondoolies_prepare(struct thr_info *thr)
+{
+	struct cgpu_info *spondoolies = thr->cgpu;
+	struct timeval now;
+
+	assert(spondoolies);
+	cgtime(&now);
+	/* FIXME: Vladik */
+#if NEED_FIX
+	get_datestamp(spondoolies->init, &now);
+#endif
+	return true;
+}
+
+/*
+ * Connect to the minergate Unix-domain socket.
+ * Returns the connected descriptor, or 0 on any failure (callers treat
+ * return values < 1 as errors).
+ */
+static int init_socket(void)
+{
+	int socket_fd = socket(PF_UNIX, SOCK_STREAM, 0);
+	struct sockaddr_un address;
+
+	if (socket_fd < 0) {
+		printf("socket() failed\n");
+		perror("Err:");
+		return 0;
+	}
+
+	/* start with a clean address structure */
+	memset(&address, 0, sizeof(struct sockaddr_un));
+
+	address.sun_family = AF_UNIX;
+	sprintf(address.sun_path, MINERGATE_SOCKET_FILE);
+
+	if(connect(socket_fd, (struct sockaddr *) &address, sizeof(struct sockaddr_un))) {
+		printf("connect() failed\n");
+		perror("Err:");
+		/* BUGFIX: close the descriptor so a failed connect doesn't leak it */
+		close(socket_fd);
+		return 0;
+	}
+
+	return socket_fd;
+}
+
+static bool spondoolies_flush_queue(struct spond_adapter* a, bool flush_queue)
+{
+	if (!a->parse_resp) {
+		static int i = 0;
+
+		if (i++ % 10 == 0 && a->works_in_minergate_and_pending_tx + a->works_pending_tx != a->works_in_driver)
+			printf("%d + %d != %d\n", a->works_in_minergate_and_pending_tx, a->works_pending_tx,a->works_in_driver);
assert(a->works_in_minergate_and_pending_tx + a->works_pending_tx == a->works_in_driver); + send_minergate_pkt(a->mp_next_req, a->mp_last_rsp, a->socket_fd); + if (flush_queue) + a->mp_next_req->mask |= 0x02; + else + a->mp_next_req->mask &= ~0x02; + + a->mp_next_req->req_count = 0; + a->parse_resp = 1; + a->works_in_minergate_and_pending_tx += a->works_pending_tx; + a->works_pending_tx = 0; + } + return true; +} + +static void spondoolies_detect(__maybe_unused bool hotplug) +{ + struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu)); + struct device_drv *drv = &sp10_drv; + struct spond_adapter *a; + +#if NEED_FIX + nDevs = 1; +#endif + + assert(cgpu); + cgpu->drv = drv; + cgpu->deven = DEV_ENABLED; + cgpu->threads = 1; + cgpu->device_data = calloc(sizeof(struct spond_adapter), 1); + if (unlikely(!(cgpu->device_data))) + quit(1, "Failed to calloc cgpu_info data"); + a = cgpu->device_data; + a->cgpu = (void *)cgpu; + a->adapter_state = ADAPTER_STATE_OPERATIONAL; + a->mp_next_req = allocate_minergate_packet_req(0xca, 0xfe); + a->mp_last_rsp = allocate_minergate_packet_rsp(0xca, 0xfe); + + pthread_mutex_init(&a->lock, NULL); + a->socket_fd = init_socket(); + if (a->socket_fd < 1) { + printf("Error connecting to minergate server!"); + _quit(-1); + } + + assert(add_cgpu(cgpu)); + // Clean MG socket + spondoolies_flush_queue(a, true); + spondoolies_flush_queue(a, true); + spondoolies_flush_queue(a, true); + applog(LOG_DEBUG, "SPOND spondoolies_detect done"); +} + +static struct api_data *spondoolies_api_stats(struct cgpu_info *cgpu) +{ + struct spond_adapter *a = cgpu->device_data; + struct api_data *root = NULL; + + root = api_add_int(root, "ASICs total rate", &a->temp_rate, false); + root = api_add_int(root, "Temperature front", &a->front_temp, false); + root = api_add_int(root, "Temperature rear top", &a->rear_temp_top, false); + root = api_add_int(root, "Temperature rear bot", &a->rear_temp_bot, false); + + return root; +} + +#if 0 +static unsigned char 
get_leading_zeroes(const unsigned char *target) +{ + unsigned char leading = 0; + int first_non_zero_chr; + uint8_t m; + + for (first_non_zero_chr = 31; first_non_zero_chr >= 0; first_non_zero_chr--) { + if (target[first_non_zero_chr] == 0) + leading += 8; + else + break; + } + + // j = first non-zero + m = target[first_non_zero_chr]; + while ((m & 0x80) == 0) { + leading++; + m = m << 1; + } + return leading; +} +#endif + +static void spondoolies_shutdown(__maybe_unused struct thr_info *thr) +{ +} + +static void fill_minergate_request(minergate_do_job_req* work, struct work *cg_work, + int ntime_offset) +{ + uint32_t x[64/4]; + uint64_t wd; + + memset(work, 0, sizeof(minergate_do_job_req)); + //work-> + LOCAL_swap32le(unsigned char, cg_work->midstate, 32/4) + LOCAL_swap32le(unsigned char, cg_work->data+64, 64/4) + swap32yes(x, cg_work->data + 64, 64/4); + memcpy(work->midstate, cg_work->midstate, 32); + work->mrkle_root = ntohl(x[0]); + work->timestamp = ntohl(x[1]); + work->difficulty = ntohl(x[2]); + //work->leading_zeroes = get_leading_zeroes(cg_work->target); + // Is there no better way to get leading zeroes? + work->leading_zeroes = 30; + wd = round(cg_work->work_difficulty); + while (wd) { + work->leading_zeroes++; + wd = wd >> 1; + } + //printf("%d %d\n",work->leading_zeroes, (int)round(cg_work->work_difficulty)); + work->work_id_in_sw = cg_work->subid; + work->ntime_limit = 0; + work->ntime_offset = ntime_offset; +} + +// returns true if queue full. +static struct timeval last_force_queue = {0}; + +static bool spondoolies_queue_full(struct cgpu_info *cgpu) +{ + // Only once every 1/10 second do work. 
+ struct spond_adapter* a = cgpu->device_data; + int next_job_id, ntime_clones, i; + struct timeval tv; + struct work *work; + unsigned int usec; + bool ret = false; + + mutex_lock(&a->lock); + passert(a->works_pending_tx <= REQUEST_SIZE); + + gettimeofday(&tv, NULL); + + usec = (tv.tv_sec-last_force_queue.tv_sec) * 1000000; + usec += (tv.tv_usec-last_force_queue.tv_usec); + + if ((usec >= REQUEST_PERIOD) || (a->reset_mg_queue == 2) || + ((a->reset_mg_queue == 1) && (a->works_pending_tx == REQUEST_SIZE))) { + spondoolies_flush_queue(a, (a->reset_mg_queue == 2)); + if (a->reset_mg_queue) + a->reset_mg_queue--; + last_force_queue = tv; + } + + // see if we have enough jobs + if (a->works_pending_tx == REQUEST_SIZE) { + ret = true; + goto return_unlock; + } + + // see if can take 1 more job. + next_job_id = (a->current_job_id + 1) % MAX_JOBS_IN_MINERGATE; + if (a->my_jobs[next_job_id].cgminer_work) { + ret = true; + goto return_unlock; + } + work = get_queued(cgpu); + if (!work) { + cgsleep_ms(10); + goto return_unlock; + } + + work->thr = cgpu->thr[0]; + work->thr_id = cgpu->thr[0]->id; + assert(work->thr); + + // Create 5 works using ntime increment + a->current_job_id = next_job_id; + work->subid = a->current_job_id; + // Get pointer for the request + a->my_jobs[a->current_job_id].cgminer_work = work; + a->my_jobs[a->current_job_id].state = SPONDWORK_STATE_IN_BUSY; + a->my_jobs[a->current_job_id].ntime_clones = 0; + + ntime_clones = (work->drv_rolllimit < MAX_NROLES) ? 
work->drv_rolllimit : MAX_NROLES; + for (i = 0 ; (i < ntime_clones) && (a->works_pending_tx < REQUEST_SIZE) ; i++) { + minergate_do_job_req* pkt_job = &a->mp_next_req->req[a->works_pending_tx]; + fill_minergate_request(pkt_job, work, i); + a->works_in_driver++; + a->works_pending_tx++; + a->mp_next_req->req_count++; + a->my_jobs[a->current_job_id].merkle_root = pkt_job->mrkle_root; + a->my_jobs[a->current_job_id].ntime_clones++; + } + +return_unlock: + mutex_unlock(&a->lock); + + return ret; +} + +static void spond_poll_stats(struct cgpu_info *spond, struct spond_adapter *a) +{ + FILE *fp = fopen("/var/run/mg_rate_temp", "r"); + + if (!fp) { + applog(LOG_DEBUG, "SPOND unable to open mg_rate_temp"); + a->temp_rate = a->front_temp = a->rear_temp_top = a->rear_temp_bot = 0; + } else { + int ret = fscanf(fp, "%d %d %d %d", &a->temp_rate, &a->front_temp , &a->rear_temp_top , &a->rear_temp_bot); + + if (ret != 4) + a->temp_rate = a->front_temp = a->rear_temp_top = a->rear_temp_bot = 0; + fclose(fp); + } + applog(LOG_DEBUG, "SPOND poll_stats rate: %d front: %d rear(T/B): %d/%d", + a->temp_rate, a->front_temp , a->rear_temp_top, a->rear_temp_bot); + /* Use the rear temperature as the dev temperature for now */ + spond->temp = (a->rear_temp_top + a->rear_temp_bot)/2; +} + +// Return completed work to submit_nonce() and work_completed() +// struct timeval last_force_queue = {0}; +static int64_t spond_scanhash(struct thr_info *thr) +{ + struct cgpu_info *cgpu = thr->cgpu; + struct spond_adapter *a = cgpu->device_data; + int64_t ghashes = 0; + cgtimer_t cgt; + time_t now_t; + + cgsleep_prepare_r(&cgt); + now_t = time(NULL); + /* Poll stats only once per second */ + if (now_t != a->last_stats) { + a->last_stats = now_t; + spond_poll_stats(cgpu, a); + } + + if (a->parse_resp) { + int array_size, i, j; + + mutex_lock(&a->lock); + ghashes = (a->mp_last_rsp->gh_div_10_rate); + ghashes = ghashes * 10000 * REQUEST_PERIOD; + array_size = a->mp_last_rsp->rsp_count; + for (i = 0; i < 
array_size; i++) { // walk the jobs + int job_id; + + minergate_do_job_rsp* work = a->mp_last_rsp->rsp + i; + job_id = work->work_id_in_sw; + if ((a->my_jobs[job_id].cgminer_work)) { + if (a->my_jobs[job_id].merkle_root == work->mrkle_root) { + assert(a->my_jobs[job_id].state == SPONDWORK_STATE_IN_BUSY); + a->works_in_minergate_and_pending_tx--; + a->works_in_driver--; + for (j = 0; j < 2; j++) { + if (work->winner_nonce[j]) { + bool __maybe_unused ok; + struct work *cg_work = a->my_jobs[job_id].cgminer_work; +#ifndef SP_NTIME + ok = submit_nonce(cg_work->thr, cg_work, work->winner_nonce[j]); +#else + ok = submit_noffset_nonce(cg_work->thr, cg_work, work->winner_nonce[j], work->ntime_offset); +#endif + //printf("OK on %d:%d = %d\n",work->work_id_in_sw,j, ok); + a->wins++; + } + } + //printf("%d ntime_clones = %d\n",job_id,a->my_jobs[job_id].ntime_clones); + if ((--a->my_jobs[job_id].ntime_clones) == 0) { + //printf("Done with %d\n", job_id); + work_completed(a->cgpu, a->my_jobs[job_id].cgminer_work); + a->good++; + a->my_jobs[job_id].cgminer_work = NULL; + a->my_jobs[job_id].state = SPONDWORK_STATE_EMPTY; + } + } else { + a->bad++; + printf("Dropping minergate old job id=%d mrkl=%x my-mrkl=%x\n", + job_id, a->my_jobs[job_id].merkle_root, work->mrkle_root); + } + } else { + a->empty++; + printf("No cgminer job (id:%d res:%d)!\n",job_id, work->res); + } + } + mutex_unlock(&a->lock); + + a->parse_resp = 0; + } + cgsleep_ms_r(&cgt, 40); + + return ghashes; +} + +// Remove all work from queue +static void spond_flush_work(struct cgpu_info *cgpu) +{ + struct spond_adapter *a = cgpu->device_data; + + mutex_lock(&a->lock); + a->reset_mg_queue = 2; + mutex_unlock(&a->lock); +} + +struct device_drv sp10_drv = { + .drv_id = DRIVER_sp10, + .dname = "Spondoolies", + .name = "SPN", + .max_diff = 64.0, // Limit max diff to get some nonces back regardless + .drv_detect = spondoolies_detect, + .get_api_stats = spondoolies_api_stats, + .thread_prepare = spondoolies_prepare, + 
.thread_shutdown = spondoolies_shutdown, + .hash_work = hash_queued_work, + .queue_full = spondoolies_queue_full, + .scanwork = spond_scanhash, + .flush_work = spond_flush_work, +}; diff --git a/driver-spondoolies-sp10.h b/driver-spondoolies-sp10.h new file mode 100644 index 0000000..b99fe21 --- /dev/null +++ b/driver-spondoolies-sp10.h @@ -0,0 +1,84 @@ +/* + * Copyright 2014 Con Kolivas + * Copyright 2014 Zvi Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef SPONDA_HFILE +#define SPONDA_HFILE + +#include "miner.h" +#include "driver-spondoolies-sp10-p.h" + + +#define SP_NTIME + +typedef enum adapter_state { + ADAPTER_STATE_INIT, + ADAPTER_STATE_OPERATIONAL, +} ADAPTER_STATE; + +typedef enum spond_work_state { + SPONDWORK_STATE_EMPTY, + SPONDWORK_STATE_IN_BUSY, +} SPONDWORK_STATE; + +#define MAX_JOBS_IN_MINERGATE MINERGATE_TOTAL_QUEUE // 1.5 sec worth of jobs +#define MAX_NROLES 50 + +typedef struct { + struct work *cgminer_work; + SPONDWORK_STATE state; + uint32_t merkle_root; + time_t start_time; + int job_id[MAX_NROLES]; + int ntime_clones; +} spond_driver_work; + +struct spond_adapter { + pthread_mutex_t lock; + ADAPTER_STATE adapter_state; + void *cgpu; + + // Statistics + int wins; + int good; + int empty; + int bad; + int overflow; + // state + int works_in_driver; + int works_in_minergate_and_pending_tx; + int works_pending_tx; + int socket_fd; + int reset_mg_queue; // 2=reset, 1=fast send, 0=nada + int current_job_id; + int parse_resp; + minergate_req_packet* mp_next_req; + minergate_rsp_packet* mp_last_rsp; + spond_driver_work my_jobs[MAX_JOBS_IN_MINERGATE]; + + // Temperature statistics + int temp_rate; + int front_temp; + int rear_temp_top; + int rear_temp_bot; + + // 
Last second we polled stats + time_t last_stats; +}; + +// returns non-zero if needs to change ASICs. +int spond_one_sec_timer_scaling(struct spond_adapter *a, int t); +int spond_do_scaling(struct spond_adapter *a); + +extern void one_sec_spondoolies_watchdog(int uptime); + +#define REQUEST_PERIOD (100000) // times per second - in usec +#define REQUEST_SIZE 100 // jobs per request + +#endif diff --git a/driver-spondoolies-sp30-p.c b/driver-spondoolies-sp30-p.c new file mode 100644 index 0000000..2d6b604 --- /dev/null +++ b/driver-spondoolies-sp30-p.c @@ -0,0 +1,47 @@ +/* + * Copyright 2014 Zvi (Zvisha) Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +/* + This file holds functions needed for minergate packet parsing/creation +*/ + +#include "driver-spondoolies-sp30-p.h" +#include "assert.h" +//#include "spond_debug.h" + +#ifndef passert +#define passert assert +#endif + +minergate_req_packet_sp30 *allocate_minergate_packet_req_sp30(uint8_t requester_id, + uint8_t request_id) { + minergate_req_packet_sp30 *p = + (minergate_req_packet_sp30 *)malloc(sizeof(minergate_req_packet_sp30)); + p->requester_id = requester_id; + p->req_count = 0; + p->protocol_version = MINERGATE_PROTOCOL_VERSION_SP30; + p->request_id = request_id; + p->magic = 0xcaf4; + p->mask = 0; + return p; +} + +minergate_rsp_packet_sp30 *allocate_minergate_packet_rsp_sp30(uint8_t requester_id, + uint8_t request_id) { + + minergate_rsp_packet_sp30 *p = + (minergate_rsp_packet_sp30 *)malloc(sizeof(minergate_rsp_packet_sp30)); + p->requester_id = requester_id; + p->rsp_count = 0; + p->protocol_version = MINERGATE_PROTOCOL_VERSION_SP30; + p->request_id = request_id; + p->magic = 0xcaf4; + p->gh_div_50_rate= 0; + return p; +} diff 
--git a/driver-spondoolies-sp30-p.h b/driver-spondoolies-sp30-p.h new file mode 100644 index 0000000..27f755b --- /dev/null +++ b/driver-spondoolies-sp30-p.h @@ -0,0 +1,83 @@ +/* + * Copyright 2014 Zvi (Zvisha) Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + * + * Note that changing this SW will void your miners guaranty + */ + + +#ifndef ____MINERGATE_LIB30_H___ +#define ____MINERGATE_LIB30_H___ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define MINERGATE_PROTOCOL_VERSION_SP30 30 +#define MINERGATE_SOCKET_FILE_SP30 "/tmp/connection_pipe_sp30" + +typedef enum { + MINERGATE_DATA_ID_DO_JOB_REQ_SP30 = 5, + MINERGATE_DATA_ID_DO_JOB_RSP_SP30 = 6, +} MINERGATE_DATA_ID_SP30; + +typedef struct { + uint32_t work_id_in_sw; + uint32_t difficulty; + uint32_t timestamp; + uint32_t mrkle_root; + uint32_t midstate[8]; + uint8_t leading_zeroes; + uint8_t ntime_limit; // max ntime - should be 60 + uint8_t resr2; + uint8_t resr1; +} minergate_do_job_req_sp30; + +#define MAX_REQUESTS_SP30 30 +#define MAX_RESPONDS_SP30 60 +#define MINERGATE_ADAPTER_QUEUE_SP30 40 + +typedef struct { + uint32_t work_id_in_sw; + uint32_t mrkle_root; // to validate + uint32_t winner_nonce; + uint8_t ntime_offset; + uint8_t res; // 0 = done, 1 = overflow, 2 = dropped bist + uint8_t job_complete; + uint8_t resrv2; +} minergate_do_job_rsp_sp30; + +typedef struct { + uint8_t requester_id; + uint8_t request_id; + uint8_t protocol_version; + uint8_t mask; // 0x01 = first request, 0x2 = drop old work + uint16_t magic; // 0xcaf4 + uint16_t req_count; + minergate_do_job_req_sp30 req[MAX_REQUESTS_SP30]; // array of requests +} minergate_req_packet_sp30; + +typedef struct { + uint8_t requester_id; + 
uint8_t request_id; + uint8_t protocol_version; + uint8_t gh_div_50_rate; + uint16_t magic; // 0xcaf4 + uint16_t rsp_count; + minergate_do_job_rsp_sp30 rsp[MAX_RESPONDS_SP30]; // array of responces +} minergate_rsp_packet_sp30; + +minergate_req_packet_sp30* allocate_minergate_packet_req_sp30(uint8_t requester_id,uint8_t request_id); +minergate_rsp_packet_sp30* allocate_minergate_packet_rsp_sp30(uint8_t requester_id,uint8_t request_id); + +#endif diff --git a/driver-spondoolies-sp30.c b/driver-spondoolies-sp30.c new file mode 100644 index 0000000..f7664d3 --- /dev/null +++ b/driver-spondoolies-sp30.c @@ -0,0 +1,489 @@ +/* + * Copyright 2014 Con Kolivas + * Copyright 2014 Zvi (Zvisha) Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +/* + This driver communicates the job requests via Unix socket to the minergate + process, that is responsible for controlling the Spondoolies Dawson SP10 miner. + + The jobs sent each with unique ID and returned asynchronously in one of the next + transactions. REQUEST_PERIOD and REQUEST_SIZE define the communication rate with minergate. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#ifdef WIN32 +#include +#endif + +#include "compat.h" +#include "miner.h" +#include "driver-spondoolies-sp30-p.h" +#include "driver-spondoolies-sp30.h" + +#ifdef WORDS_BIGENDIAN +# define swap32tobe(out, in, sz) ((out == in) ? 
(void)0 : memmove(out, in, sz)) +# define LOCAL_swap32be(type, var, sz) ; +# define swap32tole(out, in, sz) swap32yes(out, in, sz) +# define LOCAL_swap32le(type, var, sz) LOCAL_swap32(type, var, sz) +#else +# define swap32tobe(out, in, sz) swap32yes(out, in, sz) +# define LOCAL_swap32be(type, var, sz) LOCAL_swap32(type, var, sz) +# define swap32tole(out, in, sz) ((out == in) ? (void)0 : memmove(out, in, sz)) +# define LOCAL_swap32le(type, var, sz) ; +#endif + +static inline void swap32yes(void *out, const void *in, size_t sz) +{ + size_t swapcounter; + + for (swapcounter = 0; swapcounter < sz; ++swapcounter) + (((uint32_t*)out)[swapcounter]) = swab32(((uint32_t*)in)[swapcounter]); +} + +static void send_minergate_pkt(const minergate_req_packet_sp30* mp_req, minergate_rsp_packet_sp30* mp_rsp, + int socket_fd) +{ + int nbytes, nwrote, nread; + + nbytes = sizeof(minergate_req_packet_sp30); + nwrote = write(socket_fd, (const void *)mp_req, nbytes); + if (unlikely(nwrote != nbytes)) + _quit(-1); + nbytes = sizeof(minergate_rsp_packet_sp30); + nread = read(socket_fd, (void *)mp_rsp, nbytes); + if (unlikely(nread != nbytes)) + _quit(-1); + assert(mp_rsp->magic == 0xcaf4); +} + +static bool spondoolies_prepare_sp30(struct thr_info *thr) +{ + struct cgpu_info *spondoolies_sp30 = thr->cgpu; + struct timeval now; + + assert(spondoolies_sp30); + cgtime(&now); + /* FIXME: Vladik */ +#if NEED_FIX + get_datestamp(spondoolies_sp30->init, &now); +#endif + return true; +} + +static int init_socket(void) +{ + int socket_fd; + struct sockaddr_un address; + + printf("Init\n"); + socket_fd = socket(PF_UNIX, SOCK_STREAM, 0); + if (socket_fd < 0) { + printf("socket() failed\n"); + perror("Err:"); + return 0; + } + + /* start with a clean address structure */ + memset(&address, 0, sizeof(struct sockaddr_un)); + + address.sun_family = AF_UNIX; + sprintf(address.sun_path, MINERGATE_SOCKET_FILE_SP30); + + if (connect(socket_fd, (struct sockaddr *) &address, sizeof(struct sockaddr_un))) { + 
printf("connect() failed\n"); + perror("Err:"); + return 0; + } + + return socket_fd; +} + +static bool spondoolies_flush_queue(struct spond_adapter* a, bool flush_queue) +{ + if (!a->parse_resp) { + static int i = 0; + + if (i++ % 10 == 0 && a->works_in_minergate_and_pending_tx + a->works_pending_tx != a->works_in_driver) + printf("%d + %d != %d\n", a->works_in_minergate_and_pending_tx, a->works_pending_tx,a->works_in_driver); + assert(a->works_in_minergate_and_pending_tx + a->works_pending_tx == a->works_in_driver); + send_minergate_pkt(a->mp_next_req, a->mp_last_rsp, a->socket_fd); + if (flush_queue) { + printf("FLUSH!\n"); + a->mp_next_req->mask |= 0x02; + } else { + a->mp_next_req->mask &= ~0x02; + } + + a->mp_next_req->req_count = 0; + a->parse_resp = 1; + a->works_in_minergate_and_pending_tx += a->works_pending_tx; + a->works_pending_tx = 0; + } + return true; +} + +static void spondoolies_detect_sp30(__maybe_unused bool hotplug) +{ + struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu)); + struct device_drv *drv = &sp30_drv; + struct spond_adapter *a; + +#if NEED_FIX + nDevs = 1; +#endif + + assert(cgpu); + cgpu->drv = drv; + cgpu->deven = DEV_ENABLED; + cgpu->threads = 1; + cgpu->device_data = calloc(sizeof(struct spond_adapter), 1); + if (unlikely(!(cgpu->device_data))) + quit(1, "Failed to calloc cgpu_info data"); + a = cgpu->device_data; + a->cgpu = (void *)cgpu; + a->adapter_state = ADAPTER_STATE_OPERATIONAL; + a->mp_next_req = allocate_minergate_packet_req_sp30(0xca, 0xfe); + a->mp_last_rsp = allocate_minergate_packet_rsp_sp30(0xca, 0xfe); + + pthread_mutex_init(&a->lock, NULL); + a->socket_fd = init_socket(); + if (a->socket_fd < 1) { + printf("Error connecting to minergate server!"); + _quit(-1); + } + + assert(add_cgpu(cgpu)); + // Clean MG socket + spondoolies_flush_queue(a, true); + spondoolies_flush_queue(a, true); + spondoolies_flush_queue(a, true); + applog(LOG_DEBUG, "SPOND spondoolies_detect_sp30 done"); +} + +static struct api_data 
*spondoolies_api_stats_sp30(struct cgpu_info *cgpu) +{ + struct spond_adapter *a = cgpu->device_data; + struct api_data *root = NULL; + + root = api_add_int(root, "ASICs total rate", &a->temp_rate, false); + root = api_add_int(root, "Temperature front", &a->front_temp, false); + root = api_add_int(root, "Temperature rear top", &a->rear_temp_top, false); + root = api_add_int(root, "Temperature rear bot", &a->rear_temp_bot, false); + + + + return root; +} + +#if 0 +static unsigned char get_leading_zeroes(const unsigned char *target) +{ + unsigned char leading = 0; + int first_non_zero_chr; + uint8_t m; + + for (first_non_zero_chr = 31; first_non_zero_chr >= 0; first_non_zero_chr--) { + if (target[first_non_zero_chr] == 0) + leading += 8; + else + break; + } + + // j = first non-zero + m = target[first_non_zero_chr]; + while ((m & 0x80) == 0) { + leading++; + m = m << 1; + } + return leading; +} +#endif + +static void spondoolies_shutdown_sp30(__maybe_unused struct thr_info *thr) +{ +} + +static void fill_minergate_request(minergate_do_job_req_sp30* work, struct work *cg_work, int max_offset) +{ + uint32_t x[64 / 4]; + uint64_t wd; + + memset(work, 0, sizeof(minergate_do_job_req_sp30)); + //work-> + LOCAL_swap32le(unsigned char, cg_work->midstate, 32 / 4) + LOCAL_swap32le(unsigned char, cg_work->data + 64, 64 / 4) + swap32yes(x, cg_work->data + 64, 64 / 4); + memcpy(work->midstate, cg_work->midstate, 32); + work->mrkle_root = ntohl(x[0]); + work->timestamp = ntohl(x[1]); + work->difficulty = ntohl(x[2]); + //work->leading_zeroes = get_leading_zeroes(cg_work->target); + // Is there no better way to get leading zeroes? 
+ work->leading_zeroes = 31; + wd = round(cg_work->device_diff); + while (wd) { + work->leading_zeroes++; + wd = wd >> 1; + } + //printf("%d %d\n",work->leading_zeroes, (int)round(cg_work->work_difficulty)); + work->work_id_in_sw = cg_work->subid; + work->ntime_limit = max_offset; + //printf("ID:%d, TS:%x\n",work->work_id_in_sw,work->timestamp); + //work->ntime_offset = ntime_offset; +} + +// returns true if queue full. +static struct timeval last_force_queue; + +unsigned long usec_stamp(void) +{ + static unsigned long long int first_usec = 0; + struct timeval tv; + unsigned long long int curr_usec; + + cgtime(&tv); + curr_usec = tv.tv_sec * 1000000 + tv.tv_usec; + if (first_usec == 0) { + first_usec = curr_usec; + curr_usec = 0; + } else + curr_usec -= first_usec; + return curr_usec; +} + +static bool spondoolies_queue_full_sp30(struct cgpu_info *cgpu) +{ + struct spond_adapter* a = cgpu->device_data; +#if 0 + static int bla = 0; + + if (!((bla++)%500)) { + printf("FAKE TEST FLUSH T:%d!\n",usec_stamp()); + a->reset_mg_queue = 3; + } +#endif + // Only once every 1/10 second do work. 
+ bool ret = false, do_sleep = false; + int next_job_id; + struct timeval tv; + struct work *work; + unsigned int usec; + + mutex_lock(&a->lock); + assert(a->works_pending_tx <= REQUEST_SIZE); + + gettimeofday(&tv, NULL); + + usec = (tv.tv_sec-last_force_queue.tv_sec) * 1000000; + usec += (tv.tv_usec-last_force_queue.tv_usec); + + if ((usec >= REQUEST_PERIOD) || + (a->reset_mg_queue == 3) || // push flush + ((a->reset_mg_queue == 2)) || // Fast pull + ((a->reset_mg_queue == 1) && (a->works_pending_tx == REQUEST_SIZE))) { // Fast push after flush + spondoolies_flush_queue(a, (a->reset_mg_queue == 3)); + if (a->reset_mg_queue) { + //printf("FLUSH(%d) %d T:%d\n",a->reset_mg_queue , a->works_pending_tx, usec_stamp()); + if (a->works_pending_tx || (a->reset_mg_queue == 3)) { + a->reset_mg_queue--; + } + } + last_force_queue = tv; + } + + // see if we have enough jobs + if (a->works_pending_tx == REQUEST_SIZE) { + ret = true; + goto return_unlock; + } + + // see if can take 1 more job. + // Must be smaller to prevent overflow. + assert(MAX_JOBS_PENDING_IN_MINERGATE_SP30 < MINERGATE_ADAPTER_QUEUE_SP30); + next_job_id = (a->current_job_id + 1) % MAX_JOBS_PENDING_IN_MINERGATE_SP30; + if (a->my_jobs[next_job_id].cgminer_work) { + ret = true; + goto return_unlock; + } + work = get_queued(cgpu); + if (unlikely(!work)) { + do_sleep = true; + goto return_unlock; + } + + work->thr = cgpu->thr[0]; + work->thr_id = cgpu->thr[0]->id; + assert(work->thr); + + a->current_job_id = next_job_id; + work->subid = a->current_job_id; + // Get pointer for the request + a->my_jobs[a->current_job_id].cgminer_work = work; + a->my_jobs[a->current_job_id].state = SPONDWORK_STATE_IN_BUSY; + //printf("Push: %d\n", a->current_job_id); + + int max_ntime_roll = (work->drv_rolllimit < MAX_NROLES) ? 
work->drv_rolllimit : MAX_NROLES; + minergate_do_job_req_sp30* pkt_job = &a->mp_next_req->req[a->works_pending_tx]; + fill_minergate_request(pkt_job, work, max_ntime_roll); + a->works_in_driver++; + a->works_pending_tx++; + a->mp_next_req->req_count++; + a->my_jobs[a->current_job_id].merkle_root = pkt_job->mrkle_root; + +return_unlock: + //printf("D:P.TX:%d inD:%d\n", a->works_pending_tx, a->works_in_driver); + mutex_unlock(&a->lock); + + if (do_sleep) + cgsleep_ms(10); + + return ret; +} + +static void spond_poll_stats(struct cgpu_info *spond, struct spond_adapter *a) +{ + FILE *fp = fopen("/var/run/mg_rate_temp", "r"); + + if (!fp) { + applog(LOG_DEBUG, "SPOND unable to open mg_rate_temp"); + a->temp_rate = a->front_temp = a->rear_temp_top = a->rear_temp_bot = 0; + } else { + int ret = fscanf(fp, "%d %d %d %d", &a->temp_rate, &a->front_temp , &a->rear_temp_top , &a->rear_temp_bot); + + + if (ret != 4) + a->temp_rate = a->front_temp = a->rear_temp_top = a->rear_temp_bot = 0; + fclose(fp); + } + applog(LOG_DEBUG, "SPOND poll_stats rate: %d front: %d rear(T/B): %d/%d", + a->temp_rate, a->front_temp , a->rear_temp_top, a->rear_temp_bot); + /* Use the rear temperature as the dev temperature for now */ + spond->temp = (a->rear_temp_top + a->rear_temp_bot)/2; +} + +// Return completed work to submit_nonce() and work_completed() +// struct timeval last_force_queue = {0}; +static int64_t spond_scanhash_sp30(struct thr_info *thr) +{ + struct cgpu_info *cgpu = thr->cgpu; + struct spond_adapter *a = cgpu->device_data; + int64_t ghashes = 0; + cgtimer_t cgt; + time_t now_t; + + cgsleep_prepare_r(&cgt); + now_t = time(NULL); + /* Poll stats only once per second */ + if (now_t != a->last_stats) { + a->last_stats = now_t; + spond_poll_stats(cgpu, a); + } + + if (a->parse_resp) { + int array_size, i; + + mutex_lock(&a->lock); + //ghashes = (a->mp_last_rsp->gh_div_50_rate); + //ghashes = ghashes * 50000 * REQUEST_PERIOD; + array_size = a->mp_last_rsp->rsp_count; + for (i = 0; i < 
array_size; i++) { // walk the jobs + int job_id; + + minergate_do_job_rsp_sp30* work = a->mp_last_rsp->rsp + i; + job_id = work->work_id_in_sw; + if ((a->my_jobs[job_id].cgminer_work)) { + if (a->my_jobs[job_id].merkle_root == work->mrkle_root) { + assert(a->my_jobs[job_id].state == SPONDWORK_STATE_IN_BUSY); + + if (work->winner_nonce) { + struct work *cg_work = a->my_jobs[job_id].cgminer_work; + bool ok; + + ok = submit_noffset_nonce(cg_work->thr, cg_work, work->winner_nonce, work->ntime_offset); + if (ok) + ghashes += 0xffffffffull * cg_work->device_diff; + /*printf("WIn on %d (+%d), none=%x = %d\n", + * work->work_id_in_sw, work->ntime_offset, htole32(work->winner_nonce), ok);*/ + a->wins++; + } + + //printf("%d ntime_clones = %d\n",job_id,a->my_jobs[job_id].ntime_clones); + + //printf("Done with %d\n", job_id); + if (work->job_complete) { + //printf("Complete %d\n", job_id); + work_completed(a->cgpu, a->my_jobs[job_id].cgminer_work); + a->good++; + a->my_jobs[job_id].cgminer_work = NULL; + a->my_jobs[job_id].state = SPONDWORK_STATE_EMPTY; + a->works_in_minergate_and_pending_tx--; + a->works_in_driver--; + } + } else { + a->bad++; + printf("Dropping minergate old job id=%d mrkl=%x my-mrkl=%x\n", + job_id, a->my_jobs[job_id].merkle_root, work->mrkle_root); + } + } else { + a->empty++; + printf("No cgminer job (id:%d res:%d)!\n",job_id, work->res); + } + } + mutex_unlock(&a->lock); + + a->parse_resp = 0; + } + cgsleep_ms_r(&cgt, 40); + + return ghashes; +} + +// Remove all work from queue +static void spond_flush_work_sp30(struct cgpu_info *cgpu) +{ + struct spond_adapter *a = cgpu->device_data; + + //printf("GOT FLUSH!%d\n"); + mutex_lock(&a->lock); + a->reset_mg_queue = 3; + mutex_unlock(&a->lock); +} + +struct device_drv sp30_drv = { + .drv_id = DRIVER_sp30, + .dname = "Sp30", + .name = "S30", + .min_diff = 16, + .max_diff = 1024.0, // Limit max diff to get some nonces back regardless + .drv_detect = spondoolies_detect_sp30, + .get_api_stats = 
spondoolies_api_stats_sp30, + .thread_prepare = spondoolies_prepare_sp30, + .thread_shutdown = spondoolies_shutdown_sp30, + .hash_work = hash_queued_work, + .queue_full = spondoolies_queue_full_sp30, + .scanwork = spond_scanhash_sp30, + .flush_work = spond_flush_work_sp30, +}; diff --git a/driver-spondoolies-sp30.h b/driver-spondoolies-sp30.h new file mode 100644 index 0000000..ced4eb4 --- /dev/null +++ b/driver-spondoolies-sp30.h @@ -0,0 +1,85 @@ +/* + * Copyright 2014 Con Kolivas + * Copyright 2014 Zvi Shteingart - Spondoolies-tech.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef SPONDA_HFILE +#define SPONDA_HFILE + +#include "miner.h" +#include "driver-spondoolies-sp30-p.h" + + + +typedef enum adapter_state { + ADAPTER_STATE_INIT, + ADAPTER_STATE_OPERATIONAL, +} ADAPTER_STATE; + +typedef enum spond_work_state { + SPONDWORK_STATE_EMPTY, + SPONDWORK_STATE_IN_BUSY, +} SPONDWORK_STATE; + +#define MAX_JOBS_PENDING_IN_MINERGATE_SP30 30 +#define MAX_NROLES 60 + + +typedef struct { + struct work *cgminer_work; + SPONDWORK_STATE state; + uint32_t merkle_root; + time_t start_time; + int job_id; +} spond_driver_work_sp30; + + + +struct spond_adapter { + pthread_mutex_t lock; + ADAPTER_STATE adapter_state; + void *cgpu; + + // Statistics + int wins; + int good; + int empty; + int bad; + int overflow; + // state + int works_in_driver; + int works_in_minergate_and_pending_tx; + int works_pending_tx; + int socket_fd; + int reset_mg_queue; // 3=reset, 2=fast send 1 job, 1=fast send 10 jobs, 0=nada + int current_job_id; + int parse_resp; + minergate_req_packet_sp30* mp_next_req; + minergate_rsp_packet_sp30* mp_last_rsp; + spond_driver_work_sp30 my_jobs[MAX_JOBS_PENDING_IN_MINERGATE_SP30]; + + // Temperature statistics + 
int temp_rate; + int front_temp; + int rear_temp_top; + int rear_temp_bot; + + // Last second we polled stats + time_t last_stats; +}; + +// returns non-zero if needs to change ASICs. +int spond_one_sec_timer_scaling(struct spond_adapter *a, int t); +int spond_do_scaling(struct spond_adapter *a); + +extern void one_sec_spondoolies_watchdog(int uptime); + +#define REQUEST_PERIOD (100000) // times per second - in usec +#define REQUEST_SIZE 10 // jobs per request + +#endif diff --git a/elist.h b/elist.h new file mode 100644 index 0000000..afd5937 --- /dev/null +++ b/elist.h @@ -0,0 +1,256 @@ +#ifndef _LINUX_LIST_H +#define _LINUX_LIST_H + +/* + * Simple doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. + */ + +struct list_head { + struct list_head *next, *prev; +}; + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +#define INIT_LIST_HEAD(ptr) do { \ + (ptr)->next = (ptr); (ptr)->prev = (ptr); \ +} while (0) + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. 
+ */ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_del(struct list_head *prev, struct list_head *next) +{ + next->prev = prev; + prev->next = next; +} + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty on entry does not return true after this, the entry is in an undefined state. + */ +static inline void list_del(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->next = (void *) 0; + entry->prev = (void *) 0; +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. 
+ */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add_tail(list, head); +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(struct list_head *head) +{ + return head->next == head; +} + +static inline void __list_splice(struct list_head *list, + struct list_head *head) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + struct list_head *at = head->next; + + first->prev = head; + head->next = first; + + last->next = at; + at->prev = last; +} + +/** + * list_splice - join two lists + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(struct list_head *list, struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. 
+ * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#ifndef _WIN64 +#define list_entry(ptr, type, member) \ + ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) +#else +#define list_entry(ptr, type, member) \ + ((type *)((char *)(ptr)-(unsigned long long)(&((type *)0)->member))) +#endif + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop counter. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); \ + pos = pos->next) +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop counter. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; pos != (head); \ + pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop counter. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop counter. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop counter. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. 
+ */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_continue - iterate over list of given type + * continuing after existing point + * @pos: the type * to use as a loop counter. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member), \ + prefetch(pos->member.next); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member), \ + prefetch(pos->member.next)) + +#endif diff --git a/example.conf b/example.conf new file mode 100644 index 0000000..2448ed5 --- /dev/null +++ b/example.conf @@ -0,0 +1,25 @@ +{ +"pools" : [ + { + "url" : "http://url1:8332", + "user" : "user1", + "pass" : "pass1" + }, + { + "url" : "http://url2:8344", + "user" : "user2", + "pass" : "pass2" + }, + { + "url" : "http://url3:8332", + "user" : "user3", + "pass" : "pass3" + } +], + +"failover-only" : true, +"no-submit-stale" : true, +"api-listen" : true, +"api-port" : "4028", +"api-allow" : "W:192.168.1.0/24,W:127.0.0.1" +} diff --git a/fpgautils.c b/fpgautils.c new file mode 100644 index 0000000..8c262e7 --- /dev/null +++ b/fpgautils.c @@ -0,0 +1,610 @@ +/* + * Copyright 2013 Con Kolivas + * Copyright 2012 Luke Dashjr + * Copyright 2012 Andrew Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include + +#include "miner.h" + +#ifndef WIN32 +#include +#include +#include +#include +#include +#include +#ifndef O_CLOEXEC +#define O_CLOEXEC 0 +#endif +#else +#include +#include +#endif + +#ifdef HAVE_LIBUDEV +#include +#include +#endif + +#include "elist.h" +#include "logging.h" +#include "miner.h" +#include "fpgautils.h" + +#ifdef HAVE_LIBUDEV +int serial_autodetect_udev(detectone_func_t detectone, const char*prodname) +{ + struct udev *udev = udev_new(); + struct udev_enumerate *enumerate = udev_enumerate_new(udev); + struct udev_list_entry *list_entry; + char found = 0; + + udev_enumerate_add_match_subsystem(enumerate, "tty"); + udev_enumerate_add_match_property(enumerate, "ID_MODEL", prodname); + udev_enumerate_scan_devices(enumerate); + udev_list_entry_foreach(list_entry, udev_enumerate_get_list_entry(enumerate)) { + struct udev_device *device = udev_device_new_from_syspath( + udev_enumerate_get_udev(enumerate), + udev_list_entry_get_name(list_entry) + ); + if (!device) + continue; + + const char *devpath = udev_device_get_devnode(device); + if (devpath && detectone(devpath)) + ++found; + + udev_device_unref(device); + } + udev_enumerate_unref(enumerate); + udev_unref(udev); + + return found; +} +#else +int serial_autodetect_udev(__maybe_unused detectone_func_t detectone, __maybe_unused const char*prodname) +{ + return 0; +} +#endif + +int serial_autodetect_devserial(__maybe_unused detectone_func_t detectone, __maybe_unused const char*prodname) +{ +#ifndef WIN32 + DIR *D; + struct dirent *de; + const char udevdir[] = "/dev/serial/by-id"; + char devpath[sizeof(udevdir) + 1 + NAME_MAX]; + char *devfile = devpath + sizeof(udevdir); + char found = 0; + + D = opendir(udevdir); + if (!D) + return 0; + memcpy(devpath, udevdir, sizeof(udevdir) - 1); + devpath[sizeof(udevdir) - 1] = '/'; + while ( (de = readdir(D)) ) { + if (!strstr(de->d_name, prodname)) + continue; + strcpy(devfile, de->d_name); + if 
(detectone(devpath)) + ++found; + } + closedir(D); + + return found; +#else + return 0; +#endif +} + +int _serial_detect(struct device_drv *drv, detectone_func_t detectone, autoscan_func_t autoscan, bool forceauto) +{ + struct string_elist *iter, *tmp; + const char *dev, *colon; + bool inhibitauto = false; + char found = 0; + size_t namel = strlen(drv->name); + size_t dnamel = strlen(drv->dname); + + list_for_each_entry_safe(iter, tmp, &scan_devices, list) { + dev = iter->string; + if ((colon = strchr(dev, ':')) && colon[1] != '\0') { + size_t idlen = colon - dev; + + // allow either name:device or dname:device + if ((idlen != namel || strncasecmp(dev, drv->name, idlen)) + && (idlen != dnamel || strncasecmp(dev, drv->dname, idlen))) + continue; + + dev = colon + 1; + } + if (!strcmp(dev, "auto")) + forceauto = true; + else if (!strcmp(dev, "noauto")) + inhibitauto = true; + else if (detectone(dev)) { + string_elist_del(iter); + inhibitauto = true; + ++found; + } + } + + if ((forceauto || !inhibitauto) && autoscan) + found += autoscan(); + + return found; +} + +// This code is purely for debugging but is very useful for that +// It also took quite a bit of effort so I left it in +// #define TERMIOS_DEBUG 1 +// Here to include it at compile time +// It's off by default +#ifndef WIN32 +#ifdef TERMIOS_DEBUG + +#define BITSSET "Y" +#define BITSNOTSET "N" + +int tiospeed(speed_t speed) +{ + switch (speed) { + case B0: + return 0; + case B50: + return 50; + case B75: + return 75; + case B110: + return 110; + case B134: + return 134; + case B150: + return 150; + case B200: + return 200; + case B300: + return 300; + case B600: + return 600; + case B1200: + return 1200; + case B1800: + return 1800; + case B2400: + return 2400; + case B4800: + return 4800; + case B9600: + return 9600; + case B19200: + return 19200; + case B38400: + return 38400; + case B57600: + return 57600; + case B115200: + return 115200; + case B230400: + return 230400; + case B460800: + return 460800; + 
case B500000: + return 500000; + case B576000: + return 576000; + case B921600: + return 921600; + case B1000000: + return 1000000; + case B1152000: + return 1152000; + case B1500000: + return 1500000; + case B2000000: + return 2000000; + case B2500000: + return 2500000; + case B3000000: + return 3000000; + case B3500000: + return 3500000; + case B4000000: + return 4000000; + default: + return -1; + } +} + +void termios_debug(const char *devpath, struct termios *my_termios, const char *msg) +{ + applog(LOG_DEBUG, "TIOS: Open %s attributes %s: ispeed=%d ospeed=%d", + devpath, msg, tiospeed(cfgetispeed(my_termios)), tiospeed(cfgetispeed(my_termios))); + +#define ISSETI(b) ((my_termios->c_iflag | (b)) ? BITSSET : BITSNOTSET) + + applog(LOG_DEBUG, "TIOS: c_iflag: IGNBRK=%s BRKINT=%s IGNPAR=%s PARMRK=%s INPCK=%s ISTRIP=%s INLCR=%s IGNCR=%s ICRNL=%s IUCLC=%s IXON=%s IXANY=%s IOFF=%s IMAXBEL=%s IUTF8=%s", + ISSETI(IGNBRK), ISSETI(BRKINT), ISSETI(IGNPAR), ISSETI(PARMRK), + ISSETI(INPCK), ISSETI(ISTRIP), ISSETI(INLCR), ISSETI(IGNCR), + ISSETI(ICRNL), ISSETI(IUCLC), ISSETI(IXON), ISSETI(IXANY), + ISSETI(IXOFF), ISSETI(IMAXBEL), ISSETI(IUTF8)); + +#define ISSETO(b) ((my_termios->c_oflag | (b)) ? BITSSET : BITSNOTSET) +#define VALO(b) (my_termios->c_oflag | (b)) + + applog(LOG_DEBUG, "TIOS: c_oflag: OPOST=%s OLCUC=%s ONLCR=%s OCRNL=%s ONOCR=%s ONLRET=%s OFILL=%s OFDEL=%s NLDLY=%d CRDLY=%d TABDLY=%d BSDLY=%d VTDLY=%d FFDLY=%d", + ISSETO(OPOST), ISSETO(OLCUC), ISSETO(ONLCR), ISSETO(OCRNL), + ISSETO(ONOCR), ISSETO(ONLRET), ISSETO(OFILL), ISSETO(OFDEL), + VALO(NLDLY), VALO(CRDLY), VALO(TABDLY), VALO(BSDLY), + VALO(VTDLY), VALO(FFDLY)); + +#define ISSETC(b) ((my_termios->c_cflag | (b)) ? 
BITSSET : BITSNOTSET) +#define VALC(b) (my_termios->c_cflag | (b)) + + applog(LOG_DEBUG, "TIOS: c_cflag: CBAUDEX=%s CSIZE=%d CSTOPB=%s CREAD=%s PARENB=%s PARODD=%s HUPCL=%s CLOCAL=%s" +#ifdef LOBLK + " LOBLK=%s" +#endif + " CMSPAR=%s CRTSCTS=%s", + ISSETC(CBAUDEX), VALC(CSIZE), ISSETC(CSTOPB), ISSETC(CREAD), + ISSETC(PARENB), ISSETC(PARODD), ISSETC(HUPCL), ISSETC(CLOCAL), +#ifdef LOBLK + ISSETC(LOBLK), +#endif + ISSETC(CMSPAR), ISSETC(CRTSCTS)); + +#define ISSETL(b) ((my_termios->c_lflag | (b)) ? BITSSET : BITSNOTSET) + + applog(LOG_DEBUG, "TIOS: c_lflag: ISIG=%s ICANON=%s XCASE=%s ECHO=%s ECHOE=%s ECHOK=%s ECHONL=%s ECHOCTL=%s ECHOPRT=%s ECHOKE=%s" +#ifdef DEFECHO + " DEFECHO=%s" +#endif + " FLUSHO=%s NOFLSH=%s TOSTOP=%s PENDIN=%s IEXTEN=%s", + ISSETL(ISIG), ISSETL(ICANON), ISSETL(XCASE), ISSETL(ECHO), + ISSETL(ECHOE), ISSETL(ECHOK), ISSETL(ECHONL), ISSETL(ECHOCTL), + ISSETL(ECHOPRT), ISSETL(ECHOKE), +#ifdef DEFECHO + ISSETL(DEFECHO), +#endif + ISSETL(FLUSHO), ISSETL(NOFLSH), ISSETL(TOSTOP), ISSETL(PENDIN), + ISSETL(IEXTEN)); + +#define VALCC(b) (my_termios->c_cc[b]) + applog(LOG_DEBUG, "TIOS: c_cc: VINTR=0x%02x VQUIT=0x%02x VERASE=0x%02x VKILL=0x%02x VEOF=0x%02x VMIN=%u VEOL=0x%02x VTIME=%u VEOL2=0x%02x" +#ifdef VSWTCH + " VSWTCH=0x%02x" +#endif + " VSTART=0x%02x VSTOP=0x%02x VSUSP=0x%02x" +#ifdef VDSUSP + " VDSUSP=0x%02x" +#endif + " VLNEXT=0x%02x VWERASE=0x%02x VREPRINT=0x%02x VDISCARD=0x%02x" +#ifdef VSTATUS + " VSTATUS=0x%02x" +#endif + , + VALCC(VINTR), VALCC(VQUIT), VALCC(VERASE), VALCC(VKILL), + VALCC(VEOF), VALCC(VMIN), VALCC(VEOL), VALCC(VTIME), + VALCC(VEOL2), +#ifdef VSWTCH + VALCC(VSWTCH), +#endif + VALCC(VSTART), VALCC(VSTOP), VALCC(VSUSP), +#ifdef VDSUSP + VALCC(VDSUSP), +#endif + VALCC(VLNEXT), VALCC(VWERASE), + VALCC(VREPRINT), VALCC(VDISCARD) +#ifdef VSTATUS + ,VALCC(VSTATUS) +#endif + ); +} +#endif +#endif + +int serial_open(const char *devpath, unsigned long baud, signed short timeout, bool purge) +{ +#ifdef WIN32 + HANDLE hSerial = 
CreateFile(devpath, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL);
+	if (unlikely(hSerial == INVALID_HANDLE_VALUE))
+	{
+		DWORD e = GetLastError();
+		switch (e) {
+		case ERROR_ACCESS_DENIED:
+			applog(LOG_ERR, "Do not have user privileges required to open %s", devpath);
+			break;
+		case ERROR_SHARING_VIOLATION:
+			applog(LOG_ERR, "%s is already in use by another process", devpath);
+			break;
+		default:
+			applog(LOG_DEBUG, "Open %s failed, GetLastError:%d", devpath, (int)e);
+			break;
+		}
+		return -1;
+	}
+
+	// thanks to af_newbie for pointers about this
+	COMMCONFIG comCfg = {0};
+	comCfg.dwSize = sizeof(COMMCONFIG);
+	comCfg.wVersion = 1;
+	comCfg.dcb.DCBlength = sizeof(DCB);
+	comCfg.dcb.BaudRate = baud;
+	comCfg.dcb.fBinary = 1; // binary mode, no EOF check
+	comCfg.dcb.fDtrControl = DTR_CONTROL_ENABLE;
+	comCfg.dcb.fRtsControl = RTS_CONTROL_ENABLE;
+	comCfg.dcb.ByteSize = 8; // 8N1 to match the termios path below
+
+	SetCommConfig(hSerial, &comCfg, sizeof(comCfg));
+
+	// Code must specify a valid timeout value (0 means don't timeout)
+	const DWORD ctoms = (timeout * 100); // timeout is in deciseconds; COMMTIMEOUTS wants ms
+	COMMTIMEOUTS cto = {ctoms, 0, ctoms, 0, ctoms};
+	SetCommTimeouts(hSerial, &cto);
+
+	if (purge) {
+		PurgeComm(hSerial, PURGE_RXABORT);
+		PurgeComm(hSerial, PURGE_TXABORT);
+		PurgeComm(hSerial, PURGE_RXCLEAR);
+		PurgeComm(hSerial, PURGE_TXCLEAR);
+	}
+
+	return _open_osfhandle((intptr_t)hSerial, 0); // wrap the HANDLE in a CRT fd
+#else
+	int fdDev = open(devpath, O_RDWR | O_CLOEXEC | O_NOCTTY);
+
+	if (unlikely(fdDev == -1))
+	{
+		if (errno == EACCES)
+			applog(LOG_ERR, "Do not have user privileges required to open %s", devpath);
+		else
+			applog(LOG_DEBUG, "Open %s failed, errno:%d", devpath, errno);
+
+		return -1;
+	}
+
+	struct termios my_termios;
+
+	tcgetattr(fdDev, &my_termios);
+
+#ifdef TERMIOS_DEBUG
+	termios_debug(devpath, &my_termios, "before");
+#endif
+
+	switch (baud) {
+	case 0: // 0 = leave the device's current speed untouched
+		break;
+	case 19200:
+		cfsetispeed(&my_termios, B19200);
+		cfsetospeed(&my_termios, B19200);
+		break;
+	case 38400:
+		cfsetispeed(&my_termios, B38400);
+		cfsetospeed(&my_termios, B38400);
+		break;
+	case 57600:
+		cfsetispeed(&my_termios, B57600);
+		cfsetospeed(&my_termios, B57600);
+		break;
+	case 115200:
+		cfsetispeed(&my_termios, B115200);
+		cfsetospeed(&my_termios, B115200);
+		break;
+	// TODO: try some higher speeds with the Icarus and BFL to see
+	// if they support them and if setting them makes any difference
+	// N.B. B3000000 doesn't work on Icarus
+	default:
+		applog(LOG_WARNING, "Unrecognized baud rate: %lu", baud);
+	}
+
+	my_termios.c_cflag &= ~(CSIZE | PARENB); // 8 data bits, no parity
+	my_termios.c_cflag |= CS8;
+	my_termios.c_cflag |= CREAD;
+	my_termios.c_cflag |= CLOCAL; // ignore modem control lines
+
+	my_termios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK |
+			ISTRIP | INLCR | IGNCR | ICRNL | IXON); // raw input
+	my_termios.c_oflag &= ~OPOST; // raw output
+	my_termios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); // non-canonical
+
+	// Code must specify a valid timeout value (0 means don't timeout)
+	my_termios.c_cc[VTIME] = (cc_t)timeout; // read timeout in deciseconds
+	my_termios.c_cc[VMIN] = 0; // return as soon as any data (or timeout)
+
+#ifdef TERMIOS_DEBUG
+	termios_debug(devpath, &my_termios, "settings");
+#endif
+
+	tcsetattr(fdDev, TCSANOW, &my_termios);
+
+#ifdef TERMIOS_DEBUG
+	tcgetattr(fdDev, &my_termios);
+	termios_debug(devpath, &my_termios, "after");
+#endif
+
+	if (purge)
+		tcflush(fdDev, TCIOFLUSH);
+	return fdDev;
+#endif
+}
+
+// Read up to bufsiz bytes; if eol is non-NULL, read one byte at a time and
+// stop after the first byte equal to *eol.  Returns the number of bytes read.
+ssize_t _serial_read(int fd, char *buf, size_t bufsiz, char *eol)
+{
+	ssize_t len, tlen = 0;
+	while (bufsiz) {
+		len = read(fd, buf, eol ? 
1 : bufsiz); + if (unlikely(len == -1)) + break; + tlen += len; + if (eol && *eol == buf[0]) + break; + buf += len; + bufsiz -= len; + } + return tlen; +} + +static FILE *_open_bitstream(const char *path, const char *subdir, const char *filename) +{ + char fullpath[PATH_MAX]; + strcpy(fullpath, path); + strcat(fullpath, "/"); + if (subdir) { + strcat(fullpath, subdir); + strcat(fullpath, "/"); + } + strcat(fullpath, filename); + return fopen(fullpath, "rb"); +} +#define _open_bitstream(path, subdir) do { \ + f = _open_bitstream(path, subdir, filename); \ + if (f) \ + return f; \ +} while(0) + +#define _open_bitstream3(path) do { \ + _open_bitstream(path, dname); \ + _open_bitstream(path, "bitstreams"); \ + _open_bitstream(path, NULL); \ +} while(0) + +FILE *open_bitstream(const char *dname, const char *filename) +{ + FILE *f; + + _open_bitstream3(opt_kernel_path); + _open_bitstream3(cgminer_path); + _open_bitstream3("."); + + return NULL; +} + +#ifndef WIN32 + +static bool _select_wait_read(int fd, struct timeval *timeout) +{ + fd_set rfds; + + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + + if (select(fd+1, &rfds, NULL, NULL, timeout) > 0) + return true; + else + return false; +} + +// Default timeout 100ms - only for device initialisation +const struct timeval tv_timeout_default = { 0, 100000 }; +// Default inter character timeout = 1ms - only for device initialisation +const struct timeval tv_inter_char_default = { 0, 1000 }; + +// Device initialisation function - NOT for work processing +size_t _select_read(int fd, char *buf, size_t bufsiz, struct timeval *timeout, struct timeval *char_timeout, int finished) +{ + struct timeval tv_time, tv_char; + ssize_t siz, red = 0; + char got; + + // timeout is the maximum time to wait for the first character + tv_time.tv_sec = timeout->tv_sec; + tv_time.tv_usec = timeout->tv_usec; + + if (!_select_wait_read(fd, &tv_time)) + return 0; + + while (4242) { + if ((siz = read(fd, buf, 1)) < 0) + return red; + + got = *buf; + buf += 
siz; + red += siz; + bufsiz -= siz; + + if (bufsiz < 1 || (finished >= 0 && got == finished)) + return red; + + // char_timeout is the maximum time to wait for each subsequent character + // this is OK for initialisation, but bad for work processing + // work processing MUST have a fixed size so this doesn't come into play + tv_char.tv_sec = char_timeout->tv_sec; + tv_char.tv_usec = char_timeout->tv_usec; + + if (!_select_wait_read(fd, &tv_char)) + return red; + } + + return red; +} + +// Device initialisation function - NOT for work processing +size_t _select_write(int fd, char *buf, size_t siz, struct timeval *timeout) +{ + struct timeval tv_time, tv_now, tv_finish; + fd_set rfds; + ssize_t wrote = 0, ret; + + cgtime(&tv_now); + timeradd(&tv_now, timeout, &tv_finish); + + // timeout is the maximum time to spend trying to write + tv_time.tv_sec = timeout->tv_sec; + tv_time.tv_usec = timeout->tv_usec; + + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + + while (siz > 0 && (tv_now.tv_sec < tv_finish.tv_sec || (tv_now.tv_sec == tv_finish.tv_sec && tv_now.tv_usec < tv_finish.tv_usec)) && select(fd+1, NULL, &rfds, NULL, &tv_time) > 0) { + if ((ret = write(fd, buf, 1)) > 0) { + buf++; + wrote++; + siz--; + } + else if (ret < 0) + return wrote; + + cgtime(&tv_now); + } + + return wrote; +} + +int get_serial_cts(int fd) +{ + int flags; + + if (!fd) + return -1; + + ioctl(fd, TIOCMGET, &flags); + return (flags & TIOCM_CTS) ? 1 : 0; +} +#else +int get_serial_cts(const int fd) +{ + if (!fd) + return -1; + const HANDLE fh = (HANDLE)_get_osfhandle(fd); + if (!fh) + return -1; + + DWORD flags; + if (!GetCommModemStatus(fh, &flags)) + return -1; + + return (flags & MS_CTS_ON) ? 1 : 0; +} +#endif // ! 
WIN32 diff --git a/fpgautils.h b/fpgautils.h new file mode 100644 index 0000000..8a7dd83 --- /dev/null +++ b/fpgautils.h @@ -0,0 +1,84 @@ +/* + * Copyright 2012 Luke Dashjr + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef FPGAUTILS_H +#define FPGAUTILS_H + +#include +#include + +typedef bool(*detectone_func_t)(const char*); +typedef int(*autoscan_func_t)(); + +extern int _serial_detect(struct device_drv *drv, detectone_func_t, autoscan_func_t, bool force_autoscan); +#define serial_detect_fauto(drv, detectone, autoscan) \ + _serial_detect(drv, detectone, autoscan, true) +#define serial_detect_auto(drv, detectone, autoscan) \ + _serial_detect(drv, detectone, autoscan, false) +#define serial_detect(drv, detectone) \ + _serial_detect(drv, detectone, NULL, false) +extern int serial_autodetect_devserial(detectone_func_t, const char *prodname); +extern int serial_autodetect_udev(detectone_func_t, const char *prodname); + +extern int serial_open(const char *devpath, unsigned long baud, signed short timeout, bool purge); +extern ssize_t _serial_read(int fd, char *buf, size_t buflen, char *eol); +#define serial_read(fd, buf, count) \ + _serial_read(fd, (char*)(buf), count, NULL) +#define serial_read_line(fd, buf, bufsiz, eol) \ + _serial_read(fd, buf, bufsiz, &eol) +#define serial_close(fd) close(fd) + +extern FILE *open_bitstream(const char *dname, const char *filename); + +extern int get_serial_cts(int fd); + +#ifndef WIN32 +extern const struct timeval tv_timeout_default; +extern const struct timeval tv_inter_char_default; + +extern size_t _select_read(int fd, char *buf, size_t bufsiz, struct timeval *timeout, struct timeval *char_timeout, int finished); +extern size_t _select_write(int fd, char *buf, size_t siz, struct 
timeval *timeout); + +#define select_open(devpath) \ + serial_open(devpath, 0, 0, false) + +#define select_open_purge(devpath, purge)\ + serial_open(devpath, 0, 0, purge) + +#define select_write(fd, buf, siz) \ + _select_write(fd, buf, siz, (struct timeval *)(&tv_timeout_default)) + +#define select_write_full _select_write + +#define select_read(fd, buf, bufsiz) \ + _select_read(fd, buf, bufsiz, (struct timeval *)(&tv_timeout_default), \ + (struct timeval *)(&tv_inter_char_default), -1) + +#define select_read_til(fd, buf, bufsiz, eol) \ + _select_read(fd, buf, bufsiz, (struct timeval *)(&tv_timeout_default), \ + (struct timeval *)(&tv_inter_char_default), eol) + +#define select_read_wait(fd, buf, bufsiz, timeout) \ + _select_read(fd, buf, bufsiz, timeout, \ + (struct timeval *)(&tv_inter_char_default), -1) + +#define select_read_wait_til(fd, buf, bufsiz, timeout, eol) \ + _select_read(fd, buf, bufsiz, timeout, \ + (struct timeval *)(&tv_inter_char_default), eol) + +#define select_read_wait_both(fd, buf, bufsiz, timeout, char_timeout) \ + _select_read(fd, buf, bufsiz, timeout, char_timeout, -1) + +#define select_read_full _select_read + +#define select_close(fd) close(fd) + +#endif // ! WIN32 + +#endif diff --git a/hexdump.c b/hexdump.c new file mode 100644 index 0000000..80b8c2d --- /dev/null +++ b/hexdump.c @@ -0,0 +1,77 @@ +/* + * hexdump implementation without depenecies to *printf() + * output is equal to 'hexdump -C' + * should be compatible to 64bit architectures + * + * Copyright (c) 2009 Daniel Mack + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define hex_print(p) applog(LOG_DEBUG, "%s", p)
+
+static char nibble[] = {
+	'0', '1', '2', '3', '4', '5', '6', '7',
+	'8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+
+#define BYTES_PER_LINE 0x10
+
+// Log a 'hexdump -C'-style dump of len bytes at p: one BYTES_PER_LINE-byte
+// row per applog() line, formatted "<addr>  <hex bytes>  |<printable ASCII>|".
+// Built without *printf on purpose (see file header).
+static void hexdump(const uint8_t *p, unsigned int len)
+{
+	unsigned int i, addr;
+	unsigned int wordlen = sizeof(unsigned int); // address field = 2*wordlen hex digits
+	unsigned char v, line[BYTES_PER_LINE * 5];
+
+	for (addr = 0; addr < len; addr += BYTES_PER_LINE) {
+		/* clear line */
+		for (i = 0; i < sizeof(line); i++) {
+			if (i == wordlen * 2 + 52 ||
+			    i == wordlen * 2 + 69) {
+				line[i] = '|'; /* the two bars framing the ASCII column */
+				continue;
+			}
+
+			if (i == wordlen * 2 + 70) {
+				line[i] = '\0'; /* terminate just after the closing bar */
+				continue;
+			}
+
+			line[i] = ' ';
+		}
+
+		/* print address */
+		for (i = 0; i < wordlen * 2; i++) {
+			v = addr >> ((wordlen * 2 - i - 1) * 4);
+			line[i] = nibble[v & 0xf];
+		}
+
+		/* dump content */
+		for (i = 0; i < BYTES_PER_LINE; i++) {
+			int pos = (wordlen * 2) + 3 + (i / 8); /* extra gap after the 8th byte */
+
+			if (addr + i >= len)
+				break;
+
+			v = p[addr + i];
+			line[pos + (i * 3) + 0] = nibble[v >> 4];
+			line[pos + (i * 3) + 1] = nibble[v & 0xf];
+
+			/* character printable? */
+			line[(wordlen * 2) + 53 + i] =
+				(v >= ' ' && v <= '~') ? v : '.';
+		}
+
+		hex_print(line);
+	}
+}
diff --git a/hf_protocol.h b/hf_protocol.h
new file mode 100644
index 0000000..7a1a0c2
--- /dev/null
+++ b/hf_protocol.h
+//
+// Copyright 2013, 2014 HashFast Technologies LLC
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3 of the License, or (at your option)
+// any later version. See COPYING for more details.
+// +// Useful data structures and values for interfacing with HashFast products +// +// Version 1.1 +// + +#ifndef _HF_PROTOCOL_H_ +#define _HF_PROTOCOL_H_ + +#define HF_PROTOCOL_VERSION ((0<<8)|1) + +#define HF_PREAMBLE (uint8_t) 0xaa +#define HF_BROADCAST_ADDRESS (uint8_t) 0xff +#define HF_GWQ_ADDRESS (uint8_t) 254 + +// Serial protocol operation codes (Second header byte) +#define OP_NULL 0 +#define OP_ROOT 1 +#define OP_RESET 2 +#define OP_PLL_CONFIG 3 +#define OP_ADDRESS 4 +#define OP_READDRESS 5 +#define OP_HIGHEST 6 +#define OP_BAUD 7 +#define OP_UNROOT 8 + +#define OP_HASH 9 +#define OP_NONCE 10 +#define OP_ABORT 11 +#define OP_STATUS 12 +#define OP_GPIO 13 +#define OP_CONFIG 14 +#define OP_STATISTICS 15 +#define OP_GROUP 16 +#define OP_CLOCKGATE 17 + +// Conversions for the ADC readings from GN on-chip sensors +#define GN_CORE_VOLTAGE(a) ((float)(a)/256*1.2) +#define GN_DIE_TEMPERATURE(a) ((((float)(a)*240)/4096.0)-61.5) + +// What to use in an OP_CONFIG hdata field to set thermal overload point to a given temp in degrees C +#define GN_THERMAL_CUTOFF(temp) ((uint16_t)(((temp)+61.5)*4096/240)) + +// The sequence distance between a sent and received sequence number. 
+#define HF_SEQUENCE_DISTANCE(tx,rx) ((tx)>=(rx)?((tx)-(rx)):(info->num_sequence+(tx)-(rx))) + +// Values the protocol field in the above structure may take +#define PROTOCOL_USB_MAPPED_SERIAL 0 +#define PROTOCOL_GLOBAL_WORK_QUEUE 1 + +// Conversions for the board/module level sensors +#define M_VOLTAGE(a) ((float)(a)*19.0734e-6) +#define M_PHASE_CURRENT(a) ((float)(a)*0.794728597e-3) + +// Values info->device_type can take +#define HFD_G1 1 // HashFast G-1 GN ASIC +#define HFD_VC709 128 +#define HFD_ExpressAGX 129 + +// USB interface specific operation codes +#define OP_USB_INIT 128 // Initialize USB interface details +#define OP_GET_TRACE 129 // Send back the trace buffer if present +#define OP_LOOPBACK_USB 130 +#define OP_LOOPBACK_UART 131 +#define OP_DFU 132 // Jump into the boot loader +#define OP_USB_SHUTDOWN 133 // Initialize USB interface details +#define OP_DIE_STATUS 134 // Die status. There are 4 die per ASIC +#define OP_GWQ_STATUS 135 // Global Work Queue protocol status +#define OP_WORK_RESTART 136 // Stratum work restart regime +#define OP_USB_STATS1 137 // Statistics class 1 +#define OP_USB_GWQSTATS 138 // GWQ protocol statistics +#define OP_USB_NOTICE 139 // Asynchronous notification event +#define OP_PING 140 // Echo +#define OP_CORE_MAP 141 // Return core map +#define OP_VERSION 142 // Version information +#define OP_FAN 143 // Set Fan Speed +#define OP_NAME 144 // System name write/read +#define OP_USB_DEBUG 255 + +// HashFast vendor and product ID's +#define HF_USB_VENDOR_ID 0x297c +#define HF_USB_PRODUCT_ID_G1 0x0001 + +// If this bit is set, search forward for other nonce(s) +#define HF_NTIME_MASK 0xfff // Mask for for ntime +#define HF_NONCE_SEARCH 0x1000 // Search bit in candidate_nonce -> ntime + +// +// Fault codes that can be returned in struct hf_usb_init_base.operation_status +// +#define E_RESET_TIMEOUT 1 +#define E_ADDRESS_TIMEOUT 2 +#define E_CLOCKGATE_TIMEOUT 3 +#define E_CONFIG_TIMEOUT 4 +#define E_EXCESS_CORE_FAILURES 5 +#define 
E_TOTAL_CORE_FAILURES 6 +#define E_TOO_MANY_GROUPS 7 +#define E_NO_SLAVES 8 +#define E_SLAVE_COMM 9 +#define E_MAIN_POWER_BAD 10 +#define E_SECONDARY_POWER_BAD 11 +#define E_BOARD_1 12 +#define E_BOARD_2 13 +#define E_BOARD_3 14 +#define E_BOARD_4 15 +#define E_BOARD_5 16 +#define E_CORE_POWER_FAULT 17 +#define E_BAUD_TIMEOUT 18 +#define E_ADDRESS_FAILURE 19 +#define E_IR_PROG_FAILURE 20 +#define E_MIXED_MISMATCH 21 +#define E_MIXED_TIMEOUT 22 + +#define U32SIZE(x) (sizeof(x)/sizeof(uint32_t)) + +// Baud rate vs. code for gpi[7:5] coming out of reset +#define BAUD_RATE_PWRUP_0 115200 +#define BAUD_RATE_PWRUP_1 9600 +#define BAUD_RATE_PWRUP_2 38400 +#define BAUD_RATE_PWRUP_3 57600 +#define BAUD_RATE_PWRUP_4 230400 +#define BAUD_RATE_PWRUP_5 576000 +#define BAUD_RATE_PWRUP_6 921600 +#define BAUD_RATE_PWRUP_7 1152000 + +// OP_WORK_RESTART hash clock change methods. +// +// May be issued *infrequently* by the host to adjust hash clock rate for thermal control +// The "hdata" field, if non zero, contains adjustment instructions. Bits 15:12 of "hdata" +// contain the adjustment type according to the following code, and bits 11:0 contain the +// associated value. Examples: +// hdata = (1<<12)|550 = Set hash clock rate to 550 Mhz +// hdata = (4<<12)|1 = Increase hash clock rate by 1% +// hdata = (6<<12) = Go back to whatever the "original" OP_USB_INIT settings were +// +// Finally, if 4 bytes of "data" follows the OP_WORK_RESTART header, then that data is taken +// as a little endian bitmap, bit set = enable clock change to that die, bit clear = don't +// change clock on that die, i.e. considered as a uint32_t, then 0x1 = die 0, 0x2 = die 1 etc. 
+ +#define WR_NO_CHANGE 0 +#define WR_CLOCK_VALUE 1 +#define WR_MHZ_INCREASE 2 +#define WR_MHZ_DECREASE 3 +#define WR_PERCENT_INCREASE 4 +#define WR_PERCENT_DECREASE 5 +#define WR_REVERT 6 + +#define WR_COMMAND_SHIFT 12 + +// Structure definitions, LE platforms + +#if __BYTE_ORDER == __BIG_ENDIAN && !defined(WIN32) +#include "hf_protocol_be.h" +#else +// Generic header +struct hf_header { + uint8_t preamble; // Always 0xaa + uint8_t operation_code; + uint8_t chip_address; + uint8_t core_address; + uint16_t hdata; // Header specific data + uint8_t data_length; // .. of data frame to follow, in 4 byte blocks, 0=no data + uint8_t crc8; // Computed across bytes 1-6 inclusive +} __attribute__((packed,aligned(4))); // 8 bytes total + +// Header specific to OP_PLL_CONFIG +struct hf_pll_config { + uint8_t preamble; + uint8_t operation_code; + uint8_t chip_address; + + uint8_t pll_divr:6; + uint8_t pll_bypass:1; + uint8_t pll_reset:1; + + uint8_t pll_divf; + + uint8_t spare1:1; // Must always be 0 + uint8_t pll_divq:3; + uint8_t pll_range:3; + uint8_t pll_fse:1; // Must always be 1 + + uint8_t data_length; // Always 0 + uint8_t crc8; // Computed across bytes 1-6 inclusive +} __attribute__((packed,aligned(4))); // 8 bytes total + +// OP_HASH serial data +struct hf_hash_serial { + uint8_t midstate[32]; // Computed from first half of block header + uint8_t merkle_residual[4]; // From block header + uint32_t timestamp; // From block header + uint32_t bits; // Actual difficulty target for block header + uint32_t starting_nonce; // Usually set to 0 + uint32_t nonce_loops; // How many nonces to search, or 0 for 2^32 + uint16_t ntime_loops; // How many times to roll timestamp, or 0 + uint8_t search_difficulty; // Search difficulty to use, # of '0' digits required + uint8_t option; + uint8_t group; + uint8_t spare3[3]; +} __attribute__((packed,aligned(4))); + +// OP_HASH usb data - header+data = 64 bytes +struct hf_hash_usb { + uint8_t midstate[32]; // Computed from first half of 
block header + uint8_t merkle_residual[4]; // From block header + uint32_t timestamp; // From block header + uint32_t bits; // Actual difficulty target for block header + uint32_t starting_nonce; // Usually set to 0 + uint32_t nonce_loops; // How many nonces to search, or 0 for 2^32 + uint16_t ntime_loops; // How many times to roll timestamp, or 0 + uint8_t search_difficulty; // Search difficulty to use, # of '0' digits required + uint8_t group; // Non-zero for valid group +} __attribute__((packed,aligned(4))); + +// OP_NONCE data +struct hf_candidate_nonce { + uint32_t nonce; // Candidate nonce + uint16_t sequence; // Sequence number from corresponding OP_HASH + uint16_t ntime; // ntime offset, if ntime roll occurred, in LS 12 bits + // If b12 set, search forward next 128 nonces to find solution(s) +} __attribute__((packed,aligned(4))); + +// OP_CONFIG data +struct hf_config_data { + uint16_t status_period:11; // Periodic status time, msec + uint16_t enable_periodic_status:1; // Send periodic status + uint16_t send_status_on_core_idle:1; // Schedule status whenever core goes idle + uint16_t send_status_on_pending_empty:1; // Schedule status whenever core pending goes idle + uint16_t pwm_active_level:1; // Active level of PWM outputs, if used + uint16_t forward_all_privileged_packets:1; // Forward priv pkts -- diagnostic + uint8_t status_batch_delay; // Batching delay, time to wait before sending status + uint8_t watchdog:7; // Watchdog timeout, seconds + uint8_t disable_sensors:1; // Diagnostic + + uint8_t rx_header_timeout:7; // Header timeout in char times + uint8_t rx_ignore_header_crc:1; // Ignore rx header crc's (diagnostic) + uint8_t rx_data_timeout:7; // Data timeout in char times / 16 + uint8_t rx_ignore_data_crc:1; // Ignore rx data crc's (diagnostic) + uint8_t stats_interval:7; // Minimum interval to report statistics (seconds) + uint8_t stat_diagnostic:1; // Never set this + uint8_t measure_interval; // Die temperature measurement interval (msec) + + 
uint32_t one_usec:12; // How many LF clocks per usec. + uint32_t max_nonces_per_frame:4; // Maximum # of nonces to combine in a single frame + uint32_t voltage_sample_points:8; // Bit mask for sample points (up to 5 bits set) + uint32_t pwm_phases:2; // phases - 1 + uint32_t trim:4; // Trim value for temperature measurements + uint32_t clock_diagnostic:1; // Never set this + uint32_t forward_all_packets:1; // Forward everything - diagnostic. + + uint16_t pwm_period; // Period of PWM outputs, in reference clock cycles + uint16_t pwm_pulse_period; // Initial count, phase 0 +} __attribute__((packed,aligned(4))); + +// OP_GROUP data +struct hf_group_data { + uint16_t nonce_msoffset; // This value << 16 added to starting nonce + uint16_t ntime_offset; // This value added to timestamp +} __attribute__((packed,aligned(4))); + +// Structure of the monitor fields for G-1, returned in OP_STATUS, core bitmap follows this +struct hf_g1_monitor { + uint16_t die_temperature; // Die temperature ADC count + uint8_t core_voltage[6]; // Core voltage + // [0] = main sensor + // [1]-[5] = other positions +} __attribute__((packed,aligned(4))); + +// What comes back in the body of an OP_STATISTICS frame (On die statistics) +struct hf_statistics { + uint8_t rx_header_crc; // Header CRC error's + uint8_t rx_body_crc; // Data CRC error's + uint8_t rx_header_timeouts; // Header timeouts + uint8_t rx_body_timeouts; // Data timeouts + uint8_t core_nonce_fifo_full; // Core nonce Q overrun events + uint8_t array_nonce_fifo_full; // System nonce Q overrun events + uint8_t stats_overrun; // Overrun in statistics reporting + uint8_t spare; +} __attribute__((packed,aligned(4))); + + +//////////////////////////////////////////////////////////////////////////////// +// USB protocol data structures +//////////////////////////////////////////////////////////////////////////////// + +// Convenience header specific to OP_USB_INIT +struct hf_usb_init_header { + uint8_t preamble; // Always 0xaa + uint8_t 
operation_code; + uint8_t spare1; + + uint8_t protocol:3; // Which protocol to use + uint8_t user_configuration:1; // Use the following configuration data + uint8_t pll_bypass:1; // Force PLL bypass, hash clock = ref clock + uint8_t no_asic_initialization:1; // Do not perform automatic ASIC initialization + uint8_t do_atspeed_core_tests:1; // Do core tests at speed, return second bitmap + uint8_t shed_supported:1; // Host supports gwq status shed_count + + uint16_t hash_clock; // Requested hash clock frequency + + uint8_t data_length; // .. of data frame to follow, in 4 byte blocks + uint8_t crc8; // Computed across bytes 1-6 inclusive +} __attribute__((packed,aligned(4))); // 8 bytes total + +// Options (only if present) that may be appended to the above header +// Each option involving a numerical value will only be in effect if the value is non-zero +// This allows the user to select only those options desired for modification. Do not +// use this facility unless you are an expert - loading inconsistent settings will not work. 
+struct hf_usb_init_options { + uint16_t group_ntime_roll; // Total ntime roll amount per group + uint16_t core_ntime_roll; // Total core ntime roll amount + uint8_t low_operating_temp_limit; // Lowest normal operating limit + uint8_t high_operating_temp_limit; // Highest normal operating limit + uint16_t spare; +} __attribute__((packed,aligned(4))); + +// Base item returned from device for OP_USB_INIT +struct hf_usb_init_base { + uint16_t firmware_rev; // Firmware revision # + uint16_t hardware_rev; // Hardware revision # + uint32_t serial_number; // Board serial number + uint8_t operation_status; // Reply status for OP_USB_INIT (0 = success) + uint8_t extra_status_1; // Extra reply status information, code specific + uint16_t sequence_modulus; // Sequence numbers are to be modulo this + uint16_t hash_clockrate; // Actual hash clock rate used (nearest Mhz) + uint16_t inflight_target; // Target inflight amount for GWQ protocol +} __attribute__((packed,aligned(4))); + +// The above base item (16 bytes) is followed by the struct hf_config_data (16 bytes) actually +// used internally (so users may modify non-critical fields by doing subsequent +// OP_CONFIG operations). This is followed by a device specific "core good" bitmap (unless the +// user disabled initialization), and optionally by an at-speed "core good" bitmap. + + +// Information in an OP_DIE_STATUS frame. This is for one die - there are four per ASIC. +// Board level phase current and voltage sensors are likely to disappear in later production models. 
+struct hf_g1_die_data { + struct hf_g1_monitor die; // Die sensors - 8 bytes + uint16_t phase_currents[4]; // Phase currents (0 if unavailable) + uint16_t voltage; // Voltage at device boundary (0 if unavailable) + uint16_t temperature; // Regulator temp sensor + uint16_t tacho; // See documentation + uint16_t spare; +} __attribute__((packed,aligned(4))); // 24 bytes total + + +// Information for an OP_GWQ_STATUS frame +// If sequence_head == sequence_tail, then there is no active work and sequence_head is invalid +struct hf_gwq_data { + uint64_t hash_count; // Add this to host's cumulative hash count + uint16_t sequence_head; // The latest, internal, active sequence # + uint16_t sequence_tail; // The latest, internal, inactive sequence # + uint16_t shed_count; // # of cores have been shedded for thermal control + uint16_t spare; +} __attribute__((packed,aligned(4))); + + +// Information for an OP_USB_STATS1 frame - Communication statistics +struct hf_usb_stats1 { + // USB incoming + uint16_t usb_rx_preambles; + uint16_t usb_rx_receive_byte_errors; + uint16_t usb_rx_bad_hcrc; + + // USB outgoing + uint16_t usb_tx_attempts; + uint16_t usb_tx_packets; + uint16_t usb_tx_timeouts; + uint16_t usb_tx_incompletes; + uint16_t usb_tx_endpointstalled; + uint16_t usb_tx_disconnected; + uint16_t usb_tx_suspended; + + // Internal UART transmit + uint16_t uart_tx_queue_dma; + uint16_t uart_tx_interrupts; + + // Internal UART receive + uint16_t uart_rx_preamble_ints; + uint16_t uart_rx_missed_preamble_ints; + uint16_t uart_rx_header_done; + uint16_t uart_rx_data_done; + uint16_t uart_rx_bad_hcrc; + //uint16_t uart_rx_bad_crc32; + uint16_t uart_rx_bad_dma; + uint16_t uart_rx_short_dma; + uint16_t uart_rx_buffers_full; + + uint8_t max_tx_buffers; // Maximum # of send buffers ever used + uint8_t max_rx_buffers; // Maximum # of receive buffers ever used +} __attribute__((packed,aligned(4))); + +// Information for an OP_USB_NOTICE frame +struct hf_usb_notice_data { + uint32_t 
extra_data; // Depends on notification code + char message[]; // NULL terminated, little endian byte order +}; +#endif + +#endif diff --git a/hf_protocol_be.h b/hf_protocol_be.h new file mode 100644 index 0000000..5920d50 --- /dev/null +++ b/hf_protocol_be.h @@ -0,0 +1,267 @@ +// +// Copyright 2013, 2014 HashFast Technologies LLC +// +// This program is free software; you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by the Free +// Software Foundation; either version 3 of the License, or (at your option) +// any later version. See COPYING for more details. +// +// Big endian versions of packed structures +// +// Version 1.0 +// + +#ifndef _HF_PROTOCOL_BE_H_ +#define _HF_PROTOCOL_BE_H_ + +// Generic header +struct hf_header { + uint8_t preamble; // Always 0xaa + uint8_t operation_code; + uint8_t chip_address; + uint8_t core_address; + uint16_t hdata; // Header specific data + uint8_t data_length; // .. of data frame to follow, in 4 byte blocks, 0=no data + uint8_t crc8; // Computed across bytes 1-6 inclusive +} __attribute__((packed,aligned(4))); // 8 bytes total + +// Header specific to OP_PLL_CONFIG +struct hf_pll_config { + uint8_t preamble; + uint8_t operation_code; + uint8_t chip_address; + + uint8_t pll_reset:1; + uint8_t pll_bypass:1; + uint8_t pll_divr:6; + + uint8_t pll_divf; + + uint8_t pll_fse:1; // Must always be 1 + uint8_t pll_range:3; + uint8_t pll_divq:3; + uint8_t spare1:1; // Must always be 0 + + uint8_t data_length; // Always 0 + uint8_t crc8; // Computed across bytes 1-6 inclusive +} __attribute__((packed,aligned(4))); // 8 bytes total + +// OP_HASH serial data +struct hf_hash_serial { + uint8_t midstate[32]; // Computed from first half of block header + uint8_t merkle_residual[4]; // From block header + uint32_t timestamp; // From block header + uint32_t bits; // Actual difficulty target for block header + uint32_t starting_nonce; // Usually set to 0 + uint32_t nonce_loops; // How many 
nonces to search, or 0 for 2^32 + uint16_t ntime_loops; // How many times to roll timestamp, or 0 + uint8_t search_difficulty; // Search difficulty to use, # of '0' digits required + uint8_t option; + uint8_t group; + uint8_t spare3[3]; +} __attribute__((packed,aligned(4))); + +// OP_HASH usb data - header+data = 64 bytes +struct hf_hash_usb { + uint8_t midstate[32]; // Computed from first half of block header + uint8_t merkle_residual[4]; // From block header + uint32_t timestamp; // From block header + uint32_t bits; // Actual difficulty target for block header + uint32_t starting_nonce; // Usually set to 0 + uint32_t nonce_loops; // How many nonces to search, or 0 for 2^32 + uint16_t ntime_loops; // How many times to roll timestamp, or 0 + uint8_t search_difficulty; // Search difficulty to use, # of '0' digits required + uint8_t group; // Non-zero for valid group +} __attribute__((packed,aligned(4))); + +// OP_NONCE data +struct hf_candidate_nonce { + uint32_t nonce; // Candidate nonce + uint16_t sequence; // Sequence number from corresponding OP_HASH + uint16_t ntime; // ntime offset, if ntime roll occurred, in LS 12 bits + // If b12 set, search forward next 128 nonces to find solution(s) +} __attribute__((packed,aligned(4))); + +// OP_CONFIG data +// This is usually internal data only, for serial drivers only +// Users shouldn't normally need to interpret this, but in the event a Big Endian +// user requires access to this data, the following structure will get all +// the fields in the right place, but byte swaps will be required for the +// uint16_t's and the uint32_t. 
+struct hf_config_data { + uint16_t forward_all_privileged_packets:1; // Forward priv pkts -- diagnostic + uint16_t pwm_active_level:1; // Active level of PWM outputs, if used + uint16_t send_status_on_pending_empty:1; // Schedule status whenever core pending goes idle + uint16_t send_status_on_core_idle:1; // Schedule status whenever core goes idle + uint16_t enable_periodic_status:1; // Send periodic status + uint16_t status_period:11; // Periodic status time, msec + + uint8_t status_batch_delay; // Batching delay, time to wait before sending status + uint8_t disable_sensors:1; // Diagnostic + uint8_t watchdog:7; // Watchdog timeout, seconds + + uint8_t rx_ignore_header_crc:1; // Ignore rx header crc's (diagnostic) + uint8_t rx_header_timeout:7; // Header timeout in char times + uint8_t rx_ignore_data_crc:1; // Ignore rx data crc's (diagnostic) + uint8_t rx_data_timeout:7; // Data timeout in char times / 16 + uint8_t stat_diagnostic:1; // Never set this + uint8_t stats_interval:7; // Minimum interval to report statistics (seconds) + uint8_t measure_interval; // Die temperature measurement interval (msec) + + uint32_t forward_all_packets:1; // Forward everything - diagnostic. + uint32_t clock_diagnostic:1; // Never set this + uint32_t trim:4; // Trim value for temperature measurements + uint32_t pwm_phases:2; // phases - 1 + uint32_t voltage_sample_points:8; // Bit mask for sample points (up to 5 bits set) + uint32_t max_nonces_per_frame:4; // Maximum # of nonces to combine in a single frame + uint32_t one_usec:12; // How many LF clocks per usec. 
+ + uint16_t pwm_period; // Period of PWM outputs, in reference clock cycles + uint16_t pwm_pulse_period; // Initial count, phase 0 +} __attribute__((packed,aligned(4))); + +// OP_GROUP data +struct hf_group_data { + uint16_t nonce_msoffset; // This value << 16 added to starting nonce + uint16_t ntime_offset; // This value added to timestamp +} __attribute__((packed,aligned(4))); + +// Structure of the monitor fields for G-1, returned in OP_STATUS, core bitmap follows this +struct hf_g1_monitor { + uint16_t die_temperature; // Die temperature ADC count + uint8_t core_voltage[6]; // Core voltage + // [0] = main sensor + // [1]-[5] = other positions +} __attribute__((packed,aligned(4))); + +// What comes back in the body of an OP_STATISTICS frame (On die statistics) +struct hf_statistics { + uint8_t rx_header_crc; // Header CRC error's + uint8_t rx_body_crc; // Data CRC error's + uint8_t rx_header_timeouts; // Header timeouts + uint8_t rx_body_timeouts; // Data timeouts + uint8_t core_nonce_fifo_full; // Core nonce Q overrun events + uint8_t array_nonce_fifo_full; // System nonce Q overrun events + uint8_t stats_overrun; // Overrun in statistics reporting + uint8_t spare; +} __attribute__((packed,aligned(4))); + + +//////////////////////////////////////////////////////////////////////////////// +// USB protocol data structures +//////////////////////////////////////////////////////////////////////////////// + +// Convenience header specific to OP_USB_INIT +struct hf_usb_init_header { + uint8_t preamble; // Always 0xaa + uint8_t operation_code; + uint8_t spare1; + + uint8_t shed_supported:1; // Host supports gwq status shed_count + uint8_t do_atspeed_core_tests:1; // Do core tests at speed, return second bitmap + uint8_t no_asic_initialization:1; // Do not perform automatic ASIC initialization + uint8_t pll_bypass:1; // Force PLL bypass, hash clock = ref clock + uint8_t user_configuration:1; // Use the following configuration data + uint8_t protocol:3; // Which 
protocol to use + + uint16_t hash_clock; // Requested hash clock frequency + + uint8_t data_length; // .. of data frame to follow, in 4 byte blocks + uint8_t crc8; // Computed across bytes 1-6 inclusive +} __attribute__((packed,aligned(4))); // 8 bytes total + +// Options (only if present) that may be appended to the above header +// Each option involving a numerical value will only be in effect if the value is non-zero +// This allows the user to select only those options desired for modification. Do not +// use this facility unless you are an expert - loading inconsistent settings will not work. +struct hf_usb_init_options { + uint16_t group_ntime_roll; // Total ntime roll amount per group + uint16_t core_ntime_roll; // Total core ntime roll amount + uint8_t low_operating_temp_limit; // Lowest normal operating limit + uint8_t high_operating_temp_limit; // Highest normal operating limit + uint16_t spare; +} __attribute__((packed,aligned(4))); + +// Base item returned from device for OP_USB_INIT +struct hf_usb_init_base { + uint16_t firmware_rev; // Firmware revision # + uint16_t hardware_rev; // Hardware revision # + uint32_t serial_number; // Board serial number + uint8_t operation_status; // Reply status for OP_USB_INIT (0 = success) + uint8_t extra_status_1; // Extra reply status information, code specific + uint16_t sequence_modulus; // Sequence numbers are to be modulo this + uint16_t hash_clockrate; // Actual hash clock rate used (nearest Mhz) + uint16_t inflight_target; // Target inflight amount for GWQ protocol +} __attribute__((packed,aligned(4))); + +// The above base item (16 bytes) is followed by the struct hf_config_data (16 bytes) actually +// used internally (so users may modify non-critical fields by doing subsequent +// OP_CONFIG operations). This is followed by a device specific "core good" bitmap (unless the +// user disabled initialization), and optionally by an at-speed "core good" bitmap. + + +// Information in an OP_DIE_STATUS frame. 
This is for one die - there are four per ASIC. +// Board level phase current and voltage sensors are likely to disappear in later production models. +struct hf_g1_die_data { + struct hf_g1_monitor die; // Die sensors - 8 bytes + uint16_t phase_currents[4]; // Phase currents (0 if unavailable) + uint16_t voltage; // Voltage at device boundary (0 if unavailable) + uint16_t temperature; // Regulator temp sensor + uint16_t tacho; // See documentation + uint16_t spare; +} __attribute__((packed,aligned(4))); // 24 bytes total + +// Information for an OP_GWQ_STATUS frame +// If sequence_head == sequence_tail, then there is no active work and sequence_head is invalid +struct hf_gwq_data { + uint64_t hash_count; // Add this to host's cumulative hash count + uint16_t sequence_head; // The latest, internal, active sequence # + uint16_t sequence_tail; // The latest, internal, inactive sequence # + uint16_t shed_count; // # of cores have been shedded for thermal control + uint16_t spare; +} __attribute__((packed,aligned(4))); + + +// Information for an OP_USB_STATS1 frame - Communication statistics +struct hf_usb_stats1 { + // USB incoming + uint16_t usb_rx_preambles; + uint16_t usb_rx_receive_byte_errors; + uint16_t usb_rx_bad_hcrc; + + // USB outgoing + uint16_t usb_tx_attempts; + uint16_t usb_tx_packets; + uint16_t usb_tx_timeouts; + uint16_t usb_tx_incompletes; + uint16_t usb_tx_endpointstalled; + uint16_t usb_tx_disconnected; + uint16_t usb_tx_suspended; + + // Internal UART transmit + uint16_t uart_tx_queue_dma; + uint16_t uart_tx_interrupts; + + // Internal UART receive + uint16_t uart_rx_preamble_ints; + uint16_t uart_rx_missed_preamble_ints; + uint16_t uart_rx_header_done; + uint16_t uart_rx_data_done; + uint16_t uart_rx_bad_hcrc; + //uint16_t uart_rx_bad_crc32; + uint16_t uart_rx_bad_dma; + uint16_t uart_rx_short_dma; + uint16_t uart_rx_buffers_full; + + uint8_t max_tx_buffers; // Maximum # of send buffers ever used + uint8_t max_rx_buffers; // Maximum # of receive 
buffers ever used +} __attribute__((packed,aligned(4))); + +// Information for an OP_USB_NOTICE frame +struct hf_usb_notice_data { + uint32_t extra_data; // Depends on notification code + char message[]; // NULL terminated, little endian byte order +}; + + +#endif diff --git a/i2c-context.c b/i2c-context.c new file mode 100644 index 0000000..b3fbe18 --- /dev/null +++ b/i2c-context.c @@ -0,0 +1,102 @@ +/* + * generic I2C slave access interface + * + * Copyright 2014 Zefir Kurtisi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "miner.h" +#include "i2c-context.h" + + +static bool i2c_slave_write(struct i2c_ctx *ctx, uint8_t reg, uint8_t val) +{ + union i2c_smbus_data data; + data.byte = val; + + struct i2c_smbus_ioctl_data args; + + args.read_write = I2C_SMBUS_WRITE; + args.command = reg; + args.size = I2C_SMBUS_BYTE_DATA; + args.data = &data; + + if (ioctl(ctx->file, I2C_SMBUS, &args) == -1) { + applog(LOG_INFO, "i2c 0x%02x: failed to write to fdesc %d: %s", + ctx->addr, ctx->file, strerror(errno)); + return false; + } + applog(LOG_DEBUG, "I2C-W(0x%02x/0x%02x)=0x%02x", ctx->addr, reg, val); + return true; +} + +static bool i2c_slave_read(struct i2c_ctx *ctx, uint8_t reg, uint8_t *val) +{ + union i2c_smbus_data data; + struct i2c_smbus_ioctl_data args; + + args.read_write = I2C_SMBUS_READ; + args.command = reg; + args.size = I2C_SMBUS_BYTE_DATA; + args.data = &data; + + if (ioctl(ctx->file, I2C_SMBUS, &args) == -1) { + applog(LOG_INFO, "i2c 0x%02x: failed to read from fdesc %d: %s", + ctx->addr, ctx->file, strerror(errno)); + return false; + } + *val = data.byte; + applog(LOG_DEBUG, 
"I2C-R(0x%02x/0x%02x)=0x%02x", ctx->addr, reg, *val); + return true; +} + +static void i2c_slave_exit(struct i2c_ctx *ctx) +{ + if (ctx->file == -1) + return; + close(ctx->file); + free(ctx); +} + +extern struct i2c_ctx *i2c_slave_open(char *i2c_bus, uint8_t slave_addr) +{ + int file = open(i2c_bus, O_RDWR); + if (file < 0) { + applog(LOG_INFO, "Failed to open i2c-1: %s", strerror(errno)); + return NULL; + } + + if (ioctl(file, I2C_SLAVE, slave_addr) < 0) { + close(file); + return NULL; + } + struct i2c_ctx *ctx = malloc(sizeof(*ctx)); + assert(ctx != NULL); + + ctx->addr = slave_addr; + ctx->file = file; + ctx->exit = i2c_slave_exit; + ctx->read = i2c_slave_read; + ctx->write = i2c_slave_write; + return ctx; +} + diff --git a/i2c-context.h b/i2c-context.h new file mode 100644 index 0000000..e39d5da --- /dev/null +++ b/i2c-context.h @@ -0,0 +1,26 @@ +#ifndef I2C_CONTEXT_H +#define I2C_CONTEXT_H + +#include +#include + +/* common i2c context */ +struct i2c_ctx { + /* destructor */ + void (*exit)(struct i2c_ctx *ctx); + /* write one byte to given register */ + bool (*write)(struct i2c_ctx *ctx, uint8_t reg, uint8_t val); + /* read one byte from given register */ + bool (*read)(struct i2c_ctx *ctx, uint8_t reg, uint8_t *val); + + /* common data */ + uint8_t addr; + int file; +}; + +/* the default I2C bus on RPi */ +#define I2C_BUS "/dev/i2c-1" + +extern struct i2c_ctx *i2c_slave_open(char *i2c_bus, uint8_t slave_addr); + +#endif /* I2C_CONTEXT_H */ diff --git a/klist.c b/klist.c new file mode 100644 index 0000000..a552017 --- /dev/null +++ b/klist.c @@ -0,0 +1,430 @@ +/* + * Copyright 2013-2014 Andrew Smith - BlackArrow Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include + +static void k_alloc_items(K_LIST *list, KLIST_FFL_ARGS) +{ + K_ITEM *item; + int allocate, i; + + if (list->is_store) { + quithere(1, "List %s store can't %s()" KLIST_FFL, + list->name, __func__, KLIST_FFL_PASS); + } + + if (list->limit > 0 && list->total >= list->limit) + return; + + allocate = list->allocate; + if (list->limit > 0 && (list->total + allocate) > list->limit) + allocate = list->limit - list->total; + + list->item_mem_count++; + if (!(list->item_memory = realloc(list->item_memory, + list->item_mem_count * sizeof(*(list->item_memory))))) { + quithere(1, "List %s item_memory failed to realloc count=%d", + list->name, list->item_mem_count); + } + item = calloc(allocate, sizeof(*item)); + if (!item) { + quithere(1, "List %s failed to calloc %d new items - total was %d, limit was %d", + list->name, allocate, list->total, list->limit); + } + list->item_memory[list->item_mem_count - 1] = (void *)item; + + list->total += allocate; + list->count = allocate; + list->count_up = allocate; + + item[0].name = list->name; + item[0].prev = NULL; + item[0].next = &(item[1]); + for (i = 1; i < allocate-1; i++) { + item[i].name = list->name; + item[i].prev = &item[i-1]; + item[i].next = &item[i+1]; + } + item[allocate-1].name = list->name; + item[allocate-1].prev = &(item[allocate-2]); + item[allocate-1].next = NULL; + + list->head = item; + if (list->do_tail) + list->tail = &(item[allocate-1]); + + item = list->head; + while (item) { + list->data_mem_count++; + if (!(list->data_memory = realloc(list->data_memory, + list->data_mem_count * + sizeof(*(list->data_memory))))) { + quithere(1, "List %s data_memory failed to realloc count=%d", + list->name, list->data_mem_count); + } + item->data = calloc(1, list->siz); + if (!(item->data)) + quithere(1, "List %s failed to calloc item data", list->name); + list->data_memory[list->data_mem_count - 1] = (void *)(item->data); + item = item->next; + } +} + +K_STORE *k_new_store(K_LIST *list) +{ + K_STORE 
*store; + + store = calloc(1, sizeof(*store)); + if (!store) + quithere(1, "Failed to calloc store for %s", list->name); + + store->is_store = true; + store->lock = list->lock; + store->name = list->name; + store->do_tail = list->do_tail; + + return store; +} + +K_LIST *_k_new_list(const char *name, size_t siz, int allocate, int limit, bool do_tail, KLIST_FFL_ARGS) +{ + K_LIST *list; + + if (allocate < 1) + quithere(1, "Invalid new list %s with allocate %d must be > 0", name, allocate); + + if (limit < 0) + quithere(1, "Invalid new list %s with limit %d must be >= 0", name, limit); + + list = calloc(1, sizeof(*list)); + if (!list) + quithere(1, "Failed to calloc list %s", name); + + list->is_store = false; + + list->lock = calloc(1, sizeof(*(list->lock))); + if (!(list->lock)) + quithere(1, "Failed to calloc lock for list %s", name); + + cglock_init(list->lock); + + list->name = name; + list->siz = siz; + list->allocate = allocate; + list->limit = limit; + list->do_tail = do_tail; + + k_alloc_items(list, KLIST_FFL_PASS); + + return list; +} + +/* + * Unlink and return the head of the list + * If the list is empty: + * 1) If it's a store - return NULL + * 2) alloc a new list and return the head - + * which is NULL if the list limit has been reached + */ +K_ITEM *_k_unlink_head(K_LIST *list, KLIST_FFL_ARGS) +{ + K_ITEM *item; + + if (!(list->head) && !(list->is_store)) + k_alloc_items(list, KLIST_FFL_PASS); + + if (!(list->head)) + return NULL; + + item = list->head; + list->head = item->next; + if (list->head) + list->head->prev = NULL; + else { + if (list->do_tail) + list->tail = NULL; + } + + item->prev = item->next = NULL; + + list->count--; + + return item; +} + +// Zeros the head returned +K_ITEM *_k_unlink_head_zero(K_LIST *list, KLIST_FFL_ARGS) +{ + K_ITEM *item; + + item = _k_unlink_head(list, KLIST_FFL_PASS); + + if (item) + memset(item->data, 0, list->siz); + + return item; +} + +// Returns NULL if empty +K_ITEM *_k_unlink_tail(K_LIST *list, 
KLIST_FFL_ARGS) +{ + K_ITEM *item; + + if (!(list->do_tail)) { + quithere(1, "List %s can't %s() - do_tail is false" KLIST_FFL, + list->name, __func__, KLIST_FFL_PASS); + } + + if (!(list->tail)) + return NULL; + + item = list->tail; + list->tail = item->prev; + if (list->tail) + list->tail->next = NULL; + else + list->head = NULL; + + item->prev = item->next = NULL; + + list->count--; + + return item; +} + +void _k_add_head(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS) +{ + if (item->name != list->name) { + quithere(1, "List %s can't %s() a %s item" KLIST_FFL, + list->name, __func__, item->name, KLIST_FFL_PASS); + } + + item->prev = NULL; + item->next = list->head; + if (list->head) + list->head->prev = item; + + list->head = item; + + if (list->do_tail) { + if (!(list->tail)) + list->tail = item; + } + + list->count++; + list->count_up++; +} + +/* slows it down (of course) - only for debugging +void _k_free_head(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS) +{ + memset(item->data, 0xff, list->siz); + _k_add_head(list, item, KLIST_FFL_PASS); +} +*/ + +void _k_add_tail(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS) +{ + if (item->name != list->name) { + quithere(1, "List %s can't %s() a %s item" KLIST_FFL, + list->name, __func__, item->name, KLIST_FFL_PASS); + } + + if (!(list->do_tail)) { + quithere(1, "List %s can't %s() - do_tail is false" KLIST_FFL, + list->name, __func__, KLIST_FFL_PASS); + } + + item->prev = list->tail; + item->next = NULL; + if (list->tail) + list->tail->next = item; + + list->tail = item; + + if (!(list->head)) + list->head = item; + + list->count++; + list->count_up++; +} + +void _k_insert_before(K_LIST *list, K_ITEM *item, K_ITEM *before, KLIST_FFL_ARGS) +{ + if (item->name != list->name) { + quithere(1, "List %s can't %s() a %s item" KLIST_FFL, + list->name, __func__, item->name, KLIST_FFL_PASS); + } + + if (!before) { + quithere(1, "%s() (%s) can't before a null item" KLIST_FFL, + __func__, list->name, KLIST_FFL_PASS); + } + + item->next = 
before; + item->prev = before->prev; + if (before->prev) + before->prev->next = item; + else + list->head = item; + before->prev = item; + + list->count++; + list->count_up++; +} + +void _k_insert_after(K_LIST *list, K_ITEM *item, K_ITEM *after, KLIST_FFL_ARGS) +{ + if (item->name != list->name) { + quithere(1, "List %s can't %s() a %s item" KLIST_FFL, + list->name, __func__, item->name, KLIST_FFL_PASS); + } + + if (!after) { + quithere(1, "%s() (%s) can't after a null item" KLIST_FFL, + __func__, list->name, KLIST_FFL_PASS); + } + + item->prev = after; + item->next = after->next; + if (after->next) + after->next->prev = item; + else { + if (list->do_tail) + list->tail = item; + } + after->next = item; + + list->count++; + list->count_up++; +} + +void _k_unlink_item(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS) +{ + if (item->name != list->name) { + quithere(1, "List %s can't %s() a %s item" KLIST_FFL, + list->name, __func__, item->name, KLIST_FFL_PASS); + } + + if (item->prev) + item->prev->next = item->next; + + if (item->next) + item->next->prev = item->prev; + + if (list->head == item) + list->head = item->next; + + if (list->do_tail) { + if (list->tail == item) + list->tail = item->prev; + } + + item->prev = item->next = NULL; + + list->count--; +} + +void _k_list_transfer_to_head(K_LIST *from, K_LIST *to, KLIST_FFL_ARGS) +{ + if (from->name != to->name) { + quithere(1, "List %s can't %s() to a %s list" KLIST_FFL, + from->name, __func__, to->name, KLIST_FFL_PASS); + } + + if (!(from->do_tail)) { + quithere(1, "List %s can't %s() - do_tail is false" KLIST_FFL, + from->name, __func__, KLIST_FFL_PASS); + } + + if (!(from->head)) + return; + + if (to->head) + to->head->prev = from->tail; + else + to->tail = from->tail; + + from->tail->next = to->head; + to->head = from->head; + + from->head = from->tail = NULL; + to->count += from->count; + from->count = 0; + to->count_up += from->count_up; + from->count_up = 0; +} + +void _k_list_transfer_to_tail(K_LIST *from, 
K_LIST *to, KLIST_FFL_ARGS) +{ + if (from->name != to->name) { + quithere(1, "List %s can't %s() to a %s list" KLIST_FFL, + from->name, __func__, to->name, KLIST_FFL_PASS); + } + + if (!(from->do_tail)) { + quithere(1, "List %s can't %s() - do_tail is false" KLIST_FFL, + from->name, __func__, KLIST_FFL_PASS); + } + + if (!(from->head)) + return; + + if (to->tail) + to->tail->next = from->head; + else + to->head = from->head; + + from->head->prev = to->tail; + to->tail = from->tail; + + from->head = from->tail = NULL; + to->count += from->count; + from->count = 0; + to->count_up += from->count_up; + from->count_up = 0; +} + +K_LIST *_k_free_list(K_LIST *list, KLIST_FFL_ARGS) +{ + int i; + + if (list->is_store) { + quithere(1, "List %s can't %s() a store" KLIST_FFL, + list->name, __func__, KLIST_FFL_PASS); + } + + for (i = 0; i < list->item_mem_count; i++) + free(list->item_memory[i]); + free(list->item_memory); + + for (i = 0; i < list->data_mem_count; i++) + free(list->data_memory[i]); + free(list->data_memory); + + cglock_destroy(list->lock); + + free(list->lock); + + free(list); + + return NULL; +} + +K_STORE *_k_free_store(K_STORE *store, KLIST_FFL_ARGS) +{ + if (!(store->is_store)) { + quithere(1, "Store %s can't %s() the list" KLIST_FFL, + store->name, __func__, KLIST_FFL_PASS); + } + + free(store); + + return NULL; +} diff --git a/klist.h b/klist.h new file mode 100644 index 0000000..a79a4ed --- /dev/null +++ b/klist.h @@ -0,0 +1,94 @@ +/* + * Copyright 2013-2014 Andrew Smith - BlackArrow Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#ifndef KLIST_H +#define KLIST_H + +#include + +#define KLIST_FFL " - from %s %s() line %d" +#define KLIST_FFL_HERE __FILE__, __func__, __LINE__ +#define KLIST_FFL_PASS file, func, line +#define KLIST_FFL_ARGS __maybe_unused const char *file, \ + __maybe_unused const char *func, \ + __maybe_unused const int line + +typedef struct k_item { + const char *name; + struct k_item *prev; + struct k_item *next; + void *data; +} K_ITEM; + +typedef struct k_list { + const char *name; + bool is_store; + cglock_t *lock; + struct k_item *head; + struct k_item *tail; + size_t siz; // item data size + int total; // total allocated + int count; // in this list + int count_up; // incremented every time one is added + int allocate; // number to intially allocate and each time we run out + int limit; // total limit - 0 means unlimited + bool do_tail; // track the tail? + int item_mem_count; // how many item memory buffers have been allocated + void **item_memory; // allocated item memory buffers + int data_mem_count; // how many item data memory buffers have been allocated + void **data_memory; // allocated item data memory buffers +} K_LIST; + +/* + * K_STORE is for a list of items taken from a K_LIST + * The restriction is, a K_STORE must not allocate new items, + * only the K_LIST should do that + * i.e. all K_STORE items came from a K_LIST + */ +#define K_STORE K_LIST + +/* + * N.B. 
all locking is done in the code using the K_*LOCK macros + */ +#define K_WLOCK(_list) cg_wlock(_list->lock) +#define K_WUNLOCK(_list) cg_wunlock(_list->lock) +#define K_RLOCK(_list) cg_rlock(_list->lock) +#define K_RUNLOCK(_list) cg_runlock(_list->lock) + +extern K_STORE *k_new_store(K_LIST *list); +extern K_LIST *_k_new_list(const char *name, size_t siz, int allocate, int limit, bool do_tail, KLIST_FFL_ARGS); +#define k_new_list(_name, _siz, _allocate, _limit, _do_tail) _k_new_list(_name, _siz, _allocate, _limit, _do_tail, KLIST_FFL_HERE) +extern K_ITEM *_k_unlink_head(K_LIST *list, KLIST_FFL_ARGS); +#define k_unlink_head(_list) _k_unlink_head(_list, KLIST_FFL_HERE) +extern K_ITEM *_k_unlink_head_zero(K_LIST *list, KLIST_FFL_ARGS); +#define k_unlink_head_zero(_list) _k_unlink_head_zero(_list, KLIST_FFL_HERE) +extern K_ITEM *_k_unlink_tail(K_LIST *list, KLIST_FFL_ARGS); +#define k_unlink_tail(_list) _k_unlink_tail(_list, KLIST_FFL_HERE) +extern void _k_add_head(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS); +#define k_add_head(_list, _item) _k_add_head(_list, _item, KLIST_FFL_HERE) +// extern void k_free_head(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS); +#define k_free_head(__list, __item) _k_add_head(__list, __item, KLIST_FFL_HERE) +extern void _k_add_tail(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS); +#define k_add_tail(_list, _item) _k_add_tail(_list, _item, KLIST_FFL_HERE) +extern void _k_insert_before(K_LIST *list, K_ITEM *item, K_ITEM *before, KLIST_FFL_ARGS); +#define k_insert_before(_list, _item, _before) _k_insert_before(_list, _item, _before, KLIST_FFL_HERE) +extern void _k_insert_after(K_LIST *list, K_ITEM *item, K_ITEM *after, KLIST_FFL_ARGS); +#define k_insert_after(_list, _item, _after) _k_insert_after(_list, _item, _after, KLIST_FFL_HERE) +extern void _k_unlink_item(K_LIST *list, K_ITEM *item, KLIST_FFL_ARGS); +#define k_unlink_item(_list, _item) _k_unlink_item(_list, _item, KLIST_FFL_HERE) +void _k_list_transfer_to_head(K_LIST *from, K_LIST *to, 
KLIST_FFL_ARGS); +#define k_list_transfer_to_head(_from, _to) _k_list_transfer_to_head(_from, _to, KLIST_FFL_HERE) +void _k_list_transfer_to_tail(K_LIST *from, K_LIST *to, KLIST_FFL_ARGS); +#define k_list_transfer_to_tail(_from, _to) _k_list_transfer_to_tail(_from, _to, KLIST_FFL_HERE) +extern K_LIST *_k_free_list(K_LIST *list, KLIST_FFL_ARGS); +#define k_free_list(_list) _k_free_list(_list, KLIST_FFL_HERE) +extern K_STORE *_k_free_store(K_STORE *store, KLIST_FFL_ARGS); +#define k_free_store(_store) _k_free_store(_store, KLIST_FFL_HERE) + +#endif diff --git a/knc-asic.c b/knc-asic.c new file mode 100644 index 0000000..7c164f9 --- /dev/null +++ b/knc-asic.c @@ -0,0 +1,551 @@ +/* + * library for KnCminer devices + * + * Copyright 2014 KnCminer + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "miner.h" +#include "logging.h" + +#include "knc-transport.h" + +#include "knc-asic.h" + +/* Control Commands + * + * SPI command on channel. 1- + * 1'b1 3'channel 12'msglen_in_bits SPI message data + * Sends the supplied message on selected SPI bus + * + * Communication test + * 16'h1 16'x + * Simple test of SPI communication + * + * LED control + * 4'h1 4'red 4'green 4'blue + * Sets led colour + * + * Clock frequency + * 4'h2 12'msglen_in_bits 4'channel 4'die 16'MHz 512'x + * Configures the hashing clock rate + */ + +/* ASIC Command structure + * command 8 bits + * chip 8 bits + * core 16 bits + * data [command dependent] + * CRC32 32 bits (Neptune) + * + * ASIC response starts immediately after core address bits. 
+ * + * response data + * CRC32 32 bits (Neptune) + * STATUS 8 bits 1 0 ~CRC_OK 0 0 ACCEPTED_WORK 0 1 (Neptune) + * + * Requests + * + * SETWORK (Jupiter) + * midstate 256 bits + * data 96 bits + * + * SETWORK/SETWORK_CLEAN (Neptune) + * slot | 0xf0 8 bits + * precalc_midstate 192 bits + * precalc_data 96 bits + * midstate 256 bits + * + * Returns REPORT response on Neptune + * + * Responses + * + * GETINFO + * + * (core field unused) + * + * cores 16 bits + * version 16 bits + * reserved 60 bits (Neptune) + * die_status 4 bits (Neptune) + * 1' pll_locked + * 1' hash_reset_n 1 if cores have been reset since last report + * 1' pll_reset_n 1 if PLL have been reset since last report + * 1' pll_power_down + * core_status cores * 2 bits (Neptune) rounded up to bytes + * 1' want_work + * 1' has_report (unreliable) + * + * REPORT + * + * reserved 2 bits + * next_state 1 bit next work state loaded + * state 1 bit hashing (0 on Jupiter) + * next_slot 4 bit slot id of next work state (0 on Jupiter) + * progress 8 bits upper 8 bits of nonce counter + * active_slot 4 bits slot id of current work state + * nonce_slot 4 bits slot id of found nonce + * nonce 32 bits + * + * reserved 4 bits + * nonce_slot 4 bits + * nonce 32 bits + * + * repeat for 5 nonce entries in total on Neptune + * Jupiter only has first nonce entry + */ + +// Precalculate first 3 rounds of SHA256 - as much as possible +// Macro routines copied from sha2.c +static void knc_prepare_neptune_work(unsigned char *out, struct work *work) { + const uint8_t *midstate = work->midstate; + const uint8_t *data = work->data + 16*4; + +#ifndef GET_ULONG_BE +#define GET_ULONG_BE(b,i) \ + (( (uint32_t) (b)[(i) ] << 24 ) \ + | ( (uint32_t) (b)[(i) + 1] << 16 ) \ + | ( (uint32_t) (b)[(i) + 2] << 8 ) \ + | ( (uint32_t) (b)[(i) + 3] )) +#endif + +#ifndef GET_ULONG_LE +#define GET_ULONG_LE(b,i) \ + (( (uint32_t) (b)[(i) + 3] << 24 ) \ + | ( (uint32_t) (b)[(i) + 2] << 16 ) \ + | ( (uint32_t) (b)[(i) + 1] << 8 ) \ + | ( (uint32_t) 
(b)[(i) + 0] )) +#endif + +#ifndef PUT_ULONG_BE +#define PUT_ULONG_BE(n,b,i) \ + { \ + (b)[(i) ] = (unsigned char) ( (n) >> 24 ); \ + (b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \ + (b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \ + (b)[(i) + 3] = (unsigned char) ( (n) ); \ + } +#endif + +#ifndef PUT_ULONG_LE +#define PUT_ULONG_LE(n,b,i) \ + { \ + (b)[(i) + 3] = (unsigned char) ( (n) >> 24 ); \ + (b)[(i) + 2] = (unsigned char) ( (n) >> 16 ); \ + (b)[(i) + 1] = (unsigned char) ( (n) >> 8 ); \ + (b)[(i) + 0] = (unsigned char) ( (n) ); \ + } +#endif + +#define SHR(x,n) ((x & 0xFFFFFFFF) >> n) +#define ROTR(x,n) (SHR(x,n) | (x << (32 - n))) + +#define S0(x) (ROTR(x, 7) ^ ROTR(x,18) ^ SHR(x, 3)) +#define S1(x) (ROTR(x,17) ^ ROTR(x,19) ^ SHR(x,10)) + +#define S2(x) (ROTR(x, 2) ^ ROTR(x,13) ^ ROTR(x,22)) +#define S3(x) (ROTR(x, 6) ^ ROTR(x,11) ^ ROTR(x,25)) + +#define F0(x,y,z) ((x & y) | (z & (x | y))) +#define F1(x,y,z) (z ^ (x & (y ^ z))) + +#define R(t) \ +( \ + W[t] = S1(W[t - 2]) + W[t - 7] + \ + S0(W[t - 15]) + W[t - 16] \ +) + +#define P(a,b,c,d,e,f,g,h,x,K) \ + { \ + temp1 = h + S3(e) + F1(e,f,g) + K + x; \ + temp2 = S2(a) + F0(a,b,c); \ + d += temp1; h = temp1 + temp2; \ + } + + uint32_t temp1, temp2, W[16+3]; + uint32_t A, B, C, D, E, F, G, H; + + W[0] = GET_ULONG_LE(data, 0*4 ); + W[1] = GET_ULONG_LE(data, 1*4 ); + W[2] = GET_ULONG_LE(data, 2*4 ); + W[3] = 0; // since S0(0)==0, this must be 0. S0(nonce) is added in hardware. 
+ W[4] = 0x80000000; + W[5] = 0; + W[6] = 0; + W[7] = 0; + W[8] = 0; + W[9] = 0; + W[10] = 0; + W[11] = 0; + W[12] = 0; + W[13] = 0; + W[14] = 0; + W[15] = 0x00000280; + R(16); // Expand W 14, 9, 1, 0 + R(17); // 15, 10, 2, 1 + R(18); // 16, 11, 3, 2 + + A = GET_ULONG_LE(midstate, 0*4 ); + B = GET_ULONG_LE(midstate, 1*4 ); + C = GET_ULONG_LE(midstate, 2*4 ); + D = GET_ULONG_LE(midstate, 3*4 ); + E = GET_ULONG_LE(midstate, 4*4 ); + F = GET_ULONG_LE(midstate, 5*4 ); + G = GET_ULONG_LE(midstate, 6*4 ); + H = GET_ULONG_LE(midstate, 7*4 ); + + uint32_t D_ = D, H_ = H; + P( A, B, C, D_, E, F, G, H_, W[ 0], 0x428A2F98 ); + uint32_t C_ = C, G_ = G; + P( H_, A, B, C_, D_, E, F, G_, W[ 1], 0x71374491 ); + uint32_t B_ = B, F_ = F; + P( G_, H_, A, B_, C_, D_, E, F_, W[ 2], 0xB5C0FBCF ); + + PUT_ULONG_BE( D_, out, 0*4 ); + PUT_ULONG_BE( C_, out, 1*4 ); + PUT_ULONG_BE( B_, out, 2*4 ); + PUT_ULONG_BE( H_, out, 3*4 ); + PUT_ULONG_BE( G_, out, 4*4 ); + PUT_ULONG_BE( F_, out, 5*4 ); + PUT_ULONG_BE( W[18], out, 6*4 ); // This is partial S0(nonce) added by hardware + PUT_ULONG_BE( W[17], out, 7*4 ); + PUT_ULONG_BE( W[16], out, 8*4 ); + PUT_ULONG_BE( H, out, 9*4 ); + PUT_ULONG_BE( G, out, 10*4 ); + PUT_ULONG_BE( F, out, 11*4 ); + PUT_ULONG_BE( E, out, 12*4 ); + PUT_ULONG_BE( D, out, 13*4 ); + PUT_ULONG_BE( C, out, 14*4 ); + PUT_ULONG_BE( B, out, 15*4 ); + PUT_ULONG_BE( A, out, 16*4 ); +} + +static void knc_prepare_jupiter_work(unsigned char *out, struct work *work) { + int i; + for (i = 0; i < 8 * 4; i++) + out[i] = work->midstate[8 * 4 - i - 1]; + for (i = 0; i < 3 * 4; i++) + out[8 * 4 + i] = work->data[16 * 4 + 3 * 4 - i - 1]; +} + +static void knc_prepare_core_command(uint8_t *request, int command, int die, int core) +{ + request[0] = command; + request[1] = die; + request[2] = core >> 8; + request[3] = core & 0xff; +} + +int knc_prepare_report(uint8_t *request, int die, int core) +{ + knc_prepare_core_command(request, KNC_ASIC_CMD_REPORT, die, core); + return 4; +} + +int 
knc_prepare_info(uint8_t *request, int die, struct knc_die_info *die_info, int *response_size) +{ + request[0] = KNC_ASIC_CMD_GETINFO; + request[1] = die; + request[2] = 0; + request[3] = 0; + switch (die_info->version) { + case KNC_VERSION_JUPITER: + *response_size = 4; + break; + default: + *response_size = 12 + (KNC_MAX_CORES_PER_DIE*2 + 7) / 8; + break; + case KNC_VERSION_NEPTUNE: + *response_size = 12 + (die_info->cores*2 + 7) / 8; + break; + } + return 4; +} + +int knc_prepare_neptune_setwork(uint8_t *request, int die, int core, int slot, struct work *work, int clean) +{ + if (!clean) + knc_prepare_core_command(request, KNC_ASIC_CMD_SETWORK, die, core); + else + knc_prepare_core_command(request, KNC_ASIC_CMD_SETWORK_CLEAN, die, core); + request[4] = slot | 0xf0; + if (work) + knc_prepare_neptune_work(request + 4 + 1, work); + else + memset(request + 4 + 1, 0, 6*4 + 3*4 + 8*4); + return 4 + 1 + 6*4 + 3*4 + 8*4; +} + +int knc_prepare_jupiter_setwork(uint8_t *request, int die, int core, int slot, struct work *work) +{ + knc_prepare_core_command(request, KNC_ASIC_CMD_SETWORK, die, core); + request[4] = slot | 0xf0; + if (work) + knc_prepare_jupiter_work(request + 4 + 1, work); + else + memset(request + 4 + 1, 0, 8*4 + 3*4); + return 4 + 1 + 8*4 + 3*4; +} + +int knc_prepare_jupiter_halt(uint8_t *request, int die, int core) +{ + knc_prepare_core_command(request, KNC_ASIC_CMD_HALT, die, core); + return 4; +} + +int knc_prepare_neptune_halt(uint8_t *request, int die, int core) +{ + knc_prepare_core_command(request, KNC_ASIC_CMD_HALT, die, core); + request[4] = 0 | 0xf0; + memset(request + 4 + 1, 0, 6*4 + 3*4 + 8*4); + return 4 + 1 + 6*4 + 3*4 + 8*4; +} + +void knc_prepare_neptune_message(int request_length, const uint8_t *request, uint8_t *buffer) +{ + uint32_t crc; + memcpy(buffer, request, request_length); + buffer += request_length; + crc = crc32(0, Z_NULL, 0); + crc = crc32(crc, request, request_length); + PUT_ULONG_BE(crc, buffer, 0); +} + +int 
knc_transfer_length(int request_length, int response_length) +{ + /* FPGA control, request header, request body/response, CRC(4), ACK(1), EXTRA(3) */ + return 2 + MAX(request_length, 4 + response_length ) + 4 + 1 + 3; +} + +int knc_prepare_transfer(uint8_t *txbuf, int offset, int size, int channel, int request_length, const uint8_t *request, int response_length) +{ + /* FPGA control, request header, request body/response, CRC(4), ACK(1), EXTRA(3) */ + int msglen = MAX(request_length, 4 + response_length ) + 4 + 1 + 3; + int len = 2 + msglen; + txbuf += offset; + + if (len + offset > size) { + applog(LOG_DEBUG, "KnC SPI buffer full"); + return -1; + } + txbuf[0] = 1 << 7 | (channel+1) << 4 | (msglen * 8) >> 8; + txbuf[1] = (msglen * 8); + knc_prepare_neptune_message(request_length, request, txbuf+2); + + return offset + len; +} + +/* red, green, blue valid range 0 - 15 */ +int knc_prepare_led(uint8_t *txbuf, int offset, int size, int red, int green, int blue) +{ + /* 4'h1 4'red 4'green 4'blue */ + int len = 2; + txbuf += offset; + + if (len + offset > size) { + applog(LOG_DEBUG, "KnC SPI buffer full"); + return -1; + } + txbuf[0] = 1 << 4 | red; + txbuf[1] = green << 4 | blue; + + return offset + len; + +} + +/* reset controller */ +int knc_prepare_reset(uint8_t *txbuf, int offset, int size) +{ + /* 16'h0002 16'unused */ + int len = 4; + txbuf += offset; + + if (len + offset > size) { + applog(LOG_DEBUG, "KnC SPI buffer full"); + return -1; + } + txbuf[0] = (0x0002) >> 8; + txbuf[1] = (0x0002) & 0xff; + txbuf[2] = 0; + txbuf[3] = 0; + + return offset + len; +} + +/* request_length = 0 disables communication checks, i.e. 
Jupiter protocol */ +int knc_decode_response(uint8_t *rxbuf, int request_length, uint8_t **response, int response_length) +{ + int ret = 0; + int len = knc_transfer_length(request_length, response_length); + if (request_length > 0 && response_length > 0) { + uint32_t crc, recv_crc; + crc = crc32(0, Z_NULL, 0); + crc = crc32(crc, rxbuf + 2 + 4, response_length); + recv_crc = GET_ULONG_BE(rxbuf + 2 + 4, response_length); + if (crc != recv_crc) + ret |= KNC_ERR_CRC; + } + + if (response) { + if (response_length > 0) { + *response = rxbuf + 2 + 4; + } else { + *response = NULL; + } + } + + if (response_length == 0) + return 0; + + uint8_t ack = rxbuf[len - 4]; + + if ((ack & KNC_ASIC_ACK_MASK) != KNC_ASIC_ACK_MATCH) + ret |= KNC_ERR_ACK; + if ((ack & KNC_ASIC_ACK_CRC)) + ret |= KNC_ERR_CRCACK; + if ((ack & KNC_ASIC_ACK_ACCEPT)) + ret |= KNC_ACCEPTED; + if (ret && memcmp(&rxbuf[len-4], "\377\377\377\377", 4) == 0) + ret = KNC_ERR_UNAVAIL; + return ret; +} + +int knc_syncronous_transfer(void *ctx, int channel, int request_length, const uint8_t *request, int response_length, uint8_t *response) +{ + int len = knc_transfer_length(request_length, response_length); + uint8_t txbuf[len]; + uint8_t rxbuf[len]; + memset(txbuf, 0, len); + knc_prepare_transfer(txbuf, 0, len, channel, request_length, request, response_length); + knc_trnsp_transfer(ctx, txbuf, rxbuf, len); + + uint8_t *response_buf; + int rc = knc_decode_response(rxbuf, request_length, &response_buf, response_length); + if (response) + memcpy(response, response_buf, response_length); + return rc; +} + +int knc_decode_info(uint8_t *response, struct knc_die_info *die_info) +{ + int cores_in_die = response[0]<<8 | response[1]; + int version = response[2]<<8 | response[3]; + if (version == KNC_ASIC_VERSION_JUPITER && cores_in_die <= 48) { + die_info->version = KNC_VERSION_JUPITER; + die_info->cores = cores_in_die; + memset(die_info->want_work, -1, cores_in_die); + die_info->pll_power_down = -1; + die_info->pll_reset_n = 
-1; + die_info->hash_reset_n = -1; + die_info->pll_locked = -1; + return 0; + } else if (version == KNC_ASIC_VERSION_NEPTUNE && cores_in_die <= KNC_MAX_CORES_PER_DIE) { + die_info->version = KNC_VERSION_NEPTUNE; + die_info->cores = cores_in_die; + int core; + for (core = 0; core < cores_in_die; core++) + die_info->want_work[core] = ((response[12 + core/4] >> ((3-(core % 4)) * 2)) >> 1) & 1; + int die_status = response[11] & 0xf; + die_info->pll_power_down = (die_status >> 0) & 1; + die_info->pll_reset_n = (die_status >> 1) & 1; + die_info->hash_reset_n = (die_status >> 2) & 1; + die_info->pll_locked = (die_status >> 3) & 1; + return 0; + } else { + return -1; + } +} + +int knc_decode_report(uint8_t *response, struct knc_report *report, int version) +{ +/* + * reserved 2 bits + * next_state 1 bit next work state loaded + * state 1 bit hashing (0 on Jupiter) + * next_slot 4 bit slot id of next work state (0 on Jupiter) + * progress 8 bits upper 8 bits of nonce counter + * active_slot 4 bits slot id of current work state + * nonce_slot 4 bits slot id of found nonce + * nonce 32 bits + * + * reserved 4 bits + * nonce_slot 4 bits + * nonce 32 bits + */ + report->next_state = (response[0] >> 5) & 1; + if (version != KNC_VERSION_JUPITER) { + report->state = (response[0] >> 4) & 1; + report->next_slot = response[0] & ((1<<4)-1); + } else { + report->state = -1; + report->next_slot = -1; + } + report->progress = (uint32_t)response[1] << 24; + report->active_slot = (response[2] >> 4) & ((1<<4)-1); + int n; + int n_nonces = version == KNC_VERSION_JUPITER ? 
1 : 5; + for (n = 0; n < n_nonces; n++) { + report->nonce[n].slot = response[2+n*5] & ((1<<4)-1); + report->nonce[n].nonce = + (uint32_t)response[3+n*5] << 24 | + (uint32_t)response[4+n*5] << 16 | + (uint32_t)response[5+n*5] << 8 | + (uint32_t)response[6+n*5] << 0 | + 0; + } + for (; n < KNC_NONCES_PER_REPORT; n++) { + report->nonce[n].slot = -1; + report->nonce[n].nonce = 0; + } + return 0; +} + +int knc_detect_die(void *ctx, int channel, int die, struct knc_die_info *die_info) +{ + uint8_t request[4]; + int response_len = 2 + 2 + 4 + 4 + (KNC_MAX_CORES_PER_DIE*2 + 7) / 8; + uint8_t response[response_len]; + + int request_len = knc_prepare_info(request, die, die_info, &response_len); + int status = knc_syncronous_transfer(ctx, channel, request_len, request, response_len, response); + + /* Workaround for pre-ASIC version */ + int cores_in_die = response[0]<<8 | response[1]; + int version = response[2]<<8 | response[3]; + if (version == KNC_ASIC_VERSION_NEPTUNE && cores_in_die < KNC_MAX_CORES_PER_DIE) { + applog(LOG_DEBUG, "KnC %d-%d: Looks like a NEPTUNE die with %d cores", channel, die, cores_in_die); + /* Try again with right response size */ + response_len = 2 + 2 + 4 + 4 + (cores_in_die*2 + 7) / 8; + status = knc_syncronous_transfer(ctx, channel, request_len, request, response_len, response); + } + int rc = -1; + if (version == KNC_ASIC_VERSION_JUPITER || status == 0) + rc = knc_decode_info(response, die_info); + if (rc == 0) + applog(LOG_INFO, "KnC %d-%d: Found %s die with %d cores", channel, die, + die_info->version == KNC_VERSION_NEPTUNE ? "NEPTUNE" : + die_info->version == KNC_VERSION_JUPITER ? 
"JUPITER" : + "UNKNOWN", + cores_in_die); + else + applog(LOG_DEBUG, "KnC %d-%d: No KnC chip found", channel, die); + return rc; +} + diff --git a/knc-asic.h b/knc-asic.h new file mode 100644 index 0000000..7c48317 --- /dev/null +++ b/knc-asic.h @@ -0,0 +1,88 @@ +#ifndef _CGMINER_NEPTUNE_H +#define _CGMINER_NEPTUNE_H +#include <stdint.h> +#include "miner.h" + +/* ASIC Command codes */ +#define KNC_ASIC_CMD_GETINFO 0x80 +#define KNC_ASIC_CMD_SETWORK 0x81 +#define KNC_ASIC_CMD_SETWORK_CLEAN 0x83 /* Neptune */ +#define KNC_ASIC_CMD_HALT 0x83 /* Jupiter */ +#define KNC_ASIC_CMD_REPORT 0x82 + +/* Status byte */ +#define KNC_ASIC_ACK_CRC (1<<5) +#define KNC_ASIC_ACK_ACCEPT (1<<2) +#define KNC_ASIC_ACK_MASK (~(KNC_ASIC_ACK_CRC|KNC_ASIC_ACK_ACCEPT)) +#define KNC_ASIC_ACK_MATCH ((1<<7)|(1<<0)) + +/* Version word */ +#define KNC_ASIC_VERSION_JUPITER 0xa001 +#define KNC_ASIC_VERSION_NEPTUNE 0xa002 + +/* Limits of current chips & I/O board */ +#define KNC_MAX_CORES_PER_DIE 360 +#define KNC_MAX_ASICS 6 + +struct knc_die_info { + enum { + KNC_VERSION_UNKNOWN = 0, + KNC_VERSION_JUPITER, + KNC_VERSION_NEPTUNE + } version; + char want_work[KNC_MAX_CORES_PER_DIE]; + int cores; + int pll_locked; + int hash_reset_n; + int pll_reset_n; + int pll_power_down; +}; + +#define KNC_NONCES_PER_REPORT 5 + +struct knc_report { + int next_state; + int state; + int next_slot; + int active_slot; + uint32_t progress; + struct { + int slot; + uint32_t nonce; + } nonce[KNC_NONCES_PER_REPORT]; +}; + +int knc_prepare_info(uint8_t *request, int die, struct knc_die_info *die_info, int *response_size); +int knc_prepare_report(uint8_t *request, int die, int core); +int knc_prepare_neptune_setwork(uint8_t *request, int die, int core, int slot, struct work *work, int clean); +int knc_prepare_jupiter_setwork(uint8_t *request, int die, int core, int slot, struct work *work); +int knc_prepare_jupiter_halt(uint8_t *request, int die, int core); +int knc_prepare_neptune_halt(uint8_t *request, int die, int core); + +int
knc_decode_info(uint8_t *response, struct knc_die_info *die_info); +int knc_decode_report(uint8_t *response, struct knc_report *report, int version); + +void knc_prepare_neptune_message(int request_length, const uint8_t *request, uint8_t *buffer); + +#define KNC_ACCEPTED (1<<0) +#define KNC_ERR_CRC (1<<1) +#define KNC_ERR_ACK (1<<2) +#define KNC_ERR_CRCACK (1<<3) +#define KNC_ERR_UNAVAIL (1<<4) +#define KNC_ERR_MASK (~(KNC_ACCEPTED)) +#define KNC_IS_ERROR(x) (((x) & KNC_ERR_MASK) != 0) + +int knc_prepare_transfer(uint8_t *txbuf, int offset, int size, int channel, int request_length, const uint8_t *request, int response_length); +int knc_decode_response(uint8_t *rxbuf, int request_length, uint8_t **response, int response_length); +int knc_syncronous_transfer(void *ctx, int channel, int request_length, const uint8_t *request, int response_length, uint8_t *response); + +/* Detect ASIC DIE version */ +int knc_detect_die(void *ctx, int channel, int die, struct knc_die_info *die_info); + +/* red, green, blue valid range 0 - 15. No response or checksum from controller */ +int knc_prepare_led(uint8_t *txbuf, int offset, int size, int red, int green, int blue); + +/* Reset controller */ +int knc_prepare_reset(uint8_t *txbuf, int offset, int size); + +#endif diff --git a/knc-transport-spi.c b/knc-transport-spi.c new file mode 100644 index 0000000..5cefdf6 --- /dev/null +++ b/knc-transport-spi.c @@ -0,0 +1,148 @@ +/* + * Direct SPI transport layer for KnCminer Jupiters + * + * Copyright 2014 KnCminer + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include +#include +#include +#include + +#include "logging.h" +#include "miner.h" +#include "hexdump.c" +#include "knc-transport.h" + +#define SPI_DEVICE_TEMPLATE "/dev/spidev%d.%d" +#define SPI_MODE (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH) +#define SPI_BITS_PER_WORD 8 +#define SPI_MAX_SPEED 3000000 +#define SPI_DELAY_USECS 0 + +struct spidev_context { + int fd; + uint32_t speed; + uint16_t delay; + uint8_t mode; + uint8_t bits; +}; + +/* Init SPI transport */ +void *knc_trnsp_new(int dev_idx) +{ + struct spidev_context *ctx; + char dev_name[PATH_MAX]; + + if (NULL == (ctx = malloc(sizeof(struct spidev_context)))) { + applog(LOG_ERR, "KnC transport: Out of memory"); + goto l_exit_error; + } + ctx->mode = SPI_MODE; + ctx->bits = SPI_BITS_PER_WORD; + ctx->speed = SPI_MAX_SPEED; + ctx->delay = SPI_DELAY_USECS; + + ctx->fd = -1; + sprintf(dev_name, SPI_DEVICE_TEMPLATE, + dev_idx + 1, /* bus */ + 0 /* chipselect */ + ); + if (0 > (ctx->fd = open(dev_name, O_RDWR))) { + applog(LOG_ERR, "KnC transport: Can not open SPI device %s: %m", + dev_name); + goto l_free_exit_error; + } + + /* + * spi mode + */ + if (0 > ioctl(ctx->fd, SPI_IOC_WR_MODE, &ctx->mode)) + goto l_ioctl_error; + if (0 > ioctl(ctx->fd, SPI_IOC_RD_MODE, &ctx->mode)) + goto l_ioctl_error; + + /* + * bits per word + */ + if (0 > ioctl(ctx->fd, SPI_IOC_WR_BITS_PER_WORD, &ctx->bits)) + goto l_ioctl_error; + if (0 > ioctl(ctx->fd, SPI_IOC_RD_BITS_PER_WORD, &ctx->bits)) + goto l_ioctl_error; + + /* + * max speed hz + */ + if (0 > ioctl(ctx->fd, SPI_IOC_WR_MAX_SPEED_HZ, &ctx->speed)) + goto l_ioctl_error; + if (0 > ioctl(ctx->fd, SPI_IOC_RD_MAX_SPEED_HZ, &ctx->speed)) + goto l_ioctl_error; + + applog(LOG_INFO, "KnC transport: SPI device %s uses mode %hhu, bits %hhu, speed %u", + dev_name, ctx->mode, ctx->bits, ctx->speed); + + return ctx; + +l_ioctl_error: + applog(LOG_ERR, "KnC transport: ioctl error on SPI device %s: %m", dev_name); + close(ctx->fd); +l_free_exit_error: + free(ctx); +l_exit_error: + return 
NULL; +} + +void knc_trnsp_free(void *opaque_ctx) +{ + struct spidev_context *ctx = opaque_ctx; + + if (NULL == ctx) + return; + + close(ctx->fd); + free(ctx); +} + +int knc_trnsp_transfer(void *opaque_ctx, uint8_t *txbuf, uint8_t *rxbuf, int len) +{ + struct spidev_context *ctx = opaque_ctx; + struct spi_ioc_transfer xfr; + int ret; + + memset(rxbuf, 0xff, len); + + ret = len; + + xfr.tx_buf = (unsigned long)txbuf; + xfr.rx_buf = (unsigned long)rxbuf; + xfr.len = len; + xfr.speed_hz = ctx->speed; + xfr.delay_usecs = ctx->delay; + xfr.bits_per_word = ctx->bits; + xfr.cs_change = 0; + xfr.pad = 0; + + applog(LOG_DEBUG, "KnC spi:"); + hexdump(txbuf, len); + if (1 > (ret = ioctl(ctx->fd, SPI_IOC_MESSAGE(1), &xfr))) + applog(LOG_ERR, "KnC spi xfer: ioctl error on SPI device: %m"); + hexdump(rxbuf, len); + + return ret; +} + +bool knc_trnsp_asic_detect(void *opaque_ctx, int chip_id) +{ + return true; +} + +void knc_trnsp_periodic_check(void *opaque_ctx) +{ + return; +} + diff --git a/knc-transport.h b/knc-transport.h new file mode 100644 index 0000000..3db9110 --- /dev/null +++ b/knc-transport.h @@ -0,0 +1,23 @@ +/* + * Transport layer interface for KnCminer devices + * + * Copyright 2014 KnCminer + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#define MAX_ASICS 6 +#define NUM_DIES_IN_ASIC 4 +#define CORES_IN_DIE 48 +#define CORES_PER_ASIC (NUM_DIES_IN_ASIC * CORES_IN_DIE) + +#define MAX_BYTES_IN_SPI_XSFER 4096 + +void *knc_trnsp_new(int dev_idx); +void knc_trnsp_free(void *opaque_ctx); +int knc_trnsp_transfer(void *opaque_ctx, uint8_t *txbuf, uint8_t *rxbuf, int len); +bool knc_trnsp_asic_detect(void *opaque_ctx, int chip_id); +void knc_trnsp_periodic_check(void *opaque_ctx); diff --git a/lib/Makefile.am b/lib/Makefile.am new file mode 100644 index 0000000..bb78703 --- /dev/null +++ b/lib/Makefile.am @@ -0,0 +1,367 @@ +## DO NOT EDIT! GENERATED AUTOMATICALLY! +## Process this file with automake to produce Makefile.in. +# Copyright (C) 2002-2011 Free Software Foundation, Inc. +# +# This file is free software, distributed under the terms of the GNU +# General Public License. As a special exception to the GNU General +# Public License, this file may be distributed as part of a program +# that contains a configuration script generated by Autoconf, under +# the same distribution terms as the rest of that program. +# +# Generated by gnulib-tool. +# Reproduce by: gnulib-tool --import --dir=. --lib=libgnu --source-base=lib --m4-base=m4 --doc-base=doc --tests-base=tests --aux-dir=. --no-conditional-dependencies --no-libtool --macro-prefix=gl --no-vc-files memmem sigaction signal + +AUTOMAKE_OPTIONS = 1.5 gnits + +SUBDIRS = +noinst_HEADERS = +noinst_LIBRARIES = +noinst_LTLIBRARIES = +EXTRA_DIST = +BUILT_SOURCES = +SUFFIXES = +MOSTLYCLEANFILES = core *.stackdump +MOSTLYCLEANDIRS = +CLEANFILES = +DISTCLEANFILES = +MAINTAINERCLEANFILES = + +AM_CPPFLAGS = +AM_CFLAGS = + +noinst_LIBRARIES += libgnu.a + +libgnu_a_SOURCES = +libgnu_a_LIBADD = $(gl_LIBOBJS) +libgnu_a_DEPENDENCIES = $(gl_LIBOBJS) +EXTRA_libgnu_a_SOURCES = + +## begin gnulib module arg-nonnull + +# The BUILT_SOURCES created by this Makefile snippet are not used via #include +# statements but through direct file reference. 
Therefore this snippet must be +# present in all Makefile.am that need it. This is ensured by the applicability +# 'all' defined above. + +BUILT_SOURCES += arg-nonnull.h +# The arg-nonnull.h that gets inserted into generated .h files is the same as +# build-aux/arg-nonnull.h, except that it has the copyright header cut off. +arg-nonnull.h: $(top_srcdir)/./arg-nonnull.h + $(AM_V_GEN)rm -f $@-t $@ && \ + sed -n -e '/GL_ARG_NONNULL/,$$p' \ + < $(top_srcdir)/./arg-nonnull.h \ + > $@-t && \ + mv $@-t $@ +MOSTLYCLEANFILES += arg-nonnull.h arg-nonnull.h-t + +ARG_NONNULL_H=arg-nonnull.h + +EXTRA_DIST += $(top_srcdir)/./arg-nonnull.h + +## end gnulib module arg-nonnull + +## begin gnulib module c++defs + +# The BUILT_SOURCES created by this Makefile snippet are not used via #include +# statements but through direct file reference. Therefore this snippet must be +# present in all Makefile.am that need it. This is ensured by the applicability +# 'all' defined above. + +BUILT_SOURCES += c++defs.h +# The c++defs.h that gets inserted into generated .h files is the same as +# build-aux/c++defs.h, except that it has the copyright header cut off. 
+c++defs.h: $(top_srcdir)/./c++defs.h + $(AM_V_GEN)rm -f $@-t $@ && \ + sed -n -e '/_GL_CXXDEFS/,$$p' \ + < $(top_srcdir)/./c++defs.h \ + > $@-t && \ + mv $@-t $@ +MOSTLYCLEANFILES += c++defs.h c++defs.h-t + +CXXDEFS_H=c++defs.h + +EXTRA_DIST += $(top_srcdir)/./c++defs.h + +## end gnulib module c++defs + +## begin gnulib module memchr + + +EXTRA_DIST += memchr.c memchr.valgrind + +EXTRA_libgnu_a_SOURCES += memchr.c + +## end gnulib module memchr + +## begin gnulib module memmem-simple + + +EXTRA_DIST += memmem.c str-two-way.h + +EXTRA_libgnu_a_SOURCES += memmem.c + +## end gnulib module memmem-simple + +## begin gnulib module sigaction + + +EXTRA_DIST += sig-handler.h sigaction.c + +EXTRA_libgnu_a_SOURCES += sigaction.c + +## end gnulib module sigaction + +## begin gnulib module signal + +BUILT_SOURCES += signal.h + +# We need the following in order to create when the system +# doesn't have a complete one. +signal.h: signal.in.h $(top_builddir)/config.status $(CXXDEFS_H) $(ARG_NONNULL_H) $(WARN_ON_USE_H) + $(AM_V_GEN)rm -f $@-t $@ && \ + { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! 
*/' && \ + sed -e 's|@''GUARD_PREFIX''@|GL|g' \ + -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ + -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ + -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ + -e 's|@''NEXT_SIGNAL_H''@|$(NEXT_SIGNAL_H)|g' \ + -e 's/@''GNULIB_SIGNAL_H_SIGPIPE''@/$(GNULIB_SIGNAL_H_SIGPIPE)/g' \ + -e 's/@''GNULIB_SIGPROCMASK''@/$(GNULIB_SIGPROCMASK)/g' \ + -e 's/@''GNULIB_SIGACTION''@/$(GNULIB_SIGACTION)/g' \ + -e 's|@''HAVE_POSIX_SIGNALBLOCKING''@|$(HAVE_POSIX_SIGNALBLOCKING)|g' \ + -e 's|@''HAVE_SIGSET_T''@|$(HAVE_SIGSET_T)|g' \ + -e 's|@''HAVE_SIGINFO_T''@|$(HAVE_SIGINFO_T)|g' \ + -e 's|@''HAVE_SIGACTION''@|$(HAVE_SIGACTION)|g' \ + -e 's|@''HAVE_STRUCT_SIGACTION_SA_SIGACTION''@|$(HAVE_STRUCT_SIGACTION_SA_SIGACTION)|g' \ + -e 's|@''HAVE_TYPE_VOLATILE_SIG_ATOMIC_T''@|$(HAVE_TYPE_VOLATILE_SIG_ATOMIC_T)|g' \ + -e 's|@''HAVE_SIGHANDLER_T''@|$(HAVE_SIGHANDLER_T)|g' \ + -e '/definitions of _GL_FUNCDECL_RPL/r $(CXXDEFS_H)' \ + -e '/definition of _GL_ARG_NONNULL/r $(ARG_NONNULL_H)' \ + -e '/definition of _GL_WARN_ON_USE/r $(WARN_ON_USE_H)' \ + < $(srcdir)/signal.in.h; \ + } > $@-t && \ + mv $@-t $@ +MOSTLYCLEANFILES += signal.h signal.h-t + +EXTRA_DIST += signal.in.h + +## end gnulib module signal + +## begin gnulib module sigprocmask + + +EXTRA_DIST += sigprocmask.c + +EXTRA_libgnu_a_SOURCES += sigprocmask.c + +## end gnulib module sigprocmask + +## begin gnulib module stddef + +BUILT_SOURCES += $(STDDEF_H) + +# We need the following in order to create when the system +# doesn't have one that works with the given compiler. +if GL_GENERATE_STDDEF_H +stddef.h: stddef.in.h $(top_builddir)/config.status + $(AM_V_GEN)rm -f $@-t $@ && \ + { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! 
*/' && \ + sed -e 's|@''GUARD_PREFIX''@|GL|g' \ + -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ + -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ + -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ + -e 's|@''NEXT_STDDEF_H''@|$(NEXT_STDDEF_H)|g' \ + -e 's|@''HAVE_WCHAR_T''@|$(HAVE_WCHAR_T)|g' \ + -e 's|@''REPLACE_NULL''@|$(REPLACE_NULL)|g' \ + < $(srcdir)/stddef.in.h; \ + } > $@-t && \ + mv $@-t $@ +else +stddef.h: $(top_builddir)/config.status + rm -f $@ +endif +MOSTLYCLEANFILES += stddef.h stddef.h-t + +EXTRA_DIST += stddef.in.h + +## end gnulib module stddef + +## begin gnulib module stdint + +BUILT_SOURCES += $(STDINT_H) + +# We need the following in order to create when the system +# doesn't have one that works with the given compiler. +if GL_GENERATE_STDINT_H +stdint.h: stdint.in.h $(top_builddir)/config.status + $(AM_V_GEN)rm -f $@-t $@ && \ + { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! */'; \ + sed -e 's|@''GUARD_PREFIX''@|GL|g' \ + -e 's/@''HAVE_STDINT_H''@/$(HAVE_STDINT_H)/g' \ + -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ + -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ + -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ + -e 's|@''NEXT_STDINT_H''@|$(NEXT_STDINT_H)|g' \ + -e 's/@''HAVE_SYS_TYPES_H''@/$(HAVE_SYS_TYPES_H)/g' \ + -e 's/@''HAVE_INTTYPES_H''@/$(HAVE_INTTYPES_H)/g' \ + -e 's/@''HAVE_SYS_INTTYPES_H''@/$(HAVE_SYS_INTTYPES_H)/g' \ + -e 's/@''HAVE_SYS_BITYPES_H''@/$(HAVE_SYS_BITYPES_H)/g' \ + -e 's/@''HAVE_WCHAR_H''@/$(HAVE_WCHAR_H)/g' \ + -e 's/@''HAVE_LONG_LONG_INT''@/$(HAVE_LONG_LONG_INT)/g' \ + -e 's/@''HAVE_UNSIGNED_LONG_LONG_INT''@/$(HAVE_UNSIGNED_LONG_LONG_INT)/g' \ + -e 's/@''APPLE_UNIVERSAL_BUILD''@/$(APPLE_UNIVERSAL_BUILD)/g' \ + -e 's/@''BITSIZEOF_PTRDIFF_T''@/$(BITSIZEOF_PTRDIFF_T)/g' \ + -e 's/@''PTRDIFF_T_SUFFIX''@/$(PTRDIFF_T_SUFFIX)/g' \ + -e 's/@''BITSIZEOF_SIG_ATOMIC_T''@/$(BITSIZEOF_SIG_ATOMIC_T)/g' \ + -e 's/@''HAVE_SIGNED_SIG_ATOMIC_T''@/$(HAVE_SIGNED_SIG_ATOMIC_T)/g' \ + -e 
's/@''SIG_ATOMIC_T_SUFFIX''@/$(SIG_ATOMIC_T_SUFFIX)/g' \ + -e 's/@''BITSIZEOF_SIZE_T''@/$(BITSIZEOF_SIZE_T)/g' \ + -e 's/@''SIZE_T_SUFFIX''@/$(SIZE_T_SUFFIX)/g' \ + -e 's/@''BITSIZEOF_WCHAR_T''@/$(BITSIZEOF_WCHAR_T)/g' \ + -e 's/@''HAVE_SIGNED_WCHAR_T''@/$(HAVE_SIGNED_WCHAR_T)/g' \ + -e 's/@''WCHAR_T_SUFFIX''@/$(WCHAR_T_SUFFIX)/g' \ + -e 's/@''BITSIZEOF_WINT_T''@/$(BITSIZEOF_WINT_T)/g' \ + -e 's/@''HAVE_SIGNED_WINT_T''@/$(HAVE_SIGNED_WINT_T)/g' \ + -e 's/@''WINT_T_SUFFIX''@/$(WINT_T_SUFFIX)/g' \ + < $(srcdir)/stdint.in.h; \ + } > $@-t && \ + mv $@-t $@ +else +stdint.h: $(top_builddir)/config.status + rm -f $@ +endif +MOSTLYCLEANFILES += stdint.h stdint.h-t + +EXTRA_DIST += stdint.in.h + +## end gnulib module stdint + +## begin gnulib module string + +BUILT_SOURCES += string.h + +# We need the following in order to create when the system +# doesn't have one that works with the given compiler. +string.h: string.in.h $(top_builddir)/config.status $(CXXDEFS_H) $(ARG_NONNULL_H) $(WARN_ON_USE_H) + $(AM_V_GEN)rm -f $@-t $@ && \ + { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! 
*/' && \ + sed -e 's|@''GUARD_PREFIX''@|GL|g' \ + -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ + -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ + -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ + -e 's|@''NEXT_STRING_H''@|$(NEXT_STRING_H)|g' \ + -e 's/@''GNULIB_MBSLEN''@/$(GNULIB_MBSLEN)/g' \ + -e 's/@''GNULIB_MBSNLEN''@/$(GNULIB_MBSNLEN)/g' \ + -e 's/@''GNULIB_MBSCHR''@/$(GNULIB_MBSCHR)/g' \ + -e 's/@''GNULIB_MBSRCHR''@/$(GNULIB_MBSRCHR)/g' \ + -e 's/@''GNULIB_MBSSTR''@/$(GNULIB_MBSSTR)/g' \ + -e 's/@''GNULIB_MBSCASECMP''@/$(GNULIB_MBSCASECMP)/g' \ + -e 's/@''GNULIB_MBSNCASECMP''@/$(GNULIB_MBSNCASECMP)/g' \ + -e 's/@''GNULIB_MBSPCASECMP''@/$(GNULIB_MBSPCASECMP)/g' \ + -e 's/@''GNULIB_MBSCASESTR''@/$(GNULIB_MBSCASESTR)/g' \ + -e 's/@''GNULIB_MBSCSPN''@/$(GNULIB_MBSCSPN)/g' \ + -e 's/@''GNULIB_MBSPBRK''@/$(GNULIB_MBSPBRK)/g' \ + -e 's/@''GNULIB_MBSSPN''@/$(GNULIB_MBSSPN)/g' \ + -e 's/@''GNULIB_MBSSEP''@/$(GNULIB_MBSSEP)/g' \ + -e 's/@''GNULIB_MBSTOK_R''@/$(GNULIB_MBSTOK_R)/g' \ + -e 's/@''GNULIB_MEMCHR''@/$(GNULIB_MEMCHR)/g' \ + -e 's/@''GNULIB_MEMMEM''@/$(GNULIB_MEMMEM)/g' \ + -e 's/@''GNULIB_MEMPCPY''@/$(GNULIB_MEMPCPY)/g' \ + -e 's/@''GNULIB_MEMRCHR''@/$(GNULIB_MEMRCHR)/g' \ + -e 's/@''GNULIB_RAWMEMCHR''@/$(GNULIB_RAWMEMCHR)/g' \ + -e 's/@''GNULIB_STPCPY''@/$(GNULIB_STPCPY)/g' \ + -e 's/@''GNULIB_STPNCPY''@/$(GNULIB_STPNCPY)/g' \ + -e 's/@''GNULIB_STRCHRNUL''@/$(GNULIB_STRCHRNUL)/g' \ + -e 's/@''GNULIB_STRDUP''@/$(GNULIB_STRDUP)/g' \ + -e 's/@''GNULIB_STRNCAT''@/$(GNULIB_STRNCAT)/g' \ + -e 's/@''GNULIB_STRNDUP''@/$(GNULIB_STRNDUP)/g' \ + -e 's/@''GNULIB_STRNLEN''@/$(GNULIB_STRNLEN)/g' \ + -e 's/@''GNULIB_STRPBRK''@/$(GNULIB_STRPBRK)/g' \ + -e 's/@''GNULIB_STRSEP''@/$(GNULIB_STRSEP)/g' \ + -e 's/@''GNULIB_STRSTR''@/$(GNULIB_STRSTR)/g' \ + -e 's/@''GNULIB_STRCASESTR''@/$(GNULIB_STRCASESTR)/g' \ + -e 's/@''GNULIB_STRTOK_R''@/$(GNULIB_STRTOK_R)/g' \ + -e 's/@''GNULIB_STRERROR''@/$(GNULIB_STRERROR)/g' \ + -e 
's/@''GNULIB_STRERROR_R''@/$(GNULIB_STRERROR_R)/g' \ + -e 's/@''GNULIB_STRSIGNAL''@/$(GNULIB_STRSIGNAL)/g' \ + -e 's/@''GNULIB_STRVERSCMP''@/$(GNULIB_STRVERSCMP)/g' \ + < $(srcdir)/string.in.h | \ + sed -e 's|@''HAVE_MBSLEN''@|$(HAVE_MBSLEN)|g' \ + -e 's|@''HAVE_MEMCHR''@|$(HAVE_MEMCHR)|g' \ + -e 's|@''HAVE_DECL_MEMMEM''@|$(HAVE_DECL_MEMMEM)|g' \ + -e 's|@''HAVE_MEMPCPY''@|$(HAVE_MEMPCPY)|g' \ + -e 's|@''HAVE_DECL_MEMRCHR''@|$(HAVE_DECL_MEMRCHR)|g' \ + -e 's|@''HAVE_RAWMEMCHR''@|$(HAVE_RAWMEMCHR)|g' \ + -e 's|@''HAVE_STPCPY''@|$(HAVE_STPCPY)|g' \ + -e 's|@''HAVE_STPNCPY''@|$(HAVE_STPNCPY)|g' \ + -e 's|@''HAVE_STRCHRNUL''@|$(HAVE_STRCHRNUL)|g' \ + -e 's|@''HAVE_DECL_STRDUP''@|$(HAVE_DECL_STRDUP)|g' \ + -e 's|@''HAVE_DECL_STRNDUP''@|$(HAVE_DECL_STRNDUP)|g' \ + -e 's|@''HAVE_DECL_STRNLEN''@|$(HAVE_DECL_STRNLEN)|g' \ + -e 's|@''HAVE_STRPBRK''@|$(HAVE_STRPBRK)|g' \ + -e 's|@''HAVE_STRSEP''@|$(HAVE_STRSEP)|g' \ + -e 's|@''HAVE_STRCASESTR''@|$(HAVE_STRCASESTR)|g' \ + -e 's|@''HAVE_DECL_STRTOK_R''@|$(HAVE_DECL_STRTOK_R)|g' \ + -e 's|@''HAVE_DECL_STRERROR_R''@|$(HAVE_DECL_STRERROR_R)|g' \ + -e 's|@''HAVE_DECL_STRSIGNAL''@|$(HAVE_DECL_STRSIGNAL)|g' \ + -e 's|@''HAVE_STRVERSCMP''@|$(HAVE_STRVERSCMP)|g' \ + -e 's|@''REPLACE_STPNCPY''@|$(REPLACE_STPNCPY)|g' \ + -e 's|@''REPLACE_MEMCHR''@|$(REPLACE_MEMCHR)|g' \ + -e 's|@''REPLACE_MEMMEM''@|$(REPLACE_MEMMEM)|g' \ + -e 's|@''REPLACE_STRCASESTR''@|$(REPLACE_STRCASESTR)|g' \ + -e 's|@''REPLACE_STRCHRNUL''@|$(REPLACE_STRCHRNUL)|g' \ + -e 's|@''REPLACE_STRDUP''@|$(REPLACE_STRDUP)|g' \ + -e 's|@''REPLACE_STRSTR''@|$(REPLACE_STRSTR)|g' \ + -e 's|@''REPLACE_STRERROR''@|$(REPLACE_STRERROR)|g' \ + -e 's|@''REPLACE_STRERROR_R''@|$(REPLACE_STRERROR_R)|g' \ + -e 's|@''REPLACE_STRNCAT''@|$(REPLACE_STRNCAT)|g' \ + -e 's|@''REPLACE_STRNDUP''@|$(REPLACE_STRNDUP)|g' \ + -e 's|@''REPLACE_STRNLEN''@|$(REPLACE_STRNLEN)|g' \ + -e 's|@''REPLACE_STRSIGNAL''@|$(REPLACE_STRSIGNAL)|g' \ + -e 's|@''REPLACE_STRTOK_R''@|$(REPLACE_STRTOK_R)|g' \ + -e 
's|@''UNDEFINE_STRTOK_R''@|$(UNDEFINE_STRTOK_R)|g' \ + -e '/definitions of _GL_FUNCDECL_RPL/r $(CXXDEFS_H)' \ + -e '/definition of _GL_ARG_NONNULL/r $(ARG_NONNULL_H)' \ + -e '/definition of _GL_WARN_ON_USE/r $(WARN_ON_USE_H)'; \ + < $(srcdir)/string.in.h; \ + } > $@-t && \ + mv $@-t $@ +MOSTLYCLEANFILES += string.h string.h-t + +EXTRA_DIST += string.in.h + +## end gnulib module string + +## begin gnulib module warn-on-use + +BUILT_SOURCES += warn-on-use.h +# The warn-on-use.h that gets inserted into generated .h files is the same as +# build-aux/warn-on-use.h, except that it has the copyright header cut off. +warn-on-use.h: $(top_srcdir)/./warn-on-use.h + $(AM_V_GEN)rm -f $@-t $@ && \ + sed -n -e '/^.ifndef/,$$p' \ + < $(top_srcdir)/./warn-on-use.h \ + > $@-t && \ + mv $@-t $@ +MOSTLYCLEANFILES += warn-on-use.h warn-on-use.h-t + +WARN_ON_USE_H=warn-on-use.h + +EXTRA_DIST += $(top_srcdir)/./warn-on-use.h + +## end gnulib module warn-on-use + +## begin gnulib module dummy + +libgnu_a_SOURCES += dummy.c + +## end gnulib module dummy + + +mostlyclean-local: mostlyclean-generic + @for dir in '' $(MOSTLYCLEANDIRS); do \ + if test -n "$$dir" && test -d $$dir; then \ + echo "rmdir $$dir"; rmdir $$dir; \ + fi; \ + done; \ + : diff --git a/lib/dummy.c b/lib/dummy.c new file mode 100644 index 0000000..c958ea0 --- /dev/null +++ b/lib/dummy.c @@ -0,0 +1,42 @@ +/* A dummy file, to prevent empty libraries from breaking builds. + Copyright (C) 2004, 2007, 2009-2011 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. */ + +/* Some systems, reportedly OpenBSD and Mac OS X, refuse to create + libraries without any object files. You might get an error like: + + > ar cru .libs/libgl.a + > ar: no archive members specified + + Compiling this file, and adding its object file to the library, will + prevent the library from being empty. */ + +/* Some systems, such as Solaris with cc 5.0, refuse to work with libraries + that don't export any symbol. You might get an error like: + + > cc ... libgnu.a + > ild: (bad file) garbled symbol table in archive ../gllib/libgnu.a + + Compiling this file, and adding its object file to the library, will + prevent the library from exporting no symbols. */ + +#ifdef __sun +/* This declaration ensures that the library will export at least 1 symbol. */ +int gl_dummy_symbol; +#else +/* This declaration is solely to ensure that after preprocessing + this file is never empty. */ +typedef int dummy; +#endif diff --git a/lib/memchr.c b/lib/memchr.c new file mode 100644 index 0000000..6d903b1 --- /dev/null +++ b/lib/memchr.c @@ -0,0 +1,172 @@ +/* Copyright (C) 1991, 1993, 1996-1997, 1999-2000, 2003-2004, 2006, 2008-2011 + Free Software Foundation, Inc. + + Based on strlen implementation by Torbjorn Granlund (tege@sics.se), + with help from Dan Sahlin (dan@sics.se) and + commentary by Jim Blandy (jimb@ai.mit.edu); + adaptation to memchr suggested by Dick Karpinski (dick@cca.ucsf.edu), + and implemented by Roland McGrath (roland@ai.mit.edu). + +NOTE: The canonical source of this file is maintained with the GNU C Library. +Bugs can be reported to bug-glibc@prep.ai.mit.edu. + +This program is free software: you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3 of the License, or any +later version.
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . */ + +#ifndef _LIBC +# include +#endif + +#include + +#include + +#if defined _LIBC +# include +#else +# define reg_char char +#endif + +#include + +#if HAVE_BP_SYM_H || defined _LIBC +# include +#else +# define BP_SYM(sym) sym +#endif + +#undef __memchr +#ifdef _LIBC +# undef memchr +#endif + +#ifndef weak_alias +# define __memchr memchr +#endif + +/* Search no more than N bytes of S for C. */ +void * +__memchr (void const *s, int c_in, size_t n) +{ + /* On 32-bit hardware, choosing longword to be a 32-bit unsigned + long instead of a 64-bit uintmax_t tends to give better + performance. On 64-bit hardware, unsigned long is generally 64 + bits already. Change this typedef to experiment with + performance. */ + typedef unsigned long int longword; + + const unsigned char *char_ptr; + const longword *longword_ptr; + longword repeated_one; + longword repeated_c; + unsigned reg_char c; + + c = (unsigned char) c_in; + + /* Handle the first few bytes by reading one byte at a time. + Do this until CHAR_PTR is aligned on a longword boundary. */ + for (char_ptr = (const unsigned char *) s; + n > 0 && (size_t) char_ptr % sizeof (longword) != 0; + --n, ++char_ptr) + if (*char_ptr == c) + return (void *) char_ptr; + + longword_ptr = (const longword *) char_ptr; + + /* All these elucidatory comments refer to 4-byte longwords, + but the theory applies equally well to any size longwords. */ + + /* Compute auxiliary longword values: + repeated_one is a value which has a 1 in every byte. + repeated_c has c in every byte. 
*/ + repeated_one = 0x01010101; + repeated_c = c | (c << 8); + repeated_c |= repeated_c << 16; + if (0xffffffffU < (longword) -1) + { + repeated_one |= repeated_one << 31 << 1; + repeated_c |= repeated_c << 31 << 1; + if (8 < sizeof (longword)) + { + size_t i; + + for (i = 64; i < sizeof (longword) * 8; i *= 2) + { + repeated_one |= repeated_one << i; + repeated_c |= repeated_c << i; + } + } + } + + /* Instead of the traditional loop which tests each byte, we will test a + longword at a time. The tricky part is testing if *any of the four* + bytes in the longword in question are equal to c. We first use an xor + with repeated_c. This reduces the task to testing whether *any of the + four* bytes in longword1 is zero. + + We compute tmp = + ((longword1 - repeated_one) & ~longword1) & (repeated_one << 7). + That is, we perform the following operations: + 1. Subtract repeated_one. + 2. & ~longword1. + 3. & a mask consisting of 0x80 in every byte. + Consider what happens in each byte: + - If a byte of longword1 is zero, step 1 and 2 transform it into 0xff, + and step 3 transforms it into 0x80. A carry can also be propagated + to more significant bytes. + - If a byte of longword1 is nonzero, let its lowest 1 bit be at + position k (0 <= k <= 7); so the lowest k bits are 0. After step 1, + the byte ends in a single bit of value 0 and k bits of value 1. + After step 2, the result is just k bits of value 1: 2^k - 1. After + step 3, the result is 0. And no carry is produced. + So, if longword1 has only non-zero bytes, tmp is zero. + Whereas if longword1 has a zero byte, call j the position of the least + significant zero byte. Then the result has a zero at positions 0, ..., + j-1 and a 0x80 at position j. We cannot predict the result at the more + significant bytes (positions j+1..3), but it does not matter since we + already have a non-zero bit at position 8*j+7. + + So, the test whether any byte in longword1 is zero is equivalent to + testing whether tmp is nonzero. 
*/ + + while (n >= sizeof (longword)) + { + longword longword1 = *longword_ptr ^ repeated_c; + + if ((((longword1 - repeated_one) & ~longword1) + & (repeated_one << 7)) != 0) + break; + longword_ptr++; + n -= sizeof (longword); + } + + char_ptr = (const unsigned char *) longword_ptr; + + /* At this point, we know that either n < sizeof (longword), or one of the + sizeof (longword) bytes starting at char_ptr is == c. On little-endian + machines, we could determine the first such byte without any further + memory accesses, just by looking at the tmp result from the last loop + iteration. But this does not work on big-endian machines. Choose code + that works in both cases. */ + + for (; n > 0; --n, ++char_ptr) + { + if (*char_ptr == c) + return (void *) char_ptr; + } + + return NULL; +} +#ifdef weak_alias +weak_alias (__memchr, BP_SYM (memchr)) +#endif diff --git a/lib/memchr.valgrind b/lib/memchr.valgrind new file mode 100644 index 0000000..60f247e --- /dev/null +++ b/lib/memchr.valgrind @@ -0,0 +1,14 @@ +# Suppress a valgrind message about use of uninitialized memory in memchr(). +# POSIX states that when the character is found, memchr must not read extra +# bytes in an overestimated length (for example, where memchr is used to +# implement strnlen). However, we use a safe word read to provide a speedup. +{ + memchr-value4 + Memcheck:Value4 + fun:rpl_memchr +} +{ + memchr-value8 + Memcheck:Value8 + fun:rpl_memchr +} diff --git a/lib/memmem.c b/lib/memmem.c new file mode 100644 index 0000000..acd1a3e --- /dev/null +++ b/lib/memmem.c @@ -0,0 +1,76 @@ +/* Copyright (C) 1991-1994, 1996-1998, 2000, 2004, 2007-2011 Free Software + Foundation, Inc. + This file is part of the GNU C Library. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +/* This particular implementation was written by Eric Blake, 2008. */ + +#ifndef _LIBC +# include +#endif + +/* Specification of memmem. */ +#include + +#ifndef _LIBC +# define __builtin_expect(expr, val) (expr) +#endif + +#define RETURN_TYPE void * +#define AVAILABLE(h, h_l, j, n_l) ((j) <= (h_l) - (n_l)) +#include "str-two-way.h" + +/* Return the first occurrence of NEEDLE in HAYSTACK. Return HAYSTACK + if NEEDLE_LEN is 0, otherwise NULL if NEEDLE is not found in + HAYSTACK. */ +void * +memmem (const void *haystack_start, size_t haystack_len, + const void *needle_start, size_t needle_len) +{ + /* Abstract memory is considered to be an array of 'unsigned char' values, + not an array of 'char' values. See ISO C 99 section 6.2.6.1. */ + const unsigned char *haystack = (const unsigned char *) haystack_start; + const unsigned char *needle = (const unsigned char *) needle_start; + + if (needle_len == 0) + /* The first occurrence of the empty string is deemed to occur at + the beginning of the string. */ + return (void *) haystack; + + /* Sanity check, otherwise the loop might search through the whole + memory. */ + if (__builtin_expect (haystack_len < needle_len, 0)) + return NULL; + + /* Use optimizations in memchr when possible, to reduce the search + size of haystack using a linear algorithm with a smaller + coefficient. However, avoid memchr for long needles, since we + can often achieve sublinear performance. 
*/ + if (needle_len < LONG_NEEDLE_THRESHOLD) + { + haystack = memchr (haystack, *needle, haystack_len); + if (!haystack || __builtin_expect (needle_len == 1, 0)) + return (void *) haystack; + haystack_len -= haystack - (const unsigned char *) haystack_start; + if (haystack_len < needle_len) + return NULL; + return two_way_short_needle (haystack, haystack_len, needle, needle_len); + } + else + return two_way_long_needle (haystack, haystack_len, needle, needle_len); +} + +#undef LONG_NEEDLE_THRESHOLD diff --git a/lib/sig-handler.h b/lib/sig-handler.h new file mode 100644 index 0000000..abb660c --- /dev/null +++ b/lib/sig-handler.h @@ -0,0 +1,44 @@ +/* Convenience declarations when working with . + + Copyright (C) 2008-2011 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#ifndef _GL_SIG_HANDLER_H +#define _GL_SIG_HANDLER_H + +#include + +/* Convenience type when working with signal handlers. */ +typedef void (*sa_handler_t) (int); + +/* Return the handler of a signal, as a sa_handler_t value regardless + of its true type. The resulting function can be compared to + special values like SIG_IGN but it is not portable to call it. */ +static inline sa_handler_t +get_handler (struct sigaction const *a) +{ +#ifdef SA_SIGINFO + /* POSIX says that special values like SIG_IGN can only occur when + action.sa_flags does not contain SA_SIGINFO. 
But in Linux 2.4, + for example, sa_sigaction and sa_handler are aliases and a signal + is ignored if sa_sigaction (after casting) equals SIG_IGN. So + use (and cast) sa_sigaction in that case. */ + if (a->sa_flags & SA_SIGINFO) + return (sa_handler_t) a->sa_sigaction; +#endif + return a->sa_handler; +} + +#endif /* _GL_SIG_HANDLER_H */ diff --git a/lib/sigaction.c b/lib/sigaction.c new file mode 100644 index 0000000..e6a55da --- /dev/null +++ b/lib/sigaction.c @@ -0,0 +1,204 @@ +/* POSIX compatible signal blocking. + Copyright (C) 2008-2011 Free Software Foundation, Inc. + Written by Eric Blake , 2008. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#include + +/* Specification. */ +#include + +#include +#include +#include + +/* This implementation of sigaction is tailored to Woe32 behavior: + signal() has SysV semantics (ie. the handler is uninstalled before + it is invoked). This is an inherent data race if an asynchronous + signal is sent twice in a row before we can reinstall our handler, + but there's nothing we can do about it. Meanwhile, sigprocmask() + is not present, and while we can use the gnulib replacement to + provide critical sections, it too suffers from potential data races + in the face of an ill-timed asynchronous signal. And we compound + the situation by reading static storage in a signal handler, which + POSIX warns is not generically async-signal-safe. Oh well. 
+ + Additionally: + - We don't implement SA_NOCLDSTOP or SA_NOCLDWAIT, because SIGCHLD + is not defined. + - We don't implement SA_ONSTACK, because sigaltstack() is not present. + - We ignore SA_RESTART, because blocking Win32 calls are not interrupted + anyway when an asynchronous signal occurs, and the MSVCRT runtime + never sets errno to EINTR. + - We don't implement SA_SIGINFO because it is impossible to do so + portably. + + POSIX states that an application should not mix signal() and + sigaction(). We support the use of signal() within the gnulib + sigprocmask() substitute, but all other application code linked + with this module should stick with only sigaction(). */ + +/* Check some of our assumptions. */ +#if defined SIGCHLD || defined HAVE_SIGALTSTACK || defined HAVE_SIGINTERRUPT +# error "Revisit the assumptions made in the sigaction module" +#endif + +/* Out-of-range substitutes make a good fallback for uncatchable + signals. */ +#ifndef SIGKILL +# define SIGKILL (-1) +#endif +#ifndef SIGSTOP +# define SIGSTOP (-1) +#endif + +/* On native Windows, as of 2008, the signal SIGABRT_COMPAT is an alias + for the signal SIGABRT. Only one signal handler is stored for both + SIGABRT and SIGABRT_COMPAT. SIGABRT_COMPAT is not a signal of its own. */ +#if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ +# undef SIGABRT_COMPAT +# define SIGABRT_COMPAT 6 +#endif + +/* A signal handler. */ +typedef void (*handler_t) (int signal); + +/* Set of current actions. If sa_handler for an entry is NULL, then + that signal is not currently handled by the sigaction handler. */ +static struct sigaction volatile action_array[NSIG] /* = 0 */; + +/* Signal handler that is installed for signals. */ +static void +sigaction_handler (int sig) +{ + handler_t handler; + sigset_t mask; + sigset_t oldmask; + int saved_errno = errno; + if (sig < 0 || NSIG <= sig || !action_array[sig].sa_handler) + { + /* Unexpected situation; be careful to avoid recursive abort. 
*/ + if (sig == SIGABRT) + signal (SIGABRT, SIG_DFL); + abort (); + } + + /* Reinstall the signal handler when required; otherwise update the + bookkeeping so that the user's handler may call sigaction and get + accurate results. We know the signal isn't currently blocked, or + we wouldn't be in its handler, therefore we know that we are not + interrupting a sigaction() call. There is a race where any + asynchronous instance of the same signal occurring before we + reinstall the handler will trigger the default handler; oh + well. */ + handler = action_array[sig].sa_handler; + if ((action_array[sig].sa_flags & SA_RESETHAND) == 0) + signal (sig, sigaction_handler); + else + action_array[sig].sa_handler = NULL; + + /* Block appropriate signals. */ + mask = action_array[sig].sa_mask; + if ((action_array[sig].sa_flags & SA_NODEFER) == 0) + sigaddset (&mask, sig); + sigprocmask (SIG_BLOCK, &mask, &oldmask); + + /* Invoke the user's handler, then restore prior mask. */ + errno = saved_errno; + handler (sig); + saved_errno = errno; + sigprocmask (SIG_SETMASK, &oldmask, NULL); + errno = saved_errno; +} + +/* Change and/or query the action that will be taken on delivery of + signal SIG. If not NULL, ACT describes the new behavior. If not + NULL, OACT is set to the prior behavior. Return 0 on success, or + set errno and return -1 on failure. */ +int +sigaction (int sig, const struct sigaction *restrict act, + struct sigaction *restrict oact) +{ + sigset_t mask; + sigset_t oldmask; + int saved_errno; + + if (sig < 0 || NSIG <= sig || sig == SIGKILL || sig == SIGSTOP + || (act && act->sa_handler == SIG_ERR)) + { + errno = EINVAL; + return -1; + } + +#ifdef SIGABRT_COMPAT + if (sig == SIGABRT_COMPAT) + sig = SIGABRT; +#endif + + /* POSIX requires sigaction() to be async-signal-safe. In other + words, if an asynchronous signal can occur while we are anywhere + inside this function, the user's handler could then call + sigaction() recursively and expect consistent results. 
We meet + this rule by using sigprocmask to block all signals before + modifying any data structure that could be read from a signal + handler; this works since we know that the gnulib sigprocmask + replacement does not try to use sigaction() from its handler. */ + if (!act && !oact) + return 0; + sigfillset (&mask); + sigprocmask (SIG_BLOCK, &mask, &oldmask); + if (oact) + { + if (action_array[sig].sa_handler) + *oact = action_array[sig]; + else + { + /* Safe to change the handler at will here, since all + signals are currently blocked. */ + oact->sa_handler = signal (sig, SIG_DFL); + if (oact->sa_handler == SIG_ERR) + goto failure; + signal (sig, oact->sa_handler); + oact->sa_flags = SA_RESETHAND | SA_NODEFER; + sigemptyset (&oact->sa_mask); + } + } + + if (act) + { + /* Safe to install the handler before updating action_array, + since all signals are currently blocked. */ + if (act->sa_handler == SIG_DFL || act->sa_handler == SIG_IGN) + { + if (signal (sig, act->sa_handler) == SIG_ERR) + goto failure; + action_array[sig].sa_handler = NULL; + } + else + { + if (signal (sig, sigaction_handler) == SIG_ERR) + goto failure; + action_array[sig] = *act; + } + } + sigprocmask (SIG_SETMASK, &oldmask, NULL); + return 0; + + failure: + saved_errno = errno; + sigprocmask (SIG_SETMASK, &oldmask, NULL); + errno = saved_errno; + return -1; +} diff --git a/lib/signal.in.h b/lib/signal.in.h new file mode 100644 index 0000000..9669215 --- /dev/null +++ b/lib/signal.in.h @@ -0,0 +1,378 @@ +/* A GNU-like . + + Copyright (C) 2006-2011 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#if __GNUC__ >= 3 +@PRAGMA_SYSTEM_HEADER@ +#endif +@PRAGMA_COLUMNS@ + +#include "config.h" + +#if defined __need_sig_atomic_t || defined __need_sigset_t +/* Special invocation convention inside glibc header files. */ + +# @INCLUDE_NEXT@ @NEXT_SIGNAL_H@ + +#else +/* Normal invocation convention. */ + +#ifndef _@GUARD_PREFIX@_SIGNAL_H + +/* The include_next requires a split double-inclusion guard. */ +#@INCLUDE_NEXT@ @NEXT_SIGNAL_H@ + +#ifndef _@GUARD_PREFIX@_SIGNAL_H +#define _@GUARD_PREFIX@_SIGNAL_H + +/* The definitions of _GL_FUNCDECL_RPL etc. are copied here. */ + +/* The definition of _GL_ARG_NONNULL is copied here. */ + +/* The definition of _GL_WARN_ON_USE is copied here. */ + +/* Define pid_t, uid_t. + Also, mingw defines sigset_t not in , but in . */ +#include + +/* On AIX, sig_atomic_t already includes volatile. C99 requires that + 'volatile sig_atomic_t' ignore the extra modifier, but C89 did not. + Hence, redefine this to a non-volatile type as needed. */ +#if ! @HAVE_TYPE_VOLATILE_SIG_ATOMIC_T@ +# if !GNULIB_defined_sig_atomic_t +typedef int rpl_sig_atomic_t; +# undef sig_atomic_t +# define sig_atomic_t rpl_sig_atomic_t +# define GNULIB_defined_sig_atomic_t 1 +# endif +#endif + +/* A set or mask of signals. */ +#if !@HAVE_SIGSET_T@ +# if !GNULIB_defined_sigset_t +typedef unsigned int sigset_t; +# define GNULIB_defined_sigset_t 1 +# endif +#endif + +/* Define sighandler_t, the type of signal handlers. A GNU extension. 
*/ +#if !@HAVE_SIGHANDLER_T@ +# ifdef __cplusplus +extern "C" { +# endif +# if !GNULIB_defined_sighandler_t +typedef void (*sighandler_t) (int); +# define GNULIB_defined_sighandler_t 1 +# endif +# ifdef __cplusplus +} +# endif +#endif + + +#if @GNULIB_SIGNAL_H_SIGPIPE@ +# ifndef SIGPIPE +/* Define SIGPIPE to a value that does not overlap with other signals. */ +# define SIGPIPE 13 +# define GNULIB_defined_SIGPIPE 1 +/* To actually use SIGPIPE, you also need the gnulib modules 'sigprocmask', + 'write', 'stdio'. */ +# endif +#endif + + +/* Maximum signal number + 1. */ +#ifndef NSIG +# if defined __TANDEM +# define NSIG 32 +# endif +#endif + + +#if @GNULIB_SIGPROCMASK@ +# if !@HAVE_POSIX_SIGNALBLOCKING@ + +/* Maximum signal number + 1. */ +# ifndef NSIG +# define NSIG 32 +# endif + +/* This code supports only 32 signals. */ +# if !GNULIB_defined_verify_NSIG_constraint +typedef int verify_NSIG_constraint[NSIG <= 32 ? 1 : -1]; +# define GNULIB_defined_verify_NSIG_constraint 1 +# endif + +# endif + +/* Test whether a given signal is contained in a signal set. */ +# if @HAVE_POSIX_SIGNALBLOCKING@ +/* This function is defined as a macro on MacOS X. */ +# if defined __cplusplus && defined GNULIB_NAMESPACE +# undef sigismember +# endif +# else +_GL_FUNCDECL_SYS (sigismember, int, (const sigset_t *set, int sig) + _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (sigismember, int, (const sigset_t *set, int sig)); +_GL_CXXALIASWARN (sigismember); + +/* Initialize a signal set to the empty set. */ +# if @HAVE_POSIX_SIGNALBLOCKING@ +/* This function is defined as a macro on MacOS X. */ +# if defined __cplusplus && defined GNULIB_NAMESPACE +# undef sigemptyset +# endif +# else +_GL_FUNCDECL_SYS (sigemptyset, int, (sigset_t *set) _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (sigemptyset, int, (sigset_t *set)); +_GL_CXXALIASWARN (sigemptyset); + +/* Add a signal to a signal set. */ +# if @HAVE_POSIX_SIGNALBLOCKING@ +/* This function is defined as a macro on MacOS X. 
*/ +# if defined __cplusplus && defined GNULIB_NAMESPACE +# undef sigaddset +# endif +# else +_GL_FUNCDECL_SYS (sigaddset, int, (sigset_t *set, int sig) + _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (sigaddset, int, (sigset_t *set, int sig)); +_GL_CXXALIASWARN (sigaddset); + +/* Remove a signal from a signal set. */ +# if @HAVE_POSIX_SIGNALBLOCKING@ +/* This function is defined as a macro on MacOS X. */ +# if defined __cplusplus && defined GNULIB_NAMESPACE +# undef sigdelset +# endif +# else +_GL_FUNCDECL_SYS (sigdelset, int, (sigset_t *set, int sig) + _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (sigdelset, int, (sigset_t *set, int sig)); +_GL_CXXALIASWARN (sigdelset); + +/* Fill a signal set with all possible signals. */ +# if @HAVE_POSIX_SIGNALBLOCKING@ +/* This function is defined as a macro on MacOS X. */ +# if defined __cplusplus && defined GNULIB_NAMESPACE +# undef sigfillset +# endif +# else +_GL_FUNCDECL_SYS (sigfillset, int, (sigset_t *set) _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (sigfillset, int, (sigset_t *set)); +_GL_CXXALIASWARN (sigfillset); + +/* Return the set of those blocked signals that are pending. */ +# if !@HAVE_POSIX_SIGNALBLOCKING@ +_GL_FUNCDECL_SYS (sigpending, int, (sigset_t *set) _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (sigpending, int, (sigset_t *set)); +_GL_CXXALIASWARN (sigpending); + +/* If OLD_SET is not NULL, put the current set of blocked signals in *OLD_SET. + Then, if SET is not NULL, affect the current set of blocked signals by + combining it with *SET as indicated in OPERATION. + In this implementation, you are not allowed to change a signal handler + while the signal is blocked. 
*/ +# if !@HAVE_POSIX_SIGNALBLOCKING@ +# define SIG_BLOCK 0 /* blocked_set = blocked_set | *set; */ +# define SIG_SETMASK 1 /* blocked_set = *set; */ +# define SIG_UNBLOCK 2 /* blocked_set = blocked_set & ~*set; */ +_GL_FUNCDECL_SYS (sigprocmask, int, + (int operation, const sigset_t *set, sigset_t *old_set)); +# endif +_GL_CXXALIAS_SYS (sigprocmask, int, + (int operation, const sigset_t *set, sigset_t *old_set)); +_GL_CXXALIASWARN (sigprocmask); + +/* Install the handler FUNC for signal SIG, and return the previous + handler. */ +# ifdef __cplusplus +extern "C" { +# endif +# if !GNULIB_defined_function_taking_int_returning_void_t +typedef void (*_gl_function_taking_int_returning_void_t) (int); +# define GNULIB_defined_function_taking_int_returning_void_t 1 +# endif +# ifdef __cplusplus +} +# endif +# if !@HAVE_POSIX_SIGNALBLOCKING@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define signal rpl_signal +# endif +_GL_FUNCDECL_RPL (signal, _gl_function_taking_int_returning_void_t, + (int sig, _gl_function_taking_int_returning_void_t func)); +_GL_CXXALIAS_RPL (signal, _gl_function_taking_int_returning_void_t, + (int sig, _gl_function_taking_int_returning_void_t func)); +# else +_GL_CXXALIAS_SYS (signal, _gl_function_taking_int_returning_void_t, + (int sig, _gl_function_taking_int_returning_void_t func)); +# endif +_GL_CXXALIASWARN (signal); + +/* Raise signal SIG. 
*/ +# if !@HAVE_POSIX_SIGNALBLOCKING@ && GNULIB_defined_SIGPIPE +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef raise +# define raise rpl_raise +# endif +_GL_FUNCDECL_RPL (raise, int, (int sig)); +_GL_CXXALIAS_RPL (raise, int, (int sig)); +# else +_GL_CXXALIAS_SYS (raise, int, (int sig)); +# endif +_GL_CXXALIASWARN (raise); + +#elif defined GNULIB_POSIXCHECK +# undef sigaddset +# if HAVE_RAW_DECL_SIGADDSET +_GL_WARN_ON_USE (sigaddset, "sigaddset is unportable - " + "use the gnulib module sigprocmask for portability"); +# endif +# undef sigdelset +# if HAVE_RAW_DECL_SIGDELSET +_GL_WARN_ON_USE (sigdelset, "sigdelset is unportable - " + "use the gnulib module sigprocmask for portability"); +# endif +# undef sigemptyset +# if HAVE_RAW_DECL_SIGEMPTYSET +_GL_WARN_ON_USE (sigemptyset, "sigemptyset is unportable - " + "use the gnulib module sigprocmask for portability"); +# endif +# undef sigfillset +# if HAVE_RAW_DECL_SIGFILLSET +_GL_WARN_ON_USE (sigfillset, "sigfillset is unportable - " + "use the gnulib module sigprocmask for portability"); +# endif +# undef sigismember +# if HAVE_RAW_DECL_SIGISMEMBER +_GL_WARN_ON_USE (sigismember, "sigismember is unportable - " + "use the gnulib module sigprocmask for portability"); +# endif +# undef sigpending +# if HAVE_RAW_DECL_SIGPENDING +_GL_WARN_ON_USE (sigpending, "sigpending is unportable - " + "use the gnulib module sigprocmask for portability"); +# endif +# undef sigprocmask +# if HAVE_RAW_DECL_SIGPROCMASK +_GL_WARN_ON_USE (sigprocmask, "sigprocmask is unportable - " + "use the gnulib module sigprocmask for portability"); +# endif +#endif /* @GNULIB_SIGPROCMASK@ */ + + +#if @GNULIB_SIGACTION@ +# if !@HAVE_SIGACTION@ + +# if !@HAVE_SIGINFO_T@ + +# if !GNULIB_defined_siginfo_types + +/* Present to allow compilation, but unsupported by gnulib. */ +union sigval +{ + int sival_int; + void *sival_ptr; +}; + +/* Present to allow compilation, but unsupported by gnulib. 
*/ +struct siginfo_t +{ + int si_signo; + int si_code; + int si_errno; + pid_t si_pid; + uid_t si_uid; + void *si_addr; + int si_status; + long si_band; + union sigval si_value; +}; +typedef struct siginfo_t siginfo_t; + +# define GNULIB_defined_siginfo_types 1 +# endif + +# endif /* !@HAVE_SIGINFO_T@ */ + +/* We assume that platforms which lack the sigaction() function also lack + the 'struct sigaction' type, and vice versa. */ + +# if !GNULIB_defined_struct_sigaction + +struct sigaction +{ + union + { + void (*_sa_handler) (int); + /* Present to allow compilation, but unsupported by gnulib. POSIX + says that implementations may, but not must, make sa_sigaction + overlap with sa_handler, but we know of no implementation where + they do not overlap. */ + void (*_sa_sigaction) (int, siginfo_t *, void *); + } _sa_func; + sigset_t sa_mask; + /* Not all POSIX flags are supported. */ + int sa_flags; +}; +# define sa_handler _sa_func._sa_handler +# define sa_sigaction _sa_func._sa_sigaction +/* Unsupported flags are not present. */ +# define SA_RESETHAND 1 +# define SA_NODEFER 2 +# define SA_RESTART 4 + +# define GNULIB_defined_struct_sigaction 1 +# endif + +_GL_FUNCDECL_SYS (sigaction, int, (int, const struct sigaction *restrict, + struct sigaction *restrict)); + +# elif !@HAVE_STRUCT_SIGACTION_SA_SIGACTION@ + +# define sa_sigaction sa_handler + +# endif /* !@HAVE_SIGACTION@, !@HAVE_STRUCT_SIGACTION_SA_SIGACTION@ */ + +_GL_CXXALIAS_SYS (sigaction, int, (int, const struct sigaction *restrict, + struct sigaction *restrict)); +_GL_CXXALIASWARN (sigaction); + +#elif defined GNULIB_POSIXCHECK +# undef sigaction +# if HAVE_RAW_DECL_SIGACTION +_GL_WARN_ON_USE (sigaction, "sigaction is unportable - " + "use the gnulib module sigaction for portability"); +# endif +#endif + +/* Some systems don't have SA_NODEFER. 
*/ +#ifndef SA_NODEFER +# define SA_NODEFER 0 +#endif + + +#endif /* _@GUARD_PREFIX@_SIGNAL_H */ +#endif /* _@GUARD_PREFIX@_SIGNAL_H */ +#endif diff --git a/lib/sigprocmask.c b/lib/sigprocmask.c new file mode 100644 index 0000000..6780a37 --- /dev/null +++ b/lib/sigprocmask.c @@ -0,0 +1,329 @@ +/* POSIX compatible signal blocking. + Copyright (C) 2006-2011 Free Software Foundation, Inc. + Written by Bruno Haible , 2006. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . */ + +#include + +/* Specification. */ +#include + +#include +#include +#include + +/* We assume that a platform without POSIX signal blocking functions + also does not have the POSIX sigaction() function, only the + signal() function. We also assume signal() has SysV semantics, + where any handler is uninstalled prior to being invoked. This is + true for Woe32 platforms. */ + +/* We use raw signal(), but also provide a wrapper rpl_signal() so + that applications can query or change a blocked signal. */ +#undef signal + +/* Provide invalid signal numbers as fallbacks if the uncatchable + signals are not defined. */ +#ifndef SIGKILL +# define SIGKILL (-1) +#endif +#ifndef SIGSTOP +# define SIGSTOP (-1) +#endif + +/* On native Windows, as of 2008, the signal SIGABRT_COMPAT is an alias + for the signal SIGABRT. Only one signal handler is stored for both + SIGABRT and SIGABRT_COMPAT. SIGABRT_COMPAT is not a signal of its own. 
*/ +#if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ +# undef SIGABRT_COMPAT +# define SIGABRT_COMPAT 6 +#endif +#ifdef SIGABRT_COMPAT +# define SIGABRT_COMPAT_MASK (1U << SIGABRT_COMPAT) +#else +# define SIGABRT_COMPAT_MASK 0 +#endif + +typedef void (*handler_t) (int); + +/* Handling of gnulib defined signals. */ + +#if GNULIB_defined_SIGPIPE +static handler_t SIGPIPE_handler = SIG_DFL; +#endif + +#if GNULIB_defined_SIGPIPE +static handler_t +ext_signal (int sig, handler_t handler) +{ + switch (sig) + { + case SIGPIPE: + { + handler_t old_handler = SIGPIPE_handler; + SIGPIPE_handler = handler; + return old_handler; + } + default: /* System defined signal */ + return signal (sig, handler); + } +} +# define signal ext_signal +#endif + +int +sigismember (const sigset_t *set, int sig) +{ + if (sig >= 0 && sig < NSIG) + { + #ifdef SIGABRT_COMPAT + if (sig == SIGABRT_COMPAT) + sig = SIGABRT; + #endif + + return (*set >> sig) & 1; + } + else + return 0; +} + +int +sigemptyset (sigset_t *set) +{ + *set = 0; + return 0; +} + +int +sigaddset (sigset_t *set, int sig) +{ + if (sig >= 0 && sig < NSIG) + { + #ifdef SIGABRT_COMPAT + if (sig == SIGABRT_COMPAT) + sig = SIGABRT; + #endif + + *set |= 1U << sig; + return 0; + } + else + { + errno = EINVAL; + return -1; + } +} + +int +sigdelset (sigset_t *set, int sig) +{ + if (sig >= 0 && sig < NSIG) + { + #ifdef SIGABRT_COMPAT + if (sig == SIGABRT_COMPAT) + sig = SIGABRT; + #endif + + *set &= ~(1U << sig); + return 0; + } + else + { + errno = EINVAL; + return -1; + } +} + + +int +sigfillset (sigset_t *set) +{ + *set = ((2U << (NSIG - 1)) - 1) & ~ SIGABRT_COMPAT_MASK; + return 0; +} + +/* Set of currently blocked signals. */ +static volatile sigset_t blocked_set /* = 0 */; + +/* Set of currently blocked and pending signals. */ +static volatile sig_atomic_t pending_array[NSIG] /* = { 0 } */; + +/* Signal handler that is installed for blocked signals. 
*/ +static void +blocked_handler (int sig) +{ + /* Reinstall the handler, in case the signal occurs multiple times + while blocked. There is an inherent race where an asynchronous + signal in between when the kernel uninstalled the handler and + when we reinstall it will trigger the default handler; oh + well. */ + signal (sig, blocked_handler); + if (sig >= 0 && sig < NSIG) + pending_array[sig] = 1; +} + +int +sigpending (sigset_t *set) +{ + sigset_t pending = 0; + int sig; + + for (sig = 0; sig < NSIG; sig++) + if (pending_array[sig]) + pending |= 1U << sig; + *set = pending; + return 0; +} + +/* The previous signal handlers. + Only the array elements corresponding to blocked signals are relevant. */ +static volatile handler_t old_handlers[NSIG]; + +int +sigprocmask (int operation, const sigset_t *set, sigset_t *old_set) +{ + if (old_set != NULL) + *old_set = blocked_set; + + if (set != NULL) + { + sigset_t new_blocked_set; + sigset_t to_unblock; + sigset_t to_block; + + switch (operation) + { + case SIG_BLOCK: + new_blocked_set = blocked_set | *set; + break; + case SIG_SETMASK: + new_blocked_set = *set; + break; + case SIG_UNBLOCK: + new_blocked_set = blocked_set & ~*set; + break; + default: + errno = EINVAL; + return -1; + } + to_unblock = blocked_set & ~new_blocked_set; + to_block = new_blocked_set & ~blocked_set; + + if (to_block != 0) + { + int sig; + + for (sig = 0; sig < NSIG; sig++) + if ((to_block >> sig) & 1) + { + pending_array[sig] = 0; + if ((old_handlers[sig] = signal (sig, blocked_handler)) != SIG_ERR) + blocked_set |= 1U << sig; + } + } + + if (to_unblock != 0) + { + sig_atomic_t received[NSIG]; + int sig; + + for (sig = 0; sig < NSIG; sig++) + if ((to_unblock >> sig) & 1) + { + if (signal (sig, old_handlers[sig]) != blocked_handler) + /* The application changed a signal handler while the signal + was blocked, bypassing our rpl_signal replacement. + We don't support this. 
*/ + abort (); + received[sig] = pending_array[sig]; + blocked_set &= ~(1U << sig); + pending_array[sig] = 0; + } + else + received[sig] = 0; + + for (sig = 0; sig < NSIG; sig++) + if (received[sig]) + raise (sig); + } + } + return 0; +} + +/* Install the handler FUNC for signal SIG, and return the previous + handler. */ +handler_t +rpl_signal (int sig, handler_t handler) +{ + /* We must provide a wrapper, so that a user can query what handler + they installed even if that signal is currently blocked. */ + if (sig >= 0 && sig < NSIG && sig != SIGKILL && sig != SIGSTOP + && handler != SIG_ERR) + { + #ifdef SIGABRT_COMPAT + if (sig == SIGABRT_COMPAT) + sig = SIGABRT; + #endif + + if (blocked_set & (1U << sig)) + { + /* POSIX states that sigprocmask and signal are both + async-signal-safe. This is not true of our + implementation - there is a slight data race where an + asynchronous interrupt on signal A can occur after we + install blocked_handler but before we have updated + old_handlers for signal B, such that handler A can see + stale information if it calls signal(B). Oh well - + signal handlers really shouldn't try to manipulate the + installed handlers of unrelated signals. */ + handler_t result = old_handlers[sig]; + old_handlers[sig] = handler; + return result; + } + else + return signal (sig, handler); + } + else + { + errno = EINVAL; + return SIG_ERR; + } +} + +#if GNULIB_defined_SIGPIPE +/* Raise the signal SIG. 
*/ +int +rpl_raise (int sig) +# undef raise +{ + switch (sig) + { + case SIGPIPE: + if (blocked_set & (1U << sig)) + pending_array[sig] = 1; + else + { + handler_t handler = SIGPIPE_handler; + if (handler == SIG_DFL) + exit (128 + SIGPIPE); + else if (handler != SIG_IGN) + (*handler) (sig); + } + return 0; + default: /* System defined signal */ + return raise (sig); + } +} +#endif diff --git a/lib/stddef.in.h b/lib/stddef.in.h new file mode 100644 index 0000000..c7b98e7 --- /dev/null +++ b/lib/stddef.in.h @@ -0,0 +1,87 @@ +/* A substitute for POSIX 2008 , for platforms that have issues. + + Copyright (C) 2009-2011 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +/* Written by Eric Blake. */ + +/* + * POSIX 2008 for platforms that have issues. + * + */ + +#if __GNUC__ >= 3 +@PRAGMA_SYSTEM_HEADER@ +#endif +@PRAGMA_COLUMNS@ + +#if defined __need_wchar_t || defined __need_size_t \ + || defined __need_ptrdiff_t || defined __need_NULL \ + || defined __need_wint_t +/* Special invocation convention inside gcc header files. In + particular, gcc provides a version of that blindly + redefines NULL even when __need_wint_t was defined, even though + wint_t is not normally provided by . 
Hence, we must + remember if special invocation has ever been used to obtain wint_t, + in which case we need to clean up NULL yet again. */ + +# if !(defined _@GUARD_PREFIX@_STDDEF_H && defined _GL_STDDEF_WINT_T) +# ifdef __need_wint_t +# undef _@GUARD_PREFIX@_STDDEF_H +# define _GL_STDDEF_WINT_T +# endif +# @INCLUDE_NEXT@ @NEXT_STDDEF_H@ +# endif + +#else +/* Normal invocation convention. */ + +# ifndef _@GUARD_PREFIX@_STDDEF_H + +/* The include_next requires a split double-inclusion guard. */ + +# @INCLUDE_NEXT@ @NEXT_STDDEF_H@ + +# ifndef _@GUARD_PREFIX@_STDDEF_H +# define _@GUARD_PREFIX@_STDDEF_H + +/* On NetBSD 5.0, the definition of NULL lacks proper parentheses. */ +#if @REPLACE_NULL@ +# undef NULL +# ifdef __cplusplus + /* ISO C++ says that the macro NULL must expand to an integer constant + expression, hence '((void *) 0)' is not allowed in C++. */ +# if __GNUG__ >= 3 + /* GNU C++ has a __null macro that behaves like an integer ('int' or + 'long') but has the same size as a pointer. Use that, to avoid + warnings. */ +# define NULL __null +# else +# define NULL 0L +# endif +# else +# define NULL ((void *) 0) +# endif +#endif + +/* Some platforms lack wchar_t. */ +#if !@HAVE_WCHAR_T@ +# define wchar_t int +#endif + +# endif /* _@GUARD_PREFIX@_STDDEF_H */ +# endif /* _@GUARD_PREFIX@_STDDEF_H */ +#endif /* __need_XXX */ diff --git a/lib/stdint.in.h b/lib/stdint.in.h new file mode 100644 index 0000000..09ac138 --- /dev/null +++ b/lib/stdint.in.h @@ -0,0 +1,592 @@ +/* Copyright (C) 2001-2002, 2004-2011 Free Software Foundation, Inc. + Written by Paul Eggert, Bruno Haible, Sam Steingold, Peter Burwood. + This file is part of gnulib. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +/* + * ISO C 99 for platforms that lack it. + * + */ + +#ifndef _@GUARD_PREFIX@_STDINT_H + +#if __GNUC__ >= 3 +@PRAGMA_SYSTEM_HEADER@ +#endif +@PRAGMA_COLUMNS@ + +/* When including a system file that in turn includes , + use the system , not our substitute. This avoids + problems with (for example) VMS, whose includes + . */ +#define _GL_JUST_INCLUDE_SYSTEM_INTTYPES_H + +/* Get those types that are already defined in other system include + files, so that we can "#define int8_t signed char" below without + worrying about a later system include file containing a "typedef + signed char int8_t;" that will get messed up by our macro. Our + macros should all be consistent with the system versions, except + for the "fast" types and macros, which we recommend against using + in public interfaces due to compiler differences. */ + +#if @HAVE_STDINT_H@ +# if defined __sgi && ! defined __c99 + /* Bypass IRIX's if in C89 mode, since it merely annoys users + with "This header file is to be used only for c99 mode compilations" + diagnostics. */ +# define __STDINT_H__ +# endif + /* Other systems may have an incomplete or buggy . + Include it before , since any "#include " + in would reinclude us, skipping our contents because + _@GUARD_PREFIX@_STDINT_H is defined. + The include_next requires a split double-inclusion guard. */ +# @INCLUDE_NEXT@ @NEXT_STDINT_H@ +#endif + +#if ! defined _@GUARD_PREFIX@_STDINT_H && ! 
defined _GL_JUST_INCLUDE_SYSTEM_STDINT_H +#define _@GUARD_PREFIX@_STDINT_H + +/* defines some of the stdint.h types as well, on glibc, + IRIX 6.5, and OpenBSD 3.8 (via ). + AIX 5.2 isn't needed and causes troubles. + MacOS X 10.4.6 includes (which is us), but + relies on the system definitions, so include + after @NEXT_STDINT_H@. */ +#if @HAVE_SYS_TYPES_H@ && ! defined _AIX +# include +#endif + +/* Get LONG_MIN, LONG_MAX, ULONG_MAX. */ +#include + +#if @HAVE_INTTYPES_H@ + /* In OpenBSD 3.8, includes , which defines + int{8,16,32,64}_t, uint{8,16,32,64}_t and __BIT_TYPES_DEFINED__. + also defines intptr_t and uintptr_t. */ +# include +#elif @HAVE_SYS_INTTYPES_H@ + /* Solaris 7 has the types except the *_fast*_t types, and + the macros except for *_FAST*_*, INTPTR_MIN, PTRDIFF_MIN, PTRDIFF_MAX. */ +# include +#endif + +#if @HAVE_SYS_BITYPES_H@ && ! defined __BIT_TYPES_DEFINED__ + /* Linux libc4 >= 4.6.7 and libc5 have a that defines + int{8,16,32,64}_t and __BIT_TYPES_DEFINED__. In libc5 >= 5.2.2 it is + included by . */ +# include +#endif + +#undef _GL_JUST_INCLUDE_SYSTEM_INTTYPES_H + +/* Minimum and maximum values for an integer type under the usual assumption. + Return an unspecified value if BITS == 0, adding a check to pacify + picky compilers. */ + +#define _STDINT_MIN(signed, bits, zero) \ + ((signed) ? (- ((zero) + 1) << ((bits) ? (bits) - 1 : 0)) : (zero)) + +#define _STDINT_MAX(signed, bits, zero) \ + ((signed) \ + ? ~ _STDINT_MIN (signed, bits, zero) \ + : /* The expression for the unsigned case. The subtraction of (signed) \ + is a nop in the unsigned case and avoids "signed integer overflow" \ + warnings in the signed case. */ \ + ((((zero) + 1) << ((bits) ? (bits) - 1 - (signed) : 0)) - 1) * 2 + 1) + +#if !GNULIB_defined_stdint_types + +/* 7.18.1.1. Exact-width integer types */ + +/* Here we assume a standard architecture where the hardware integer + types have 8, 16, 32, optionally 64 bits. 
*/ + +#undef int8_t +#undef uint8_t +typedef signed char gl_int8_t; +typedef unsigned char gl_uint8_t; +#define int8_t gl_int8_t +#define uint8_t gl_uint8_t + +#undef int16_t +#undef uint16_t +typedef short int gl_int16_t; +typedef unsigned short int gl_uint16_t; +#define int16_t gl_int16_t +#define uint16_t gl_uint16_t + +#undef int32_t +#undef uint32_t +typedef int gl_int32_t; +typedef unsigned int gl_uint32_t; +#define int32_t gl_int32_t +#define uint32_t gl_uint32_t + +/* If the system defines INT64_MAX, assume int64_t works. That way, + if the underlying platform defines int64_t to be a 64-bit long long + int, the code below won't mistakenly define it to be a 64-bit long + int, which would mess up C++ name mangling. We must use #ifdef + rather than #if, to avoid an error with HP-UX 10.20 cc. */ + +#ifdef INT64_MAX +# define GL_INT64_T +#else +/* Do not undefine int64_t if gnulib is not being used with 64-bit + types, since otherwise it breaks platforms like Tandem/NSK. */ +# if LONG_MAX >> 31 >> 31 == 1 +# undef int64_t +typedef long int gl_int64_t; +# define int64_t gl_int64_t +# define GL_INT64_T +# elif defined _MSC_VER +# undef int64_t +typedef __int64 gl_int64_t; +# define int64_t gl_int64_t +# define GL_INT64_T +# elif @HAVE_LONG_LONG_INT@ +# undef int64_t +typedef long long int gl_int64_t; +# define int64_t gl_int64_t +# define GL_INT64_T +# endif +#endif + +#ifdef UINT64_MAX +# define GL_UINT64_T +#else +# if ULONG_MAX >> 31 >> 31 >> 1 == 1 +# undef uint64_t +typedef unsigned long int gl_uint64_t; +# define uint64_t gl_uint64_t +# define GL_UINT64_T +# elif defined _MSC_VER +# undef uint64_t +typedef unsigned __int64 gl_uint64_t; +# define uint64_t gl_uint64_t +# define GL_UINT64_T +# elif @HAVE_UNSIGNED_LONG_LONG_INT@ +# undef uint64_t +typedef unsigned long long int gl_uint64_t; +# define uint64_t gl_uint64_t +# define GL_UINT64_T +# endif +#endif + +/* Avoid collision with Solaris 2.5.1 etc. 
*/ +#define _UINT8_T +#define _UINT32_T +#define _UINT64_T + + +/* 7.18.1.2. Minimum-width integer types */ + +/* Here we assume a standard architecture where the hardware integer + types have 8, 16, 32, optionally 64 bits. Therefore the leastN_t types + are the same as the corresponding N_t types. */ + +#undef int_least8_t +#undef uint_least8_t +#undef int_least16_t +#undef uint_least16_t +#undef int_least32_t +#undef uint_least32_t +#undef int_least64_t +#undef uint_least64_t +#define int_least8_t int8_t +#define uint_least8_t uint8_t +#define int_least16_t int16_t +#define uint_least16_t uint16_t +#define int_least32_t int32_t +#define uint_least32_t uint32_t +#ifdef GL_INT64_T +# define int_least64_t int64_t +#endif +#ifdef GL_UINT64_T +# define uint_least64_t uint64_t +#endif + +/* 7.18.1.3. Fastest minimum-width integer types */ + +/* Note: Other substitutes may define these types differently. + It is not recommended to use these types in public header files. */ + +/* Here we assume a standard architecture where the hardware integer + types have 8, 16, 32, optionally 64 bits. Therefore the fastN_t types + are taken from the same list of types. Assume that 'long int' + is fast enough for all narrower integers. 
*/ + +#undef int_fast8_t +#undef uint_fast8_t +#undef int_fast16_t +#undef uint_fast16_t +#undef int_fast32_t +#undef uint_fast32_t +#undef int_fast64_t +#undef uint_fast64_t +typedef long int gl_int_fast8_t; +typedef unsigned long int gl_uint_fast8_t; +typedef long int gl_int_fast16_t; +typedef unsigned long int gl_uint_fast16_t; +typedef long int gl_int_fast32_t; +typedef unsigned long int gl_uint_fast32_t; +#define int_fast8_t gl_int_fast8_t +#define uint_fast8_t gl_uint_fast8_t +#define int_fast16_t gl_int_fast16_t +#define uint_fast16_t gl_uint_fast16_t +#define int_fast32_t gl_int_fast32_t +#define uint_fast32_t gl_uint_fast32_t +#ifdef GL_INT64_T +# define int_fast64_t int64_t +#endif +#ifdef GL_UINT64_T +# define uint_fast64_t uint64_t +#endif + +/* 7.18.1.4. Integer types capable of holding object pointers */ + +#undef intptr_t +#undef uintptr_t +typedef long int gl_intptr_t; +typedef unsigned long int gl_uintptr_t; +#define intptr_t gl_intptr_t +#define uintptr_t gl_uintptr_t + +/* 7.18.1.5. Greatest-width integer types */ + +/* Note: These types are compiler dependent. It may be unwise to use them in + public header files. */ + +#undef intmax_t +#if @HAVE_LONG_LONG_INT@ && LONG_MAX >> 30 == 1 +typedef long long int gl_intmax_t; +# define intmax_t gl_intmax_t +#elif defined GL_INT64_T +# define intmax_t int64_t +#else +typedef long int gl_intmax_t; +# define intmax_t gl_intmax_t +#endif + +#undef uintmax_t +#if @HAVE_UNSIGNED_LONG_LONG_INT@ && ULONG_MAX >> 31 == 1 +typedef unsigned long long int gl_uintmax_t; +# define uintmax_t gl_uintmax_t +#elif defined GL_UINT64_T +# define uintmax_t uint64_t +#else +typedef unsigned long int gl_uintmax_t; +# define uintmax_t gl_uintmax_t +#endif + +/* Verify that intmax_t and uintmax_t have the same size. Too much code + breaks if this is not the case. If this check fails, the reason is likely + to be found in the autoconf macros. */ +typedef int _verify_intmax_size[sizeof (intmax_t) == sizeof (uintmax_t) + ? 
1 : -1]; + +#define GNULIB_defined_stdint_types 1 +#endif /* !GNULIB_defined_stdint_types */ + +/* 7.18.2. Limits of specified-width integer types */ + +#if ! defined __cplusplus || defined __STDC_LIMIT_MACROS + +/* 7.18.2.1. Limits of exact-width integer types */ + +/* Here we assume a standard architecture where the hardware integer + types have 8, 16, 32, optionally 64 bits. */ + +#undef INT8_MIN +#undef INT8_MAX +#undef UINT8_MAX +#define INT8_MIN (~ INT8_MAX) +#define INT8_MAX 127 +#define UINT8_MAX 255 + +#undef INT16_MIN +#undef INT16_MAX +#undef UINT16_MAX +#define INT16_MIN (~ INT16_MAX) +#define INT16_MAX 32767 +#define UINT16_MAX 65535 + +#undef INT32_MIN +#undef INT32_MAX +#undef UINT32_MAX +#define INT32_MIN (~ INT32_MAX) +#define INT32_MAX 2147483647 +#define UINT32_MAX 4294967295U + +#if defined GL_INT64_T && ! defined INT64_MAX +/* Prefer (- INTMAX_C (1) << 63) over (~ INT64_MAX) because SunPRO C 5.0 + evaluates the latter incorrectly in preprocessor expressions. */ +# define INT64_MIN (- INTMAX_C (1) << 63) +# define INT64_MAX INTMAX_C (9223372036854775807) +#endif + +#if defined GL_UINT64_T && ! defined UINT64_MAX +# define UINT64_MAX UINTMAX_C (18446744073709551615) +#endif + +/* 7.18.2.2. Limits of minimum-width integer types */ + +/* Here we assume a standard architecture where the hardware integer + types have 8, 16, 32, optionally 64 bits. Therefore the leastN_t types + are the same as the corresponding N_t types. 
*/ + +#undef INT_LEAST8_MIN +#undef INT_LEAST8_MAX +#undef UINT_LEAST8_MAX +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define UINT_LEAST8_MAX UINT8_MAX + +#undef INT_LEAST16_MIN +#undef INT_LEAST16_MAX +#undef UINT_LEAST16_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define UINT_LEAST16_MAX UINT16_MAX + +#undef INT_LEAST32_MIN +#undef INT_LEAST32_MAX +#undef UINT_LEAST32_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define UINT_LEAST32_MAX UINT32_MAX + +#undef INT_LEAST64_MIN +#undef INT_LEAST64_MAX +#ifdef GL_INT64_T +# define INT_LEAST64_MIN INT64_MIN +# define INT_LEAST64_MAX INT64_MAX +#endif + +#undef UINT_LEAST64_MAX +#ifdef GL_UINT64_T +# define UINT_LEAST64_MAX UINT64_MAX +#endif + +/* 7.18.2.3. Limits of fastest minimum-width integer types */ + +/* Here we assume a standard architecture where the hardware integer + types have 8, 16, 32, optionally 64 bits. Therefore the fastN_t types + are taken from the same list of types. */ + +#undef INT_FAST8_MIN +#undef INT_FAST8_MAX +#undef UINT_FAST8_MAX +#define INT_FAST8_MIN LONG_MIN +#define INT_FAST8_MAX LONG_MAX +#define UINT_FAST8_MAX ULONG_MAX + +#undef INT_FAST16_MIN +#undef INT_FAST16_MAX +#undef UINT_FAST16_MAX +#define INT_FAST16_MIN LONG_MIN +#define INT_FAST16_MAX LONG_MAX +#define UINT_FAST16_MAX ULONG_MAX + +#undef INT_FAST32_MIN +#undef INT_FAST32_MAX +#undef UINT_FAST32_MAX +#define INT_FAST32_MIN LONG_MIN +#define INT_FAST32_MAX LONG_MAX +#define UINT_FAST32_MAX ULONG_MAX + +#undef INT_FAST64_MIN +#undef INT_FAST64_MAX +#ifdef GL_INT64_T +# define INT_FAST64_MIN INT64_MIN +# define INT_FAST64_MAX INT64_MAX +#endif + +#undef UINT_FAST64_MAX +#ifdef GL_UINT64_T +# define UINT_FAST64_MAX UINT64_MAX +#endif + +/* 7.18.2.4. 
Limits of integer types capable of holding object pointers */ + +#undef INTPTR_MIN +#undef INTPTR_MAX +#undef UINTPTR_MAX +#define INTPTR_MIN LONG_MIN +#define INTPTR_MAX LONG_MAX +#define UINTPTR_MAX ULONG_MAX + +/* 7.18.2.5. Limits of greatest-width integer types */ + +#undef INTMAX_MIN +#undef INTMAX_MAX +#ifdef INT64_MAX +# define INTMAX_MIN INT64_MIN +# define INTMAX_MAX INT64_MAX +#else +# define INTMAX_MIN INT32_MIN +# define INTMAX_MAX INT32_MAX +#endif + +#undef UINTMAX_MAX +#ifdef UINT64_MAX +# define UINTMAX_MAX UINT64_MAX +#else +# define UINTMAX_MAX UINT32_MAX +#endif + +/* 7.18.3. Limits of other integer types */ + +/* ptrdiff_t limits */ +#undef PTRDIFF_MIN +#undef PTRDIFF_MAX +#if @APPLE_UNIVERSAL_BUILD@ +# ifdef _LP64 +# define PTRDIFF_MIN _STDINT_MIN (1, 64, 0l) +# define PTRDIFF_MAX _STDINT_MAX (1, 64, 0l) +# else +# define PTRDIFF_MIN _STDINT_MIN (1, 32, 0) +# define PTRDIFF_MAX _STDINT_MAX (1, 32, 0) +# endif +#else +# define PTRDIFF_MIN \ + _STDINT_MIN (1, @BITSIZEOF_PTRDIFF_T@, 0@PTRDIFF_T_SUFFIX@) +# define PTRDIFF_MAX \ + _STDINT_MAX (1, @BITSIZEOF_PTRDIFF_T@, 0@PTRDIFF_T_SUFFIX@) +#endif + +/* sig_atomic_t limits */ +#undef SIG_ATOMIC_MIN +#undef SIG_ATOMIC_MAX +#define SIG_ATOMIC_MIN \ + _STDINT_MIN (@HAVE_SIGNED_SIG_ATOMIC_T@, @BITSIZEOF_SIG_ATOMIC_T@, \ + 0@SIG_ATOMIC_T_SUFFIX@) +#define SIG_ATOMIC_MAX \ + _STDINT_MAX (@HAVE_SIGNED_SIG_ATOMIC_T@, @BITSIZEOF_SIG_ATOMIC_T@, \ + 0@SIG_ATOMIC_T_SUFFIX@) + + +/* size_t limit */ +#undef SIZE_MAX +#if @APPLE_UNIVERSAL_BUILD@ +# ifdef _LP64 +# define SIZE_MAX _STDINT_MAX (0, 64, 0ul) +# else +# define SIZE_MAX _STDINT_MAX (0, 32, 0ul) +# endif +#else +# define SIZE_MAX _STDINT_MAX (0, @BITSIZEOF_SIZE_T@, 0@SIZE_T_SUFFIX@) +#endif + +/* wchar_t limits */ +/* Get WCHAR_MIN, WCHAR_MAX. + This include is not on the top, above, because on OSF/1 4.0 we have a + sequence of nested includes + -> -> -> , and the latter includes + and assumes its types are already defined. */ +#if @HAVE_WCHAR_H@ && ! 
(defined WCHAR_MIN && defined WCHAR_MAX) + /* BSD/OS 4.0.1 has a bug: , and must be + included before . */ +# include +# include +# include +# define _GL_JUST_INCLUDE_SYSTEM_WCHAR_H +# include +# undef _GL_JUST_INCLUDE_SYSTEM_WCHAR_H +#endif +#undef WCHAR_MIN +#undef WCHAR_MAX +#define WCHAR_MIN \ + _STDINT_MIN (@HAVE_SIGNED_WCHAR_T@, @BITSIZEOF_WCHAR_T@, 0@WCHAR_T_SUFFIX@) +#define WCHAR_MAX \ + _STDINT_MAX (@HAVE_SIGNED_WCHAR_T@, @BITSIZEOF_WCHAR_T@, 0@WCHAR_T_SUFFIX@) + +/* wint_t limits */ +#undef WINT_MIN +#undef WINT_MAX +#define WINT_MIN \ + _STDINT_MIN (@HAVE_SIGNED_WINT_T@, @BITSIZEOF_WINT_T@, 0@WINT_T_SUFFIX@) +#define WINT_MAX \ + _STDINT_MAX (@HAVE_SIGNED_WINT_T@, @BITSIZEOF_WINT_T@, 0@WINT_T_SUFFIX@) + +#endif /* !defined __cplusplus || defined __STDC_LIMIT_MACROS */ + +/* 7.18.4. Macros for integer constants */ + +#if ! defined __cplusplus || defined __STDC_CONSTANT_MACROS + +/* 7.18.4.1. Macros for minimum-width integer constants */ +/* According to ISO C 99 Technical Corrigendum 1 */ + +/* Here we assume a standard architecture where the hardware integer + types have 8, 16, 32, optionally 64 bits, and int is 32 bits. */ + +#undef INT8_C +#undef UINT8_C +#define INT8_C(x) x +#define UINT8_C(x) x + +#undef INT16_C +#undef UINT16_C +#define INT16_C(x) x +#define UINT16_C(x) x + +#undef INT32_C +#undef UINT32_C +#define INT32_C(x) x +#define UINT32_C(x) x ## U + +#undef INT64_C +#undef UINT64_C +#if LONG_MAX >> 31 >> 31 == 1 +# define INT64_C(x) x##L +#elif defined _MSC_VER +# define INT64_C(x) x##i64 +#elif @HAVE_LONG_LONG_INT@ +# define INT64_C(x) x##LL +#endif +#if ULONG_MAX >> 31 >> 31 >> 1 == 1 +# define UINT64_C(x) x##UL +#elif defined _MSC_VER +# define UINT64_C(x) x##ui64 +#elif @HAVE_UNSIGNED_LONG_LONG_INT@ +# define UINT64_C(x) x##ULL +#endif + +/* 7.18.4.2. 
Macros for greatest-width integer constants */ + +#undef INTMAX_C +#if @HAVE_LONG_LONG_INT@ && LONG_MAX >> 30 == 1 +# define INTMAX_C(x) x##LL +#elif defined GL_INT64_T +# define INTMAX_C(x) INT64_C(x) +#else +# define INTMAX_C(x) x##L +#endif + +#undef UINTMAX_C +#if @HAVE_UNSIGNED_LONG_LONG_INT@ && ULONG_MAX >> 31 == 1 +# define UINTMAX_C(x) x##ULL +#elif defined GL_UINT64_T +# define UINTMAX_C(x) UINT64_C(x) +#else +# define UINTMAX_C(x) x##UL +#endif + +#endif /* !defined __cplusplus || defined __STDC_CONSTANT_MACROS */ + +#endif /* _@GUARD_PREFIX@_STDINT_H */ +#endif /* !defined _@GUARD_PREFIX@_STDINT_H && !defined _GL_JUST_INCLUDE_SYSTEM_STDINT_H */ diff --git a/lib/str-two-way.h b/lib/str-two-way.h new file mode 100644 index 0000000..08a6cd3 --- /dev/null +++ b/lib/str-two-way.h @@ -0,0 +1,453 @@ +/* Byte-wise substring search, using the Two-Way algorithm. + Copyright (C) 2008-2011 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Written by Eric Blake , 2008. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +/* Before including this file, you need to include and + , and define: + RESULT_TYPE A macro that expands to the return type. + AVAILABLE(h, h_l, j, n_l) + A macro that returns nonzero if there are + at least N_L bytes left starting at H[J]. 
+ H is 'unsigned char *', H_L, J, and N_L + are 'size_t'; H_L is an lvalue. For + NUL-terminated searches, H_L can be + modified each iteration to avoid having + to compute the end of H up front. + + For case-insensitivity, you may optionally define: + CMP_FUNC(p1, p2, l) A macro that returns 0 iff the first L + characters of P1 and P2 are equal. + CANON_ELEMENT(c) A macro that canonicalizes an element right after + it has been fetched from one of the two strings. + The argument is an 'unsigned char'; the result + must be an 'unsigned char' as well. + + This file undefines the macros documented above, and defines + LONG_NEEDLE_THRESHOLD. +*/ + +#include +#include + +/* We use the Two-Way string matching algorithm (also known as + Chrochemore-Perrin), which guarantees linear complexity with + constant space. Additionally, for long needles, we also use a bad + character shift table similar to the Boyer-Moore algorithm to + achieve improved (potentially sub-linear) performance. + + See http://www-igm.univ-mlv.fr/~lecroq/string/node26.html#SECTION00260, + http://en.wikipedia.org/wiki/Boyer-Moore_string_search_algorithm, + http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.34.6641&rep=rep1&type=pdf +*/ + +/* Point at which computing a bad-byte shift table is likely to be + worthwhile. Small needles should not compute a table, since it + adds (1 << CHAR_BIT) + NEEDLE_LEN computations of preparation for a + speedup no greater than a factor of NEEDLE_LEN. The larger the + needle, the better the potential performance gain. On the other + hand, on non-POSIX systems with CHAR_BIT larger than eight, the + memory required for the table is prohibitive. */ +#if CHAR_BIT < 10 +# define LONG_NEEDLE_THRESHOLD 32U +#else +# define LONG_NEEDLE_THRESHOLD SIZE_MAX +#endif + +#ifndef MAX +# define MAX(a, b) ((a < b) ? 
(b) : (a)) +#endif + +#ifndef CANON_ELEMENT +# define CANON_ELEMENT(c) c +#endif +#ifndef CMP_FUNC +# define CMP_FUNC memcmp +#endif + +/* Perform a critical factorization of NEEDLE, of length NEEDLE_LEN. + Return the index of the first byte in the right half, and set + *PERIOD to the global period of the right half. + + The global period of a string is the smallest index (possibly its + length) at which all remaining bytes in the string are repetitions + of the prefix (the last repetition may be a subset of the prefix). + + When NEEDLE is factored into two halves, a local period is the + length of the smallest word that shares a suffix with the left half + and shares a prefix with the right half. All factorizations of a + non-empty NEEDLE have a local period of at least 1 and no greater + than NEEDLE_LEN. + + A critical factorization has the property that the local period + equals the global period. All strings have at least one critical + factorization with the left half smaller than the global period. + And while some strings have more than one critical factorization, + it is provable that with an ordered alphabet, at least one of the + critical factorizations corresponds to a maximal suffix. + + Given an ordered alphabet, a critical factorization can be computed + in linear time, with 2 * NEEDLE_LEN comparisons, by computing the + shorter of two ordered maximal suffixes. The ordered maximal + suffixes are determined by lexicographic comparison while tracking + periodicity. */ +static size_t +critical_factorization (const unsigned char *needle, size_t needle_len, + size_t *period) +{ + /* Index of last byte of left half, or SIZE_MAX. */ + size_t max_suffix, max_suffix_rev; + size_t j; /* Index into NEEDLE for current candidate suffix. */ + size_t k; /* Offset into current period. */ + size_t p; /* Intermediate period. */ + unsigned char a, b; /* Current comparison bytes. 
*/ + + /* Special case NEEDLE_LEN of 1 or 2 (all callers already filtered + out 0-length needles. */ + if (needle_len < 3) + { + *period = 1; + return needle_len - 1; + } + + /* Invariants: + 0 <= j < NEEDLE_LEN - 1 + -1 <= max_suffix{,_rev} < j (treating SIZE_MAX as if it were signed) + min(max_suffix, max_suffix_rev) < global period of NEEDLE + 1 <= p <= global period of NEEDLE + p == global period of the substring NEEDLE[max_suffix{,_rev}+1...j] + 1 <= k <= p + */ + + /* Perform lexicographic search. */ + max_suffix = SIZE_MAX; + j = 0; + k = p = 1; + while (j + k < needle_len) + { + a = CANON_ELEMENT (needle[j + k]); + b = CANON_ELEMENT (needle[max_suffix + k]); + if (a < b) + { + /* Suffix is smaller, period is entire prefix so far. */ + j += k; + k = 1; + p = j - max_suffix; + } + else if (a == b) + { + /* Advance through repetition of the current period. */ + if (k != p) + ++k; + else + { + j += p; + k = 1; + } + } + else /* b < a */ + { + /* Suffix is larger, start over from current location. */ + max_suffix = j++; + k = p = 1; + } + } + *period = p; + + /* Perform reverse lexicographic search. */ + max_suffix_rev = SIZE_MAX; + j = 0; + k = p = 1; + while (j + k < needle_len) + { + a = CANON_ELEMENT (needle[j + k]); + b = CANON_ELEMENT (needle[max_suffix_rev + k]); + if (b < a) + { + /* Suffix is smaller, period is entire prefix so far. */ + j += k; + k = 1; + p = j - max_suffix_rev; + } + else if (a == b) + { + /* Advance through repetition of the current period. */ + if (k != p) + ++k; + else + { + j += p; + k = 1; + } + } + else /* a < b */ + { + /* Suffix is larger, start over from current location. */ + max_suffix_rev = j++; + k = p = 1; + } + } + + /* Choose the shorter suffix. Return the index of the first byte of + the right half, rather than the last byte of the left half. 
+ + For some examples, 'banana' has two critical factorizations, both + exposed by the two lexicographic extreme suffixes of 'anana' and + 'nana', where both suffixes have a period of 2. On the other + hand, with 'aab' and 'bba', both strings have a single critical + factorization of the last byte, with the suffix having a period + of 1. While the maximal lexicographic suffix of 'aab' is 'b', + the maximal lexicographic suffix of 'bba' is 'ba', which is not a + critical factorization. Conversely, the maximal reverse + lexicographic suffix of 'a' works for 'bba', but not 'ab' for + 'aab'. The shorter suffix of the two will always be a critical + factorization. */ + if (max_suffix_rev + 1 < max_suffix + 1) + return max_suffix + 1; + *period = p; + return max_suffix_rev + 1; +} + +/* Return the first location of non-empty NEEDLE within HAYSTACK, or + NULL. HAYSTACK_LEN is the minimum known length of HAYSTACK. This + method is optimized for NEEDLE_LEN < LONG_NEEDLE_THRESHOLD. + Performance is guaranteed to be linear, with an initialization cost + of 2 * NEEDLE_LEN comparisons. + + If AVAILABLE does not modify HAYSTACK_LEN (as in memmem), then at + most 2 * HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching. + If AVAILABLE modifies HAYSTACK_LEN (as in strstr), then at most 3 * + HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching. */ +static RETURN_TYPE +two_way_short_needle (const unsigned char *haystack, size_t haystack_len, + const unsigned char *needle, size_t needle_len) +{ + size_t i; /* Index into current byte of NEEDLE. */ + size_t j; /* Index into current window of HAYSTACK. */ + size_t period; /* The period of the right half of needle. */ + size_t suffix; /* The index of the right half of needle. */ + + /* Factor the needle into two halves, such that the left half is + smaller than the global period, and the right half is + periodic (with a period as large as NEEDLE_LEN - suffix). 
*/ + suffix = critical_factorization (needle, needle_len, &period); + + /* Perform the search. Each iteration compares the right half + first. */ + if (CMP_FUNC (needle, needle + period, suffix) == 0) + { + /* Entire needle is periodic; a mismatch in the left half can + only advance by the period, so use memory to avoid rescanning + known occurrences of the period in the right half. */ + size_t memory = 0; + j = 0; + while (AVAILABLE (haystack, haystack_len, j, needle_len)) + { + /* Scan for matches in right half. */ + i = MAX (suffix, memory); + while (i < needle_len && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + ++i; + if (needle_len <= i) + { + /* Scan for matches in left half. */ + i = suffix - 1; + while (memory < i + 1 && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + --i; + if (i + 1 < memory + 1) + return (RETURN_TYPE) (haystack + j); + /* No match, so remember how many repetitions of period + on the right half were scanned. */ + j += period; + memory = needle_len - period; + } + else + { + j += i - suffix + 1; + memory = 0; + } + } + } + else + { + /* The two halves of needle are distinct; no extra memory is + required, and any mismatch results in a maximal shift. */ + period = MAX (suffix, needle_len - suffix) + 1; + j = 0; + while (AVAILABLE (haystack, haystack_len, j, needle_len)) + { + /* Scan for matches in right half. */ + i = suffix; + while (i < needle_len && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + ++i; + if (needle_len <= i) + { + /* Scan for matches in left half. */ + i = suffix - 1; + while (i != SIZE_MAX && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + --i; + if (i == SIZE_MAX) + return (RETURN_TYPE) (haystack + j); + j += period; + } + else + j += i - suffix + 1; + } + } + return NULL; +} + +/* Return the first location of non-empty NEEDLE within HAYSTACK, or + NULL. HAYSTACK_LEN is the minimum known length of HAYSTACK. 
This + method is optimized for LONG_NEEDLE_THRESHOLD <= NEEDLE_LEN. + Performance is guaranteed to be linear, with an initialization cost + of 3 * NEEDLE_LEN + (1 << CHAR_BIT) operations. + + If AVAILABLE does not modify HAYSTACK_LEN (as in memmem), then at + most 2 * HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching, + and sublinear performance O(HAYSTACK_LEN / NEEDLE_LEN) is possible. + If AVAILABLE modifies HAYSTACK_LEN (as in strstr), then at most 3 * + HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching, and + sublinear performance is not possible. */ +static RETURN_TYPE +two_way_long_needle (const unsigned char *haystack, size_t haystack_len, + const unsigned char *needle, size_t needle_len) +{ + size_t i; /* Index into current byte of NEEDLE. */ + size_t j; /* Index into current window of HAYSTACK. */ + size_t period; /* The period of the right half of needle. */ + size_t suffix; /* The index of the right half of needle. */ + size_t shift_table[1U << CHAR_BIT]; /* See below. */ + + /* Factor the needle into two halves, such that the left half is + smaller than the global period, and the right half is + periodic (with a period as large as NEEDLE_LEN - suffix). */ + suffix = critical_factorization (needle, needle_len, &period); + + /* Populate shift_table. For each possible byte value c, + shift_table[c] is the distance from the last occurrence of c to + the end of NEEDLE, or NEEDLE_LEN if c is absent from the NEEDLE. + shift_table[NEEDLE[NEEDLE_LEN - 1]] contains the only 0. */ + for (i = 0; i < 1U << CHAR_BIT; i++) + shift_table[i] = needle_len; + for (i = 0; i < needle_len; i++) + shift_table[CANON_ELEMENT (needle[i])] = needle_len - i - 1; + + /* Perform the search. Each iteration compares the right half + first. 
*/ + if (CMP_FUNC (needle, needle + period, suffix) == 0) + { + /* Entire needle is periodic; a mismatch in the left half can + only advance by the period, so use memory to avoid rescanning + known occurrences of the period in the right half. */ + size_t memory = 0; + size_t shift; + j = 0; + while (AVAILABLE (haystack, haystack_len, j, needle_len)) + { + /* Check the last byte first; if it does not match, then + shift to the next possible match location. */ + shift = shift_table[CANON_ELEMENT (haystack[j + needle_len - 1])]; + if (0 < shift) + { + if (memory && shift < period) + { + /* Since needle is periodic, but the last period has + a byte out of place, there can be no match until + after the mismatch. */ + shift = needle_len - period; + } + memory = 0; + j += shift; + continue; + } + /* Scan for matches in right half. The last byte has + already been matched, by virtue of the shift table. */ + i = MAX (suffix, memory); + while (i < needle_len - 1 && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + ++i; + if (needle_len - 1 <= i) + { + /* Scan for matches in left half. */ + i = suffix - 1; + while (memory < i + 1 && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + --i; + if (i + 1 < memory + 1) + return (RETURN_TYPE) (haystack + j); + /* No match, so remember how many repetitions of period + on the right half were scanned. */ + j += period; + memory = needle_len - period; + } + else + { + j += i - suffix + 1; + memory = 0; + } + } + } + else + { + /* The two halves of needle are distinct; no extra memory is + required, and any mismatch results in a maximal shift. */ + size_t shift; + period = MAX (suffix, needle_len - suffix) + 1; + j = 0; + while (AVAILABLE (haystack, haystack_len, j, needle_len)) + { + /* Check the last byte first; if it does not match, then + shift to the next possible match location. 
*/ + shift = shift_table[CANON_ELEMENT (haystack[j + needle_len - 1])]; + if (0 < shift) + { + j += shift; + continue; + } + /* Scan for matches in right half. The last byte has + already been matched, by virtue of the shift table. */ + i = suffix; + while (i < needle_len - 1 && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + ++i; + if (needle_len - 1 <= i) + { + /* Scan for matches in left half. */ + i = suffix - 1; + while (i != SIZE_MAX && (CANON_ELEMENT (needle[i]) + == CANON_ELEMENT (haystack[i + j]))) + --i; + if (i == SIZE_MAX) + return (RETURN_TYPE) (haystack + j); + j += period; + } + else + j += i - suffix + 1; + } + } + return NULL; +} + +#undef AVAILABLE +#undef CANON_ELEMENT +#undef CMP_FUNC +#undef MAX +#undef RETURN_TYPE diff --git a/lib/string.in.h b/lib/string.in.h new file mode 100644 index 0000000..d9c95a4 --- /dev/null +++ b/lib/string.in.h @@ -0,0 +1,981 @@ +/* A GNU-like . + + Copyright (C) 1995-1996, 2001-2011 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +#ifndef _@GUARD_PREFIX@_STRING_H + +#if __GNUC__ >= 3 +@PRAGMA_SYSTEM_HEADER@ +#endif +@PRAGMA_COLUMNS@ + +/* The include_next requires a split double-inclusion guard. 
*/ +#@INCLUDE_NEXT@ @NEXT_STRING_H@ + +#ifndef _@GUARD_PREFIX@_STRING_H +#define _@GUARD_PREFIX@_STRING_H + +/* NetBSD 5.0 mis-defines NULL. */ +#include + +/* MirBSD defines mbslen as a macro. */ +#if @GNULIB_MBSLEN@ && defined __MirBSD__ +# include +#endif + +/* The __attribute__ feature is available in gcc versions 2.5 and later. + The attribute __pure__ was added in gcc 2.96. */ +#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96) +# define _GL_ATTRIBUTE_PURE __attribute__ ((__pure__)) +#else +# define _GL_ATTRIBUTE_PURE /* empty */ +#endif + +/* NetBSD 5.0 declares strsignal in , not in . */ +/* But in any case avoid namespace pollution on glibc systems. */ +#if (@GNULIB_STRSIGNAL@ || defined GNULIB_POSIXCHECK) && defined __NetBSD__ \ + && ! defined __GLIBC__ +# include +#endif + +/* The definitions of _GL_FUNCDECL_RPL etc. are copied here. */ + +/* The definition of _GL_ARG_NONNULL is copied here. */ + +/* The definition of _GL_WARN_ON_USE is copied here. */ + + +/* Return the first instance of C within N bytes of S, or NULL. */ +#if @GNULIB_MEMCHR@ +# if @REPLACE_MEMCHR@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define memchr rpl_memchr +# endif +_GL_FUNCDECL_RPL (memchr, void *, (void const *__s, int __c, size_t __n) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (memchr, void *, (void const *__s, int __c, size_t __n)); +# else +# if ! 
@HAVE_MEMCHR@ +_GL_FUNCDECL_SYS (memchr, void *, (void const *__s, int __c, size_t __n) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +# endif + /* On some systems, this function is defined as an overloaded function: + extern "C" { const void * std::memchr (const void *, int, size_t); } + extern "C++" { void * std::memchr (void *, int, size_t); } */ +_GL_CXXALIAS_SYS_CAST2 (memchr, + void *, (void const *__s, int __c, size_t __n), + void const *, (void const *__s, int __c, size_t __n)); +# endif +# if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ + && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) +_GL_CXXALIASWARN1 (memchr, void *, (void *__s, int __c, size_t __n)); +_GL_CXXALIASWARN1 (memchr, void const *, + (void const *__s, int __c, size_t __n)); +# else +_GL_CXXALIASWARN (memchr); +# endif +#elif defined GNULIB_POSIXCHECK +# undef memchr +/* Assume memchr is always declared. */ +_GL_WARN_ON_USE (memchr, "memchr has platform-specific bugs - " + "use gnulib module memchr for portability" ); +#endif + +/* Return the first occurrence of NEEDLE in HAYSTACK. */ +#if @GNULIB_MEMMEM@ +# if @REPLACE_MEMMEM@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define memmem rpl_memmem +# endif +_GL_FUNCDECL_RPL (memmem, void *, + (void const *__haystack, size_t __haystack_len, + void const *__needle, size_t __needle_len) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1, 3))); +_GL_CXXALIAS_RPL (memmem, void *, + (void const *__haystack, size_t __haystack_len, + void const *__needle, size_t __needle_len)); +# else +# if ! 
@HAVE_DECL_MEMMEM@ +_GL_FUNCDECL_SYS (memmem, void *, + (void const *__haystack, size_t __haystack_len, + void const *__needle, size_t __needle_len) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1, 3))); +# endif +_GL_CXXALIAS_SYS (memmem, void *, + (void const *__haystack, size_t __haystack_len, + void const *__needle, size_t __needle_len)); +# endif +_GL_CXXALIASWARN (memmem); +#elif defined GNULIB_POSIXCHECK +# undef memmem +# if HAVE_RAW_DECL_MEMMEM +_GL_WARN_ON_USE (memmem, "memmem is unportable and often quadratic - " + "use gnulib module memmem-simple for portability, " + "and module memmem for speed" ); +# endif +#endif + +/* Copy N bytes of SRC to DEST, return pointer to bytes after the + last written byte. */ +#if @GNULIB_MEMPCPY@ +# if ! @HAVE_MEMPCPY@ +_GL_FUNCDECL_SYS (mempcpy, void *, + (void *restrict __dest, void const *restrict __src, + size_t __n) + _GL_ARG_NONNULL ((1, 2))); +# endif +_GL_CXXALIAS_SYS (mempcpy, void *, + (void *restrict __dest, void const *restrict __src, + size_t __n)); +_GL_CXXALIASWARN (mempcpy); +#elif defined GNULIB_POSIXCHECK +# undef mempcpy +# if HAVE_RAW_DECL_MEMPCPY +_GL_WARN_ON_USE (mempcpy, "mempcpy is unportable - " + "use gnulib module mempcpy for portability"); +# endif +#endif + +/* Search backwards through a block for a byte (specified as an int). */ +#if @GNULIB_MEMRCHR@ +# if ! 
@HAVE_DECL_MEMRCHR@ +_GL_FUNCDECL_SYS (memrchr, void *, (void const *, int, size_t) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +# endif + /* On some systems, this function is defined as an overloaded function: + extern "C++" { const void * std::memrchr (const void *, int, size_t); } + extern "C++" { void * std::memrchr (void *, int, size_t); } */ +_GL_CXXALIAS_SYS_CAST2 (memrchr, + void *, (void const *, int, size_t), + void const *, (void const *, int, size_t)); +# if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ + && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) +_GL_CXXALIASWARN1 (memrchr, void *, (void *, int, size_t)); +_GL_CXXALIASWARN1 (memrchr, void const *, (void const *, int, size_t)); +# else +_GL_CXXALIASWARN (memrchr); +# endif +#elif defined GNULIB_POSIXCHECK +# undef memrchr +# if HAVE_RAW_DECL_MEMRCHR +_GL_WARN_ON_USE (memrchr, "memrchr is unportable - " + "use gnulib module memrchr for portability"); +# endif +#endif + +/* Find the first occurrence of C in S. More efficient than + memchr(S,C,N), at the expense of undefined behavior if C does not + occur within N bytes. */ +#if @GNULIB_RAWMEMCHR@ +# if ! 
@HAVE_RAWMEMCHR@ +_GL_FUNCDECL_SYS (rawmemchr, void *, (void const *__s, int __c_in) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +# endif + /* On some systems, this function is defined as an overloaded function: + extern "C++" { const void * std::rawmemchr (const void *, int); } + extern "C++" { void * std::rawmemchr (void *, int); } */ +_GL_CXXALIAS_SYS_CAST2 (rawmemchr, + void *, (void const *__s, int __c_in), + void const *, (void const *__s, int __c_in)); +# if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ + && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) +_GL_CXXALIASWARN1 (rawmemchr, void *, (void *__s, int __c_in)); +_GL_CXXALIASWARN1 (rawmemchr, void const *, (void const *__s, int __c_in)); +# else +_GL_CXXALIASWARN (rawmemchr); +# endif +#elif defined GNULIB_POSIXCHECK +# undef rawmemchr +# if HAVE_RAW_DECL_RAWMEMCHR +_GL_WARN_ON_USE (rawmemchr, "rawmemchr is unportable - " + "use gnulib module rawmemchr for portability"); +# endif +#endif + +/* Copy SRC to DST, returning the address of the terminating '\0' in DST. */ +#if @GNULIB_STPCPY@ +# if ! @HAVE_STPCPY@ +_GL_FUNCDECL_SYS (stpcpy, char *, + (char *restrict __dst, char const *restrict __src) + _GL_ARG_NONNULL ((1, 2))); +# endif +_GL_CXXALIAS_SYS (stpcpy, char *, + (char *restrict __dst, char const *restrict __src)); +_GL_CXXALIASWARN (stpcpy); +#elif defined GNULIB_POSIXCHECK +# undef stpcpy +# if HAVE_RAW_DECL_STPCPY +_GL_WARN_ON_USE (stpcpy, "stpcpy is unportable - " + "use gnulib module stpcpy for portability"); +# endif +#endif + +/* Copy no more than N bytes of SRC to DST, returning a pointer past the + last non-NUL byte written into DST. 
*/ +#if @GNULIB_STPNCPY@ +# if @REPLACE_STPNCPY@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef stpncpy +# define stpncpy rpl_stpncpy +# endif +_GL_FUNCDECL_RPL (stpncpy, char *, + (char *restrict __dst, char const *restrict __src, + size_t __n) + _GL_ARG_NONNULL ((1, 2))); +_GL_CXXALIAS_RPL (stpncpy, char *, + (char *restrict __dst, char const *restrict __src, + size_t __n)); +# else +# if ! @HAVE_STPNCPY@ +_GL_FUNCDECL_SYS (stpncpy, char *, + (char *restrict __dst, char const *restrict __src, + size_t __n) + _GL_ARG_NONNULL ((1, 2))); +# endif +_GL_CXXALIAS_SYS (stpncpy, char *, + (char *restrict __dst, char const *restrict __src, + size_t __n)); +# endif +_GL_CXXALIASWARN (stpncpy); +#elif defined GNULIB_POSIXCHECK +# undef stpncpy +# if HAVE_RAW_DECL_STPNCPY +_GL_WARN_ON_USE (stpncpy, "stpncpy is unportable - " + "use gnulib module stpncpy for portability"); +# endif +#endif + +#if defined GNULIB_POSIXCHECK +/* strchr() does not work with multibyte strings if the locale encoding is + GB18030 and the character to be searched is a digit. */ +# undef strchr +/* Assume strchr is always declared. */ +_GL_WARN_ON_USE (strchr, "strchr cannot work correctly on character strings " + "in some multibyte locales - " + "use mbschr if you care about internationalization"); +#endif + +/* Find the first occurrence of C in S or the final NUL byte. */ +#if @GNULIB_STRCHRNUL@ +# if @REPLACE_STRCHRNUL@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define strchrnul rpl_strchrnul +# endif +_GL_FUNCDECL_RPL (strchrnul, char *, (const char *__s, int __c_in) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (strchrnul, char *, + (const char *str, int ch)); +# else +# if ! 
@HAVE_STRCHRNUL@ +_GL_FUNCDECL_SYS (strchrnul, char *, (char const *__s, int __c_in) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +# endif + /* On some systems, this function is defined as an overloaded function: + extern "C++" { const char * std::strchrnul (const char *, int); } + extern "C++" { char * std::strchrnul (char *, int); } */ +_GL_CXXALIAS_SYS_CAST2 (strchrnul, + char *, (char const *__s, int __c_in), + char const *, (char const *__s, int __c_in)); +# endif +# if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ + && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) +_GL_CXXALIASWARN1 (strchrnul, char *, (char *__s, int __c_in)); +_GL_CXXALIASWARN1 (strchrnul, char const *, (char const *__s, int __c_in)); +# else +_GL_CXXALIASWARN (strchrnul); +# endif +#elif defined GNULIB_POSIXCHECK +# undef strchrnul +# if HAVE_RAW_DECL_STRCHRNUL +_GL_WARN_ON_USE (strchrnul, "strchrnul is unportable - " + "use gnulib module strchrnul for portability"); +# endif +#endif + +/* Duplicate S, returning an identical malloc'd string. */ +#if @GNULIB_STRDUP@ +# if @REPLACE_STRDUP@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef strdup +# define strdup rpl_strdup +# endif +_GL_FUNCDECL_RPL (strdup, char *, (char const *__s) _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (strdup, char *, (char const *__s)); +# else +# if defined __cplusplus && defined GNULIB_NAMESPACE && defined strdup + /* strdup exists as a function and as a macro. Get rid of the macro. 
*/ +# undef strdup +# endif +# if !(@HAVE_DECL_STRDUP@ || defined strdup) +_GL_FUNCDECL_SYS (strdup, char *, (char const *__s) _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (strdup, char *, (char const *__s)); +# endif +_GL_CXXALIASWARN (strdup); +#elif defined GNULIB_POSIXCHECK +# undef strdup +# if HAVE_RAW_DECL_STRDUP +_GL_WARN_ON_USE (strdup, "strdup is unportable - " + "use gnulib module strdup for portability"); +# endif +#endif + +/* Append no more than N characters from SRC onto DEST. */ +#if @GNULIB_STRNCAT@ +# if @REPLACE_STRNCAT@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef strncat +# define strncat rpl_strncat +# endif +_GL_FUNCDECL_RPL (strncat, char *, (char *dest, const char *src, size_t n) + _GL_ARG_NONNULL ((1, 2))); +_GL_CXXALIAS_RPL (strncat, char *, (char *dest, const char *src, size_t n)); +# else +_GL_CXXALIAS_SYS (strncat, char *, (char *dest, const char *src, size_t n)); +# endif +_GL_CXXALIASWARN (strncat); +#elif defined GNULIB_POSIXCHECK +# undef strncat +# if HAVE_RAW_DECL_STRNCAT +_GL_WARN_ON_USE (strncat, "strncat is unportable - " + "use gnulib module strncat for portability"); +# endif +#endif + +/* Return a newly allocated copy of at most N bytes of STRING. */ +#if @GNULIB_STRNDUP@ +# if @REPLACE_STRNDUP@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef strndup +# define strndup rpl_strndup +# endif +_GL_FUNCDECL_RPL (strndup, char *, (char const *__string, size_t __n) + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (strndup, char *, (char const *__string, size_t __n)); +# else +# if ! 
@HAVE_DECL_STRNDUP@ +_GL_FUNCDECL_SYS (strndup, char *, (char const *__string, size_t __n) + _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (strndup, char *, (char const *__string, size_t __n)); +# endif +_GL_CXXALIASWARN (strndup); +#elif defined GNULIB_POSIXCHECK +# undef strndup +# if HAVE_RAW_DECL_STRNDUP +_GL_WARN_ON_USE (strndup, "strndup is unportable - " + "use gnulib module strndup for portability"); +# endif +#endif + +/* Find the length (number of bytes) of STRING, but scan at most + MAXLEN bytes. If no '\0' terminator is found in that many bytes, + return MAXLEN. */ +#if @GNULIB_STRNLEN@ +# if @REPLACE_STRNLEN@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef strnlen +# define strnlen rpl_strnlen +# endif +_GL_FUNCDECL_RPL (strnlen, size_t, (char const *__string, size_t __maxlen) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (strnlen, size_t, (char const *__string, size_t __maxlen)); +# else +# if ! @HAVE_DECL_STRNLEN@ +_GL_FUNCDECL_SYS (strnlen, size_t, (char const *__string, size_t __maxlen) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1))); +# endif +_GL_CXXALIAS_SYS (strnlen, size_t, (char const *__string, size_t __maxlen)); +# endif +_GL_CXXALIASWARN (strnlen); +#elif defined GNULIB_POSIXCHECK +# undef strnlen +# if HAVE_RAW_DECL_STRNLEN +_GL_WARN_ON_USE (strnlen, "strnlen is unportable - " + "use gnulib module strnlen for portability"); +# endif +#endif + +#if defined GNULIB_POSIXCHECK +/* strcspn() assumes the second argument is a list of single-byte characters. + Even in this simple case, it does not work with multibyte strings if the + locale encoding is GB18030 and one of the characters to be searched is a + digit. */ +# undef strcspn +/* Assume strcspn is always declared. 
*/ +_GL_WARN_ON_USE (strcspn, "strcspn cannot work correctly on character strings " + "in multibyte locales - " + "use mbscspn if you care about internationalization"); +#endif + +/* Find the first occurrence in S of any character in ACCEPT. */ +#if @GNULIB_STRPBRK@ +# if ! @HAVE_STRPBRK@ +_GL_FUNCDECL_SYS (strpbrk, char *, (char const *__s, char const *__accept) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1, 2))); +# endif + /* On some systems, this function is defined as an overloaded function: + extern "C" { const char * strpbrk (const char *, const char *); } + extern "C++" { char * strpbrk (char *, const char *); } */ +_GL_CXXALIAS_SYS_CAST2 (strpbrk, + char *, (char const *__s, char const *__accept), + const char *, (char const *__s, char const *__accept)); +# if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ + && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) +_GL_CXXALIASWARN1 (strpbrk, char *, (char *__s, char const *__accept)); +_GL_CXXALIASWARN1 (strpbrk, char const *, + (char const *__s, char const *__accept)); +# else +_GL_CXXALIASWARN (strpbrk); +# endif +# if defined GNULIB_POSIXCHECK +/* strpbrk() assumes the second argument is a list of single-byte characters. + Even in this simple case, it does not work with multibyte strings if the + locale encoding is GB18030 and one of the characters to be searched is a + digit. */ +# undef strpbrk +_GL_WARN_ON_USE (strpbrk, "strpbrk cannot work correctly on character strings " + "in multibyte locales - " + "use mbspbrk if you care about internationalization"); +# endif +#elif defined GNULIB_POSIXCHECK +# undef strpbrk +# if HAVE_RAW_DECL_STRPBRK +_GL_WARN_ON_USE (strpbrk, "strpbrk is unportable - " + "use gnulib module strpbrk for portability"); +# endif +#endif + +#if defined GNULIB_POSIXCHECK +/* strspn() assumes the second argument is a list of single-byte characters. + Even in this simple case, it cannot work with multibyte strings. 
*/ +# undef strspn +/* Assume strspn is always declared. */ +_GL_WARN_ON_USE (strspn, "strspn cannot work correctly on character strings " + "in multibyte locales - " + "use mbsspn if you care about internationalization"); +#endif + +#if defined GNULIB_POSIXCHECK +/* strrchr() does not work with multibyte strings if the locale encoding is + GB18030 and the character to be searched is a digit. */ +# undef strrchr +/* Assume strrchr is always declared. */ +_GL_WARN_ON_USE (strrchr, "strrchr cannot work correctly on character strings " + "in some multibyte locales - " + "use mbsrchr if you care about internationalization"); +#endif + +/* Search the next delimiter (char listed in DELIM) starting at *STRINGP. + If one is found, overwrite it with a NUL, and advance *STRINGP + to point to the next char after it. Otherwise, set *STRINGP to NULL. + If *STRINGP was already NULL, nothing happens. + Return the old value of *STRINGP. + + This is a variant of strtok() that is multithread-safe and supports + empty fields. + + Caveat: It modifies the original string. + Caveat: These functions cannot be used on constant strings. + Caveat: The identity of the delimiting character is lost. + Caveat: It doesn't work with multibyte strings unless all of the delimiter + characters are ASCII characters < 0x30. + + See also strtok_r(). */ +#if @GNULIB_STRSEP@ +# if ! 
@HAVE_STRSEP@ +_GL_FUNCDECL_SYS (strsep, char *, + (char **restrict __stringp, char const *restrict __delim) + _GL_ARG_NONNULL ((1, 2))); +# endif +_GL_CXXALIAS_SYS (strsep, char *, + (char **restrict __stringp, char const *restrict __delim)); +_GL_CXXALIASWARN (strsep); +# if defined GNULIB_POSIXCHECK +# undef strsep +_GL_WARN_ON_USE (strsep, "strsep cannot work correctly on character strings " + "in multibyte locales - " + "use mbssep if you care about internationalization"); +# endif +#elif defined GNULIB_POSIXCHECK +# undef strsep +# if HAVE_RAW_DECL_STRSEP +_GL_WARN_ON_USE (strsep, "strsep is unportable - " + "use gnulib module strsep for portability"); +# endif +#endif + +#if @GNULIB_STRSTR@ +# if @REPLACE_STRSTR@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define strstr rpl_strstr +# endif +_GL_FUNCDECL_RPL (strstr, char *, (const char *haystack, const char *needle) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1, 2))); +_GL_CXXALIAS_RPL (strstr, char *, (const char *haystack, const char *needle)); +# else + /* On some systems, this function is defined as an overloaded function: + extern "C++" { const char * strstr (const char *, const char *); } + extern "C++" { char * strstr (char *, const char *); } */ +_GL_CXXALIAS_SYS_CAST2 (strstr, + char *, (const char *haystack, const char *needle), + const char *, (const char *haystack, const char *needle)); +# endif +# if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ + && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) +_GL_CXXALIASWARN1 (strstr, char *, (char *haystack, const char *needle)); +_GL_CXXALIASWARN1 (strstr, const char *, + (const char *haystack, const char *needle)); +# else +_GL_CXXALIASWARN (strstr); +# endif +#elif defined GNULIB_POSIXCHECK +/* strstr() does not work with multibyte strings if the locale encoding is + different from UTF-8: + POSIX says that it operates on "strings", and "string" in POSIX is defined + as a sequence of bytes, not of 
characters. */ +# undef strstr +/* Assume strstr is always declared. */ +_GL_WARN_ON_USE (strstr, "strstr is quadratic on many systems, and cannot " + "work correctly on character strings in most " + "multibyte locales - " + "use mbsstr if you care about internationalization, " + "or use strstr if you care about speed"); +#endif + +/* Find the first occurrence of NEEDLE in HAYSTACK, using case-insensitive + comparison. */ +#if @GNULIB_STRCASESTR@ +# if @REPLACE_STRCASESTR@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define strcasestr rpl_strcasestr +# endif +_GL_FUNCDECL_RPL (strcasestr, char *, + (const char *haystack, const char *needle) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1, 2))); +_GL_CXXALIAS_RPL (strcasestr, char *, + (const char *haystack, const char *needle)); +# else +# if ! @HAVE_STRCASESTR@ +_GL_FUNCDECL_SYS (strcasestr, char *, + (const char *haystack, const char *needle) + _GL_ATTRIBUTE_PURE + _GL_ARG_NONNULL ((1, 2))); +# endif + /* On some systems, this function is defined as an overloaded function: + extern "C++" { const char * strcasestr (const char *, const char *); } + extern "C++" { char * strcasestr (char *, const char *); } */ +_GL_CXXALIAS_SYS_CAST2 (strcasestr, + char *, (const char *haystack, const char *needle), + const char *, (const char *haystack, const char *needle)); +# endif +# if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ + && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) +_GL_CXXALIASWARN1 (strcasestr, char *, (char *haystack, const char *needle)); +_GL_CXXALIASWARN1 (strcasestr, const char *, + (const char *haystack, const char *needle)); +# else +_GL_CXXALIASWARN (strcasestr); +# endif +#elif defined GNULIB_POSIXCHECK +/* strcasestr() does not work with multibyte strings: + It is a glibc extension, and glibc implements it only for unibyte + locales. 
*/ +# undef strcasestr +# if HAVE_RAW_DECL_STRCASESTR +_GL_WARN_ON_USE (strcasestr, "strcasestr does work correctly on character " + "strings in multibyte locales - " + "use mbscasestr if you care about " + "internationalization, or use c-strcasestr if you want " + "a locale independent function"); +# endif +#endif + +/* Parse S into tokens separated by characters in DELIM. + If S is NULL, the saved pointer in SAVE_PTR is used as + the next starting point. For example: + char s[] = "-abc-=-def"; + char *sp; + x = strtok_r(s, "-", &sp); // x = "abc", sp = "=-def" + x = strtok_r(NULL, "-=", &sp); // x = "def", sp = NULL + x = strtok_r(NULL, "=", &sp); // x = NULL + // s = "abc\0-def\0" + + This is a variant of strtok() that is multithread-safe. + + For the POSIX documentation for this function, see: + http://www.opengroup.org/susv3xsh/strtok.html + + Caveat: It modifies the original string. + Caveat: These functions cannot be used on constant strings. + Caveat: The identity of the delimiting character is lost. + Caveat: It doesn't work with multibyte strings unless all of the delimiter + characters are ASCII characters < 0x30. + + See also strsep(). */ +#if @GNULIB_STRTOK_R@ +# if @REPLACE_STRTOK_R@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef strtok_r +# define strtok_r rpl_strtok_r +# endif +_GL_FUNCDECL_RPL (strtok_r, char *, + (char *restrict s, char const *restrict delim, + char **restrict save_ptr) + _GL_ARG_NONNULL ((2, 3))); +_GL_CXXALIAS_RPL (strtok_r, char *, + (char *restrict s, char const *restrict delim, + char **restrict save_ptr)); +# else +# if @UNDEFINE_STRTOK_R@ || defined GNULIB_POSIXCHECK +# undef strtok_r +# endif +# if ! 
@HAVE_DECL_STRTOK_R@ +_GL_FUNCDECL_SYS (strtok_r, char *, + (char *restrict s, char const *restrict delim, + char **restrict save_ptr) + _GL_ARG_NONNULL ((2, 3))); +# endif +_GL_CXXALIAS_SYS (strtok_r, char *, + (char *restrict s, char const *restrict delim, + char **restrict save_ptr)); +# endif +_GL_CXXALIASWARN (strtok_r); +# if defined GNULIB_POSIXCHECK +_GL_WARN_ON_USE (strtok_r, "strtok_r cannot work correctly on character " + "strings in multibyte locales - " + "use mbstok_r if you care about internationalization"); +# endif +#elif defined GNULIB_POSIXCHECK +# undef strtok_r +# if HAVE_RAW_DECL_STRTOK_R +_GL_WARN_ON_USE (strtok_r, "strtok_r is unportable - " + "use gnulib module strtok_r for portability"); +# endif +#endif + + +/* The following functions are not specified by POSIX. They are gnulib + extensions. */ + +#if @GNULIB_MBSLEN@ +/* Return the number of multibyte characters in the character string STRING. + This considers multibyte characters, unlike strlen, which counts bytes. */ +# ifdef __MirBSD__ /* MirBSD defines mbslen as a macro. Override it. */ +# undef mbslen +# endif +# if @HAVE_MBSLEN@ /* AIX, OSF/1, MirBSD define mbslen already in libc. */ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define mbslen rpl_mbslen +# endif +_GL_FUNCDECL_RPL (mbslen, size_t, (const char *string) _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (mbslen, size_t, (const char *string)); +# else +_GL_FUNCDECL_SYS (mbslen, size_t, (const char *string) _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_SYS (mbslen, size_t, (const char *string)); +# endif +_GL_CXXALIASWARN (mbslen); +#endif + +#if @GNULIB_MBSNLEN@ +/* Return the number of multibyte characters in the character string starting + at STRING and ending at STRING + LEN. */ +_GL_EXTERN_C size_t mbsnlen (const char *string, size_t len) + _GL_ARG_NONNULL ((1)); +#endif + +#if @GNULIB_MBSCHR@ +/* Locate the first single-byte character C in the character string STRING, + and return a pointer to it. 
Return NULL if C is not found in STRING. + Unlike strchr(), this function works correctly in multibyte locales with + encodings such as GB18030. */ +# if defined __hpux +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define mbschr rpl_mbschr /* avoid collision with HP-UX function */ +# endif +_GL_FUNCDECL_RPL (mbschr, char *, (const char *string, int c) + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (mbschr, char *, (const char *string, int c)); +# else +_GL_FUNCDECL_SYS (mbschr, char *, (const char *string, int c) + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_SYS (mbschr, char *, (const char *string, int c)); +# endif +_GL_CXXALIASWARN (mbschr); +#endif + +#if @GNULIB_MBSRCHR@ +/* Locate the last single-byte character C in the character string STRING, + and return a pointer to it. Return NULL if C is not found in STRING. + Unlike strrchr(), this function works correctly in multibyte locales with + encodings such as GB18030. */ +# if defined __hpux || defined __INTERIX +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define mbsrchr rpl_mbsrchr /* avoid collision with system function */ +# endif +_GL_FUNCDECL_RPL (mbsrchr, char *, (const char *string, int c) + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_RPL (mbsrchr, char *, (const char *string, int c)); +# else +_GL_FUNCDECL_SYS (mbsrchr, char *, (const char *string, int c) + _GL_ARG_NONNULL ((1))); +_GL_CXXALIAS_SYS (mbsrchr, char *, (const char *string, int c)); +# endif +_GL_CXXALIASWARN (mbsrchr); +#endif + +#if @GNULIB_MBSSTR@ +/* Find the first occurrence of the character string NEEDLE in the character + string HAYSTACK. Return NULL if NEEDLE is not found in HAYSTACK. + Unlike strstr(), this function works correctly in multibyte locales with + encodings different from UTF-8. 
*/ +_GL_EXTERN_C char * mbsstr (const char *haystack, const char *needle) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSCASECMP@ +/* Compare the character strings S1 and S2, ignoring case, returning less than, + equal to or greater than zero if S1 is lexicographically less than, equal to + or greater than S2. + Note: This function may, in multibyte locales, return 0 for strings of + different lengths! + Unlike strcasecmp(), this function works correctly in multibyte locales. */ +_GL_EXTERN_C int mbscasecmp (const char *s1, const char *s2) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSNCASECMP@ +/* Compare the initial segment of the character string S1 consisting of at most + N characters with the initial segment of the character string S2 consisting + of at most N characters, ignoring case, returning less than, equal to or + greater than zero if the initial segment of S1 is lexicographically less + than, equal to or greater than the initial segment of S2. + Note: This function may, in multibyte locales, return 0 for initial segments + of different lengths! + Unlike strncasecmp(), this function works correctly in multibyte locales. + But beware that N is not a byte count but a character count! */ +_GL_EXTERN_C int mbsncasecmp (const char *s1, const char *s2, size_t n) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSPCASECMP@ +/* Compare the initial segment of the character string STRING consisting of + at most mbslen (PREFIX) characters with the character string PREFIX, + ignoring case. If the two match, return a pointer to the first byte + after this prefix in STRING. Otherwise, return NULL. + Note: This function may, in multibyte locales, return non-NULL if STRING + is of smaller length than PREFIX! + Unlike strncasecmp(), this function works correctly in multibyte + locales. 
*/ +_GL_EXTERN_C char * mbspcasecmp (const char *string, const char *prefix) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSCASESTR@ +/* Find the first occurrence of the character string NEEDLE in the character + string HAYSTACK, using case-insensitive comparison. + Note: This function may, in multibyte locales, return success even if + strlen (haystack) < strlen (needle) ! + Unlike strcasestr(), this function works correctly in multibyte locales. */ +_GL_EXTERN_C char * mbscasestr (const char *haystack, const char *needle) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSCSPN@ +/* Find the first occurrence in the character string STRING of any character + in the character string ACCEPT. Return the number of bytes from the + beginning of the string to this occurrence, or to the end of the string + if none exists. + Unlike strcspn(), this function works correctly in multibyte locales. */ +_GL_EXTERN_C size_t mbscspn (const char *string, const char *accept) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSPBRK@ +/* Find the first occurrence in the character string STRING of any character + in the character string ACCEPT. Return the pointer to it, or NULL if none + exists. + Unlike strpbrk(), this function works correctly in multibyte locales. 
*/ +# if defined __hpux +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define mbspbrk rpl_mbspbrk /* avoid collision with HP-UX function */ +# endif +_GL_FUNCDECL_RPL (mbspbrk, char *, (const char *string, const char *accept) + _GL_ARG_NONNULL ((1, 2))); +_GL_CXXALIAS_RPL (mbspbrk, char *, (const char *string, const char *accept)); +# else +_GL_FUNCDECL_SYS (mbspbrk, char *, (const char *string, const char *accept) + _GL_ARG_NONNULL ((1, 2))); +_GL_CXXALIAS_SYS (mbspbrk, char *, (const char *string, const char *accept)); +# endif +_GL_CXXALIASWARN (mbspbrk); +#endif + +#if @GNULIB_MBSSPN@ +/* Find the first occurrence in the character string STRING of any character + not in the character string REJECT. Return the number of bytes from the + beginning of the string to this occurrence, or to the end of the string + if none exists. + Unlike strspn(), this function works correctly in multibyte locales. */ +_GL_EXTERN_C size_t mbsspn (const char *string, const char *reject) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSSEP@ +/* Search the next delimiter (multibyte character listed in the character + string DELIM) starting at the character string *STRINGP. + If one is found, overwrite it with a NUL, and advance *STRINGP to point + to the next multibyte character after it. Otherwise, set *STRINGP to NULL. + If *STRINGP was already NULL, nothing happens. + Return the old value of *STRINGP. + + This is a variant of mbstok_r() that supports empty fields. + + Caveat: It modifies the original string. + Caveat: These functions cannot be used on constant strings. + Caveat: The identity of the delimiting character is lost. + + See also mbstok_r(). */ +_GL_EXTERN_C char * mbssep (char **stringp, const char *delim) + _GL_ARG_NONNULL ((1, 2)); +#endif + +#if @GNULIB_MBSTOK_R@ +/* Parse the character string STRING into tokens separated by characters in + the character string DELIM. 
+ If STRING is NULL, the saved pointer in SAVE_PTR is used as + the next starting point. For example: + char s[] = "-abc-=-def"; + char *sp; + x = mbstok_r(s, "-", &sp); // x = "abc", sp = "=-def" + x = mbstok_r(NULL, "-=", &sp); // x = "def", sp = NULL + x = mbstok_r(NULL, "=", &sp); // x = NULL + // s = "abc\0-def\0" + + Caveat: It modifies the original string. + Caveat: These functions cannot be used on constant strings. + Caveat: The identity of the delimiting character is lost. + + See also mbssep(). */ +_GL_EXTERN_C char * mbstok_r (char *string, const char *delim, char **save_ptr) + _GL_ARG_NONNULL ((2, 3)); +#endif + +/* Map any int, typically from errno, into an error message. */ +#if @GNULIB_STRERROR@ +# if @REPLACE_STRERROR@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef strerror +# define strerror rpl_strerror +# endif +_GL_FUNCDECL_RPL (strerror, char *, (int)); +_GL_CXXALIAS_RPL (strerror, char *, (int)); +# else +_GL_CXXALIAS_SYS (strerror, char *, (int)); +# endif +_GL_CXXALIASWARN (strerror); +#elif defined GNULIB_POSIXCHECK +# undef strerror +/* Assume strerror is always declared. */ +_GL_WARN_ON_USE (strerror, "strerror is unportable - " + "use gnulib module strerror to guarantee non-NULL result"); +#endif + +/* Map any int, typically from errno, into an error message. Multithread-safe. + Uses the POSIX declaration, not the glibc declaration. 
*/ +#if @GNULIB_STRERROR_R@ +# if @REPLACE_STRERROR_R@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# undef strerror_r +# define strerror_r rpl_strerror_r +# endif +_GL_FUNCDECL_RPL (strerror_r, int, (int errnum, char *buf, size_t buflen) + _GL_ARG_NONNULL ((2))); +_GL_CXXALIAS_RPL (strerror_r, int, (int errnum, char *buf, size_t buflen)); +# else +# if !@HAVE_DECL_STRERROR_R@ +_GL_FUNCDECL_SYS (strerror_r, int, (int errnum, char *buf, size_t buflen) + _GL_ARG_NONNULL ((2))); +# endif +_GL_CXXALIAS_SYS (strerror_r, int, (int errnum, char *buf, size_t buflen)); +# endif +# if @HAVE_DECL_STRERROR_R@ +_GL_CXXALIASWARN (strerror_r); +# endif +#elif defined GNULIB_POSIXCHECK +# undef strerror_r +# if HAVE_RAW_DECL_STRERROR_R +_GL_WARN_ON_USE (strerror_r, "strerror_r is unportable - " + "use gnulib module strerror_r-posix for portability"); +# endif +#endif + +#if @GNULIB_STRSIGNAL@ +# if @REPLACE_STRSIGNAL@ +# if !(defined __cplusplus && defined GNULIB_NAMESPACE) +# define strsignal rpl_strsignal +# endif +_GL_FUNCDECL_RPL (strsignal, char *, (int __sig)); +_GL_CXXALIAS_RPL (strsignal, char *, (int __sig)); +# else +# if ! @HAVE_DECL_STRSIGNAL@ +_GL_FUNCDECL_SYS (strsignal, char *, (int __sig)); +# endif +/* Need to cast, because on Cygwin 1.5.x systems, the return type is + 'const char *'. 
*/ +_GL_CXXALIAS_SYS_CAST (strsignal, char *, (int __sig)); +# endif +_GL_CXXALIASWARN (strsignal); +#elif defined GNULIB_POSIXCHECK +# undef strsignal +# if HAVE_RAW_DECL_STRSIGNAL +_GL_WARN_ON_USE (strsignal, "strsignal is unportable - " + "use gnulib module strsignal for portability"); +# endif +#endif + +#if @GNULIB_STRVERSCMP@ +# if !@HAVE_STRVERSCMP@ +_GL_FUNCDECL_SYS (strverscmp, int, (const char *, const char *) + _GL_ARG_NONNULL ((1, 2))); +# endif +_GL_CXXALIAS_SYS (strverscmp, int, (const char *, const char *)); +_GL_CXXALIASWARN (strverscmp); +#elif defined GNULIB_POSIXCHECK +# undef strverscmp +# if HAVE_RAW_DECL_STRVERSCMP +_GL_WARN_ON_USE (strverscmp, "strverscmp is unportable - " + "use gnulib module strverscmp for portability"); +# endif +#endif + + +#endif /* _@GUARD_PREFIX@_STRING_H */ +#endif /* _@GUARD_PREFIX@_STRING_H */ diff --git a/libbitfury.c b/libbitfury.c new file mode 100644 index 0000000..78f982c --- /dev/null +++ b/libbitfury.c @@ -0,0 +1,387 @@ +/* + * Copyright 2014 Con Kolivas + * Copyright 2013 Andrew Smith + * Copyright 2013 bitfury + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "miner.h" +#include "driver-bitfury.h" +#include "libbitfury.h" +#include "sha2.h" + +void ms3steps(uint32_t *p) +{ + uint32_t a, b, c, d, e, f, g, h, new_e, new_a; + int i; + + a = p[0]; + b = p[1]; + c = p[2]; + d = p[3]; + e = p[4]; + f = p[5]; + g = p[6]; + h = p[7]; + for (i = 0; i < 3; i++) { + new_e = p[i+16] + sha256_k[i] + h + CH(e,f,g) + SHA256_F2(e) + d; + new_a = p[i+16] + sha256_k[i] + h + CH(e,f,g) + SHA256_F2(e) + + SHA256_F1(a) + MAJ(a,b,c); + d = c; + c = b; + b = a; + a = new_a; + h = g; + g = f; + f = e; + e = new_e; + } + p[15] = a; + p[14] = b; + p[13] = c; + p[12] = d; + p[11] = e; + p[10] = f; + p[9] = g; + p[8] = h; +} + +uint32_t decnonce(uint32_t in) +{ + uint32_t out; + + /* First part load */ + out = (in & 0xFF) << 24; + in >>= 8; + + /* Byte reversal */ + in = (((in & 0xaaaaaaaa) >> 1) | ((in & 0x55555555) << 1)); + in = (((in & 0xcccccccc) >> 2) | ((in & 0x33333333) << 2)); + in = (((in & 0xf0f0f0f0) >> 4) | ((in & 0x0f0f0f0f) << 4)); + + out |= (in >> 2) & 0x3FFFFF; + + /* Extraction */ + if (in & 1) + out |= (1 << 23); + if (in & 2) + out |= (1 << 22); + + out -= 0x800004; + return out; +} + +/* Test vectors to calculate (using address-translated loads) */ +static unsigned int atrvec[] = { + 0xb0e72d8e, 0x1dc5b862, 0xe9e7c4a6, 0x3050f1f5, 0x8a1a6b7e, 0x7ec384e8, 0x42c1c3fc, 0x8ed158a1, /* MIDSTATE */ + 0,0,0,0,0,0,0,0, + 0x8a0bb7b7, 0x33af304f, 0x0b290c1a, 0xf0c4e61f, /* WDATA: hashMerleRoot[7], nTime, nBits, nNonce */ +}; +static bool atrvec_set; + +void bitfury_work_to_payload(struct bitfury_payload *p, struct work *work) +{ + memcpy(p->midstate, work->midstate, 32); + p->m7 = *(unsigned int *)(work->data + 64); + p->ntime = *(unsigned int *)(work->data + 68); + p->nbits = *(unsigned int *)(work->data + 72); + applog(LOG_INFO, "INFO nonc: %08x bitfury_scanHash MS0: %08x, ", p->nnonce, + ((unsigned int *)work->midstate)[0]); + applog(LOG_INFO, "INFO merkle[7]: %08x, ntime: %08x, nbits: %08x", p->m7, + p->ntime, 
p->nbits); +} + +/* Configuration registers - control oscillators and such stuff. PROGRAMMED when + * magic number matches, UNPROGRAMMED (default) otherwise */ +void spi_config_reg(struct bitfury_info *info, int cfgreg, int ena) +{ + static const uint8_t enaconf[4] = { 0xc1, 0x6a, 0x59, 0xe3 }; + static const uint8_t disconf[4] = { 0, 0, 0, 0 }; + + if (ena) + spi_add_data(info, 0x7000 + cfgreg * 32, enaconf, 4); + else + spi_add_data(info, 0x7000 + cfgreg * 32, disconf, 4); +} + +void spi_set_freq(struct bitfury_info *info) +{ + uint64_t freq; + const uint8_t *osc6 = (unsigned char *)&freq; + + freq = (1ULL << info->osc6_bits) - 1ULL; + spi_add_data(info, 0x6000, osc6, 8); /* Program internal on-die slow oscillator frequency */ +} + +#define FIRST_BASE 61 +#define SECOND_BASE 4 + +void spi_send_conf(struct bitfury_info *info) +{ + const int8_t nfu_counters[16] = { 64, 64, SECOND_BASE, SECOND_BASE+4, SECOND_BASE+2, + SECOND_BASE+2+16, SECOND_BASE, SECOND_BASE+1, (FIRST_BASE)%65, (FIRST_BASE+1)%65, + (FIRST_BASE+3)%65, (FIRST_BASE+3+16)%65, (FIRST_BASE+4)%65, (FIRST_BASE+4+4)%65, + (FIRST_BASE+3+3)%65, (FIRST_BASE+3+1+3)%65 }; + int i; + + for (i = 7; i <= 11; i++) + spi_config_reg(info, i, 0); + spi_config_reg(info, 6, 1); /* disable OUTSLK */ + spi_config_reg(info, 4, 1); /* Enable slow oscillator */ + for (i = 1; i <= 3; ++i) + spi_config_reg(info, i, 0); + /* Program counters correctly for rounds processing, here it should + * start consuming power */ + spi_add_data(info, 0x0100, nfu_counters, 16); +} + +void spi_send_init(struct bitfury_info *info) +{ + /* Prepare internal buffers */ + /* PREPARE BUFFERS (INITIAL PROGRAMMING) */ + unsigned int w[16]; + + if (!atrvec_set) { + atrvec_set = true; + ms3steps(atrvec); + } + memset(w, 0, sizeof(w)); + w[3] = 0xffffffff; + w[4] = 0x80000000; + w[15] = 0x00000280; + spi_add_data(info, 0x1000, w, 16 * 4); + spi_add_data(info, 0x1400, w, 8 * 4); + memset(w, 0, sizeof(w)); + w[0] = 0x80000000; + w[7] = 0x100; + 
spi_add_data(info, 0x1900, w, 8 * 4); /* Prepare MS and W buffers! */ + spi_add_data(info, 0x3000, atrvec, 19 * 4); +} +void spi_clear_buf(struct bitfury_info *info) +{ + info->spibufsz = 0; +} + +void spi_add_buf(struct bitfury_info *info, const void *buf, const int sz) +{ + if (unlikely(info->spibufsz + sz > SPIBUF_SIZE)) { + applog(LOG_WARNING, "SPI bufsize overflow!"); + return; + } + memcpy(&info->spibuf[info->spibufsz], buf, sz); + info->spibufsz += sz; +} + +void spi_add_break(struct bitfury_info *info) +{ + spi_add_buf(info, "\x4", 1); +} + +void spi_add_fasync(struct bitfury_info *info, int n) +{ + int i; + + for (i = 0; i < n; i++) + spi_add_buf(info, "\x5", 1); +} + +static void spi_add_buf_reverse(struct bitfury_info *info, const char *buf, const int sz) +{ + int i; + + for (i = 0; i < sz; i++) { // Reverse bit order in each byte! + unsigned char p = buf[i]; + + p = ((p & 0xaa) >> 1) | ((p & 0x55) << 1); + p = ((p & 0xcc) >> 2) | ((p & 0x33) << 2); + p = ((p & 0xf0) >> 4) | ((p & 0x0f) << 4); + info->spibuf[info->spibufsz + i] = p; + } + info->spibufsz += sz; +} + +void spi_add_data(struct bitfury_info *info, uint16_t addr, const void *buf, int len) +{ + unsigned char otmp[3]; + + if (len < 4 || len > 128) { + applog(LOG_WARNING, "Can't add SPI data size %d", len); + return; + } + len /= 4; /* Strip */ + otmp[0] = (len - 1) | 0xE0; + otmp[1] = (addr >> 8) & 0xFF; + otmp[2] = addr & 0xFF; + spi_add_buf(info, otmp, 3); + len *= 4; + spi_add_buf_reverse(info, buf, len); +} + +// Bit-banging reset... 
Each 3 reset cycles reset first chip in chain +bool spi_reset(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + struct mcp_settings *mcp = &info->mcp; + int r; + + // SCK_OVRRIDE + mcp->value.pin[NFU_PIN_SCK_OVR] = MCP2210_GPIO_PIN_HIGH; + mcp->direction.pin[NFU_PIN_SCK_OVR] = MCP2210_GPIO_OUTPUT; + mcp->designation.pin[NFU_PIN_SCK_OVR] = MCP2210_PIN_GPIO; + if (!mcp2210_set_gpio_settings(bitfury, mcp)) + return false; + + for (r = 0; r < 16; ++r) { + char buf[1] = {0x81}; // will send this waveform: - _ _ _ _ _ _ - + unsigned int length = 1; + + if (!mcp2210_spi_transfer(bitfury, &info->mcp, buf, &length)) + return false; + } + + // Deactivate override + mcp->direction.pin[NFU_PIN_SCK_OVR] = MCP2210_GPIO_INPUT; + if (!mcp2210_set_gpio_settings(bitfury, mcp)) + return false; + + return true; +} + +bool mcp_spi_txrx(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + unsigned int length, sendrcv; + int offset = 0; + + length = info->spibufsz; + applog(LOG_DEBUG, "%s %d: SPI sending %u bytes total", bitfury->drv->name, + bitfury->device_id, length); + while (length > MCP2210_TRANSFER_MAX) { + sendrcv = MCP2210_TRANSFER_MAX; + if (!mcp2210_spi_transfer(bitfury, &info->mcp, info->spibuf + offset, &sendrcv)) + return false; + if (sendrcv != MCP2210_TRANSFER_MAX) { + applog(LOG_DEBUG, "%s %d: Send/Receive size mismatch sent %d received %d", + bitfury->drv->name, bitfury->device_id, MCP2210_TRANSFER_MAX, sendrcv); + } + length -= MCP2210_TRANSFER_MAX; + offset += MCP2210_TRANSFER_MAX; + } + sendrcv = length; + if (!mcp2210_spi_transfer(bitfury, &info->mcp, info->spibuf + offset, &sendrcv)) + return false; + if (sendrcv != length) { + applog(LOG_WARNING, "%s %d: Send/Receive size mismatch sent %d received %d", + bitfury->drv->name, bitfury->device_id, length, sendrcv); + return false; + } + return true; +} + +#define READ_WRITE_BYTES_SPI0 0x31 + +bool ftdi_spi_txrx(struct cgpu_info *bitfury, struct bitfury_info *info) +{ + int err, amount, len; + uint16_t 
length; + char buf[1024]; + + len = info->spibufsz; + length = info->spibufsz - 1; //FTDI length is shifted by one 0x0000 = one byte + buf[0] = READ_WRITE_BYTES_SPI0; + buf[1] = length & 0x00FF; + buf[2] = (length & 0xFF00) >> 8; + memcpy(&buf[3], info->spibuf, info->spibufsz); + info->spibufsz += 3; + err = usb_write(bitfury, buf, info->spibufsz, &amount, C_BXM_SPITX); + if (err || amount != (int)info->spibufsz) { + applog(LOG_ERR, "%s %d: SPI TX error %d, sent %d of %d", bitfury->drv->name, + bitfury->device_id, err, amount, info->spibufsz); + return false; + } + info->spibufsz = len; + /* We shouldn't even get a timeout error on reads in spi mode */ + err = usb_read(bitfury, info->spibuf, len, &amount, C_BXM_SPIRX); + if (err || amount != len) { + applog(LOG_ERR, "%s %d: SPI RX error %d, read %d of %d", bitfury->drv->name, + bitfury->device_id, err, amount, info->spibufsz); + return false; + } + amount = usb_buffer_size(bitfury); + if (amount) { + applog(LOG_ERR, "%s %d: SPI RX Extra read buffer size %d", bitfury->drv->name, + bitfury->device_id, amount); + usb_buffer_clear(bitfury); + return false; + } + return true; +} + +#define BT_OFFSETS 3 + +bool bitfury_checkresults(struct thr_info *thr, struct work *work, uint32_t nonce) +{ + const uint32_t bf_offsets[] = {-0x800000, 0, -0x400000}; + int i; + + for (i = 0; i < BT_OFFSETS; i++) { + uint32_t noffset = nonce + bf_offsets[i]; + + if (test_nonce(work, noffset)) { + submit_tested_work(thr, work); + return true; + } + } + return false; +} + +/* Currently really only supports 2 chips, so chip_n can only be 0 or 1 */ +bool libbitfury_sendHashData(struct thr_info *thr, struct cgpu_info *bitfury, + struct bitfury_info *info, int chip_n) +{ + unsigned newbuf[17]; + unsigned *oldbuf = &info->oldbuf[17 * chip_n]; + struct bitfury_payload *p = &info->payload[chip_n]; + unsigned int localvec[20]; + + /* Programming next value */ + memcpy(localvec, p, 20 * 4); + ms3steps(localvec); + + spi_clear_buf(info); + 
spi_add_break(info); + spi_add_fasync(info, chip_n); + spi_add_data(info, 0x3000, (void*)localvec, 19 * 4); + if (!info->spi_txrx(bitfury, info)) + return false; + + memcpy(newbuf, info->spibuf + 4 + chip_n, 17 * 4); + + info->job_switched[chip_n] = newbuf[16] != oldbuf[16]; + + if (likely(info->second_run[chip_n])) { + if (info->job_switched[chip_n]) { + int i; + + for (i = 0; i < 16; i++) { + if (oldbuf[i] != newbuf[i] && info->owork[chip_n]) { + uint32_t nonce; //possible nonce + + nonce = decnonce(newbuf[i]); + if (bitfury_checkresults(thr, info->owork[chip_n], nonce)) { + info->submits[chip_n]++; + info->nonces++; + } + } + } + memcpy(oldbuf, newbuf, 17 * 4); + } + } else + info->second_run[chip_n] = true; + + cgsleep_ms(BITFURY_REFRESH_DELAY); + + return true; +} diff --git a/libbitfury.h b/libbitfury.h new file mode 100644 index 0000000..ad89632 --- /dev/null +++ b/libbitfury.h @@ -0,0 +1,34 @@ +/* + * Copyright 2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#ifndef LIBBITFURY_H +#define LIBBITFURY_H +#include "miner.h" +#include "driver-bitfury.h" + +void ms3steps(uint32_t *p); +uint32_t decnonce(uint32_t in); +void bitfury_work_to_payload(struct bitfury_payload *p, struct work *work); +void spi_config_reg(struct bitfury_info *info, int cfgreg, int ena); +void spi_set_freq(struct bitfury_info *info); +void spi_send_conf(struct bitfury_info *info); +void spi_send_init(struct bitfury_info *info); +void spi_clear_buf(struct bitfury_info *info); +void spi_add_buf(struct bitfury_info *info, const void *buf, const int sz); +void spi_add_break(struct bitfury_info *info); +void spi_add_fasync(struct bitfury_info *info, int n); +void spi_add_data(struct bitfury_info *info, uint16_t addr, const void *buf, int len); +bool spi_reset(struct cgpu_info *bitfury, struct bitfury_info *info); +bool mcp_spi_txrx(struct cgpu_info *bitfury, struct bitfury_info *info); +bool ftdi_spi_txrx(struct cgpu_info *bitfury, struct bitfury_info *info); +bool bitfury_checkresults(struct thr_info *thr, struct work *work, uint32_t nonce); +bool libbitfury_sendHashData(struct thr_info *thr, struct cgpu_info *bitfury, + struct bitfury_info *info, int chip_n); + +#endif /* LIBBITFURY_H */ diff --git a/linux-usb-cgminer b/linux-usb-cgminer new file mode 100644 index 0000000..f3d3b54 --- /dev/null +++ b/linux-usb-cgminer @@ -0,0 +1,305 @@ +How to setup a cgminer using xubuntu 11.04 live on a USB + +The master version of this document is here: + https://github.com/kanoi/linux-usb-cgminer + +The actual file is: + https://github.com/kanoi/linux-usb-cgminer/blob/master/linux-usb-cgminer + +The copy in cgminer (check to make sure it isn't older) is: + https://github.com/ckolivas/cgminer/blob/master/linux-usb-cgminer + +The original old verion on bitcointalk is: + https://bitcointalk.org/index.php?topic=28402.msg426741#msg426741 + +======== + +I have said to select English for the install process for 2 reasons: +1) I don't know any other spoken language 
very well +and +2) I'm not sure what problems installing under a different language +might cause (it will probably cause no problems but I don't know) + +Software +======== +Short hardware comment: +Your mining computer doesn't need any HDD or CD/DVD/BD as long as it has at +least 2GB of RAM, can boot USB, has some network connection to the internet +and of course a reasonable mining ATI graphics card +... Or you can boot a windows PC with the USB to only do mining ... and ignore +the system HDD ... wasting energy running the HDD (roughly 10 Watts per HDD) :) + +If you wish to install to an HDD instead of a USB, + see the changes to the instructions at the end + +To create the USB, you need of course a 4GB USB and temporarily need a PC +with a CD (or DVD/BD) writer, a USB port and of course an internet +connection to the PC + +1) Download the xubuntu 11.04 desktop live CD iso for amd64 + ( look here for mirrors: http://www.xubuntu.org/getubuntu ) + +2) Burn it to CD then boot that temporarily on any PC with a CD/DVD/BD and + a USB port (this and the next 2 steps won't affect that PC) + Select "English" then select "Try Xubuntu without installing" + and wait for the desktop to appear + (this happens by default if you wait for the timeouts) + +3) Plug in your 4GB USB device and it should appear on the desktop - you can + leave its contents as long as there is at least 2.8GB free + +4) Now run "Startup Disk Creator" in "Applications->System" + (the system menu is the little rat in the top left corner) + +(if you have no mouse you can get the menu with and navigate +the menu with the arrow keys and key) + +From here select the boot CD as the "Source" and the USB as the "Disk to use" +lastly move the slider to 2GB for reserved extra space + +The 2GB should be enough for modifications + +Click: "Make Install Disk" +After about 10-15 minutes you have a base xubuntu 11.04 boot USB +(you can shut down this computer now) + +5) Boot your cgminer PC with this USB stick, select 
"English" + then select "Try Xubuntu without installing" and wait for the desktop to + appear (this happens by default if you wait for the timeouts) + +6) Start a terminal + "Applications->Accessories->Terminal Emulator" + +7) sudo apt-get install openssh-server screen + + if you have a problem here then it's probably coz the internet isn't + available ... sort that out by reading elsewhere about routers etc + +8) sudo apt-get install fglrx fglrx-amdcccle fglrx-dev + sudo sync + sudo shutdown -r now + +N.B. always do a "sudo sync" and wait for it to finish every time before +shutting down the PC to ensure all data is written to the USB + +9) sudo aticonfig --lsa + this lists your ATI cards so you can see them + sudo aticonfig --adapter=all --odgt + this checks it can access all the cards ... + +10) sudo aticonfig --adapter=all --initial + this gets an error - no idea why but the xorg.conf is OK + sudo sync + sudo shutdown -r now + +11) sudo aticonfig --adapter=all --odgt + this checks it can access all the cards ... 
+ +12) get AMD-APP-SDK-v2.4-lnx64.tgz from + http://developer.amd.com/sdks/amdappsdk/downloads/pages/default.aspx + ( http://developer.amd.com/Downloads/AMD-APP-SDK-v2.4-lnx64.tgz ) + + sudo su + cd /opt + (replace /home/ubuntu/ with wherever you put the file: ) + tar -xvzf /home/ubuntu/AMD-APP-SDK-v2.4-lnx64.tgz + + cd AMD-APP-SDK-v2.4-lnx64/ + cp -pv lib/x86_64/* /usr/lib/ + rsync -avl include/CL/ /usr/include/CL/ + tar -xvzf icd-registration.tgz + rsync -avl etc/OpenCL/ /etc/OpenCL/ + ldconfig + sync + shutdown -r now + + You now have an OpenCL enabled xubuntu + +13) cgminer: + sudo apt-get install curl + + get the binary linux cgminer + (see the bitcoin forum cgminer thread for where to get it) + https://bitcointalk.org/index.php?topic=28402.0 + + ./cgminer -n + this shows you the GPU's it found on your PC + See further below if you get an error regarding libtinfo.so.5 + +14) An OC option: + This is no longer needed since cgminer 2.* includes OC, however: + + sudo apt-get install libwxbase2.8-0 libwxgtk2.8-0 + + http://sourceforge.net/projects/amdovdrvctrl/ + for an Over/underclocking application and get the file listed below then: + sudo dpkg -i amdoverdrivectrl_1.2.1_amd64.deb + +15) set the screen saver to ONLY blank ... + + Move the mouse to the bottom of the screen and you see a set of icons like + on an Apple PC + Click on Settings, then in the Settings window "Screensaver" + Set "Mode:" to "Blank Screen Only" + +16) apt-get install ntpd + An accurate clock is always a good idea :) + +17) if you wish to ssh into the box you must set a password + to do this you simply have to be logged into it at the screen and type + + sudo passwd ubuntu + + it will prompt you (twice) to enter a password for the ubuntu account + + +Initial setup complete. 
+ +======== + +If you want to SSH into the machine and run cgminer: + From a terminal on the miner display each time after you boot: + xhost + + + 'xhost +' isn't needed if you ssh into the machine with the same + username that the GUI boots into (which is 'ubuntu' in this case) + +Then after you ssh into the machine: + export DISPLAY=:0 +before running cgminer + +Also note, that you should force the screen to blank when mining if +the ATI card is displaying the screen (using the screen saver +application menu) +In my case it takes away 50Mh/s when the screen isn't blanked +It will auto blank - but make sure the blank is of course just blank +as mentioned above at 15) + + +This is of course just the basics ... but it should get you a computer +up and running and able to run cgminer + +======== + +You should keep an eye on USB disk space +The system logger writes log files in the /var/log/ directory +The two main ones that grow large are 'kern.log' and 'syslog' +If you want to keep them, save them away to some other computer +When space is low, just delete them e.g. 
+ + sudo rm -i /var/log/syslog + sudo rm -i /var/log/kern.log + +The 'df' command will show you the current space e.g.: + + sudo df + +Filesystem 1K-blocks Used Available Use% Mounted on +aufs 2099420 892024 1100748 45% / +none 1015720 628 1015092 1% /dev +/dev/sda1 3909348 2837248 1072100 73% /cdrom +/dev/loop0 670848 670848 0 100% /rofs +none 1023772 136 1023636 1% /dev/shm +tmpfs 1023772 16 1023756 1% /tmp +none 1023772 124 1023648 1% /var/run +none 1023772 0 1023772 0% /var/lock + + +This shows the 2GB space allocated when you setup the USB as '/' (aufs) +In this example, it's currently 45% full with almost 1.1GB of free space + +======== + +The latest version (2.0.8) of cgminer is built with 11.10 (not 11.04) +If you get the following error when running the prebuilt version in 11.04: + + ./cgminer: error while loading shared libraries: libtinfo.so.5: cannot open shared object file: No such file or directory + +The fix is to simply link the old curses library to the new name e.g.: + + cd /lib64/ + sudo ln -s libncurses.so.5 libtinfo.so.5 + +======== + +If you wish to install to an HDD instead of a USB: +-------------------------------------------------- + +As per before: + +1) Download the xubuntu 11.04 desktop live CD iso for amd64 + ( look here for mirrors: http://www.xubuntu.org/getubuntu ) + +Then: + +2) Burn it to CD then boot that on your new mining PC + Select "English" then select "Install Xubuntu" + (you have 30 seconds to do this) + +3) When the Install window comes up - again select "English" and click "Forward" + +4) The next page will show you if you meet certain install requirements + (make sure you do meet them all) + Don't select the download option + The 3rd party option isn't needed for mining so ignore that also + + Click "Forward" + +5) With "Allocate drive space" it's probably easiest to say to use the + "Erase" option. + + This is just for mining right? 
:) + + However, if you have anything on the HDD that you want to keep - the + "Erase" install process will delete it - so back it up (quit the install) + Also make sure there are no OTHER HDD attached that it may erase also + i.e. only have attached the one HDD that you want to install onto unless + you know exactly what you are doing + + If you see the "Install Xubuntu 11.04 alongside 'something'" then that + just means that the HDD wasn't blank. + If you want to try this option - do that yourself and then skip to step + 7) below when you get to that. + + There are plenty of other options available if you select "Something else" + but I'm not going to go into all the details here other than to say that + my preferred partitioning is: /boot = 1GB = ext2, swap = twice memory size, + / = 100GB = ext3 and the rest: /extra = ext3 + + Click "Forward" + +6) If you selected "Erase" then it allows you to choose the drive to install to + Then click "Install Now" + +7) "Where are you?" sort that out then click "Forward" + +8) "Keyboard layout" sort that out (use the default) then click "Forward" + +9) "Who are you?" The important one here is "Pick a username:" because that's + the name you will need to ssh into, to access it remotely (and of course + the "Choose a Password" you set) + + If you set the "username" to anything but "ubuntu" then: wherever in this + document I have mentioned the username "ubuntu" you must of course use the + username you chose here instead of "ubuntu" + + Important: set it to "log in automatically" if you ever want to be able + to start cgminer without being in front of the computer since 'X' must + be running to use cgminer properly + That does of course mean that the computer isn't secure from anyone who + has access to it - but then again no computer that can automatically + reboot is secure from anyone who has access to the actual computer itself + + Then click "Forward" + +10) Of course when it completes click on "Restart Now" + ... 
and remove the Xubuntu CD when it asks you + +11) Wait for it to finish rebooting ... and it will auto login + (unless you didn't do step 9) "Important:") + +12) After it logs in, an upgrade popup for 11.10 (or later) will appear + Select "Don't Upgrade" + +13) Now go to step 6) of the USB script above for what to do next and that + covers everything else needed diff --git a/logging.c b/logging.c new file mode 100644 index 0000000..c54cbef --- /dev/null +++ b/logging.c @@ -0,0 +1,123 @@ +/* + * Copyright 2011-2012 Con Kolivas + * Copyright 2013 Andrew Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include + +#include "logging.h" +#include "miner.h" + +bool opt_debug = false; +bool opt_log_output = false; + +/* per default priorities higher than LOG_NOTICE are logged */ +int opt_log_level = LOG_NOTICE; +FILE * g_log_file = NULL; + +bool g_logfile_enable = false; +char g_logfile_path[256] = {0}; +char g_logfile_openflag[32] = {0}; + +static void my_log_curses(int prio, const char *datetime, const char *str, bool force) +{ + if (opt_quiet && prio != LOG_ERR) + return; + + /* Mutex could be locked by dead thread on shutdown so forcelog will + * invalidate any console lock status. 
*/ + if (force) { + mutex_trylock(&console_lock); + mutex_unlock(&console_lock); + } +#ifdef HAVE_CURSES + extern bool use_curses; + if (use_curses && log_curses_only(prio, datetime, str)) + ; + else +#endif + { + mutex_lock(&console_lock); + printf("%s%s%s", datetime, str, " \n"); + mutex_unlock(&console_lock); + } +} + +/* high-level logging function, based on global opt_log_level */ + +/* + * log function + */ +void _applog(int prio, const char *str, bool force) +{ +#ifdef HAVE_SYSLOG_H + if (use_syslog) { + syslog(LOG_LOCAL0 | prio, "%s", str); + } +#else + if (0) {} +#endif + else { + char datetime[64]; + struct timeval tv = {0, 0}; + struct tm *tm; + + cgtime(&tv); + + const time_t tmp_time = tv.tv_sec; + tm = localtime(&tmp_time); + + snprintf(datetime, sizeof(datetime), " [%d-%02d-%02d %02d:%02d:%02d] ", + tm->tm_year + 1900, + tm->tm_mon + 1, + tm->tm_mday, + tm->tm_hour, + tm->tm_min, + tm->tm_sec); + + /* Only output to stderr if it's not going to the screen as well */ + if (!isatty(fileno((FILE *)stderr))) { + fprintf(stderr, "%s%s\n", datetime, str); /* atomic write to stderr */ + fflush(stderr); + } + if(g_logfile_enable) { + if(!g_log_file) { + g_log_file = fopen(g_logfile_path, g_logfile_openflag); + } + if(g_log_file) { + fwrite(datetime, strlen(datetime), 1, g_log_file); + fwrite(str, strlen(str), 1, g_log_file); + fwrite("\n", 1, 1, g_log_file); + fflush(g_log_file); + } + } + my_log_curses(prio, datetime, str, force); + } +} + +void _simplelog(int prio, const char *str, bool force) +{ +#ifdef HAVE_SYSLOG_H + if (use_syslog) { + syslog(LOG_LOCAL0 | prio, "%s", str); + } +#else + if (0) {} +#endif + else { + /* Only output to stderr if it's not going to the screen as well */ + if (!isatty(fileno((FILE *)stderr))) { + fprintf(stderr, "%s\n", str); /* atomic write to stderr */ + fflush(stderr); + } + + my_log_curses(prio, "", str, force); + } +} diff --git a/logging.h b/logging.h new file mode 100644 index 0000000..4483098 --- /dev/null +++ 
b/logging.h @@ -0,0 +1,130 @@ +#ifndef __LOGGING_H__ +#define __LOGGING_H__ + +#include "config.h" +#include +#include + +#ifdef HAVE_SYSLOG_H +#include +#else +enum { + LOG_ERR, + LOG_WARNING, + LOG_NOTICE, + LOG_INFO, + LOG_DEBUG, +}; +#endif + +/* debug flags */ +extern bool opt_debug; +extern bool opt_log_output; +extern bool opt_realquiet; +extern bool want_per_device_stats; + +/* global log_level, messages with lower or equal prio are logged */ +extern int opt_log_level; + +#define LOGBUFSIZ 2048 + +extern void _applog(int prio, const char *str, bool force); +extern void _simplelog(int prio, const char *str, bool force); + +#define IN_FMT_FFL " in %s %s():%d" + +#define applog(prio, fmt, ...) do { \ + if (opt_debug || prio != LOG_DEBUG) { \ + if (use_syslog || opt_log_output || prio <= opt_log_level) { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _applog(prio, tmp42, false); \ + } \ + } \ +} while (0) + +#define simplelog(prio, fmt, ...) do { \ + if (opt_debug || prio != LOG_DEBUG) { \ + if (use_syslog || opt_log_output || prio <= opt_log_level) { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _simplelog(prio, tmp42, false); \ + } \ + } \ +} while (0) + +#define applogsiz(prio, _SIZ, fmt, ...) do { \ + if (opt_debug || prio != LOG_DEBUG) { \ + if (use_syslog || opt_log_output || prio <= opt_log_level) { \ + char tmp42[_SIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _applog(prio, tmp42, false); \ + } \ + } \ +} while (0) + +#define forcelog(prio, fmt, ...) do { \ + if (opt_debug || prio != LOG_DEBUG) { \ + if (use_syslog || opt_log_output || prio <= opt_log_level) { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _applog(prio, tmp42, true); \ + } \ + } \ +} while (0) + +#define quit(status, fmt, ...) 
do { \ + if (fmt) { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _applog(LOG_ERR, tmp42, true); \ + } \ + _quit(status); \ +} while (0) + +#define early_quit(status, fmt, ...) do { \ + if (fmt) { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _applog(LOG_ERR, tmp42, true); \ + } \ + __quit(status, false); \ +} while (0) + +#define quithere(status, fmt, ...) do { \ + if (fmt) { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt IN_FMT_FFL, \ + ##__VA_ARGS__, __FILE__, __func__, __LINE__); \ + _applog(LOG_ERR, tmp42, true); \ + } \ + _quit(status); \ +} while (0) + +#define quitfrom(status, _file, _func, _line, fmt, ...) do { \ + if (fmt) { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt IN_FMT_FFL, \ + ##__VA_ARGS__, _file, _func, _line); \ + _applog(LOG_ERR, tmp42, true); \ + } \ + _quit(status); \ +} while (0) + +#ifdef HAVE_CURSES + +#define wlog(fmt, ...) do { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _wlog(tmp42); \ +} while (0) + +#define wlogprint(fmt, ...) do { \ + char tmp42[LOGBUFSIZ]; \ + snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ + _wlogprint(tmp42); \ +} while (0) + +#endif + +#endif /* __LOGGING_H__ */ diff --git a/m4/00gnulib.m4 b/m4/00gnulib.m4 new file mode 100644 index 0000000..7feed46 --- /dev/null +++ b/m4/00gnulib.m4 @@ -0,0 +1,30 @@ +# 00gnulib.m4 serial 2 +dnl Copyright (C) 2009-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl This file must be named something that sorts before all other +dnl gnulib-provided .m4 files. It is needed until such time as we can +dnl assume Autoconf 2.64, with its improved AC_DEFUN_ONCE semantics. 
+ +# AC_DEFUN_ONCE([NAME], VALUE) +# ---------------------------- +# Define NAME to expand to VALUE on the first use (whether by direct +# expansion, or by AC_REQUIRE), and to nothing on all subsequent uses. +# Avoid bugs in AC_REQUIRE in Autoconf 2.63 and earlier. This +# definition is slower than the version in Autoconf 2.64, because it +# can only use interfaces that existed since 2.59; but it achieves the +# same effect. Quoting is necessary to avoid confusing Automake. +m4_version_prereq([2.63.263], [], +[m4_define([AC][_DEFUN_ONCE], + [AC][_DEFUN([$1], + [AC_REQUIRE([_gl_DEFUN_ONCE([$1])], + [m4_indir([_gl_DEFUN_ONCE([$1])])])])]dnl +[AC][_DEFUN([_gl_DEFUN_ONCE([$1])], [$2])])]) + +# gl_00GNULIB +# ----------- +# Witness macro that this file has been included. Needed to force +# Automake to include this file prior to all other gnulib .m4 files. +AC_DEFUN([gl_00GNULIB]) diff --git a/m4/extensions.m4 b/m4/extensions.m4 new file mode 100644 index 0000000..1330503 --- /dev/null +++ b/m4/extensions.m4 @@ -0,0 +1,118 @@ +# serial 9 -*- Autoconf -*- +# Enable extensions on systems that normally disable them. + +# Copyright (C) 2003, 2006-2011 Free Software Foundation, Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This definition of AC_USE_SYSTEM_EXTENSIONS is stolen from CVS +# Autoconf. Perhaps we can remove this once we can assume Autoconf +# 2.62 or later everywhere, but since CVS Autoconf mutates rapidly +# enough in this area it's likely we'll need to redefine +# AC_USE_SYSTEM_EXTENSIONS for quite some time. 
+ +# If autoconf reports a warning +# warning: AC_COMPILE_IFELSE was called before AC_USE_SYSTEM_EXTENSIONS +# or warning: AC_RUN_IFELSE was called before AC_USE_SYSTEM_EXTENSIONS +# the fix is +# 1) to ensure that AC_USE_SYSTEM_EXTENSIONS is never directly invoked +# but always AC_REQUIREd, +# 2) to ensure that for each occurrence of +# AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) +# or +# AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) +# the corresponding gnulib module description has 'extensions' among +# its dependencies. This will ensure that the gl_USE_SYSTEM_EXTENSIONS +# invocation occurs in gl_EARLY, not in gl_INIT. + +# AC_USE_SYSTEM_EXTENSIONS +# ------------------------ +# Enable extensions on systems that normally disable them, +# typically due to standards-conformance issues. +# Remember that #undef in AH_VERBATIM gets replaced with #define by +# AC_DEFINE. The goal here is to define all known feature-enabling +# macros, then, if reports of conflicts are made, disable macros that +# cause problems on some platforms (such as __EXTENSIONS__). +AC_DEFUN_ONCE([AC_USE_SYSTEM_EXTENSIONS], +[AC_BEFORE([$0], [AC_COMPILE_IFELSE])dnl +AC_BEFORE([$0], [AC_RUN_IFELSE])dnl + + AC_REQUIRE([AC_CANONICAL_HOST]) + + AC_CHECK_HEADER([minix/config.h], [MINIX=yes], [MINIX=]) + if test "$MINIX" = yes; then + AC_DEFINE([_POSIX_SOURCE], [1], + [Define to 1 if you need to in order for `stat' and other + things to work.]) + AC_DEFINE([_POSIX_1_SOURCE], [2], + [Define to 2 if the system does not provide POSIX.1 features + except with this defined.]) + AC_DEFINE([_MINIX], [1], + [Define to 1 if on MINIX.]) + fi + + dnl HP-UX 11.11 defines mbstate_t only if _XOPEN_SOURCE is defined to 500, + dnl regardless of whether the flags -Ae or _D_HPUX_SOURCE=1 are already + dnl provided. + case "$host_os" in + hpux*) + AC_DEFINE([_XOPEN_SOURCE], [500], + [Define to 500 only on HP-UX.]) + ;; + esac + + AH_VERBATIM([__EXTENSIONS__], +[/* Enable extensions on AIX 3, Interix. 
*/ +#ifndef _ALL_SOURCE +# undef _ALL_SOURCE +#endif +/* Enable GNU extensions on systems that have them. */ +#ifndef _GNU_SOURCE +# undef _GNU_SOURCE +#endif +/* Enable threading extensions on Solaris. */ +#ifndef _POSIX_PTHREAD_SEMANTICS +# undef _POSIX_PTHREAD_SEMANTICS +#endif +/* Enable extensions on HP NonStop. */ +#ifndef _TANDEM_SOURCE +# undef _TANDEM_SOURCE +#endif +/* Enable general extensions on Solaris. */ +#ifndef __EXTENSIONS__ +# undef __EXTENSIONS__ +#endif +]) + AC_CACHE_CHECK([whether it is safe to define __EXTENSIONS__], + [ac_cv_safe_to_define___extensions__], + [AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM([[ +# define __EXTENSIONS__ 1 + ]AC_INCLUDES_DEFAULT])], + [ac_cv_safe_to_define___extensions__=yes], + [ac_cv_safe_to_define___extensions__=no])]) + test $ac_cv_safe_to_define___extensions__ = yes && + AC_DEFINE([__EXTENSIONS__]) + AC_DEFINE([_ALL_SOURCE]) + AC_DEFINE([_GNU_SOURCE]) + AC_DEFINE([_POSIX_PTHREAD_SEMANTICS]) + AC_DEFINE([_TANDEM_SOURCE]) +])# AC_USE_SYSTEM_EXTENSIONS + +# gl_USE_SYSTEM_EXTENSIONS +# ------------------------ +# Enable extensions on systems that normally disable them, +# typically due to standards-conformance issues. +AC_DEFUN_ONCE([gl_USE_SYSTEM_EXTENSIONS], +[ + dnl Require this macro before AC_USE_SYSTEM_EXTENSIONS. + dnl gnulib does not need it. But if it gets required by third-party macros + dnl after AC_USE_SYSTEM_EXTENSIONS is required, autoconf 2.62..2.63 emit a + dnl warning: "AC_COMPILE_IFELSE was called before AC_USE_SYSTEM_EXTENSIONS". + dnl Note: We can do this only for one of the macros AC_AIX, AC_GNU_SOURCE, + dnl AC_MINIX. If people still use AC_AIX or AC_MINIX, they are out of luck. + AC_REQUIRE([AC_GNU_SOURCE]) + + AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) +]) diff --git a/m4/gnulib-cache.m4 b/m4/gnulib-cache.m4 new file mode 100644 index 0000000..09970a0 --- /dev/null +++ b/m4/gnulib-cache.m4 @@ -0,0 +1,38 @@ +# Copyright (C) 2002-2011 Free Software Foundation, Inc. 
+# +# This file is free software, distributed under the terms of the GNU +# General Public License. As a special exception to the GNU General +# Public License, this file may be distributed as part of a program +# that contains a configuration script generated by Autoconf, under +# the same distribution terms as the rest of that program. +# +# Generated by gnulib-tool. +# +# This file represents the specification of how gnulib-tool is used. +# It acts as a cache: It is written and read by gnulib-tool. +# In projects that use version control, this file is meant to be put under +# version control, like the configure.ac and various Makefile.am files. + + +# Specification in the form of a command-line invocation: +# gnulib-tool --import --dir=. --lib=libgnu --source-base=lib --m4-base=m4 --doc-base=doc --tests-base=tests --aux-dir=. --no-conditional-dependencies --no-libtool --macro-prefix=gl --no-vc-files memmem sigaction signal + +# Specification in the form of a few gnulib-tool.m4 macro invocations: +gl_LOCAL_DIR([]) +gl_MODULES([ + memmem + sigaction + signal +]) +gl_AVOID([]) +gl_SOURCE_BASE([lib]) +gl_M4_BASE([m4]) +gl_PO_BASE([]) +gl_DOC_BASE([doc]) +gl_TESTS_BASE([tests]) +gl_LIB([libgnu]) +gl_MAKEFILE_NAME([]) +gl_MACRO_PREFIX([gl]) +gl_PO_DOMAIN([]) +gl_WITNESS_C_DOMAIN([]) +gl_VC_FILES([false]) diff --git a/m4/gnulib-common.m4 b/m4/gnulib-common.m4 new file mode 100644 index 0000000..843efe0 --- /dev/null +++ b/m4/gnulib-common.m4 @@ -0,0 +1,282 @@ +# gnulib-common.m4 serial 26 +dnl Copyright (C) 2007-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# gl_COMMON +# is expanded unconditionally through gnulib-tool magic. +AC_DEFUN([gl_COMMON], [ + dnl Use AC_REQUIRE here, so that the code is expanded once only. 
+ AC_REQUIRE([gl_00GNULIB]) + AC_REQUIRE([gl_COMMON_BODY]) +]) +AC_DEFUN([gl_COMMON_BODY], [ + AH_VERBATIM([isoc99_inline], +[/* Work around a bug in Apple GCC 4.0.1 build 5465: In C99 mode, it supports + the ISO C 99 semantics of 'extern inline' (unlike the GNU C semantics of + earlier versions), but does not display it by setting __GNUC_STDC_INLINE__. + __APPLE__ && __MACH__ test for MacOS X. + __APPLE_CC__ tests for the Apple compiler and its version. + __STDC_VERSION__ tests for the C99 mode. */ +#if defined __APPLE__ && defined __MACH__ && __APPLE_CC__ >= 5465 && !defined __cplusplus && __STDC_VERSION__ >= 199901L && !defined __GNUC_STDC_INLINE__ +# define __GNUC_STDC_INLINE__ 1 +#endif]) + AH_VERBATIM([unused_parameter], +[/* Define as a marker that can be attached to declarations that might not + be used. This helps to reduce warnings, such as from + GCC -Wunused-parameter. */ +#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 7) +# define _GL_UNUSED __attribute__ ((__unused__)) +#else +# define _GL_UNUSED +#endif +/* The name _UNUSED_PARAMETER_ is an earlier spelling, although the name + is a misnomer outside of parameter lists. */ +#define _UNUSED_PARAMETER_ _GL_UNUSED + +/* The __pure__ attribute was added in gcc 2.96. */ +#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96) +# define _GL_ATTRIBUTE_PURE __attribute__ ((__pure__)) +#else +# define _GL_ATTRIBUTE_PURE /* empty */ +#endif + +/* The __const__ attribute was added in gcc 2.95. */ +#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95) +# define _GL_ATTRIBUTE_CONST __attribute__ ((__const__)) +#else +# define _GL_ATTRIBUTE_CONST /* empty */ +#endif +]) + dnl Preparation for running test programs: + dnl Tell glibc to write diagnostics from -D_FORTIFY_SOURCE=2 to stderr, not + dnl to /dev/tty, so they can be redirected to log files. Such diagnostics + dnl arise e.g., in the macros gl_PRINTF_DIRECTIVE_N, gl_SNPRINTF_DIRECTIVE_N. 
+ LIBC_FATAL_STDERR_=1 + export LIBC_FATAL_STDERR_ +]) + +# gl_MODULE_INDICATOR_CONDITION +# expands to a C preprocessor expression that evaluates to 1 or 0, depending +# whether a gnulib module that has been requested shall be considered present +# or not. +m4_define([gl_MODULE_INDICATOR_CONDITION], [1]) + +# gl_MODULE_INDICATOR_SET_VARIABLE([modulename]) +# sets the shell variable that indicates the presence of the given module to +# a C preprocessor expression that will evaluate to 1. +AC_DEFUN([gl_MODULE_INDICATOR_SET_VARIABLE], +[ + gl_MODULE_INDICATOR_SET_VARIABLE_AUX( + [GNULIB_[]m4_translit([[$1]], + [abcdefghijklmnopqrstuvwxyz./-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])], + [gl_MODULE_INDICATOR_CONDITION]) +]) + +# gl_MODULE_INDICATOR_SET_VARIABLE_AUX([variable]) +# modifies the shell variable to include the gl_MODULE_INDICATOR_CONDITION. +# The shell variable's value is a C preprocessor expression that evaluates +# to 0 or 1. +AC_DEFUN([gl_MODULE_INDICATOR_SET_VARIABLE_AUX], +[ + m4_if(m4_defn([gl_MODULE_INDICATOR_CONDITION]), [1], + [ + dnl Simplify the expression VALUE || 1 to 1. + $1=1 + ], + [gl_MODULE_INDICATOR_SET_VARIABLE_AUX_OR([$1], + [gl_MODULE_INDICATOR_CONDITION])]) +]) + +# gl_MODULE_INDICATOR_SET_VARIABLE_AUX_OR([variable], [condition]) +# modifies the shell variable to include the given condition. The shell +# variable's value is a C preprocessor expression that evaluates to 0 or 1. +AC_DEFUN([gl_MODULE_INDICATOR_SET_VARIABLE_AUX_OR], +[ + dnl Simplify the expression 1 || CONDITION to 1. + if test "$[]$1" != 1; then + dnl Simplify the expression 0 || CONDITION to CONDITION. + if test "$[]$1" = 0; then + $1=$2 + else + $1="($[]$1 || $2)" + fi + fi +]) + +# gl_MODULE_INDICATOR([modulename]) +# defines a C macro indicating the presence of the given module +# in a location where it can be used. 
+# | Value | Value | +# | in lib/ | in tests/ | +# --------------------------------------------+---------+-----------+ +# Module present among main modules: | 1 | 1 | +# --------------------------------------------+---------+-----------+ +# Module present among tests-related modules: | 0 | 1 | +# --------------------------------------------+---------+-----------+ +# Module not present at all: | 0 | 0 | +# --------------------------------------------+---------+-----------+ +AC_DEFUN([gl_MODULE_INDICATOR], +[ + AC_DEFINE_UNQUOTED([GNULIB_]m4_translit([[$1]], + [abcdefghijklmnopqrstuvwxyz./-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ___]), + [gl_MODULE_INDICATOR_CONDITION], + [Define to a C preprocessor expression that evaluates to 1 or 0, + depending whether the gnulib module $1 shall be considered present.]) +]) + +# gl_MODULE_INDICATOR_FOR_TESTS([modulename]) +# defines a C macro indicating the presence of the given module +# in lib or tests. This is useful to determine whether the module +# should be tested. +# | Value | Value | +# | in lib/ | in tests/ | +# --------------------------------------------+---------+-----------+ +# Module present among main modules: | 1 | 1 | +# --------------------------------------------+---------+-----------+ +# Module present among tests-related modules: | 1 | 1 | +# --------------------------------------------+---------+-----------+ +# Module not present at all: | 0 | 0 | +# --------------------------------------------+---------+-----------+ +AC_DEFUN([gl_MODULE_INDICATOR_FOR_TESTS], +[ + AC_DEFINE([GNULIB_TEST_]m4_translit([[$1]], + [abcdefghijklmnopqrstuvwxyz./-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ___]), [1], + [Define to 1 when the gnulib module $1 should be tested.]) +]) + +# gl_ASSERT_NO_GNULIB_POSIXCHECK +# asserts that there will never be a need to #define GNULIB_POSIXCHECK. +# and thereby enables an optimization of configure and config.h. +# Used by Emacs. 
+AC_DEFUN([gl_ASSERT_NO_GNULIB_POSIXCHECK], +[ + dnl Override gl_WARN_ON_USE_PREPARE. + dnl But hide this definition from 'aclocal'. + AC_DEFUN([gl_W][ARN_ON_USE_PREPARE], []) +]) + +# gl_ASSERT_NO_GNULIB_TESTS +# asserts that there will be no gnulib tests in the scope of the configure.ac +# and thereby enables an optimization of config.h. +# Used by Emacs. +AC_DEFUN([gl_ASSERT_NO_GNULIB_TESTS], +[ + dnl Override gl_MODULE_INDICATOR_FOR_TESTS. + AC_DEFUN([gl_MODULE_INDICATOR_FOR_TESTS], []) +]) + +# Test whether exists. +# Set HAVE_FEATURES_H. +AC_DEFUN([gl_FEATURES_H], +[ + AC_CHECK_HEADERS_ONCE([features.h]) + if test $ac_cv_header_features_h = yes; then + HAVE_FEATURES_H=1 + else + HAVE_FEATURES_H=0 + fi + AC_SUBST([HAVE_FEATURES_H]) +]) + +# m4_foreach_w +# is a backport of autoconf-2.59c's m4_foreach_w. +# Remove this macro when we can assume autoconf >= 2.60. +m4_ifndef([m4_foreach_w], + [m4_define([m4_foreach_w], + [m4_foreach([$1], m4_split(m4_normalize([$2]), [ ]), [$3])])]) + +# AS_VAR_IF(VAR, VALUE, [IF-MATCH], [IF-NOT-MATCH]) +# ---------------------------------------------------- +# Backport of autoconf-2.63b's macro. +# Remove this macro when we can assume autoconf >= 2.64. +m4_ifndef([AS_VAR_IF], +[m4_define([AS_VAR_IF], +[AS_IF([test x"AS_VAR_GET([$1])" = x""$2], [$3], [$4])])]) + +# AC_PROG_MKDIR_P +# is a backport of autoconf-2.60's AC_PROG_MKDIR_P, with a fix +# for interoperability with automake-1.9.6 from autoconf-2.62. +# Remove this macro when we can assume autoconf >= 2.62 or +# autoconf >= 2.60 && automake >= 1.10. +m4_ifdef([AC_PROG_MKDIR_P], [ + dnl For automake-1.9.6 && autoconf < 2.62: Ensure MKDIR_P is AC_SUBSTed. + m4_define([AC_PROG_MKDIR_P], + m4_defn([AC_PROG_MKDIR_P])[ + AC_SUBST([MKDIR_P])])], [ + dnl For autoconf < 2.60: Backport of AC_PROG_MKDIR_P. 
+ AC_DEFUN_ONCE([AC_PROG_MKDIR_P], + [AC_REQUIRE([AM_PROG_MKDIR_P])dnl defined by automake + MKDIR_P='$(mkdir_p)' + AC_SUBST([MKDIR_P])])]) + +# AC_C_RESTRICT +# This definition overrides the AC_C_RESTRICT macro from autoconf 2.60..2.61, +# so that mixed use of GNU C and GNU C++ and mixed use of Sun C and Sun C++ +# works. +# This definition can be removed once autoconf >= 2.62 can be assumed. +m4_if(m4_version_compare(m4_defn([m4_PACKAGE_VERSION]),[2.62]),[-1],[ +AC_DEFUN([AC_C_RESTRICT], +[AC_CACHE_CHECK([for C/C++ restrict keyword], [ac_cv_c_restrict], + [ac_cv_c_restrict=no + # The order here caters to the fact that C++ does not require restrict. + for ac_kw in __restrict __restrict__ _Restrict restrict; do + AC_COMPILE_IFELSE([AC_LANG_PROGRAM( + [[typedef int * int_ptr; + int foo (int_ptr $ac_kw ip) { + return ip[0]; + }]], + [[int s[1]; + int * $ac_kw t = s; + t[0] = 0; + return foo(t)]])], + [ac_cv_c_restrict=$ac_kw]) + test "$ac_cv_c_restrict" != no && break + done + ]) + AH_VERBATIM([restrict], +[/* Define to the equivalent of the C99 'restrict' keyword, or to + nothing if this is not supported. Do not define if restrict is + supported directly. */ +#undef restrict +/* Work around a bug in Sun C++: it does not support _Restrict, even + though the corresponding Sun C compiler does, which causes + "#define restrict _Restrict" in the previous line. Perhaps some future + version of Sun C++ will work with _Restrict; if so, it'll probably + define __RESTRICT, just as Sun C does. */ +#if defined __SUNPRO_CC && !defined __RESTRICT +# define _Restrict +#endif]) + case $ac_cv_c_restrict in + restrict) ;; + no) AC_DEFINE([restrict], []) ;; + *) AC_DEFINE_UNQUOTED([restrict], [$ac_cv_c_restrict]) ;; + esac +]) +]) + +# gl_BIGENDIAN +# is like AC_C_BIGENDIAN, except that it can be AC_REQUIREd. +# Note that AC_REQUIRE([AC_C_BIGENDIAN]) does not work reliably because some +# macros invoke AC_C_BIGENDIAN with arguments. 
+AC_DEFUN([gl_BIGENDIAN], +[ + AC_C_BIGENDIAN +]) + +# gl_CACHE_VAL_SILENT(cache-id, command-to-set-it) +# is like AC_CACHE_VAL(cache-id, command-to-set-it), except that it does not +# output a spurious "(cached)" mark in the midst of other configure output. +# This macro should be used instead of AC_CACHE_VAL when it is not surrounded +# by an AC_MSG_CHECKING/AC_MSG_RESULT pair. +AC_DEFUN([gl_CACHE_VAL_SILENT], +[ + saved_as_echo_n="$as_echo_n" + as_echo_n=':' + AC_CACHE_VAL([$1], [$2]) + as_echo_n="$saved_as_echo_n" +]) diff --git a/m4/gnulib-comp.m4 b/m4/gnulib-comp.m4 new file mode 100644 index 0000000..ba727d7 --- /dev/null +++ b/m4/gnulib-comp.m4 @@ -0,0 +1,268 @@ +# DO NOT EDIT! GENERATED AUTOMATICALLY! +# Copyright (C) 2002-2011 Free Software Foundation, Inc. +# +# This file is free software, distributed under the terms of the GNU +# General Public License. As a special exception to the GNU General +# Public License, this file may be distributed as part of a program +# that contains a configuration script generated by Autoconf, under +# the same distribution terms as the rest of that program. +# +# Generated by gnulib-tool. +# +# This file represents the compiled summary of the specification in +# gnulib-cache.m4. It lists the computed macro invocations that need +# to be invoked from configure.ac. +# In projects that use version control, this file can be treated like +# other built files. + + +# This macro should be invoked from ./configure.ac, in the section +# "Checks for programs", right after AC_PROG_CC, and certainly before +# any checks for libraries, header files, types and library functions. 
+AC_DEFUN([gl_EARLY], +[ + m4_pattern_forbid([^gl_[A-Z]])dnl the gnulib macro namespace + m4_pattern_allow([^gl_ES$])dnl a valid locale name + m4_pattern_allow([^gl_LIBOBJS$])dnl a variable + m4_pattern_allow([^gl_LTLIBOBJS$])dnl a variable + AC_REQUIRE([AC_PROG_RANLIB]) + # Code from module arg-nonnull: + # Code from module c++defs: + # Code from module extensions: + AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) + # Code from module include_next: + # Code from module memchr: + # Code from module memmem: + # Code from module memmem-simple: + # Code from module multiarch: + # Code from module sigaction: + # Code from module signal: + # Code from module sigprocmask: + # Code from module stddef: + # Code from module stdint: + # Code from module string: + # Code from module warn-on-use: +]) + +# This macro should be invoked from ./configure.ac, in the section +# "Check for header files, types and library functions". +AC_DEFUN([gl_INIT], +[ + AM_CONDITIONAL([GL_COND_LIBTOOL], [false]) + gl_cond_libtool=false + gl_libdeps= + gl_ltlibdeps= + gl_m4_base='m4' + m4_pushdef([AC_LIBOBJ], m4_defn([gl_LIBOBJ])) + m4_pushdef([AC_REPLACE_FUNCS], m4_defn([gl_REPLACE_FUNCS])) + m4_pushdef([AC_LIBSOURCES], m4_defn([gl_LIBSOURCES])) + m4_pushdef([gl_LIBSOURCES_LIST], []) + m4_pushdef([gl_LIBSOURCES_DIR], []) + gl_COMMON + gl_source_base='lib' +gl_FUNC_MEMCHR +if test $HAVE_MEMCHR = 0 || test $REPLACE_MEMCHR = 1; then + AC_LIBOBJ([memchr]) + gl_PREREQ_MEMCHR +fi +gl_STRING_MODULE_INDICATOR([memchr]) +gl_FUNC_MEMMEM +if test $HAVE_MEMMEM = 0 || test $REPLACE_MEMMEM = 1; then + AC_LIBOBJ([memmem]) +fi +gl_FUNC_MEMMEM_SIMPLE +if test $HAVE_MEMMEM = 0 || test $REPLACE_MEMMEM = 1; then + AC_LIBOBJ([memmem]) +fi +gl_STRING_MODULE_INDICATOR([memmem]) +gl_MULTIARCH +gl_SIGACTION +if test $HAVE_SIGACTION = 0; then + AC_LIBOBJ([sigaction]) + gl_PREREQ_SIGACTION +fi +gl_SIGNAL_MODULE_INDICATOR([sigaction]) +gl_SIGNAL_H +gl_SIGNALBLOCKING +if test $HAVE_POSIX_SIGNALBLOCKING = 0; then + 
AC_LIBOBJ([sigprocmask]) + gl_PREREQ_SIGPROCMASK +fi +gl_SIGNAL_MODULE_INDICATOR([sigprocmask]) +gl_STDDEF_H +gl_STDINT_H +gl_HEADER_STRING_H + # End of code from modules + m4_ifval(gl_LIBSOURCES_LIST, [ + m4_syscmd([test ! -d ]m4_defn([gl_LIBSOURCES_DIR])[ || + for gl_file in ]gl_LIBSOURCES_LIST[ ; do + if test ! -r ]m4_defn([gl_LIBSOURCES_DIR])[/$gl_file ; then + echo "missing file ]m4_defn([gl_LIBSOURCES_DIR])[/$gl_file" >&2 + exit 1 + fi + done])dnl + m4_if(m4_sysval, [0], [], + [AC_FATAL([expected source file, required through AC_LIBSOURCES, not found])]) + ]) + m4_popdef([gl_LIBSOURCES_DIR]) + m4_popdef([gl_LIBSOURCES_LIST]) + m4_popdef([AC_LIBSOURCES]) + m4_popdef([AC_REPLACE_FUNCS]) + m4_popdef([AC_LIBOBJ]) + AC_CONFIG_COMMANDS_PRE([ + gl_libobjs= + gl_ltlibobjs= + if test -n "$gl_LIBOBJS"; then + # Remove the extension. + sed_drop_objext='s/\.o$//;s/\.obj$//' + for i in `for i in $gl_LIBOBJS; do echo "$i"; done | sed -e "$sed_drop_objext" | sort | uniq`; do + gl_libobjs="$gl_libobjs $i.$ac_objext" + gl_ltlibobjs="$gl_ltlibobjs $i.lo" + done + fi + AC_SUBST([gl_LIBOBJS], [$gl_libobjs]) + AC_SUBST([gl_LTLIBOBJS], [$gl_ltlibobjs]) + ]) + gltests_libdeps= + gltests_ltlibdeps= + m4_pushdef([AC_LIBOBJ], m4_defn([gltests_LIBOBJ])) + m4_pushdef([AC_REPLACE_FUNCS], m4_defn([gltests_REPLACE_FUNCS])) + m4_pushdef([AC_LIBSOURCES], m4_defn([gltests_LIBSOURCES])) + m4_pushdef([gltests_LIBSOURCES_LIST], []) + m4_pushdef([gltests_LIBSOURCES_DIR], []) + gl_COMMON + gl_source_base='tests' +changequote(,)dnl + gltests_WITNESS=IN_`echo "${PACKAGE-$PACKAGE_TARNAME}" | LC_ALL=C tr abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ | LC_ALL=C sed -e 's/[^A-Z0-9_]/_/g'`_GNULIB_TESTS +changequote([, ])dnl + AC_SUBST([gltests_WITNESS]) + gl_module_indicator_condition=$gltests_WITNESS + m4_pushdef([gl_MODULE_INDICATOR_CONDITION], [$gl_module_indicator_condition]) + m4_popdef([gl_MODULE_INDICATOR_CONDITION]) + m4_ifval(gltests_LIBSOURCES_LIST, [ + m4_syscmd([test ! 
-d ]m4_defn([gltests_LIBSOURCES_DIR])[ || + for gl_file in ]gltests_LIBSOURCES_LIST[ ; do + if test ! -r ]m4_defn([gltests_LIBSOURCES_DIR])[/$gl_file ; then + echo "missing file ]m4_defn([gltests_LIBSOURCES_DIR])[/$gl_file" >&2 + exit 1 + fi + done])dnl + m4_if(m4_sysval, [0], [], + [AC_FATAL([expected source file, required through AC_LIBSOURCES, not found])]) + ]) + m4_popdef([gltests_LIBSOURCES_DIR]) + m4_popdef([gltests_LIBSOURCES_LIST]) + m4_popdef([AC_LIBSOURCES]) + m4_popdef([AC_REPLACE_FUNCS]) + m4_popdef([AC_LIBOBJ]) + AC_CONFIG_COMMANDS_PRE([ + gltests_libobjs= + gltests_ltlibobjs= + if test -n "$gltests_LIBOBJS"; then + # Remove the extension. + sed_drop_objext='s/\.o$//;s/\.obj$//' + for i in `for i in $gltests_LIBOBJS; do echo "$i"; done | sed -e "$sed_drop_objext" | sort | uniq`; do + gltests_libobjs="$gltests_libobjs $i.$ac_objext" + gltests_ltlibobjs="$gltests_ltlibobjs $i.lo" + done + fi + AC_SUBST([gltests_LIBOBJS], [$gltests_libobjs]) + AC_SUBST([gltests_LTLIBOBJS], [$gltests_ltlibobjs]) + ]) + LIBGNU_LIBDEPS="$gl_libdeps" + AC_SUBST([LIBGNU_LIBDEPS]) + LIBGNU_LTLIBDEPS="$gl_ltlibdeps" + AC_SUBST([LIBGNU_LTLIBDEPS]) +]) + +# Like AC_LIBOBJ, except that the module name goes +# into gl_LIBOBJS instead of into LIBOBJS. +AC_DEFUN([gl_LIBOBJ], [ + AS_LITERAL_IF([$1], [gl_LIBSOURCES([$1.c])])dnl + gl_LIBOBJS="$gl_LIBOBJS $1.$ac_objext" +]) + +# Like AC_REPLACE_FUNCS, except that the module name goes +# into gl_LIBOBJS instead of into LIBOBJS. +AC_DEFUN([gl_REPLACE_FUNCS], [ + m4_foreach_w([gl_NAME], [$1], [AC_LIBSOURCES(gl_NAME[.c])])dnl + AC_CHECK_FUNCS([$1], , [gl_LIBOBJ($ac_func)]) +]) + +# Like AC_LIBSOURCES, except the directory where the source file is +# expected is derived from the gnulib-tool parameterization, +# and alloca is special cased (for the alloca-opt module). +# We could also entirely rely on EXTRA_lib..._SOURCES. 
+AC_DEFUN([gl_LIBSOURCES], [ + m4_foreach([_gl_NAME], [$1], [ + m4_if(_gl_NAME, [alloca.c], [], [ + m4_define([gl_LIBSOURCES_DIR], [lib]) + m4_append([gl_LIBSOURCES_LIST], _gl_NAME, [ ]) + ]) + ]) +]) + +# Like AC_LIBOBJ, except that the module name goes +# into gltests_LIBOBJS instead of into LIBOBJS. +AC_DEFUN([gltests_LIBOBJ], [ + AS_LITERAL_IF([$1], [gltests_LIBSOURCES([$1.c])])dnl + gltests_LIBOBJS="$gltests_LIBOBJS $1.$ac_objext" +]) + +# Like AC_REPLACE_FUNCS, except that the module name goes +# into gltests_LIBOBJS instead of into LIBOBJS. +AC_DEFUN([gltests_REPLACE_FUNCS], [ + m4_foreach_w([gl_NAME], [$1], [AC_LIBSOURCES(gl_NAME[.c])])dnl + AC_CHECK_FUNCS([$1], , [gltests_LIBOBJ($ac_func)]) +]) + +# Like AC_LIBSOURCES, except the directory where the source file is +# expected is derived from the gnulib-tool parameterization, +# and alloca is special cased (for the alloca-opt module). +# We could also entirely rely on EXTRA_lib..._SOURCES. +AC_DEFUN([gltests_LIBSOURCES], [ + m4_foreach([_gl_NAME], [$1], [ + m4_if(_gl_NAME, [alloca.c], [], [ + m4_define([gltests_LIBSOURCES_DIR], [tests]) + m4_append([gltests_LIBSOURCES_LIST], _gl_NAME, [ ]) + ]) + ]) +]) + +# This macro records the list of files which have been installed by +# gnulib-tool and may be removed by future gnulib-tool invocations. 
+AC_DEFUN([gl_FILE_LIST], [ + build-aux/arg-nonnull.h + build-aux/c++defs.h + build-aux/warn-on-use.h + lib/dummy.c + lib/memchr.c + lib/memchr.valgrind + lib/memmem.c + lib/sig-handler.h + lib/sigaction.c + lib/signal.in.h + lib/sigprocmask.c + lib/stddef.in.h + lib/stdint.in.h + lib/str-two-way.h + lib/string.in.h + m4/00gnulib.m4 + m4/extensions.m4 + m4/gnulib-common.m4 + m4/include_next.m4 + m4/longlong.m4 + m4/memchr.m4 + m4/memmem.m4 + m4/mmap-anon.m4 + m4/multiarch.m4 + m4/onceonly.m4 + m4/sigaction.m4 + m4/signal_h.m4 + m4/signalblocking.m4 + m4/stddef_h.m4 + m4/stdint.m4 + m4/string_h.m4 + m4/warn-on-use.m4 + m4/wchar_t.m4 +]) diff --git a/m4/gnulib-tool.m4 b/m4/gnulib-tool.m4 new file mode 100644 index 0000000..ed41e9d --- /dev/null +++ b/m4/gnulib-tool.m4 @@ -0,0 +1,57 @@ +# gnulib-tool.m4 serial 2 +dnl Copyright (C) 2004-2005, 2009-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl The following macros need not be invoked explicitly. +dnl Invoking them does nothing except to declare default arguments +dnl for "gnulib-tool --import". 
+ +dnl Usage: gl_LOCAL_DIR([DIR]) +AC_DEFUN([gl_LOCAL_DIR], []) + +dnl Usage: gl_MODULES([module1 module2 ...]) +AC_DEFUN([gl_MODULES], []) + +dnl Usage: gl_AVOID([module1 module2 ...]) +AC_DEFUN([gl_AVOID], []) + +dnl Usage: gl_SOURCE_BASE([DIR]) +AC_DEFUN([gl_SOURCE_BASE], []) + +dnl Usage: gl_M4_BASE([DIR]) +AC_DEFUN([gl_M4_BASE], []) + +dnl Usage: gl_PO_BASE([DIR]) +AC_DEFUN([gl_PO_BASE], []) + +dnl Usage: gl_DOC_BASE([DIR]) +AC_DEFUN([gl_DOC_BASE], []) + +dnl Usage: gl_TESTS_BASE([DIR]) +AC_DEFUN([gl_TESTS_BASE], []) + +dnl Usage: gl_WITH_TESTS +AC_DEFUN([gl_WITH_TESTS], []) + +dnl Usage: gl_LIB([LIBNAME]) +AC_DEFUN([gl_LIB], []) + +dnl Usage: gl_LGPL or gl_LGPL([VERSION]) +AC_DEFUN([gl_LGPL], []) + +dnl Usage: gl_MAKEFILE_NAME([FILENAME]) +AC_DEFUN([gl_MAKEFILE_NAME], []) + +dnl Usage: gl_LIBTOOL +AC_DEFUN([gl_LIBTOOL], []) + +dnl Usage: gl_MACRO_PREFIX([PREFIX]) +AC_DEFUN([gl_MACRO_PREFIX], []) + +dnl Usage: gl_PO_DOMAIN([DOMAIN]) +AC_DEFUN([gl_PO_DOMAIN], []) + +dnl Usage: gl_VC_FILES([BOOLEAN]) +AC_DEFUN([gl_VC_FILES], []) diff --git a/m4/include_next.m4 b/m4/include_next.m4 new file mode 100644 index 0000000..b3c7849 --- /dev/null +++ b/m4/include_next.m4 @@ -0,0 +1,244 @@ +# include_next.m4 serial 18 +dnl Copyright (C) 2006-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert and Derek Price. + +dnl Sets INCLUDE_NEXT and PRAGMA_SYSTEM_HEADER. +dnl +dnl INCLUDE_NEXT expands to 'include_next' if the compiler supports it, or to +dnl 'include' otherwise. +dnl +dnl INCLUDE_NEXT_AS_FIRST_DIRECTIVE expands to 'include_next' if the compiler +dnl supports it in the special case that it is the first include directive in +dnl the given file, or to 'include' otherwise. 
+dnl +dnl PRAGMA_SYSTEM_HEADER can be used in files that contain #include_next, +dnl so as to avoid GCC warnings when the gcc option -pedantic is used. +dnl '#pragma GCC system_header' has the same effect as if the file was found +dnl through the include search path specified with '-isystem' options (as +dnl opposed to the search path specified with '-I' options). Namely, gcc +dnl does not warn about some things, and on some systems (Solaris and Interix) +dnl __STDC__ evaluates to 0 instead of to 1. The latter is an undesired side +dnl effect; we are therefore careful to use 'defined __STDC__' or '1' instead +dnl of plain '__STDC__'. +dnl +dnl PRAGMA_COLUMNS can be used in files that override system header files, so +dnl as to avoid compilation errors on HP NonStop systems when the gnulib file +dnl is included by a system header file that does a "#pragma COLUMNS 80" (which +dnl has the effect of truncating the lines of that file and all files that it +dnl includes to 80 columns) and the gnulib file has lines longer than 80 +dnl columns. + +AC_DEFUN([gl_INCLUDE_NEXT], +[ + AC_LANG_PREPROC_REQUIRE() + AC_CACHE_CHECK([whether the preprocessor supports include_next], + [gl_cv_have_include_next], + [rm -rf conftestd1a conftestd1b conftestd2 + mkdir conftestd1a conftestd1b conftestd2 + dnl IBM C 9.0, 10.1 (original versions, prior to the 2009-01 updates) on + dnl AIX 6.1 support include_next when used as first preprocessor directive + dnl in a file, but not when preceded by another include directive. Check + dnl for this bug by including . + dnl Additionally, with this same compiler, include_next is a no-op when + dnl used in a header file that was included by specifying its absolute + dnl file name. Despite these two bugs, include_next is used in the + dnl compiler's . By virtue of the second bug, we need to use + dnl include_next as well in this case. 
+ cat < conftestd1a/conftest.h +#define DEFINED_IN_CONFTESTD1 +#include_next +#ifdef DEFINED_IN_CONFTESTD2 +int foo; +#else +#error "include_next doesn't work" +#endif +EOF + cat < conftestd1b/conftest.h +#define DEFINED_IN_CONFTESTD1 +#include +#include_next +#ifdef DEFINED_IN_CONFTESTD2 +int foo; +#else +#error "include_next doesn't work" +#endif +EOF + cat < conftestd2/conftest.h +#ifndef DEFINED_IN_CONFTESTD1 +#error "include_next test doesn't work" +#endif +#define DEFINED_IN_CONFTESTD2 +EOF + gl_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$gl_save_CPPFLAGS -Iconftestd1b -Iconftestd2" +dnl We intentionally avoid using AC_LANG_SOURCE here. + AC_COMPILE_IFELSE([AC_LANG_DEFINES_PROVIDED[#include ]], + [gl_cv_have_include_next=yes], + [CPPFLAGS="$gl_save_CPPFLAGS -Iconftestd1a -Iconftestd2" + AC_COMPILE_IFELSE([AC_LANG_DEFINES_PROVIDED[#include ]], + [gl_cv_have_include_next=buggy], + [gl_cv_have_include_next=no]) + ]) + CPPFLAGS="$gl_save_CPPFLAGS" + rm -rf conftestd1a conftestd1b conftestd2 + ]) + PRAGMA_SYSTEM_HEADER= + if test $gl_cv_have_include_next = yes; then + INCLUDE_NEXT=include_next + INCLUDE_NEXT_AS_FIRST_DIRECTIVE=include_next + if test -n "$GCC"; then + PRAGMA_SYSTEM_HEADER='#pragma GCC system_header' + fi + else + if test $gl_cv_have_include_next = buggy; then + INCLUDE_NEXT=include + INCLUDE_NEXT_AS_FIRST_DIRECTIVE=include_next + else + INCLUDE_NEXT=include + INCLUDE_NEXT_AS_FIRST_DIRECTIVE=include + fi + fi + AC_SUBST([INCLUDE_NEXT]) + AC_SUBST([INCLUDE_NEXT_AS_FIRST_DIRECTIVE]) + AC_SUBST([PRAGMA_SYSTEM_HEADER]) + AC_CACHE_CHECK([whether system header files limit the line length], + [gl_cv_pragma_columns], + [dnl HP NonStop systems, which define __TANDEM, have this misfeature. 
+ AC_EGREP_CPP([choke me], + [ +#ifdef __TANDEM +choke me +#endif + ], + [gl_cv_pragma_columns=yes], + [gl_cv_pragma_columns=no]) + ]) + if test $gl_cv_pragma_columns = yes; then + PRAGMA_COLUMNS="#pragma COLUMNS 10000" + else + PRAGMA_COLUMNS= + fi + AC_SUBST([PRAGMA_COLUMNS]) +]) + +# gl_CHECK_NEXT_HEADERS(HEADER1 HEADER2 ...) +# ------------------------------------------ +# For each arg foo.h, if #include_next works, define NEXT_FOO_H to be +# ''; otherwise define it to be +# '"///usr/include/foo.h"', or whatever other absolute file name is suitable. +# Also, if #include_next works as first preprocessing directive in a file, +# define NEXT_AS_FIRST_DIRECTIVE_FOO_H to be ''; otherwise define it to +# be +# '"///usr/include/foo.h"', or whatever other absolute file name is suitable. +# That way, a header file with the following line: +# #@INCLUDE_NEXT@ @NEXT_FOO_H@ +# or +# #@INCLUDE_NEXT_AS_FIRST_DIRECTIVE@ @NEXT_AS_FIRST_DIRECTIVE_FOO_H@ +# behaves (after sed substitution) as if it contained +# #include_next +# even if the compiler does not support include_next. +# The three "///" are to pacify Sun C 5.8, which otherwise would say +# "warning: #include of /usr/include/... may be non-portable". +# Use `""', not `<>', so that the /// cannot be confused with a C99 comment. +# Note: This macro assumes that the header file is not empty after +# preprocessing, i.e. it does not only define preprocessor macros but also +# provides some type/enum definitions or function/variable declarations. +# +# This macro also checks whether each header exists, by invoking +# AC_CHECK_HEADERS_ONCE or AC_CHECK_HEADERS on each argument. +AC_DEFUN([gl_CHECK_NEXT_HEADERS], +[ + gl_NEXT_HEADERS_INTERNAL([$1], [check]) +]) + +# gl_NEXT_HEADERS(HEADER1 HEADER2 ...) +# ------------------------------------ +# Like gl_CHECK_NEXT_HEADERS, except do not check whether the headers exist. +# This is suitable for headers like that are standardized by C89 +# and therefore can be assumed to exist. 
+AC_DEFUN([gl_NEXT_HEADERS], +[ + gl_NEXT_HEADERS_INTERNAL([$1], [assume]) +]) + +# The guts of gl_CHECK_NEXT_HEADERS and gl_NEXT_HEADERS. +AC_DEFUN([gl_NEXT_HEADERS_INTERNAL], +[ + AC_REQUIRE([gl_INCLUDE_NEXT]) + AC_REQUIRE([AC_CANONICAL_HOST]) + + m4_if([$2], [check], + [AC_CHECK_HEADERS_ONCE([$1]) + ]) + + m4_foreach_w([gl_HEADER_NAME], [$1], + [AS_VAR_PUSHDEF([gl_next_header], + [gl_cv_next_]m4_defn([gl_HEADER_NAME])) + if test $gl_cv_have_include_next = yes; then + AS_VAR_SET([gl_next_header], ['<'gl_HEADER_NAME'>']) + else + AC_CACHE_CHECK( + [absolute name of <]m4_defn([gl_HEADER_NAME])[>], + m4_defn([gl_next_header]), + [m4_if([$2], [check], + [AS_VAR_PUSHDEF([gl_header_exists], + [ac_cv_header_]m4_defn([gl_HEADER_NAME])) + if test AS_VAR_GET(gl_header_exists) = yes; then + AS_VAR_POPDEF([gl_header_exists]) + ]) + AC_LANG_CONFTEST( + [AC_LANG_SOURCE( + [[#include <]]m4_dquote(m4_defn([gl_HEADER_NAME]))[[>]] + )]) + dnl AIX "xlc -E" and "cc -E" omit #line directives for header + dnl files that contain only a #include of other header files and + dnl no non-comment tokens of their own. This leads to a failure + dnl to detect the absolute name of , , + dnl and others. The workaround is to force preservation + dnl of comments through option -C. This ensures all necessary + dnl #line directives are present. GCC supports option -C as well. + case "$host_os" in + aix*) gl_absname_cpp="$ac_cpp -C" ;; + *) gl_absname_cpp="$ac_cpp" ;; + esac + dnl eval is necessary to expand gl_absname_cpp. + dnl Ultrix and Pyramid sh refuse to redirect output of eval, + dnl so use subshell. 
+ AS_VAR_SET([gl_next_header], + ['"'`(eval "$gl_absname_cpp conftest.$ac_ext") 2>&AS_MESSAGE_LOG_FD | + sed -n '\#/]m4_defn([gl_HEADER_NAME])[#{ + s#.*"\(.*/]m4_defn([gl_HEADER_NAME])[\)".*#\1# + s#^/[^/]#//&# + p + q + }'`'"']) + m4_if([$2], [check], + [else + AS_VAR_SET([gl_next_header], ['<'gl_HEADER_NAME'>']) + fi + ]) + ]) + fi + AC_SUBST( + AS_TR_CPP([NEXT_]m4_defn([gl_HEADER_NAME])), + [AS_VAR_GET([gl_next_header])]) + if test $gl_cv_have_include_next = yes || test $gl_cv_have_include_next = buggy; then + # INCLUDE_NEXT_AS_FIRST_DIRECTIVE='include_next' + gl_next_as_first_directive='<'gl_HEADER_NAME'>' + else + # INCLUDE_NEXT_AS_FIRST_DIRECTIVE='include' + gl_next_as_first_directive=AS_VAR_GET([gl_next_header]) + fi + AC_SUBST( + AS_TR_CPP([NEXT_AS_FIRST_DIRECTIVE_]m4_defn([gl_HEADER_NAME])), + [$gl_next_as_first_directive]) + AS_VAR_POPDEF([gl_next_header])]) +]) + +# Autoconf 2.68 added warnings for our use of AC_COMPILE_IFELSE; +# this fallback is safe for all earlier autoconf versions. +m4_define_default([AC_LANG_DEFINES_PROVIDED]) diff --git a/m4/longlong.m4 b/m4/longlong.m4 new file mode 100644 index 0000000..aed816c --- /dev/null +++ b/m4/longlong.m4 @@ -0,0 +1,113 @@ +# longlong.m4 serial 16 +dnl Copyright (C) 1999-2007, 2009-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert. + +# Define HAVE_LONG_LONG_INT if 'long long int' works. +# This fixes a bug in Autoconf 2.61, and can be faster +# than what's in Autoconf 2.62 through 2.68. + +# Note: If the type 'long long int' exists but is only 32 bits large +# (as on some very old compilers), HAVE_LONG_LONG_INT will not be +# defined. In this case you can treat 'long long int' like 'long int'. 
+ +AC_DEFUN([AC_TYPE_LONG_LONG_INT], +[ + AC_REQUIRE([AC_TYPE_UNSIGNED_LONG_LONG_INT]) + AC_CACHE_CHECK([for long long int], [ac_cv_type_long_long_int], + [ac_cv_type_long_long_int=yes + if test "x${ac_cv_prog_cc_c99-no}" = xno; then + ac_cv_type_long_long_int=$ac_cv_type_unsigned_long_long_int + if test $ac_cv_type_long_long_int = yes; then + dnl Catch a bug in Tandem NonStop Kernel (OSS) cc -O circa 2004. + dnl If cross compiling, assume the bug is not important, since + dnl nobody cross compiles for this platform as far as we know. + AC_RUN_IFELSE( + [AC_LANG_PROGRAM( + [[@%:@include + @%:@ifndef LLONG_MAX + @%:@ define HALF \ + (1LL << (sizeof (long long int) * CHAR_BIT - 2)) + @%:@ define LLONG_MAX (HALF - 1 + HALF) + @%:@endif]], + [[long long int n = 1; + int i; + for (i = 0; ; i++) + { + long long int m = n << i; + if (m >> i != n) + return 1; + if (LLONG_MAX / 2 < m) + break; + } + return 0;]])], + [], + [ac_cv_type_long_long_int=no], + [:]) + fi + fi]) + if test $ac_cv_type_long_long_int = yes; then + AC_DEFINE([HAVE_LONG_LONG_INT], [1], + [Define to 1 if the system has the type `long long int'.]) + fi +]) + +# Define HAVE_UNSIGNED_LONG_LONG_INT if 'unsigned long long int' works. +# This fixes a bug in Autoconf 2.61, and can be faster +# than what's in Autoconf 2.62 through 2.68. + +# Note: If the type 'unsigned long long int' exists but is only 32 bits +# large (as on some very old compilers), AC_TYPE_UNSIGNED_LONG_LONG_INT +# will not be defined. In this case you can treat 'unsigned long long int' +# like 'unsigned long int'. 
+ +AC_DEFUN([AC_TYPE_UNSIGNED_LONG_LONG_INT], +[ + AC_CACHE_CHECK([for unsigned long long int], + [ac_cv_type_unsigned_long_long_int], + [ac_cv_type_unsigned_long_long_int=yes + if test "x${ac_cv_prog_cc_c99-no}" = xno; then + AC_LINK_IFELSE( + [_AC_TYPE_LONG_LONG_SNIPPET], + [], + [ac_cv_type_unsigned_long_long_int=no]) + fi]) + if test $ac_cv_type_unsigned_long_long_int = yes; then + AC_DEFINE([HAVE_UNSIGNED_LONG_LONG_INT], [1], + [Define to 1 if the system has the type `unsigned long long int'.]) + fi +]) + +# Expands to a C program that can be used to test for simultaneous support +# of 'long long' and 'unsigned long long'. We don't want to say that +# 'long long' is available if 'unsigned long long' is not, or vice versa, +# because too many programs rely on the symmetry between signed and unsigned +# integer types (excluding 'bool'). +AC_DEFUN([_AC_TYPE_LONG_LONG_SNIPPET], +[ + AC_LANG_PROGRAM( + [[/* For now, do not test the preprocessor; as of 2007 there are too many + implementations with broken preprocessors. Perhaps this can + be revisited in 2012. In the meantime, code should not expect + #if to work with literals wider than 32 bits. */ + /* Test literals. */ + long long int ll = 9223372036854775807ll; + long long int nll = -9223372036854775807LL; + unsigned long long int ull = 18446744073709551615ULL; + /* Test constant expressions. */ + typedef int a[((-9223372036854775807LL < 0 && 0 < 9223372036854775807ll) + ? 1 : -1)]; + typedef int b[(18446744073709551615ULL <= (unsigned long long int) -1 + ? 1 : -1)]; + int i = 63;]], + [[/* Test availability of runtime routines for shift and division. 
*/ + long long int llmax = 9223372036854775807ll; + unsigned long long int ullmax = 18446744073709551615ull; + return ((ll << 63) | (ll >> 63) | (ll < i) | (ll > i) + | (llmax / ll) | (llmax % ll) + | (ull << 63) | (ull >> 63) | (ull << i) | (ull >> i) + | (ullmax / ull) | (ullmax % ull));]]) +]) diff --git a/m4/memchr.m4 b/m4/memchr.m4 new file mode 100644 index 0000000..f6dc3e7 --- /dev/null +++ b/m4/memchr.m4 @@ -0,0 +1,88 @@ +# memchr.m4 serial 12 +dnl Copyright (C) 2002-2004, 2009-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +AC_DEFUN_ONCE([gl_FUNC_MEMCHR], +[ + dnl Check for prerequisites for memory fence checks. + gl_FUNC_MMAP_ANON + AC_CHECK_HEADERS_ONCE([sys/mman.h]) + AC_CHECK_FUNCS_ONCE([mprotect]) + + AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) + m4_ifdef([gl_FUNC_MEMCHR_OBSOLETE], [ + dnl These days, we assume memchr is present. But if support for old + dnl platforms is desired: + AC_CHECK_FUNCS_ONCE([memchr]) + if test $ac_cv_func_memchr = no; then + HAVE_MEMCHR=0 + fi + ]) + if test $HAVE_MEMCHR = 1; then + # Detect platform-specific bugs in some versions of glibc: + # memchr should not dereference anything with length 0 + # http://bugzilla.redhat.com/499689 + # memchr should not dereference overestimated length after a match + # http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=521737 + # http://sourceware.org/bugzilla/show_bug.cgi?id=10162 + # Assume that memchr works on platforms that lack mprotect. 
+ AC_CACHE_CHECK([whether memchr works], [gl_cv_func_memchr_works], + [AC_RUN_IFELSE([AC_LANG_PROGRAM([[ +#include +#if HAVE_SYS_MMAN_H +# include +# include +# include +# include +# ifndef MAP_FILE +# define MAP_FILE 0 +# endif +#endif +]], [[ + int result = 0; + char *fence = NULL; +#if HAVE_SYS_MMAN_H && HAVE_MPROTECT +# if HAVE_MAP_ANONYMOUS + const int flags = MAP_ANONYMOUS | MAP_PRIVATE; + const int fd = -1; +# else /* !HAVE_MAP_ANONYMOUS */ + const int flags = MAP_FILE | MAP_PRIVATE; + int fd = open ("/dev/zero", O_RDONLY, 0666); + if (fd >= 0) +# endif + { + int pagesize = getpagesize (); + char *two_pages = + (char *) mmap (NULL, 2 * pagesize, PROT_READ | PROT_WRITE, + flags, fd, 0); + if (two_pages != (char *)(-1) + && mprotect (two_pages + pagesize, pagesize, PROT_NONE) == 0) + fence = two_pages + pagesize; + } +#endif + if (fence) + { + if (memchr (fence, 0, 0)) + result |= 1; + strcpy (fence - 9, "12345678"); + if (memchr (fence - 9, 0, 79) != fence - 1) + result |= 2; + if (memchr (fence - 1, 0, 3) != fence - 1) + result |= 4; + } + return result; +]])], [gl_cv_func_memchr_works=yes], [gl_cv_func_memchr_works=no], + [dnl Be pessimistic for now. + gl_cv_func_memchr_works="guessing no"])]) + if test "$gl_cv_func_memchr_works" != yes; then + REPLACE_MEMCHR=1 + fi + fi +]) + +# Prerequisites of lib/memchr.c. +AC_DEFUN([gl_PREREQ_MEMCHR], [ + AC_CHECK_HEADERS([bp-sym.h]) +]) diff --git a/m4/memmem.m4 b/m4/memmem.m4 new file mode 100644 index 0000000..e912205 --- /dev/null +++ b/m4/memmem.m4 @@ -0,0 +1,145 @@ +# memmem.m4 serial 23 +dnl Copyright (C) 2002-2004, 2007-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl Check that memmem is present and functional. +AC_DEFUN([gl_FUNC_MEMMEM_SIMPLE], +[ + dnl Persuade glibc to declare memmem(). 
+ AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) + + AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) + AC_CHECK_FUNCS([memmem]) + if test $ac_cv_func_memmem = yes; then + HAVE_MEMMEM=1 + else + HAVE_MEMMEM=0 + fi + AC_CHECK_DECLS_ONCE([memmem]) + if test $ac_cv_have_decl_memmem = no; then + HAVE_DECL_MEMMEM=0 + else + dnl Detect http://sourceware.org/bugzilla/show_bug.cgi?id=12092. + dnl Also check that we handle empty needles correctly. + AC_CACHE_CHECK([whether memmem works], + [gl_cv_func_memmem_works_always], + [AC_RUN_IFELSE([AC_LANG_PROGRAM([[ +#include /* for memmem */ +#define P "_EF_BF_BD" +#define HAYSTACK "F_BD_CE_BD" P P P P "_C3_88_20" P P P "_C3_A7_20" P +#define NEEDLE P P P P P +]], [[ + int result = 0; + if (memmem (HAYSTACK, strlen (HAYSTACK), NEEDLE, strlen (NEEDLE))) + result |= 1; + /* Check for empty needle behavior. */ + { + const char *haystack = "AAA"; + if (memmem (haystack, 3, NULL, 0) != haystack) + result |= 2; + } + return result; + ]])], + [gl_cv_func_memmem_works_always=yes], + [gl_cv_func_memmem_works_always=no], + [dnl glibc 2.9..2.12 and cygwin 1.7.7 have issue #12092 above. + dnl Also empty needles work on glibc >= 2.1 and cygwin >= 1.7.0. + dnl uClibc is not affected, since it uses different source code. + dnl Assume that it works on all other platforms (even if not linear). 
+ AC_EGREP_CPP([Lucky user], + [ +#ifdef __GNU_LIBRARY__ + #include + #if ((__GLIBC__ == 2 && ((__GLIBC_MINOR > 0 && __GLIBC_MINOR__ < 9) \ + || __GLIBC_MINOR__ > 12)) \ + || (__GLIBC__ > 2)) \ + || defined __UCLIBC__ + Lucky user + #endif +#elif defined __CYGWIN__ + #include + #if CYGWIN_VERSION_DLL_COMBINED > CYGWIN_VERSION_DLL_MAKE_COMBINED (1007, 7) + Lucky user + #endif +#else + Lucky user +#endif + ], + [gl_cv_func_memmem_works_always=yes], + [gl_cv_func_memmem_works_always="guessing no"]) + ]) + ]) + if test "$gl_cv_func_memmem_works_always" != yes; then + REPLACE_MEMMEM=1 + fi + fi + gl_PREREQ_MEMMEM +]) # gl_FUNC_MEMMEM_SIMPLE + +dnl Additionally, check that memmem has linear performance characteristics +AC_DEFUN([gl_FUNC_MEMMEM], +[ + AC_REQUIRE([gl_FUNC_MEMMEM_SIMPLE]) + if test $HAVE_DECL_MEMMEM = 1 && test $REPLACE_MEMMEM = 0; then + AC_CACHE_CHECK([whether memmem works in linear time], + [gl_cv_func_memmem_works_fast], + [AC_RUN_IFELSE([AC_LANG_PROGRAM([[ +#include /* for signal */ +#include /* for memmem */ +#include /* for malloc */ +#include /* for alarm */ +static void quit (int sig) { exit (sig + 128); } +]], [[ + int result = 0; + size_t m = 1000000; + char *haystack = (char *) malloc (2 * m + 1); + char *needle = (char *) malloc (m + 1); + /* Failure to compile this test due to missing alarm is okay, + since all such platforms (mingw) also lack memmem. */ + signal (SIGALRM, quit); + alarm (5); + /* Check for quadratic performance. */ + if (haystack && needle) + { + memset (haystack, 'A', 2 * m); + haystack[2 * m] = 'B'; + memset (needle, 'A', m); + needle[m] = 'B'; + if (!memmem (haystack, 2 * m + 1, needle, m + 1)) + result |= 1; + } + return result; + ]])], + [gl_cv_func_memmem_works_fast=yes], [gl_cv_func_memmem_works_fast=no], + [dnl Only glibc >= 2.9 and cygwin > 1.7.0 are known to have a + dnl memmem that works in linear time. 
+ AC_EGREP_CPP([Lucky user], + [ +#include +#ifdef __GNU_LIBRARY__ + #if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 9) || (__GLIBC__ > 2)) \ + && !defined __UCLIBC__ + Lucky user + #endif +#endif +#ifdef __CYGWIN__ + #include + #if CYGWIN_VERSION_DLL_COMBINED > CYGWIN_VERSION_DLL_MAKE_COMBINED (1007, 0) + Lucky user + #endif +#endif + ], + [gl_cv_func_memmem_works_fast=yes], + [gl_cv_func_memmem_works_fast="guessing no"]) + ]) + ]) + if test "$gl_cv_func_memmem_works_fast" != yes; then + REPLACE_MEMMEM=1 + fi + fi +]) # gl_FUNC_MEMMEM + +# Prerequisites of lib/memmem.c. +AC_DEFUN([gl_PREREQ_MEMMEM], [:]) diff --git a/m4/mmap-anon.m4 b/m4/mmap-anon.m4 new file mode 100644 index 0000000..7ba7fd2 --- /dev/null +++ b/m4/mmap-anon.m4 @@ -0,0 +1,55 @@ +# mmap-anon.m4 serial 9 +dnl Copyright (C) 2005, 2007, 2009-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# Detect how mmap can be used to create anonymous (not file-backed) memory +# mappings. +# - On Linux, AIX, OSF/1, Solaris, Cygwin, Interix, Haiku, both MAP_ANONYMOUS +# and MAP_ANON exist and have the same value. +# - On HP-UX, only MAP_ANONYMOUS exists. +# - On MacOS X, FreeBSD, NetBSD, OpenBSD, only MAP_ANON exists. +# - On IRIX, neither exists, and a file descriptor opened to /dev/zero must be +# used. + +AC_DEFUN([gl_FUNC_MMAP_ANON], +[ + dnl Persuade glibc to define MAP_ANONYMOUS. + AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) + + # Check for mmap(). Don't use AC_FUNC_MMAP, because it checks too much: it + # fails on HP-UX 11, because MAP_FIXED mappings do not work. But this is + # irrelevant for anonymous mappings. + AC_CHECK_FUNC([mmap], [gl_have_mmap=yes], [gl_have_mmap=no]) + + # Try to allow MAP_ANONYMOUS. 
+ gl_have_mmap_anonymous=no + if test $gl_have_mmap = yes; then + AC_MSG_CHECKING([for MAP_ANONYMOUS]) + AC_EGREP_CPP([I cant identify this map.], [ +#include +#ifdef MAP_ANONYMOUS + I cant identify this map. +#endif +], + [gl_have_mmap_anonymous=yes]) + if test $gl_have_mmap_anonymous != yes; then + AC_EGREP_CPP([I cant identify this map.], [ +#include +#ifdef MAP_ANON + I cant identify this map. +#endif +], + [AC_DEFINE([MAP_ANONYMOUS], [MAP_ANON], + [Define to a substitute value for mmap()'s MAP_ANONYMOUS flag.]) + gl_have_mmap_anonymous=yes]) + fi + AC_MSG_RESULT([$gl_have_mmap_anonymous]) + if test $gl_have_mmap_anonymous = yes; then + AC_DEFINE([HAVE_MAP_ANONYMOUS], [1], + [Define to 1 if mmap()'s MAP_ANONYMOUS flag is available after including + config.h and .]) + fi + fi +]) diff --git a/m4/multiarch.m4 b/m4/multiarch.m4 new file mode 100644 index 0000000..691d892 --- /dev/null +++ b/m4/multiarch.m4 @@ -0,0 +1,62 @@ +# multiarch.m4 serial 6 +dnl Copyright (C) 2008-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# Determine whether the compiler is or may be producing universal binaries. +# +# On MacOS X 10.5 and later systems, the user can create libraries and +# executables that work on multiple system types--known as "fat" or +# "universal" binaries--by specifying multiple '-arch' options to the +# compiler but only a single '-arch' option to the preprocessor. Like +# this: +# +# ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ +# CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ +# CPP="gcc -E" CXXCPP="g++ -E" +# +# Detect this situation and set APPLE_UNIVERSAL_BUILD accordingly. + +AC_DEFUN_ONCE([gl_MULTIARCH], +[ + dnl Code similar to autoconf-2.63 AC_C_BIGENDIAN. 
+ gl_cv_c_multiarch=no + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#ifndef __APPLE_CC__ + not a universal capable compiler + #endif + typedef int dummy; + ]])], + [ + dnl Check for potential -arch flags. It is not universal unless + dnl there are at least two -arch flags with different values. + arch= + prev= + for word in ${CC} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS}; do + if test -n "$prev"; then + case $word in + i?86 | x86_64 | ppc | ppc64) + if test -z "$arch" || test "$arch" = "$word"; then + arch="$word" + else + gl_cv_c_multiarch=yes + fi + ;; + esac + prev= + else + if test "x$word" = "x-arch"; then + prev=arch + fi + fi + done + ]) + if test $gl_cv_c_multiarch = yes; then + APPLE_UNIVERSAL_BUILD=1 + else + APPLE_UNIVERSAL_BUILD=0 + fi + AC_SUBST([APPLE_UNIVERSAL_BUILD]) +]) diff --git a/m4/onceonly.m4 b/m4/onceonly.m4 new file mode 100644 index 0000000..223071a --- /dev/null +++ b/m4/onceonly.m4 @@ -0,0 +1,91 @@ +# onceonly.m4 serial 7 +dnl Copyright (C) 2002-2003, 2005-2006, 2008-2011 Free Software Foundation, +dnl Inc. +dnl This file is free software, distributed under the terms of the GNU +dnl General Public License. As a special exception to the GNU General +dnl Public License, this file may be distributed as part of a program +dnl that contains a configuration script generated by Autoconf, under +dnl the same distribution terms as the rest of that program. + +dnl This file defines some "once only" variants of standard autoconf macros. +dnl AC_CHECK_HEADERS_ONCE like AC_CHECK_HEADERS +dnl AC_CHECK_FUNCS_ONCE like AC_CHECK_FUNCS +dnl AC_CHECK_DECLS_ONCE like AC_CHECK_DECLS +dnl AC_REQUIRE([AC_FUNC_STRCOLL]) like AC_FUNC_STRCOLL +dnl The advantage is that the check for each of the headers/functions/decls +dnl will be put only once into the 'configure' file. It keeps the size of +dnl the 'configure' file down, and avoids redundant output when 'configure' +dnl is run. +dnl The drawback is that the checks cannot be conditionalized. 
If you write +dnl if some_condition; then gl_CHECK_HEADERS(stdlib.h); fi +dnl inside an AC_DEFUNed function, the gl_CHECK_HEADERS macro call expands to +dnl empty, and the check will be inserted before the body of the AC_DEFUNed +dnl function. + +dnl The original code implemented AC_CHECK_HEADERS_ONCE and AC_CHECK_FUNCS_ONCE +dnl in terms of AC_DEFUN and AC_REQUIRE. This implementation uses diversions to +dnl named sections DEFAULTS and INIT_PREPARE in order to check all requested +dnl headers at once, thus reducing the size of 'configure'. It is known to work +dnl with autoconf 2.57..2.62 at least . The size reduction is ca. 9%. + +dnl Autoconf version 2.59 plus gnulib is required; this file is not needed +dnl with Autoconf 2.60 or greater. But note that autoconf's implementation of +dnl AC_CHECK_DECLS_ONCE expects a comma-separated list of symbols as first +dnl argument! +AC_PREREQ([2.59]) + +# AC_CHECK_HEADERS_ONCE(HEADER1 HEADER2 ...) is a once-only variant of +# AC_CHECK_HEADERS(HEADER1 HEADER2 ...). +AC_DEFUN([AC_CHECK_HEADERS_ONCE], [ + : + m4_foreach_w([gl_HEADER_NAME], [$1], [ + AC_DEFUN([gl_CHECK_HEADER_]m4_quote(m4_translit(gl_HEADER_NAME, + [./-], [___])), [ + m4_divert_text([INIT_PREPARE], + [gl_header_list="$gl_header_list gl_HEADER_NAME"]) + gl_HEADERS_EXPANSION + AH_TEMPLATE(AS_TR_CPP([HAVE_]m4_defn([gl_HEADER_NAME])), + [Define to 1 if you have the <]m4_defn([gl_HEADER_NAME])[> header file.]) + ]) + AC_REQUIRE([gl_CHECK_HEADER_]m4_quote(m4_translit(gl_HEADER_NAME, + [./-], [___]))) + ]) +]) +m4_define([gl_HEADERS_EXPANSION], [ + m4_divert_text([DEFAULTS], [gl_header_list=]) + AC_CHECK_HEADERS([$gl_header_list]) + m4_define([gl_HEADERS_EXPANSION], []) +]) + +# AC_CHECK_FUNCS_ONCE(FUNC1 FUNC2 ...) is a once-only variant of +# AC_CHECK_FUNCS(FUNC1 FUNC2 ...). 
+AC_DEFUN([AC_CHECK_FUNCS_ONCE], [ + : + m4_foreach_w([gl_FUNC_NAME], [$1], [ + AC_DEFUN([gl_CHECK_FUNC_]m4_defn([gl_FUNC_NAME]), [ + m4_divert_text([INIT_PREPARE], + [gl_func_list="$gl_func_list gl_FUNC_NAME"]) + gl_FUNCS_EXPANSION + AH_TEMPLATE(AS_TR_CPP([HAVE_]m4_defn([gl_FUNC_NAME])), + [Define to 1 if you have the `]m4_defn([gl_FUNC_NAME])[' function.]) + ]) + AC_REQUIRE([gl_CHECK_FUNC_]m4_defn([gl_FUNC_NAME])) + ]) +]) +m4_define([gl_FUNCS_EXPANSION], [ + m4_divert_text([DEFAULTS], [gl_func_list=]) + AC_CHECK_FUNCS([$gl_func_list]) + m4_define([gl_FUNCS_EXPANSION], []) +]) + +# AC_CHECK_DECLS_ONCE(DECL1 DECL2 ...) is a once-only variant of +# AC_CHECK_DECLS(DECL1, DECL2, ...). +AC_DEFUN([AC_CHECK_DECLS_ONCE], [ + : + m4_foreach_w([gl_DECL_NAME], [$1], [ + AC_DEFUN([gl_CHECK_DECL_]m4_defn([gl_DECL_NAME]), [ + AC_CHECK_DECLS(m4_defn([gl_DECL_NAME])) + ]) + AC_REQUIRE([gl_CHECK_DECL_]m4_defn([gl_DECL_NAME])) + ]) +]) diff --git a/m4/sigaction.m4 b/m4/sigaction.m4 new file mode 100644 index 0000000..b365e26 --- /dev/null +++ b/m4/sigaction.m4 @@ -0,0 +1,43 @@ +# sigaction.m4 serial 6 +dnl Copyright (C) 2008-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# Determine if sigaction interface is present. +AC_DEFUN([gl_SIGACTION], +[ + AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) + AC_CHECK_FUNCS_ONCE([sigaction]) + if test $ac_cv_func_sigaction = yes; then + AC_CHECK_MEMBERS([struct sigaction.sa_sigaction], , , + [[#include ]]) + if test $ac_cv_member_struct_sigaction_sa_sigaction = no; then + HAVE_STRUCT_SIGACTION_SA_SIGACTION=0 + fi + else + HAVE_SIGACTION=0 + fi +]) + +# Prerequisites of the part of lib/signal.in.h and of lib/sigaction.c. 
+AC_DEFUN([gl_PREREQ_SIGACTION], +[ + AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) + AC_REQUIRE([AC_C_RESTRICT]) + AC_REQUIRE([AC_TYPE_UID_T]) + AC_REQUIRE([gl_PREREQ_SIG_HANDLER_H]) + AC_CHECK_FUNCS_ONCE([sigaltstack siginterrupt]) + AC_CHECK_TYPES([siginfo_t], [], [], [[ +#include + ]]) + if test $ac_cv_type_siginfo_t = no; then + HAVE_SIGINFO_T=0 + fi +]) + +# Prerequisites of lib/sig-handler.h. +AC_DEFUN([gl_PREREQ_SIG_HANDLER_H], +[ + AC_REQUIRE([AC_C_INLINE]) +]) diff --git a/m4/signal_h.m4 b/m4/signal_h.m4 new file mode 100644 index 0000000..459ec00 --- /dev/null +++ b/m4/signal_h.m4 @@ -0,0 +1,58 @@ +# signal_h.m4 serial 12 +dnl Copyright (C) 2007-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +AC_DEFUN([gl_SIGNAL_H], +[ + AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) + gl_NEXT_HEADERS([signal.h]) + +# AIX declares sig_atomic_t to already include volatile, and C89 compilers +# then choke on 'volatile sig_atomic_t'. C99 requires that it compile. + AC_CHECK_TYPE([volatile sig_atomic_t], [], + [HAVE_TYPE_VOLATILE_SIG_ATOMIC_T=0], [[ +#include + ]]) + + AC_REQUIRE([AC_TYPE_UID_T]) + + dnl Persuade glibc to define sighandler_t. + AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) + AC_CHECK_TYPE([sighandler_t], [], [HAVE_SIGHANDLER_T=0], [[ +#include + ]]) + + dnl Check for declarations of anything we want to poison if the + dnl corresponding gnulib module is not in use. + gl_WARN_ON_USE_PREPARE([[#include + ]], [sigaction sigaddset sigdelset sigemptyset sigfillset sigismember + sigpending sigprocmask]) +]) + +AC_DEFUN([gl_SIGNAL_MODULE_INDICATOR], +[ + dnl Use AC_REQUIRE here, so that the default settings are expanded once only. + AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) + gl_MODULE_INDICATOR_SET_VARIABLE([$1]) + dnl Define it also as a C macro, for the benefit of the unit tests. 
+ gl_MODULE_INDICATOR_FOR_TESTS([$1]) +]) + +AC_DEFUN([gl_SIGNAL_H_DEFAULTS], +[ + GNULIB_SIGNAL_H_SIGPIPE=0; AC_SUBST([GNULIB_SIGNAL_H_SIGPIPE]) + GNULIB_SIGPROCMASK=0; AC_SUBST([GNULIB_SIGPROCMASK]) + GNULIB_SIGACTION=0; AC_SUBST([GNULIB_SIGACTION]) + dnl Assume proper GNU behavior unless another module says otherwise. + HAVE_POSIX_SIGNALBLOCKING=1; AC_SUBST([HAVE_POSIX_SIGNALBLOCKING]) + HAVE_SIGSET_T=1; AC_SUBST([HAVE_SIGSET_T]) + HAVE_SIGINFO_T=1; AC_SUBST([HAVE_SIGINFO_T]) + HAVE_SIGACTION=1; AC_SUBST([HAVE_SIGACTION]) + HAVE_STRUCT_SIGACTION_SA_SIGACTION=1; + AC_SUBST([HAVE_STRUCT_SIGACTION_SA_SIGACTION]) + HAVE_TYPE_VOLATILE_SIG_ATOMIC_T=1; + AC_SUBST([HAVE_TYPE_VOLATILE_SIG_ATOMIC_T]) + HAVE_SIGHANDLER_T=1; AC_SUBST([HAVE_SIGHANDLER_T]) +]) diff --git a/m4/signalblocking.m4 b/m4/signalblocking.m4 new file mode 100644 index 0000000..6e83f1b --- /dev/null +++ b/m4/signalblocking.m4 @@ -0,0 +1,40 @@ +# signalblocking.m4 serial 11 +dnl Copyright (C) 2001-2002, 2006-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# Determine available signal blocking primitives. Three different APIs exist: +# 1) POSIX: sigemptyset, sigaddset, sigprocmask +# 2) SYSV: sighold, sigrelse +# 3) BSD: sigblock, sigsetmask +# For simplicity, here we check only for the POSIX signal blocking. +AC_DEFUN([gl_SIGNALBLOCKING], +[ + AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) + signals_not_posix= + AC_EGREP_HEADER([sigset_t], [signal.h], , [signals_not_posix=1]) + if test -z "$signals_not_posix"; then + AC_CHECK_FUNC([sigprocmask], [gl_cv_func_sigprocmask=1]) + fi + if test -z "$gl_cv_func_sigprocmask"; then + HAVE_POSIX_SIGNALBLOCKING=0 + fi +]) + +# Prerequisites of the part of lib/signal.in.h and of lib/sigprocmask.c. 
+AC_DEFUN([gl_PREREQ_SIGPROCMASK], +[ + AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) + AC_CHECK_TYPES([sigset_t], + [gl_cv_type_sigset_t=yes], [gl_cv_type_sigset_t=no], + [#include +/* Mingw defines sigset_t not in , but in . */ +#include ]) + if test $gl_cv_type_sigset_t != yes; then + HAVE_SIGSET_T=0 + fi + dnl HAVE_SIGSET_T is 1 if the system lacks the sigprocmask function but has + dnl the sigset_t type. + AC_SUBST([HAVE_SIGSET_T]) +]) diff --git a/m4/stddef_h.m4 b/m4/stddef_h.m4 new file mode 100644 index 0000000..1ae2344 --- /dev/null +++ b/m4/stddef_h.m4 @@ -0,0 +1,47 @@ +dnl A placeholder for POSIX 2008 , for platforms that have issues. +# stddef_h.m4 serial 4 +dnl Copyright (C) 2009-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +AC_DEFUN([gl_STDDEF_H], +[ + AC_REQUIRE([gl_STDDEF_H_DEFAULTS]) + AC_REQUIRE([gt_TYPE_WCHAR_T]) + STDDEF_H= + if test $gt_cv_c_wchar_t = no; then + HAVE_WCHAR_T=0 + STDDEF_H=stddef.h + fi + AC_CACHE_CHECK([whether NULL can be used in arbitrary expressions], + [gl_cv_decl_null_works], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include + int test[2 * (sizeof NULL == sizeof (void *)) -1]; +]])], + [gl_cv_decl_null_works=yes], + [gl_cv_decl_null_works=no])]) + if test $gl_cv_decl_null_works = no; then + REPLACE_NULL=1 + STDDEF_H=stddef.h + fi + AC_SUBST([STDDEF_H]) + AM_CONDITIONAL([GL_GENERATE_STDDEF_H], [test -n "$STDDEF_H"]) + if test -n "$STDDEF_H"; then + gl_NEXT_HEADERS([stddef.h]) + fi +]) + +AC_DEFUN([gl_STDDEF_MODULE_INDICATOR], +[ + dnl Use AC_REQUIRE here, so that the default settings are expanded once only. + AC_REQUIRE([gl_STDDEF_H_DEFAULTS]) + gl_MODULE_INDICATOR_SET_VARIABLE([$1]) +]) + +AC_DEFUN([gl_STDDEF_H_DEFAULTS], +[ + dnl Assume proper GNU behavior unless another module says otherwise. 
+ REPLACE_NULL=0; AC_SUBST([REPLACE_NULL]) + HAVE_WCHAR_T=1; AC_SUBST([HAVE_WCHAR_T]) +]) diff --git a/m4/stdint.m4 b/m4/stdint.m4 new file mode 100644 index 0000000..c75e957 --- /dev/null +++ b/m4/stdint.m4 @@ -0,0 +1,480 @@ +# stdint.m4 serial 41 +dnl Copyright (C) 2001-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Paul Eggert and Bruno Haible. +dnl Test whether is supported or must be substituted. + +AC_DEFUN_ONCE([gl_STDINT_H], +[ + AC_PREREQ([2.59])dnl + + dnl Check for long long int and unsigned long long int. + AC_REQUIRE([AC_TYPE_LONG_LONG_INT]) + if test $ac_cv_type_long_long_int = yes; then + HAVE_LONG_LONG_INT=1 + else + HAVE_LONG_LONG_INT=0 + fi + AC_SUBST([HAVE_LONG_LONG_INT]) + AC_REQUIRE([AC_TYPE_UNSIGNED_LONG_LONG_INT]) + if test $ac_cv_type_unsigned_long_long_int = yes; then + HAVE_UNSIGNED_LONG_LONG_INT=1 + else + HAVE_UNSIGNED_LONG_LONG_INT=0 + fi + AC_SUBST([HAVE_UNSIGNED_LONG_LONG_INT]) + + dnl Check for , in the same way as gl_WCHAR_H does. + AC_CHECK_HEADERS_ONCE([wchar.h]) + if test $ac_cv_header_wchar_h = yes; then + HAVE_WCHAR_H=1 + else + HAVE_WCHAR_H=0 + fi + AC_SUBST([HAVE_WCHAR_H]) + + dnl Check for . + dnl AC_INCLUDES_DEFAULT defines $ac_cv_header_inttypes_h. + if test $ac_cv_header_inttypes_h = yes; then + HAVE_INTTYPES_H=1 + else + HAVE_INTTYPES_H=0 + fi + AC_SUBST([HAVE_INTTYPES_H]) + + dnl Check for . + dnl AC_INCLUDES_DEFAULT defines $ac_cv_header_sys_types_h. + if test $ac_cv_header_sys_types_h = yes; then + HAVE_SYS_TYPES_H=1 + else + HAVE_SYS_TYPES_H=0 + fi + AC_SUBST([HAVE_SYS_TYPES_H]) + + gl_CHECK_NEXT_HEADERS([stdint.h]) + if test $ac_cv_header_stdint_h = yes; then + HAVE_STDINT_H=1 + else + HAVE_STDINT_H=0 + fi + AC_SUBST([HAVE_STDINT_H]) + + dnl Now see whether we need a substitute . 
+ if test $ac_cv_header_stdint_h = yes; then + AC_CACHE_CHECK([whether stdint.h conforms to C99], + [gl_cv_header_working_stdint_h], + [gl_cv_header_working_stdint_h=no + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([[ +#define __STDC_LIMIT_MACROS 1 /* to make it work also in C++ mode */ +#define __STDC_CONSTANT_MACROS 1 /* to make it work also in C++ mode */ +#define _GL_JUST_INCLUDE_SYSTEM_STDINT_H 1 /* work if build isn't clean */ +#include +/* Dragonfly defines WCHAR_MIN, WCHAR_MAX only in . */ +#if !(defined WCHAR_MIN && defined WCHAR_MAX) +#error "WCHAR_MIN, WCHAR_MAX not defined in " +#endif +] +gl_STDINT_INCLUDES +[ +#ifdef INT8_MAX +int8_t a1 = INT8_MAX; +int8_t a1min = INT8_MIN; +#endif +#ifdef INT16_MAX +int16_t a2 = INT16_MAX; +int16_t a2min = INT16_MIN; +#endif +#ifdef INT32_MAX +int32_t a3 = INT32_MAX; +int32_t a3min = INT32_MIN; +#endif +#ifdef INT64_MAX +int64_t a4 = INT64_MAX; +int64_t a4min = INT64_MIN; +#endif +#ifdef UINT8_MAX +uint8_t b1 = UINT8_MAX; +#else +typedef int b1[(unsigned char) -1 != 255 ? 
1 : -1]; +#endif +#ifdef UINT16_MAX +uint16_t b2 = UINT16_MAX; +#endif +#ifdef UINT32_MAX +uint32_t b3 = UINT32_MAX; +#endif +#ifdef UINT64_MAX +uint64_t b4 = UINT64_MAX; +#endif +int_least8_t c1 = INT8_C (0x7f); +int_least8_t c1max = INT_LEAST8_MAX; +int_least8_t c1min = INT_LEAST8_MIN; +int_least16_t c2 = INT16_C (0x7fff); +int_least16_t c2max = INT_LEAST16_MAX; +int_least16_t c2min = INT_LEAST16_MIN; +int_least32_t c3 = INT32_C (0x7fffffff); +int_least32_t c3max = INT_LEAST32_MAX; +int_least32_t c3min = INT_LEAST32_MIN; +int_least64_t c4 = INT64_C (0x7fffffffffffffff); +int_least64_t c4max = INT_LEAST64_MAX; +int_least64_t c4min = INT_LEAST64_MIN; +uint_least8_t d1 = UINT8_C (0xff); +uint_least8_t d1max = UINT_LEAST8_MAX; +uint_least16_t d2 = UINT16_C (0xffff); +uint_least16_t d2max = UINT_LEAST16_MAX; +uint_least32_t d3 = UINT32_C (0xffffffff); +uint_least32_t d3max = UINT_LEAST32_MAX; +uint_least64_t d4 = UINT64_C (0xffffffffffffffff); +uint_least64_t d4max = UINT_LEAST64_MAX; +int_fast8_t e1 = INT_FAST8_MAX; +int_fast8_t e1min = INT_FAST8_MIN; +int_fast16_t e2 = INT_FAST16_MAX; +int_fast16_t e2min = INT_FAST16_MIN; +int_fast32_t e3 = INT_FAST32_MAX; +int_fast32_t e3min = INT_FAST32_MIN; +int_fast64_t e4 = INT_FAST64_MAX; +int_fast64_t e4min = INT_FAST64_MIN; +uint_fast8_t f1 = UINT_FAST8_MAX; +uint_fast16_t f2 = UINT_FAST16_MAX; +uint_fast32_t f3 = UINT_FAST32_MAX; +uint_fast64_t f4 = UINT_FAST64_MAX; +#ifdef INTPTR_MAX +intptr_t g = INTPTR_MAX; +intptr_t gmin = INTPTR_MIN; +#endif +#ifdef UINTPTR_MAX +uintptr_t h = UINTPTR_MAX; +#endif +intmax_t i = INTMAX_MAX; +uintmax_t j = UINTMAX_MAX; + +#include /* for CHAR_BIT */ +#define TYPE_MINIMUM(t) \ + ((t) ((t) 0 < (t) -1 ? (t) 0 : ~ TYPE_MAXIMUM (t))) +#define TYPE_MAXIMUM(t) \ + ((t) ((t) 0 < (t) -1 \ + ? (t) -1 \ + : ((((t) 1 << (sizeof (t) * CHAR_BIT - 2)) - 1) * 2 + 1))) +struct s { + int check_PTRDIFF: + PTRDIFF_MIN == TYPE_MINIMUM (ptrdiff_t) + && PTRDIFF_MAX == TYPE_MAXIMUM (ptrdiff_t) + ? 
1 : -1; + /* Detect bug in FreeBSD 6.0 / ia64. */ + int check_SIG_ATOMIC: + SIG_ATOMIC_MIN == TYPE_MINIMUM (sig_atomic_t) + && SIG_ATOMIC_MAX == TYPE_MAXIMUM (sig_atomic_t) + ? 1 : -1; + int check_SIZE: SIZE_MAX == TYPE_MAXIMUM (size_t) ? 1 : -1; + int check_WCHAR: + WCHAR_MIN == TYPE_MINIMUM (wchar_t) + && WCHAR_MAX == TYPE_MAXIMUM (wchar_t) + ? 1 : -1; + /* Detect bug in mingw. */ + int check_WINT: + WINT_MIN == TYPE_MINIMUM (wint_t) + && WINT_MAX == TYPE_MAXIMUM (wint_t) + ? 1 : -1; + + /* Detect bugs in glibc 2.4 and Solaris 10 stdint.h, among others. */ + int check_UINT8_C: + (-1 < UINT8_C (0)) == (-1 < (uint_least8_t) 0) ? 1 : -1; + int check_UINT16_C: + (-1 < UINT16_C (0)) == (-1 < (uint_least16_t) 0) ? 1 : -1; + + /* Detect bugs in OpenBSD 3.9 stdint.h. */ +#ifdef UINT8_MAX + int check_uint8: (uint8_t) -1 == UINT8_MAX ? 1 : -1; +#endif +#ifdef UINT16_MAX + int check_uint16: (uint16_t) -1 == UINT16_MAX ? 1 : -1; +#endif +#ifdef UINT32_MAX + int check_uint32: (uint32_t) -1 == UINT32_MAX ? 1 : -1; +#endif +#ifdef UINT64_MAX + int check_uint64: (uint64_t) -1 == UINT64_MAX ? 1 : -1; +#endif + int check_uint_least8: (uint_least8_t) -1 == UINT_LEAST8_MAX ? 1 : -1; + int check_uint_least16: (uint_least16_t) -1 == UINT_LEAST16_MAX ? 1 : -1; + int check_uint_least32: (uint_least32_t) -1 == UINT_LEAST32_MAX ? 1 : -1; + int check_uint_least64: (uint_least64_t) -1 == UINT_LEAST64_MAX ? 1 : -1; + int check_uint_fast8: (uint_fast8_t) -1 == UINT_FAST8_MAX ? 1 : -1; + int check_uint_fast16: (uint_fast16_t) -1 == UINT_FAST16_MAX ? 1 : -1; + int check_uint_fast32: (uint_fast32_t) -1 == UINT_FAST32_MAX ? 1 : -1; + int check_uint_fast64: (uint_fast64_t) -1 == UINT_FAST64_MAX ? 1 : -1; + int check_uintptr: (uintptr_t) -1 == UINTPTR_MAX ? 1 : -1; + int check_uintmax: (uintmax_t) -1 == UINTMAX_MAX ? 1 : -1; + int check_size: (size_t) -1 == SIZE_MAX ? 1 : -1; +}; + ]])], + [dnl Determine whether the various *_MIN, *_MAX macros are usable + dnl in preprocessor expression. 
We could do it by compiling a test + dnl program for each of these macros. It is faster to run a program + dnl that inspects the macro expansion. + dnl This detects a bug on HP-UX 11.23/ia64. + AC_RUN_IFELSE([ + AC_LANG_PROGRAM([[ +#define __STDC_LIMIT_MACROS 1 /* to make it work also in C++ mode */ +#define __STDC_CONSTANT_MACROS 1 /* to make it work also in C++ mode */ +#define _GL_JUST_INCLUDE_SYSTEM_STDINT_H 1 /* work if build isn't clean */ +#include +] +gl_STDINT_INCLUDES +[ +#include +#include +#define MVAL(macro) MVAL1(macro) +#define MVAL1(expression) #expression +static const char *macro_values[] = + { +#ifdef INT8_MAX + MVAL (INT8_MAX), +#endif +#ifdef INT16_MAX + MVAL (INT16_MAX), +#endif +#ifdef INT32_MAX + MVAL (INT32_MAX), +#endif +#ifdef INT64_MAX + MVAL (INT64_MAX), +#endif +#ifdef UINT8_MAX + MVAL (UINT8_MAX), +#endif +#ifdef UINT16_MAX + MVAL (UINT16_MAX), +#endif +#ifdef UINT32_MAX + MVAL (UINT32_MAX), +#endif +#ifdef UINT64_MAX + MVAL (UINT64_MAX), +#endif + NULL + }; +]], [[ + const char **mv; + for (mv = macro_values; *mv != NULL; mv++) + { + const char *value = *mv; + /* Test whether it looks like a cast expression. */ + if (strncmp (value, "((unsigned int)"/*)*/, 15) == 0 + || strncmp (value, "((unsigned short)"/*)*/, 17) == 0 + || strncmp (value, "((unsigned char)"/*)*/, 16) == 0 + || strncmp (value, "((int)"/*)*/, 6) == 0 + || strncmp (value, "((signed short)"/*)*/, 15) == 0 + || strncmp (value, "((signed char)"/*)*/, 14) == 0) + return mv - macro_values + 1; + } + return 0; +]])], + [gl_cv_header_working_stdint_h=yes], + [], + [dnl When cross-compiling, assume it works. + gl_cv_header_working_stdint_h=yes + ]) + ]) + ]) + fi + if test "$gl_cv_header_working_stdint_h" = yes; then + STDINT_H= + else + dnl Check for , and for + dnl (used in Linux libc4 >= 4.6.7 and libc5). 
+ AC_CHECK_HEADERS([sys/inttypes.h sys/bitypes.h]) + if test $ac_cv_header_sys_inttypes_h = yes; then + HAVE_SYS_INTTYPES_H=1 + else + HAVE_SYS_INTTYPES_H=0 + fi + AC_SUBST([HAVE_SYS_INTTYPES_H]) + if test $ac_cv_header_sys_bitypes_h = yes; then + HAVE_SYS_BITYPES_H=1 + else + HAVE_SYS_BITYPES_H=0 + fi + AC_SUBST([HAVE_SYS_BITYPES_H]) + + gl_STDINT_TYPE_PROPERTIES + STDINT_H=stdint.h + fi + AC_SUBST([STDINT_H]) + AM_CONDITIONAL([GL_GENERATE_STDINT_H], [test -n "$STDINT_H"]) +]) + +dnl gl_STDINT_BITSIZEOF(TYPES, INCLUDES) +dnl Determine the size of each of the given types in bits. +AC_DEFUN([gl_STDINT_BITSIZEOF], +[ + dnl Use a shell loop, to avoid bloating configure, and + dnl - extra AH_TEMPLATE calls, so that autoheader knows what to put into + dnl config.h.in, + dnl - extra AC_SUBST calls, so that the right substitutions are made. + m4_foreach_w([gltype], [$1], + [AH_TEMPLATE([BITSIZEOF_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]), + [Define to the number of bits in type ']gltype['.])]) + for gltype in $1 ; do + AC_CACHE_CHECK([for bit size of $gltype], [gl_cv_bitsizeof_${gltype}], + [AC_COMPUTE_INT([result], [sizeof ($gltype) * CHAR_BIT], + [$2 +#include ], [result=unknown]) + eval gl_cv_bitsizeof_${gltype}=\$result + ]) + eval result=\$gl_cv_bitsizeof_${gltype} + if test $result = unknown; then + dnl Use a nonempty default, because some compilers, such as IRIX 5 cc, + dnl do a syntax check even on unused #if conditions and give an error + dnl on valid C code like this: + dnl #if 0 + dnl # if > 32 + dnl # endif + dnl #endif + result=0 + fi + GLTYPE=`echo "$gltype" | tr 'abcdefghijklmnopqrstuvwxyz ' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'` + AC_DEFINE_UNQUOTED([BITSIZEOF_${GLTYPE}], [$result]) + eval BITSIZEOF_${GLTYPE}=\$result + done + m4_foreach_w([gltype], [$1], + [AC_SUBST([BITSIZEOF_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]))]) +]) + +dnl gl_CHECK_TYPES_SIGNED(TYPES, INCLUDES) +dnl Determine 
the signedness of each of the given types. +dnl Define HAVE_SIGNED_TYPE if type is signed. +AC_DEFUN([gl_CHECK_TYPES_SIGNED], +[ + dnl Use a shell loop, to avoid bloating configure, and + dnl - extra AH_TEMPLATE calls, so that autoheader knows what to put into + dnl config.h.in, + dnl - extra AC_SUBST calls, so that the right substitutions are made. + m4_foreach_w([gltype], [$1], + [AH_TEMPLATE([HAVE_SIGNED_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]), + [Define to 1 if ']gltype[' is a signed integer type.])]) + for gltype in $1 ; do + AC_CACHE_CHECK([whether $gltype is signed], [gl_cv_type_${gltype}_signed], + [AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM([$2[ + int verify[2 * (($gltype) -1 < ($gltype) 0) - 1];]])], + result=yes, result=no) + eval gl_cv_type_${gltype}_signed=\$result + ]) + eval result=\$gl_cv_type_${gltype}_signed + GLTYPE=`echo $gltype | tr 'abcdefghijklmnopqrstuvwxyz ' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'` + if test "$result" = yes; then + AC_DEFINE_UNQUOTED([HAVE_SIGNED_${GLTYPE}], [1]) + eval HAVE_SIGNED_${GLTYPE}=1 + else + eval HAVE_SIGNED_${GLTYPE}=0 + fi + done + m4_foreach_w([gltype], [$1], + [AC_SUBST([HAVE_SIGNED_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]))]) +]) + +dnl gl_INTEGER_TYPE_SUFFIX(TYPES, INCLUDES) +dnl Determine the suffix to use for integer constants of the given types. +dnl Define t_SUFFIX for each such type. +AC_DEFUN([gl_INTEGER_TYPE_SUFFIX], +[ + dnl Use a shell loop, to avoid bloating configure, and + dnl - extra AH_TEMPLATE calls, so that autoheader knows what to put into + dnl config.h.in, + dnl - extra AC_SUBST calls, so that the right substitutions are made. 
+ m4_foreach_w([gltype], [$1], + [AH_TEMPLATE(m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_])[_SUFFIX], + [Define to l, ll, u, ul, ull, etc., as suitable for + constants of type ']gltype['.])]) + for gltype in $1 ; do + AC_CACHE_CHECK([for $gltype integer literal suffix], + [gl_cv_type_${gltype}_suffix], + [eval gl_cv_type_${gltype}_suffix=no + eval result=\$gl_cv_type_${gltype}_signed + if test "$result" = yes; then + glsufu= + else + glsufu=u + fi + for glsuf in "$glsufu" ${glsufu}l ${glsufu}ll ${glsufu}i64; do + case $glsuf in + '') gltype1='int';; + l) gltype1='long int';; + ll) gltype1='long long int';; + i64) gltype1='__int64';; + u) gltype1='unsigned int';; + ul) gltype1='unsigned long int';; + ull) gltype1='unsigned long long int';; + ui64)gltype1='unsigned __int64';; + esac + AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM([$2[ + extern $gltype foo; + extern $gltype1 foo;]])], + [eval gl_cv_type_${gltype}_suffix=\$glsuf]) + eval result=\$gl_cv_type_${gltype}_suffix + test "$result" != no && break + done]) + GLTYPE=`echo $gltype | tr 'abcdefghijklmnopqrstuvwxyz ' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'` + eval result=\$gl_cv_type_${gltype}_suffix + test "$result" = no && result= + eval ${GLTYPE}_SUFFIX=\$result + AC_DEFINE_UNQUOTED([${GLTYPE}_SUFFIX], [$result]) + done + m4_foreach_w([gltype], [$1], + [AC_SUBST(m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_])[_SUFFIX])]) +]) + +dnl gl_STDINT_INCLUDES +AC_DEFUN([gl_STDINT_INCLUDES], +[[ + /* BSD/OS 4.0.1 has a bug: , and must be + included before . */ + #include + #include + #if HAVE_WCHAR_H + # include + # include + # include + #endif +]]) + +dnl gl_STDINT_TYPE_PROPERTIES +dnl Compute HAVE_SIGNED_t, BITSIZEOF_t and t_SUFFIX, for all the types t +dnl of interest to stdint.in.h. 
+AC_DEFUN([gl_STDINT_TYPE_PROPERTIES], +[ + AC_REQUIRE([gl_MULTIARCH]) + if test $APPLE_UNIVERSAL_BUILD = 0; then + gl_STDINT_BITSIZEOF([ptrdiff_t size_t], + [gl_STDINT_INCLUDES]) + fi + gl_STDINT_BITSIZEOF([sig_atomic_t wchar_t wint_t], + [gl_STDINT_INCLUDES]) + gl_CHECK_TYPES_SIGNED([sig_atomic_t wchar_t wint_t], + [gl_STDINT_INCLUDES]) + gl_cv_type_ptrdiff_t_signed=yes + gl_cv_type_size_t_signed=no + if test $APPLE_UNIVERSAL_BUILD = 0; then + gl_INTEGER_TYPE_SUFFIX([ptrdiff_t size_t], + [gl_STDINT_INCLUDES]) + fi + gl_INTEGER_TYPE_SUFFIX([sig_atomic_t wchar_t wint_t], + [gl_STDINT_INCLUDES]) +]) + +dnl Autoconf >= 2.61 has AC_COMPUTE_INT built-in. +dnl Remove this when we can assume autoconf >= 2.61. +m4_ifdef([AC_COMPUTE_INT], [], [ + AC_DEFUN([AC_COMPUTE_INT], [_AC_COMPUTE_INT([$2],[$1],[$3],[$4])]) +]) + +# Hey Emacs! +# Local Variables: +# indent-tabs-mode: nil +# End: diff --git a/m4/string_h.m4 b/m4/string_h.m4 new file mode 100644 index 0000000..df8c403 --- /dev/null +++ b/m4/string_h.m4 @@ -0,0 +1,116 @@ +# Configure a GNU-like replacement for . + +# Copyright (C) 2007-2011 Free Software Foundation, Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 20 + +# Written by Paul Eggert. + +AC_DEFUN([gl_HEADER_STRING_H], +[ + dnl Use AC_REQUIRE here, so that the default behavior below is expanded + dnl once only, before all statements that occur in other macros. + AC_REQUIRE([gl_HEADER_STRING_H_BODY]) +]) + +AC_DEFUN([gl_HEADER_STRING_H_BODY], +[ + AC_REQUIRE([AC_C_RESTRICT]) + AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) + gl_NEXT_HEADERS([string.h]) + + dnl Check for declarations of anything we want to poison if the + dnl corresponding gnulib module is not in use, and which is not + dnl guaranteed by C89. 
+ gl_WARN_ON_USE_PREPARE([[#include + ]], + [memmem mempcpy memrchr rawmemchr stpcpy stpncpy strchrnul strdup + strncat strndup strnlen strpbrk strsep strcasestr strtok_r strerror_r + strsignal strverscmp]) +]) + +AC_DEFUN([gl_STRING_MODULE_INDICATOR], +[ + dnl Use AC_REQUIRE here, so that the default settings are expanded once only. + AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) + gl_MODULE_INDICATOR_SET_VARIABLE([$1]) + dnl Define it also as a C macro, for the benefit of the unit tests. + gl_MODULE_INDICATOR_FOR_TESTS([$1]) +]) + +AC_DEFUN([gl_HEADER_STRING_H_DEFAULTS], +[ + GNULIB_MEMCHR=0; AC_SUBST([GNULIB_MEMCHR]) + GNULIB_MEMMEM=0; AC_SUBST([GNULIB_MEMMEM]) + GNULIB_MEMPCPY=0; AC_SUBST([GNULIB_MEMPCPY]) + GNULIB_MEMRCHR=0; AC_SUBST([GNULIB_MEMRCHR]) + GNULIB_RAWMEMCHR=0; AC_SUBST([GNULIB_RAWMEMCHR]) + GNULIB_STPCPY=0; AC_SUBST([GNULIB_STPCPY]) + GNULIB_STPNCPY=0; AC_SUBST([GNULIB_STPNCPY]) + GNULIB_STRCHRNUL=0; AC_SUBST([GNULIB_STRCHRNUL]) + GNULIB_STRDUP=0; AC_SUBST([GNULIB_STRDUP]) + GNULIB_STRNCAT=0; AC_SUBST([GNULIB_STRNCAT]) + GNULIB_STRNDUP=0; AC_SUBST([GNULIB_STRNDUP]) + GNULIB_STRNLEN=0; AC_SUBST([GNULIB_STRNLEN]) + GNULIB_STRPBRK=0; AC_SUBST([GNULIB_STRPBRK]) + GNULIB_STRSEP=0; AC_SUBST([GNULIB_STRSEP]) + GNULIB_STRSTR=0; AC_SUBST([GNULIB_STRSTR]) + GNULIB_STRCASESTR=0; AC_SUBST([GNULIB_STRCASESTR]) + GNULIB_STRTOK_R=0; AC_SUBST([GNULIB_STRTOK_R]) + GNULIB_MBSLEN=0; AC_SUBST([GNULIB_MBSLEN]) + GNULIB_MBSNLEN=0; AC_SUBST([GNULIB_MBSNLEN]) + GNULIB_MBSCHR=0; AC_SUBST([GNULIB_MBSCHR]) + GNULIB_MBSRCHR=0; AC_SUBST([GNULIB_MBSRCHR]) + GNULIB_MBSSTR=0; AC_SUBST([GNULIB_MBSSTR]) + GNULIB_MBSCASECMP=0; AC_SUBST([GNULIB_MBSCASECMP]) + GNULIB_MBSNCASECMP=0; AC_SUBST([GNULIB_MBSNCASECMP]) + GNULIB_MBSPCASECMP=0; AC_SUBST([GNULIB_MBSPCASECMP]) + GNULIB_MBSCASESTR=0; AC_SUBST([GNULIB_MBSCASESTR]) + GNULIB_MBSCSPN=0; AC_SUBST([GNULIB_MBSCSPN]) + GNULIB_MBSPBRK=0; AC_SUBST([GNULIB_MBSPBRK]) + GNULIB_MBSSPN=0; AC_SUBST([GNULIB_MBSSPN]) + GNULIB_MBSSEP=0; 
AC_SUBST([GNULIB_MBSSEP]) + GNULIB_MBSTOK_R=0; AC_SUBST([GNULIB_MBSTOK_R]) + GNULIB_STRERROR=0; AC_SUBST([GNULIB_STRERROR]) + GNULIB_STRERROR_R=0; AC_SUBST([GNULIB_STRERROR_R]) + GNULIB_STRSIGNAL=0; AC_SUBST([GNULIB_STRSIGNAL]) + GNULIB_STRVERSCMP=0; AC_SUBST([GNULIB_STRVERSCMP]) + HAVE_MBSLEN=0; AC_SUBST([HAVE_MBSLEN]) + dnl Assume proper GNU behavior unless another module says otherwise. + HAVE_MEMCHR=1; AC_SUBST([HAVE_MEMCHR]) + HAVE_DECL_MEMMEM=1; AC_SUBST([HAVE_DECL_MEMMEM]) + HAVE_MEMPCPY=1; AC_SUBST([HAVE_MEMPCPY]) + HAVE_DECL_MEMRCHR=1; AC_SUBST([HAVE_DECL_MEMRCHR]) + HAVE_RAWMEMCHR=1; AC_SUBST([HAVE_RAWMEMCHR]) + HAVE_STPCPY=1; AC_SUBST([HAVE_STPCPY]) + HAVE_STPNCPY=1; AC_SUBST([HAVE_STPNCPY]) + HAVE_STRCHRNUL=1; AC_SUBST([HAVE_STRCHRNUL]) + HAVE_DECL_STRDUP=1; AC_SUBST([HAVE_DECL_STRDUP]) + HAVE_DECL_STRNDUP=1; AC_SUBST([HAVE_DECL_STRNDUP]) + HAVE_DECL_STRNLEN=1; AC_SUBST([HAVE_DECL_STRNLEN]) + HAVE_STRPBRK=1; AC_SUBST([HAVE_STRPBRK]) + HAVE_STRSEP=1; AC_SUBST([HAVE_STRSEP]) + HAVE_STRCASESTR=1; AC_SUBST([HAVE_STRCASESTR]) + HAVE_DECL_STRTOK_R=1; AC_SUBST([HAVE_DECL_STRTOK_R]) + HAVE_DECL_STRERROR_R=1; AC_SUBST([HAVE_DECL_STRERROR_R]) + HAVE_DECL_STRSIGNAL=1; AC_SUBST([HAVE_DECL_STRSIGNAL]) + HAVE_STRVERSCMP=1; AC_SUBST([HAVE_STRVERSCMP]) + REPLACE_MEMCHR=0; AC_SUBST([REPLACE_MEMCHR]) + REPLACE_MEMMEM=0; AC_SUBST([REPLACE_MEMMEM]) + REPLACE_STPNCPY=0; AC_SUBST([REPLACE_STPNCPY]) + REPLACE_STRDUP=0; AC_SUBST([REPLACE_STRDUP]) + REPLACE_STRSTR=0; AC_SUBST([REPLACE_STRSTR]) + REPLACE_STRCASESTR=0; AC_SUBST([REPLACE_STRCASESTR]) + REPLACE_STRCHRNUL=0; AC_SUBST([REPLACE_STRCHRNUL]) + REPLACE_STRERROR=0; AC_SUBST([REPLACE_STRERROR]) + REPLACE_STRERROR_R=0; AC_SUBST([REPLACE_STRERROR_R]) + REPLACE_STRNCAT=0; AC_SUBST([REPLACE_STRNCAT]) + REPLACE_STRNDUP=0; AC_SUBST([REPLACE_STRNDUP]) + REPLACE_STRNLEN=0; AC_SUBST([REPLACE_STRNLEN]) + REPLACE_STRSIGNAL=0; AC_SUBST([REPLACE_STRSIGNAL]) + REPLACE_STRTOK_R=0; AC_SUBST([REPLACE_STRTOK_R]) + UNDEFINE_STRTOK_R=0; 
AC_SUBST([UNDEFINE_STRTOK_R]) +]) diff --git a/m4/warn-on-use.m4 b/m4/warn-on-use.m4 new file mode 100644 index 0000000..e0d0f27 --- /dev/null +++ b/m4/warn-on-use.m4 @@ -0,0 +1,45 @@ +# warn-on-use.m4 serial 2 +dnl Copyright (C) 2010-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +# gl_WARN_ON_USE_PREPARE(INCLUDES, NAMES) +# --------------------------------------- +# For each whitespace-separated element in the list of NAMES, define +# HAVE_RAW_DECL_name if the function has a declaration among INCLUDES +# even after being undefined as a macro. +# +# See warn-on-use.h for some hints on how to poison function names, as +# well as ideas on poisoning global variables and macros. NAMES may +# include global variables, but remember that only functions work with +# _GL_WARN_ON_USE. Typically, INCLUDES only needs to list a single +# header, but if the replacement header pulls in other headers because +# some systems declare functions in the wrong header, then INCLUDES +# should do likewise. +# +# If you assume C89, then it is generally safe to assume declarations +# for functions declared in that standard (such as gets) without +# needing gl_WARN_ON_USE_PREPARE. 
+AC_DEFUN([gl_WARN_ON_USE_PREPARE], +[ + m4_foreach_w([gl_decl], [$2], + [AH_TEMPLATE([HAVE_RAW_DECL_]AS_TR_CPP(m4_defn([gl_decl])), + [Define to 1 if ]m4_defn([gl_decl])[ is declared even after + undefining macros.])])dnl + for gl_func in m4_flatten([$2]); do + AS_VAR_PUSHDEF([gl_Symbol], [gl_cv_have_raw_decl_$gl_func])dnl + AC_CACHE_CHECK([whether $gl_func is declared without a macro], + gl_Symbol, + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([$1], +[@%:@undef $gl_func + (void) $gl_func;])], + [AS_VAR_SET(gl_Symbol, [yes])], [AS_VAR_SET(gl_Symbol, [no])])]) + AS_VAR_IF(gl_Symbol, [yes], + [AC_DEFINE_UNQUOTED(AS_TR_CPP([HAVE_RAW_DECL_$gl_func]), [1]) + dnl shortcut - if the raw declaration exists, then set a cache + dnl variable to allow skipping any later AC_CHECK_DECL efforts + eval ac_cv_have_decl_$gl_func=yes]) + AS_VAR_POPDEF([gl_Symbol])dnl + done +]) diff --git a/m4/wchar_t.m4 b/m4/wchar_t.m4 new file mode 100644 index 0000000..d2c03c4 --- /dev/null +++ b/m4/wchar_t.m4 @@ -0,0 +1,24 @@ +# wchar_t.m4 serial 4 (gettext-0.18.2) +dnl Copyright (C) 2002-2003, 2008-2011 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. +dnl Test whether has the 'wchar_t' type. 
+dnl Prerequisite: AC_PROG_CC + +AC_DEFUN([gt_TYPE_WCHAR_T], +[ + AC_CACHE_CHECK([for wchar_t], [gt_cv_c_wchar_t], + [AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [[#include + wchar_t foo = (wchar_t)'\0';]], + [[]])], + [gt_cv_c_wchar_t=yes], + [gt_cv_c_wchar_t=no])]) + if test $gt_cv_c_wchar_t = yes; then + AC_DEFINE([HAVE_WCHAR_T], [1], [Define if you have the 'wchar_t' type.]) + fi +]) diff --git a/mcp2210.c b/mcp2210.c new file mode 100644 index 0000000..83ff95f --- /dev/null +++ b/mcp2210.c @@ -0,0 +1,366 @@ +/* + * Copyright 2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ +#include "miner.h" +#include "usbutils.h" +#include "mcp2210.h" + +static bool mcp2210_send(struct cgpu_info *cgpu, char *buf, enum usb_cmds cmd) +{ + int amount, err; + + if (unlikely(cgpu->usbinfo.nodev)) + return false; + + err = usb_write(cgpu, buf, MCP2210_BUFFER_LENGTH, &amount, cmd); + if (err || amount != MCP2210_BUFFER_LENGTH) { + applog(LOG_WARNING, "%s %d: Error %d sending %s sent %d of %d", + cgpu->drv->name, cgpu->device_id, err, usb_cmdname(cmd), + amount, MCP2210_BUFFER_LENGTH); + return false; + } + return true; +} + +static bool mcp2210_recv(struct cgpu_info *cgpu, char *buf, enum usb_cmds cmd) +{ + int amount, err; + + if (unlikely(cgpu->usbinfo.nodev)) + return false; + + err = usb_read(cgpu, buf, MCP2210_BUFFER_LENGTH, &amount, cmd); + if (err || amount != MCP2210_BUFFER_LENGTH) { + applog(LOG_WARNING, "%s %d: Error %d receiving %s received %d of %d", + cgpu->drv->name, cgpu->device_id, err, usb_cmdname(cmd), + amount, MCP2210_BUFFER_LENGTH); + return false; + } + return true; +} + +bool mcp2210_send_recv(struct cgpu_info *cgpu, char *buf, enum usb_cmds cmd) +{ + uint8_t mcp_cmd = buf[0]; + + if (!mcp2210_send(cgpu, 
buf, cmd)) + return false; + + if (!mcp2210_recv(cgpu, buf, cmd)) + return false; + + /* Return code should always echo original command */ + if (buf[0] != mcp_cmd) { + applog(LOG_WARNING, "%s %d: Response code mismatch, asked for %u got %u", + cgpu->drv->name, cgpu->device_id, mcp_cmd, buf[0]); + return false; + } + return true; +} + +bool mcp2210_get_gpio_settings(struct cgpu_info *cgpu, struct mcp_settings *mcp) +{ + char buf[MCP2210_BUFFER_LENGTH]; + int i; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_GPIO_SETTING; + if (!mcp2210_send_recv(cgpu, buf, C_MCP_GETGPIOSETTING)) + return false; + + for (i = 0; i < 8; i++) { + mcp->designation.pin[i] = buf[4 + i]; + mcp->value.pin[i] = !!(buf[13] & (0x01u << i)); + mcp->direction.pin[i] = !!(buf[15] & (0x01u << i)); + } + mcp->designation.pin[8] = buf[12]; + mcp->value.pin[8] = buf[14] & 0x01u; + mcp->direction.pin[8] = buf[16] & 0x01u; + + return true; +} + +bool mcp2210_set_gpio_settings(struct cgpu_info *cgpu, struct mcp_settings *mcp) +{ + char buf[MCP2210_BUFFER_LENGTH]; + uint8_t buf17; + int i; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_GPIO_SETTING; + if (!mcp2210_send_recv(cgpu, buf, C_MCP_GETGPIOSETTING)) + return false; + buf17 = buf[17]; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_SET_GPIO_SETTING; + buf[17] = buf17; + for (i = 0; i < 8; i++) { + buf[4 + i] = mcp->designation.pin[i]; + buf[13] |= mcp->value.pin[i] << i; + buf[15] |= mcp->direction.pin[i] << i; + } + buf[12] = mcp->designation.pin[8]; + buf[14] = mcp->value.pin[8]; + buf[16] = mcp->direction.pin[8]; + return mcp2210_send_recv(cgpu, buf, C_MCP_SETGPIOSETTING); +} + +/* Get all the pin designations and store them in a gpio_pin struct */ +bool mcp2210_get_gpio_pindes(struct cgpu_info *cgpu, struct gpio_pin *gp) +{ + char buf[MCP2210_BUFFER_LENGTH]; + int i; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_GPIO_SETTING; + if (!mcp2210_send_recv(cgpu, buf, 
C_MCP_GETGPIOSETTING)) + return false; + + for (i = 0; i < 9; i++) + gp->pin[i] = buf[4 + i]; + return true; +} + + +/* Get all the pin vals and store them in a gpio_pin struct */ +bool mcp2210_get_gpio_pinvals(struct cgpu_info *cgpu, struct gpio_pin *gp) +{ + char buf[MCP2210_BUFFER_LENGTH]; + int i; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_GPIO_PIN_VAL; + if (!mcp2210_send_recv(cgpu, buf, C_MCP_GETGPIOPINVAL)) + return false; + + for (i = 0; i < 8; i++) + gp->pin[i] = !!(buf[4] & (0x01u << i)); + gp->pin[8] = buf[5] & 0x01u; + + return true; +} + +/* Get all the pindirs */ +bool mcp2210_get_gpio_pindirs(struct cgpu_info *cgpu, struct gpio_pin *gp) +{ + char buf[MCP2210_BUFFER_LENGTH]; + int i; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_GPIO_PIN_DIR; + if (!mcp2210_send_recv(cgpu, buf, C_MCP_GETGPIOPINDIR)) + return false; + + for (i = 0; i < 8; i++) + gp->pin[i] = !!(buf[4] & (0x01u << i)); + gp->pin[8] = buf[5] & 0x01u; + + return true; +} + +/* Get the designation of one pin */ +bool mcp2210_get_gpio_pin(struct cgpu_info *cgpu, int pin, int *des) +{ + struct gpio_pin gp; + + if (!mcp2210_get_gpio_pindes(cgpu, &gp)) + return false; + + *des = gp.pin[pin]; + return true; +} + +/* Get one pinval */ +bool mcp2210_get_gpio_pinval(struct cgpu_info *cgpu, int pin, int *val) +{ + char buf[MCP2210_BUFFER_LENGTH]; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_GPIO_PIN_VAL; + if (!mcp2210_send_recv(cgpu, buf, C_MCP_GETGPIOPINVAL)) + return false; + + buf[0] = MCP2210_GET_GPIO_PIN_VAL; + + if (pin < 8) + *val = !!(buf[4] & (0x01u << pin)); + else + *val = !!(buf[5] & 0x01u); + + return true; +} + +/* Get one pindir */ +bool mcp2210_get_gpio_pindir(struct cgpu_info *cgpu, int pin, int *dir) +{ + char buf[MCP2210_BUFFER_LENGTH]; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_GPIO_PIN_DIR; + if (!mcp2210_send_recv(cgpu, buf, C_MCP_GETGPIOPINDIR)) + return false; + + buf[0] = 
MCP2210_GET_GPIO_PIN_DIR; + + if (pin < 8) + *dir = !!(buf[4] & (0x01u << pin)); + else + *dir = !!(buf[5] & 0x01u); + + return true; +} + +bool mcp2210_spi_cancel(struct cgpu_info *cgpu) +{ + char buf[MCP2210_BUFFER_LENGTH]; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_SPI_CANCEL; + return mcp2210_send_recv(cgpu, buf, C_MCP_SPICANCEL); +} + +/* Abbreviations correspond to: + * IdleChipSelectValue, ActiveChipSelectValue, CSToDataDelay, LastDataByteToCSDelay, + * SubsequentDataByteDelay, BytesPerSPITransfer + */ +bool +mcp2210_get_spi_transfer_settings(struct cgpu_info *cgpu, unsigned int *bitrate, unsigned int *icsv, + unsigned int *acsv, unsigned int *cstdd, unsigned int *ldbtcsd, + unsigned int *sdbd, unsigned int *bpst, unsigned int *spimode) +{ + char buf[MCP2210_BUFFER_LENGTH]; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_GET_SPI_SETTING; + + if (!mcp2210_send_recv(cgpu, buf, C_MCP_GETSPISETTING)) + return false; + *bitrate = buf[7] << 24 | buf[6] << 16 | buf[5] << 8 | buf[4]; + *icsv = (buf[9] & 0x1) << 8 | buf[8]; + *acsv = (buf[11] & 0x1) << 8 | buf[10]; + *cstdd = buf[13] << 8 | buf[12]; + *ldbtcsd = buf[15] << 8 | buf[14]; + *sdbd = buf[17] << 8 | buf[16]; + *bpst = buf[19] << 8 | buf[18]; + *spimode = buf[20]; + return true; +} + +bool +mcp2210_set_spi_transfer_settings(struct cgpu_info *cgpu, unsigned int bitrate, unsigned int icsv, + unsigned int acsv, unsigned int cstdd, unsigned int ldbtcsd, + unsigned int sdbd, unsigned int bpst, unsigned int spimode) +{ + char buf[MCP2210_BUFFER_LENGTH]; + bool ret; + + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_SET_SPI_SETTING; + + buf[4] = bitrate & 0xfful; + buf[5] = (bitrate & 0xff00ul) >> 8; + buf[6] = (bitrate & 0xff0000ul) >> 16; + buf[7] = (bitrate & 0xff000000ul) >> 24; + + buf[8] = icsv & 0xff; + buf[9] = (icsv & 0x100) >> 8; + + buf[10] = acsv & 0xff; + buf[11] = (acsv & 0x100) >> 8; + + buf[12] = cstdd & 0xff; + buf[13] = (cstdd & 0xff00) >> 8; + + 
buf[14] = ldbtcsd & 0xff; + buf[15] = (ldbtcsd & 0xff00) >> 8; + + buf[16] = sdbd & 0xff; + buf[17] = (sdbd & 0xff00) >> 8; + + buf[18] = bpst & 0xff; + buf[19] = (bpst & 0xff00) >> 8; + + buf[20] = spimode; + ret = mcp2210_send_recv(cgpu, buf, C_MCP_SETSPISETTING); + if (!ret) + return ret; + if (buf[1] != 0) { + applog(LOG_DEBUG, "Failed to set spi settings"); + return false; + } + return true; +} + +/* Perform an spi transfer of *length bytes and return the amount of data + * returned in the same buffer in *length */ +bool mcp2210_spi_transfer(struct cgpu_info *cgpu, struct mcp_settings *mcp, + char *data, unsigned int *length) +{ + uint8_t res, status, orig_len, offset = 0; + char buf[MCP2210_BUFFER_LENGTH]; + + if (unlikely(*length > MCP2210_TRANSFER_MAX || !*length)) { + applog(LOG_ERR, "%s %d: Unable to spi transfer %u bytes", cgpu->drv->name, + cgpu->device_id, *length); + return false; + } + if (mcp->bpst != *length) { + /* Set the transfer setting only when it changes. */ + mcp->bpst = *length; + if (!mcp2210_set_spi_transfer_settings(cgpu, mcp->bitrate, mcp->icsv, + mcp->acsv, mcp->cstdd, mcp->ldbtcsd, mcp->sdbd, mcp->bpst, mcp->spimode)) + return false; + } + orig_len = *length; +retry: + applog(LOG_DEBUG, "%s %d: SPI sending %u bytes", cgpu->drv->name, cgpu->device_id, + *length); + memset(buf, 0, MCP2210_BUFFER_LENGTH); + buf[0] = MCP2210_SPI_TRANSFER; + buf[1] = *length; + + if (*length) + memcpy(buf + 4, data + offset, *length); + if (!mcp2210_send_recv(cgpu, buf, C_MCP_SPITRANSFER)) + return false; + + res = (uint8_t)buf[1]; + switch(res) { + case MCP2210_SPI_TRANSFER_SUCCESS: + *length = buf[2]; + status = buf[3]; + applog(LOG_DEBUG, "%s %d: SPI transfer success, received %u bytes status 0x%x", + cgpu->drv->name, cgpu->device_id, *length, status); + if (*length) { + memcpy(data + offset, buf + 4, *length); + offset += *length; + } + if (status == 0x30) { + /* This shouldn't happen */ + applog(LOG_DEBUG, "%s %d: SPI expecting more data 
inappropriately", + cgpu->drv->name, cgpu->device_id); + return false; + } + if (offset < orig_len) { + *length = 0; + goto retry; + } + *length = orig_len; + return true; + case MCP2210_SPI_TRANSFER_ERROR_IP: + applog(LOG_DEBUG, "%s %d: SPI transfer error in progress", + cgpu->drv->name, cgpu->device_id); + goto retry; + case MCP2210_SPI_TRANSFER_ERROR_NA: + applog(LOG_WARNING, "%s %d: External owner error on mcp2210 spi transfer", + cgpu->drv->name, cgpu->device_id); + default: + return false; + } +} diff --git a/mcp2210.h b/mcp2210.h new file mode 100644 index 0000000..cb75443 --- /dev/null +++ b/mcp2210.h @@ -0,0 +1,73 @@ +/* + * Copyright 2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef MCP2210_H +#define MCP2210_H + +#define MCP2210_BUFFER_LENGTH 64 +#define MCP2210_TRANSFER_MAX 60 + +#define MCP2210_PIN_GPIO 0x0 +#define MCP2210_PIN_CS 0x1 +#define MCP2210_PIN_DEDICATED 0x2 + +#define MCP2210_GPIO_PIN_LOW 0 +#define MCP2210_GPIO_PIN_HIGH 1 + +#define MCP2210_GPIO_OUTPUT 0 +#define MCP2210_GPIO_INPUT 1 + +#define MCP2210_SPI_CANCEL 0x11 +#define MCP2210_GET_GPIO_SETTING 0x20 +#define MCP2210_SET_GPIO_SETTING 0x21 +#define MCP2210_SET_GPIO_PIN_VAL 0x30 +#define MCP2210_GET_GPIO_PIN_VAL 0x31 +#define MCP2210_SET_GPIO_PIN_DIR 0x32 +#define MCP2210_GET_GPIO_PIN_DIR 0x33 +#define MCP2210_SET_SPI_SETTING 0X40 +#define MCP2210_GET_SPI_SETTING 0X41 +#define MCP2210_SPI_TRANSFER 0x42 + +#define MCP2210_SPI_TRANSFER_SUCCESS 0x00 +#define MCP2210_SPI_TRANSFER_ERROR_NA 0xF7 // SPI not available due to external owner +#define MCP2210_SPI_TRANSFER_ERROR_IP 0xF8 // SPI not available due to transfer in progress + +struct gpio_pin { + uint8_t pin[9]; +}; + +struct mcp_settings { + struct gpio_pin 
designation; + struct gpio_pin value; + struct gpio_pin direction; + unsigned int bitrate, icsv, acsv, cstdd, ldbtcsd, sdbd, bpst, spimode; +}; + +bool mcp2210_send_recv(struct cgpu_info *cgpu, char *buf, enum usb_cmds cmd); +bool mcp2210_get_gpio_settings(struct cgpu_info *cgpu, struct mcp_settings *mcp); +bool mcp2210_set_gpio_settings(struct cgpu_info *cgpu, struct mcp_settings *mcp); +bool mcp2210_get_gpio_pindes(struct cgpu_info *cgpu, struct gpio_pin *gp); +bool mcp2210_get_gpio_pinvals(struct cgpu_info *cgpu, struct gpio_pin *gp); +bool mcp2210_get_gpio_pindirs(struct cgpu_info *cgpu, struct gpio_pin *gp); +bool mcp2210_get_gpio_pin(struct cgpu_info *cgpu, int pin, int *des); +bool mcp2210_get_gpio_pinval(struct cgpu_info *cgpu, int pin, int *val); +bool mcp2210_get_gpio_pindir(struct cgpu_info *cgpu, int pin, int *dir); +bool mcp2210_spi_cancel(struct cgpu_info *cgpu); +bool +mcp2210_get_spi_transfer_settings(struct cgpu_info *cgpu, unsigned int *bitrate, unsigned int *icsv, + unsigned int *acsv, unsigned int *cstdd, unsigned int *ldbtcsd, + unsigned int *sdbd, unsigned int *bpst, unsigned int *spimode); +bool +mcp2210_set_spi_transfer_settings(struct cgpu_info *cgpu, unsigned int bitrate, unsigned int icsv, + unsigned int acsv, unsigned int cstdd, unsigned int ldbtcsd, + unsigned int sdbd, unsigned int bpst, unsigned int spimode); +bool mcp2210_spi_transfer(struct cgpu_info *cgpu, struct mcp_settings *mcp, + char *data, unsigned int *length); + +#endif /* MCP2210_H */ diff --git a/miner.h b/miner.h new file mode 100644 index 0000000..04e1afe --- /dev/null +++ b/miner.h @@ -0,0 +1,1670 @@ +#ifndef __MINER_H__ +#define __MINER_H__ + +#include "config.h" + +#include +#include +#include +#include +#include +#ifdef HAVE_LIBCURL +#include +#else +typedef char CURL; +extern char *curly; +#define curl_easy_init(curl) (curly) +#define curl_easy_cleanup(curl) {} +#define curl_global_cleanup() {} +#define CURL_GLOBAL_ALL 0 +#define curl_global_init(X) (0) +#endif 
+#include + +#include "elist.h" +#include "uthash.h" +#include "logging.h" +#include "util.h" +#include +#ifndef WIN32 +# include +# include +#endif + +#ifdef USE_USBUTILS +#include +#endif + +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_ALLOCA_H +# include +#elif defined __GNUC__ +# ifndef WIN32 +# define alloca __builtin_alloca +# else +# include +# endif +#elif defined _AIX +# define alloca __alloca +#elif defined _MSC_VER +# include +# define alloca _alloca +#else +# ifndef HAVE_ALLOCA +# ifdef __cplusplus +extern "C" +# endif +void *alloca (size_t); +# endif +#endif + +#ifdef __MINGW32__ +#include +#include +static inline int fsync (int fd) +{ + return (FlushFileBuffers ((HANDLE) _get_osfhandle (fd))) ? 0 : -1; +} + +#ifndef EWOULDBLOCK +# define EWOULDBLOCK EAGAIN +#endif + +#ifndef MSG_DONTWAIT +# define MSG_DONTWAIT 0x1000000 +#endif +#endif /* __MINGW32__ */ + +#if defined (__linux) + #ifndef LINUX + #define LINUX + #endif +#endif + +#ifdef WIN32 + #ifndef timersub + #define timersub(a, b, result) \ + do { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ + } while (0) + #endif + #ifndef timeradd + # define timeradd(a, b, result) \ + do { \ + (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ + if ((result)->tv_usec >= 1000000) \ + { \ + ++(result)->tv_sec; \ + (result)->tv_usec -= 1000000; \ + } \ + } while (0) + #endif +#endif + + +#ifdef USE_USBUTILS + #include +#endif + +#ifdef USE_USBUTILS + #include "usbutils.h" +#endif + +#if (!defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \ + || (defined(WIN32) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))) +#ifndef bswap_16 + #define bswap_16(value) \ + ((((value) & 0xff) << 8) | ((value) >> 8)) + 
#define bswap_32 __builtin_bswap32 + #define bswap_64 __builtin_bswap64 +#endif +#else +#if HAVE_BYTESWAP_H +#include +#elif defined(USE_SYS_ENDIAN_H) +#include +#elif defined(__APPLE__) +#include +#define bswap_16 OSSwapInt16 +#define bswap_32 OSSwapInt32 +#define bswap_64 OSSwapInt64 +#else +#define bswap_16(value) \ + ((((value) & 0xff) << 8) | ((value) >> 8)) + +#define bswap_32(value) \ + (((uint32_t)bswap_16((uint16_t)((value) & 0xffff)) << 16) | \ + (uint32_t)bswap_16((uint16_t)((value) >> 16))) + +#define bswap_64(value) \ + (((uint64_t)bswap_32((uint32_t)((value) & 0xffffffff)) \ + << 32) | \ + (uint64_t)bswap_32((uint32_t)((value) >> 32))) +#endif +#endif /* !defined(__GLXBYTEORDER_H__) */ + +#ifndef bswap_8 +extern unsigned char bit_swap_table[256]; +#define bswap_8(x) (bit_swap_table[x]) +#endif + +/* This assumes htobe32 is a macro in endian.h, and if it doesn't exist, then + * htobe64 also won't exist */ +#ifndef htobe32 +# if __BYTE_ORDER == __LITTLE_ENDIAN +# define htole8(x) (x) +# define htole16(x) (x) +# define le16toh(x) (x) +# define htole32(x) (x) +# define htole64(x) (x) +# define le32toh(x) (x) +# define le64toh(x) (x) +# define be32toh(x) bswap_32(x) +# define be64toh(x) bswap_64(x) +# define htobe16(x) bswap_16(x) +# define htobe32(x) bswap_32(x) +# define htobe64(x) bswap_64(x) +# elif __BYTE_ORDER == __BIG_ENDIAN +# define htole8(x) bswap_8(x) +# define htole16(x) bswap_16(x) +# define le16toh(x) bswap_16(x) +# define htole32(x) bswap_32(x) +# define le32toh(x) bswap_32(x) +# define le64toh(x) bswap_64(x) +# define htole64(x) bswap_64(x) +# define be32toh(x) (x) +# define be64toh(x) (x) +# define htobe16(x) (x) +# define htobe32(x) (x) +# define htobe64(x) (x) +#else +#error UNKNOWN BYTE ORDER +#endif + +#else + +# if __BYTE_ORDER == __LITTLE_ENDIAN +# define htole8(x) (x) +# elif __BYTE_ORDER == __BIG_ENDIAN +# define htole8(x) bswap_8(x) +#else +#error UNKNOWN BYTE ORDER +#endif + +#endif + +#undef unlikely +#undef likely +#if 
defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__) +#define unlikely(expr) (__builtin_expect(!!(expr), 0)) +#define likely(expr) (__builtin_expect(!!(expr), 1)) +#else +#define unlikely(expr) (expr) +#define likely(expr) (expr) +#endif +#define __maybe_unused __attribute__((unused)) + +#define uninitialised_var(x) x = x + +#if defined(__i386__) +#define WANT_CRYPTOPP_ASM32 +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif + +/* No semtimedop on apple so ignore timeout till we implement one */ +#ifdef __APPLE__ +#define semtimedop(SEM, SOPS, VAL, TIMEOUT) semop(SEM, SOPS, VAL) +#endif + +#ifndef MIN +#define MIN(x, y) ((x) > (y) ? (y) : (x)) +#endif +#ifndef MAX +#define MAX(x, y) ((x) > (y) ? (x) : (y)) +#endif + +/* Put avalon last to make it the last device it tries to detect to prevent it + * trying to claim same chip but different devices. Adding a device here will + * update all macros in the code that use the *_PARSE_COMMANDS macros for each + * listed driver. 
*/ +#define FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \ + DRIVER_ADD_COMMAND(bitforce) \ + DRIVER_ADD_COMMAND(modminer) + +#define ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \ + DRIVER_ADD_COMMAND(bitmain) \ + DRIVER_ADD_COMMAND(bitmain_c5) \ + DRIVER_ADD_COMMAND(bmsc) \ + DRIVER_ADD_COMMAND(avalon) \ + DRIVER_ADD_COMMAND(avalon2) \ + DRIVER_ADD_COMMAND(avalon4) \ + DRIVER_ADD_COMMAND(bflsc) \ + DRIVER_ADD_COMMAND(bitfury) \ + DRIVER_ADD_COMMAND(blockerupter) \ + DRIVER_ADD_COMMAND(cointerra) \ + DRIVER_ADD_COMMAND(hashfast) \ + DRIVER_ADD_COMMAND(hashratio) \ + DRIVER_ADD_COMMAND(icarus) \ + DRIVER_ADD_COMMAND(klondike) \ + DRIVER_ADD_COMMAND(knc) \ + DRIVER_ADD_COMMAND(bitmineA1) \ + DRIVER_ADD_COMMAND(drillbit) \ + DRIVER_ADD_COMMAND(bab) \ + DRIVER_ADD_COMMAND(minion) \ + DRIVER_ADD_COMMAND(sp10) \ + DRIVER_ADD_COMMAND(sp30) + +#define DRIVER_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \ + FPGA_PARSE_COMMANDS(DRIVER_ADD_COMMAND) \ + ASIC_PARSE_COMMANDS(DRIVER_ADD_COMMAND) + +#define DRIVER_ENUM(X) DRIVER_##X, +#define DRIVER_PROTOTYPE(X) struct device_drv X##_drv; + +/* Create drv_driver enum from DRIVER_PARSE_COMMANDS macro */ +enum drv_driver { + DRIVER_PARSE_COMMANDS(DRIVER_ENUM) + DRIVER_MAX +}; + +/* Use DRIVER_PARSE_COMMANDS to generate extern device_drv prototypes */ +DRIVER_PARSE_COMMANDS(DRIVER_PROTOTYPE) + +enum alive { + LIFE_WELL, + LIFE_SICK, + LIFE_DEAD, + LIFE_NOSTART, + LIFE_INIT, +}; + + +enum pool_strategy { + POOL_FAILOVER, + POOL_ROUNDROBIN, + POOL_ROTATE, + POOL_LOADBALANCE, + POOL_BALANCE, +}; + +#define TOP_STRATEGY (POOL_BALANCE) + +struct strategies { + const char *s; +}; + +struct cgpu_info; + +extern void blank_get_statline_before(char *buf, size_t bufsiz, struct cgpu_info __maybe_unused *cgpu); + +struct api_data; +struct thr_info; +struct work; + +struct device_drv { + enum drv_driver drv_id; + + char *dname; + char *name; + + // DRV-global functions + void (*drv_detect)(bool); + + // Device-specific functions + void (*reinit_device)(struct 
cgpu_info *); + void (*get_statline_before)(char *, size_t, struct cgpu_info *); + void (*get_statline)(char *, size_t, struct cgpu_info *); + struct api_data *(*get_api_stats)(struct cgpu_info *); + bool (*get_stats)(struct cgpu_info *); + void (*identify_device)(struct cgpu_info *); // e.g. to flash a led + char *(*set_device)(struct cgpu_info *, char *option, char *setting, char *replybuf); + + // Thread-specific functions + bool (*thread_prepare)(struct thr_info *); + uint64_t (*can_limit_work)(struct thr_info *); + bool (*thread_init)(struct thr_info *); + bool (*prepare_work)(struct thr_info *, struct work *); + + /* Which hash work loop this driver uses. */ + void (*hash_work)(struct thr_info *); + /* Two variants depending on whether the device divides work up into + * small pieces or works with whole work items and may or may not have + * a queue of its own. */ + int64_t (*scanhash)(struct thr_info *, struct work *, int64_t); + int64_t (*scanwork)(struct thr_info *); + + /* Used to extract work from the hash table of queued work and tell + * the main loop that it should not add any further work to the table. + */ + bool (*queue_full)(struct cgpu_info *); + /* Tell the driver of a block change */ + void (*flush_work)(struct cgpu_info *); + /* Tell the driver of an updated work template for eg. stratum */ + void (*update_work)(struct cgpu_info *); + + void (*hw_error)(struct thr_info *); + void (*thread_shutdown)(struct thr_info *); + void (*thread_enable)(struct thr_info *); + + /* What should be zeroed in this device when global zero stats is sent */ + void (*zero_stats)(struct cgpu_info *); + + // Does it need to be free()d? 
+ bool copy; + + /* Highest target diff the device supports */ + double max_diff; + + /* Lowest diff the controller can safely run at */ + double min_diff; +}; + +extern struct device_drv *copy_drv(struct device_drv*); + +enum dev_enable { + DEV_ENABLED, + DEV_DISABLED, + DEV_RECOVER, +}; + +enum dev_reason { + REASON_THREAD_FAIL_INIT, + REASON_THREAD_ZERO_HASH, + REASON_THREAD_FAIL_QUEUE, + REASON_DEV_SICK_IDLE_60, + REASON_DEV_DEAD_IDLE_600, + REASON_DEV_NOSTART, + REASON_DEV_OVER_HEAT, + REASON_DEV_THERMAL_CUTOFF, + REASON_DEV_COMMS_ERROR, + REASON_DEV_THROTTLE, +}; + +#define REASON_NONE "None" +#define REASON_THREAD_FAIL_INIT_STR "Thread failed to init" +#define REASON_THREAD_ZERO_HASH_STR "Thread got zero hashes" +#define REASON_THREAD_FAIL_QUEUE_STR "Thread failed to queue work" +#define REASON_DEV_SICK_IDLE_60_STR "Device idle for 60s" +#define REASON_DEV_DEAD_IDLE_600_STR "Device dead - idle for 600s" +#define REASON_DEV_NOSTART_STR "Device failed to start" +#define REASON_DEV_OVER_HEAT_STR "Device over heated" +#define REASON_DEV_THERMAL_CUTOFF_STR "Device reached thermal cutoff" +#define REASON_DEV_COMMS_ERROR_STR "Device comms error" +#define REASON_DEV_THROTTLE_STR "Device throttle" +#define REASON_UNKNOWN_STR "Unknown reason - code bug" + +#define MIN_SEC_UNSET 99999999 + +struct cgminer_stats { + uint32_t getwork_calls; + struct timeval getwork_wait; + struct timeval getwork_wait_max; + struct timeval getwork_wait_min; +}; + +// Just the actual network getworks to the pool +struct cgminer_pool_stats { + uint32_t getwork_calls; + uint32_t getwork_attempts; + struct timeval getwork_wait; + struct timeval getwork_wait_max; + struct timeval getwork_wait_min; + double getwork_wait_rolling; + bool hadrolltime; + bool canroll; + bool hadexpire; + uint32_t rolltime; + double min_diff; + double max_diff; + double last_diff; + uint32_t min_diff_count; + uint32_t max_diff_count; + uint64_t times_sent; + uint64_t bytes_sent; + uint64_t net_bytes_sent; + uint64_t 
times_received; + uint64_t bytes_received; + uint64_t net_bytes_received; +}; + +struct cgpu_info { + int cgminer_id; + struct device_drv *drv; + int device_id; + char *name; + char *device_path; + void *device_data; + void *dup_data; + char *unique_id; +#ifdef USE_USBUTILS + struct cg_usb_device *usbdev; + struct cg_usb_info usbinfo; + bool blacklisted; + bool nozlp; // Device prefers no zero length packet +#endif +#if defined(USE_AVALON) || defined(USE_AVALON2) + struct work **works; + int work_array; + int queued; + int results; +#endif +#ifdef USE_BITMAIN + int device_fd; + struct work **works; + int work_array; + int queued; + int results; +#endif +#ifdef USE_MODMINER + char fpgaid; + unsigned char clock; + pthread_mutex_t *modminer_mutex; +#endif +#ifdef USE_BITFORCE + struct timeval work_start_tv; + unsigned int wait_ms; + unsigned int sleep_ms; + double avg_wait_f; + unsigned int avg_wait_d; + uint32_t nonces; + bool nonce_range; + bool polling; + bool flash_led; +#endif /* USE_BITFORCE */ +#if defined(USE_BITFORCE) || defined(USE_BFLSC) + pthread_mutex_t device_mutex; +#endif /* USE_BITFORCE || USE_BFLSC */ + enum dev_enable deven; + int accepted; + int rejected; + int hw_errors; + double rolling; + double rolling1; + double rolling5; + double rolling15; + double total_mhashes; + double utility; + enum alive status; + char init[40]; + struct timeval last_message_tv; + + int threads; + struct thr_info **thr; + + int64_t max_hashes; + + const char *kname; + + bool new_work; + + double temp; + int cutofftemp; + + int64_t diff1; + double diff_accepted; + double diff_rejected; + int last_share_pool; + time_t last_share_pool_time; + double last_share_diff; + time_t last_device_valid_work; + uint32_t last_nonce; + + time_t device_last_well; + time_t device_last_not_well; + enum dev_reason device_not_well_reason; + int thread_fail_init_count; + int thread_zero_hash_count; + int thread_fail_queue_count; + int dev_sick_idle_60_count; + int dev_dead_idle_600_count; + 
int dev_nostart_count; + int dev_over_heat_count; // It's a warning but worth knowing + int dev_thermal_cutoff_count; + int dev_comms_error_count; + int dev_throttle_count; + + struct cgminer_stats cgminer_stats; + + pthread_rwlock_t qlock; + struct work *queued_work; + struct work *unqueued_work; + unsigned int queued_count; + + bool shutdown; + + struct timeval dev_start_tv; + + /* For benchmarking only */ + int hidiff; + int lodiff; + int direction; +}; + +extern bool add_cgpu(struct cgpu_info*); + +struct thread_q { + struct list_head q; + + bool frozen; + + pthread_mutex_t mutex; + pthread_cond_t cond; +}; + +struct thr_info { + int id; + int device_thread; + bool primary_thread; + + pthread_t pth; + cgsem_t sem; + struct thread_q *q; + struct cgpu_info *cgpu; + void *cgpu_data; + struct timeval last; + struct timeval sick; + + bool pause; + bool getwork; + + bool work_restart; + bool work_update; +}; + +struct string_elist { + char *string; + bool free_me; + + struct list_head list; +}; + +static inline void string_elist_add(const char *s, struct list_head *head) +{ + struct string_elist *n; + + n = calloc(1, sizeof(*n)); + n->string = strdup(s); + n->free_me = true; + list_add_tail(&n->list, head); +} + +static inline void string_elist_del(struct string_elist *item) +{ + if (item->free_me) + free(item->string); + list_del(&item->list); +} + + +static inline uint32_t swab32(uint32_t v) +{ + return bswap_32(v); +} + +static inline void swap256(void *dest_p, const void *src_p) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + + dest[0] = src[7]; + dest[1] = src[6]; + dest[2] = src[5]; + dest[3] = src[4]; + dest[4] = src[3]; + dest[5] = src[2]; + dest[6] = src[1]; + dest[7] = src[0]; +} + +static inline void swab256(void *dest_p, const void *src_p) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + + dest[0] = swab32(src[7]); + dest[1] = swab32(src[6]); + dest[2] = swab32(src[5]); + dest[3] = swab32(src[4]); + dest[4] = swab32(src[3]); + 
dest[5] = swab32(src[2]); + dest[6] = swab32(src[1]); + dest[7] = swab32(src[0]); +} + +static inline void flip12(void *dest_p, const void *src_p) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + int i; + + for (i = 0; i < 3; i++) + dest[i] = swab32(src[i]); +} + +static inline void flip32(void *dest_p, const void *src_p) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + int i; + + for (i = 0; i < 8; i++) + dest[i] = swab32(src[i]); +} + +static inline void flip64(void *dest_p, const void *src_p) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + int i; + + for (i = 0; i < 16; i++) + dest[i] = swab32(src[i]); +} + +static inline void flip80(void *dest_p, const void *src_p) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + int i; + + for (i = 0; i < 20; i++) + dest[i] = swab32(src[i]); +} + +static inline void flip128(void *dest_p, const void *src_p) +{ + uint32_t *dest = dest_p; + const uint32_t *src = src_p; + int i; + + for (i = 0; i < 32; i++) + dest[i] = swab32(src[i]); +} + +/* For flipping to the correct endianness if necessary */ +#if defined(__BIG_ENDIAN__) || defined(MIPSEB) +static inline void endian_flip32(void *dest_p, const void *src_p) +{ + flip32(dest_p, src_p); +} + +static inline void endian_flip128(void *dest_p, const void *src_p) +{ + flip128(dest_p, src_p); +} +#else +static inline void +endian_flip32(void __maybe_unused *dest_p, const void __maybe_unused *src_p) +{ +} + +static inline void +endian_flip128(void __maybe_unused *dest_p, const void __maybe_unused *src_p) +{ +} +#endif + +extern double cgpu_runtime(struct cgpu_info *cgpu); +extern double tsince_restart(void); +extern double tsince_update(void); +extern void __quit(int status, bool clean); +extern void _quit(int status); + +/* + * Set this to non-zero to enable lock tracking + * Use the API lockstats command to see the locking status on stderr + * i.e. 
in your log file if you 2> log.log - but not on the screen + * API lockstats is privilidged but will always exist and will return + * success if LOCK_TRACKING is enabled and warning if disabled + * In production code, this should never be enabled since it will slow down all locking + * So, e.g. use it to track down a deadlock - after a reproducable deadlock occurs + * ... Of course if the API code itself deadlocks, it wont help :) + */ +#define LOCK_TRACKING 0 + +#if LOCK_TRACKING +enum cglock_typ { + CGLOCK_MUTEX, + CGLOCK_RW, + CGLOCK_UNKNOWN +}; + +extern uint64_t api_getlock(void *lock, const char *file, const char *func, const int line); +extern void api_gotlock(uint64_t id, void *lock, const char *file, const char *func, const int line); +extern uint64_t api_trylock(void *lock, const char *file, const char *func, const int line); +extern void api_didlock(uint64_t id, int ret, void *lock, const char *file, const char *func, const int line); +extern void api_gunlock(void *lock, const char *file, const char *func, const int line); +extern void api_initlock(void *lock, enum cglock_typ typ, const char *file, const char *func, const int line); + +#define GETLOCK(_lock, _file, _func, _line) uint64_t _id1 = api_getlock((void *)(_lock), _file, _func, _line) +#define GOTLOCK(_lock, _file, _func, _line) api_gotlock(_id1, (void *)(_lock), _file, _func, _line) +#define TRYLOCK(_lock, _file, _func, _line) uint64_t _id2 = api_trylock((void *)(_lock), _file, _func, _line) +#define DIDLOCK(_ret, _lock, _file, _func, _line) api_didlock(_id2, _ret, (void *)(_lock), _file, _func, _line) +#define GUNLOCK(_lock, _file, _func, _line) api_gunlock((void *)(_lock), _file, _func, _line) +#define INITLOCK(_lock, _typ, _file, _func, _line) api_initlock((void *)(_lock), _typ, _file, _func, _line) +#else +#define GETLOCK(_lock, _file, _func, _line) +#define GOTLOCK(_lock, _file, _func, _line) +#define TRYLOCK(_lock, _file, _func, _line) +#define DIDLOCK(_ret, _lock, _file, _func, _line) 
+#define GUNLOCK(_lock, _file, _func, _line) +#define INITLOCK(_typ, _lock, _file, _func, _line) +#endif + +#define mutex_lock(_lock) _mutex_lock(_lock, __FILE__, __func__, __LINE__) +#define mutex_unlock_noyield(_lock) _mutex_unlock_noyield(_lock, __FILE__, __func__, __LINE__) +#define mutex_unlock(_lock) _mutex_unlock(_lock, __FILE__, __func__, __LINE__) +#define mutex_trylock(_lock) _mutex_trylock(_lock, __FILE__, __func__, __LINE__) +#define wr_lock(_lock) _wr_lock(_lock, __FILE__, __func__, __LINE__) +#define wr_trylock(_lock) _wr_trylock(_lock, __FILE__, __func__, __LINE__) +#define rd_lock(_lock) _rd_lock(_lock, __FILE__, __func__, __LINE__) +#define rw_unlock(_lock) _rw_unlock(_lock, __FILE__, __func__, __LINE__) +#define rd_unlock_noyield(_lock) _rd_unlock_noyield(_lock, __FILE__, __func__, __LINE__) +#define wr_unlock_noyield(_lock) _wr_unlock_noyield(_lock, __FILE__, __func__, __LINE__) +#define rd_unlock(_lock) _rd_unlock(_lock, __FILE__, __func__, __LINE__) +#define wr_unlock(_lock) _wr_unlock(_lock, __FILE__, __func__, __LINE__) +#define mutex_init(_lock) _mutex_init(_lock, __FILE__, __func__, __LINE__) +#define rwlock_init(_lock) _rwlock_init(_lock, __FILE__, __func__, __LINE__) +#define cglock_init(_lock) _cglock_init(_lock, __FILE__, __func__, __LINE__) +#define cg_rlock(_lock) _cg_rlock(_lock, __FILE__, __func__, __LINE__) +#define cg_ilock(_lock) _cg_ilock(_lock, __FILE__, __func__, __LINE__) +#define cg_uilock(_lock) _cg_uilock(_lock, __FILE__, __func__, __LINE__) +#define cg_ulock(_lock) _cg_ulock(_lock, __FILE__, __func__, __LINE__) +#define cg_wlock(_lock) _cg_wlock(_lock, __FILE__, __func__, __LINE__) +#define cg_dwlock(_lock) _cg_dwlock(_lock, __FILE__, __func__, __LINE__) +#define cg_dwilock(_lock) _cg_dwilock(_lock, __FILE__, __func__, __LINE__) +#define cg_dlock(_lock) _cg_dlock(_lock, __FILE__, __func__, __LINE__) +#define cg_runlock(_lock) _cg_runlock(_lock, __FILE__, __func__, __LINE__) +#define cg_ruwlock(_lock) _cg_ruwlock(_lock, 
__FILE__, __func__, __LINE__) +#define cg_wunlock(_lock) _cg_wunlock(_lock, __FILE__, __func__, __LINE__) + +static inline void _mutex_lock(pthread_mutex_t *lock, const char *file, const char *func, const int line) +{ + GETLOCK(lock, file, func, line); + if (unlikely(pthread_mutex_lock(lock))) + quitfrom(1, file, func, line, "WTF MUTEX ERROR ON LOCK! errno=%d", errno); + GOTLOCK(lock, file, func, line); +} + +static inline void _mutex_unlock_noyield(pthread_mutex_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_mutex_unlock(lock))) + quitfrom(1, file, func, line, "WTF MUTEX ERROR ON UNLOCK! errno=%d", errno); + GUNLOCK(lock, file, func, line); +} + +static inline void _mutex_unlock(pthread_mutex_t *lock, const char *file, const char *func, const int line) +{ + _mutex_unlock_noyield(lock, file, func, line); + selective_yield(); +} + +static inline int _mutex_trylock(pthread_mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line) +{ + TRYLOCK(lock, file, func, line); + int ret = pthread_mutex_trylock(lock); + DIDLOCK(ret, lock, file, func, line); + return ret; +} + +static inline void _wr_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + GETLOCK(lock, file, func, line); + if (unlikely(pthread_rwlock_wrlock(lock))) + quitfrom(1, file, func, line, "WTF WRLOCK ERROR ON LOCK! 
errno=%d", errno); + GOTLOCK(lock, file, func, line); +} + +static inline int _wr_trylock(pthread_rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line) +{ + TRYLOCK(lock, file, func, line); + int ret = pthread_rwlock_trywrlock(lock); + DIDLOCK(ret, lock, file, func, line); + return ret; +} + +static inline void _rd_lock(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + GETLOCK(lock, file, func, line); + if (unlikely(pthread_rwlock_rdlock(lock))) + quitfrom(1, file, func, line, "WTF RDLOCK ERROR ON LOCK! errno=%d", errno); + GOTLOCK(lock, file, func, line); +} + +static inline void _rw_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_rwlock_unlock(lock))) + quitfrom(1, file, func, line, "WTF RWLOCK ERROR ON UNLOCK! errno=%d", errno); + GUNLOCK(lock, file, func, line); +} + +static inline void _rd_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + _rw_unlock(lock, file, func, line); +} + +static inline void _wr_unlock_noyield(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + _rw_unlock(lock, file, func, line); +} + +static inline void _rd_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + _rw_unlock(lock, file, func, line); + selective_yield(); +} + +static inline void _wr_unlock(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + _rw_unlock(lock, file, func, line); + selective_yield(); +} + +static inline void _mutex_init(pthread_mutex_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_mutex_init(lock, NULL))) + quitfrom(1, file, func, line, "Failed to pthread_mutex_init errno=%d", errno); + INITLOCK(lock, CGLOCK_MUTEX, file, func, line); +} + +static inline void mutex_destroy(pthread_mutex_t *lock) +{ + /* Ignore return code. 
This only invalidates the mutex on linux but + * releases resources on windows. */ + pthread_mutex_destroy(lock); +} + +static inline void _rwlock_init(pthread_rwlock_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_rwlock_init(lock, NULL))) + quitfrom(1, file, func, line, "Failed to pthread_rwlock_init errno=%d", errno); + INITLOCK(lock, CGLOCK_RW, file, func, line); +} + +static inline void rwlock_destroy(pthread_rwlock_t *lock) +{ + pthread_rwlock_destroy(lock); +} + +static inline void _cglock_init(cglock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_init(&lock->mutex, file, func, line); + _rwlock_init(&lock->rwlock, file, func, line); +} + +static inline void cglock_destroy(cglock_t *lock) +{ + rwlock_destroy(&lock->rwlock); + mutex_destroy(&lock->mutex); +} + +/* Read lock variant of cglock. Cannot be promoted. */ +static inline void _cg_rlock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_lock(&lock->mutex, file, func, line); + _rd_lock(&lock->rwlock, file, func, line); + _mutex_unlock_noyield(&lock->mutex, file, func, line); +} + +/* Intermediate variant of cglock - behaves as a read lock but can be promoted + * to a write lock or demoted to read lock. 
*/ +static inline void _cg_ilock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_lock(&lock->mutex, file, func, line); +} + +/* Unlock intermediate variant without changing to read or write version */ +static inline void _cg_uilock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_unlock(&lock->mutex, file, func, line); +} + +/* Upgrade intermediate variant to a write lock */ +static inline void _cg_ulock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _wr_lock(&lock->rwlock, file, func, line); +} + +/* Write lock variant of cglock */ +static inline void _cg_wlock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_lock(&lock->mutex, file, func, line); + _wr_lock(&lock->rwlock, file, func, line); +} + +/* Downgrade write variant to a read lock */ +static inline void _cg_dwlock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _wr_unlock_noyield(&lock->rwlock, file, func, line); + _rd_lock(&lock->rwlock, file, func, line); + _mutex_unlock_noyield(&lock->mutex, file, func, line); +} + +/* Demote a write variant to an intermediate variant */ +static inline void _cg_dwilock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _wr_unlock(&lock->rwlock, file, func, line); +} + +/* Downgrade intermediate variant to a read lock */ +static inline void _cg_dlock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _rd_lock(&lock->rwlock, file, func, line); + _mutex_unlock_noyield(&lock->mutex, file, func, line); +} + +static inline void _cg_runlock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _rd_unlock(&lock->rwlock, file, func, line); +} + +/* This drops the read lock and grabs a write lock. It does NOT protect data + * between the two locks! 
*/ +static inline void _cg_ruwlock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _rd_unlock_noyield(&lock->rwlock, file, func, line); + _cg_wlock(lock, file, func, line); +} + +static inline void _cg_wunlock(cglock_t *lock, const char *file, const char *func, const int line) +{ + _wr_unlock_noyield(&lock->rwlock, file, func, line); + _mutex_unlock(&lock->mutex, file, func, line); +} + +struct pool; + +#define API_LISTEN_ADDR "0.0.0.0" +#define API_MCAST_CODE "FTW" +#define API_MCAST_ADDR "224.0.0.75" + +extern bool g_logfile_enable; +extern char g_logfile_path[256]; +extern char g_logfile_openflag[32]; +extern FILE * g_logwork_file; +extern FILE * g_logwork_files[65]; +extern FILE * g_logwork_diffs[65]; +extern int g_logwork_asicnum; + +extern bool opt_work_update; +extern bool opt_protocol; +extern bool have_longpoll; +extern char *opt_kernel_path; +extern char *opt_socks_proxy; +extern int opt_suggest_diff; +extern int opt_multi_version; +extern char *cgminer_path; +extern bool opt_fail_only; +extern bool opt_lowmem; +extern bool opt_autofan; +extern bool opt_autoengine; +extern bool use_curses; +extern char *opt_logwork_path; +extern char *opt_logwork_asicnum; +extern bool opt_logwork_diff; +extern char *opt_api_allow; +extern bool opt_api_mcast; +extern char *opt_api_mcast_addr; +extern char *opt_api_mcast_code; +extern char *opt_api_mcast_des; +extern int opt_api_mcast_port; +extern char *opt_api_groups; +extern char *opt_api_description; +extern int opt_api_port; +extern char *opt_api_host; +extern bool opt_api_listen; +extern bool opt_api_network; +extern bool opt_delaynet; +extern time_t last_getwork; +extern bool opt_restart; +#ifdef USE_ICARUS +extern char *opt_icarus_options; +extern char *opt_icarus_timing; +extern float opt_anu_freq; +extern float opt_au3_freq; +extern int opt_au3_volt; +extern float opt_rock_freq; +#endif +extern bool opt_worktime; +#ifdef USE_AVALON +extern char *opt_avalon_options; +extern char 
*opt_bitburner_fury_options; +#endif +#ifdef USE_KLONDIKE +extern char *opt_klondike_options; +#endif +#ifdef USE_DRILLBIT +extern char *opt_drillbit_options; +extern char *opt_drillbit_auto; +#endif +#ifdef USE_BAB +extern char *opt_bab_options; +#endif +#ifdef USE_BITMINE_A1 +extern char *opt_bitmine_a1_options; +#endif +#ifdef USE_BITMAIN +extern char *opt_bitmain_options; +extern bool opt_bitmain_hwerror; +extern bool opt_bitmain_checkall; +extern char *opt_bitmain_freq; +extern char *opt_bitmain_voltage; +extern bool opt_bitmain_checkn2diff; +extern bool opt_bitmain_nobeeper; +extern bool opt_bitmain_notempoverctrl; +extern bool opt_bitmain_homemode; +#endif +#ifdef USE_BMSC +extern char *opt_bmsc_options; +extern char *opt_bmsc_timing; +extern bool opt_bmsc_gray; +extern char *opt_bmsc_bandops; +extern char *opt_bmsc_voltage; +extern bool opt_bmsc_bootstart; +extern char *opt_bmsc_freq; +extern char *opt_bmsc_rdreg; +extern bool opt_bmsc_rdworktest; +#endif +#ifdef USE_MINION +extern int opt_minion_chipreport; +extern char *opt_minion_cores; +extern bool opt_minion_extra; +extern char *opt_minion_freq; +extern int opt_minion_freqchange; +extern int opt_minion_freqpercent; +extern bool opt_minion_idlecount; +extern int opt_minion_ledcount; +extern int opt_minion_ledlimit; +extern bool opt_minion_noautofreq; +extern bool opt_minion_overheat; +extern int opt_minion_spidelay; +extern char *opt_minion_spireset; +extern int opt_minion_spisleep; +extern int opt_minion_spiusec; +extern char *opt_minion_temp; +#endif +#ifdef USE_USBUTILS +extern char *opt_usb_select; +extern int opt_usbdump; +extern bool opt_usb_list_all; +extern cgsem_t usb_resource_sem; +#endif +#ifdef USE_BITFORCE +extern bool opt_bfl_noncerange; +#endif +extern int swork_id; + +#if LOCK_TRACKING +extern pthread_mutex_t lockstat_lock; +#endif + +extern pthread_rwlock_t netacc_lock; + +extern const uint32_t sha256_init_state[]; +#ifdef HAVE_LIBCURL +extern json_t *json_web_config(const char *url); 
+extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, + const char *rpc_req, bool, bool, int *, + struct pool *pool, bool); +#endif +extern const char *proxytype(proxytypes_t proxytype); +extern char *get_proxy(char *url, struct pool *pool); +extern void __bin2hex(char *s, const unsigned char *p, size_t len); +extern char *bin2hex(const unsigned char *p, size_t len); +extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len); + +typedef bool (*sha256_func)(struct thr_info*, const unsigned char *pmidstate, + unsigned char *pdata, + unsigned char *phash1, unsigned char *phash, + const unsigned char *ptarget, + uint32_t max_nonce, + uint32_t *last_nonce, + uint32_t nonce); + +extern bool fulltest(const unsigned char *hash, const unsigned char *target); + +extern int opt_queue; +extern int opt_scantime; +extern int opt_expiry; + +extern cglock_t control_lock; +extern pthread_mutex_t hash_lock; +extern pthread_mutex_t console_lock; +extern cglock_t ch_lock; +extern pthread_rwlock_t mining_thr_lock; +extern pthread_rwlock_t devices_lock; + +extern pthread_mutex_t restart_lock; +extern pthread_cond_t restart_cond; + +extern void clear_stratum_shares(struct pool *pool); +extern void clear_pool_work(struct pool *pool); +extern void set_target(unsigned char *dest_target, double diff); +#if defined (USE_AVALON2) || defined (USE_AVALON4) || defined (USE_HASHRATIO) +bool submit_nonce2_nonce(struct thr_info *thr, struct pool *pool, struct pool *real_pool, + uint32_t nonce2, uint32_t nonce, uint32_t ntime); +#endif +extern int restart_wait(struct thr_info *thr, unsigned int mstime); + +extern void kill_work(void); + +extern void reinit_device(struct cgpu_info *cgpu); + +extern void api(int thr_id); + +extern struct pool *current_pool(void); +extern int enabled_pools; +extern void get_intrange(char *arg, int *val1, int *val2); +extern bool detect_stratum(struct pool *pool, char *url); +extern void print_summary(void); +extern void 
adjust_quota_gcd(void); +extern struct pool *add_pool(void); +extern bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass); + +#define MAX_DEVICES 4096 + +extern char g_miner_version[256]; +extern char g_miner_compiletime[256]; +extern char g_miner_type[256]; +extern bool hotplug_mode; +extern int hotplug_time; +extern struct list_head scan_devices; +extern int nDevs; +extern int num_processors; +extern int hw_errors; +extern bool use_syslog; +extern bool opt_quiet; +extern struct thr_info *control_thr; +extern struct thr_info **mining_thr; +extern double total_secs; +extern int mining_threads; +extern int total_devices; +extern int zombie_devs; +extern struct cgpu_info **devices; +extern int total_pools; +extern struct pool **pools; +extern struct strategies strategies[]; +extern enum pool_strategy pool_strategy; +extern int opt_rotate_period; +extern double rolling1, rolling5, rolling15; +extern double total_rolling; +extern double total_mhashes_done; +extern double g_displayed_rolling; +extern char displayed_hash_rate[16]; +extern unsigned int new_blocks; +extern unsigned int found_blocks; +extern int g_max_fan, g_max_temp; +extern int64_t total_accepted, total_rejected, total_diff1; +extern int64_t total_getworks, total_stale, total_discarded; +extern double total_diff_accepted, total_diff_rejected, total_diff_stale; +extern unsigned int local_work; +extern unsigned int total_go, total_ro; +extern const int opt_cutofftemp; +extern int opt_log_interval; +extern unsigned long long global_hashrate; +extern char current_hash[68]; +extern double current_diff; +extern uint64_t best_diff; +extern struct timeval block_timeval; +extern char *workpadding; + +struct curl_ent { + CURL *curl; + struct list_head node; + struct timeval tv; +}; + +/* Disabled needs to be the lowest enum as a freshly calloced value will then + * equal disabled */ +enum pool_enable { + POOL_DISABLED, + POOL_ENABLED, + POOL_REJECTING, +}; + +struct stratum_work { 
+ char *job_id; + unsigned char **merkle_bin; + bool clean; + + double diff; +}; + +#define RBUFSIZE 8192 +#define RECVSIZE (RBUFSIZE - 4) + +struct pool { + int pool_no; + int prio; + int64_t accepted, rejected; + int seq_rejects; + int seq_getfails; + int solved; + int64_t diff1; + char diff[8]; + int quota; + int quota_gcd; + int quota_used; + int works; + + double diff_accepted; + double diff_rejected; + double diff_stale; + + bool submit_fail; + bool idle; + bool lagging; + bool probed; + enum pool_enable enabled; + bool submit_old; + bool removed; + bool lp_started; + bool blocking; + + char *hdr_path; + char *lp_url; + + unsigned int getwork_requested; + unsigned int stale_shares; + unsigned int discarded_work; + unsigned int getfail_occasions; + unsigned int remotefail_occasions; + struct timeval tv_idle; + + double utility; + int last_shares, shares; + + char *rpc_req; + char *rpc_url; + char *rpc_userpass; + char *rpc_user, *rpc_pass; + proxytypes_t rpc_proxytype; + char *rpc_proxy; + + pthread_mutex_t pool_lock; + cglock_t data_lock; + + struct thread_q *submit_q; + struct thread_q *getwork_q; + + pthread_t longpoll_thread; + pthread_t test_thread; + bool testing; + + int curls; + pthread_cond_t cr_cond; + struct list_head curlring; + + time_t last_share_time; + double last_share_diff; + uint64_t best_diff; + + struct cgminer_stats cgminer_stats; + struct cgminer_pool_stats cgminer_pool_stats; + + /* The last block this particular pool knows about */ + char prev_block[32]; + + /* Stratum variables */ + char *stratum_url; + bool extranonce_subscribe; + char *stratum_port; + SOCKETTYPE sock; + char *sockbuf; + size_t sockbuf_size; + char *sockaddr_url; /* stripped url used for sockaddr */ + char *sockaddr_proxy_url; + char *sockaddr_proxy_port; + + char *nonce1; + unsigned char *nonce1bin; + uint64_t nonce2; + int n2size; + char *sessionid; + bool has_stratum; + bool stratum_active; + bool stratum_init; + bool stratum_notify; +#ifdef USE_BITMAIN_C5 + bool 
support_vil; + int version_num; + int version[4]; +#endif + struct stratum_work swork; + pthread_t stratum_sthread; + pthread_t stratum_rthread; + pthread_mutex_t stratum_lock; + struct thread_q *stratum_q; + int sshares; /* stratum shares submitted waiting on response */ + + /* GBT variables */ + bool has_gbt; + cglock_t gbt_lock; + unsigned char previousblockhash[32]; + unsigned char gbt_target[32]; + char *coinbasetxn; + char *longpollid; + char *gbt_workid; + int gbt_expires; + uint32_t gbt_version; + uint32_t curtime; + uint32_t gbt_bits; + unsigned char *txn_hashes; + int gbt_txns; + int height; + + bool gbt_solo; + unsigned char merklebin[16 * 32]; + int transactions; + char *txn_data; + unsigned char scriptsig_base[100]; + unsigned char script_pubkey[25 + 3]; + int nValue; + CURL *gbt_curl; + bool gbt_curl_inuse; + + /* Shared by both stratum & GBT */ + size_t n1_len; + unsigned char *coinbase; + int coinbase_len; + int nonce2_offset; + unsigned char header_bin[128]; + int merkles; + char prev_hash[68]; + char bbversion[12]; + char nbit[12]; + char ntime[12]; + double sdiff; + + struct timeval tv_lastwork; +}; + +#define GETWORK_MODE_TESTPOOL 'T' +#define GETWORK_MODE_POOL 'P' +#define GETWORK_MODE_LP 'L' +#define GETWORK_MODE_BENCHMARK 'B' +#define GETWORK_MODE_STRATUM 'S' +#define GETWORK_MODE_GBT 'G' +#define GETWORK_MODE_SOLO 'C' + +struct work { + unsigned char data[128]; + unsigned char midstate[32]; + unsigned char target[32]; + unsigned char hash[32]; + + /* This is the diff the device is currently aiming for and must be + * the minimum of work_difficulty & drv->max_diff */ + double device_diff; + uint64_t share_diff; + + int rolls; + int drv_rolllimit; /* How much the driver can roll ntime */ + uint32_t nonce; /* For devices that hash sole work */ + + struct thr_info *thr; + int thr_id; + struct pool *pool; + struct timeval tv_staged; + + bool mined; + bool clone; + bool cloned; + int rolltime; + bool longpoll; + bool stale; + bool mandatory; + 
bool block; + + bool stratum; + char *job_id; + uint64_t nonce2; + size_t nonce2_len; + char *ntime; + double sdiff; + char *nonce1; + + bool gbt; + char *coinbase; + int gbt_txns; + + unsigned int work_block; + uint32_t id; + UT_hash_handle hh; + + /* This is the diff work we're aiming to submit and should match the + * work->target binary */ + double work_difficulty; + + // Allow devices to identify work if multiple sub-devices + int subid; + // Allow devices to flag work for their own purposes + bool devflag; + // Allow devices to timestamp work for their own purposes + struct timeval tv_stamp; + + struct timeval tv_getwork; + struct timeval tv_getwork_reply; + struct timeval tv_cloned; + struct timeval tv_work_start; + struct timeval tv_work_found; + char getwork_mode; +#ifdef USE_BITMAIN_C5 + int version; +#endif + + +}; + +#ifdef USE_MODMINER +struct modminer_fpga_state { + bool work_running; + struct work running_work; + struct timeval tv_workstart; + uint32_t hashes; + + char next_work_cmd[46]; + char fpgaid; + + bool overheated; + bool new_work; + + uint32_t shares; + uint32_t shares_last_hw; + uint32_t hw_errors; + uint32_t shares_to_good; + uint32_t timeout_fail; + uint32_t success_more; + struct timeval last_changed; + struct timeval last_nonce; + struct timeval first_work; + bool death_stage_one; + bool tried_two_byte_temp; + bool one_byte_temp; +}; +#endif + +#define TAILBUFSIZ 64 + +#define tailsprintf(buf, bufsiz, fmt, ...) 
do { \ + char tmp13[TAILBUFSIZ]; \ + size_t len13, buflen = strlen(buf); \ + snprintf(tmp13, sizeof(tmp13), fmt, ##__VA_ARGS__); \ + len13 = strlen(tmp13); \ + if ((buflen + len13) >= bufsiz) \ + quit(1, "tailsprintf buffer overflow in %s %s line %d", __FILE__, __func__, __LINE__); \ + strcat(buf, tmp13); \ +} while (0) + +extern uint64_t share_ndiff(const struct work *work); +extern void get_datestamp(char *, size_t, struct timeval *); +extern void inc_hw_errors(struct thr_info *thr); +extern void inc_dev_status(int max_fan, int max_temp); +extern void inc_work_stats(struct thr_info *thr, struct pool *pool, int diff1); +extern bool test_nonce(struct work *work, uint32_t nonce); +extern bool test_nonce_diff(struct work *work, uint32_t nonce, double diff); +extern bool submit_tested_work(struct thr_info *thr, struct work *work); +extern bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce); +extern bool submit_noffset_nonce(struct thr_info *thr, struct work *work, uint32_t nonce, + int noffset); +extern int share_work_tdiff(struct cgpu_info *cgpu); +extern bool submit_nonce_1(struct thr_info *thr, struct work *work, uint32_t nonce, int * nofull); +extern void submit_nonce_2(struct work *work); +extern bool submit_nonce_direct(struct thr_info *thr, struct work *work, uint32_t nonce); +extern bool submit_noffset_nonce(struct thr_info *thr, struct work *work, uint32_t nonce, int noffset); +extern struct work *get_work(struct thr_info *thr, const int thr_id); +extern void __add_queued(struct cgpu_info *cgpu, struct work *work); +extern struct work *get_queued(struct cgpu_info *cgpu); +extern struct work *__get_queued(struct cgpu_info *cgpu); +extern void add_queued(struct cgpu_info *cgpu, struct work *work); +extern struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id); +extern struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); +extern 
struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); +extern struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); +extern struct work *__find_work_byid(struct work *que, uint32_t id); +extern struct work *find_queued_work_byid(struct cgpu_info *cgpu, uint32_t id); +extern struct work *clone_queued_work_byid(struct cgpu_info *cgpu, uint32_t id); +extern void __work_completed(struct cgpu_info *cgpu, struct work *work); +extern int age_queued_work(struct cgpu_info *cgpu, double secs); +extern void work_completed(struct cgpu_info *cgpu, struct work *work); +extern struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); +extern void flush_queue(struct cgpu_info *cgpu); +extern void hash_driver_work(struct thr_info *mythr); +extern void hash_queued_work(struct thr_info *mythr); +extern void _wlog(const char *str); +extern void _wlogprint(const char *str); +extern int curses_int(const char *query); +extern char *curses_input(const char *query); +extern void kill_work(void); +extern void switch_pools(struct pool *selected); +extern void _discard_work(struct work *work); +#define discard_work(WORK) do { \ + _discard_work(WORK); \ + WORK = NULL; \ +} while (0) +extern void remove_pool(struct pool *pool); +extern void write_config(FILE *fcfg); +extern void zero_bestshare(void); +extern void zero_stats(void); +extern void default_save_file(char *filename); +extern bool log_curses_only(int prio, const char *datetime, const char *str); +extern void clear_logwin(void); +extern void logwin_update(void); +extern bool pool_tclear(struct pool *pool, bool *var); +extern void stratum_resumed(struct pool *pool); +extern void pool_died(struct pool *pool); +extern struct thread_q *tq_new(void); +extern void tq_free(struct 
thread_q *tq);
extern bool tq_push(struct thread_q *tq, void *data);
extern void *tq_pop(struct thread_q *tq, const struct timespec *abstime);
extern void tq_freeze(struct thread_q *tq);
extern void tq_thaw(struct thread_q *tq);
extern bool successful_connect;
extern void adl(void);
extern void app_restart(void);
extern void roll_work(struct work *work);
extern struct work *make_clone(struct work *work);
extern void clean_work(struct work *work);
extern void _free_work(struct work *work);
/* Free a work item and NULL the caller's pointer to guard against reuse. */
#define free_work(WORK) do { \
	_free_work(WORK); \
	WORK = NULL; \
} while (0)
extern void set_work_ntime(struct work *work, int ntime);
extern struct work *copy_work_noffset(struct work *base_work, int noffset);
#define copy_work(work_in) copy_work_noffset(work_in, 0)
extern uint64_t share_diff(const struct work *work);
extern struct thr_info *get_thread(int thr_id);
extern struct cgpu_info *get_devices(int id);

/* Tag describing how an api_data value should be interpreted and
 * formatted when it is emitted by the API code. */
enum api_data_type {
	API_ESCAPE,
	API_STRING,
	API_CONST,
	API_UINT8,
	API_INT16,
	API_UINT16,
	API_INT,
	API_UINT,
	API_UINT32,
	API_HEX32,
	API_UINT64,
	API_INT64,
	API_DOUBLE,
	API_ELAPSED,
	API_BOOL,
	API_TIMEVAL,
	API_TIME,
	API_MHS,
	API_MHTOTAL,
	API_TEMP,
	API_UTILITY,
	API_FREQ,
	API_VOLTS,
	API_HS,
	API_DIFF,
	API_PERCENT,
	API_AVG
};

/* Doubly linked list node holding one typed name/value pair for API output.
 * data_was_malloc presumably marks data as heap-owned by this node —
 * confirm ownership semantics against the api_add_* implementations. */
struct api_data {
	enum api_data_type type;
	char *name;
	void *data;
	bool data_was_malloc;
	struct api_data *prev;
	struct api_data *next;
};

extern struct api_data *api_add_escape(struct api_data *root, char *name, char *data, bool copy_data);
extern struct api_data *api_add_string(struct api_data *root, char *name, char *data, bool copy_data);
extern struct api_data *api_add_const(struct api_data *root, char *name, const char *data, bool copy_data);
extern struct api_data *api_add_uint8(struct api_data *root, char *name, uint8_t *data, bool copy_data);
/* NOTE(review): api_add_int16 takes uint16_t* although the tag is API_INT16;
 * this looks like it should be int16_t* — confirm against the definition
 * before changing, as the prototype must match it exactly. */
extern struct api_data *api_add_int16(struct api_data *root, char *name, uint16_t *data, bool copy_data);
+extern struct api_data *api_add_uint16(struct api_data *root, char *name, uint16_t *data, bool copy_data); +extern struct api_data *api_add_int(struct api_data *root, char *name, int *data, bool copy_data); +extern struct api_data *api_add_uint(struct api_data *root, char *name, unsigned int *data, bool copy_data); +extern struct api_data *api_add_uint32(struct api_data *root, char *name, uint32_t *data, bool copy_data); +extern struct api_data *api_add_hex32(struct api_data *root, char *name, uint32_t *data, bool copy_data); +extern struct api_data *api_add_uint64(struct api_data *root, char *name, uint64_t *data, bool copy_data); +extern struct api_data *api_add_double(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_elapsed(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_bool(struct api_data *root, char *name, bool *data, bool copy_data); +extern struct api_data *api_add_timeval(struct api_data *root, char *name, struct timeval *data, bool copy_data); +extern struct api_data *api_add_time(struct api_data *root, char *name, time_t *data, bool copy_data); +extern struct api_data *api_add_mhs(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_mhstotal(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_temp(struct api_data *root, char *name, float *data, bool copy_data); +extern struct api_data *api_add_utility(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_freq(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_volts(struct api_data *root, char *name, float *data, bool copy_data); +extern struct api_data *api_add_hs(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_diff(struct api_data *root, char *name, double *data, bool 
copy_data); +extern struct api_data *api_add_percent(struct api_data *root, char *name, double *data, bool copy_data); +extern struct api_data *api_add_avg(struct api_data *root, char *name, float *data, bool copy_data); + +extern void dupalloc(struct cgpu_info *cgpu, int timelimit); +extern void dupcounters(struct cgpu_info *cgpu, uint64_t *checked, uint64_t *dups); +extern bool isdupnonce(struct cgpu_info *cgpu, struct work *work, uint32_t nonce); + +#if defined(USE_BITMAIN) || defined(USE_BMSC) +extern void rev(unsigned char *s, size_t l); +extern int check_asicnum(int asic_num, unsigned char nonce); +#endif +#endif /* __MINER_H__ */ diff --git a/miner.php b/miner.php new file mode 100644 index 0000000..6d4d210 --- /dev/null +++ b/miner.php @@ -0,0 +1,3306 @@ +\n"; +# +# See API-README for more details of these variables and how +# to configure miner.php +# +# Web page title +$title = 'Mine'; +# +# Set $readonly to true to force miner.php to be readonly +# Set $readonly to false then it will check cgminer 'privileged' +$readonly = false; +# +# Set $userlist to null to allow anyone access or read API-README +$userlist = null; +# +# Set $notify to false to NOT attempt to display the notify command +# Set $notify to true to attempt to display the notify command +$notify = true; +# +# Set $checklastshare to true to do the following checks: +# If a device's last share is 12x expected ago then display as an error +# If a device's last share is 8x expected ago then display as a warning +# If either of the above is true, also display the whole line highlighted +# This assumes shares are 1 difficulty shares +$checklastshare = true; +# +# Set $poolinputs to true to show the input fields for adding a pool +# and changing the pool priorities +# N.B. 
also if $readonly is true, it will not display the fields +$poolinputs = false; +# +# Default port to use if any $rigs entries don't specify the port number +$rigport = 4028; +# +# Set $rigs to an array of your cgminer rigs that are running +# format: 'IP' or 'Host' or 'IP:Port' or 'Host:Port' or 'Host:Port:Name' +$rigs = array('127.0.0.1:4028'); +# +# Set $rignames to false, or one of 'ip' or 'ipx' +# this says what to use if $rigs doesn't have a 'name' +$rignames = false; +# +# Set $rigbuttons to false to display a link rather than a button +$rigbuttons = true; +# +# Set $mcast to true to look for your rigs and ignore $rigs +$mcast = false; +# +# Set $mcastexpect to at least how many rigs you expect it to find +$mcastexpect = 0; +# +# API Multicast address all cgminers are listening on +$mcastaddr = '224.0.0.75'; +# +# API Multicast UDP port all cgminers are listening on +$mcastport = 4028; +# +# The code all cgminers expect in the Multicast message sent +$mcastcode = 'FTW'; +# +# UDP port cgminers are to reply on (by request) +$mcastlistport = 4027; +# +# Set $mcasttimeout to the number of seconds (floating point) +# to wait for replies to the Multicast message +$mcasttimeout = 1.5; +# +# Set $mcastretries to the number of times to retry the multicast +$mcastretries = 0; +# +# Set $allowgen to true to allow customsummarypages to use 'gen' +# false means ignore any 'gen' options +$allowgen = false; +# +# Set $rigipsecurity to false to show the IP/Port of the rig +# in the socket error messages and also show the full socket message +$rigipsecurity = true; +# +# Set $rigtotals to true to display totals on the single rig page +# 'false' means no totals (and ignores $forcerigtotals) +# You can force it to always show rig totals when there is only +# one line by setting $forcerigtotals = true; +$rigtotals = true; +$forcerigtotals = false; +# +# These should be OK for most cases +$socksndtimeoutsec = 10; +$sockrcvtimeoutsec = 40; +# +# List of fields NOT to be 
displayed +# This example would hide the slightly more sensitive pool information +#$hidefields = array('POOL.URL' => 1, 'POOL.User' => 1); +$hidefields = array(); +# +# Auto-refresh of the page (in seconds) - integers only +# $ignorerefresh = true/false always ignore refresh parameters +# $changerefresh = true/false show buttons to change the value +# $autorefresh = default value, 0 means dont auto-refresh +$ignorerefresh = false; +$changerefresh = true; +$autorefresh = 0; +# +# Should we allow custom pages? +# (or just completely ignore them and don't display the buttons) +$allowcustompages = true; +# +# OK this is a bit more complex item: Custom Summary Pages +# As mentioned above, see API-README +# see the example below (if there is no matching data, no total will show) +$mobilepage = array( + 'DATE' => null, + 'RIGS' => null, + 'SUMMARY' => array('Elapsed', 'MHS av', 'MHS 5m', 'Found Blocks=Blks', + 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', + 'Hardware Errors=HW', + 'Work Utility=WU'), + 'DEVS+NOTIFY' => array('DEVS.Name=Name', 'DEVS.ID=ID', 'DEVS.Status=Status', + 'DEVS.Temperature=Temp', 'DEVS.MHS av=MHS av', + 'DEVS.MHS 5m=MHS 5m', 'DEVS.Difficulty Accepted=DiffA', + 'DEVS.Difficulty Rejected=DiffR', + 'DEVS.Work Utility=WU', + 'NOTIFY.Last Not Well=Not Well'), + 'POOL' => array('POOL', 'Status', 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', 'Last Share Time=LST')); +$mobilesum = array( + 'SUMMARY' => array('MHS av', 'MHS 5m', 'Found Blocks', 'Difficulty Accepted', + 'Difficulty Rejected', 'Hardware Errors', + 'Work Utility'), + 'DEVS+NOTIFY' => array('DEVS.MHS av', 'DEVS.Difficulty Accepted', + 'DEVS.Difficulty Rejected'), + 'POOL' => array('Difficulty Accepted', 'Difficulty Rejected')); +# +$statspage = array( + 'DATE' => null, + 'RIGS' => null, + 'SUMMARY' => array('Elapsed', 'MHS av', 'MHS 5m', 'Found Blocks=Blks', + 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', + 'Work Utility=WU', 'Hardware Errors=HW 
Errs', + 'Network Blocks=Net Blks'), + 'COIN' => array('*'), + 'STATS' => array('*')); +# +$statssum = array( + 'SUMMARY' => array('MHS av', 'MHS 5m', 'Found Blocks', + 'Difficulty Accepted', 'Difficulty Rejected', + 'Work Utility', 'Hardware Errors')); +# +$poolspage = array( + 'DATE' => null, + 'RIGS' => null, + 'SUMMARY' => array('Elapsed', 'MHS av', 'MHS 5m', 'Found Blocks=Blks', + 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', + 'Work Utility', 'Hardware Errors=HW', + 'Network Blocks=Net Blks', 'Best Share'), + 'POOL+STATS' => array('STATS.ID=ID', 'POOL.URL=URL', + 'POOL.Difficulty Accepted=DiffA', + 'POOL.Difficulty Rejected=DiffR', + 'POOL.Has Stratum=Stratum', + 'POOL.Stratum Active=StrAct', + 'POOL.Has GBT=GBT', 'STATS.Times Sent=TSent', + 'STATS.Bytes Sent=BSent', 'STATS.Net Bytes Sent=NSent', + 'STATS.Times Recv=TRecv', 'STATS.Bytes Recv=BRecv', + 'STATS.Net Bytes Recv=NRecv', 'GEN.AvShr=AvShr')); +# +$poolssum = array( + 'SUMMARY' => array('MHS av', 'MHS 5m', 'Found Blocks', + 'Difficulty Accepted', 'Difficulty Rejected', + 'Work Utility', 'Hardware Errors'), + 'POOL+STATS' => array('POOL.Difficulty Accepted', 'POOL.Difficulty Rejected', + 'STATS.Times Sent', 'STATS.Bytes Sent', + 'STATS.Net Bytes Sent', 'STATS.Times Recv', + 'STATS.Bytes Recv', 'STATS.Net Bytes Recv')); +# +$poolsext = array( + 'POOL+STATS' => array( + 'where' => null, + 'group' => array('POOL.URL', 'POOL.Has Stratum', + 'POOL.Stratum Active', 'POOL.Has GBT'), + 'calc' => array('POOL.Difficulty Accepted' => 'sum', + 'POOL.Difficulty Rejected' => 'sum', + 'STATS.Times Sent' => 'sum', + 'STATS.Bytes Sent' => 'sum', + 'STATS.Net Bytes Sent' => 'sum', + 'STATS.Times Recv' => 'sum', + 'STATS.Bytes Recv' => 'sum', + 'STATS.Net Bytes Recv' => 'sum', + 'POOL.Accepted' => 'sum'), + 'gen' => array('AvShr' => + 'round(POOL.Difficulty Accepted/'. 
+ 'max(POOL.Accepted,1)*100)/100'), + 'having' => array(array('STATS.Bytes Recv', '>', 0))) +); +# +$devnotpage = array( + 'DATE' => null, + 'RIGS' => null, + 'DEVS+NOTIFY' => array('DEVS.Name=Name', 'DEVS.ID=ID', + 'DEVS.Temperature=Temp', 'DEVS.MHS av=MHS av', + 'DEVS.Difficulty Accepted=DiffA', + 'DEVS.Difficulty Rejected=DiffR', + 'NOTIFY.Last Not Well=Last Not Well')); +$devnotsum = array( + 'DEVS+NOTIFY' => array('DEVS.MHS av', 'DEVS.Difficulty Accepted', + 'DEVS.Difficulty Rejected')); +# +$devdetpage = array( + 'DATE' => null, + 'RIGS' => null, + 'DEVS+DEVDETAILS' => array('DEVS.Name=Name', 'DEVS.ID=ID', + 'DEVS.Temperature=Temp', + 'DEVS.MHS av=MHS av', + 'DEVS.Difficulty Accepted=DiffA', + 'DEVS.Difficulty Rejected=DiffR', + 'DEVDETAILS.Device Path=Device')); +$devdetsum = array( + 'DEVS+DEVDETAILS' => array('DEVS.MHS av', 'DEVS.Difficulty Accepted', + 'DEVS.Difficulty Rejected')); +# +$protopage = array( + 'DATE' => null, + 'RIGS' => null, + 'CONFIG' => array('ASC Count=ASCs', 'PGA Count=PGAs', 'Pool Count=Pools', + 'Strategy', 'Device Code', 'OS', 'Failover-Only'), + 'SUMMARY' => array('Elapsed', 'MHS av', 'Found Blocks=Blks', + 'Difficulty Accepted=Diff Acc', + 'Difficulty Rejected=Diff Rej', + 'Hardware Errors=HW Errs', + 'Network Blocks=Net Blks', 'Utility', 'Work Utility'), + 'POOL+STATS' => array('STATS.ID=ID', 'POOL.URL=URL', 'POOL.Accepted=Acc', + 'POOL.Difficulty Accepted=DiffA', + 'POOL.Difficulty Rejected=DiffR', 'POOL.Has GBT=GBT', + 'STATS.Max Diff=Max Work Diff', + 'STATS.Times Sent=#Sent', 'STATS.Bytes Sent=Byte Sent', + 'STATS.Net Bytes Sent=Net Sent', + 'STATS.Times Recv=#Recv', + 'STATS.Bytes Recv=Byte Recv', + 'STATS.Net Bytes Recv=Net Recv')); +$protosum = array( + 'SUMMARY' => array('MHS av', 'Found Blocks', 'Difficulty Accepted', + 'Difficulty Rejected', 'Hardware Errors', + 'Utility', 'Work Utility'), + 'POOL+STATS' => array('POOL.Accepted', 'POOL.Difficulty Accepted', + 'POOL.Difficulty Rejected', + 'STATS.Times Sent', 
'STATS.Bytes Sent', + 'STATS.Net Bytes Sent', 'STATS.Times Recv', + 'STATS.Bytes Recv', 'STATS.Net Bytes Recv')); +$protoext = array( + 'POOL+STATS' => array( + 'where' => null, + 'group' => array('POOL.URL', 'POOL.Has GBT'), + 'calc' => array('POOL.Accepted' => 'sum', + 'POOL.Difficulty Accepted' => 'sum', + 'POOL.Difficulty Rejected' => 'sum', + 'STATS.Max Diff' => 'max', + 'STATS.Times Sent' => 'sum', + 'STATS.Bytes Sent' => 'sum', + 'STATS.Net Bytes Sent' => 'sum', + 'STATS.Times Recv' => 'sum', + 'STATS.Bytes Recv' => 'sum', + 'STATS.Net Bytes Recv' => 'sum'), + 'having' => array(array('STATS.Bytes Recv', '>', 0))) +); +# +# If 'gen' isn't enabled, the 'GEN' fields won't show but +# where present, will be replaced with the ||SUMMARY fields +$kanogenpage = array( + 'DATE' => null, + 'RIGS' => null, + 'SUMMARY+COIN' => array('SUMMARY.Elapsed=Elapsed', + 'GEN.Mined=Block%', 'GEN.GHS Acc=GH/s Acc', + 'GEN.GHS av=GH/s av||SUMMARY.MHS av=MHS av', + 'GEN.GHS 5m=GH/s 5m||SUMMARY.MHS 5m=MHS 5m', + 'GEN.GHS WU=GH/s WU||SUMMARY.Work Utility=WU', + 'SUMMARY.Found Blocks=Blks', + 'SUMMARY.Difficulty Accepted=DiffA', + 'SUMMARY.Difficulty Rejected=DiffR', + 'SUMMARY.Hardware Errors=HW', + 'SUMMARY.Difficulty Stale=DiffS', + 'SUMMARY.Best Share=Best Share', + 'SUMMARY.Device Hardware%=Dev HW%', + 'SUMMARY.Device Rejected%=Dev Rej%', + 'SUMMARY.Pool Rejected%=Pool Rej%', + 'SUMMARY.Pool Stale%=Pool Stale%'), + 'POOL' => array('URL', 'Diff1 Shares=Diff Work', + 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', + 'Difficulty Stale=DiffS', + 'Best Share', 'GEN.Acc=Pool Acc%', 'GEN.Rej=Pool Rej%') +); +# sum should list all fields seperately including GEN/BGEN || replacements +$kanogensum = array( + 'SUMMARY+COIN' => array('GEN.Mined', 'GEN.GHS Acc', 'GEN.GHS av', + 'GEN.GHS 5m', 'GEN.GHS WU', + 'SUMMARY.MHS av', 'SUMMARY.MHS 5m', + 'SUMMARY.Work Utility', + 'SUMMARY.Found Blocks', + 'SUMMARY.Difficulty Accepted', + 'SUMMARY.Difficulty Rejected', + 'SUMMARY.Hardware 
Errors', + 'SUMMARY.Difficulty Stale'), + 'POOL' => array('Diff1 Shares', 'Difficulty Accepted', + 'Difficulty Rejected', 'Difficulty Stale') +); +# 'where', 'calc' and 'having' should list GEN/BGEN || replacements seperately +# 'group' must use the 'name1||name2' format for GEN/BGEN fields +$kanogenext = array( + 'SUMMARY+COIN' => array( + 'gen' => array('GHS Acc' => + 'round(pow(2,32) * SUMMARY.Difficulty Accepted / '. + 'SUMMARY.Elapsed / 10000000) / 100', + 'Mined' => + 'SUMMARY.Elapsed * SUMMARY.Work Utility / 60 / '. + 'COIN.Network Difficulty', + 'GHS av' => + 'SUMMARY.MHS av / 1000.0', + 'GHS 5m' => + 'SUMMARY.MHS 5m / 1000.0', + 'GHS WU' => + 'round(pow(2,32) * SUMMARY.Work Utility / 60 / '. + '10000000 ) / 100')), + 'POOL' => array( + 'group' => array('URL'), + 'calc' => array('Diff1 Shares' => 'sum', 'Difficulty Accepted' => 'sum', + 'Difficulty Rejected' => 'sum', + 'Difficulty Stale' => 'sum', 'Best Share' => 'max'), + 'gen' => array('Rej' => 'Difficulty Rejected / '. + 'max(1,Difficulty Accepted+Difficulty Rejected)', + 'Acc' => 'Difficulty Accepted / '. 
+ 'max(1,Difficulty Accepted+Difficulty Rejected)')) +); +# +$syspage = array( + 'DATE' => null, + 'RIGS' => null, + 'SUMMARY' => array('#', 'Elapsed', 'MHS av', 'MHS 5m', 'Found Blocks=Blks', + 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', + 'Difficulty Stale=DiffS', 'Hardware Errors=HW', + 'Work Utility', 'Network Blocks=Net Blks', 'Total MH', + 'Best Share', 'Device Hardware%=Dev HW%', + 'Device Rejected%=Dev Rej%', + 'Pool Rejected%=Pool Rej%', 'Pool Stale%', + 'Last getwork'), + 'DEVS' => array('#', 'ID', 'Name', 'ASC', 'Device Elapsed', 'Enabled', + 'Status', 'No Device', 'Temperature=Temp', + 'MHS av', 'MHS 5s', 'MHS 5m', 'Diff1 Work', + 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', + 'Hardware Errors=HW', 'Work Utility', + 'Last Valid Work', 'Last Share Pool', + 'Last Share Time', 'Total MH', + 'Device Hardware%=Dev HW%', + 'Device Rejected%=Dev Rej%'), + 'POOL' => array('POOL', 'URL', 'Status', 'Priority', 'Quota', + 'Getworks', 'Diff1 Shares', + 'Difficulty Accepted=DiffA', + 'Difficulty Rejected=DiffR', + 'Difficulty Stale=DiffS', + 'Last Share Difficulty', + 'Last Share Time', + 'Best Share', 'Pool Rejected%=Pool Rej%', + 'Pool Stale%') +); +$syssum = array( + 'SUMMARY' => array('MHS av', 'MHS 5m', 'Found Blocks', + 'Difficulty Accepted', 'Difficulty Rejected', + 'Difficulty Stale', 'Hardware Errors', + 'Work Utility', 'Total MH'), + 'DEVS' => array('MHS av', 'MHS 5s', 'MHS 5m', 'Diff1 Work', + 'Difficulty Accepted', 'Difficulty Rejected', + 'Hardware Errors', 'Total MH'), + 'POOL' => array('Getworks', 'Diff1 Shares', 'Difficulty Accepted', + 'Difficulty Rejected', 'Difficulty Stale') +); +# +# $customsummarypages is an array of these Custom Summary Pages +# that you can override in myminer.php +# It can be 'Name' => 1 with 'Name' in any of $user_pages or $sys_pages +# and it can be a fully defined 'Name' => array(...) 
like in $sys_pages below +$customsummarypages = array( + 'Kano' => 1, + 'Mobile' => 1, + 'Stats' => 1, + 'Pools' => 1 +); +# +# $user_pages are the myminer.php definable version of $sys_pages +# It should contain a set of 'Name' => array(...) like in $sys_pages +# that $customsummarypages can refer to by 'Name' +# If a 'Name' is in both $user_pages and $sys_pages, then the one +# in $user_pages will override the one in $sys_pages +$user_pages = array(); +# +$here = $_SERVER['PHP_SELF']; +# +global $tablebegin, $tableend, $warnfont, $warnoff, $dfmt; +# +$tablebegin = ''; +$tableend = '
'; +$warnfont = ''; +$warnoff = ''; +$dfmt = 'H:i:s j-M-Y \U\T\CP'; +# +$miner_font_family = 'Verdana, Arial, sans-serif, sans'; +$miner_font_size = '13pt'; +# +$bad_font_family = '"Times New Roman", Times, serif'; +$bad_font_size = '18pt'; +# +# List of css names to add to the css style object +# e.g. array('td.cool' => false); +# true/false to not include the default $miner_font +# The css name/value pairs must be defined in $colouroverride below +$add_css_names = array(); +# +# Edit this or redefine it in myminer.php to change the colour scheme +# See $colourtable below for the list of names +$colouroverride = array(); +# +# Where to place the buttons: 'top' 'bot' 'both' +# anything else means don't show them - case sensitive +$placebuttons = 'top'; +# +# This below allows you to put your own settings into a seperate file +# so you don't need to update miner.php with your preferred settings +# every time a new version is released +# Just create the file 'myminer.php' in the same directory as +# 'miner.php' - and put your own settings in there +if (file_exists('myminer.php')) + include_once('myminer.php'); +# +# This is the system default that must always contain all necessary +# colours so it must be a constant +# You can override these values with $colouroverride +# The only one missing is $warnfont +# - which you can override directly anyway +global $colourtable; +$colourtable = array( + 'body bgcolor' => '#ecffff', + 'td color' => 'blue', + 'td.two color' => 'blue', + 'td.two background' => '#ecffff', + 'td.h color' => 'blue', + 'td.h background' => '#c4ffff', + 'td.err color' => 'black', + 'td.err background' => '#ff3050', + 'td.bad color' => 'black', + 'td.bad background' => '#ff3050', + 'td.warn color' => 'black', + 'td.warn background' => '#ffb050', + 'td.sta color' => 'green', + 'td.tot color' => 'blue', + 'td.tot background' => '#fff8f2', + 'td.lst color' => 'blue', + 'td.lst background' => '#ffffdd', + 'td.hi color' => 'blue', + 'td.hi background' => 
'#f6ffff', + 'td.lo color' => 'blue', + 'td.lo background' => '#deffff' +); +# +# A list of system default summary pages (defined further above) +# that you can use by 'Name' in $customsummarypages +global $sys_pages; +$sys_pages = array( + 'Mobile' => array($mobilepage, $mobilesum), + 'Stats' => array($statspage, $statssum), + 'Pools' => array($poolspage, $poolssum, $poolsext), + 'DevNot' => array($devnotpage, $devnotsum), + 'DevDet' => array($devdetpage, $devdetsum), + 'Proto' => array($protopage, $protosum, $protoext), + 'Kano' => array($kanogenpage, $kanogensum, $kanogenext), + 'Summary' => array($syspage, $syssum) +); +# +# Don't touch these 2 +$miner = null; +$port = null; +# +global $rigips; +$rigips = array(); +# +# Ensure it is only ever shown once +global $showndate; +$showndate = false; +# +global $rownum; +$rownum = 0; +# +// Login +global $ses; +$ses = 'rutroh'; +# +function getcsp($name, $systempage = false) +{ + global $customsummarypages, $user_pages, $sys_pages; + + if ($systempage === false) + { + if (!isset($customsummarypages[$name])) + return false; + + $csp = $customsummarypages[$name]; + if (is_array($csp)) + { + if (count($csp) < 2 || count($csp) > 3) + return false; + else + return $csp; + } + } + + if (isset($user_pages[$name])) + { + $csp = $user_pages[$name]; + if (!is_array($csp) || count($csp) < 2 || count($csp) > 3) + return false; + else + return $csp; + } + + if (isset($sys_pages[$name])) + { + $csp = $sys_pages[$name]; + if (!is_array($csp) || count($csp) < 2 || count($csp) > 3) + return false; + else + return $csp; + } + + return false; +} +# +function degenfields(&$sec, $name, $fields) +{ + global $allowgen; + + if (!is_array($fields)) + return; + + foreach ($fields as $num => $fld) + if (substr($fld, 0, 5) == 'BGEN.' 
|| substr($fld, 0, 4) == 'GEN.') + { + $opts = explode('||', $fld, 2); + if ($allowgen) + { + if (count($opts) > 1) + $sec[$name][$num] = $opts[0]; + } + else + { + if (count($opts) > 1) + $sec[$name][$num] = $opts[1]; + else + unset($sec[$name][$num]); + } + } +} +# +# Allow BGEN/GEN fields to have a '||' replacement when gen is disabled +# N.B. if gen is disabled and all page fields are GBEN/GEN without '||' then +# the table will disappear +# Replacements can be in the page fields and then also the ext group fields +# All other $csp sections should list both separately +function degen(&$csp) +{ + $page = 0; + if (isset($csp[$page]) && is_array($csp[$page])) + foreach ($csp[$page] as $sec => $fields) + degenfields($csp[$page], $sec, $fields); + + $ext = 2; + if (isset($csp[$ext]) && is_array($csp[$ext])) + foreach ($csp[$ext] as $sec => $types) + if (is_array($types) && isset($types['group'])) + degenfields($types, 'group', $types['group']); +} +# +function getcss($cssname, $dom = false) +{ + global $colourtable, $colouroverride; + + $css = ''; + foreach ($colourtable as $cssdata => $value) + { + $cssobj = explode(' ', $cssdata, 2); + if ($cssobj[0] == $cssname) + { + if (isset($colouroverride[$cssdata])) + $value = $colouroverride[$cssdata]; + + if ($dom == true) + $css .= ' '.$cssobj[1].'='.$value; + else + $css .= $cssobj[1].':'.$value.'; '; + } + } + return $css; +} +# +function getdom($domname) +{ + return getcss($domname, true); +} +# +# N.B. 
don't call this before calling htmlhead() +function php_pr($cmd) +{ + global $here, $autorefresh; + + return "$here?ref=$autorefresh$cmd"; +} +# +function htmlhead($mcerr, $checkapi, $rig, $pg = null, $noscript = false) +{ + global $doctype, $title, $miner_font_family, $miner_font_size; + global $bad_font_family, $bad_font_size, $add_css_names; + global $error, $readonly, $poolinputs, $here; + global $ignorerefresh, $autorefresh; + + $extraparams = ''; + if ($rig != null && $rig != '') + $extraparams = "&rig=$rig"; + else + if ($pg != null && $pg != '') + $extraparams = "&pg=$pg"; + + if ($ignorerefresh == true || $autorefresh == 0) + $refreshmeta = ''; + else + { + $url = "$here?ref=$autorefresh$extraparams"; + $refreshmeta = "\n"; + } + + if ($readonly === false && $checkapi === true) + { + $error = null; + $access = api($rig, 'privileged'); + if ($error != null + || !isset($access['STATUS']['STATUS']) + || $access['STATUS']['STATUS'] != 'S') + $readonly = true; + } + $miner_font = "font-family:$miner_font_family; font-size:$miner_font_size;"; + $bad_font = "font-family:$bad_font_family; font-size:$bad_font_size;"; + + echo "$doctype$refreshmeta +$title + +\n"; +if ($noscript === false) +{ +echo "\n"; +} +?> + +
+ + 0); + do + { + $mcast_soc = socket_create(AF_INET, SOCK_DGRAM, SOL_UDP); + if ($mcast_soc === false || $mcast_soc == null) + { + $msg = "ERR: mcast send socket create(UDP) failed"; + if ($rigipsecurity === false) + { + $error = socket_strerror(socket_last_error()); + $error = "$msg '$error'\n"; + } + else + $error = "$msg\n"; + + socket_close($rep_soc); + return; + } + + $buf = "cgminer-$mcastcode-$mcastlistport"; + socket_sendto($mcast_soc, $buf, strlen($buf), 0, $mcastaddr, $mcastport); + socket_close($mcast_soc); + + $stt = microtime(true); + while (true) + { + $got = @socket_recvfrom($rep_soc, $buf, 32, MSG_DONTWAIT, $ip, $p); + if ($got !== false && $got > 0) + { + $ans = explode('-', $buf, 4); + if (count($ans) >= 3 && $ans[0] == 'cgm' && $ans[1] == 'FTW') + { + $rp = intval($ans[2]); + + if (count($ans) > 3) + $mdes = str_replace("\0", '', $ans[3]); + else + $mdes = ''; + + if (strlen($mdes) > 0) + $rig = "$ip:$rp:$mdes"; + else + $rig = "$ip:$rp"; + + if (!in_array($rig, $rigs)) + $rigs[] = $rig; + } + } + if ((microtime(true) - $stt) >= $mcasttimeout) + break; + + usleep(100000); + } + + if ($mcastexpect > 0 && count($rigs) >= $mcastexpect) + $doretry = false; + + } while ($doretry && --$retries > 0); + + socket_close($rep_soc); +} +# +function getrigs() +{ + global $rigs; + + mcastrigs(); + + sort($rigs); +} +# +function getsock($rig, $addr, $port) +{ + global $rigport, $rigips, $rignames, $rigipsecurity; + global $haderror, $error, $socksndtimeoutsec, $sockrcvtimeoutsec; + + $port = trim($port); + if (strlen($port) == 0) + $port = $rigport; + $error = null; + $socket = null; + $socket = socket_create(AF_INET, SOCK_STREAM, SOL_TCP); + if ($socket === false || $socket === null) + { + $haderror = true; + if ($rigipsecurity === false) + { + $error = socket_strerror(socket_last_error()); + $msg = "socket create(TCP) failed"; + $error = "ERR: $msg '$error'\n"; + } + else + $error = "ERR: socket create(TCP) failed\n"; + + return null; + } + + // Ignore if 
this fails since the socket connect may work anyway + // and nothing is gained by aborting if the option cannot be set + // since we don't know in advance if it can connect + socket_set_option($socket, SOL_SOCKET, SO_SNDTIMEO, array('sec' => $socksndtimeoutsec, 'usec' => 0)); + socket_set_option($socket, SOL_SOCKET, SO_RCVTIMEO, array('sec' => $sockrcvtimeoutsec, 'usec' => 0)); + + $res = socket_connect($socket, $addr, $port); + if ($res === false) + { + $haderror = true; + if ($rigipsecurity === false) + { + $error = socket_strerror(socket_last_error()); + $msg = "socket connect($addr,$port) failed"; + $error = "ERR: $msg '$error'\n"; + } + else + $error = "ERR: socket connect($rig) failed\n"; + + socket_close($socket); + return null; + } + if ($rignames !== false && !isset($rigips[$addr])) + if (socket_getpeername($socket, $ip) == true) + $rigips[$addr] = $ip; + return $socket; +} +# +function readsockline($socket) +{ + $line = ''; + while (true) + { + $byte = socket_read($socket, 1); + if ($byte === false || $byte === '') + break; + if ($byte === "\0") + break; + $line .= $byte; + } + return $line; +} +# +function api_convert_escape($str) +{ + $res = ''; + $len = strlen($str); + for ($i = 0; $i < $len; $i++) + { + $ch = substr($str, $i, 1); + if ($ch != '\\' || $i == ($len-1)) + $res .= $ch; + else + { + $i++; + $ch = substr($str, $i, 1); + switch ($ch) + { + case '|': + $res .= "\1"; + break; + case '\\': + $res .= "\2"; + break; + case '=': + $res .= "\3"; + break; + case ',': + $res .= "\4"; + break; + default: + $res .= $ch; + } + } + } + return $res; +} +# +function revert($str) +{ + return str_replace(array("\1", "\2", "\3", "\4"), array("|", "\\", "=", ","), $str); +} +# +function api($rig, $cmd) +{ + global $haderror, $error; + global $miner, $port, $hidefields; + + $socket = getsock($rig, $miner, $port); + if ($socket != null) + { + socket_write($socket, $cmd, strlen($cmd)); + $line = readsockline($socket); + socket_close($socket); + + if (strlen($line) 
== 0) + { + $haderror = true; + $error = "WARN: '$cmd' returned nothing\n"; + return $line; + } + +# print "$cmd returned '$line'\n"; + + $line = api_convert_escape($line); + + $data = array(); + + $objs = explode('|', $line); + foreach ($objs as $obj) + { + if (strlen($obj) > 0) + { + $items = explode(',', $obj); + $item = $items[0]; + $id = explode('=', $items[0], 2); + if (count($id) == 1 or !ctype_digit($id[1])) + $name = $id[0]; + else + $name = $id[0].$id[1]; + + if (strlen($name) == 0) + $name = 'null'; + + $sectionname = preg_replace('/\d/', '', $name); + + if (isset($data[$name])) + { + $num = 1; + while (isset($data[$name.$num])) + $num++; + $name .= $num; + } + + $counter = 0; + foreach ($items as $item) + { + $id = explode('=', $item, 2); + + if (isset($hidefields[$sectionname.'.'.$id[0]])) + continue; + + if (count($id) == 2) + $data[$name][$id[0]] = revert($id[1]); + else + $data[$name][$counter] = $id[0]; + + $counter++; + } + } + } + return $data; + } + return null; +} +# +function getparam($name, $both = false) +{ + $a = null; + if (isset($_POST[$name])) + $a = $_POST[$name]; + + if (($both === true) and ($a === null)) + { + if (isset($_GET[$name])) + $a = $_GET[$name]; + } + + if ($a == '' || $a == null) + return null; + + // limit to 1K just to be safe + return substr($a, 0, 1024); +} +# +function newtable() +{ + global $tablebegin, $rownum; + echo $tablebegin; + $rownum = 0; +} +# +function newrow() +{ + echo ''; +} +# +function othrow($row) +{ + return "$row"; +} +# +function otherrow($row) +{ + echo othrow($row); +} +# +function endrow() +{ + global $rownum; + echo ''; + $rownum++; +} +# +function endtable() +{ + global $tableend; + echo $tableend; +} +# +function classlastshare($when, $alldata, $warnclass, $errorclass) +{ + global $checklastshare; + + if ($checklastshare === false) + return ''; + + if ($when == 0) + return ''; + + if (!isset($alldata['MHS av'])) + return ''; + + if ($alldata['MHS av'] == 0) + return ''; + + if 
(!isset($alldata['Last Share Time'])) + return ''; + + if (!isset($alldata['Last Share Difficulty'])) + return ''; + + $expected = pow(2, 32) / ($alldata['MHS av'] * pow(10, 6)); + + // If the share difficulty changes while waiting on a share, + // this calculation will of course be incorrect + $expected *= $alldata['Last Share Difficulty']; + + $howlong = $when - $alldata['Last Share Time']; + if ($howlong < 1) + $howlong = 1; + + if ($howlong > ($expected * 12)) + return $errorclass; + + if ($howlong > ($expected * 8)) + return $warnclass; + + return ''; +} +# +function endzero($num) +{ + $rep = preg_replace('/0*$/', '', $num); + if ($rep === '') + $rep = '0'; + return $rep; +} +# +function fmt($section, $name, $value, $when, $alldata, $cf = NULL) +{ + global $dfmt, $rownum; + + if ($alldata == null) + $alldata = array(); + + $errorclass = 'err'; + $warnclass = 'warn'; + $lstclass = 'lst'; + $hiclass = 'hi'; + $loclass = 'lo'; + $c2class = 'two'; + $totclass = 'tot'; + $b = ' '; + + $class = ''; + + $nams = explode('.', $name); + if (count($nams) > 1) + $name = $nams[count($nams)-1]; + + $done = false; + if ($value === null) + { + $ret = $b; + $done = true; + } + else + if ($cf != NULL and function_exists($cf)) + { + list($ret, $class) = $cf($section, $name, $value, $when, $alldata, + $warnclass, $errorclass, $hiclass, $loclass, $totclass); + if ($ret !== '') + $done = true; + } + + if ($done === false) + { + $ret = $value; + /* + * To speed up the PHP, the case statement is just $name + * It used to be $section.'.'.$name + * If any names clash, the case code will need to check the value of + * $section to resolve the clash - as with 'Last Share Time' below + * If the code picks up a field that you wish to format differently, + * then you'll need a customsummarypage 'fmt' extension + */ + switch ($name) + { + case '0': + break; + case 'Last Share Time': + if ($section == 'total') + break; + if ($section == 'POOL') + { + if ($value == 0) + $ret = 'Never'; + else + 
$ret = date('H:i:s d-M', $value); + } + else + { + if ($value == 0 + || (isset($alldata['Last Share Pool']) && $alldata['Last Share Pool'] == -1)) + { + $ret = 'Never'; + $class = $warnclass; + } + else + { + $ret = date('H:i:s', $value); + $class = classlastshare($when, $alldata, $warnclass, $errorclass); + } + } + break; + case 'Last getwork': + case 'Last Valid Work': + if ($section == 'total') + break; + if ($value == 0) + $ret = 'Never'; + else + $ret = ($value - $when) . 's'; + break; + case 'Last Share Pool': + if ($section == 'total') + break; + if ($value == -1) + { + $ret = 'None'; + $class = $warnclass; + } + break; + case 'Elapsed': + case 'Device Elapsed': + if ($section == 'total') + break; + $s = $value % 60; + $value -= $s; + $value /= 60; + if ($value == 0) + $ret = $s.'s'; + else + { + $m = $value % 60; + $value -= $m; + $value /= 60; + if ($value == 0) + $ret = sprintf("%dm$b%02ds", $m, $s); + else + { + $h = $value % 24; + $value -= $h; + $value /= 24; + if ($value == 0) + $ret = sprintf("%dh$b%02dm$b%02ds", $h, $m, $s); + else + { + if ($value == 1) + $days = ''; + else + $days = 's'; + + $ret = sprintf("%dday$days$b%02dh$b%02dm$b%02ds", $value, $h, $m, $s); + } + } + } + break; + case 'Last Well': + if ($section == 'total') + break; + if ($value == '0') + { + $ret = 'Never'; + $class = $warnclass; + } + else + $ret = date('H:i:s', $value); + break; + case 'Last Not Well': + if ($section == 'total') + break; + if ($value == '0') + $ret = 'Never'; + else + { + $ret = date('H:i:s', $value); + $class = $errorclass; + } + break; + case 'Reason Not Well': + if ($section == 'total') + break; + if ($value != 'None') + $class = $errorclass; + break; + case 'Utility': + $ret = number_format($value, 2).'/m'; + if ($value == 0) + $class = $errorclass; + else + if (isset($alldata['Difficulty Accepted']) + && isset($alldata['Accepted']) + && isset($alldata['MHS av']) + && ($alldata['Difficulty Accepted'] > 0) + && ($alldata['Accepted'] > 0)) + { + $expected 
= 60 * $alldata['MHS av'] * (pow(10, 6) / pow(2, 32)); + if ($expected == 0) + $expected = 0.000001; // 1 H/s + + $da = $alldata['Difficulty Accepted']; + $a = $alldata['Accepted']; + $expected /= ($da / $a); + + $ratio = $value / $expected; + if ($ratio < 0.9) + $class = $loclass; + else + if ($ratio > 1.1) + $class = $hiclass; + } + break; + case 'Work Utility': + $ret = number_format($value, 2).'/m'; + break; + case 'Temperature': + if ($section == 'total') + break; + $ret = $value.'°C'; + if (!isset($alldata['GPU'])) + { + if ($value == 0) + $ret = ' '; + break; + } + case 'GPU Clock': + case 'Memory Clock': + case 'GPU Voltage': + case 'GPU Activity': + if ($section == 'total') + break; + if ($value == 0) + $class = $warnclass; + break; + case 'Fan Percent': + if ($section == 'total') + break; + if ($value == 0) + $class = $warnclass; + else + { + if ($value == 100) + $class = $errorclass; + else + if ($value > 85) + $class = $warnclass; + } + break; + case 'Fan Speed': + if ($section == 'total') + break; + if ($value == 0) + $class = $warnclass; + else + if (isset($alldata['Fan Percent'])) + { + $test = $alldata['Fan Percent']; + if ($test == 100) + $class = $errorclass; + else + if ($test > 85) + $class = $warnclass; + } + break; + case 'MHS av': + case 'MHS 5s': + case 'MHS 1m': + case 'MHS 5m': + case 'MHS 15m': + $parts = explode('.', $value, 2); + if (count($parts) == 1) + $dec = ''; + else + $dec = '.'.$parts[1]; + $ret = number_format((float)$parts[0]).$dec; + + if ($value == 0) + $class = $errorclass; + else + if (isset($alldata['Difficulty Accepted']) + && isset($alldata['Accepted']) + && isset($alldata['Utility']) + && ($alldata['Difficulty Accepted'] > 0) + && ($alldata['Accepted'] > 0)) + { + $expected = 60 * $value * (pow(10, 6) / pow(2, 32)); + if ($expected == 0) + $expected = 0.000001; // 1 H/s + + $da = $alldata['Difficulty Accepted']; + $a = $alldata['Accepted']; + $expected /= ($da / $a); + + $ratio = $alldata['Utility'] / $expected; + if 
($ratio < 0.9) + $class = $hiclass; + else + if ($ratio > 1.1) + $class = $loclass; + } + break; + case 'Total MH': + case 'Getworks': + case 'Works': + case 'Accepted': + case 'Rejected': + case 'Local Work': + case 'Discarded': + case 'Diff1 Shares': + case 'Diff1 Work': + case 'Times Sent': + case 'Bytes Sent': + case 'Net Bytes Sent': + case 'Times Recv': + case 'Bytes Recv': + case 'Net Bytes Recv': + $parts = explode('.', $value, 2); + if (count($parts) == 1) + $dec = ''; + else + $dec = '.'.$parts[1]; + $ret = number_format((float)$parts[0]).$dec; + break; + case 'Hs': + case 'W': + case 'history_time': + case 'Pool Wait': + case 'Pool Max': + case 'Pool Min': + case 'Pool Av': + case 'Min Diff': + case 'Max Diff': + case 'Work Diff': + $parts = explode('.', $value, 2); + if (count($parts) == 1) + $dec = ''; + else + $dec = '.'.endzero($parts[1]); + $ret = number_format((float)$parts[0]).$dec; + break; + case 'Status': + if ($section == 'total') + break; + if ($value != 'Alive') + $class = $errorclass; + break; + case 'Enabled': + if ($section == 'total') + break; + if ($value != 'Y') + $class = $warnclass; + break; + case 'No Device': + if ($section == 'total') + break; + if ($value != 'false') + $class = $errorclass; + break; + case 'When': + case 'Current Block Time': + if ($section == 'total') + break; + $ret = date($dfmt, $value); + break; + case 'Last Share Difficulty': + if ($section == 'total') + break; + case 'Difficulty Accepted': + case 'Difficulty Rejected': + case 'Difficulty Stale': + if ($value != '') + $ret = number_format((float)$value, 2); + break; + case 'Device Hardware%': + case 'Device Rejected%': + case 'Pool Rejected%': + case 'Pool Stale%': + if ($section == 'total') + break; + if ($value != '') + $ret = number_format((float)$value, 2) . '%'; + break; + case 'Best Share': + if ($section == 'total') + break; + case 'Hardware Errors': + if ($value != '') + $ret = number_format((float)$value); + break; + // BUTTON. 
+ case 'Rig': + case 'Pool': + case 'GPU': + break; + // Sample GEN fields + case 'Mined': + if ($value != '') + $ret = number_format((float)$value * 100.0, 3) . '%'; + break; + case 'Acc': + case 'Rej': + if ($value != '') + $ret = number_format((float)$value * 100.0, 2) . '%'; + break; + case 'GHS av': + case 'GHS 5m': + case 'GHS WU': + case 'GHS Acc': + if ($value != '') + $ret = number_format((float)$value, 2); + break; + case 'AvShr': + if ($section == 'total') + break; + if ($value != '') + $ret = number_format((float)$value, 2); + if ($value == 0) + $class = $warnclass; + break; + } + } + + if ($section == 'NOTIFY' && substr($name, 0, 1) == '*' && $value != '0') + $class = $errorclass; + + if ($class == '' && $section != 'POOL') + $class = classlastshare($when, $alldata, $lstclass, $lstclass); + + if ($class == '' && $section == 'total') + $class = $totclass; + + if ($class == '' && ($rownum % 2) == 0) + $class = $c2class; + + if ($ret === '') + $ret = $b; + + if ($class !== '') + $class = " class=$class"; + + return array($ret, $class); +} +# +global $poolcmd; +$poolcmd = array( 'Switch to' => 'switchpool', + 'Enable' => 'enablepool', + 'Disable' => 'disablepool', + 'Remove' => 'removepool' ); +# +function showhead($cmd, $values, $justnames = false) +{ + global $poolcmd, $readonly; + + newrow(); + + foreach ($values as $name => $value) + { + if ($name == '0' or $name == '') + $name = ' '; + echo ""; + } + + if ($justnames === false && $cmd == 'pools' && $readonly === false) + foreach ($poolcmd as $name => $pcmd) + echo ""; + + endrow(); +} +# +function showdatetime() +{ + global $dfmt; + + otherrow(''); +} +# +global $singlerigsum; +$singlerigsum = array( + 'devs' => array('MHS av' => 1, 'MHS 5s' => 1, 'MHS 1m' => 1, 'MHS 5m' => 1, + 'MHS 15m' => 1, 'Accepted' => 1, 'Rejected' => 1, + 'Hardware Errors' => 1, 'Utility' => 1, 'Total MH' => 1, + 'Diff1 Shares' => 1, 'Diff1 Work' => 1, + 'Difficulty Accepted' => 1, 'Difficulty Rejected' => 1), + 'pools' => 
array('Getworks' => 1, 'Accepted' => 1, 'Rejected' => 1, 'Discarded' => 1, + 'Stale' => 1, 'Get Failures' => 1, 'Remote Failures' => 1, + 'Diff1 Shares' => 1, 'Diff1 Work' => 1, + 'Difficulty Accepted' => 1, 'Difficulty Rejected' => 1, + 'Difficulty Stale' => 1), + 'notify' => array('*' => 1)); +# +function showtotal($total, $when, $oldvalues) +{ + global $rigtotals; + + list($showvalue, $class) = fmt('total', '', 'Total:', $when, null); + echo "$showvalue"; + + $skipfirst = true; + foreach ($oldvalues as $name => $value) + { + if ($skipfirst === true) + { + $skipfirst = false; + continue; + } + + if (isset($total[$name])) + $newvalue = $total[$name]; + else + $newvalue = ''; + + list($showvalue, $class) = fmt('total', $name, $newvalue, $when, null); + echo "$showvalue"; + } +} +# +function details($cmd, $list, $rig) +{ + global $dfmt, $poolcmd, $readonly, $showndate; + global $rownum, $rigtotals, $forcerigtotals, $singlerigsum; + + $when = 0; + + $stas = array('S' => 'Success', 'W' => 'Warning', 'I' => 'Informational', 'E' => 'Error', 'F' => 'Fatal'); + + newtable(); + + if ($showndate === false) + { + showdatetime(); + + endtable(); + newtable(); + + $showndate = true; + } + + if (isset($list['STATUS'])) + { + newrow(); + echo ''; + if (isset($list['STATUS']['When'])) + { + echo ''; + $when = $list['STATUS']['When']; + } + $sta = $list['STATUS']['STATUS']; + echo ''; + echo ''; + endrow(); + } + + if ($rigtotals === true && isset($singlerigsum[$cmd])) + $dototal = $singlerigsum[$cmd]; + else + $dototal = array(); + + $total = array(); + + $section = ''; + $oldvalues = null; + foreach ($list as $item => $values) + { + if ($item == 'STATUS') + continue; + + $sectionname = preg_replace('/\d/', '', $item); + + // Handle 'devs' possibly containing >1 table + if ($sectionname != $section) + { + if ($oldvalues != null && count($total) > 0 + && ($rownum > 2 || $forcerigtotals === true)) + showtotal($total, $when, $oldvalues); + + endtable(); + newtable(); + 
showhead($cmd, $values); + $section = $sectionname; + } + + newrow(); + + foreach ($values as $name => $value) + { + list($showvalue, $class) = fmt($section, $name, $value, $when, $values); + echo "$showvalue"; + + if (isset($dototal[$name]) + || (isset($dototal['*']) and substr($name, 0, 1) == '*')) + { + if (isset($total[$name])) + $total[$name] += $value; + else + $total[$name] = $value; + } + } + + if ($cmd == 'pools' && $readonly === false) + { + reset($values); + $pool = current($values); + foreach ($poolcmd as $name => $pcmd) + { + list($ignore, $class) = fmt('BUTTON', 'Pool', '', $when, $values); + echo ""; + if ($pool === false) + echo ' '; + else + { + echo ""; + } + echo ''; + } + } + endrow(); + + $oldvalues = $values; + } + + if ($oldvalues != null && count($total) > 0 + && ($rownum > 2 || $forcerigtotals === true)) + showtotal($total, $when, $oldvalues); + + endtable(); +} +# +global $devs; +$devs = null; +# +function gpubuttons($count, $rig) +{ + global $devs; + + $basic = array( 'GPU', 'Enable', 'Disable', 'Restart' ); + + $options = array( 'intensity' => 'Intensity', + 'fan' => 'Fan Percent', + 'engine' => 'GPU Clock', + 'mem' => 'Memory Clock', + 'vddc' => 'GPU Voltage' ); + + newtable(); + newrow(); + + foreach ($basic as $head) + echo ""; + + foreach ($options as $name => $des) + echo ""; + + $n = 0; + for ($c = 0; $c < $count; $c++) + { + endrow(); + newrow(); + + foreach ($basic as $name) + { + list($ignore, $class) = fmt('BUTTON', 'GPU', '', 0, null); + echo ""; + + if ($name == 'GPU') + echo $c; + else + { + echo ""; + } + + echo ''; + } + + foreach ($options as $name => $des) + { + list($ignore, $class) = fmt('BUTTON', 'GPU', '', 0, null); + echo ""; + + if (!isset($devs["GPU$c"][$des])) + echo ' '; + else + { + $value = $devs["GPU$c"][$des]; + echo ""; + echo ""; + $n++; + } + + echo ''; + } + + } + endrow(); + endtable(); +} +# +function processgpus($rig) +{ + global $error; + global $warnfont, $warnoff; + + $gpus = api($rig, 'gpucount'); 
+ + if ($error != null) + otherrow(""); + else + { + if (!isset($gpus['GPUS']['Count'])) + { + $rw = ''; + otherrow($rw); + } + else + { + $count = $gpus['GPUS']['Count']; + if ($count == 0) + otherrow(''); + else + gpubuttons($count, $rig); + } + } +} +# +function showpoolinputs($rig, $ans) +{ + global $readonly, $poolinputs; + + if ($readonly === true || $poolinputs === false) + return; + + newtable(); + newrow(); + + $inps = array('Pool URL' => array('purl', 20), + 'Worker Name' => array('pwork', 10), + 'Worker Password' => array('ppass', 10)); + $b = ' '; + + echo ""; + + endrow(); + + if (count($ans) > 1) + { + newrow(); + + echo ''; + echo ""; + + endrow(); + } + endtable(); +} +# +function process($cmds, $rig) +{ + global $error, $devs; + global $warnfont, $warnoff; + + $count = count($cmds); + foreach ($cmds as $cmd => $des) + { + $process = api($rig, $cmd); + + if ($error != null) + { + otherrow(""); + break; + } + else + { + details($cmd, $process, $rig); + + if ($cmd == 'devs') + $devs = $process; + + if ($cmd == 'pools') + showpoolinputs($rig, $process); + + # Not after the last one + if (--$count > 0) + otherrow(''); + } + } +} +# +function rigname($rig, $rigname) +{ + global $rigs, $rignames, $rigips; + + if (isset($rigs[$rig])) + { + $parts = explode(':', $rigs[$rig], 3); + if (count($parts) == 3) + $rigname = $parts[2]; + else + if ($rignames !== false) + { + switch ($rignames) + { + case 'ip': + if (isset($parts[0]) && isset($rigips[$parts[0]])) + { + $ip = explode('.', $rigips[$parts[0]]); + if (count($ip) == 4) + $rigname = intval($ip[3]); + } + break; + case 'ipx': + if (isset($parts[0]) && isset($rigips[$parts[0]])) + { + $ip = explode('.', $rigips[$parts[0]]); + if (count($ip) == 4) + $rigname = intval($ip[3], 16); + } + break; + } + } + } + + return $rigname; +} +# +function riginput($rig, $rigname, $usebuttons) +{ + $rigname = rigname($rig, $rigname); + + if ($usebuttons === true) + return ""; + else + return "$rigname"; +} +# +function 
rigbutton($rig, $rigname, $when, $row, $usebuttons) +{ + list($value, $class) = fmt('BUTTON', 'Rig', '', $when, $row); + + if ($rig === '') + $ri = ' '; + else + $ri = riginput($rig, $rigname, $usebuttons); + + return ""; +} +# +function showrigs($anss, $headname, $rigname) +{ + global $rigbuttons; + + $dthead = array($headname => 1, 'STATUS' => 1, 'Description' => 1, 'When' => 1, 'API' => 1, 'CGMiner' => 1); + showhead('', $dthead); + + foreach ($anss as $rig => $ans) + { + if ($ans == null) + continue; + + newrow(); + + $when = 0; + if (isset($ans['STATUS']['When'])) + $when = $ans['STATUS']['When']; + + foreach ($ans as $item => $row) + { + if ($item != 'STATUS' && $item != 'VERSION') + continue; + + foreach ($dthead as $name => $x) + { + if ($item == 'STATUS' && $name == $headname) + echo rigbutton($rig, $rigname.$rig, $when, null, $rigbuttons); + else + { + if (isset($row[$name])) + { + list($showvalue, $class) = fmt('STATUS', $name, $row[$name], $when, null); + echo "$showvalue"; + } + } + } + } + endrow(); + } +} +# +function refreshbuttons() +{ + global $ignorerefresh, $changerefresh, $autorefresh; + + if ($ignorerefresh == false && $changerefresh == true) + { + echo '    '; + echo ""; + echo ""; + echo ""; + } +} +# +function pagebuttons($rig, $pg) +{ + global $readonly, $rigs, $rigbuttons, $userlist, $ses; + global $allowcustompages, $customsummarypages; + + if ($rig === null) + { + $prev = null; + $next = null; + + if ($pg === null) + $refresh = ''; + else + $refresh = "&pg=$pg"; + } + else + { + switch (count($rigs)) + { + case 0: + case 1: + $prev = null; + $next = null; + break; + case 2: + $prev = null; + $next = ($rig + 1) % count($rigs); + break; + default: + $prev = ($rig - 1) % count($rigs); + $next = ($rig + 1) % count($rigs); + break; + } + + $refresh = "&rig=$rig"; + } + + echo '"; +} +# +function doOne($rig, $preprocess) +{ + global $haderror, $readonly, $notify, $rigs; + global $placebuttons; + + if ($placebuttons == 'top' || $placebuttons 
== 'both') + pagebuttons($rig, null); + + if ($preprocess != null) + process(array($preprocess => $preprocess), $rig); + + $cmds = array( 'devs' => 'device list', + 'summary' => 'summary information', + 'pools' => 'pool list'); + + if ($notify) + $cmds['notify'] = 'device status'; + + $cmds['config'] = 'cgminer config'; + + process($cmds, $rig); + + if ($haderror == false && $readonly === false) + processgpus($rig); + + if ($placebuttons == 'bot' || $placebuttons == 'both') + pagebuttons($rig, null); +} +# +global $sectionmap; +# map sections to their api command +# DEVS is a special case that will match GPU, PGA or ASC +# so you can have a single table with both in it +# DATE is hard coded so not in here +$sectionmap = array( + 'RIGS' => 'version', + 'SUMMARY' => 'summary', + 'POOL' => 'pools', + 'DEVS' => 'devs', + 'EDEVS' => 'edevs', + 'GPU' => 'devs', // You would normally use DEVS + 'PGA' => 'devs', // You would normally use DEVS + 'ASC' => 'devs', // You would normally use DEVS + 'NOTIFY' => 'notify', + 'DEVDETAILS' => 'devdetails', + 'STATS' => 'stats', + 'ESTATS' => 'estats', + 'CONFIG' => 'config', + 'COIN' => 'coin', + 'USBSTATS' => 'usbstats'); +# +function joinfields($section1, $section2, $join, $results) +{ + global $sectionmap; + + $name1 = $sectionmap[$section1]; + $name2 = $sectionmap[$section2]; + $newres = array(); + + // foreach rig in section1 + foreach ($results[$name1] as $rig => $result) + { + $status = null; + + // foreach answer section in the rig api call + foreach ($result as $name1b => $fields1b) + { + if ($name1b == 'STATUS') + { + // remember the STATUS from section1 + $status = $result[$name1b]; + continue; + } + + // foreach answer section in the rig api call (for the other api command) + foreach ($results[$name2][$rig] as $name2b => $fields2b) + { + if ($name2b == 'STATUS') + continue; + + // If match the same field values of fields in $join + $match = true; + foreach ($join as $field) + if ($fields1b[$field] != $fields2b[$field]) + 
{ + $match = false; + break; + } + + if ($match === true) + { + if ($status != null) + { + $newres[$rig]['STATUS'] = $status; + $status = null; + } + + $subsection = $section1.'+'.$section2; + $subsection .= preg_replace('/[^0-9]/', '', $name1b.$name2b); + + foreach ($fields1b as $nam => $val) + $newres[$rig][$subsection]["$section1.$nam"] = $val; + foreach ($fields2b as $nam => $val) + $newres[$rig][$subsection]["$section2.$nam"] = $val; + } + } + } + } + return $newres; +} +# +function joinlr($section1, $section2, $join, $results) +{ + global $sectionmap; + + $name1 = $sectionmap[$section1]; + $name2 = $sectionmap[$section2]; + $newres = array(); + + // foreach rig in section1 + foreach ($results[$name1] as $rig => $result) + { + $status = null; + + // foreach answer section in the rig api call + foreach ($result as $name1b => $fields1b) + { + if ($name1b == 'STATUS') + { + // remember the STATUS from section1 + $status = $result[$name1b]; + continue; + } + + // Build L string to be matched + // : means a string constant otherwise it's a field name + $Lval = ''; + foreach ($join['L'] as $field) + { + if (substr($field, 0, 1) == ':') + $Lval .= substr($field, 1); + else + $Lval .= $fields1b[$field]; + } + + // foreach answer section in the rig api call (for the other api command) + foreach ($results[$name2][$rig] as $name2b => $fields2b) + { + if ($name2b == 'STATUS') + continue; + + // Build R string and compare + // : means a string constant otherwise it's a field name + $Rval = ''; + foreach ($join['R'] as $field) + { + if (substr($field, 0, 1) == ':') + $Rval .= substr($field, 1); + else + $Rval .= $fields2b[$field]; + } + + if ($Lval === $Rval) + { + if ($status != null) + { + $newres[$rig]['STATUS'] = $status; + $status = null; + } + + $subsection = $section1.'+'.$section2; + $subsection .= preg_replace('/[^0-9]/', '', $name1b.$name2b); + + foreach ($fields1b as $nam => $val) + $newres[$rig][$subsection]["$section1.$nam"] = $val; + foreach ($fields2b as $nam 
=> $val) + $newres[$rig][$subsection]["$section2.$nam"] = $val; + } + } + } + } + return $newres; +} +# +function joinall($section1, $section2, $results) +{ + global $sectionmap; + + $name1 = $sectionmap[$section1]; + $name2 = $sectionmap[$section2]; + $newres = array(); + + // foreach rig in section1 + foreach ($results[$name1] as $rig => $result) + { + // foreach answer section in the rig api call + foreach ($result as $name1b => $fields1b) + { + if ($name1b == 'STATUS') + { + // copy the STATUS from section1 + $newres[$rig][$name1b] = $result[$name1b]; + continue; + } + + // foreach answer section in the rig api call (for the other api command) + foreach ($results[$name2][$rig] as $name2b => $fields2b) + { + if ($name2b == 'STATUS') + continue; + + $subsection = $section1.'+'.$section2; + $subsection .= preg_replace('/[^0-9]/', '', $name1b.$name2b); + + foreach ($fields1b as $nam => $val) + $newres[$rig][$subsection]["$section1.$nam"] = $val; + foreach ($fields2b as $nam => $val) + $newres[$rig][$subsection]["$section2.$nam"] = $val; + } + } + } + return $newres; +} +# +function joinsections($sections, $results, $errors) +{ + global $sectionmap; + + // GPU's don't have Name,ID fields - so create them + foreach ($results as $section => $res) + foreach ($res as $rig => $result) + foreach ($result as $name => $fields) + { + $subname = preg_replace('/[0-9]/', '', $name); + if ($subname == 'GPU' and isset($result[$name]['GPU'])) + { + $results[$section][$rig][$name]['Name'] = 'GPU'; + $results[$section][$rig][$name]['ID'] = $result[$name]['GPU']; + } + } + + foreach ($sections as $section => $fields) + if ($section != 'DATE' && !isset($sectionmap[$section])) + { + $both = explode('+', $section, 2); + if (count($both) > 1) + { + switch($both[0]) + { + case 'SUMMARY': + switch($both[1]) + { + case 'POOL': + case 'DEVS': + case 'EDEVS': + case 'CONFIG': + case 'COIN': + $sectionmap[$section] = $section; + $results[$section] = joinall($both[0], $both[1], $results); + 
break; + default: + $errors[] = "Error: Invalid section '$section'"; + break; + } + break; + case 'DEVS': + case 'EDEVS': + switch($both[1]) + { + case 'NOTIFY': + case 'DEVDETAILS': + case 'USBSTATS': + $join = array('Name', 'ID'); + $sectionmap[$section] = $section; + $results[$section] = joinfields($both[0], $both[1], $join, $results); + break; + case 'STATS': + case 'ESTATS': + $join = array('L' => array('Name','ID'), 'R' => array('ID')); + $sectionmap[$section] = $section; + $results[$section] = joinlr($both[0], $both[1], $join, $results); + break; + default: + $errors[] = "Error: Invalid section '$section'"; + break; + } + break; + case 'POOL': + switch($both[1]) + { + case 'STATS': + $join = array('L' => array(':POOL','POOL'), 'R' => array('ID')); + $sectionmap[$section] = $section; + $results[$section] = joinlr($both[0], $both[1], $join, $results); + break; + default: + $errors[] = "Error: Invalid section '$section'"; + break; + } + break; + default: + $errors[] = "Error: Invalid section '$section'"; + break; + } + } + else + $errors[] = "Error: Invalid section '$section'"; + } + + return array($results, $errors); +} +# +function secmatch($section, $field) +{ + if ($section == $field) + return true; + + if (($section == 'DEVS' || $section == 'EDEVS') + && ($field == 'GPU' || $field == 'PGA' || $field == 'ASC')) + return true; + + return false; +} +# +function customset($showfields, $sum, $section, $rig, $isbutton, $result, $total, $cf = NULL) +{ + global $rigbuttons; + + $rn = 0; + foreach ($result as $sec => $row) + { + $secname = preg_replace('/\d/', '', $sec); + + if ($sec != 'total') + if (!secmatch($section, $secname)) + continue; + + newrow(); + + $when = 0; + if (isset($result['STATUS']['When'])) + $when = $result['STATUS']['When']; + + + if ($isbutton) + echo rigbutton($rig, $rig, $when, $row, $rigbuttons); + else + { + list($ignore, $class) = fmt('total', '', '', $when, $row, $cf); + echo ""; + } + + foreach ($showfields as $name => $one) + { + if 
($name === '#' and $sec != 'total') + { + $rn++; + $value = $rn; + if (isset($total[$name])) + $total[$name]++; + else + $total[$name] = 1; + } + elseif (isset($row[$name])) + { + $value = $row[$name]; + + if (isset($sum[$section][$name])) + { + if (isset($total[$name])) + $total[$name] += $value; + else + $total[$name] = $value; + } + } + else + { + if ($sec == 'total' && isset($total[$name])) + $value = $total[$name]; + else + $value = null; + } + + if (strpos($secname, '+') === false) + list($showvalue, $class) = fmt($secname, $name, $value, $when, $row, $cf); + else + { + if ($name != '#') + $parts = explode('.', $name, 2); + else + $parts[0] = $parts[1] = '#'; + list($showvalue, $class) = fmt($parts[0], $parts[1], $value, $when, $row, $cf); + } + + echo "$showvalue"; + } + endrow(); + } + return $total; +} +# +function docalc($func, $data) +{ + switch ($func) + { + case 'sum': + $tot = 0; + foreach ($data as $val) + $tot += $val; + return $tot; + case 'avg': + $tot = 0; + foreach ($data as $val) + $tot += $val; + return ($tot / count($data)); + case 'min': + $ans = null; + foreach ($data as $val) + if ($ans === null) + $ans = $val; + else + if ($val < $ans) + $ans = $val; + return $ans; + case 'max': + $ans = null; + foreach ($data as $val) + if ($ans === null) + $ans = $val; + else + if ($val > $ans) + $ans = $val; + return $ans; + case 'lo': + $ans = null; + foreach ($data as $val) + if ($ans === null) + $ans = $val; + else + if (strcasecmp($val, $ans) < 0) + $ans = $val; + return $ans; + case 'hi': + $ans = null; + foreach ($data as $val) + if ($ans === null) + $ans = $val; + else + if (strcasecmp($val, $ans) > 0) + $ans = $val; + return $ans; + case 'count': + return count($data); + case 'any': + default: + return $data[0]; + } +} +# +function docompare($row, $test) +{ + // invalid $test data means true + if (count($test) < 2) + return true; + + if (isset($row[$test[0]])) + $val = $row[$test[0]]; + else + $val = null; + + if ($test[1] == 'set') + return 
($val !== null); + + if ($val === null || count($test) < 3) + return true; + + switch($test[1]) + { + case '=': + return ($val == $test[2]); + case '<': + return ($val < $test[2]); + case '<=': + return ($val <= $test[2]); + case '>': + return ($val > $test[2]); + case '>=': + return ($val >= $test[2]); + case 'eq': + return (strcasecmp($val, $test[2]) == 0); + case 'lt': + return (strcasecmp($val, $test[2]) < 0); + case 'le': + return (strcasecmp($val, $test[2]) <= 0); + case 'gt': + return (strcasecmp($val, $test[2]) > 0); + case 'ge': + return (strcasecmp($val, $test[2]) >= 0); + default: + return true; + } +} +# +function processcompare($which, $ext, $section, $res) +{ + if (isset($ext[$section][$which])) + { + $proc = $ext[$section][$which]; + if ($proc !== null) + { + $res2 = array(); + foreach ($res as $rig => $result) + foreach ($result as $sec => $row) + { + $secname = preg_replace('/\d/', '', $sec); + if (!secmatch($section, $secname)) + $res2[$rig][$sec] = $row; + else + { + $keep = true; + foreach ($proc as $test) + if (!docompare($row, $test)) + { + $keep = false; + break; + } + if ($keep) + $res2[$rig][$sec] = $row; + } + } + + $res = $res2; + } + } + return $res; +} +# +function ss($a, $b) +{ + $la = strlen($a); + $lb = strlen($b); + if ($la != $lb) + return $lb - $la; + return strcmp($a, $b); +} +# +# If you are developing a customsummarypage that uses BGEN or GEN, +# you may want to remove the '@' in front of '@eval()' to help with debugging +# The '@' removes php comments from the web log about missing fields +# Since there are many forks of cgminer that break the API or do not +# keep their fork up to date with current cgminer, the addition of +# '@' solves the problem of generating unnecessary and excessive web logs +# about the eval() +function genfld($row, $calc) +{ + uksort($row, "ss"); + + foreach ($row as $name => $value) + if (strstr($calc, $name) !== FALSE) + $calc = str_replace($name, $value, $calc); + + @eval("\$val = $calc;"); + + if 
(!isset($val)) + return ''; + else + return $val; +} +# +function dogen($ext, $wg, $gname, $section, &$res, &$fields) +{ + $gen = $ext[$section][$wg]; + + foreach ($gen as $fld => $calc) + $fields[] = "$gname.$fld"; + + foreach ($res as $rig => $result) + foreach ($result as $sec => $row) + { + $secname = preg_replace('/\d/', '', $sec); + if (secmatch($section, $secname)) + foreach ($gen as $fld => $calc) + { + $name = "$gname.$fld"; + + $val = genfld($row, $calc); + + $res[$rig][$sec][$name] = $val; + } + } +} +# +function processext($ext, $section, $res, &$fields) +{ + global $allowgen; + + $res = processcompare('where', $ext, $section, $res); + + // Generated fields (functions of other fields before grouping) + if ($allowgen === true && isset($ext[$section]['bgen'])) + dogen($ext, 'bgen', 'BGEN', $section, $res, $fields); + + if (isset($ext[$section]['group'])) + { + $grp = $ext[$section]['group']; + $calc = $ext[$section]['calc']; + if ($grp !== null) + { + $interim = array(); + $res2 = array(); + $cou = 0; + foreach ($res as $rig => $result) + foreach ($result as $sec => $row) + { + $secname = preg_replace('/\d/', '', $sec); + if (!secmatch($section, $secname)) + { + // STATUS may be problematic ... 
+ if (!isset($res2[$sec])) + $res2[$sec] = $row; + } + else + { + $grpkey = ''; + $newrow = array(); + foreach ($grp as $field) + { + if (isset($row[$field])) + { + $grpkey .= $row[$field].'.'; + $newrow[$field] = $row[$field]; + } + else + $grpkey .= '.'; + } + + if (!isset($interim[$grpkey])) + { + $interim[$grpkey]['grp'] = $newrow; + $interim[$grpkey]['sec'] = $secname.$cou; + $cou++; + } + + if ($calc !== null) + foreach ($calc as $field => $func) + { + if (isset($row[$field])) + { + if (!isset($interim[$grpkey]['cal'][$field])) + $interim[$grpkey]['cal'][$field] = array(); + $interim[$grpkey]['cal'][$field][] = $row[$field]; + } + } + } + } + + // Build the rest of $res2 from $interim + foreach ($interim as $rowkey => $row) + { + $key = $row['sec']; + foreach ($row['grp'] as $field => $value) + $res2[$key][$field] = $value; + foreach ($row['cal'] as $field => $data) + $res2[$key][$field] = docalc($calc[$field], $data); + } + + $res = array('' => $res2); + } + } + + // Generated fields (functions of other fields after grouping) + if ($allowgen === true && isset($ext[$section]['gen'])) + dogen($ext, 'gen', 'GEN', $section, $res, $fields); + + return processcompare('having', $ext, $section, $res); +} +# +function processcustompage($pagename, $sections, $sum, $ext, $namemap) +{ + global $sectionmap; + global $miner, $port; + global $rigs, $error; + global $warnfont, $warnoff; + global $dfmt; + global $readonly, $showndate; + + $cmds = array(); + $errors = array(); + foreach ($sections as $section => $fields) + { + $all = explode('+', $section); + foreach ($all as $section) + { + if (isset($sectionmap[$section])) + { + $cmd = $sectionmap[$section]; + if (!isset($cmds[$cmd])) + $cmds[$cmd] = 1; + } + else + if ($section != 'DATE') + $errors[] = "Error: unknown section '$section' in custom summary page '$pagename'"; + } + } + + $results = array(); + foreach ($rigs as $num => $rig) + { + $parts = explode(':', $rig, 3); + if (count($parts) >= 1) + { + $miner = 
$parts[0]; + if (count($parts) >= 2) + $port = $parts[1]; + else + $port = ''; + + if (count($parts) > 2) + $name = $parts[2]; + else + $name = $rig; + + foreach ($cmds as $cmd => $one) + { + $process = api($name, $cmd); + + if ($error != null) + { + $errors[] = "Error getting $cmd for $name $warnfont$error$warnoff"; + break; + } + else + $results[$cmd][$num] = $process; + } + } + else + otherrow(''); + } + + // Show API errors at the top + if (count($errors) > 0) + { + foreach ($errors as $err) + otherrow(""); + $errors = array(); + } + + $shownsomething = false; + if (count($results) > 0) + { + list($results, $errors) = joinsections($sections, $results, $errors); + $first = true; + foreach ($sections as $section => $fields) + { + if ($section === 'DATE') + { + if ($shownsomething) + otherrow(''); + + newtable(); + showdatetime(); + endtable(); + // On top of the next table + $shownsomething = false; + continue; + } + + if ($section === 'RIGS') + { + if ($shownsomething) + otherrow(''); + + newtable(); + showrigs($results['version'], 'Rig', ''); + endtable(); + $shownsomething = true; + continue; + } + + if (isset($results[$sectionmap[$section]])) + { + if (isset($ext[$section]['fmt'])) + $cf = $ext[$section]['fmt']; + else + $cf = NULL; + + $rigresults = processext($ext, $section, $results[$sectionmap[$section]], $fields); + + $showfields = array(); + $showhead = array(); + foreach ($fields as $field) + foreach ($rigresults as $result) + foreach ($result as $sec => $row) + { + $secname = preg_replace('/\d/', '', $sec); + if (secmatch($section, $secname)) + { + if ($field === '*') + { + foreach ($row as $f => $v) + { + $showfields[$f] = 1; + $map = $section.'.'.$f; + if (isset($namemap[$map])) + $showhead[$namemap[$map]] = 1; + else + $showhead[$f] = 1; + } + } + elseif ($field === '#') + { + $showfields[$field] = 1; + $showhead[$field] = 1; + } + elseif (isset($row[$field])) + { + $showfields[$field] = 1; + $map = $section.'.'.$field; + if (isset($namemap[$map])) 
+ $showhead[$namemap[$map]] = 1; + else + $showhead[$field] = 1; + } + } + } + + if (count($showfields) > 0) + { + if ($shownsomething) + otherrow(''); + + newtable(); + if (count($rigresults) == 1 && isset($rigresults[''])) + $ri = array('' => 1) + $showhead; + else + $ri = array('Rig' => 1) + $showhead; + showhead('', $ri, true); + + $total = array(); + $add = array('total' => array()); + + foreach ($rigresults as $num => $result) + $total = customset($showfields, $sum, $section, $num, true, $result, $total, $cf); + + if (count($total) > 0) + customset($showfields, $sum, $section, 'Σ', false, $add, $total, $cf); + + $first = false; + + endtable(); + $shownsomething = true; + } + } + } + } + + if (count($errors) > 0) + { + if (count($results) > 0) + otherrow(''); + + foreach ($errors as $err) + otherrow(""); + } +} +# +function showcustompage($pagename, $systempage = false) +{ + global $customsummarypages; + global $placebuttons; + + if ($placebuttons == 'top' || $placebuttons == 'both') + pagebuttons(null, $pagename); + + if ($systempage === false && !isset($customsummarypages[$pagename])) + { + otherrow(""); + return; + } + + $csp = getcsp($pagename, $systempage); + if ($csp === false) + { + otherrow(""); + return; + } + + degen($csp); + + $page = $csp[0]; + $namemap = array(); + foreach ($page as $name => $fields) + { + if ($fields === null) + $page[$name] = array(); + else + foreach ($fields as $num => $field) + { + $pos = strpos($field, '='); + if ($pos !== false) + { + $names = explode('=', $field, 2); + if (strlen($names[1]) > 0) + $namemap[$name.'.'.$names[0]] = $names[1]; + $page[$name][$num] = $names[0]; + } + } + } + + $ext = null; + if (isset($csp[2])) + $ext = $csp[2]; + + $sum = $csp[1]; + if ($sum === null) + $sum = array(); + + // convert them to searchable via isset() + foreach ($sum as $section => $fields) + { + $newfields = array(); + foreach ($fields as $field) + $newfields[$field] = 1; + $sum[$section] = $newfields; + } + + if (count($page) <= 
1) + { + otherrow(""); + return; + } + + processcustompage($pagename, $page, $sum, $ext, $namemap); + + if ($placebuttons == 'bot' || $placebuttons == 'both') + pagebuttons(null, $pagename); +} +# +function onlylogin() +{ + global $here; + + htmlhead('', false, null, null, true); + +?> + + +No rigs $action"); + return; + } + else + { + if ($mcast === true && count($rigs) < $mcastexpect) + $mcerr = othrow('"); + } + + if ($ignorerefresh == false) + { + $ref = trim(getparam('ref', true)); + if ($ref != null && $ref != '') + $autorefresh = intval($ref); + } + + if ($pagesonly !== true) + { + $rig = trim(getparam('rig', true)); + + $arg = trim(getparam('arg', true)); + $preprocess = null; + if ($arg != null and $arg != '') + { + if ($rig != null and $rig != '' and $rig >= 0 and $rig < count($rigs)) + { + $parts = explode(':', $rigs[$rig], 3); + if (count($parts) >= 1) + { + $miner = $parts[0]; + if (count($parts) >= 2) + $port = $parts[1]; + else + $port = ''; + + if ($readonly !== true) + $preprocess = $arg; + } + } + } + } + + if ($allowcustompages === true) + { + $pg = urlencode(trim(getparam('pg', true))); + if ($pagesonly === true) + { + if ($pg !== null && $pg !== '') + { + if ($userlist !== null && isset($userlist['def']) + && !in_array($pg, $userlist['def'])) + $pg = null; + } + else + { + if ($userlist !== null && isset($userlist['def'])) + foreach ($userlist['def'] as $pglook) + if (getcsp($pglook) !== false) + { + $pg = $pglook; + break; + } + } + } + + if ($pg !== null && $pg !== '') + { + htmlhead($mcerr, false, null, $pg); + showcustompage($pg); + return; + } + } + + if ($pagesonly === true) + { + onlylogin(); + return; + } + + if (count($rigs) == 1) + { + $parts = explode(':', $rigs[0], 3); + if (count($parts) >= 1) + { + $miner = $parts[0]; + if (count($parts) >= 2) + $port = $parts[1]; + else + $port = ''; + + htmlhead($mcerr, true, 0); + doOne(0, $preprocess); + } + else + { + minhead($mcerr); + otherrow(''); + } + + return; + } + + if ($rig != null 
and $rig != '' and $rig >= 0 and $rig < count($rigs)) + { + $parts = explode(':', $rigs[$rig], 3); + if (count($parts) >= 1) + { + $miner = $parts[0]; + if (count($parts) >= 2) + $port = $parts[1]; + else + $port = ''; + + htmlhead($mcerr, true, 0); + doOne($rig, $preprocess); + } + else + { + minhead($mcerr); + otherrow(''); + } + + return; + } + + htmlhead($mcerr, false, null); + + if ($preprocess != null) + process(array($preprocess => $preprocess), $rig); + + if (getcsp('Summary', true) !== false) + showcustompage('Summary', true); +} +# +if ($mcast === true) + getrigs(); +display(); +# +?> +
$name$nameDate: '.date($dfmt).'Computer: '.$list['STATUS']['Description'].'When: '.date($dfmt, $list['STATUS']['When']).'Status: '.$stas[$sta].'Message: '.$list['STATUS']['Msg'].'$head$desError getting GPU count: $warnfont$error$warnoffNo GPU count returned: '.$warnfont; + $rw .= $gpus['STATUS']['STATUS'].' '.$gpus['STATUS']['Msg']; + $rw .= $warnoff.'No GPUs Add a pool: "; + + foreach ($inps as $text => $name) + echo "$text: "; + + echo " Set pool priorities: Comma list of pool numbers: "; + echo "Error getting $des: $warnfont$error$warnoff

$ri
'; + if ($userlist === null || isset($_SESSION[$ses])) + { + if ($prev !== null) + echo riginput($prev, 'Prev', true).' '; + + echo " "; + + if ($next !== null) + echo riginput($next, 'Next', true).' '; + echo ' '; + if (count($rigs) > 1 and getcsp('Summary', true) !== false) + echo " "; + } + + if ($allowcustompages === true) + { + if ($userlist === null || isset($_SESSION[$ses])) + $list = $customsummarypages; + else + { + if ($userlist !== null && isset($userlist['def'])) + $list = array_flip($userlist['def']); + else + $list = array(); + } + + foreach ($list as $pagename => $data) + if (getcsp($pagename) !== false) + echo " "; + } + + echo ' '; + if ($rig !== null && $readonly === false) + { + $rg = ''; + if (count($rigs) > 1) + $rg = " Rig $rig"; + echo ""; + echo " "; + } + refreshbuttons(); + if (isset($_SESSION[$ses])) + echo " "; + else + if ($userlist !== null) + echo " "; + + echo "
$rigBad "$rigs" array$err    $errUnknown custom summary page '$pagename'Invalid custom summary page '$pagename'Invalid custom summary page '$pagename' no content
 
+
+ + +
+ + +
+ + + + + + + + +
 
+

LOGIN

Username:
+
Password:
+
+
+
Found '.count($rigs)." rigs but expected at least $mcastexpectInvalid "$rigs" arrayInvalid "$rigs" array
+ diff --git a/mknsis.sh b/mknsis.sh new file mode 100644 index 0000000..cc97dbe --- /dev/null +++ b/mknsis.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +MINGW_PATH=/usr/i686-pc-mingw32/sys-root/mingw/bin + +OUT_BASE="cpuminer-installer" +OUT_EXE="$OUT_BASE.exe" + +PATH=$PATH:$MINGW_PATH \ + nsiswrapper --run \ + --name "CPU miner" \ + --outfile "$OUT_EXE" \ + minerd.exe \ + $MINGW_PATH/libcurl-4.dll=libcurl-4.dll \ + $MINGW_PATH/pthreadgc2.dll=pthreadgc2.dll \ + $MINGW_PATH/libidn-11.dll=libidn-11.dll \ + $MINGW_PATH/libssh2-1.dll=libssh2-1.dll \ + $MINGW_PATH/libssl-10.dll=libssl-10.dll \ + $MINGW_PATH/zlib1.dll=zlib1.dll \ + $MINGW_PATH/libcrypto-10.dll=libcrypto-10.dll \ + $MINGW_PATH/libiconv-2.dll=libiconv-2.dll \ + $MINGW_PATH/libintl-8.dll=libintl-8.dll + +chmod 0755 "$OUT_EXE" +zip -9 "$OUT_BASE" "$OUT_EXE" +rm -f "$OUT_EXE" + +chmod 0644 "$OUT_BASE.zip" + +echo -n "SHA1: " +sha1sum "$OUT_BASE.zip" + +echo -n "MD5: " +md5sum "$OUT_BASE.zip" + diff --git a/noncedup.c b/noncedup.c new file mode 100644 index 0000000..6b94fd6 --- /dev/null +++ b/noncedup.c @@ -0,0 +1,99 @@ +/* + * Copyright 2014 Andrew Smith + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "miner.h" +#include "klist.h" + +// Nonce +typedef struct nitem { + uint32_t work_id; + uint32_t nonce; + struct timeval when; +} NITEM; + +#define DATAN(_item) ((NITEM *)(_item->data)) + +struct dupdata { + int timelimit; + K_LIST *nfree_list; + K_STORE *nonce_list; + uint64_t checked; + uint64_t dups; +}; + +void dupalloc(struct cgpu_info *cgpu, int timelimit) +{ + struct dupdata *dup; + + dup = calloc(1, sizeof(*dup)); + if (unlikely(!dup)) + quithere(1, "Failed to calloc dupdata"); + + dup->timelimit = timelimit; + dup->nfree_list = k_new_list("Nonces", sizeof(NITEM), 1024, 0, true); + dup->nonce_list = k_new_store(dup->nfree_list); + + cgpu->dup_data = dup; +} + +void dupcounters(struct cgpu_info *cgpu, uint64_t *checked, uint64_t *dups) +{ + struct dupdata *dup = (struct dupdata *)(cgpu->dup_data); + + if (!dup) { + *checked = 0; + *dups = 0; + } else { + *checked = dup->checked; + *dups = dup->dups; + } +} + +bool isdupnonce(struct cgpu_info *cgpu, struct work *work, uint32_t nonce) +{ + struct dupdata *dup = (struct dupdata *)(cgpu->dup_data); + struct timeval now; + bool unique = true; + K_ITEM *item; + + if (!dup) + return false; + + cgtime(&now); + dup->checked++; + K_WLOCK(dup->nfree_list); + item = dup->nonce_list->tail; + while (unique && item) { + if (DATAN(item)->work_id == work->id && DATAN(item)->nonce == nonce) { + unique = false; + applog(LOG_WARNING, "%s%d: Duplicate nonce %08x", + cgpu->drv->name, cgpu->device_id, nonce); + } else + item = item->prev; + } + if (unique) { + item = k_unlink_head(dup->nfree_list); + DATAN(item)->work_id = work->id; + DATAN(item)->nonce = nonce; + memcpy(&(DATAN(item)->when), &now, sizeof(now)); + k_add_head(dup->nonce_list, item); + } + item = dup->nonce_list->tail; + while (item && tdiff(&(DATAN(item)->when), &now) > dup->timelimit) { + item = k_unlink_tail(dup->nonce_list); + k_add_head(dup->nfree_list, item); + item = dup->nonce_list->tail; + } + K_WUNLOCK(dup->nfree_list); + + if (!unique) + 
dup->dups++; + + return !unique; +} diff --git a/sha2.c b/sha2.c new file mode 100644 index 0000000..6777b28 --- /dev/null +++ b/sha2.c @@ -0,0 +1,208 @@ +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2013, Con Kolivas + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include + +#include "sha2.h" + +#define UNPACK32(x, str) \ +{ \ + *((str) + 3) = (uint8_t) ((x) ); \ + *((str) + 2) = (uint8_t) ((x) >> 8); \ + *((str) + 1) = (uint8_t) ((x) >> 16); \ + *((str) + 0) = (uint8_t) ((x) >> 24); \ +} + +#define PACK32(str, x) \ +{ \ + *(x) = ((uint32_t) *((str) + 3) ) \ + | ((uint32_t) *((str) + 2) << 8) \ + | ((uint32_t) *((str) + 1) << 16) \ + | ((uint32_t) *((str) + 0) << 24); \ +} + +#define SHA256_SCR(i) \ +{ \ + w[i] = SHA256_F4(w[i - 2]) + w[i - 7] \ + + SHA256_F3(w[i - 15]) + w[i - 16]; \ +} + +uint32_t sha256_h0[8] = + {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; + +uint32_t sha256_k[64] = + {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2}; + +/* SHA-256 functions */ + +void sha256_transf(sha256_ctx *ctx, const unsigned char *message, + unsigned int block_nb) +{ + uint32_t w[64]; + uint32_t wv[8]; + uint32_t t1, t2; + const unsigned char *sub_block; + int i; + + int j; + + for (i = 0; i < (int) block_nb; i++) { + sub_block = message + (i << 6); + + for (j = 0; j < 16; j++) { + PACK32(&sub_block[j << 2], &w[j]); + } + + for (j = 16; j < 64; j++) { + SHA256_SCR(j); + } + + for (j = 0; j < 8; j++) { + wv[j] = ctx->h[j]; + } + + for 
(j = 0; j < 64; j++) { + t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + + sha256_k[j] + w[j]; + t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); + wv[7] = wv[6]; + wv[6] = wv[5]; + wv[5] = wv[4]; + wv[4] = wv[3] + t1; + wv[3] = wv[2]; + wv[2] = wv[1]; + wv[1] = wv[0]; + wv[0] = t1 + t2; + } + + for (j = 0; j < 8; j++) { + ctx->h[j] += wv[j]; + } + } +} + +void sha256(const unsigned char *message, unsigned int len, unsigned char *digest) +{ + sha256_ctx ctx; + + sha256_init(&ctx); + sha256_update(&ctx, message, len); + sha256_final(&ctx, digest); +} + +void sha256_init(sha256_ctx *ctx) +{ + int i; + for (i = 0; i < 8; i++) { + ctx->h[i] = sha256_h0[i]; + } + + ctx->len = 0; + ctx->tot_len = 0; +} + +void sha256_update(sha256_ctx *ctx, const unsigned char *message, + unsigned int len) +{ + unsigned int block_nb; + unsigned int new_len, rem_len, tmp_len; + const unsigned char *shifted_message; + + tmp_len = SHA256_BLOCK_SIZE - ctx->len; + rem_len = len < tmp_len ? len : tmp_len; + + memcpy(&ctx->block[ctx->len], message, rem_len); + + if (ctx->len + len < SHA256_BLOCK_SIZE) { + ctx->len += len; + return; + } + + new_len = len - rem_len; + block_nb = new_len / SHA256_BLOCK_SIZE; + + shifted_message = message + rem_len; + + sha256_transf(ctx, ctx->block, 1); + sha256_transf(ctx, shifted_message, block_nb); + + rem_len = new_len % SHA256_BLOCK_SIZE; + + memcpy(ctx->block, &shifted_message[block_nb << 6], + rem_len); + + ctx->len = rem_len; + ctx->tot_len += (block_nb + 1) << 6; +} + +void sha256_final(sha256_ctx *ctx, unsigned char *digest) +{ + unsigned int block_nb; + unsigned int pm_len; + unsigned int len_b; + + int i; + + block_nb = (1 + ((SHA256_BLOCK_SIZE - 9) + < (ctx->len % SHA256_BLOCK_SIZE))); + + len_b = (ctx->tot_len + ctx->len) << 3; + pm_len = block_nb << 6; + + memset(ctx->block + ctx->len, 0, pm_len - ctx->len); + ctx->block[ctx->len] = 0x80; + UNPACK32(len_b, ctx->block + pm_len - 4); + + sha256_transf(ctx, ctx->block, block_nb); + + for (i 
= 0 ; i < 8; i++) { + UNPACK32(ctx->h[i], &digest[i << 2]); + } +} diff --git a/sha2.h b/sha2.h new file mode 100644 index 0000000..71d4404 --- /dev/null +++ b/sha2.h @@ -0,0 +1,70 @@ +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2013, Con Kolivas + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "miner.h" + +#ifndef SHA2_H +#define SHA2_H + +#define SHA256_DIGEST_SIZE ( 256 / 8) +#define SHA256_BLOCK_SIZE ( 512 / 8) + +#define SHFR(x, n) (x >> n) +#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) +#define CH(x, y, z) ((x & y) ^ (~x & z)) +#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) + +#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) +#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) +#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) +#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10)) + +typedef struct { + unsigned int tot_len; + unsigned int len; + unsigned char block[2 * SHA256_BLOCK_SIZE]; + uint32_t h[8]; +} sha256_ctx; + +extern uint32_t sha256_k[64]; + +void sha256_init(sha256_ctx * ctx); +void sha256_update(sha256_ctx *ctx, const unsigned char *message, + unsigned int len); +void sha256_final(sha256_ctx *ctx, unsigned char *digest); +void sha256(const unsigned char *message, unsigned int len, + unsigned char *digest); + +#endif /* !SHA2_H */ diff --git a/sha2_c5.c b/sha2_c5.c new file mode 100644 index 0000000..fe1068d --- /dev/null +++ b/sha2_c5.c @@ -0,0 +1,310 @@ +/* + * FIPS-180-2 compliant SHA-256 implementation + * + * Copyright (C) 2011, Con Kolivas + * Copyright (C) 2006-2010, Brainspark B.V. + * + * This file is part of PolarSSL (http://www.polarssl.org) + * Lead Maintainer: Paul Bakker + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +/* + * The SHA-256 Secure Hash Standard was published by NIST in 2002. + * + * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf + */ +#include + +#include "sha2_c5.h" + +extern void dump_hex(uint8_t *data, uint16_t len); +/* + * 32-bit integer manipulation macros (big endian) + */ +#ifndef GET_ULONG_BE +#define GET_ULONG_BE(n,b,i) \ +{ \ + (n) = ( (uint32_t) (b)[(i) ] << 24 ) \ + | ( (uint32_t) (b)[(i) + 1] << 16 ) \ + | ( (uint32_t) (b)[(i) + 2] << 8 ) \ + | ( (uint32_t) (b)[(i) + 3] ); \ +} +#endif + +#ifndef PUT_ULONG_BE +#define PUT_ULONG_BE(n,b,i) \ +{ \ + (b)[(i) ] = (unsigned char) ( (n) >> 24 ); \ + (b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \ + (b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \ + (b)[(i) + 3] = (unsigned char) ( (n) ); \ +} +#endif + +/* + * SHA-256 context setup + */ +void sha2_starts( sha2_context *ctx ) +{ + ctx->total[0] = 0; + ctx->total[1] = 0; + + ctx->state[0] = 0x6A09E667; + ctx->state[1] = 0xBB67AE85; + ctx->state[2] = 0x3C6EF372; + ctx->state[3] = 0xA54FF53A; + ctx->state[4] = 0x510E527F; + ctx->state[5] = 0x9B05688C; + ctx->state[6] = 0x1F83D9AB; + ctx->state[7] = 0x5BE0CD19; +} + +void sha2_process( sha2_context *ctx, const unsigned char data[64] ) +{ + uint32_t temp1, temp2, W[64]; + uint32_t A, B, C, D, E, F, G, H; + + GET_ULONG_BE( W[ 0], data, 0 ); + GET_ULONG_BE( W[ 1], data, 4 ); + GET_ULONG_BE( W[ 2], data, 8 ); + GET_ULONG_BE( W[ 3], data, 12 ); + GET_ULONG_BE( W[ 4], data, 16 ); + GET_ULONG_BE( W[ 5], data, 20 ); + GET_ULONG_BE( W[ 6], data, 24 ); + GET_ULONG_BE( W[ 7], data, 28 ); + GET_ULONG_BE( W[ 8], data, 32 ); + GET_ULONG_BE( W[ 9], data, 36 ); + GET_ULONG_BE( W[10], data, 40 ); + GET_ULONG_BE( W[11], data, 44 ); + GET_ULONG_BE( W[12], data, 48 ); + GET_ULONG_BE( W[13], data, 
52 ); + GET_ULONG_BE( W[14], data, 56 ); + GET_ULONG_BE( W[15], data, 60 ); + +#define SHR(x,n) ((x & 0xFFFFFFFF) >> n) +#define ROTR(x,n) (SHR(x,n) | (x << (32 - n))) + +#define S0(x) (ROTR(x, 7) ^ ROTR(x,18) ^ SHR(x, 3)) +#define S1(x) (ROTR(x,17) ^ ROTR(x,19) ^ SHR(x,10)) + +#define S2(x) (ROTR(x, 2) ^ ROTR(x,13) ^ ROTR(x,22)) +#define S3(x) (ROTR(x, 6) ^ ROTR(x,11) ^ ROTR(x,25)) + +#define F0(x,y,z) ((x & y) | (z & (x | y))) +#define F1(x,y,z) (z ^ (x & (y ^ z))) + +#define R(t) \ +( \ + W[t] = S1(W[t - 2]) + W[t - 7] + \ + S0(W[t - 15]) + W[t - 16] \ +) + +#define P(a,b,c,d,e,f,g,h,x,K) \ +{ \ + temp1 = h + S3(e) + F1(e,f,g) + K + x; \ + temp2 = S2(a) + F0(a,b,c); \ + d += temp1; h = temp1 + temp2; \ +} + + A = ctx->state[0]; + B = ctx->state[1]; + C = ctx->state[2]; + D = ctx->state[3]; + E = ctx->state[4]; + F = ctx->state[5]; + G = ctx->state[6]; + H = ctx->state[7]; + + P( A, B, C, D, E, F, G, H, W[ 0], 0x428A2F98 ); + P( H, A, B, C, D, E, F, G, W[ 1], 0x71374491 ); + P( G, H, A, B, C, D, E, F, W[ 2], 0xB5C0FBCF ); + P( F, G, H, A, B, C, D, E, W[ 3], 0xE9B5DBA5 ); + P( E, F, G, H, A, B, C, D, W[ 4], 0x3956C25B ); + P( D, E, F, G, H, A, B, C, W[ 5], 0x59F111F1 ); + P( C, D, E, F, G, H, A, B, W[ 6], 0x923F82A4 ); + P( B, C, D, E, F, G, H, A, W[ 7], 0xAB1C5ED5 ); + P( A, B, C, D, E, F, G, H, W[ 8], 0xD807AA98 ); + P( H, A, B, C, D, E, F, G, W[ 9], 0x12835B01 ); + P( G, H, A, B, C, D, E, F, W[10], 0x243185BE ); + P( F, G, H, A, B, C, D, E, W[11], 0x550C7DC3 ); + P( E, F, G, H, A, B, C, D, W[12], 0x72BE5D74 ); + P( D, E, F, G, H, A, B, C, W[13], 0x80DEB1FE ); + P( C, D, E, F, G, H, A, B, W[14], 0x9BDC06A7 ); + P( B, C, D, E, F, G, H, A, W[15], 0xC19BF174 ); + P( A, B, C, D, E, F, G, H, R(16), 0xE49B69C1 ); + P( H, A, B, C, D, E, F, G, R(17), 0xEFBE4786 ); + P( G, H, A, B, C, D, E, F, R(18), 0x0FC19DC6 ); + P( F, G, H, A, B, C, D, E, R(19), 0x240CA1CC ); + P( E, F, G, H, A, B, C, D, R(20), 0x2DE92C6F ); + P( D, E, F, G, H, A, B, C, R(21), 0x4A7484AA ); + P( C, 
D, E, F, G, H, A, B, R(22), 0x5CB0A9DC ); + P( B, C, D, E, F, G, H, A, R(23), 0x76F988DA ); + P( A, B, C, D, E, F, G, H, R(24), 0x983E5152 ); + P( H, A, B, C, D, E, F, G, R(25), 0xA831C66D ); + P( G, H, A, B, C, D, E, F, R(26), 0xB00327C8 ); + P( F, G, H, A, B, C, D, E, R(27), 0xBF597FC7 ); + P( E, F, G, H, A, B, C, D, R(28), 0xC6E00BF3 ); + P( D, E, F, G, H, A, B, C, R(29), 0xD5A79147 ); + P( C, D, E, F, G, H, A, B, R(30), 0x06CA6351 ); + P( B, C, D, E, F, G, H, A, R(31), 0x14292967 ); + P( A, B, C, D, E, F, G, H, R(32), 0x27B70A85 ); + P( H, A, B, C, D, E, F, G, R(33), 0x2E1B2138 ); + P( G, H, A, B, C, D, E, F, R(34), 0x4D2C6DFC ); + P( F, G, H, A, B, C, D, E, R(35), 0x53380D13 ); + P( E, F, G, H, A, B, C, D, R(36), 0x650A7354 ); + P( D, E, F, G, H, A, B, C, R(37), 0x766A0ABB ); + P( C, D, E, F, G, H, A, B, R(38), 0x81C2C92E ); + P( B, C, D, E, F, G, H, A, R(39), 0x92722C85 ); + P( A, B, C, D, E, F, G, H, R(40), 0xA2BFE8A1 ); + P( H, A, B, C, D, E, F, G, R(41), 0xA81A664B ); + P( G, H, A, B, C, D, E, F, R(42), 0xC24B8B70 ); + P( F, G, H, A, B, C, D, E, R(43), 0xC76C51A3 ); + P( E, F, G, H, A, B, C, D, R(44), 0xD192E819 ); + P( D, E, F, G, H, A, B, C, R(45), 0xD6990624 ); + P( C, D, E, F, G, H, A, B, R(46), 0xF40E3585 ); + P( B, C, D, E, F, G, H, A, R(47), 0x106AA070 ); + P( A, B, C, D, E, F, G, H, R(48), 0x19A4C116 ); + P( H, A, B, C, D, E, F, G, R(49), 0x1E376C08 ); + P( G, H, A, B, C, D, E, F, R(50), 0x2748774C ); + P( F, G, H, A, B, C, D, E, R(51), 0x34B0BCB5 ); + P( E, F, G, H, A, B, C, D, R(52), 0x391C0CB3 ); + P( D, E, F, G, H, A, B, C, R(53), 0x4ED8AA4A ); + P( C, D, E, F, G, H, A, B, R(54), 0x5B9CCA4F ); + P( B, C, D, E, F, G, H, A, R(55), 0x682E6FF3 ); + P( A, B, C, D, E, F, G, H, R(56), 0x748F82EE ); + P( H, A, B, C, D, E, F, G, R(57), 0x78A5636F ); + P( G, H, A, B, C, D, E, F, R(58), 0x84C87814 ); + P( F, G, H, A, B, C, D, E, R(59), 0x8CC70208 ); + P( E, F, G, H, A, B, C, D, R(60), 0x90BEFFFA ); + P( D, E, F, G, H, A, B, C, R(61), 0xA4506CEB ); + P( C, 
D, E, F, G, H, A, B, R(62), 0xBEF9A3F7 ); + P( B, C, D, E, F, G, H, A, R(63), 0xC67178F2 ); + + ctx->state[0] += A; + ctx->state[1] += B; + ctx->state[2] += C; + ctx->state[3] += D; + ctx->state[4] += E; + ctx->state[5] += F; + ctx->state[6] += G; + ctx->state[7] += H; +} + +/* + * SHA-256 process buffer + */ +void sha2_update( sha2_context *ctx, const unsigned char *input, int ilen ) +{ + int fill; + uint32_t left; + + if( ilen <= 0 ) + return; + + left = ctx->total[0] & 0x3F; + fill = 64 - left; + + ctx->total[0] += ilen; + ctx->total[0] &= 0xFFFFFFFF; + + if( ctx->total[0] < (uint32_t) ilen ) + ctx->total[1]++; + + if( left && ilen >= fill ) + { + memcpy( (void *) (ctx->buffer + left), + (void *) input, fill ); + sha2_process( ctx, ctx->buffer ); + input += fill; + ilen -= fill; + left = 0; + } + + while( ilen >= 64 ) + { + sha2_process( ctx, input ); + input += 64; + ilen -= 64; + } + + if( ilen > 0 ) + { + memcpy((void *) (ctx->buffer + left), + (void *) input, ilen ); + } + /* + printk("ctx sha2_update:"); + dump_hex((uint8_t*)ctx,sizeof(*ctx)); + */ +} + +static const unsigned char sha2_padding[64] = +{ + 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +/* + * SHA-256 final digest + */ +void sha2_finish( sha2_context *ctx, unsigned char output[32] ) +{ + uint32_t last, padn; + uint32_t high, low; + unsigned char msglen[8]; + + high = ( ctx->total[0] >> 29 ) + | ( ctx->total[1] << 3 ); + low = ( ctx->total[0] << 3 ); + + PUT_ULONG_BE( high, msglen, 0 ); + PUT_ULONG_BE( low, msglen, 4 ); + + last = ctx->total[0] & 0x3F; + padn = ( last < 56 ) ? 
( 56 - last ) : ( 120 - last ); + + sha2_update( ctx, (unsigned char *) sha2_padding, padn ); + sha2_update( ctx, msglen, 8 ); + + PUT_ULONG_BE( ctx->state[0], output, 0 ); + PUT_ULONG_BE( ctx->state[1], output, 4 ); + PUT_ULONG_BE( ctx->state[2], output, 8 ); + PUT_ULONG_BE( ctx->state[3], output, 12 ); + PUT_ULONG_BE( ctx->state[4], output, 16 ); + PUT_ULONG_BE( ctx->state[5], output, 20 ); + PUT_ULONG_BE( ctx->state[6], output, 24 ); + + PUT_ULONG_BE( ctx->state[7], output, 28 ); +} + +/* + * output = SHA-256( input buffer ) + */ +void sha2( const unsigned char *input, int ilen, + unsigned char output[32] ) +{ + sha2_context ctx; + + sha2_starts( &ctx ); + sha2_update( &ctx, input, ilen ); + sha2_finish( &ctx, output ); + + memset(&ctx, 0, sizeof(sha2_context)); +} diff --git a/sha2_c5.h b/sha2_c5.h new file mode 100644 index 0000000..11619e9 --- /dev/null +++ b/sha2_c5.h @@ -0,0 +1,96 @@ +/** + * \file sha2.h + * + * Copyright (C) 2011, Con Kolivas + * Copyright (C) 2006-2010, Brainspark B.V. + * + * This file is part of PolarSSL (http://www.polarssl.org) + * Lead Maintainer: Paul Bakker + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include "config.h" +#include "miner.h" + +#ifndef POLARSSL_SHA2_H +#define POLARSSL_SHA2_H + + + +/** + * \brief SHA-256 context structure + */ +typedef struct +{ + uint32_t total[2]; /*!< number of bytes processed */ + uint32_t state[8]; /*!< intermediate digest state */ + unsigned char buffer[64]; /*!< data block being processed */ + + unsigned char ipad[64]; /*!< HMAC: inner padding */ + unsigned char opad[64]; /*!< HMAC: outer padding */ +} +sha2_context; + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \brief SHA-256 context setup + * + * \param ctx context to be initialized + */ +void sha2_starts( sha2_context *ctx); + +/** + * \brief SHA-256 process buffer + * + * \param ctx SHA-256 context + * \param input buffer holding the data + * \param ilen length of the input data + */ +void sha2_update( sha2_context *ctx, const unsigned char *input, int ilen ); + +/** + * \brief SHA-256 final digest + * + * \param ctx SHA-256 context + * \param output SHA-256 checksum result + */ +void sha2_finish( sha2_context *ctx, unsigned char output[32] ); + +/** + * \brief Output = SHA-256( input buffer ) + * + * \param input buffer holding the data + * \param ilen length of the input data + * \param output SHA-256 checksum result + */ +void sha2( const unsigned char *input, int ilen, + unsigned char output[32]); + + +void sha2_process( sha2_context *ctx, const unsigned char data[64] ); + +#ifdef __cplusplus +} +#endif + +#endif /* sha2.h */ + + diff --git a/spi-context.c b/spi-context.c new file mode 100644 index 0000000..e0ec139 --- /dev/null +++ b/spi-context.c @@ -0,0 +1,94 @@ +/* + * generic SPI functions + * + * Copyright 2013, 2014 Zefir Kurtisi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "spi-context.h" + +#include "logging.h" +#include "miner.h" + +#include +#include +#include +#include + +struct spi_ctx *spi_init(struct spi_config *config) +{ + char dev_fname[PATH_MAX]; + struct spi_ctx *ctx; + + if (config == NULL) + return NULL; + + sprintf(dev_fname, SPI_DEVICE_TEMPLATE, config->bus, config->cs_line); + + int fd = open(dev_fname, O_RDWR); + if (fd < 0) { + applog(LOG_ERR, "SPI: Can not open SPI device %s", dev_fname); + return NULL; + } + + if ((ioctl(fd, SPI_IOC_WR_MODE, &config->mode) < 0) || + (ioctl(fd, SPI_IOC_RD_MODE, &config->mode) < 0) || + (ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &config->bits) < 0) || + (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &config->bits) < 0) || + (ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &config->speed) < 0) || + (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &config->speed) < 0)) { + applog(LOG_ERR, "SPI: ioctl error on SPI device %s", dev_fname); + close(fd); + return NULL; + } + + ctx = malloc(sizeof(*ctx)); + assert(ctx != NULL); + + ctx->fd = fd; + ctx->config = *config; + applog(LOG_WARNING, "SPI '%s': mode=%hhu, bits=%hhu, speed=%u", + dev_fname, ctx->config.mode, ctx->config.bits, + ctx->config.speed); + return ctx; +} + +extern void spi_exit(struct spi_ctx *ctx) +{ + if (NULL == ctx) + return; + + close(ctx->fd); + free(ctx); +} + +extern bool spi_transfer(struct spi_ctx *ctx, uint8_t *txbuf, + uint8_t *rxbuf, int len) +{ + struct spi_ioc_transfer xfr; + int ret; + + if (rxbuf != NULL) + memset(rxbuf, 0xff, len); + + ret = len; + + xfr.tx_buf = (unsigned long)txbuf; + xfr.rx_buf = (unsigned long)rxbuf; + xfr.len = len; + xfr.speed_hz = ctx->config.speed; + xfr.delay_usecs = ctx->config.delay; + xfr.bits_per_word = ctx->config.bits; + xfr.cs_change = 0; + xfr.pad = 0; + + ret = ioctl(ctx->fd, SPI_IOC_MESSAGE(1), &xfr); + if (ret < 1) + applog(LOG_ERR, "SPI: ioctl error on SPI device: %d", ret); + + return ret > 0; +} diff --git a/spi-context.h b/spi-context.h new file mode 100644 index 0000000..a341e69 --- 
/dev/null +++ b/spi-context.h @@ -0,0 +1,48 @@ +#ifndef SPI_CONTEXT_H +#define SPI_CONTEXT_H + +#include +#include +#include +#include + +#define SPI_DEVICE_TEMPLATE "/dev/spidev%d.%d" +#define DEFAULT_SPI_BUS 0 +#define DEFAULT_SPI_CS_LINE 0 +#define DEFAULT_SPI_MODE SPI_MODE_0 +#define DEFAULT_SPI_BITS_PER_WORD 8 +#define DEFAULT_SPI_SPEED 1500000 +#define DEFAULT_SPI_DELAY_USECS 0 + +struct spi_config { + int bus; + int cs_line; + uint8_t mode; + uint32_t speed; + uint8_t bits; + uint16_t delay; +}; + +static const struct spi_config default_spi_config = { + .bus = DEFAULT_SPI_BUS, + .cs_line = DEFAULT_SPI_CS_LINE, + .mode = DEFAULT_SPI_MODE, + .speed = DEFAULT_SPI_SPEED, + .bits = DEFAULT_SPI_BITS_PER_WORD, + .delay = DEFAULT_SPI_DELAY_USECS, +}; + +struct spi_ctx { + int fd; + struct spi_config config; +}; + +/* create SPI context with given configuration, returns NULL on failure */ +extern struct spi_ctx *spi_init(struct spi_config *config); +/* close descriptor and free resources */ +extern void spi_exit(struct spi_ctx *ctx); +/* process RX/TX transfer, ensure buffers are long enough */ +extern bool spi_transfer(struct spi_ctx *ctx, uint8_t *txbuf, + uint8_t *rxbuf, int len); + +#endif /* SPI_CONTEXT_H */ diff --git a/usbtest.py b/usbtest.py new file mode 100644 index 0000000..79f79cb --- /dev/null +++ b/usbtest.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python2.7 +# +# Original version supplied to me (Kano/kanoi) by xiangfu +# +# Modified to allow supplying the data to send +# +# Linux usAge: ./ubstest.py /dev/ttyUSB0 0xhexcodes|string|icarus +# OR python ubstest.py /dev/ttyUSB0 0xhexcodes|string|icarus +# +# Windows usAge: ./ubstest.py COM1 0xhexcodes|string|icarus +# +# sends the data sepcified to the USB device and waits +# for a reply then displays it +# +# the data can be: +# 0xhexcodes: e.g. 0x68656c6c6f20776f726c640a +# would send "hello world\n" +# +# string: e.g. 
sendsometext +# +# icarus: sends 2 known block payloads for an icarus device +# and shows the expected and actual answers if it's +# a working V3 icarus + +import sys +import serial +import binascii + +if len(sys.argv) < 2: + sys.stderr.write("usAge: " + sys.argv[0] + " device strings...\n") + sys.stderr.write(" where device is either like /dev/ttyUSB0 or COM1\n") + sys.stderr.write(" and strings are either '0xXXXX' or 'text'\n") + sys.stderr.write(" if the first string is 'icarus' the rest are ignored\n") + sys.stderr.write(" and 2 valid icarus test payloads are sent with results displayed\n") + sys.stderr.write("\nAfter any command is sent it waits up to 30 seconds for a reply\n"); + sys.exit("Aborting") + +# Open with a 10 second timeout - just to be sure +ser = serial.Serial(sys.argv[1], 115200, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE, 10, False, False, 5, False, None) + +if sys.argv[2] == "icarus": + + # This show how Icarus use the block and midstate data + # This will produce nonce 063c5e01 + block = "0000000120c8222d0497a7ab44a1a2c7bf39de941c9970b1dc7cdc400000079700000000e88aabe1f353238c668d8a4df9318e614c10c474f8cdf8bc5f6397b946c33d7c4e7242c31a098ea500000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" + midstate = "33c5bf5751ec7f7e056443b5aee3800331432c83f404d9de38b94ecbf907b92d" + + rdata2 = block.decode('hex')[95:63:-1] + rmid = midstate.decode('hex')[::-1] + payload = rmid + rdata2 + + print("Push payload to icarus: " + binascii.hexlify(payload)) + ser.write(payload) + + b=ser.read(4) + print("Result:(should be: 063c5e01): " + binascii.hexlify(b)) + + # Just another test + payload2 = "ce92099c5a80bb81c52990d5c0924c625fd25a535640607d5a4bdf8174e2c8d500000000000000000000000080000000000000000b290c1a42313b4f21b5bcb8" + print("Push payload to icarus: " + payload2) + ser.write(payload2.decode('hex')) + + b=ser.read(4) + print("Result:(should be: 8e0b31c5): " + binascii.hexlify(b)) +else: + 
data = "" + for arg in sys.argv[2::]: + if arg[0:2:] == '0x': + data += arg[2::].decode('hex') + else: + data += arg + + print("Sending: 0x" + binascii.hexlify(data)) + ser.write(data) + + # If you're expecting more than one linefeed terminated reply, + # you'll only see the first one + # AND with no linefeed, this will wait the 10 seconds before returning + print("Waiting up to 10 seconds ...") + b=ser.readline() + print("Result: hex 0x" + binascii.hexlify(b)) + + # This could mess up the display - do it last + print("Result: asc '" + b + "'") + +ser.close() diff --git a/usbutils.c b/usbutils.c new file mode 100644 index 0000000..758ddd1 --- /dev/null +++ b/usbutils.c @@ -0,0 +1,4456 @@ +/* + * Copyright 2012-2013 Andrew Smith + * Copyright 2013-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include +#include +#include + +#include "logging.h" +#include "miner.h" +#include "usbutils.h" + +static pthread_mutex_t cgusb_lock; +static pthread_mutex_t cgusbres_lock; +static cglock_t cgusb_fd_lock; +static cgtimer_t usb11_cgt; + +#define NODEV(err) ((err) != LIBUSB_SUCCESS && (err) != LIBUSB_ERROR_TIMEOUT) + +#define NOCONTROLDEV(err) ((err) < 0 && NODEV(err)) + +/* + * WARNING - these assume DEVLOCK(cgpu, pstate) is called first and + * DEVUNLOCK(cgpu, pstate) in called in the same function with the same pstate + * given to DEVLOCK. 
+ * You must call DEVUNLOCK(cgpu, pstate) before exiting the function or it will leave + * the thread Cancelability unrestored + */ +#define DEVWLOCK(cgpu, _pth_state) do { \ + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &_pth_state); \ + cg_wlock(&cgpu->usbinfo.devlock); \ + } while (0) + +#define DEVWUNLOCK(cgpu, _pth_state) do { \ + cg_wunlock(&cgpu->usbinfo.devlock); \ + pthread_setcancelstate(_pth_state, NULL); \ + } while (0) + +#define DEVRLOCK(cgpu, _pth_state) do { \ + pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &_pth_state); \ + cg_rlock(&cgpu->usbinfo.devlock); \ + } while (0) + +#define DEVRUNLOCK(cgpu, _pth_state) do { \ + cg_runlock(&cgpu->usbinfo.devlock); \ + pthread_setcancelstate(_pth_state, NULL); \ + } while (0) + +#define USB_CONFIG 1 + +#define BITFURY_TIMEOUT_MS 999 +#define DRILLBIT_TIMEOUT_MS 999 +#define ICARUS_TIMEOUT_MS 999 +#define BMSC_TIMEOUT_MS 999 + +#ifdef WIN32 +#define BFLSC_TIMEOUT_MS 999 +#define BITFORCE_TIMEOUT_MS 999 +#define MODMINER_TIMEOUT_MS 999 +#define AVALON_TIMEOUT_MS 999 +#define AVALON4_TIMEOUT_MS 999 +#define BITMAIN_TIMEOUT_MS 999 +#define KLONDIKE_TIMEOUT_MS 999 +#define COINTERRA_TIMEOUT_MS 999 +#define HASHFAST_TIMEOUT_MS 999 +#define HASHRATIO_TIMEOUT_MS 999 +#define BLOCKERUPTER_TIMEOUT_MS 999 + +/* The safety timeout we use, cancelling async transfers on windows that fail + * to timeout on their own. 
*/ +#define WIN_CALLBACK_EXTRA 40 +#define WIN_WRITE_CBEXTRA 5000 +#else +#define BFLSC_TIMEOUT_MS 300 +#define BITFORCE_TIMEOUT_MS 200 +#define MODMINER_TIMEOUT_MS 100 +#define AVALON_TIMEOUT_MS 200 +#define AVALON4_TIMEOUT_MS 50 +#define BITMAIN_TIMEOUT_MS 200 +#define KLONDIKE_TIMEOUT_MS 200 +#define COINTERRA_TIMEOUT_MS 200 +#define HASHFAST_TIMEOUT_MS 500 +#define HASHRATIO_TIMEOUT_MS 200 +#define BLOCKERUPTER_TIMEOUT_MS 300 +#endif + +#define USB_EPS(_intx, _epinfosx) { \ + .interface = _intx, \ + .ctrl_transfer = _intx, \ + .epinfo_count = ARRAY_SIZE(_epinfosx), \ + .epinfos = _epinfosx \ + } + +#define USB_EPS_CTRL(_inty, _ctrlinty, _epinfosy) { \ + .interface = _inty, \ + .ctrl_transfer = _ctrlinty, \ + .epinfo_count = ARRAY_SIZE(_epinfosy), \ + .epinfos = _epinfosy \ + } + +/* Linked list of all async transfers in progress. Protected by cgusb_fd_lock. + * This allows us to not stop the usb polling thread till all are complete, and + * to find cancellable transfers. */ +static struct list_head ut_list; + +#ifdef USE_BFLSC +static struct usb_epinfo bflsc_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 512, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 512, EPO(2), 0, 0 } +}; + +static struct usb_intinfo bflsc_ints[] = { + USB_EPS(0, bflsc_epinfos) +}; +#endif + +#ifdef USE_BITFORCE +// N.B. 
transfer size is 512 with USB2.0, but only 64 with USB1.1 +static struct usb_epinfo bfl_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo bfl_ints[] = { + USB_EPS(0, bfl_epinfos) +}; +#endif + +#ifdef USE_BITFURY +static struct usb_epinfo bfu0_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 8, EPI(2), 0, 0 } +}; + +static struct usb_epinfo bfu1_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 16, EPI(3), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 16, EPO(4), 0, 0 } +}; + +/* Default to interface 1 */ +static struct usb_intinfo bfu_ints[] = { + USB_EPS(1, bfu1_epinfos), + USB_EPS(0, bfu0_epinfos) +}; + +static struct usb_epinfo bxf0_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 8, EPI(1), 0, 0 } +}; + +static struct usb_epinfo bxf1_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(2), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo bxf_ints[] = { + USB_EPS(1, bxf1_epinfos), + USB_EPS(0, bxf0_epinfos) +}; + +static struct usb_epinfo nfu_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 64, EPO(1), 0, 0 }, +}; + +static struct usb_intinfo nfu_ints[] = { + USB_EPS(0, nfu_epinfos) +}; + +static struct usb_epinfo bxm_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 512, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 512, EPO(2), 0, 0 } +}; + +static struct usb_intinfo bxm_ints[] = { + USB_EPS(0, bxm_epinfos) +}; +#endif + +#ifdef USE_BLOCKERUPTER +// BlockErupter Device +static struct usb_epinfo bet_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(1), 0, 0 } +}; + +static struct usb_intinfo bet_ints[] = { + USB_EPS(0, bet_epinfos) +}; +#endif + +#ifdef USE_DRILLBIT +// Drillbit Bitfury devices +static struct usb_epinfo drillbit_int_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 8, EPI(3), 0, 0 } +}; + +static struct 
usb_epinfo drillbit_bulk_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 16, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 16, EPO(2), 0, 0 }, +}; + +/* Default to interface 1 */ +static struct usb_intinfo drillbit_ints[] = { + USB_EPS(1, drillbit_bulk_epinfos), + USB_EPS(0, drillbit_int_epinfos) +}; +#endif + +#ifdef USE_HASHFAST +#include "driver-hashfast.h" + +static struct usb_epinfo hfa0_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 8, EPI(3), 0, 0 } +}; + +static struct usb_epinfo hfa1_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +/* Default to interface 1 */ +static struct usb_intinfo hfa_ints[] = { + USB_EPS(1, hfa1_epinfos), + USB_EPS(0, hfa0_epinfos) +}; +#endif + +#ifdef USE_HASHRATIO +static struct usb_epinfo hro_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo hro_ints[] = { + USB_EPS(0, hro_epinfos) +}; +#endif + +#ifdef USE_MODMINER +static struct usb_epinfo mmq_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(3), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(3), 0, 0 } +}; + +static struct usb_intinfo mmq_ints[] = { + USB_EPS(1, mmq_epinfos) +}; +#endif + +#ifdef USE_AVALON +static struct usb_epinfo ava_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo ava_ints[] = { + USB_EPS(0, ava_epinfos) +}; +#endif + +#ifdef USE_AVALON2 +static struct usb_epinfo ava2_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(3), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo ava2_ints[] = { + USB_EPS(0, ava2_epinfos) +}; +#endif + +#ifdef USE_AVALON4 +static struct usb_epinfo ava4_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(1), 0, 0 } +}; + +static struct usb_intinfo ava4_ints[] = { + 
USB_EPS(1, ava4_epinfos) +}; +#endif + +#ifdef USE_KLONDIKE +static struct usb_epinfo kln_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(1), 0, 0 } +}; + +static struct usb_intinfo kln_ints[] = { + USB_EPS(0, kln_epinfos) +}; + +static struct usb_epinfo kli0_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 8, EPI(1), 0, 0 } +}; + +static struct usb_epinfo kli1_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(2), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo kli_ints[] = { + USB_EPS(1, kli1_epinfos), + USB_EPS(0, kli0_epinfos) +}; +#endif + +#ifdef USE_ICARUS +static struct usb_epinfo ica_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(3), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo ica_ints[] = { + USB_EPS(0, ica_epinfos) +}; + +static struct usb_epinfo ica1_epinfos0[] = { + { LIBUSB_TRANSFER_TYPE_INTERRUPT, 16, EPI(0x82), 0, 0 } +}; + +static struct usb_epinfo ica1_epinfos1[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(0x81), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(0x01), 0, 0 } +}; + +static struct usb_intinfo ica1_ints[] = { + USB_EPS(1, ica1_epinfos1), + USB_EPS(0, ica1_epinfos0) +}; + +static struct usb_epinfo amu_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(1), 0, 0 } +}; + +static struct usb_intinfo amu_ints[] = { + USB_EPS(0, amu_epinfos) +}; + +static struct usb_epinfo llt_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo llt_ints[] = { + USB_EPS(0, llt_epinfos) +}; + +static struct usb_epinfo cmr1_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo cmr1_ints[] = { + USB_EPS(0, cmr1_epinfos) +}; + +static struct usb_epinfo cmr2_epinfos0[] = { + { 
LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; +static struct usb_epinfo cmr2_epinfos1[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(3), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(4), 0, 0 }, +}; +static struct usb_epinfo cmr2_epinfos2[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(5), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(6), 0, 0 }, +}; +static struct usb_epinfo cmr2_epinfos3[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(7), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(8), 0, 0 } +}; + +static struct usb_intinfo cmr2_ints[] = { + USB_EPS_CTRL(0, 1, cmr2_epinfos0), + USB_EPS_CTRL(1, 2, cmr2_epinfos1), + USB_EPS_CTRL(2, 3, cmr2_epinfos2), + USB_EPS_CTRL(3, 4, cmr2_epinfos3) +}; +#endif + +#ifdef USE_COINTERRA +static struct usb_epinfo cointerra_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(1), 0, 0 } +}; + +static struct usb_intinfo cointerra_ints[] = { + USB_EPS(0, cointerra_epinfos) +}; +#endif + +#ifdef USE_BMSC +static struct usb_epinfo ica_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(3), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo ica_ints[] = { + USB_EPS(0, ica_epinfos) +}; + +static struct usb_epinfo amu_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(1), 0, 0 } +}; + +static struct usb_intinfo amu_ints[] = { + USB_EPS(0, amu_epinfos) +}; + +static struct usb_epinfo llt_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo llt_ints[] = { + USB_EPS(0, llt_epinfos) +}; + +static struct usb_epinfo cmr1_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; + +static struct usb_intinfo cmr1_ints[] = { + USB_EPS(0, cmr1_epinfos) +}; + +static struct usb_epinfo cmr2_epinfos0[] = 
{ + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +}; +static struct usb_epinfo cmr2_epinfos1[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(3), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(4), 0, 0 }, +}; +static struct usb_epinfo cmr2_epinfos2[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(5), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(6), 0, 0 }, +}; +static struct usb_epinfo cmr2_epinfos3[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(7), 0, 0 }, + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(8), 0, 0 } +}; + +static struct usb_intinfo cmr2_ints[] = { + USB_EPS_CTRL(0, 1, cmr2_epinfos0), + USB_EPS_CTRL(1, 2, cmr2_epinfos1), + USB_EPS_CTRL(2, 3, cmr2_epinfos2), + USB_EPS_CTRL(3, 4, cmr2_epinfos3) +}; +#endif + +#ifdef USE_BITMAIN +static struct usb_epinfo btm_epinfos[] = { + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPI(1), 0, 0 }, +#ifdef WIN32 + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(2), 0, 0 } +#else + { LIBUSB_TRANSFER_TYPE_BULK, 64, EPO(1), 0, 0 } +#endif +}; + +static struct usb_intinfo btm_ints[] = { + USB_EPS(0, btm_epinfos) +}; +#endif + +#define IDVENDOR_FTDI 0x0403 + +#define INTINFO(_ints) \ + .intinfo_count = ARRAY_SIZE(_ints), \ + .intinfos = _ints + +#define USBEP(_usbdev, _intinfo, _epinfo) (_usbdev->found->intinfos[_intinfo].epinfos[_epinfo].ep) +#define THISIF(_found, _this) (_found->intinfos[_this].interface) +#define USBIF(_usbdev, _this) THISIF(_usbdev->found, _this) + +// TODO: Add support for (at least) Isochronous endpoints +static struct usb_find_devices find_dev[] = { +#ifdef USE_BFLSC + { + .drv = DRIVER_bflsc, + .name = "BAS", + .ident = IDENT_BAS, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6014, + //.iManufacturer = "Butterfly Labs", + .iProduct = "BitFORCE SHA256 SC", + .config = 1, + .timeout = BFLSC_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(bflsc_ints) }, + { + .drv = DRIVER_bflsc, + .name = "BMA", + .ident = IDENT_BMA, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6014, + 
//.iManufacturer = "BUTTERFLY LABS" + .iProduct = "BitFORCE SC-28nm", + .config = 1, + .timeout = BFLSC_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(bflsc_ints) }, + { + .drv = DRIVER_bflsc, + .name = "BMA", + .ident = IDENT_BMA, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6014, + .iManufacturer = "BUTTERFLY LABS", + .iProduct = "BitFORCE SHA256", + .config = 1, + .timeout = BFLSC_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(bflsc_ints) }, +#endif +#ifdef USE_BITFORCE + { + .drv = DRIVER_bitforce, + .name = "BFL", + .ident = IDENT_BFL, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6014, + .iManufacturer = "Butterfly Labs Inc.", + .iProduct = "BitFORCE SHA256", + .config = 1, + .timeout = BITFORCE_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(bfl_ints) }, +#endif +#ifdef USE_BITFURY + { + .drv = DRIVER_bitfury, + .name = "BF1", + .ident = IDENT_BF1, + .idVendor = 0x03eb, + .idProduct = 0x204b, + .config = 1, + .timeout = BITFURY_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + //.iManufacturer = "BPMC", + .iProduct = "Bitfury BF1", + INTINFO(bfu_ints) + }, + { + .drv = DRIVER_bitfury, + .name = "BXF", + .ident = IDENT_BXF, + .idVendor = 0x198c, + .idProduct = 0xb1f1, + .config = 1, + .timeout = BITFURY_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + .iManufacturer = "c-scape", + .iProduct = "bi?fury", + INTINFO(bxf_ints) + }, + { + .drv = DRIVER_bitfury, + .name = "OSM", + .ident = IDENT_OSM, + .idVendor = 0x198c, + .idProduct = 0xb1f1, + .config = 1, + .timeout = BITFURY_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + .iManufacturer = "c-scape", + .iProduct = "OneString", + INTINFO(bxf_ints) + }, + { + .drv = DRIVER_bitfury, + .name = "NFU", + .ident = IDENT_NFU, + .idVendor = 0x04d8, + .idProduct = 0x00de, + .config = 1, + .timeout = BITFURY_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(nfu_ints) + }, + { + .drv = DRIVER_bitfury, + .name = "BXM", + .ident = IDENT_BXM, + .idVendor = 0x0403, + .idProduct = 0x6014, + .config = 1, + .timeout = BITFURY_TIMEOUT_MS, + 
.latency = LATENCY_UNUSED, + INTINFO(bxm_ints) + }, +#endif +#ifdef USE_BLOCKERUPTER + { + .drv = DRIVER_blockerupter, + .name = "BET", + .ident = IDENT_BET, + .idVendor = 0x10c4, + .idProduct = 0xea60, + .config = 1, + .timeout = BLOCKERUPTER_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(bet_ints) }, + +#endif +#ifdef USE_DRILLBIT + { + .drv = DRIVER_drillbit, + .name = "DRB", + .ident = IDENT_DRB, + .idVendor = 0x03eb, + .idProduct = 0x2404, + .config = 1, + .timeout = DRILLBIT_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + .iManufacturer = "Drillbit Systems", + .iProduct = NULL, /* Can be Thumb or Eight, same driver */ + INTINFO(drillbit_ints) + }, +#endif +#ifdef USE_MODMINER + { + .drv = DRIVER_modminer, + .name = "MMQ", + .ident = IDENT_MMQ, + .idVendor = 0x1fc9, + .idProduct = 0x0003, + .config = 1, + .timeout = MODMINER_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(mmq_ints) }, +#endif +#ifdef USE_AVALON + { + .drv = DRIVER_avalon, + .name = "BTB", + .ident = IDENT_BTB, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .iManufacturer = "Burnin Electronics", + .iProduct = "BitBurner", + .config = 1, + .timeout = AVALON_TIMEOUT_MS, + .latency = 10, + INTINFO(ava_ints) }, + { + .drv = DRIVER_avalon, + .name = "BBF", + .ident = IDENT_BBF, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .iManufacturer = "Burnin Electronics", + .iProduct = "BitBurner Fury", + .config = 1, + .timeout = AVALON_TIMEOUT_MS, + .latency = 10, + INTINFO(ava_ints) }, + { + .drv = DRIVER_avalon, + .name = "AVA", + .ident = IDENT_AVA, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .config = 1, + .timeout = AVALON_TIMEOUT_MS, + .latency = 10, + INTINFO(ava_ints) }, +#endif +#ifdef USE_AVALON2 + { + .drv = DRIVER_avalon2, + .name = "AV2", + .ident = IDENT_AV2, + .idVendor = 0x067b, + .idProduct = 0x2303, + .config = 1, + .timeout = AVALON_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(ava2_ints) }, +#endif +#ifdef USE_AVALON4 + { + .drv = DRIVER_avalon4, + .name = 
"AV4", + .ident = IDENT_AV4, + .idVendor = 0x29f1, + .idProduct = 0x33f2, + .iManufacturer = "CANAAN", + .iProduct = "USB2IIC Converter", + .config = 1, + .timeout = AVALON4_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(ava4_ints) }, +#endif +#ifdef USE_HASHFAST + { + .drv = DRIVER_hashfast, + .name = "HFA", + .ident = IDENT_HFA, + .idVendor = HF_USB_VENDOR_ID, + .idProduct = HF_USB_PRODUCT_ID_G1, + .iManufacturer = "HashFast LLC", + .iProduct = "M1 Module", + .config = 1, + .timeout = HASHFAST_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(hfa_ints) }, +#endif +#ifdef USE_HASHRATIO + { + .drv = DRIVER_hashratio, + .name = "HRO", + .ident = IDENT_HRO, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .config = 1, + .timeout = HASHRATIO_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(hro_ints) }, +#endif +#ifdef USE_KLONDIKE + { + .drv = DRIVER_klondike, + .name = "KLN", + .ident = IDENT_KLN, + .idVendor = 0x04D8, + .idProduct = 0xF60A, + .config = 1, + .timeout = KLONDIKE_TIMEOUT_MS, + .latency = 10, + INTINFO(kln_ints) }, + { + .drv = DRIVER_klondike, + .name = "KLI", + .ident = IDENT_KLN, + .idVendor = 0x04D8, + .idProduct = 0xF60A, + .config = 1, + .timeout = KLONDIKE_TIMEOUT_MS, + .latency = 10, + INTINFO(kli_ints) }, +#endif +#ifdef USE_ICARUS + { + .drv = DRIVER_icarus, + .name = "ICA", + .ident = IDENT_ICA, + .idVendor = 0x067b, + .idProduct = 0x2303, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(ica_ints) }, + { + .drv = DRIVER_icarus, + .name = "ICA", + .ident = IDENT_AVA, + .idVendor = 0x1fc9, + .idProduct = 0x0083, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(ica1_ints) }, + { + .drv = DRIVER_icarus, + .name = "AMU", + .ident = IDENT_AMU, + .idVendor = 0x10c4, + .idProduct = 0xea60, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(amu_ints) }, + { + .drv = DRIVER_icarus, + .name = "LIN", + .ident = IDENT_LIN, + .idVendor = 
0x10c4, + .idProduct = 0xea60, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(amu_ints) }, + { + .drv = DRIVER_icarus, + .name = "ANU", + .ident = IDENT_ANU, + .idVendor = 0x10c4, + .idProduct = 0xea60, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(amu_ints) }, + { + .drv = DRIVER_icarus, + .name = "BLT", + .ident = IDENT_BLT, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .iProduct = "FT232R USB UART", + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(llt_ints) }, + // For any that don't match the above "BLT" + { + .drv = DRIVER_icarus, + .name = "LLT", + .ident = IDENT_LLT, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(llt_ints) }, + { + .drv = DRIVER_icarus, + .name = "CMR", + .ident = IDENT_CMR1, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6014, + .iProduct = "Cairnsmore1", + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(cmr1_ints) }, + { + .drv = DRIVER_icarus, + .name = "CMR", + .ident = IDENT_CMR2, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x8350, + .iProduct = "Cairnsmore1", + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(cmr2_ints) }, +#endif +#ifdef USE_COINTERRA + { + .drv = DRIVER_cointerra, + .name = "CTA", + .ident = IDENT_CTA, + .idVendor = 0x1cbe, + .idProduct = 0x0003, + .config = 1, + .timeout = COINTERRA_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(cointerra_ints) }, +#endif +#ifdef USE_BMSC + { + .drv = DRIVER_bmsc, + .name = "ICA", + .ident = IDENT_ICA, + .idVendor = 0x067b, + .idProduct = 0x2303, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(ica_ints) }, + { + .drv = DRIVER_bmsc, + .name = "AMU", + .ident = IDENT_AMU, + .idVendor = 0x10c4, + .idProduct = 0xea60, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = 
LATENCY_UNUSED, + INTINFO(amu_ints) }, + { + .drv = DRIVER_bmsc, + .name = "ANU", + .ident = IDENT_ANU, + .idVendor = 0x10c4, + .idProduct = 0xea60, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_UNUSED, + INTINFO(amu_ints) }, + { + .drv = DRIVER_bmsc, + .name = "BLT", + .ident = IDENT_BLT, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .iProduct = "FT232R USB UART", + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(llt_ints) }, + // For any that don't match the above "BLT" + { + .drv = DRIVER_bmsc, + .name = "LLT", + .ident = IDENT_LLT, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(llt_ints) }, + { + .drv = DRIVER_bmsc, + .name = "CMR", + .ident = IDENT_CMR1, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6014, + .iProduct = "Cairnsmore1", + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(cmr1_ints) }, + { + .drv = DRIVER_bmsc, + .name = "CMR", + .ident = IDENT_CMR2, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x8350, + .iProduct = "Cairnsmore1", + .config = 1, + .timeout = ICARUS_TIMEOUT_MS, + .latency = LATENCY_STD, + INTINFO(cmr2_ints) }, +#endif +#ifdef USE_BITMAIN + { + .drv = DRIVER_bitmain, + .name = "BMM", + .ident = IDENT_BMM, +#ifdef WIN32 + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6001, +#else + .idVendor = 0x4254, + .idProduct = 0x4153, +#endif + .config = 1, + .timeout = BITMAIN_TIMEOUT_MS, + .latency = 10, + INTINFO(btm_ints) }, + { + .drv = DRIVER_bitmain, + .name = "BMS", + .ident = IDENT_BMS, + .idVendor = IDVENDOR_FTDI, + .idProduct = 0x6602, + .config = 1, + .timeout = BITMAIN_TIMEOUT_MS, + .latency = 10, + INTINFO(btm_ints) }, +#endif + { DRIVER_MAX, NULL, 0, 0, 0, NULL, NULL, 0, 0, 0, 0, NULL } +}; + +#define STRBUFLEN 256 +static const char *BLANK = ""; +static const char *space = " "; +static const char *nodatareturned = "no data returned "; + +#if 0 // enable 
USBDEBUG - only during development testing + static const char *debug_true_str = "true"; + static const char *debug_false_str = "false"; + static const char *nodevstr = "=NODEV"; + #define bool_str(boo) ((boo) ? debug_true_str : debug_false_str) + #define isnodev(err) (NODEV(err) ? nodevstr : BLANK) + #define USBDEBUG(fmt, ...) applog(LOG_WARNING, fmt, ##__VA_ARGS__) +#else + #define USBDEBUG(fmt, ...) +#endif + +// For device limits by driver +static struct driver_count { + int count; + int limit; +} drv_count[DRIVER_MAX]; + +// For device limits by list of bus/dev +static struct usb_busdev { + int bus_number; + int device_address; +#ifdef WIN32 + void *resource1; + void *resource2; +#else + int fd; +#endif +} *busdev; + +static int busdev_count = 0; + +// Total device limit +static int total_count = 0; +static int total_limit = 999999; + +struct usb_in_use_list { + struct usb_busdev in_use; + struct usb_in_use_list *prev; + struct usb_in_use_list *next; +}; + +// List of in use devices +static struct usb_in_use_list *in_use_head = NULL; +static struct usb_in_use_list *blacklist_head = NULL; + +struct resource_work { + bool lock; + const char *dname; + uint8_t bus_number; + uint8_t device_address; + struct resource_work *next; +}; + +// Pending work for the reslock thread +struct resource_work *res_work_head = NULL; + +struct resource_reply { + uint8_t bus_number; + uint8_t device_address; + bool got; + struct resource_reply *next; +}; + +// Replies to lock requests +struct resource_reply *res_reply_head = NULL; + +// Some stats need to always be defined +#define SEQ0 0 +#define SEQ1 1 + +// NONE must be 0 - calloced +#define MODE_NONE 0 +#define MODE_CTRL_READ (1 << 0) +#define MODE_CTRL_WRITE (1 << 1) +#define MODE_BULK_READ (1 << 2) +#define MODE_BULK_WRITE (1 << 3) + +// Set this to 0 to remove stats processing +#define DO_USB_STATS 1 + +static bool stats_initialised = false; + +#if DO_USB_STATS + +#define MODE_SEP_STR "+" +#define MODE_NONE_STR "X" +#define 
MODE_CTRL_READ_STR "cr" +#define MODE_CTRL_WRITE_STR "cw" +#define MODE_BULK_READ_STR "br" +#define MODE_BULK_WRITE_STR "bw" + +// One for each CMD, TIMEOUT, ERROR +struct cg_usb_stats_item { + uint64_t count; + double total_delay; + double min_delay; + double max_delay; + struct timeval first; + struct timeval last; +}; + +#define CMD_CMD 0 +#define CMD_TIMEOUT 1 +#define CMD_ERROR 2 + +// One for each C_CMD +struct cg_usb_stats_details { + int seq; + uint32_t modes; + struct cg_usb_stats_item item[CMD_ERROR+1]; +}; + +// One for each device +struct cg_usb_stats { + char *name; + int device_id; + struct cg_usb_stats_details *details; +}; + +static struct cg_usb_stats *usb_stats = NULL; +static int next_stat = USB_NOSTAT; + +#define SECTOMS(s) ((int)((s) * 1000)) + +#define USB_STATS(sgpu_, sta_, fin_, err_, mode_, cmd_, seq_, tmo_) \ + stats(sgpu_, sta_, fin_, err_, mode_, cmd_, seq_, tmo_) +#define STATS_TIMEVAL(tv_) cgtime(tv_) +#define USB_REJECT(sgpu_, mode_) rejected_inc(sgpu_, mode_) + +#else +#define USB_STATS(sgpu_, sta_, fin_, err_, mode_, cmd_, seq_, tmo_) +#define STATS_TIMEVAL(tv_) +#define USB_REJECT(sgpu_, mode_) + +#endif // DO_USB_STATS + +/* Create usb_commands array from USB_PARSE_COMMANDS macro in usbutils.h */ +char *usb_commands[] = { + USB_PARSE_COMMANDS(JUMPTABLE) + "Null" +}; + +#ifdef EOL +#undef EOL +#endif +#define EOL "\n" + +static const char *DESDEV = "Device"; +static const char *DESCON = "Config"; +static const char *DESSTR = "String"; +static const char *DESINT = "Interface"; +static const char *DESEP = "Endpoint"; +static const char *DESHID = "HID"; +static const char *DESRPT = "Report"; +static const char *DESPHY = "Physical"; +static const char *DESHUB = "Hub"; + +static const char *EPIN = "In: "; +static const char *EPOUT = "Out: "; +static const char *EPX = "?: "; + +static const char *CONTROL = "Control"; +static const char *ISOCHRONOUS_X = "Isochronous+?"; +static const char *ISOCHRONOUS_N_X = "Isochronous+None+?"; +static 
const char *ISOCHRONOUS_N_D = "Isochronous+None+Data"; +static const char *ISOCHRONOUS_N_F = "Isochronous+None+Feedback"; +static const char *ISOCHRONOUS_N_I = "Isochronous+None+Implicit"; +static const char *ISOCHRONOUS_A_X = "Isochronous+Async+?"; +static const char *ISOCHRONOUS_A_D = "Isochronous+Async+Data"; +static const char *ISOCHRONOUS_A_F = "Isochronous+Async+Feedback"; +static const char *ISOCHRONOUS_A_I = "Isochronous+Async+Implicit"; +static const char *ISOCHRONOUS_D_X = "Isochronous+Adaptive+?"; +static const char *ISOCHRONOUS_D_D = "Isochronous+Adaptive+Data"; +static const char *ISOCHRONOUS_D_F = "Isochronous+Adaptive+Feedback"; +static const char *ISOCHRONOUS_D_I = "Isochronous+Adaptive+Implicit"; +static const char *ISOCHRONOUS_S_X = "Isochronous+Sync+?"; +static const char *ISOCHRONOUS_S_D = "Isochronous+Sync+Data"; +static const char *ISOCHRONOUS_S_F = "Isochronous+Sync+Feedback"; +static const char *ISOCHRONOUS_S_I = "Isochronous+Sync+Implicit"; +static const char *BULK = "Bulk"; +static const char *INTERRUPT = "Interrupt"; +static const char *UNKNOWN = "Unknown"; + +static const char *destype(uint8_t bDescriptorType) +{ + switch (bDescriptorType) { + case LIBUSB_DT_DEVICE: + return DESDEV; + case LIBUSB_DT_CONFIG: + return DESCON; + case LIBUSB_DT_STRING: + return DESSTR; + case LIBUSB_DT_INTERFACE: + return DESINT; + case LIBUSB_DT_ENDPOINT: + return DESEP; + case LIBUSB_DT_HID: + return DESHID; + case LIBUSB_DT_REPORT: + return DESRPT; + case LIBUSB_DT_PHYSICAL: + return DESPHY; + case LIBUSB_DT_HUB: + return DESHUB; + } + return UNKNOWN; +} + +static const char *epdir(uint8_t bEndpointAddress) +{ + switch (bEndpointAddress & LIBUSB_ENDPOINT_DIR_MASK) { + case LIBUSB_ENDPOINT_IN: + return EPIN; + case LIBUSB_ENDPOINT_OUT: + return EPOUT; + } + return EPX; +} + +static const char *epatt(uint8_t bmAttributes) +{ + switch(bmAttributes & LIBUSB_TRANSFER_TYPE_MASK) { + case LIBUSB_TRANSFER_TYPE_CONTROL: + return CONTROL; + case 
LIBUSB_TRANSFER_TYPE_BULK: + return BULK; + case LIBUSB_TRANSFER_TYPE_INTERRUPT: + return INTERRUPT; + case LIBUSB_TRANSFER_TYPE_ISOCHRONOUS: + switch(bmAttributes & LIBUSB_ISO_SYNC_TYPE_MASK) { + case LIBUSB_ISO_SYNC_TYPE_NONE: + switch(bmAttributes & LIBUSB_ISO_USAGE_TYPE_MASK) { + case LIBUSB_ISO_USAGE_TYPE_DATA: + return ISOCHRONOUS_N_D; + case LIBUSB_ISO_USAGE_TYPE_FEEDBACK: + return ISOCHRONOUS_N_F; + case LIBUSB_ISO_USAGE_TYPE_IMPLICIT: + return ISOCHRONOUS_N_I; + } + return ISOCHRONOUS_N_X; + case LIBUSB_ISO_SYNC_TYPE_ASYNC: + switch(bmAttributes & LIBUSB_ISO_USAGE_TYPE_MASK) { + case LIBUSB_ISO_USAGE_TYPE_DATA: + return ISOCHRONOUS_A_D; + case LIBUSB_ISO_USAGE_TYPE_FEEDBACK: + return ISOCHRONOUS_A_F; + case LIBUSB_ISO_USAGE_TYPE_IMPLICIT: + return ISOCHRONOUS_A_I; + } + return ISOCHRONOUS_A_X; + case LIBUSB_ISO_SYNC_TYPE_ADAPTIVE: + switch(bmAttributes & LIBUSB_ISO_USAGE_TYPE_MASK) { + case LIBUSB_ISO_USAGE_TYPE_DATA: + return ISOCHRONOUS_D_D; + case LIBUSB_ISO_USAGE_TYPE_FEEDBACK: + return ISOCHRONOUS_D_F; + case LIBUSB_ISO_USAGE_TYPE_IMPLICIT: + return ISOCHRONOUS_D_I; + } + return ISOCHRONOUS_D_X; + case LIBUSB_ISO_SYNC_TYPE_SYNC: + switch(bmAttributes & LIBUSB_ISO_USAGE_TYPE_MASK) { + case LIBUSB_ISO_USAGE_TYPE_DATA: + return ISOCHRONOUS_S_D; + case LIBUSB_ISO_USAGE_TYPE_FEEDBACK: + return ISOCHRONOUS_S_F; + case LIBUSB_ISO_USAGE_TYPE_IMPLICIT: + return ISOCHRONOUS_S_I; + } + return ISOCHRONOUS_S_X; + } + return ISOCHRONOUS_X; + } + + return UNKNOWN; +} + +static void append(char **buf, char *append, size_t *off, size_t *len) +{ + int new = strlen(append); + if ((new + *off) >= *len) + { + *len *= 2; + *buf = realloc(*buf, *len); + if (unlikely(!*buf)) + quit(1, "USB failed to realloc append"); + } + + strcpy(*buf + *off, append); + *off += new; +} + +static bool setgetdes(ssize_t count, libusb_device *dev, struct libusb_device_handle *handle, struct libusb_config_descriptor **config, int cd, char **buf, size_t *off, size_t *len) +{ + char tmp[512]; + 
int err; + + err = libusb_set_configuration(handle, cd); + if (err) { + snprintf(tmp, sizeof(tmp), EOL " ** dev %d: Failed to set config descriptor to %d, err %d", + (int)count, cd, err); + append(buf, tmp, off, len); + return false; + } + + err = libusb_get_active_config_descriptor(dev, config); + if (err) { + snprintf(tmp, sizeof(tmp), EOL " ** dev %d: Failed to get active config descriptor set to %d, err %d", + (int)count, cd, err); + append(buf, tmp, off, len); + return false; + } + + snprintf(tmp, sizeof(tmp), EOL " ** dev %d: Set & Got active config descriptor to %d, err %d", + (int)count, cd, err); + append(buf, tmp, off, len); + return true; +} + +static void usb_full(ssize_t *count, libusb_device *dev, char **buf, size_t *off, size_t *len, int level) +{ + struct libusb_device_descriptor desc; + uint8_t bus_number; + uint8_t device_address; + struct libusb_device_handle *handle; + struct libusb_config_descriptor *config; + const struct libusb_interface_descriptor *idesc; + const struct libusb_endpoint_descriptor *epdesc; + unsigned char man[STRBUFLEN+1]; + unsigned char prod[STRBUFLEN+1]; + unsigned char ser[STRBUFLEN+1]; + char tmp[512]; + int err, i, j, k; + + err = libusb_get_device_descriptor(dev, &desc); + if (opt_usb_list_all && err) { + snprintf(tmp, sizeof(tmp), EOL ".USB dev %d: Failed to get descriptor, err %d", + (int)(++(*count)), err); + append(buf, tmp, off, len); + return; + } + + bus_number = libusb_get_bus_number(dev); + device_address = libusb_get_device_address(dev); + + if (!opt_usb_list_all) { + bool known = false; + + for (i = 0; find_dev[i].drv != DRIVER_MAX; i++) + if ((find_dev[i].idVendor == desc.idVendor) && + (find_dev[i].idProduct == desc.idProduct)) { + known = true; + break; + } + + if (!known) + return; + } + + (*count)++; + + if (level == 0) { + snprintf(tmp, sizeof(tmp), EOL ".USB dev %d: Bus %d Device %d ID: %04x:%04x", + (int)(*count), (int)bus_number, (int)device_address, + desc.idVendor, desc.idProduct); + } else { + 
snprintf(tmp, sizeof(tmp), EOL ".USB dev %d: Bus %d Device %d Device Descriptor:" EOL "\tLength: %d" EOL + "\tDescriptor Type: %s" EOL "\tUSB: %04x" EOL "\tDeviceClass: %d" EOL + "\tDeviceSubClass: %d" EOL "\tDeviceProtocol: %d" EOL "\tMaxPacketSize0: %d" EOL + "\tidVendor: %04x" EOL "\tidProduct: %04x" EOL "\tDeviceRelease: %x" EOL + "\tNumConfigurations: %d", + (int)(*count), (int)bus_number, (int)device_address, + (int)(desc.bLength), destype(desc.bDescriptorType), + desc.bcdUSB, (int)(desc.bDeviceClass), (int)(desc.bDeviceSubClass), + (int)(desc.bDeviceProtocol), (int)(desc.bMaxPacketSize0), + desc.idVendor, desc.idProduct, desc.bcdDevice, + (int)(desc.bNumConfigurations)); + } + append(buf, tmp, off, len); + + err = libusb_open(dev, &handle); + if (err) { + snprintf(tmp, sizeof(tmp), EOL " ** dev %d: Failed to open, err %d", (int)(*count), err); + append(buf, tmp, off, len); + return; + } + + err = libusb_get_string_descriptor_ascii(handle, desc.iManufacturer, man, STRBUFLEN); + if (err < 0) + snprintf((char *)man, sizeof(man), "** err:(%d) %s", err, libusb_error_name(err)); + + err = libusb_get_string_descriptor_ascii(handle, desc.iProduct, prod, STRBUFLEN); + if (err < 0) + snprintf((char *)prod, sizeof(prod), "** err:(%d) %s", err, libusb_error_name(err)); + + if (level == 0) { + libusb_close(handle); + snprintf(tmp, sizeof(tmp), EOL " Manufacturer: '%s'" EOL " Product: '%s'", man, prod); + append(buf, tmp, off, len); + return; + } + + if (libusb_kernel_driver_active(handle, 0) == 1) { + snprintf(tmp, sizeof(tmp), EOL " * dev %d: kernel attached", (int)(*count)); + append(buf, tmp, off, len); + } + + err = libusb_get_active_config_descriptor(dev, &config); + if (err) { + if (!setgetdes(*count, dev, handle, &config, 1, buf, off, len) + && !setgetdes(*count, dev, handle, &config, 0, buf, off, len)) { + libusb_close(handle); + snprintf(tmp, sizeof(tmp), EOL " ** dev %d: Failed to set config descriptor to %d or %d", + (int)(*count), 1, 0); + append(buf, tmp, 
off, len); + return; + } + } + + snprintf(tmp, sizeof(tmp), EOL " dev %d: Active Config:" EOL "\tDescriptorType: %s" EOL + "\tNumInterfaces: %d" EOL "\tConfigurationValue: %d" EOL + "\tAttributes: %d" EOL "\tMaxPower: %d", + (int)(*count), destype(config->bDescriptorType), + (int)(config->bNumInterfaces), (int)(config->iConfiguration), + (int)(config->bmAttributes), (int)(config->MaxPower)); + append(buf, tmp, off, len); + + for (i = 0; i < (int)(config->bNumInterfaces); i++) { + for (j = 0; j < config->interface[i].num_altsetting; j++) { + idesc = &(config->interface[i].altsetting[j]); + + snprintf(tmp, sizeof(tmp), EOL " _dev %d: Interface Descriptor %d:" EOL + "\tDescriptorType: %s" EOL "\tInterfaceNumber: %d" EOL + "\tNumEndpoints: %d" EOL "\tInterfaceClass: %d" EOL + "\tInterfaceSubClass: %d" EOL "\tInterfaceProtocol: %d", + (int)(*count), j, destype(idesc->bDescriptorType), + (int)(idesc->bInterfaceNumber), + (int)(idesc->bNumEndpoints), + (int)(idesc->bInterfaceClass), + (int)(idesc->bInterfaceSubClass), + (int)(idesc->bInterfaceProtocol)); + append(buf, tmp, off, len); + + for (k = 0; k < (int)(idesc->bNumEndpoints); k++) { + epdesc = &(idesc->endpoint[k]); + + snprintf(tmp, sizeof(tmp), EOL " __dev %d: Interface %d Endpoint %d:" EOL + "\tDescriptorType: %s" EOL + "\tEndpointAddress: %s0x%x" EOL + "\tAttributes: %s" EOL "\tMaxPacketSize: %d" EOL + "\tInterval: %d" EOL "\tRefresh: %d", + (int)(*count), (int)(idesc->bInterfaceNumber), k, + destype(epdesc->bDescriptorType), + epdir(epdesc->bEndpointAddress), + (int)(epdesc->bEndpointAddress), + epatt(epdesc->bmAttributes), + epdesc->wMaxPacketSize, + (int)(epdesc->bInterval), + (int)(epdesc->bRefresh)); + append(buf, tmp, off, len); + } + } + } + + libusb_free_config_descriptor(config); + config = NULL; + + err = libusb_get_string_descriptor_ascii(handle, desc.iSerialNumber, ser, STRBUFLEN); + if (err < 0) + snprintf((char *)ser, sizeof(ser), "** err:(%d) %s", err, libusb_error_name(err)); + + snprintf(tmp, 
sizeof(tmp), EOL " dev %d: More Info:" EOL "\tManufacturer: '%s'" EOL + "\tProduct: '%s'" EOL "\tSerial '%s'", + (int)(*count), man, prod, ser); + append(buf, tmp, off, len); + + libusb_close(handle); +} + +// Function to dump all USB devices +void usb_all(int level) +{ + libusb_device **list; + ssize_t count, i, j; + char *buf; + size_t len, off; + + count = libusb_get_device_list(NULL, &list); + if (count < 0) { + applog(LOG_ERR, "USB all: failed, err:(%d) %s", (int)count, libusb_error_name((int)count)); + return; + } + + if (count == 0) + applog(LOG_WARNING, "USB all: found no devices"); + else + { + len = 10000; + buf = malloc(len+1); + if (unlikely(!buf)) + quit(1, "USB failed to malloc buf in usb_all"); + + sprintf(buf, "USB all: found %d devices", (int)count); + off = strlen(buf); + + if (!opt_usb_list_all) + append(&buf, " - listing known devices", &off, &len); + + j = -1; + for (i = 0; i < count; i++) + usb_full(&j, list[i], &buf, &off, &len, level); + + _applog(LOG_WARNING, buf, false); + + free(buf); + + if (j == -1) + applog(LOG_WARNING, "No known USB devices"); + else + applog(LOG_WARNING, "%d %sUSB devices", + (int)(++j), opt_usb_list_all ? BLANK : "known "); + + } + + libusb_free_device_list(list, 1); +} + +static void cgusb_check_init() +{ + mutex_lock(&cgusb_lock); + + if (stats_initialised == false) { + // N.B. 
environment LIBUSB_DEBUG also sets libusb_set_debug() + if (opt_usbdump >= 0) { + libusb_set_debug(NULL, opt_usbdump); + usb_all(opt_usbdump); + } + stats_initialised = true; + } + + mutex_unlock(&cgusb_lock); +} + +const char *usb_cmdname(enum usb_cmds cmd) +{ + cgusb_check_init(); + + return usb_commands[cmd]; +} + +void usb_applog(struct cgpu_info *cgpu, enum usb_cmds cmd, char *msg, int amount, int err) +{ + if (msg && !*msg) + msg = NULL; + + if (!msg && amount == 0 && err == LIBUSB_SUCCESS) + msg = (char *)nodatareturned; + + applog(LOG_ERR, "%s%i: %s failed%s%s (err=%d amt=%d)", + cgpu->drv->name, cgpu->device_id, + usb_cmdname(cmd), + msg ? space : BLANK, msg ? msg : BLANK, + err, amount); +} + +#ifdef WIN32 +static void in_use_store_ress(uint8_t bus_number, uint8_t device_address, void *resource1, void *resource2) +{ + struct usb_in_use_list *in_use_tmp; + bool found = false, empty = true; + + mutex_lock(&cgusb_lock); + in_use_tmp = in_use_head; + while (in_use_tmp) { + if (in_use_tmp->in_use.bus_number == (int)bus_number && + in_use_tmp->in_use.device_address == (int)device_address) { + found = true; + + if (in_use_tmp->in_use.resource1) + empty = false; + in_use_tmp->in_use.resource1 = resource1; + + if (in_use_tmp->in_use.resource2) + empty = false; + in_use_tmp->in_use.resource2 = resource2; + + break; + } + in_use_tmp = in_use_tmp->next; + } + mutex_unlock(&cgusb_lock); + + if (found == false) + applog(LOG_ERR, "FAIL: USB store_ress not found (%d:%d)", + (int)bus_number, (int)device_address); + + if (empty == false) + applog(LOG_ERR, "FAIL: USB store_ress not empty (%d:%d)", + (int)bus_number, (int)device_address); +} + +static void in_use_get_ress(uint8_t bus_number, uint8_t device_address, void **resource1, void **resource2) +{ + struct usb_in_use_list *in_use_tmp; + bool found = false, empty = false; + + mutex_lock(&cgusb_lock); + in_use_tmp = in_use_head; + while (in_use_tmp) { + if (in_use_tmp->in_use.bus_number == (int)bus_number && + 
in_use_tmp->in_use.device_address == (int)device_address) { + found = true; + + if (!in_use_tmp->in_use.resource1) + empty = true; + *resource1 = in_use_tmp->in_use.resource1; + in_use_tmp->in_use.resource1 = NULL; + + if (!in_use_tmp->in_use.resource2) + empty = true; + *resource2 = in_use_tmp->in_use.resource2; + in_use_tmp->in_use.resource2 = NULL; + + break; + } + in_use_tmp = in_use_tmp->next; + } + mutex_unlock(&cgusb_lock); + + if (found == false) + applog(LOG_ERR, "FAIL: USB get_lock not found (%d:%d)", + (int)bus_number, (int)device_address); + + if (empty == true) + applog(LOG_ERR, "FAIL: USB get_lock empty (%d:%d)", + (int)bus_number, (int)device_address); +} +#else + +static void in_use_store_fd(uint8_t bus_number, uint8_t device_address, int fd) +{ + struct usb_in_use_list *in_use_tmp; + bool found = false; + + mutex_lock(&cgusb_lock); + in_use_tmp = in_use_head; + while (in_use_tmp) { + if (in_use_tmp->in_use.bus_number == (int)bus_number && + in_use_tmp->in_use.device_address == (int)device_address) { + found = true; + in_use_tmp->in_use.fd = fd; + break; + } + in_use_tmp = in_use_tmp->next; + } + mutex_unlock(&cgusb_lock); + + if (found == false) { + applog(LOG_ERR, "FAIL: USB store_fd not found (%d:%d)", + (int)bus_number, (int)device_address); + } +} + +static int in_use_get_fd(uint8_t bus_number, uint8_t device_address) +{ + struct usb_in_use_list *in_use_tmp; + bool found = false; + int fd = -1; + + mutex_lock(&cgusb_lock); + in_use_tmp = in_use_head; + while (in_use_tmp) { + if (in_use_tmp->in_use.bus_number == (int)bus_number && + in_use_tmp->in_use.device_address == (int)device_address) { + found = true; + fd = in_use_tmp->in_use.fd; + break; + } + in_use_tmp = in_use_tmp->next; + } + mutex_unlock(&cgusb_lock); + + if (found == false) { + applog(LOG_ERR, "FAIL: USB get_lock not found (%d:%d)", + (int)bus_number, (int)device_address); + } + return fd; +} +#endif + +static bool _in_use(struct usb_in_use_list *head, uint8_t bus_number, + uint8_t 
device_address) +{ + struct usb_in_use_list *in_use_tmp; + bool ret = false; + + in_use_tmp = head; + while (in_use_tmp) { + if (in_use_tmp->in_use.bus_number == (int)bus_number && + in_use_tmp->in_use.device_address == (int)device_address) { + ret = true; + break; + } + in_use_tmp = in_use_tmp->next; + if (in_use_tmp == head) + break; + } + return ret; +} + +static bool __is_in_use(uint8_t bus_number, uint8_t device_address) +{ + if (_in_use(in_use_head, bus_number, device_address)) + return true; + if (_in_use(blacklist_head, bus_number, device_address)) + return true; + return false; +} + +static bool is_in_use_bd(uint8_t bus_number, uint8_t device_address) +{ + bool ret; + + mutex_lock(&cgusb_lock); + ret = __is_in_use(bus_number, device_address); + mutex_unlock(&cgusb_lock); + return ret; +} + +static bool is_in_use(libusb_device *dev) +{ + return is_in_use_bd(libusb_get_bus_number(dev), libusb_get_device_address(dev)); +} + +static bool how_in_use(uint8_t bus_number, uint8_t device_address, bool *blacklisted) +{ + bool ret; + mutex_lock(&cgusb_lock); + ret = _in_use(in_use_head, bus_number, device_address); + if (!ret) { + if (_in_use(blacklist_head, bus_number, device_address)) + *blacklisted = true; + } + mutex_unlock(&cgusb_lock); + + return ret; +} + +void usb_list(void) +{ + struct libusb_device_descriptor desc; + struct libusb_device_handle *handle; + uint8_t bus_number; + uint8_t device_address; + libusb_device **list; + ssize_t count, i, j; + int err, total = 0; + + count = libusb_get_device_list(NULL, &list); + if (count < 0) { + applog(LOG_ERR, "USB list: failed, err:(%d) %s", (int)count, libusb_error_name((int)count)); + return; + } + if (count == 0) { + applog(LOG_WARNING, "USB list: found no devices"); + return; + } + for (i = 0; i < count; i++) { + bool known = false, blacklisted = false, active; + unsigned char manuf[256], prod[256]; + libusb_device *dev = list[i]; + + err = libusb_get_device_descriptor(dev, &desc); + if (err) { + 
applog(LOG_WARNING, "USB list: Failed to get descriptor %d", (int)i); + break; + } + + bus_number = libusb_get_bus_number(dev); + device_address = libusb_get_device_address(dev); + + for (j = 0; find_dev[j].drv != DRIVER_MAX; j++) { + if ((find_dev[j].idVendor == desc.idVendor) && + (find_dev[j].idProduct == desc.idProduct)) { + known = true; + break; + } + } + if (!known) + continue; + + err = libusb_open(dev, &handle); + if (err) { + applog(LOG_WARNING, "USB list: Failed to open %d", (int)i); + break; + } + libusb_get_string_descriptor_ascii(handle, desc.iManufacturer, manuf, 255); + libusb_get_string_descriptor_ascii(handle, desc.iProduct, prod, 255); + total++; + active = how_in_use(bus_number, device_address, &blacklisted); + simplelog(LOG_WARNING, "Bus %u Device %u ID: %04x:%04x %s %s %sactive %s", + bus_number, device_address, desc.idVendor, desc.idProduct, + manuf, prod, active ? "" : "in", blacklisted ? "blacklisted" : ""); + } + libusb_free_device_list(list, 1); + simplelog(LOG_WARNING, "%d total known USB device%s", total, total > 1 ? 
"s": ""); +} + +static void add_in_use(uint8_t bus_number, uint8_t device_address, bool blacklist) +{ + struct usb_in_use_list *in_use_tmp, **head; + bool found = false; + + mutex_lock(&cgusb_lock); + if (unlikely(!blacklist && __is_in_use(bus_number, device_address))) { + found = true; + goto nofway; + } + if (blacklist) + head = &blacklist_head; + else + head = &in_use_head; + + in_use_tmp = calloc(1, sizeof(*in_use_tmp)); + if (unlikely(!in_use_tmp)) + quit(1, "USB failed to calloc in_use_tmp"); + in_use_tmp->in_use.bus_number = (int)bus_number; + in_use_tmp->in_use.device_address = (int)device_address; + in_use_tmp->next = in_use_head; + if (*head) + (*head)->prev = in_use_tmp; + *head = in_use_tmp; +nofway: + mutex_unlock(&cgusb_lock); + + if (found) + applog(LOG_ERR, "FAIL: USB add already in use (%d:%d)", + (int)bus_number, (int)device_address); +} + +static void __remove_in_use(uint8_t bus_number, uint8_t device_address, bool blacklist) +{ + struct usb_in_use_list *in_use_tmp, **head; + bool found = false; + + mutex_lock(&cgusb_lock); + if (blacklist) + head = &blacklist_head; + else + head = &in_use_head; + + in_use_tmp = *head; + while (in_use_tmp) { + if (in_use_tmp->in_use.bus_number == (int)bus_number && + in_use_tmp->in_use.device_address == (int)device_address) { + found = true; + if (in_use_tmp == *head) { + *head = (*head)->next; + if (*head) + (*head)->prev = NULL; + } else { + in_use_tmp->prev->next = in_use_tmp->next; + if (in_use_tmp->next) + in_use_tmp->next->prev = in_use_tmp->prev; + } + free(in_use_tmp); + break; + } + in_use_tmp = in_use_tmp->next; + if (in_use_tmp == *head) + break; + } + + mutex_unlock(&cgusb_lock); + + if (!found) { + applog(LOG_ERR, "FAIL: USB remove not already in use (%d:%d)", + (int)bus_number, (int)device_address); + } +} + +static void remove_in_use(uint8_t bus_number, uint8_t device_address) +{ + __remove_in_use(bus_number, device_address, false); +} + +static bool cgminer_usb_lock_bd(struct device_drv *drv, 
uint8_t bus_number, uint8_t device_address) +{ + struct resource_work *res_work; + bool ret; + + applog(LOG_DEBUG, "USB lock %s %d-%d", drv->dname, (int)bus_number, (int)device_address); + + res_work = calloc(1, sizeof(*res_work)); + if (unlikely(!res_work)) + quit(1, "USB failed to calloc lock res_work"); + res_work->lock = true; + res_work->dname = (const char *)(drv->dname); + res_work->bus_number = bus_number; + res_work->device_address = device_address; + + mutex_lock(&cgusbres_lock); + res_work->next = res_work_head; + res_work_head = res_work; + mutex_unlock(&cgusbres_lock); + + cgsem_post(&usb_resource_sem); + + // TODO: add a timeout fail - restart the resource thread? + while (true) { + cgsleep_ms(50); + + mutex_lock(&cgusbres_lock); + if (res_reply_head) { + struct resource_reply *res_reply_prev = NULL; + struct resource_reply *res_reply = res_reply_head; + while (res_reply) { + if (res_reply->bus_number == bus_number && + res_reply->device_address == device_address) { + + if (res_reply_prev) + res_reply_prev->next = res_reply->next; + else + res_reply_head = res_reply->next; + + mutex_unlock(&cgusbres_lock); + + ret = res_reply->got; + + free(res_reply); + + return ret; + } + res_reply_prev = res_reply; + res_reply = res_reply->next; + } + } + mutex_unlock(&cgusbres_lock); + } +} + +static bool cgminer_usb_lock(struct device_drv *drv, libusb_device *dev) +{ + return cgminer_usb_lock_bd(drv, libusb_get_bus_number(dev), libusb_get_device_address(dev)); +} + +static void cgminer_usb_unlock_bd(struct device_drv *drv, uint8_t bus_number, uint8_t device_address) +{ + struct resource_work *res_work; + + applog(LOG_DEBUG, "USB unlock %s %d-%d", drv->dname, (int)bus_number, (int)device_address); + + res_work = calloc(1, sizeof(*res_work)); + if (unlikely(!res_work)) + quit(1, "USB failed to calloc unlock res_work"); + res_work->lock = false; + res_work->dname = (const char *)(drv->dname); + res_work->bus_number = bus_number; + res_work->device_address = 
device_address; + + mutex_lock(&cgusbres_lock); + res_work->next = res_work_head; + res_work_head = res_work; + mutex_unlock(&cgusbres_lock); + + cgsem_post(&usb_resource_sem); + + return; +} + +static void cgminer_usb_unlock(struct device_drv *drv, libusb_device *dev) +{ + cgminer_usb_unlock_bd(drv, libusb_get_bus_number(dev), libusb_get_device_address(dev)); +} + +static struct cg_usb_device *free_cgusb(struct cg_usb_device *cgusb) +{ + applog(LOG_DEBUG, "USB free %s", cgusb->found->name); + + if (cgusb->serial_string && cgusb->serial_string != BLANK) + free(cgusb->serial_string); + + if (cgusb->manuf_string && cgusb->manuf_string != BLANK) + free(cgusb->manuf_string); + + if (cgusb->prod_string && cgusb->prod_string != BLANK) + free(cgusb->prod_string); + + if (cgusb->descriptor) + free(cgusb->descriptor); + + free(cgusb->found); + + free(cgusb); + + return NULL; +} + +static void _usb_uninit(struct cgpu_info *cgpu) +{ + int ifinfo; + + // May have happened already during a failed initialisation + // if release_cgpu() was called due to a USB NODEV(err) + if (!cgpu->usbdev) + return; + + applog(LOG_DEBUG, "USB uninit %s%i", + cgpu->drv->name, cgpu->device_id); + + if (cgpu->usbdev->handle) { + for (ifinfo = cgpu->usbdev->found->intinfo_count - 1; ifinfo >= 0; ifinfo--) { + libusb_release_interface(cgpu->usbdev->handle, + THISIF(cgpu->usbdev->found, ifinfo)); + } +#ifdef LINUX + libusb_attach_kernel_driver(cgpu->usbdev->handle, THISIF(cgpu->usbdev->found, ifinfo)); +#endif + cg_wlock(&cgusb_fd_lock); + libusb_close(cgpu->usbdev->handle); + cgpu->usbdev->handle = NULL; + cg_wunlock(&cgusb_fd_lock); + } + cgpu->usbdev = free_cgusb(cgpu->usbdev); +} + +void usb_uninit(struct cgpu_info *cgpu) +{ + int pstate; + + DEVWLOCK(cgpu, pstate); + + _usb_uninit(cgpu); + + DEVWUNLOCK(cgpu, pstate); +} + +/* We have dropped the read devlock before entering this function but we pick + * up the write lock to prevent any attempts to work on dereferenced code once + * the nodev flag 
has been set. */ +static bool __release_cgpu(struct cgpu_info *cgpu) +{ + struct cg_usb_device *cgusb = cgpu->usbdev; + bool initted = cgpu->usbinfo.initialised; + struct cgpu_info *lookcgpu; + int i; + + // It has already been done + if (cgpu->usbinfo.nodev) + return false; + + applog(LOG_DEBUG, "USB release %s%i", + cgpu->drv->name, cgpu->device_id); + + if (initted) { + zombie_devs++; + total_count--; + drv_count[cgpu->drv->drv_id].count--; + } + + cgpu->usbinfo.nodev = true; + cgpu->usbinfo.nodev_count++; + cgtime(&cgpu->usbinfo.last_nodev); + + // Any devices sharing the same USB device should be marked also + for (i = 0; i < total_devices; i++) { + lookcgpu = get_devices(i); + if (lookcgpu != cgpu && lookcgpu->usbdev == cgusb) { + if (initted) { + total_count--; + drv_count[lookcgpu->drv->drv_id].count--; + } + + lookcgpu->usbinfo.nodev = true; + lookcgpu->usbinfo.nodev_count++; + cg_memcpy(&(lookcgpu->usbinfo.last_nodev), + &(cgpu->usbinfo.last_nodev), sizeof(struct timeval)); + lookcgpu->usbdev = NULL; + } + } + + _usb_uninit(cgpu); + return true; +} + +static void release_cgpu(struct cgpu_info *cgpu) +{ + if (__release_cgpu(cgpu)) + cgminer_usb_unlock_bd(cgpu->drv, cgpu->usbinfo.bus_number, cgpu->usbinfo.device_address); +} + +void blacklist_cgpu(struct cgpu_info *cgpu) +{ + if (cgpu->blacklisted) { + applog(LOG_WARNING, "Device already blacklisted"); + return; + } + cgpu->blacklisted = true; + add_in_use(cgpu->usbinfo.bus_number, cgpu->usbinfo.device_address, true); + if (__release_cgpu(cgpu)) + cgminer_usb_unlock_bd(cgpu->drv, cgpu->usbinfo.bus_number, cgpu->usbinfo.device_address); +} + +void whitelist_cgpu(struct cgpu_info *cgpu) +{ + if (!cgpu->blacklisted) { + applog(LOG_WARNING, "Device not blacklisted"); + return; + } + __remove_in_use(cgpu->usbinfo.bus_number, cgpu->usbinfo.device_address, true); + cgpu->blacklisted = false; +} + +/* + * Force a NODEV on a device so it goes back to hotplug + */ +void usb_nodev(struct cgpu_info *cgpu) +{ + int 
pstate; + + DEVWLOCK(cgpu, pstate); + + release_cgpu(cgpu); + + DEVWUNLOCK(cgpu, pstate); +} + +/* + * Use the same usbdev thus locking is across all related devices + */ +struct cgpu_info *usb_copy_cgpu(struct cgpu_info *orig) +{ + struct cgpu_info *copy; + int pstate; + + DEVWLOCK(orig, pstate); + + copy = calloc(1, sizeof(*copy)); + if (unlikely(!copy)) + quit(1, "Failed to calloc cgpu for %s in usb_copy_cgpu", orig->drv->dname); + + copy->name = orig->name; + copy->drv = copy_drv(orig->drv); + copy->deven = orig->deven; + copy->threads = orig->threads; + + copy->usbdev = orig->usbdev; + + cg_memcpy(&(copy->usbinfo), &(orig->usbinfo), sizeof(copy->usbinfo)); + + copy->usbinfo.nodev = (copy->usbdev == NULL); + + DEVWUNLOCK(orig, pstate); + + return copy; +} + +struct cgpu_info *usb_alloc_cgpu(struct device_drv *drv, int threads) +{ + struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu)); + + if (unlikely(!cgpu)) + quit(1, "Failed to calloc cgpu for %s in usb_alloc_cgpu", drv->dname); + + cgpu->drv = drv; + cgpu->deven = DEV_ENABLED; + cgpu->threads = threads; + + cgpu->usbinfo.nodev = true; + + cglock_init(&cgpu->usbinfo.devlock); + + return cgpu; +} + +struct cgpu_info *usb_free_cgpu(struct cgpu_info *cgpu) +{ + if (cgpu->drv->copy) + free(cgpu->drv); + + free(cgpu->device_path); + + free(cgpu); + + return NULL; +} + +#define USB_INIT_FAIL 0 +#define USB_INIT_OK 1 +#define USB_INIT_IGNORE 2 + +static int _usb_init(struct cgpu_info *cgpu, struct libusb_device *dev, struct usb_find_devices *found) +{ + unsigned char man[STRBUFLEN+1], prod[STRBUFLEN+1]; + struct cg_usb_device *cgusb = NULL; + struct libusb_config_descriptor *config = NULL; + const struct libusb_interface_descriptor *idesc; + const struct libusb_endpoint_descriptor *epdesc; + unsigned char strbuf[STRBUFLEN+1]; + char devpath[32]; + char devstr[STRBUFLEN+1]; + int err, ifinfo, epinfo, alt, epnum, pstate; + int bad = USB_INIT_FAIL; + int cfg, claimed = 0, i; + + DEVWLOCK(cgpu, pstate); + + 
cgpu->usbinfo.bus_number = libusb_get_bus_number(dev); + cgpu->usbinfo.device_address = libusb_get_device_address(dev); + + if (found->intinfo_count > 1) { + snprintf(devpath, sizeof(devpath), "%d:%d-i%d", + (int)(cgpu->usbinfo.bus_number), + (int)(cgpu->usbinfo.device_address), + THISIF(found, 0)); + } else { + snprintf(devpath, sizeof(devpath), "%d:%d", + (int)(cgpu->usbinfo.bus_number), + (int)(cgpu->usbinfo.device_address)); + } + + cgpu->device_path = strdup(devpath); + + snprintf(devstr, sizeof(devstr), "- %s device %s", found->name, devpath); + + cgusb = calloc(1, sizeof(*cgusb)); + if (unlikely(!cgusb)) + quit(1, "USB failed to calloc _usb_init cgusb"); + cgusb->found = found; + + if (found->idVendor == IDVENDOR_FTDI) + cgusb->usb_type = USB_TYPE_FTDI; + + cgusb->ident = found->ident; + + cgusb->descriptor = calloc(1, sizeof(*(cgusb->descriptor))); + if (unlikely(!cgusb->descriptor)) + quit(1, "USB failed to calloc _usb_init cgusb descriptor"); + + err = libusb_get_device_descriptor(dev, cgusb->descriptor); + if (err) { + applog(LOG_DEBUG, + "USB init failed to get descriptor, err %d %s", + err, devstr); + goto dame; + } + + cg_wlock(&cgusb_fd_lock); + err = libusb_open(dev, &(cgusb->handle)); + cg_wunlock(&cgusb_fd_lock); + if (err) { + switch (err) { + case LIBUSB_ERROR_ACCESS: + applog(LOG_ERR, + "USB init, open device failed, err %d, " + "you don't have privilege to access %s", + err, devstr); + applog(LOG_ERR, "See README file included for help"); + break; +#ifdef WIN32 + // Windows specific message + case LIBUSB_ERROR_NOT_SUPPORTED: + applog(LOG_ERR, "USB init, open device failed, err %d, ", err); + applog(LOG_ERR, "You need to install a WinUSB driver for %s", devstr); + applog(LOG_ERR, "And associate %s with WinUSB using zadig", devstr); + applog(LOG_ERR, "See README.txt file included for help"); + break; +#endif + default: + applog(LOG_DEBUG, + "USB init, open failed, err %d %s", + err, devstr); + } + goto dame; + } + +#ifdef LINUX + for (ifinfo = 
0; ifinfo < found->intinfo_count; ifinfo++) { + if (libusb_kernel_driver_active(cgusb->handle, THISIF(found, ifinfo)) == 1) { + applog(LOG_DEBUG, "USB init, kernel attached ... %s", devstr); + err = libusb_detach_kernel_driver(cgusb->handle, THISIF(found, ifinfo)); + if (err == 0) { + applog(LOG_DEBUG, + "USB init, kernel detached ifinfo %d interface %d" + " successfully %s", + ifinfo, THISIF(found, ifinfo), devstr); + } else { + applog(LOG_WARNING, + "USB init, kernel detach ifinfo %d interface %d failed," + " err %d in use? %s", + ifinfo, THISIF(found, ifinfo), err, devstr); + goto nokernel; + } + } + } +#endif + + err = libusb_get_string_descriptor_ascii(cgusb->handle, + cgusb->descriptor->iManufacturer, + man, STRBUFLEN); + if (err < 0) { + applog(LOG_DEBUG, + "USB init, failed to get iManufacturer, err %d %s", + err, devstr); + goto cldame; + } + if (found->iManufacturer) { + if (strcasecmp((char *)man, found->iManufacturer)) { + applog(LOG_DEBUG, "USB init, iManufacturer mismatch %s", + devstr); + applog(LOG_DEBUG, "Found %s vs %s", man, found->iManufacturer); + bad = USB_INIT_IGNORE; + goto cldame; + } + } else { + for (i = 0; find_dev[i].drv != DRIVER_MAX; i++) { + const char *iManufacturer = find_dev[i].iManufacturer; + /* If other drivers has an iManufacturer set that match, + * don't try to claim this device. 
*/ + + if (!iManufacturer) + continue; + if (!strcasecmp((char *)man, iManufacturer)) { + applog(LOG_DEBUG, "USB init, alternative iManufacturer match %s", + devstr); + applog(LOG_DEBUG, "Found %s", iManufacturer); + bad = USB_INIT_IGNORE; + goto cldame; + } + } + } + + err = libusb_get_string_descriptor_ascii(cgusb->handle, + cgusb->descriptor->iProduct, + prod, STRBUFLEN); + if (err < 0) { + applog(LOG_DEBUG, + "USB init, failed to get iProduct, err %d %s", + err, devstr); + goto cldame; + } + if (found->iProduct) { + if (strcasecmp((char *)prod, found->iProduct)) { + applog(LOG_DEBUG, "USB init, iProduct mismatch %s", + devstr); + applog(LOG_DEBUG, "Found %s vs %s", prod, found->iProduct); + bad = USB_INIT_IGNORE; + goto cldame; + } + } else { + for (i = 0; find_dev[i].drv != DRIVER_MAX; i++) { + const char *iProduct = find_dev[i].iProduct; + /* Do same for iProduct as iManufacturer above */ + + if (!iProduct) + continue; + if (!strcasecmp((char *)prod, iProduct)) { + applog(LOG_DEBUG, "USB init, alternative iProduct match %s", + devstr); + applog(LOG_DEBUG, "Found %s", iProduct); + bad = USB_INIT_IGNORE; + goto cldame; + } + } + } + + cfg = -1; + err = libusb_get_configuration(cgusb->handle, &cfg); + if (err) + cfg = -1; + + // Try to set it if we can't read it or it's different + if (cfg != found->config) { + err = libusb_set_configuration(cgusb->handle, found->config); + if (err) { + switch(err) { + case LIBUSB_ERROR_BUSY: + applog(LOG_WARNING, + "USB init, set config %d in use %s", + found->config, devstr); + break; + default: + applog(LOG_DEBUG, + "USB init, failed to set config to %d, err %d %s", + found->config, err, devstr); + } + goto cldame; + } + } + + err = libusb_get_active_config_descriptor(dev, &config); + if (err) { + applog(LOG_DEBUG, + "USB init, failed to get config descriptor, err %d %s", + err, devstr); + goto cldame; + } + + int imax = -1; + for (ifinfo = 0; ifinfo < found->intinfo_count; ifinfo++) + if (found->intinfos[ifinfo].interface > 
imax) + imax = found->intinfos[ifinfo].interface; + + if ((int)(config->bNumInterfaces) <= imax) { + applog(LOG_DEBUG, "USB init bNumInterfaces %d <= interface max %d for %s", + (int)(config->bNumInterfaces), imax, devstr); + goto cldame; + } + + for (ifinfo = 0; ifinfo < found->intinfo_count; ifinfo++) + for (epinfo = 0; epinfo < found->intinfos[ifinfo].epinfo_count; epinfo++) + found->intinfos[ifinfo].epinfos[epinfo].found = false; + + for (ifinfo = 0; ifinfo < found->intinfo_count; ifinfo++) { + int interface = found->intinfos[ifinfo].interface; + for (alt = 0; alt < config->interface[interface].num_altsetting; alt++) { + idesc = &(config->interface[interface].altsetting[alt]); + for (epnum = 0; epnum < (int)(idesc->bNumEndpoints); epnum++) { + struct usb_epinfo *epinfos = found->intinfos[ifinfo].epinfos; + epdesc = &(idesc->endpoint[epnum]); + for (epinfo = 0; epinfo < found->intinfos[ifinfo].epinfo_count; epinfo++) { + if (!epinfos[epinfo].found) { + if (epdesc->bmAttributes == epinfos[epinfo].att + && epdesc->wMaxPacketSize >= epinfos[epinfo].size + && epdesc->bEndpointAddress == epinfos[epinfo].ep) { + epinfos[epinfo].found = true; + epinfos[epinfo].wMaxPacketSize = epdesc->wMaxPacketSize; + break; + } + } + } + } + } + } + + for (ifinfo = 0; ifinfo < found->intinfo_count; ifinfo++) + for (epinfo = 0; epinfo < found->intinfos[ifinfo].epinfo_count; epinfo++) + if (found->intinfos[ifinfo].epinfos[epinfo].found == false) { + applog(LOG_DEBUG, "USB init found (%d,%d) == false %s", + ifinfo, epinfo, devstr); + goto cldame; + } + + claimed = 0; + for (ifinfo = 0; ifinfo < found->intinfo_count; ifinfo++) { + err = libusb_claim_interface(cgusb->handle, THISIF(found, ifinfo)); + if (err == 0) + claimed++; + else { + switch(err) { + case LIBUSB_ERROR_BUSY: + applog(LOG_WARNING, + "USB init, claim ifinfo %d interface %d in use %s", + ifinfo, THISIF(found, ifinfo), devstr); + break; + default: + applog(LOG_DEBUG, + "USB init, claim ifinfo %d interface %d failed," + " 
err %d %s", + ifinfo, THISIF(found, ifinfo), err, devstr); + } + goto reldame; + } + } + + cfg = -1; + err = libusb_get_configuration(cgusb->handle, &cfg); + if (err) + cfg = -1; + if (cfg != found->config) { + applog(LOG_WARNING, + "USB init, incorrect config (%d!=%d) after claim of %s", + cfg, found->config, devstr); + goto reldame; + } + + cgusb->usbver = cgusb->descriptor->bcdUSB; + if (cgusb->usbver < 0x0200) { + cgusb->usb11 = true; + cgusb->tt = true; + } + +// TODO: allow this with the right version of the libusb include and running library +// cgusb->speed = libusb_get_device_speed(dev); + + err = libusb_get_string_descriptor_ascii(cgusb->handle, + cgusb->descriptor->iProduct, strbuf, STRBUFLEN); + if (err > 0) + cgusb->prod_string = strdup((char *)strbuf); + else + cgusb->prod_string = (char *)BLANK; + + err = libusb_get_string_descriptor_ascii(cgusb->handle, + cgusb->descriptor->iManufacturer, strbuf, STRBUFLEN); + if (err > 0) + cgusb->manuf_string = strdup((char *)strbuf); + else + cgusb->manuf_string = (char *)BLANK; + + err = libusb_get_string_descriptor_ascii(cgusb->handle, + cgusb->descriptor->iSerialNumber, strbuf, STRBUFLEN); + if (err > 0) + cgusb->serial_string = strdup((char *)strbuf); + else + cgusb->serial_string = (char *)BLANK; + +// TODO: ? +// cgusb->fwVersion <- for temp1/temp2 decision? or serial? (driver-modminer.c) +// cgusb->interfaceVersion + + applog(LOG_DEBUG, + "USB init %s usbver=%04x prod='%s' manuf='%s' serial='%s'", + devstr, cgusb->usbver, cgusb->prod_string, + cgusb->manuf_string, cgusb->serial_string); + + cgpu->usbdev = cgusb; + cgpu->usbinfo.nodev = false; + + libusb_free_config_descriptor(config); + + // Allow a name change based on the idVendor+idProduct + // N.B. 
must be done before calling add_cgpu() + if (strcasecmp(cgpu->drv->name, found->name)) { + if (!cgpu->drv->copy) + cgpu->drv = copy_drv(cgpu->drv); + cgpu->drv->name = (char *)(found->name); + } + + bad = USB_INIT_OK; + goto out_unlock; + +reldame: + + ifinfo = claimed; + while (ifinfo-- > 0) + libusb_release_interface(cgusb->handle, THISIF(found, ifinfo)); + +cldame: +#ifdef LINUX + libusb_attach_kernel_driver(cgusb->handle, THISIF(found, ifinfo)); + +nokernel: +#endif + cg_wlock(&cgusb_fd_lock); + libusb_close(cgusb->handle); + cgusb->handle = NULL; + cg_wunlock(&cgusb_fd_lock); + +dame: + + if (config) + libusb_free_config_descriptor(config); + + cgusb = free_cgusb(cgusb); + +out_unlock: + DEVWUNLOCK(cgpu, pstate); + + return bad; +} + +bool usb_init(struct cgpu_info *cgpu, struct libusb_device *dev, struct usb_find_devices *found_match) +{ + struct usb_find_devices *found_use = NULL; + int uninitialised_var(ret); + int i; + + for (i = 0; find_dev[i].drv != DRIVER_MAX; i++) { + if (find_dev[i].drv == found_match->drv && + find_dev[i].idVendor == found_match->idVendor && + find_dev[i].idProduct == found_match->idProduct) { + found_use = malloc(sizeof(*found_use)); + if (unlikely(!found_use)) + quit(1, "USB failed to malloc found_use"); + cg_memcpy(found_use, &(find_dev[i]), sizeof(*found_use)); + + ret = _usb_init(cgpu, dev, found_use); + + if (ret != USB_INIT_IGNORE) + break; + } + } + + if (ret == USB_INIT_FAIL) + applog(LOG_ERR, "%s detect (%d:%d) failed to initialise (incorrect device?)", + cgpu->drv->dname, + (int)(cgpu->usbinfo.bus_number), + (int)(cgpu->usbinfo.device_address)); + + return (ret == USB_INIT_OK); +} + +static bool usb_check_device(struct device_drv *drv, struct libusb_device *dev, struct usb_find_devices *look) +{ + struct libusb_device_descriptor desc; + int bus_number, device_address; + int err, i; + bool ok; + + err = libusb_get_device_descriptor(dev, &desc); + if (err) { + applog(LOG_DEBUG, "USB check device: Failed to get descriptor, 
err %d", err); + return false; + } + + if (desc.idVendor != look->idVendor || desc.idProduct != look->idProduct) { + applog(LOG_DEBUG, "%s looking for %s %04x:%04x but found %04x:%04x instead", + drv->name, look->name, look->idVendor, look->idProduct, desc.idVendor, desc.idProduct); + + return false; + } + + if (busdev_count > 0) { + bus_number = (int)libusb_get_bus_number(dev); + device_address = (int)libusb_get_device_address(dev); + ok = false; + for (i = 0; i < busdev_count; i++) { + if (bus_number == busdev[i].bus_number) { + if (busdev[i].device_address == -1 || + device_address == busdev[i].device_address) { + ok = true; + break; + } + } + } + if (!ok) { + applog(LOG_DEBUG, "%s rejected %s %04x:%04x with bus:dev (%d:%d)", + drv->name, look->name, look->idVendor, look->idProduct, + bus_number, device_address); + return false; + } + } + + applog(LOG_DEBUG, "%s looking for and found %s %04x:%04x", + drv->name, look->name, look->idVendor, look->idProduct); + + return true; +} + +static struct usb_find_devices *usb_check_each(int drvnum, struct device_drv *drv, struct libusb_device *dev) +{ + struct usb_find_devices *found; + int i; + + for (i = 0; find_dev[i].drv != DRIVER_MAX; i++) + if (find_dev[i].drv == drvnum) { + if (usb_check_device(drv, dev, &(find_dev[i]))) { + found = malloc(sizeof(*found)); + if (unlikely(!found)) + quit(1, "USB failed to malloc found"); + cg_memcpy(found, &(find_dev[i]), sizeof(*found)); + return found; + } + } + + return NULL; +} + +#define DRIVER_USB_CHECK_EACH(X) if (drv->drv_id == DRIVER_##X) \ + return usb_check_each(DRIVER_##X, drv, dev); + +static struct usb_find_devices *usb_check(__maybe_unused struct device_drv *drv, __maybe_unused struct libusb_device *dev) +{ + if (drv_count[drv->drv_id].count >= drv_count[drv->drv_id].limit) { + applog(LOG_DEBUG, + "USB scan devices3: %s limit %d reached", + drv->dname, drv_count[drv->drv_id].limit); + return NULL; + } + + DRIVER_PARSE_COMMANDS(DRIVER_USB_CHECK_EACH) + + return NULL; +} 
+ +void __usb_detect(struct device_drv *drv, struct cgpu_info *(*device_detect)(struct libusb_device *, struct usb_find_devices *), + bool single) +{ + libusb_device **list; + ssize_t count, i; + struct usb_find_devices *found; + struct cgpu_info *cgpu; + + applog(LOG_DEBUG, "USB scan devices: checking for %s devices", drv->name); + + if (total_count >= total_limit) { + applog(LOG_DEBUG, "USB scan devices: total limit %d reached", total_limit); + return; + } + + if (drv_count[drv->drv_id].count >= drv_count[drv->drv_id].limit) { + applog(LOG_DEBUG, + "USB scan devices: %s limit %d reached", + drv->dname, drv_count[drv->drv_id].limit); + return; + } + + count = libusb_get_device_list(NULL, &list); + if (count < 0) { + applog(LOG_DEBUG, "USB scan devices: failed, err %d", (int)count); + return; + } + + if (count == 0) + applog(LOG_DEBUG, "USB scan devices: found no devices"); + else + cgsleep_ms(166); + + for (i = 0; i < count; i++) { + if (total_count >= total_limit) { + applog(LOG_DEBUG, "USB scan devices2: total limit %d reached", total_limit); + break; + } + + if (drv_count[drv->drv_id].count >= drv_count[drv->drv_id].limit) { + applog(LOG_DEBUG, + "USB scan devices2: %s limit %d reached", + drv->dname, drv_count[drv->drv_id].limit); + break; + } + + found = usb_check(drv, list[i]); + if (found != NULL) { + bool new_dev = false; + + if (is_in_use(list[i]) || cgminer_usb_lock(drv, list[i]) == false) + free(found); + else { + cgpu = device_detect(list[i], found); + if (!cgpu) + cgminer_usb_unlock(drv, list[i]); + else { + new_dev = true; + cgpu->usbinfo.initialised = true; + total_count++; + drv_count[drv->drv_id].count++; + } + free(found); + } + if (single && new_dev) + break; + } + } + + libusb_free_device_list(list, 1); +} + +#if DO_USB_STATS +static void modes_str(char *buf, uint32_t modes) +{ + bool first; + + *buf = '\0'; + + if (modes == MODE_NONE) + strcpy(buf, MODE_NONE_STR); + else { + first = true; + + if (modes & MODE_CTRL_READ) { + strcpy(buf, 
			MODE_CTRL_READ_STR);
			first = false;
		}

		if (modes & MODE_CTRL_WRITE) {
			if (!first)
				strcat(buf, MODE_SEP_STR);
			strcat(buf, MODE_CTRL_WRITE_STR);
			first = false;
		}

		if (modes & MODE_BULK_READ) {
			if (!first)
				strcat(buf, MODE_SEP_STR);
			strcat(buf, MODE_BULK_READ_STR);
			first = false;
		}

		if (modes & MODE_BULK_WRITE) {
			if (!first)
				strcat(buf, MODE_SEP_STR);
			strcat(buf, MODE_BULK_WRITE_STR);
			first = false;
		}
	}
}
#endif

// The stat data can be spurious due to not locking it before copying it -
// however that would require the stat() function to also lock and release
// a mutex every time a usb read or write is called which would slow
// things down more
/*
 * API iterator over the per-device, per-command USB statistics table.
 * *count is the caller's cursor into the flattened (device x command x seq)
 * space; each call returns one populated row (or NULL when exhausted).
 * Compiled out to "always NULL" unless DO_USB_STATS is enabled.
 */
struct api_data *api_usb_stats(__maybe_unused int *count)
{
#if DO_USB_STATS
	struct cg_usb_stats_details *details;
	struct cg_usb_stats *sta;
	struct api_data *root = NULL;
	int device;
	int cmdseq;
	char modes_s[32];

	if (next_stat == USB_NOSTAT)
		return NULL;

	while (*count < next_stat * C_MAX * 2) {
		// Each device owns C_MAX*2 slots: (command, seq) pairs
		device = *count / (C_MAX * 2);
		cmdseq = *count % (C_MAX * 2);

		(*count)++;

		sta = &(usb_stats[device]);
		details = &(sta->details[cmdseq]);

		// Only show stats that have results
		if (details->item[CMD_CMD].count == 0 &&
		    details->item[CMD_TIMEOUT].count == 0 &&
		    details->item[CMD_ERROR].count == 0)
			continue;

		root = api_add_string(root, "Name", sta->name, false);
		root = api_add_int(root, "ID", &(sta->device_id), false);
		root = api_add_const(root, "Stat", usb_commands[cmdseq/2], false);
		root = api_add_int(root, "Seq", &(details->seq), true);
		modes_str(modes_s, details->modes);
		root = api_add_string(root, "Modes", modes_s, true);
		root = api_add_uint64(root, "Count",
					&(details->item[CMD_CMD].count), true);
		root = api_add_double(root, "Total Delay",
					&(details->item[CMD_CMD].total_delay), true);
		root = api_add_double(root, "Min Delay",
					&(details->item[CMD_CMD].min_delay), true);
		root = api_add_double(root, "Max Delay",
					&(details->item[CMD_CMD].max_delay), true);
		root = api_add_uint64(root, "Timeout Count",
					&(details->item[CMD_TIMEOUT].count), true);
		root = api_add_double(root, "Timeout Total Delay",
					&(details->item[CMD_TIMEOUT].total_delay), true);
		root = api_add_double(root, "Timeout Min Delay",
					&(details->item[CMD_TIMEOUT].min_delay), true);
		root = api_add_double(root, "Timeout Max Delay",
					&(details->item[CMD_TIMEOUT].max_delay), true);
		root = api_add_uint64(root, "Error Count",
					&(details->item[CMD_ERROR].count), true);
		root = api_add_double(root, "Error Total Delay",
					&(details->item[CMD_ERROR].total_delay), true);
		root = api_add_double(root, "Error Min Delay",
					&(details->item[CMD_ERROR].min_delay), true);
		root = api_add_double(root, "Error Max Delay",
					&(details->item[CMD_ERROR].max_delay), true);
		root = api_add_timeval(root, "First Command",
					&(details->item[CMD_CMD].first), true);
		root = api_add_timeval(root, "Last Command",
					&(details->item[CMD_CMD].last), true);
		root = api_add_timeval(root, "First Timeout",
					&(details->item[CMD_TIMEOUT].first), true);
		root = api_add_timeval(root, "Last Timeout",
					&(details->item[CMD_TIMEOUT].last), true);
		root = api_add_timeval(root, "First Error",
					&(details->item[CMD_ERROR].first), true);
		root = api_add_timeval(root, "Last Error",
					&(details->item[CMD_ERROR].last), true);

		return root;
	}
#endif
	return NULL;
}

#if DO_USB_STATS
/*
 * Grow the global usb_stats array by one entry for this cgpu and mark its
 * odd-numbered (SEQ1) detail slots.  device_id is filled in later by
 * update_usb_stats() since it is unknown before add_cgpu().
 * Serialised by cgusb_lock.
 */
static void newstats(struct cgpu_info *cgpu)
{
	int i;

	mutex_lock(&cgusb_lock);

	// usbstat is 1-based; 0 means "no stats slot allocated yet"
	cgpu->usbinfo.usbstat = next_stat + 1;

	usb_stats = realloc(usb_stats, sizeof(*usb_stats) * (next_stat+1));
	if (unlikely(!usb_stats))
		quit(1, "USB failed to realloc usb_stats %d", next_stat+1);

	usb_stats[next_stat].name = cgpu->drv->name;
	usb_stats[next_stat].device_id = -1;
	// NOTE(review): allocates 2*(C_MAX+1) entries though only C_MAX*2 are
	// indexed below - slight overallocation, harmless but worth confirming
	usb_stats[next_stat].details = calloc(2, sizeof(struct cg_usb_stats_details) * (C_MAX + 1));
	if (unlikely(!usb_stats[next_stat].details))
		quit(1, "USB failed to calloc details for %d", next_stat+1);

	// Odd slots are the SEQ1 entries for each command
	for (i = 1; i < C_MAX * 2; i += 2)
		usb_stats[next_stat].details[i].seq = 1;

	next_stat++;

	mutex_unlock(&cgusb_lock);
}
#endif

/* Ensure a stats slot exists for cgpu and record its (now known) device_id. */
void update_usb_stats(__maybe_unused struct cgpu_info *cgpu)
{
#if DO_USB_STATS
	if (cgpu->usbinfo.usbstat < 1)
		newstats(cgpu);

	// we don't know the device_id until after add_cgpu()
	usb_stats[cgpu->usbinfo.usbstat - 1].device_id = cgpu->device_id;
#endif
}

#if DO_USB_STATS
/*
 * Record one completed transfer in the stats table: bucket it by how far the
 * wall-clock duration overran the requested timeout (USB_TMO_0/1/2 tiers),
 * then accumulate min/max/total delay per (cmd, seq) and per outcome
 * (success / timeout / error).
 */
static void stats(struct cgpu_info *cgpu, struct timeval *tv_start, struct timeval *tv_finish, int err, int mode, enum usb_cmds cmd, int seq, int timeout)
{
	struct cg_usb_stats_details *details;
	double diff;
	int item, extrams;

	if (cgpu->usbinfo.usbstat < 1)
		newstats(cgpu);

	cgpu->usbinfo.tmo_count++;

	// timeout checks are only done when stats are enabled
	extrams = SECTOMS(tdiff(tv_finish, tv_start)) - timeout;
	if (extrams >= USB_TMO_0) {
		uint32_t totms = (uint32_t)(timeout + extrams);
		int offset = 0;

		if (extrams >= USB_TMO_2) {
			applog(LOG_INFO, "%s%i: TIMEOUT %s took %dms but was %dms",
				cgpu->drv->name, cgpu->device_id,
				usb_cmdname(cmd), totms, timeout);
			offset = 2;
		} else if (extrams >= USB_TMO_1)
			offset = 1;

		cgpu->usbinfo.usb_tmo[offset].count++;
		cgpu->usbinfo.usb_tmo[offset].total_over += extrams;
		cgpu->usbinfo.usb_tmo[offset].total_tmo += timeout;
		// min_tmo == 0 means "first sample in this tier"
		if (cgpu->usbinfo.usb_tmo[offset].min_tmo == 0) {
			cgpu->usbinfo.usb_tmo[offset].min_tmo = totms;
			cgpu->usbinfo.usb_tmo[offset].max_tmo = totms;
		} else {
			if (cgpu->usbinfo.usb_tmo[offset].min_tmo > totms)
				cgpu->usbinfo.usb_tmo[offset].min_tmo = totms;
			if (cgpu->usbinfo.usb_tmo[offset].max_tmo < totms)
				cgpu->usbinfo.usb_tmo[offset].max_tmo = totms;
		}
	}

	details = &(usb_stats[cgpu->usbinfo.usbstat - 1].details[cmd * 2 + seq]);
	details->modes |= mode;

	diff = tdiff(tv_finish, tv_start);

	// Classify the outcome bucket for the delay accumulators
	switch (err) {
	case LIBUSB_SUCCESS:
		item = CMD_CMD;
		break;
	case LIBUSB_ERROR_TIMEOUT:
		item = CMD_TIMEOUT;
		break;
	default:
		item = CMD_ERROR;
		break;
	}

	// First sample initialises min_delay and the 'first' timestamp
	if (details->item[item].count == 0) {
		details->item[item].min_delay = diff;
		cg_memcpy(&(details->item[item].first), tv_start, sizeof(*tv_start));
	} else if (diff < details->item[item].min_delay)
		details->item[item].min_delay = diff;

	if (diff > details->item[item].max_delay)
		details->item[item].max_delay = diff;

	details->item[item].total_delay += diff;
	cg_memcpy(&(details->item[item].last), tv_start, sizeof(*tv_start));
	details->item[item].count++;
}

/*
 * Count a transfer that was rejected before submission (device gone) in the
 * C_REJECTED error bucket for this device.
 */
static void rejected_inc(struct cgpu_info *cgpu, uint32_t mode)
{
	struct cg_usb_stats_details *details;
	int item = CMD_ERROR;

	if (cgpu->usbinfo.usbstat < 1)
		newstats(cgpu);

	details = &(usb_stats[cgpu->usbinfo.usbstat - 1].details[C_REJECTED * 2 + 0]);
	details->modes |= mode;
	details->item[item].count++;
}
#endif

#define USB_RETRY_MAX 5

/* One in-flight async libusb transfer plus the semaphore its completion
 * callback posts; linked into the global ut_list while pending. */
struct usb_transfer {
	cgsem_t cgsem;
	struct libusb_transfer *transfer;
	bool cancellable;
	struct list_head list;
};

/* True while any async USB transfer is still pending (ut_list non-empty). */
bool async_usb_transfers(void)
{
	bool ret;

	cg_rlock(&cgusb_fd_lock);
	ret = !list_empty(&ut_list);
	cg_runlock(&cgusb_fd_lock);

	return ret;
}

/* Cancellable transfers should only be labelled as such if it is safe for them
 * to effectively mimic timing out early. This flag is usually used to signify
 * a read is waiting on a non-critical response that takes a long time and the
 * driver wishes it be aborted if work restart message has been sent. */
void cancel_usb_transfers(void)
{
	struct usb_transfer *ut;
	int cancellations = 0;

	cg_wlock(&cgusb_fd_lock);
	list_for_each_entry(ut, &ut_list, list) {
		if (ut->cancellable) {
			// Clear the flag so a transfer is only cancelled once
			ut->cancellable = false;
			libusb_cancel_transfer(ut->transfer);
			cancellations++;
		}
	}
	cg_wunlock(&cgusb_fd_lock);

	if (cancellations)
		applog(LOG_DEBUG, "Cancelled %d USB transfers", cancellations);
}

/* Prepare a usb_transfer: fresh semaphore plus an allocated libusb transfer
 * whose user_data points back at the wrapper for the completion callback. */
static void init_usb_transfer(struct usb_transfer *ut)
{
	cgsem_init(&ut->cgsem);
	ut->transfer = libusb_alloc_transfer(0);
	if (unlikely(!ut->transfer))
		quit(1, "Failed to libusb_alloc_transfer");
	ut->transfer->user_data = ut;
	ut->cancellable = false;
}

/* Unlink a finished transfer from ut_list and release its resources. */
static void complete_usb_transfer(struct usb_transfer *ut)
{
	cg_wlock(&cgusb_fd_lock);
	list_del(&ut->list);
	cg_wunlock(&cgusb_fd_lock);

	cgsem_destroy(&ut->cgsem);
	libusb_free_transfer(ut->transfer);
}

/* libusb completion callback: wake whoever is blocked in callback_wait(). */
static void LIBUSB_CALL transfer_callback(struct libusb_transfer *transfer)
{
	struct usb_transfer *ut = transfer->user_data;

	ut->cancellable = false;
	cgsem_post(&ut->cgsem);
}

/* Map a libusb_transfer_status (>0) onto the equivalent libusb error code;
 * values <= 0 are already error codes and pass through unchanged. */
static int usb_transfer_toerr(int ret)
{
	if (ret <= 0)
		return ret;

	switch (ret) {
	default:
		// Unknown statuses are treated as completed
	case LIBUSB_TRANSFER_COMPLETED:
		ret = LIBUSB_SUCCESS;
		break;
	case LIBUSB_TRANSFER_ERROR:
		ret = LIBUSB_ERROR_IO;
		break;
	case LIBUSB_TRANSFER_TIMED_OUT:
	case LIBUSB_TRANSFER_CANCELLED:
		ret = LIBUSB_ERROR_TIMEOUT;
		break;
	case LIBUSB_TRANSFER_STALL:
		ret = LIBUSB_ERROR_PIPE;
		break;
	case LIBUSB_TRANSFER_NO_DEVICE:
		ret = LIBUSB_ERROR_NO_DEVICE;
		break;
	case LIBUSB_TRANSFER_OVERFLOW:
		ret = LIBUSB_ERROR_OVERFLOW;
		break;
	}
	return ret;
}

/* Wait for callback function to tell us it has finished the USB transfer, but
 * use our own timer to cancel the request if we go beyond the timeout.
*/ +static int callback_wait(struct usb_transfer *ut, int *transferred, unsigned int timeout) +{ + struct libusb_transfer *transfer= ut->transfer; + int ret; + + ret = cgsem_mswait(&ut->cgsem, timeout); + if (ret == ETIMEDOUT) { + /* We are emulating a timeout ourself here */ + libusb_cancel_transfer(transfer); + + /* Now wait for the callback function to be invoked. */ + cgsem_wait(&ut->cgsem); + } + ret = transfer->status; + ret = usb_transfer_toerr(ret); + + /* No need to sort out mutexes here since they won't be reused */ + *transferred = transfer->actual_length; + + return ret; +} + +static int usb_submit_transfer(struct usb_transfer *ut, struct libusb_transfer *transfer, + bool cancellable, bool tt) +{ + int err; + + INIT_LIST_HEAD(&ut->list); + + cg_wlock(&cgusb_fd_lock); + /* Imitate a transaction translator for writes to usb1.1 devices */ + if (tt) + cgsleep_ms_r(&usb11_cgt, 1); + err = libusb_submit_transfer(transfer); + if (likely(!err)) + ut->cancellable = cancellable; + list_add(&ut->list, &ut_list); + if (tt) + cgtimer_time(&usb11_cgt); + cg_wunlock(&cgusb_fd_lock); + + return err; +} + +static int +usb_perform_transfer(struct cgpu_info *cgpu, struct cg_usb_device *usbdev, int intinfo, + int epinfo, unsigned char *data, int length, int *transferred, + unsigned int timeout, __maybe_unused int mode, enum usb_cmds cmd, + __maybe_unused int seq, bool cancellable, bool tt) +{ + int bulk_timeout, callback_timeout = timeout, err_retries = 0; + struct libusb_device_handle *dev_handle = usbdev->handle; + struct usb_epinfo *usb_epinfo; + struct usb_transfer ut; + unsigned char endpoint; + bool interrupt; + int err, errn; +#if DO_USB_STATS + struct timeval tv_start, tv_finish; +#endif + unsigned char buf[512]; +#ifdef WIN32 + /* On windows the callback_timeout is a safety mechanism only. */ + bulk_timeout = timeout; + callback_timeout += WIN_CALLBACK_EXTRA; +#else + /* We give the transfer no timeout since we manage timeouts ourself on + * non windows. 
*/ + bulk_timeout = 0; +#endif + + usb_epinfo = &(usbdev->found->intinfos[intinfo].epinfos[epinfo]); + interrupt = usb_epinfo->att == LIBUSB_TRANSFER_TYPE_INTERRUPT; + endpoint = usb_epinfo->ep; + + if (unlikely(!data)) { + applog(LOG_ERR, "USB error: usb_perform_transfer sent NULL data (%s,intinfo=%d,epinfo=%d,length=%d,timeout=%u,mode=%d,cmd=%s,seq=%d) endpoint=%d", + cgpu->drv->name, intinfo, epinfo, length, timeout, mode, usb_cmdname(cmd), seq, (int)endpoint); + err = LIBUSB_ERROR_IO; + goto out_fail; + } + /* Avoid any async transfers during shutdown to allow the polling + * thread to be shut down after all existing transfers are complete */ + if (opt_lowmem || cgpu->shutdown) + return libusb_bulk_transfer(dev_handle, endpoint, data, length, transferred, timeout); +err_retry: + init_usb_transfer(&ut); + + if ((endpoint & LIBUSB_ENDPOINT_DIR_MASK) == LIBUSB_ENDPOINT_OUT) { + cg_memcpy(buf, data, length); +#ifndef HAVE_LIBUSB + /* Older versions may not have this feature so only enable it + * when we know we're compiling with included static libusb. We + * only do this for bulk transfer, not interrupt. */ + if (!cgpu->nozlp && !interrupt) + ut.transfer->flags |= LIBUSB_TRANSFER_ADD_ZERO_PACKET; +#endif +#ifdef WIN32 + /* Writes on windows really don't like to be cancelled, but + * are prone to timeouts under heavy USB traffic, so make this + * a last resort cancellation delayed long after the write + * would have timed out on its own. 
*/ + callback_timeout += WIN_WRITE_CBEXTRA; +#endif + } + + USBDEBUG("USB debug: @usb_perform_transfer(%s (nodev=%s),intinfo=%d,epinfo=%d,data=%p,length=%d,timeout=%u,mode=%d,cmd=%s,seq=%d) endpoint=%d", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), intinfo, epinfo, data, length, timeout, mode, usb_cmdname(cmd), seq, (int)endpoint); + + if (interrupt) { + libusb_fill_interrupt_transfer(ut.transfer, dev_handle, endpoint, + buf, length, transfer_callback, &ut, + bulk_timeout); + } else { + libusb_fill_bulk_transfer(ut.transfer, dev_handle, endpoint, buf, + length, transfer_callback, &ut, bulk_timeout); + } + STATS_TIMEVAL(&tv_start); + err = usb_submit_transfer(&ut, ut.transfer, cancellable, tt); + errn = errno; + if (!err) + err = callback_wait(&ut, transferred, callback_timeout); + else + err = usb_transfer_toerr(err); + complete_usb_transfer(&ut); + + STATS_TIMEVAL(&tv_finish); + USB_STATS(cgpu, &tv_start, &tv_finish, err, mode, cmd, seq, timeout); + + if (err < 0) { + applog(LOG_DEBUG, "%s%i: %s (amt=%d err=%d ern=%d)", + cgpu->drv->name, cgpu->device_id, + usb_cmdname(cmd), *transferred, err, errn); + } + + if (err == LIBUSB_ERROR_PIPE) { + int pipeerr, retries = 0; + + do { + cgpu->usbinfo.last_pipe = time(NULL); + cgpu->usbinfo.pipe_count++; + applog(LOG_INFO, "%s%i: libusb pipe error, trying to clear", + cgpu->drv->name, cgpu->device_id); + pipeerr = libusb_clear_halt(dev_handle, endpoint); + applog(LOG_DEBUG, "%s%i: libusb pipe error%scleared", + cgpu->drv->name, cgpu->device_id, err ? 
" not " : " "); + + if (pipeerr) + cgpu->usbinfo.clear_fail_count++; + } while (pipeerr && ++retries < USB_RETRY_MAX); + if (!pipeerr && ++err_retries < USB_RETRY_MAX) + goto err_retry; + } + if (err == LIBUSB_ERROR_IO && ++err_retries < USB_RETRY_MAX) + goto err_retry; +out_fail: + if (NODEV(err)) + *transferred = 0; + else if ((endpoint & LIBUSB_ENDPOINT_DIR_MASK) == LIBUSB_ENDPOINT_IN && *transferred) + cg_memcpy(data, buf, *transferred); + + return err; +} + +void usb_reset(struct cgpu_info *cgpu) +{ + int pstate, err = 0; + + DEVRLOCK(cgpu, pstate); + if (!cgpu->usbinfo.nodev) { + err = libusb_reset_device(cgpu->usbdev->handle); + applog(LOG_WARNING, "%s %i attempted reset got err:(%d) %s", + cgpu->drv->name, cgpu->device_id, err, libusb_error_name(err)); + } + if (NODEV(err)) { + cg_ruwlock(&cgpu->usbinfo.devlock); + release_cgpu(cgpu); + DEVWUNLOCK(cgpu, pstate); + } else + DEVRUNLOCK(cgpu, pstate); +} + +int _usb_read(struct cgpu_info *cgpu, int intinfo, int epinfo, char *buf, size_t bufsiz, + int *processed, int timeout, const char *end, enum usb_cmds cmd, bool readonce, bool cancellable) +{ + unsigned char *ptr, usbbuf[USB_READ_BUFSIZE]; + struct timeval read_start, tv_finish; + int bufleft, err, got, tot, pstate, tried_reset; + struct cg_usb_device *usbdev; + unsigned int initial_timeout; + bool first = true; + size_t usbbufread; + int endlen = 0; + char *eom = NULL; + double done; + bool ftdi; + + memset(usbbuf, 0, USB_READ_BUFSIZE); + memset(buf, 0, bufsiz); + + if (end) + endlen = strlen(end); + + DEVRLOCK(cgpu, pstate); + if (cgpu->usbinfo.nodev) { + *processed = 0; + USB_REJECT(cgpu, MODE_BULK_READ); + + err = LIBUSB_ERROR_NO_DEVICE; + goto out_noerrmsg; + } + + usbdev = cgpu->usbdev; + /* Interrupt transfers are guaranteed to be of an expected size (we hope) */ + if (usbdev->found->intinfos[intinfo].epinfos[epinfo].att == LIBUSB_TRANSFER_TYPE_INTERRUPT) + usbbufread = bufsiz; + else + usbbufread = 512; + + ftdi = (usbdev->usb_type == 
	USB_TYPE_FTDI);

	USBDEBUG("USB debug: _usb_read(%s (nodev=%s),intinfo=%d,epinfo=%d,buf=%p,bufsiz=%d,proc=%p,timeout=%u,end=%s,cmd=%s,ftdi=%s,readonce=%s)", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), intinfo, epinfo, buf, (int)bufsiz, processed, timeout, end ? (char *)str_text((char *)end) : "NULL", usb_cmdname(cmd), bool_str(ftdi), bool_str(readonce));

	if (bufsiz > USB_MAX_READ)
		quit(1, "%s USB read request %d too large (max=%d)", cgpu->drv->name, (int)bufsiz, USB_MAX_READ);

	if (timeout == DEVTIMEOUT)
		timeout = usbdev->found->timeout;

	// Consume any bytes buffered by a previous over-long read first
	tot = usbdev->bufamt;
	bufleft = bufsiz - tot;
	if (tot)
		cg_memcpy(usbbuf, usbdev->buffer, tot);
	ptr = usbbuf + tot;
	usbdev->bufamt = 0;

	err = LIBUSB_SUCCESS;
	// The end-of-message marker may already be in the buffered data
	if (end != NULL)
		eom = strstr((const char *)usbbuf, end);

	initial_timeout = timeout;
	cgtime(&read_start);
	tried_reset = 0;
	while (bufleft > 0 && !eom) {
		err = usb_perform_transfer(cgpu, usbdev, intinfo, epinfo, ptr, usbbufread,
					   &got, timeout, MODE_BULK_READ, cmd,
					   first ? SEQ0 : SEQ1, cancellable, false);
		cgtime(&tv_finish);
		// NUL-terminate so strstr() below is safe
		ptr[got] = '\0';

		USBDEBUG("USB debug: @_usb_read(%s (nodev=%s)) first=%s err=%d%s got=%d ptr='%s' usbbufread=%d", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), bool_str(first), err, isnodev(err), got, (char *)str_text((char *)ptr), (int)usbbufread);

		if (ftdi) {
			// first 2 bytes returned are an FTDI status
			if (got > 2) {
				got -= 2;
				memmove(ptr, ptr+2, got+1);
			} else {
				got = 0;
				*ptr = '\0';
			}
		}

		tot += got;
		if (end != NULL)
			eom = strstr((const char *)usbbuf, end);

		/* Attempt a usb reset for an error that will otherwise cause
		 * this device to drop out provided we know the device still
		 * might exist. */
		if (err && err != LIBUSB_ERROR_TIMEOUT) {
			applog(LOG_WARNING, "%s %i %s usb read err:(%d) %s", cgpu->drv->name,
			       cgpu->device_id, usb_cmdname(cmd), err, libusb_error_name(err));
			if (err != LIBUSB_ERROR_NO_DEVICE && !tried_reset) {
				err = libusb_reset_device(usbdev->handle);
				tried_reset = 1; // don't call reset twice in a row
				applog(LOG_WARNING, "%s %i attempted reset got err:(%d) %s",
				       cgpu->drv->name, cgpu->device_id, err, libusb_error_name(err));
			}
		} else {
			tried_reset = 0;
		}
		ptr += got;
		bufleft -= got;
		// A full buffer is success regardless of the last transfer's err
		if (bufleft < 1)
			err = LIBUSB_SUCCESS;

		if (err || readonce)
			break;

		first = false;

		done = tdiff(&tv_finish, &read_start);
		// N.B. this is: return last err with whatever size has already been read
		timeout = initial_timeout - (done * 1000);
		if (timeout <= 0)
			break;
	}

	/* If we found the end of message marker, just use that data and
	 * return success. */
	if (eom) {
		size_t eomlen = (void *)eom - (void *)usbbuf + endlen;

		if (eomlen < bufsiz) {
			bufsiz = eomlen;
			err = LIBUSB_SUCCESS;
		}
	}

	// N.B. usbdev->buffer was emptied before the while() loop
	if (tot > (int)bufsiz) {
		// Stash the overflow for the next _usb_read() call
		usbdev->bufamt = tot - bufsiz;
		cg_memcpy(usbdev->buffer, usbbuf + bufsiz, usbdev->bufamt);
		tot -= usbdev->bufamt;
		usbbuf[tot] = '\0';
		applog(LOG_DEBUG, "USB: %s%i read1 buffering %d extra bytes",
		       cgpu->drv->name, cgpu->device_id, usbdev->bufamt);
	}

	*processed = tot;
	// Include the trailing NUL when the data fits with room to spare
	cg_memcpy((char *)buf, (const char *)usbbuf, (tot < (int)bufsiz) ? tot + 1 : (int)bufsiz);

out_noerrmsg:
	if (NODEV(err)) {
		cg_ruwlock(&cgpu->usbinfo.devlock);
		release_cgpu(cgpu);
		DEVWUNLOCK(cgpu, pstate);
	} else
		DEVRUNLOCK(cgpu, pstate);

	return err;
}

/*
 * Bulk write with self-managed overall timeout.  Splits packets for USB 1.1
 * devices so the final chunk never equals wMaxPacketSize (which would stall
 * them), and treats any error - including timeout - as fatal for the write.
 */
int _usb_write(struct cgpu_info *cgpu, int intinfo, int epinfo, char *buf, size_t bufsiz, int *processed, int timeout, enum usb_cmds cmd)
{
	struct timeval write_start, tv_finish;
	struct cg_usb_device *usbdev;
	unsigned int initial_timeout;
	int err, sent, tot, pstate, tried_reset;
	bool first = true;
	double done;

	DEVRLOCK(cgpu, pstate);

	USBDEBUG("USB debug: _usb_write(%s (nodev=%s),intinfo=%d,epinfo=%d,buf='%s',bufsiz=%d,proc=%p,timeout=%u,cmd=%s)", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), intinfo, epinfo, (char *)str_text(buf), (int)bufsiz, processed, timeout, usb_cmdname(cmd));

	*processed = 0;

	if (cgpu->usbinfo.nodev) {
		USB_REJECT(cgpu, MODE_BULK_WRITE);

		err = LIBUSB_ERROR_NO_DEVICE;
		goto out_noerrmsg;
	}

	usbdev = cgpu->usbdev;
	if (timeout == DEVTIMEOUT)
		timeout = usbdev->found->timeout;

	tot = 0;
	err = LIBUSB_SUCCESS;
	initial_timeout = timeout;
	cgtime(&write_start);
	tried_reset = 0;
	while (bufsiz > 0) {
		int tosend = bufsiz;

		/* USB 1.1 devices don't handle zero packets well so split them
		 * up to not have the final transfer equal to the wMaxPacketSize
		 * or they will stall waiting for more data. */
		if (usbdev->usb11) {
			struct usb_epinfo *ue = &usbdev->found->intinfos[intinfo].epinfos[epinfo];

			if (tosend == ue->wMaxPacketSize) {
				tosend >>= 1;
				if (unlikely(!tosend))
					tosend = 1;
			}
		}
		err = usb_perform_transfer(cgpu, usbdev, intinfo, epinfo, (unsigned char *)buf,
					   tosend, &sent, timeout, MODE_BULK_WRITE,
					   cmd, first ? SEQ0 : SEQ1, false, usbdev->tt);
		cgtime(&tv_finish);

		USBDEBUG("USB debug: @_usb_write(%s (nodev=%s)) err=%d%s sent=%d", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), err, isnodev(err), sent);

		tot += sent;

		/* Unlike reads, even a timeout error is unrecoverable on
		 * writes. */
		if (err) {
			applog(LOG_WARNING, "%s %i %s usb write err:(%d) %s", cgpu->drv->name,
			       cgpu->device_id, usb_cmdname(cmd), err, libusb_error_name(err));
			if (err != LIBUSB_ERROR_NO_DEVICE && !tried_reset) {
				err = libusb_reset_device(usbdev->handle);
				tried_reset = 1; // don't try reset twice in a row
				applog(LOG_WARNING, "%s %i attempted reset got err:(%d) %s",
				       cgpu->drv->name, cgpu->device_id, err, libusb_error_name(err));
			}
		} else {
			tried_reset = 0;
		}
		if (err)
			break;

		buf += sent;
		bufsiz -= sent;

		first = false;

		done = tdiff(&tv_finish, &write_start);
		// N.B. this is: return last err with whatever size was written
		timeout = initial_timeout - (done * 1000);
		if (timeout <= 0)
			break;
	}

	*processed = tot;

out_noerrmsg:
	if (NODEV(err)) {
		cg_ruwlock(&cgpu->usbinfo.devlock);
		release_cgpu(cgpu);
		DEVWUNLOCK(cgpu, pstate);
	} else
		DEVRUNLOCK(cgpu, pstate);

	return err;
}

/* As we do for bulk reads, emulate a sync function for control transfers using
 * our own timeouts that takes the same parameters as libusb_control_transfer.
+ */ +static int usb_control_transfer(struct cgpu_info *cgpu, libusb_device_handle *dev_handle, uint8_t bmRequestType, + uint8_t bRequest, uint16_t wValue, uint16_t wIndex, + unsigned char *buffer, uint16_t wLength, unsigned int timeout) +{ + struct usb_transfer ut; + unsigned char buf[70]; + int err, transferred; + bool tt = false; + + if (unlikely(cgpu->shutdown)) + return libusb_control_transfer(dev_handle, bmRequestType, bRequest, wValue, wIndex, buffer, wLength, timeout); + + init_usb_transfer(&ut); + libusb_fill_control_setup(buf, bmRequestType, bRequest, wValue, + wIndex, wLength); + if ((bmRequestType & LIBUSB_ENDPOINT_DIR_MASK) == LIBUSB_ENDPOINT_OUT) { + if (wLength) + cg_memcpy(buf + LIBUSB_CONTROL_SETUP_SIZE, buffer, wLength); + if (cgpu->usbdev->descriptor->bcdUSB < 0x0200) + tt = true; + } + libusb_fill_control_transfer(ut.transfer, dev_handle, buf, transfer_callback, + &ut, 0); + err = usb_submit_transfer(&ut, ut.transfer, false, tt); + if (!err) + err = callback_wait(&ut, &transferred, timeout); + if (err == LIBUSB_SUCCESS && transferred) { + if ((bmRequestType & LIBUSB_ENDPOINT_DIR_MASK) == LIBUSB_ENDPOINT_IN) + cg_memcpy(buffer, libusb_control_transfer_get_data(ut.transfer), + transferred); + err = transferred; + goto out; + } + err = usb_transfer_toerr(err); +out: + complete_usb_transfer(&ut); + return err; +} + +int __usb_transfer(struct cgpu_info *cgpu, uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, uint32_t *data, int siz, unsigned int timeout, __maybe_unused enum usb_cmds cmd) +{ + struct cg_usb_device *usbdev; +#if DO_USB_STATS + struct timeval tv_start, tv_finish; +#endif + unsigned char buf[64]; + uint32_t *buf32 = (uint32_t *)buf; + int err, i, bufsiz; + + USBDEBUG("USB debug: _usb_transfer(%s (nodev=%s),type=%"PRIu8",req=%"PRIu8",value=%"PRIu16",index=%"PRIu16",siz=%d,timeout=%u,cmd=%s)", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), request_type, bRequest, wValue, wIndex, siz, timeout, usb_cmdname(cmd)); + + 
if (cgpu->usbinfo.nodev) { + USB_REJECT(cgpu, MODE_CTRL_WRITE); + + err = LIBUSB_ERROR_NO_DEVICE; + goto out_; + } + usbdev = cgpu->usbdev; + if (timeout == DEVTIMEOUT) + timeout = usbdev->found->timeout; + + USBDEBUG("USB debug: @_usb_transfer() data=%s", bin2hex((unsigned char *)data, (size_t)siz)); + + if (siz > 0) { + bufsiz = siz - 1; + bufsiz >>= 2; + bufsiz++; + for (i = 0; i < bufsiz; i++) + buf32[i] = htole32(data[i]); + } + + USBDEBUG("USB debug: @_usb_transfer() buf=%s", bin2hex(buf, (size_t)siz)); + + STATS_TIMEVAL(&tv_start); + err = usb_control_transfer(cgpu, usbdev->handle, request_type, bRequest, + wValue, wIndex, buf, (uint16_t)siz, timeout); + STATS_TIMEVAL(&tv_finish); + USB_STATS(cgpu, &tv_start, &tv_finish, err, MODE_CTRL_WRITE, cmd, SEQ0, timeout); + + USBDEBUG("USB debug: @_usb_transfer(%s (nodev=%s)) err=%d%s", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), err, isnodev(err)); + + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_WARNING, "%s %i usb transfer err:(%d) %s", cgpu->drv->name, cgpu->device_id, + err, libusb_error_name(err)); + } +out_: + return err; +} + +/* We use the write devlock for control transfers since some control transfers + * are rare but may be changing settings within the device causing problems + * if concurrent transfers are happening. Using the write lock serialises + * any transfers. 
*/ +int _usb_transfer(struct cgpu_info *cgpu, uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, uint32_t *data, int siz, unsigned int timeout, enum usb_cmds cmd) +{ + int pstate, err; + + DEVWLOCK(cgpu, pstate); + + err = __usb_transfer(cgpu, request_type, bRequest, wValue, wIndex, data, siz, timeout, cmd); + + if (NOCONTROLDEV(err)) + release_cgpu(cgpu); + + DEVWUNLOCK(cgpu, pstate); + + return err; +} + +int _usb_transfer_read(struct cgpu_info *cgpu, uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, char *buf, int bufsiz, int *amount, unsigned int timeout, __maybe_unused enum usb_cmds cmd) +{ + struct cg_usb_device *usbdev; +#if DO_USB_STATS + struct timeval tv_start, tv_finish; +#endif + unsigned char tbuf[64]; + int err, pstate; + + DEVWLOCK(cgpu, pstate); + + USBDEBUG("USB debug: _usb_transfer_read(%s (nodev=%s),type=%"PRIu8",req=%"PRIu8",value=%"PRIu16",index=%"PRIu16",bufsiz=%d,timeout=%u,cmd=%s)", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), request_type, bRequest, wValue, wIndex, bufsiz, timeout, usb_cmdname(cmd)); + + if (cgpu->usbinfo.nodev) { + USB_REJECT(cgpu, MODE_CTRL_READ); + + err = LIBUSB_ERROR_NO_DEVICE; + goto out_noerrmsg; + } + usbdev = cgpu->usbdev; + if (timeout == DEVTIMEOUT) + timeout = usbdev->found->timeout; + + *amount = 0; + + memset(tbuf, 0, 64); + STATS_TIMEVAL(&tv_start); + err = usb_control_transfer(cgpu, usbdev->handle, request_type, bRequest, + wValue, wIndex, tbuf, (uint16_t)bufsiz, timeout); + STATS_TIMEVAL(&tv_finish); + USB_STATS(cgpu, &tv_start, &tv_finish, err, MODE_CTRL_READ, cmd, SEQ0, timeout); + cg_memcpy(buf, tbuf, bufsiz); + + USBDEBUG("USB debug: @_usb_transfer_read(%s (nodev=%s)) amt/err=%d%s%s%s", cgpu->drv->name, bool_str(cgpu->usbinfo.nodev), err, isnodev(err), err > 0 ? " = " : BLANK, err > 0 ? 
bin2hex((unsigned char *)buf, (size_t)err) : BLANK); + + if (err > 0) { + *amount = err; + err = 0; + } + if (err < 0 && err != LIBUSB_ERROR_TIMEOUT) { + applog(LOG_WARNING, "%s %i usb transfer read err:(%d) %s", cgpu->drv->name, cgpu->device_id, + err, libusb_error_name(err)); + } +out_noerrmsg: + if (NOCONTROLDEV(err)) + release_cgpu(cgpu); + + DEVWUNLOCK(cgpu, pstate); + + return err; +} + +#define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD) +#define FTDI_RS0_CTS (1 << 4) +#define FTDI_RS0_DSR (1 << 5) +#define FTDI_RS0_RI (1 << 6) +#define FTDI_RS0_RLSD (1 << 7) + +/* Clear to send for FTDI */ +int usb_ftdi_cts(struct cgpu_info *cgpu) +{ + char buf[2], ret; + int err, amount; + + err = _usb_transfer_read(cgpu, (uint8_t)FTDI_TYPE_IN, (uint8_t)5, + (uint16_t)0, (uint16_t)0, buf, 2, + &amount, DEVTIMEOUT, C_FTDI_STATUS); + /* We return true in case drivers are waiting indefinitely to try and + * write to something that's not there. */ + if (err) + return true; + + ret = buf[0] & FTDI_STATUS_B0_MASK; + return (ret & FTDI_RS0_CTS); +} + +int _usb_ftdi_set_latency(struct cgpu_info *cgpu, int intinfo) +{ + int err = 0; + int pstate; + + DEVWLOCK(cgpu, pstate); + + if (cgpu->usbdev) { + if (cgpu->usbdev->usb_type != USB_TYPE_FTDI) { + applog(LOG_ERR, "%s: bmgid %d latency request on non-FTDI device", + cgpu->drv->name, cgpu->cgminer_id); + err = LIBUSB_ERROR_NOT_SUPPORTED; + } else if (cgpu->usbdev->found->latency == LATENCY_UNUSED) { + applog(LOG_ERR, "%s: cgid %d invalid latency (UNUSED)", + cgpu->drv->name, cgpu->cgminer_id); + err = LIBUSB_ERROR_NOT_SUPPORTED; + } + + if (!err) + err = __usb_transfer(cgpu, FTDI_TYPE_OUT, FTDI_REQUEST_LATENCY, + cgpu->usbdev->found->latency, + USBIF(cgpu->usbdev, intinfo), + NULL, 0, DEVTIMEOUT, C_LATENCY); + } + + DEVWUNLOCK(cgpu, pstate); + + applog(LOG_DEBUG, "%s: bmgid %d %s got err %d", + cgpu->drv->name, cgpu->cgminer_id, + usb_cmdname(C_LATENCY), err); + + return err; +} + +void 
usb_buffer_clear(struct cgpu_info *cgpu) +{ + int pstate; + + DEVWLOCK(cgpu, pstate); + + if (cgpu->usbdev) + cgpu->usbdev->bufamt = 0; + + DEVWUNLOCK(cgpu, pstate); +} + +uint32_t usb_buffer_size(struct cgpu_info *cgpu) +{ + uint32_t ret = 0; + int pstate; + + DEVRLOCK(cgpu, pstate); + + if (cgpu->usbdev) + ret = cgpu->usbdev->bufamt; + + DEVRUNLOCK(cgpu, pstate); + + return ret; +} + +/* + * The value returned (0) when usbdev is NULL + * doesn't matter since it also means the next call to + * any usbutils function will fail with a nodev + * N.B. this is to get the interface number to use in a control_transfer + * which for some devices isn't actually the interface number + */ +int _usb_interface(struct cgpu_info *cgpu, int intinfo) +{ + int interface = 0; + int pstate; + + DEVRLOCK(cgpu, pstate); + + if (cgpu->usbdev) + interface = cgpu->usbdev->found->intinfos[intinfo].ctrl_transfer; + + DEVRUNLOCK(cgpu, pstate); + + return interface; +} + +enum sub_ident usb_ident(struct cgpu_info *cgpu) +{ + enum sub_ident ident = IDENT_UNK; + int pstate; + + DEVRLOCK(cgpu, pstate); + + if (cgpu->usbdev) + ident = cgpu->usbdev->ident; + + DEVRUNLOCK(cgpu, pstate); + + return ident; +} + +// Need to set all devices with matching usbdev +void usb_set_dev_start(struct cgpu_info *cgpu) +{ + struct cg_usb_device *cgusb; + struct cgpu_info *cgpu2; + struct timeval now; + int pstate; + + DEVWLOCK(cgpu, pstate); + + cgusb = cgpu->usbdev; + + // If the device wasn't dropped + if (cgusb != NULL) { + int i; + + cgtime(&now); + + for (i = 0; i < total_devices; i++) { + cgpu2 = get_devices(i); + if (cgpu2->usbdev == cgusb) + copy_time(&(cgpu2->dev_start_tv), &now); + } + } + + DEVWUNLOCK(cgpu, pstate); +} + +void usb_cleanup(void) +{ + struct cgpu_info *cgpu; + int count, pstate; + int i; + + hotplug_time = 0; + + cgsleep_ms(10); + + count = 0; + for (i = 0; i < total_devices; i++) { + cgpu = devices[i]; + switch (cgpu->drv->drv_id) { + case DRIVER_bflsc: + case DRIVER_bitforce: + case 
DRIVER_bitfury: + case DRIVER_cointerra: + case DRIVER_drillbit: + case DRIVER_modminer: + case DRIVER_icarus: + case DRIVER_avalon: + case DRIVER_avalon2: + case DRIVER_avalon4: + case DRIVER_bitmain: + case DRIVER_bmsc: + case DRIVER_klondike: + case DRIVER_hashfast: + DEVWLOCK(cgpu, pstate); + release_cgpu(cgpu); + DEVWUNLOCK(cgpu, pstate); + count++; + break; + default: + break; + } + } + + /* + * Must attempt to wait for the resource thread to release coz + * during a restart it won't automatically release them in linux + */ + if (count) { + struct timeval start, now; + + cgtime(&start); + while (42) { + cgsleep_ms(50); + + mutex_lock(&cgusbres_lock); + + if (!res_work_head) + break; + + cgtime(&now); + if (tdiff(&now, &start) > 0.366) { + applog(LOG_WARNING, + "usb_cleanup gave up waiting for resource thread"); + break; + } + + mutex_unlock(&cgusbres_lock); + } + mutex_unlock(&cgusbres_lock); + } + + cgsem_destroy(&usb_resource_sem); +} + +#define DRIVER_COUNT_FOUND(X) if (X##_drv.name && strcasecmp(ptr, X##_drv.name) == 0) { \ + drv_count[X##_drv.drv_id].limit = lim; \ + found = true; \ + } +void usb_initialise(void) +{ + char *fre, *ptr, *comma, *colon; + int bus, dev, lim, i; + bool found; + + INIT_LIST_HEAD(&ut_list); + + for (i = 0; i < DRIVER_MAX; i++) { + drv_count[i].count = 0; + drv_count[i].limit = 999999; + } + + cgusb_check_init(); + + if (opt_usb_select && *opt_usb_select) { + // Absolute device limit + if (*opt_usb_select == ':') { + total_limit = atoi(opt_usb_select+1); + if (total_limit < 0) + quit(1, "Invalid --usb total limit"); + // Comma list of bus:dev devices to match + } else if (isdigit(*opt_usb_select)) { + fre = ptr = strdup(opt_usb_select); + do { + comma = strchr(ptr, ','); + if (comma) + *(comma++) = '\0'; + + colon = strchr(ptr, ':'); + if (!colon) + quit(1, "Invalid --usb bus:dev missing ':'"); + + *(colon++) = '\0'; + + if (!isdigit(*ptr)) + quit(1, "Invalid --usb bus:dev - bus must be a number"); + + if (!isdigit(*colon) && 
*colon != '*') + quit(1, "Invalid --usb bus:dev - dev must be a number or '*'"); + + bus = atoi(ptr); + if (bus <= 0) + quit(1, "Invalid --usb bus:dev - bus must be > 0"); + + if (*colon == '*') + dev = -1; + else { + dev = atoi(colon); + if (dev <= 0) + quit(1, "Invalid --usb bus:dev - dev must be > 0 or '*'"); + } + + busdev = realloc(busdev, sizeof(*busdev) * (++busdev_count)); + if (unlikely(!busdev)) + quit(1, "USB failed to realloc busdev"); + + busdev[busdev_count-1].bus_number = bus; + busdev[busdev_count-1].device_address = dev; + + ptr = comma; + } while (ptr); + free(fre); + // Comma list of DRV:limit + } else { + fre = ptr = strdup(opt_usb_select); + do { + comma = strchr(ptr, ','); + if (comma) + *(comma++) = '\0'; + + colon = strchr(ptr, ':'); + if (!colon) + quit(1, "Invalid --usb DRV:limit missing ':'"); + + *(colon++) = '\0'; + + if (!isdigit(*colon)) + quit(1, "Invalid --usb DRV:limit - limit must be a number"); + + lim = atoi(colon); + if (lim < 0) + quit(1, "Invalid --usb DRV:limit - limit must be >= 0"); + + found = false; + /* Use the DRIVER_PARSE_COMMANDS macro to iterate + * over all the drivers. 
*/ + DRIVER_PARSE_COMMANDS(DRIVER_COUNT_FOUND) + if (!found) + quit(1, "Invalid --usb DRV:limit - unknown DRV='%s'", ptr); + + ptr = comma; + } while (ptr); + free(fre); + } + } +} + +#ifndef WIN32 +#include +#include +#include +#include +#include + +#ifndef __APPLE__ +union semun { + int val; + struct semid_ds *buf; + unsigned short *array; + struct seminfo *__buf; +}; +#endif + +#else +static LPSECURITY_ATTRIBUTES unsec(LPSECURITY_ATTRIBUTES sec) +{ + FreeSid(((PSECURITY_DESCRIPTOR)(sec->lpSecurityDescriptor))->Group); + free(sec->lpSecurityDescriptor); + free(sec); + return NULL; +} + +static LPSECURITY_ATTRIBUTES mksec(const char *dname, uint8_t bus_number, uint8_t device_address) +{ + SID_IDENTIFIER_AUTHORITY SIDAuthWorld = {SECURITY_WORLD_SID_AUTHORITY}; + PSID gsid = NULL; + LPSECURITY_ATTRIBUTES sec_att = NULL; + PSECURITY_DESCRIPTOR sec_des = NULL; + + sec_des = malloc(sizeof(*sec_des)); + if (unlikely(!sec_des)) + quit(1, "MTX: Failed to malloc LPSECURITY_DESCRIPTOR"); + + if (!InitializeSecurityDescriptor(sec_des, SECURITY_DESCRIPTOR_REVISION)) { + applog(LOG_ERR, + "MTX: %s (%d:%d) USB failed to init secdes err (%d)", + dname, (int)bus_number, (int)device_address, + (int)GetLastError()); + free(sec_des); + return NULL; + } + + if (!SetSecurityDescriptorDacl(sec_des, TRUE, NULL, FALSE)) { + applog(LOG_ERR, + "MTX: %s (%d:%d) USB failed to secdes dacl err (%d)", + dname, (int)bus_number, (int)device_address, + (int)GetLastError()); + free(sec_des); + return NULL; + } + + if(!AllocateAndInitializeSid(&SIDAuthWorld, 1, SECURITY_WORLD_RID, 0, 0, 0, 0, 0, 0, 0, &gsid)) { + applog(LOG_ERR, + "MTX: %s (%d:%d) USB failed to create gsid err (%d)", + dname, (int)bus_number, (int)device_address, + (int)GetLastError()); + free(sec_des); + return NULL; + } + + if (!SetSecurityDescriptorGroup(sec_des, gsid, FALSE)) { + applog(LOG_ERR, + "MTX: %s (%d:%d) USB failed to secdes grp err (%d)", + dname, (int)bus_number, (int)device_address, + (int)GetLastError()); + 
FreeSid(gsid); + free(sec_des); + return NULL; + } + + sec_att = malloc(sizeof(*sec_att)); + if (unlikely(!sec_att)) + quit(1, "MTX: Failed to malloc LPSECURITY_ATTRIBUTES"); + + sec_att->nLength = sizeof(*sec_att); + sec_att->lpSecurityDescriptor = sec_des; + sec_att->bInheritHandle = FALSE; + + return sec_att; +} +#endif + +// Any errors should always be printed since they will rarely if ever occur +// and thus it is best to always display them +static bool resource_lock(const char *dname, uint8_t bus_number, uint8_t device_address) +{ + applog(LOG_DEBUG, "USB res lock %s %d-%d", dname, (int)bus_number, (int)device_address); +#ifdef WIN32 + struct cgpu_info *cgpu; + LPSECURITY_ATTRIBUTES sec; + HANDLE usbMutex; + char name[64]; + DWORD res; + int i; + + if (is_in_use_bd(bus_number, device_address)) + return false; + + snprintf(name, sizeof(name), "cg-usb-%d-%d", (int)bus_number, (int)device_address); + + sec = mksec(dname, bus_number, device_address); + if (!sec) + return false; + + usbMutex = CreateMutex(sec, FALSE, name); + if (usbMutex == NULL) { + applog(LOG_ERR, + "MTX: %s USB failed to get '%s' err (%d)", + dname, name, (int)GetLastError()); + sec = unsec(sec); + return false; + } + + res = WaitForSingleObject(usbMutex, 0); + + switch(res) { + case WAIT_OBJECT_0: + case WAIT_ABANDONED: + // Am I using it already? 
+ for (i = 0; i < total_devices; i++) { + cgpu = get_devices(i); + if (cgpu->usbinfo.bus_number == bus_number && + cgpu->usbinfo.device_address == device_address && + cgpu->usbinfo.nodev == false) { + if (ReleaseMutex(usbMutex)) { + applog(LOG_WARNING, + "MTX: %s USB can't get '%s' - device in use", + dname, name); + goto fail; + } + applog(LOG_ERR, + "MTX: %s USB can't get '%s' - device in use - failure (%d)", + dname, name, (int)GetLastError()); + goto fail; + } + } + break; + case WAIT_TIMEOUT: + if (!hotplug_mode) + applog(LOG_WARNING, + "MTX: %s USB failed to get '%s' - device in use", + dname, name); + goto fail; + case WAIT_FAILED: + applog(LOG_ERR, + "MTX: %s USB failed to get '%s' err (%d)", + dname, name, (int)GetLastError()); + goto fail; + default: + applog(LOG_ERR, + "MTX: %s USB failed to get '%s' unknown reply (%d)", + dname, name, (int)res); + goto fail; + } + + add_in_use(bus_number, device_address, false); + in_use_store_ress(bus_number, device_address, (void *)usbMutex, (void *)sec); + + return true; +fail: + CloseHandle(usbMutex); + sec = unsec(sec); + return false; +#else + char name[64]; + int fd; + + if (is_in_use_bd(bus_number, device_address)) + return false; + + snprintf(name, sizeof(name), "/tmp/bmminer-usb-%d-%d", (int)bus_number, (int)device_address); + fd = open(name, O_CREAT|O_RDONLY, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH); + if (fd == -1) { + applog(LOG_ERR, "%s USB open failed '%s' err (%d) %s", + dname, name, errno, strerror(errno)); + return false; + } + if (flock(fd, LOCK_EX | LOCK_NB)) { + applog(LOG_INFO, "%s USB failed to get '%s' - device in use", + dname, name); + close(fd); + return false; + } + + add_in_use(bus_number, device_address, false); + in_use_store_fd(bus_number, device_address, fd); + return true; +#endif +} + +// Any errors should always be printed since they will rarely if ever occur +// and thus it is best to always display them +static void resource_unlock(const char *dname, uint8_t bus_number, uint8_t 
device_address) +{ + applog(LOG_DEBUG, "USB res unlock %s %d-%d", dname, (int)bus_number, (int)device_address); + +#ifdef WIN32 + LPSECURITY_ATTRIBUTES sec = NULL; + HANDLE usbMutex = NULL; + char name[64]; + + snprintf(name, sizeof(name), "cg-usb-%d-%d", (int)bus_number, (int)device_address); + + in_use_get_ress(bus_number, device_address, (void **)(&usbMutex), (void **)(&sec)); + + if (!usbMutex || !sec) + goto fila; + + if (!ReleaseMutex(usbMutex)) + applog(LOG_ERR, + "MTX: %s USB failed to release '%s' err (%d)", + dname, name, (int)GetLastError()); + +fila: + + if (usbMutex) + CloseHandle(usbMutex); + if (sec) + unsec(sec); + remove_in_use(bus_number, device_address); + return; +#else + char name[64]; + int fd; + + snprintf(name, sizeof(name), "/tmp/bmminer-usb-%d-%d", (int)bus_number, (int)device_address); + + fd = in_use_get_fd(bus_number, device_address); + if (fd < 0) + return; + remove_in_use(bus_number, device_address); + close(fd); + unlink(name); + return; +#endif +} + +static void resource_process() +{ + struct resource_work *res_work = NULL; + struct resource_reply *res_reply = NULL; + bool ok; + + applog(LOG_DEBUG, "RES: %s (%d:%d) lock=%d", + res_work_head->dname, + (int)res_work_head->bus_number, + (int)res_work_head->device_address, + res_work_head->lock); + + if (res_work_head->lock) { + ok = resource_lock(res_work_head->dname, + res_work_head->bus_number, + res_work_head->device_address); + + applog(LOG_DEBUG, "RES: %s (%d:%d) lock ok=%d", + res_work_head->dname, + (int)res_work_head->bus_number, + (int)res_work_head->device_address, + ok); + + res_reply = calloc(1, sizeof(*res_reply)); + if (unlikely(!res_reply)) + quit(1, "USB failed to calloc res_reply"); + + res_reply->bus_number = res_work_head->bus_number; + res_reply->device_address = res_work_head->device_address; + res_reply->got = ok; + res_reply->next = res_reply_head; + + res_reply_head = res_reply; + } + else + resource_unlock(res_work_head->dname, + res_work_head->bus_number, + 
res_work_head->device_address); + + res_work = res_work_head; + res_work_head = res_work_head->next; + free(res_work); +} + +void *usb_resource_thread(void __maybe_unused *userdata) +{ + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + RenameThread("USBResource"); + + applog(LOG_DEBUG, "RES: thread starting"); + + while (42) { + /* Wait to be told we have work to do */ + cgsem_wait(&usb_resource_sem); + + mutex_lock(&cgusbres_lock); + while (res_work_head) + resource_process(); + mutex_unlock(&cgusbres_lock); + } + + return NULL; +} + +void initialise_usblocks(void) +{ + mutex_init(&cgusb_lock); + mutex_init(&cgusbres_lock); + cglock_init(&cgusb_fd_lock); +} + +#ifdef USE_BITMAIN + +struct cgpu_info *btm_alloc_cgpu(struct device_drv *drv, int threads) +{ + struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu)); + + if (unlikely(!cgpu)) + quit(1, "Failed to calloc cgpu for %s in usb_alloc_cgpu", drv->dname); + + cgpu->drv = drv; + cgpu->deven = DEV_ENABLED; + cgpu->threads = threads; + + cgpu->usbinfo.nodev = true; + cgpu->device_fd = -1; + + cglock_init(&cgpu->usbinfo.devlock); + + return cgpu; +} + +struct cgpu_info *btm_free_cgpu(struct cgpu_info *cgpu) +{ + if (cgpu->drv->copy) + free(cgpu->drv); + + if(cgpu->device_path) { + free(cgpu->device_path); + } + + free(cgpu); + + return NULL; +} + +bool btm_init(struct cgpu_info *cgpu, const char * devpath) +{ +#ifdef WIN32 + int fd = -1; + signed short timeout = 1; + unsigned long baud = 115200; + bool purge = true; + HANDLE hSerial = NULL; + applog(LOG_DEBUG, "btm_init cgpu->device_fd=%d", cgpu->device_fd); + if(cgpu->device_fd >= 0) { + return false; + } + hSerial = CreateFile(devpath, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL); + if (unlikely(hSerial == INVALID_HANDLE_VALUE)) + { + DWORD e = GetLastError(); + switch (e) { + case ERROR_ACCESS_DENIED: + applog(LOG_DEBUG, "Do not have user privileges required to open %s", devpath); + break; + case ERROR_SHARING_VIOLATION: + 
applog(LOG_DEBUG, "%s is already in use by another process", devpath); + break; + default: + applog(LOG_DEBUG, "Open %s failed, GetLastError:%d", devpath, (int)e); + break; + } + } else { + // thanks to af_newbie for pointers about this + COMMCONFIG comCfg = {0}; + comCfg.dwSize = sizeof(COMMCONFIG); + comCfg.wVersion = 1; + comCfg.dcb.DCBlength = sizeof(DCB); + comCfg.dcb.BaudRate = baud; + comCfg.dcb.fBinary = 1; + comCfg.dcb.fDtrControl = DTR_CONTROL_ENABLE; + comCfg.dcb.fRtsControl = RTS_CONTROL_ENABLE; + comCfg.dcb.ByteSize = 8; + + SetCommConfig(hSerial, &comCfg, sizeof(comCfg)); + + // Code must specify a valid timeout value (0 means don't timeout) + const DWORD ctoms = (timeout * 100); + COMMTIMEOUTS cto = {ctoms, 0, ctoms, 0, ctoms}; + SetCommTimeouts(hSerial, &cto); + + if (purge) { + PurgeComm(hSerial, PURGE_RXABORT); + PurgeComm(hSerial, PURGE_TXABORT); + PurgeComm(hSerial, PURGE_RXCLEAR); + PurgeComm(hSerial, PURGE_TXCLEAR); + } + fd = _open_osfhandle((intptr_t)hSerial, 0); + } +#else + int fd = -1; + if(cgpu->device_fd >= 0) { + return false; + } + fd = open(devpath, O_RDWR|O_EXCL|O_NONBLOCK); +#endif + if(fd == -1) { + applog(LOG_DEBUG, "%s open %s error %d", + cgpu->drv->dname, devpath, errno); + return false; + } + cgpu->device_path = strdup(devpath); + cgpu->device_fd = fd; + cgpu->usbinfo.nodev = false; + applog(LOG_DEBUG, "btm_init open device fd = %d", cgpu->device_fd); + return true; +} + +void btm_uninit(struct cgpu_info *cgpu) +{ + applog(LOG_DEBUG, "BTM uninit %s%i", cgpu->drv->name, cgpu->device_fd); + + // May have happened already during a failed initialisation + // if release_cgpu() was called due to a USB NODEV(err) + close(cgpu->device_fd); + if(cgpu->device_path) { + free(cgpu->device_path); + cgpu->device_path = NULL; + } +} + +void btm_detect(struct device_drv *drv, bool (*device_detect)(const char*)) +{ + ssize_t count, i; + + applog(LOG_DEBUG, "BTM scan devices: checking for %s devices", drv->name); + + if (total_count >= 
total_limit) { + applog(LOG_DEBUG, "BTM scan devices: total limit %d reached", total_limit); + return; + } + + if (drv_count[drv->drv_id].count >= drv_count[drv->drv_id].limit) { + applog(LOG_DEBUG, + "BTM scan devices: %s limit %d reached", + drv->dname, drv_count[drv->drv_id].limit); + return; + } + device_detect("asic"); +} + +int btm_read(struct cgpu_info *cgpu, char *buf, size_t bufsize) +{ + int err = 0; + //applog(LOG_DEBUG, "btm_read ----- %d -----", bufsize); + err = read(cgpu->device_fd, buf, bufsize); + return err; +} + +int btm_write(struct cgpu_info *cgpu, char *buf, size_t bufsize) +{ + int err = 0; + //applog(LOG_DEBUG, "btm_write ----- %d -----", bufsize); + err = write(cgpu->device_fd, buf, bufsize); + return err; +} + +#endif diff --git a/usbutils.h b/usbutils.h new file mode 100644 index 0000000..b1bd64f --- /dev/null +++ b/usbutils.h @@ -0,0 +1,610 @@ +/* + * Copyright 2012-2013 Andrew Smith + * Copyright 2013-2014 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef USBUTILS_H +#define USBUTILS_H + +#include + +#include "util.h" + +#define EPI(x) (LIBUSB_ENDPOINT_IN | (unsigned char)(x)) +#define EPO(x) (LIBUSB_ENDPOINT_OUT | (unsigned char)(x)) + + +// For 0x0403:0x6014/0x6001 FT232H (and possibly others?) 
- BFL, BAS, BLT, LLT, AVA +#define FTDI_TYPE_OUT (LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_RECIPIENT_DEVICE | LIBUSB_ENDPOINT_OUT) +#define FTDI_TYPE_IN (LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_RECIPIENT_DEVICE | LIBUSB_ENDPOINT_IN) + +#define FTDI_REQUEST_RESET ((uint8_t)0) +#define FTDI_REQUEST_MODEM ((uint8_t)1) +#define FTDI_REQUEST_FLOW ((uint8_t)2) +#define FTDI_REQUEST_BAUD ((uint8_t)3) +#define FTDI_REQUEST_DATA ((uint8_t)4) +#define FTDI_REQUEST_LATENCY ((uint8_t)9) + +#define FTDI_VALUE_RESET 0 +#define FTDI_VALUE_PURGE_RX 1 +#define FTDI_VALUE_PURGE_TX 2 +#define FTDI_VALUE_LATENCY 1 + +// Baud +#define FTDI_VALUE_BAUD_BFL 0xc068 +#define FTDI_INDEX_BAUD_BFL 0x0200 +#define FTDI_VALUE_BAUD_BAS FTDI_VALUE_BAUD_BFL +#define FTDI_INDEX_BAUD_BAS FTDI_INDEX_BAUD_BFL +// LLT = BLT (same code) +#define FTDI_VALUE_BAUD_BLT 0x001a +#define FTDI_INDEX_BAUD_BLT 0x0000 + +// Avalon +#define FTDI_VALUE_BAUD_AVA 0x001A +#define FTDI_INDEX_BAUD_AVA 0x0000 + +#define FTDI_VALUE_DATA_AVA 8 + +// Bitmain +#define FTDI_VALUE_BAUD_BTM 0x001A +#define FTDI_INDEX_BAUD_BTM 0x0000 + +#define FTDI_VALUE_DATA_BTM 8 + +// BitBurner +#define BITBURNER_REQUEST ((uint8_t)0x42) +#define BITBURNER_VALUE 0x4242 +#define BITBURNER_INDEX_SET_VOLTAGE 1 +#define BITBURNER_INDEX_GET_VOLTAGE 2 +#define BITBURNER_INDEX_GET_VERSION 4 + +// CMR = 115200 & 57600 +#define FTDI_VALUE_BAUD_CMR_115 0xc068 +#define FTDI_INDEX_BAUD_CMR_115 0x0200 + +#define FTDI_VALUE_BAUD_CMR_57 0x80d0 +#define FTDI_INDEX_BAUD_CMR_57 0x0200 + +// Data control +#define FTDI_VALUE_DATA_BFL 0 +#define FTDI_VALUE_DATA_BAS FTDI_VALUE_DATA_BFL +// LLT = BLT (same code) +#define FTDI_VALUE_DATA_BLT 8 + +#define FTDI_VALUE_FLOW 0 +#define FTDI_VALUE_MODEM 0x0303 + + +// For 0x10c4:0xea60 USB cp210x chip - AMU +#define CP210X_TYPE_OUT 0x41 + +#define CP210X_REQUEST_IFC_ENABLE 0x00 +#define CP210X_REQUEST_DATA 0x07 +#define CP210X_REQUEST_BAUD 0x1e + +#define CP210X_VALUE_UART_ENABLE 0x0001 +#define CP210X_VALUE_DATA 0x0303 +#define 
CP210X_DATA_BAUD 0x0001c200 + +#define CP210X_SET_LINE_CTL 0x03 +#define CP210X_BITS_DATA_MASK 0x0f00 +#define CP210X_BITS_DATA_8 0x0800 +#define CP210X_BITS_PARITY_MARK 0x0030 +#define CP210X_BITS_PARITY_SPACE 0x0040 + + +// For 0x067b:0x2303 Prolific PL2303 - ICA +#define PL2303_CTRL_DTR 0x01 +#define PL2303_CTRL_RTS 0x02 + +#define PL2303_CTRL_OUT 0x21 +#define PL2303_VENDOR_OUT 0x40 + +#define PL2303_REQUEST_CTRL 0x22 +#define PL2303_REQUEST_LINE 0x20 +#define PL2303_REQUEST_VENDOR 0x01 + +#define PL2303_REPLY_CTRL 0x21 + +#define PL2303_VALUE_CTRL (PL2303_CTRL_DTR | PL2303_CTRL_RTS) +#define PL2303_VALUE_LINE 0 +#define PL2303_VALUE_LINE0 0x0001c200 +#define PL2303_VALUE_LINE1 0x080000 +#define PL2303_VALUE_LINE_SIZE 7 +#define PL2303_VALUE_VENDOR 0 + +// Use the device defined timeout +#define DEVTIMEOUT 0 + +// The default intinfo structure used is the first one +#define DEFAULT_INTINFO 0 + +// For endpoints defined in usb_find_devices.intinfos.epinfos, +// the first two must be the default IN and OUT and both must always exist +#define DEFAULT_EP_IN 0 +#define DEFAULT_EP_OUT 1 + +struct usb_epinfo { + uint8_t att; + uint16_t size; + unsigned char ep; + uint16_t wMaxPacketSize; + bool found; +}; + +struct usb_intinfo { + int interface; + int ctrl_transfer; + int epinfo_count; + struct usb_epinfo *epinfos; +}; + +enum sub_ident { + IDENT_UNK = 0, + IDENT_AMU, + IDENT_ANU, + IDENT_BMM, + IDENT_BMS, + IDENT_AU3, + IDENT_AVA, + IDENT_AV2, + IDENT_AV4, + IDENT_BAJ, + IDENT_BAL, + IDENT_BAM, + IDENT_BAS, + IDENT_BBF, + IDENT_BET, + IDENT_BF1, + IDENT_BFL, + IDENT_BLT, + IDENT_BMA, + IDENT_BTB, + IDENT_BXF, + IDENT_BXM, + IDENT_CMR1, + IDENT_CMR2, + IDENT_CTA, + IDENT_DRB, + IDENT_HFA, + IDENT_HRO, + IDENT_ICA, + IDENT_KLN, + IDENT_LIN, + IDENT_LLT, + IDENT_MMQ, + IDENT_NFU, + IDENT_OSM +}; + +struct usb_find_devices { + int drv; + const char *name; + enum sub_ident ident; + uint16_t idVendor; + uint16_t idProduct; + char *iManufacturer; + char *iProduct; + int 
config; + unsigned int timeout; + uint16_t latency; + int intinfo_count; + struct usb_intinfo *intinfos; +}; + +/* Latency is set to 32ms to prevent a transfer ever being more than 512 bytes + * +2 bytes of status such as the ftdi chip, when the chips emulate a 115200 + * baud rate, to avoid status bytes being interleaved in larger transfers. */ +#define LATENCY_UNUSED 0 +#define LATENCY_STD 32 + +enum usb_types { + USB_TYPE_STD = 0, + USB_TYPE_FTDI +}; + +#define USB_MAX_READ 8192 +/* + * We add 4: 1 for null, 2 for FTDI status and 1 to round to 4 bytes + * If a single device ever has multiple end points then it will need + * multiple of these + */ +#define USB_READ_BUFSIZE (USB_MAX_READ + 4) + +struct cg_usb_device { + struct usb_find_devices *found; + libusb_device_handle *handle; + pthread_mutex_t *mutex; + struct libusb_device_descriptor *descriptor; + enum usb_types usb_type; + enum sub_ident ident; + uint16_t usbver; + char *prod_string; + char *manuf_string; + char *serial_string; + unsigned char fwVersion; // ?? + unsigned char interfaceVersion; // ?? + char buffer[USB_MAX_READ]; + uint32_t bufsiz; + uint32_t bufamt; + bool usb11; // USB 1.1 flag for convenience + bool tt; // Enable the transaction translator +}; + +#define USB_NOSTAT 0 + +#define USB_TMO_0 50 +#define USB_TMO_1 100 +#define USB_TMO_2 500 +#define USB_TMOS 3 + +struct cg_usb_tmo { + uint32_t count; + uint32_t min_tmo; + uint32_t max_tmo; + uint64_t total_over; + uint64_t total_tmo; +}; + +struct cg_usb_info { + uint8_t bus_number; + uint8_t device_address; + int usbstat; + bool nodev; + bool initialised; + int nodev_count; + struct timeval last_nodev; + uint32_t ioerr_count; + uint32_t continuous_ioerr_count; + + /* + * for nodev and cgusb access (read and write) + * it's a pointer so MMQ can have it in multiple devices + * + * N.B. 
general mining code doesn't need to use the read + * lock for 'nodev' if it calls a usb_read/write/etc function + * that uses the lock - however, all usbutils code MUST use it + * to avoid devices disappearing while in use by multiple threads + */ + cglock_t devlock; + + time_t last_pipe; + uint64_t pipe_count; + uint64_t clear_err_count; + uint64_t retry_err_count; + uint64_t clear_fail_count; + + uint64_t read_delay_count; + double total_read_delay; + uint64_t write_delay_count; + double total_write_delay; + + uint64_t tmo_count; + struct cg_usb_tmo usb_tmo[USB_TMOS]; +}; + +#define ENUMERATION(a,b) a, +#define JUMPTABLE(a,b) b, + +#define USB_PARSE_COMMANDS(USB_ADD_COMMAND) \ + USB_ADD_COMMAND(C_REJECTED, "RejectedNoDevice") \ + USB_ADD_COMMAND(C_PING, "Ping") \ + USB_ADD_COMMAND(C_CLEAR, "Clear") \ + USB_ADD_COMMAND(C_REQUESTVERSION, "RequestVersion") \ + USB_ADD_COMMAND(C_GETVERSION, "GetVersion") \ + USB_ADD_COMMAND(C_REQUESTFPGACOUNT, "RequestFPGACount") \ + USB_ADD_COMMAND(C_GETFPGACOUNT, "GetFPGACount") \ + USB_ADD_COMMAND(C_STARTPROGRAM, "StartProgram") \ + USB_ADD_COMMAND(C_STARTPROGRAMSTATUS, "StartProgramStatus") \ + USB_ADD_COMMAND(C_PROGRAM, "Program") \ + USB_ADD_COMMAND(C_PROGRAMSTATUS, "ProgramStatus") \ + USB_ADD_COMMAND(C_PROGRAMSTATUS2, "ProgramStatus2") \ + USB_ADD_COMMAND(C_FINALPROGRAMSTATUS, "FinalProgramStatus") \ + USB_ADD_COMMAND(C_SETCLOCK, "SetClock") \ + USB_ADD_COMMAND(C_SETPARITY, "SetParity") \ + USB_ADD_COMMAND(C_REPLYSETCLOCK, "ReplySetClock") \ + USB_ADD_COMMAND(C_SETVOLT, "SetVolt") \ + USB_ADD_COMMAND(C_REPLYSETVOLT, "ReplySetVolt") \ + USB_ADD_COMMAND(C_REQUESTUSERCODE, "RequestUserCode") \ + USB_ADD_COMMAND(C_GETUSERCODE, "GetUserCode") \ + USB_ADD_COMMAND(C_REQUESTTEMPERATURE, "RequestTemperature") \ + USB_ADD_COMMAND(C_GETTEMPERATURE, "GetTemperature") \ + USB_ADD_COMMAND(C_SENDWORK, "SendWork") \ + USB_ADD_COMMAND(C_SENDWORKSTATUS, "SendWorkStatus") \ + USB_ADD_COMMAND(C_REQUESTWORKSTATUS, "RequestWorkStatus") \ + 
USB_ADD_COMMAND(C_GETWORKSTATUS, "GetWorkStatus") \ + USB_ADD_COMMAND(C_REQUESTIDENTIFY, "RequestIdentify") \ + USB_ADD_COMMAND(C_GETIDENTIFY, "GetIdentify") \ + USB_ADD_COMMAND(C_REQUESTFLASH, "RequestFlash") \ + USB_ADD_COMMAND(C_REQUESTSENDWORK, "RequestSendWork") \ + USB_ADD_COMMAND(C_REQUESTSENDWORKSTATUS, "RequestSendWorkStatus") \ + USB_ADD_COMMAND(C_RESET, "Reset") \ + USB_ADD_COMMAND(C_SETBAUD, "SetBaud") \ + USB_ADD_COMMAND(C_SETDATA, "SetDataCtrl") \ + USB_ADD_COMMAND(C_SETFLOW, "SetFlowCtrl") \ + USB_ADD_COMMAND(C_SETMODEM, "SetModemCtrl") \ + USB_ADD_COMMAND(C_PURGERX, "PurgeRx") \ + USB_ADD_COMMAND(C_PURGETX, "PurgeTx") \ + USB_ADD_COMMAND(C_FLASHREPLY, "FlashReply") \ + USB_ADD_COMMAND(C_REQUESTDETAILS, "RequestDetails") \ + USB_ADD_COMMAND(C_GETDETAILS, "GetDetails") \ + USB_ADD_COMMAND(C_REQUESTRESULTS, "RequestResults") \ + USB_ADD_COMMAND(C_GETRESULTS, "GetResults") \ + USB_ADD_COMMAND(C_REQUESTQUEJOB, "RequestQueJob") \ + USB_ADD_COMMAND(C_REQUESTQUEJOBSTATUS, "RequestQueJobStatus") \ + USB_ADD_COMMAND(C_QUEJOB, "QueJob") \ + USB_ADD_COMMAND(C_QUEJOBSTATUS, "QueJobStatus") \ + USB_ADD_COMMAND(C_QUEFLUSH, "QueFlush") \ + USB_ADD_COMMAND(C_QUEFLUSHREPLY, "QueFlushReply") \ + USB_ADD_COMMAND(C_REQUESTVOLTS, "RequestVolts") \ + USB_ADD_COMMAND(C_GETVOLTS, "GetVolts") \ + USB_ADD_COMMAND(C_SENDTESTWORK, "SendTestWork") \ + USB_ADD_COMMAND(C_LATENCY, "SetLatency") \ + USB_ADD_COMMAND(C_SETLINE, "SetLine") \ + USB_ADD_COMMAND(C_VENDOR, "Vendor") \ + USB_ADD_COMMAND(C_SETFAN, "SetFan") \ + USB_ADD_COMMAND(C_FANREPLY, "GetFan") \ + USB_ADD_COMMAND(C_AVALON_TASK, "AvalonTask") \ + USB_ADD_COMMAND(C_AVALON_READ, "AvalonRead") \ + USB_ADD_COMMAND(C_GET_AVALON_READY, "AvalonReady") \ + USB_ADD_COMMAND(C_AVALON_RESET, "AvalonReset") \ + USB_ADD_COMMAND(C_GET_AVALON_RESET, "GetAvalonReset") \ + USB_ADD_COMMAND(C_FTDI_STATUS, "FTDIStatus") \ + USB_ADD_COMMAND(C_ENABLE_UART, "EnableUART") \ + USB_ADD_COMMAND(C_ANU_SEND_CMD, "ANUSendcmd") \ + 
USB_ADD_COMMAND(C_ANU_SEND_RDREG, "ANUSendrdreg") \ + USB_ADD_COMMAND(C_ANU_SEND_VOLT, "ANUSendvolt") \ + USB_ADD_COMMAND(C_BB_SET_VOLTAGE, "SetCoreVoltage") \ + USB_ADD_COMMAND(C_BB_GET_VOLTAGE, "GetCoreVoltage") \ + USB_ADD_COMMAND(C_BF_RESET, "BFReset") \ + USB_ADD_COMMAND(C_BF_OPEN, "BFOpen") \ + USB_ADD_COMMAND(C_BF_INIT, "BFInit") \ + USB_ADD_COMMAND(C_BF_CLOSE, "BFClose") \ + USB_ADD_COMMAND(C_BF_REQINFO, "BFRequestInfo") \ + USB_ADD_COMMAND(C_BF_GETINFO, "BFGetInfo") \ + USB_ADD_COMMAND(C_BF_REQRESET, "BFRequestReset") \ + USB_ADD_COMMAND(C_BF_GETRESET, "BFGetReset") \ + USB_ADD_COMMAND(C_BF_REQWORK, "BFRequestWork") \ + USB_ADD_COMMAND(C_BF_GETWORK, "BFGetWork") \ + USB_ADD_COMMAND(C_BF_GETRES, "BFGetResults") \ + USB_ADD_COMMAND(C_BF_FLUSH, "BFFlush") \ + USB_ADD_COMMAND(C_BF_IFLUSH, "BFInterruptFlush") \ + USB_ADD_COMMAND(C_BF_IDENTIFY, "BFIdentify") \ + USB_ADD_COMMAND(C_BF_DETECTCHIPS, "BFDetectChips") \ + USB_ADD_COMMAND(C_BF_CONFIG, "BFConfig") \ + USB_ADD_COMMAND(C_BF_GETTEMP, "BFGetTemp") \ + USB_ADD_COMMAND(C_BF_AUTOTUNE, "BFAutoTune") \ + USB_ADD_COMMAND(C_ATMEL_RESET, "AtmelReset") \ + USB_ADD_COMMAND(C_ATMEL_OPEN, "AtmelOpen") \ + USB_ADD_COMMAND(C_ATMEL_INIT, "AtmelInit") \ + USB_ADD_COMMAND(C_ATMEL_CLOSE, "AtmelClose") \ + USB_ADD_COMMAND(C_AVA2_READ, "Ava2Read") \ + USB_ADD_COMMAND(C_AVA2_WRITE, "Ava2Write") \ + USB_ADD_COMMAND(C_AVA4_READ, "Ava4Read") \ + USB_ADD_COMMAND(C_AVA4_WRITE, "Ava4Write") \ + USB_ADD_COMMAND(C_BET_WRITE, "BlockErupterWrite") \ + USB_ADD_COMMAND(C_BET_READ, "BlockErupterRead") \ + USB_ADD_COMMAND(C_BF1_REQINFO, "BF1RequestInfo") \ + USB_ADD_COMMAND(C_BF1_GETINFO, "BF1GetInfo") \ + USB_ADD_COMMAND(C_BF1_REQRESET, "BF1RequestReset") \ + USB_ADD_COMMAND(C_BF1_GETRESET, "BF1GetReset") \ + USB_ADD_COMMAND(C_BF1_REQWORK, "BF1RequestWork") \ + USB_ADD_COMMAND(C_BF1_GETWORK, "BF1GetWork") \ + USB_ADD_COMMAND(C_BF1_GETRES, "BF1GetResults") \ + USB_ADD_COMMAND(C_BF1_FLUSH, "BF1Flush") \ + USB_ADD_COMMAND(C_BF1_IFLUSH, 
"BF1InterruptFlush") \ + USB_ADD_COMMAND(C_BF1_IDENTIFY, "BF1Identify") \ + USB_ADD_COMMAND(C_BXF_READ, "BXFRead") \ + USB_ADD_COMMAND(C_BXF_WORK, "BXFWork") \ + USB_ADD_COMMAND(C_BXF_TARGET, "BXFTarget") \ + USB_ADD_COMMAND(C_BXF_VERSION, "BXFVersion") \ + USB_ADD_COMMAND(C_BXF_MAXROLL, "BXFMaxRoll") \ + USB_ADD_COMMAND(C_BXF_FLUSH, "BXFFlush") \ + USB_ADD_COMMAND(C_BXF_CLOCK, "BXFClock") \ + USB_ADD_COMMAND(C_BXF_LEDMODE, "BXFLedMode") \ + USB_ADD_COMMAND(C_BXF_DEBUGMODE, "BXFDebugMode") \ + USB_ADD_COMMAND(C_BXM_FLUSH, "BXMFlush") \ + USB_ADD_COMMAND(C_BXM_SRESET, "BXMSReset") \ + USB_ADD_COMMAND(C_BXM_SETLATENCY, "BXMSetLatency") \ + USB_ADD_COMMAND(C_BXM_SECR, "BXMSetEventCharRequest") \ + USB_ADD_COMMAND(C_BXM_SETBITMODE, "BXMSetBitmodeRequest") \ + USB_ADD_COMMAND(C_BXM_CLOCK, "BXMClock") \ + USB_ADD_COMMAND(C_BXM_CLOCKDIV, "BXMClockDiv") \ + USB_ADD_COMMAND(C_BXM_LOOP, "BXMLoop") \ + USB_ADD_COMMAND(C_BXM_ADBUS, "BXMADBus") \ + USB_ADD_COMMAND(C_BXM_ACBUS, "BXMACBus") \ + USB_ADD_COMMAND(C_BXM_PURGERX, "BXMPurgeRX") \ + USB_ADD_COMMAND(C_BXM_PURGETX, "BXMPurgeTX") \ + USB_ADD_COMMAND(C_BXM_CSLOW, "BXMCSLow") \ + USB_ADD_COMMAND(C_BXM_CSHIGH, "BXMCSHigh") \ + USB_ADD_COMMAND(C_BXM_RESET, "BXMReset") \ + USB_ADD_COMMAND(C_BXM_SPITX, "BXMSPITX") \ + USB_ADD_COMMAND(C_BXM_SPIRX, "BXMSPIRX") \ + USB_ADD_COMMAND(C_HF_RESET, "HFReset") \ + USB_ADD_COMMAND(C_HF_PLL_CONFIG, "HFPLLConfig") \ + USB_ADD_COMMAND(C_HF_ADDRESS, "HFAddress") \ + USB_ADD_COMMAND(C_HF_BAUD, "HFBaud") \ + USB_ADD_COMMAND(C_HF_HASH, "HFHash") \ + USB_ADD_COMMAND(C_HF_NONCE, "HFNonce") \ + USB_ADD_COMMAND(C_HF_ABORT, "HFAbort") \ + USB_ADD_COMMAND(C_HF_STATUS, "HFStatus") \ + USB_ADD_COMMAND(C_HF_CONFIG, "HFConfig") \ + USB_ADD_COMMAND(C_HF_STATISTICS, "HFStatistics") \ + USB_ADD_COMMAND(C_HF_CLOCKGATE, "HFClockGate") \ + USB_ADD_COMMAND(C_HF_USB_INIT, "HFUSBInit") \ + USB_ADD_COMMAND(C_HF_DFU, "HFDFU") \ + USB_ADD_COMMAND(C_HF_DIE_STATUS, "HFDieStatus") \ + USB_ADD_COMMAND(C_HF_GWQ_STATUS, 
"HFGWQStatus") \ + USB_ADD_COMMAND(C_HF_WORK_RESTART, "HFWorkRestart") \ + USB_ADD_COMMAND(C_HF_GWQSTATS, "HFGWQStats") \ + USB_ADD_COMMAND(C_HF_NOTICE, "HFNotice") \ + USB_ADD_COMMAND(C_HF_PING, "HFPing") \ + USB_ADD_COMMAND(C_HF_FAN, "HFFan") \ + USB_ADD_COMMAND(C_HRO_WRITE, "HROWrite") \ + USB_ADD_COMMAND(C_HRO_READ, "HRORead") \ + USB_ADD_COMMAND(C_OP_NAME, "HFName") \ + USB_ADD_COMMAND(C_HF_GETHEADER, "HFGetHeader") \ + USB_ADD_COMMAND(C_HF_GETDATA, "HFGetData") \ + USB_ADD_COMMAND(C_HF_CLEAR_READ, "HFClearRead") \ + USB_ADD_COMMAND(C_CTA_READ, "CTARead") \ + USB_ADD_COMMAND(C_CTA_WRITE, "CTAWrite") \ + USB_ADD_COMMAND(C_MCP_GETGPIOSETTING, "MCPGetGPIOSetting") \ + USB_ADD_COMMAND(C_MCP_SETGPIOSETTING, "MCPSetGPIOSetting") \ + USB_ADD_COMMAND(C_MCP_GETGPIOPINVAL, "MCPGetGPIOPinVal") \ + USB_ADD_COMMAND(C_MCP_SETGPIOPINVAL, "MCPSetGPIOPinVal") \ + USB_ADD_COMMAND(C_MCP_GETGPIOPINDIR, "MCPGetGPIOPinDir") \ + USB_ADD_COMMAND(C_MCP_SETGPIOPINDIR, "MCPSetGPIOPinDir") \ + USB_ADD_COMMAND(C_MCP_SETSPISETTING, "MCPSetSPISetting") \ + USB_ADD_COMMAND(C_MCP_GETSPISETTING, "MCPGetSPISetting") \ + USB_ADD_COMMAND(C_MCP_SPITRANSFER, "MCPSPITransfer") \ + USB_ADD_COMMAND(C_MCP_SPICANCEL, "MCPSPICancel") \ + USB_ADD_COMMAND(C_BITMAIN_SEND, "BitmainSend") \ + USB_ADD_COMMAND(C_BITMAIN_READ, "BitmainRead") \ + USB_ADD_COMMAND(C_BITMAIN_TOKEN_TXCONFIG, "BitmainTokenTxConfig") \ + USB_ADD_COMMAND(C_BITMAIN_TOKEN_TXTASK, "BitmainTokenTxTask") \ + USB_ADD_COMMAND(C_BITMAIN_TOKEN_RXSTATUS, "BitmainTokenRxStatus") \ + USB_ADD_COMMAND(C_BITMAIN_DATA_RXSTATUS, "BitmainDataRxStatus") \ + USB_ADD_COMMAND(C_BITMAIN_DATA_RXNONCE, "BitmainDataRxNonce") + +/* Create usb_cmds enum from USB_PARSE_COMMANDS macro */ +enum usb_cmds { + USB_PARSE_COMMANDS(ENUMERATION) + C_MAX +}; + +struct device_drv; +struct cgpu_info; + +#ifdef USE_BITMAIN +struct cgpu_info *btm_alloc_cgpu(struct device_drv *drv, int threads); +struct cgpu_info *btm_free_cgpu(struct cgpu_info *cgpu); +void btm_uninit(struct 
cgpu_info *cgpu); +bool btm_init(struct cgpu_info *cgpu, const char * devpath); +void btm_detect(struct device_drv *drv, bool (*device_detect)(const char*)); +int btm_read(struct cgpu_info *cgpu, char *buf, size_t bufsize); +int btm_write(struct cgpu_info *cgpu, char *buf, size_t bufsize); +#endif + +bool async_usb_transfers(void); +void cancel_usb_transfers(void); +void usb_all(int level); +void usb_list(void); +const char *usb_cmdname(enum usb_cmds cmd); +void usb_applog(struct cgpu_info *cgpu, enum usb_cmds cmd, char *msg, int amount, int err); +void blacklist_cgpu(struct cgpu_info *cgpu); +void whitelist_cgpu(struct cgpu_info *cgpu); +void usb_nodev(struct cgpu_info *cgpu); +struct cgpu_info *usb_copy_cgpu(struct cgpu_info *orig); +struct cgpu_info *usb_alloc_cgpu(struct device_drv *drv, int threads); +struct cgpu_info *usb_free_cgpu(struct cgpu_info *cgpu); +void usb_uninit(struct cgpu_info *cgpu); +bool usb_init(struct cgpu_info *cgpu, struct libusb_device *dev, struct usb_find_devices *found); +void __usb_detect(struct device_drv *drv, struct cgpu_info *(*device_detect)(struct libusb_device *, struct usb_find_devices *), + bool single); +#define usb_detect(drv, cgpu) __usb_detect(drv, cgpu, false) +#define usb_detect_one(drv, cgpu) __usb_detect(drv, cgpu, true) +struct api_data *api_usb_stats(int *count); +void update_usb_stats(struct cgpu_info *cgpu); +void usb_reset(struct cgpu_info *cgpu); +int _usb_read(struct cgpu_info *cgpu, int intinfo, int epinfo, char *buf, size_t bufsiz, int *processed, int timeout, const char *end, enum usb_cmds cmd, bool readonce, bool cancellable); +int _usb_write(struct cgpu_info *cgpu, int intinfo, int epinfo, char *buf, size_t bufsiz, int *processed, int timeout, enum usb_cmds); +int _usb_transfer(struct cgpu_info *cgpu, uint8_t request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, uint32_t *data, int siz, unsigned int timeout, enum usb_cmds cmd); +int _usb_transfer_read(struct cgpu_info *cgpu, uint8_t 
request_type, uint8_t bRequest, uint16_t wValue, uint16_t wIndex, char *buf, int bufsiz, int *amount, unsigned int timeout, enum usb_cmds cmd); +int usb_ftdi_cts(struct cgpu_info *cgpu); +int _usb_ftdi_set_latency(struct cgpu_info *cgpu, int intinfo); +#define usb_ftdi_set_latency(_cgpu) _usb_ftdi_set_latency(_cgpu, DEFAULT_INTINFO) +void usb_buffer_clear(struct cgpu_info *cgpu); +uint32_t usb_buffer_size(struct cgpu_info *cgpu); +int _usb_interface(struct cgpu_info *cgpu, int intinfo); +#define usb_interface(_cgpu) _usb_interface(_cgpu, DEFAULT_INTINFO) +enum sub_ident usb_ident(struct cgpu_info *cgpu); +void usb_set_dev_start(struct cgpu_info *cgpu); +void usb_cleanup(); +void usb_initialise(); +void *usb_resource_thread(void *userdata); +void initialise_usblocks(void); + +#define usb_read(cgpu, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, DEVTIMEOUT, NULL, cmd, false, false) + +#define usb_read_cancellable(cgpu, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, DEVTIMEOUT, NULL, cmd, false, true) + +#define usb_read_ii(cgpu, intinfo, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, intinfo, DEFAULT_EP_IN, buf, bufsiz, read, DEVTIMEOUT, NULL, cmd, false, false) + +#define usb_read_once(cgpu, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, DEVTIMEOUT, NULL, cmd, true, false) + +#define usb_read_ii_once(cgpu, intinfo, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, intinfo, DEFAULT_EP_IN, buf, bufsiz, read, DEVTIMEOUT, NULL, cmd, true, false) + +#define usb_read_once_timeout(cgpu, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, timeout, NULL, cmd, true, false) + +#define usb_read_once_timeout_cancellable(cgpu, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, timeout, NULL, cmd, true, true) + +#define 
usb_read_ii_once_timeout(cgpu, intinfo, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, intinfo, DEFAULT_EP_IN, buf, bufsiz, read, timeout, NULL, cmd, true, false) + +#define usb_read_nl(cgpu, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, DEVTIMEOUT, "\n", cmd, false, false) + +#define usb_read_nl_timeout(cgpu, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, timeout, "\n", cmd, false, false) + +#define usb_read_ok(cgpu, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, DEVTIMEOUT, "OK\n", cmd, false, false) + +#define usb_read_ok_timeout(cgpu, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, timeout, "OK\n", cmd, false, false) + +#define usb_read_ep(cgpu, ep, buf, bufsiz, read, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, ep, buf, bufsiz, read, DEVTIMEOUT, NULL, cmd, false, false) + +#define usb_read_timeout(cgpu, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, timeout, NULL, cmd, false, false) + +#define usb_read_timeout_cancellable(cgpu, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, DEFAULT_EP_IN, buf, bufsiz, read, timeout, NULL, cmd, false, true) + +#define usb_read_ii_timeout(cgpu, intinfo, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, intinfo, DEFAULT_EP_IN, buf, bufsiz, read, timeout, NULL, cmd, false, false) + +#define usb_read_ii_timeout_cancellable(cgpu, intinfo, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, intinfo, DEFAULT_EP_IN, buf, bufsiz, read, timeout, NULL, cmd, false, true) + +#define usb_read_ep_timeout(cgpu, ep, buf, bufsiz, read, timeout, cmd) \ + _usb_read(cgpu, DEFAULT_INTINFO, ep, buf, bufsiz, read, timeout, NULL, cmd, false, false) + +#define usb_write(cgpu, buf, bufsiz, wrote, cmd) \ + _usb_write(cgpu, DEFAULT_INTINFO, DEFAULT_EP_OUT, buf, 
bufsiz, wrote, DEVTIMEOUT, cmd) + +#define usb_write_ii(cgpu, intinfo, buf, bufsiz, wrote, cmd) \ + _usb_write(cgpu, intinfo, DEFAULT_EP_OUT, buf, bufsiz, wrote, DEVTIMEOUT, cmd) + +#define usb_write_ep(cgpu, ep, buf, bufsiz, wrote, cmd) \ + _usb_write(cgpu, DEFAULT_INTINFO, ep, buf, bufsiz, wrote, DEVTIMEOUT, cmd) + +#define usb_write_timeout(cgpu, buf, bufsiz, wrote, timeout, cmd) \ + _usb_write(cgpu, DEFAULT_INTINFO, DEFAULT_EP_OUT, buf, bufsiz, wrote, timeout, cmd) + +#define usb_write_ep_timeout(cgpu, ep, buf, bufsiz, wrote, timeout, cmd) \ + _usb_write(cgpu, DEFAULT_INTINFO, ep, buf, bufsiz, wrote, timeout, cmd) + +#define usb_transfer(cgpu, typ, req, val, idx, cmd) \ + _usb_transfer(cgpu, typ, req, val, idx, NULL, 0, DEVTIMEOUT, cmd) + +#define usb_transfer_data(cgpu, typ, req, val, idx, data, len, cmd) \ + _usb_transfer(cgpu, typ, req, val, idx, data, len, DEVTIMEOUT, cmd) + +#define usb_transfer_read(cgpu, typ, req, val, idx, buf, bufsiz, read, cmd) \ + _usb_transfer_read(cgpu, typ, req, val, idx, buf, bufsiz, read, DEVTIMEOUT, cmd) + +#endif diff --git a/uthash.h b/uthash.h new file mode 100644 index 0000000..72acf11 --- /dev/null +++ b/uthash.h @@ -0,0 +1,948 @@ +/* +Copyright (c) 2003-2013, Troy D. Hanson http://troydhanson.github.com/uthash/ +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef UTHASH_H +#define UTHASH_H + +#include /* memcmp,strlen */ +#include /* ptrdiff_t */ +#include /* exit() */ + +/* These macros use decltype or the earlier __typeof GNU extension. + As decltype is only available in newer compilers (VS2010 or gcc 4.3+ + when compiling c++ source) this code uses whatever method is needed + or, for VS2008 where neither is available, uses casting workarounds. */ +#ifdef _MSC_VER /* MS compiler */ +#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ +#define DECLTYPE(x) (decltype(x)) +#else /* VS2008 or older (or VS2010 in C mode) */ +#define NO_DECLTYPE +#define DECLTYPE(x) +#endif +#else /* GNU, Sun and other compilers */ +#define DECLTYPE(x) (__typeof(x)) +#endif + +#ifdef NO_DECLTYPE +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + char **_da_dst = (char**)(&(dst)); \ + *_da_dst = (char*)(src); \ +} while(0) +#else +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + (dst) = DECLTYPE(dst)(src); \ +} while(0) +#endif + +/* a number of the hash function use uint32_t which isn't defined on win32 */ +#ifdef _MSC_VER +typedef unsigned int uint32_t; +typedef unsigned char uint8_t; +#else +#include /* uint32_t */ +#endif + +#define UTHASH_VERSION 1.9.8 + +#ifndef uthash_fatal +#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */ +#endif +#ifndef uthash_malloc +#define uthash_malloc(sz) malloc(sz) /* malloc fcn */ +#endif +#ifndef uthash_free +#define uthash_free(ptr,sz) 
free(ptr) /* free fcn */ +#endif + +#ifndef uthash_noexpand_fyi +#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ +#endif +#ifndef uthash_expand_fyi +#define uthash_expand_fyi(tbl) /* can be defined to log expands */ +#endif + +/* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */ +#define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */ + +/* calculate the element whose hash handle address is hhe */ +#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) + +#define HASH_FIND(hh,head,keyptr,keylen,out) \ +do { \ + unsigned _hf_bkt,_hf_hashv; \ + out=NULL; \ + if (head) { \ + HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \ + if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \ + HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \ + keyptr,keylen,out); \ + } \ + } \ +} while (0) + +#ifdef HASH_BLOOM +#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM) +#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 
1:0) +#define HASH_BLOOM_MAKE(tbl) \ +do { \ + (tbl)->bloom_nbits = HASH_BLOOM; \ + (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ + if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \ + memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \ + (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ +} while (0) + +#define HASH_BLOOM_FREE(tbl) \ +do { \ + uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ +} while (0) + +#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8))) +#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8))) + +#define HASH_BLOOM_ADD(tbl,hashv) \ + HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1))) + +#define HASH_BLOOM_TEST(tbl,hashv) \ + HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1))) + +#else +#define HASH_BLOOM_MAKE(tbl) +#define HASH_BLOOM_FREE(tbl) +#define HASH_BLOOM_ADD(tbl,hashv) +#define HASH_BLOOM_TEST(tbl,hashv) (1) +#define HASH_BLOOM_BYTELEN 0 +#endif + +#define HASH_MAKE_TABLE(hh,head) \ +do { \ + (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \ + sizeof(UT_hash_table)); \ + if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \ + memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \ + (head)->hh.tbl->tail = &((head)->hh); \ + (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ + (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ + (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ + (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + if (! 
(head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \ + memset((head)->hh.tbl->buckets, 0, \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + HASH_BLOOM_MAKE((head)->hh.tbl); \ + (head)->hh.tbl->signature = HASH_SIGNATURE; \ +} while(0) + +#define HASH_ADD(hh,head,fieldname,keylen_in,add) \ + HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add) + +#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ +do { \ + replaced=NULL; \ + HASH_FIND(hh,head,&((add)->fieldname),keylen_in,replaced); \ + if (replaced!=NULL) { \ + HASH_DELETE(hh,head,replaced); \ + }; \ + HASH_ADD(hh,head,fieldname,keylen_in,add); \ +} while(0) + +#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ +do { \ + unsigned _ha_bkt; \ + (add)->hh.next = NULL; \ + (add)->hh.key = (char*)(keyptr); \ + (add)->hh.keylen = (unsigned)(keylen_in); \ + if (!(head)) { \ + head = (add); \ + (head)->hh.prev = NULL; \ + HASH_MAKE_TABLE(hh,head); \ + } else { \ + (head)->hh.tbl->tail->next = (add); \ + (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ + (head)->hh.tbl->tail = &((add)->hh); \ + } \ + (head)->hh.tbl->num_items++; \ + (add)->hh.tbl = (head)->hh.tbl; \ + HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \ + (add)->hh.hashv, _ha_bkt); \ + HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \ + HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \ + HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \ + HASH_FSCK(hh,head); \ +} while(0) + +#define HASH_TO_BKT( hashv, num_bkts, bkt ) \ +do { \ + bkt = ((hashv) & ((num_bkts) - 1)); \ +} while(0) + +/* delete "delptr" from the hash table. + * "the usual" patch-up process for the app-order doubly-linked-list. + * The use of _hd_hh_del below deserves special explanation. 
+ * These used to be expressed using (delptr) but that led to a bug + * if someone used the same symbol for the head and deletee, like + * HASH_DELETE(hh,users,users); + * We want that to work, but by changing the head (users) below + * we were forfeiting our ability to further refer to the deletee (users) + * in the patch-up process. Solution: use scratch space to + * copy the deletee pointer, then the latter references are via that + * scratch pointer rather than through the repointed (users) symbol. + */ +#define HASH_DELETE(hh,head,delptr) \ +do { \ + unsigned _hd_bkt; \ + struct UT_hash_handle *_hd_hh_del; \ + if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \ + uthash_free((head)->hh.tbl->buckets, \ + (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ + HASH_BLOOM_FREE((head)->hh.tbl); \ + uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + head = NULL; \ + } else { \ + _hd_hh_del = &((delptr)->hh); \ + if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \ + (head)->hh.tbl->tail = \ + (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ + (head)->hh.tbl->hho); \ + } \ + if ((delptr)->hh.prev) { \ + ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ + (head)->hh.tbl->hho))->next = (delptr)->hh.next; \ + } else { \ + DECLTYPE_ASSIGN(head,(delptr)->hh.next); \ + } \ + if (_hd_hh_del->next) { \ + ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \ + (head)->hh.tbl->hho))->prev = \ + _hd_hh_del->prev; \ + } \ + HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ + HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ + (head)->hh.tbl->num_items--; \ + } \ + HASH_FSCK(hh,head); \ +} while (0) + + +/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ +#define HASH_FIND_STR(head,findstr,out) \ + HASH_FIND(hh,head,findstr,strlen(findstr),out) +#define HASH_ADD_STR(head,strfield,add) \ + HASH_ADD(hh,head,strfield,strlen(add->strfield),add) +#define 
HASH_REPLACE_STR(head,strfield,add,replaced) \ + HASH_REPLACE(hh,head,strfield,strlen(add->strfield),add,replaced) +#define HASH_FIND_INT(head,findint,out) \ + HASH_FIND(hh,head,findint,sizeof(int),out) +#define HASH_ADD_INT(head,intfield,add) \ + HASH_ADD(hh,head,intfield,sizeof(int),add) +#define HASH_REPLACE_INT(head,intfield,add,replaced) \ + HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) +#define HASH_FIND_PTR(head,findptr,out) \ + HASH_FIND(hh,head,findptr,sizeof(void *),out) +#define HASH_ADD_PTR(head,ptrfield,add) \ + HASH_ADD(hh,head,ptrfield,sizeof(void *),add) +#define HASH_REPLACE_PTR(head,ptrfield,add) \ + HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) +#define HASH_DEL(head,delptr) \ + HASH_DELETE(hh,head,delptr) + +/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. + * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. + */ +#ifdef HASH_DEBUG +#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0) +#define HASH_FSCK(hh,head) \ +do { \ + unsigned _bkt_i; \ + unsigned _count, _bkt_count; \ + char *_prev; \ + struct UT_hash_handle *_thh; \ + if (head) { \ + _count = 0; \ + for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \ + _bkt_count = 0; \ + _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ + _prev = NULL; \ + while (_thh) { \ + if (_prev != (char*)(_thh->hh_prev)) { \ + HASH_OOPS("invalid hh_prev %p, actual %p\n", \ + _thh->hh_prev, _prev ); \ + } \ + _bkt_count++; \ + _prev = (char*)(_thh); \ + _thh = _thh->hh_next; \ + } \ + _count += _bkt_count; \ + if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ + HASH_OOPS("invalid bucket count %d, actual %d\n", \ + (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ + } \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("invalid hh item count %d, actual %d\n", \ + (head)->hh.tbl->num_items, _count ); \ + } \ + /* traverse hh in app order; check next/prev 
integrity, count */ \ + _count = 0; \ + _prev = NULL; \ + _thh = &(head)->hh; \ + while (_thh) { \ + _count++; \ + if (_prev !=(char*)(_thh->prev)) { \ + HASH_OOPS("invalid prev %p, actual %p\n", \ + _thh->prev, _prev ); \ + } \ + _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ + _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \ + (head)->hh.tbl->hho) : NULL ); \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("invalid app item count %d, actual %d\n", \ + (head)->hh.tbl->num_items, _count ); \ + } \ + } \ +} while (0) +#else +#define HASH_FSCK(hh,head) +#endif + +/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to + * the descriptor to which this macro is defined for tuning the hash function. + * The app can #include to get the prototype for write(2). */ +#ifdef HASH_EMIT_KEYS +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ +do { \ + unsigned _klen = fieldlen; \ + write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ + write(HASH_EMIT_KEYS, keyptr, fieldlen); \ +} while (0) +#else +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) +#endif + +/* default to Jenkin's hash unless overridden e.g. 
DHASH_FUNCTION=HASH_SAX */ +#ifdef HASH_FUNCTION +#define HASH_FCN HASH_FUNCTION +#else +#define HASH_FCN HASH_JEN +#endif + +/* The Bernstein hash function, used in Perl prior to v5.6 */ +#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _hb_keylen=keylen; \ + char *_hb_key=(char*)(key); \ + (hashv) = 0; \ + while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \ + bkt = (hashv) & (num_bkts-1); \ +} while (0) + + +/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at + * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ +#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _sx_i; \ + char *_hs_key=(char*)(key); \ + hashv = 0; \ + for(_sx_i=0; _sx_i < keylen; _sx_i++) \ + hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ + bkt = hashv & (num_bkts-1); \ +} while (0) + +#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _fn_i; \ + char *_hf_key=(char*)(key); \ + hashv = 2166136261UL; \ + for(_fn_i=0; _fn_i < keylen; _fn_i++) \ + hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \ + bkt = hashv & (num_bkts-1); \ +} while(0) + +#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned _ho_i; \ + char *_ho_key=(char*)(key); \ + hashv = 0; \ + for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ + hashv += _ho_key[_ho_i]; \ + hashv += (hashv << 10); \ + hashv ^= (hashv >> 6); \ + } \ + hashv += (hashv << 3); \ + hashv ^= (hashv >> 11); \ + hashv += (hashv << 15); \ + bkt = hashv & (num_bkts-1); \ +} while(0) + +#define HASH_JEN_MIX(a,b,c) \ +do { \ + a -= b; a -= c; a ^= ( c >> 13 ); \ + b -= c; b -= a; b ^= ( a << 8 ); \ + c -= a; c -= b; c ^= ( b >> 13 ); \ + a -= b; a -= c; a ^= ( c >> 12 ); \ + b -= c; b -= a; b ^= ( a << 16 ); \ + c -= a; c -= b; c ^= ( b >> 5 ); \ + a -= b; a -= c; a ^= ( c >> 3 ); \ + b -= c; b -= a; b ^= ( a << 10 ); \ + c -= a; c -= b; c ^= ( b >> 15 ); \ +} while (0) + +#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned 
_hj_i,_hj_j,_hj_k; \ + unsigned char *_hj_key=(unsigned char*)(key); \ + hashv = 0xfeedbeef; \ + _hj_i = _hj_j = 0x9e3779b9; \ + _hj_k = (unsigned)(keylen); \ + while (_hj_k >= 12) { \ + _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ + + ( (unsigned)_hj_key[2] << 16 ) \ + + ( (unsigned)_hj_key[3] << 24 ) ); \ + _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ + + ( (unsigned)_hj_key[6] << 16 ) \ + + ( (unsigned)_hj_key[7] << 24 ) ); \ + hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ + + ( (unsigned)_hj_key[10] << 16 ) \ + + ( (unsigned)_hj_key[11] << 24 ) ); \ + \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + \ + _hj_key += 12; \ + _hj_k -= 12; \ + } \ + hashv += keylen; \ + switch ( _hj_k ) { \ + case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \ + case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \ + case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \ + case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \ + case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \ + case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \ + case 5: _hj_j += _hj_key[4]; \ + case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \ + case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \ + case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \ + case 1: _hj_i += _hj_key[0]; \ + } \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + bkt = hashv & (num_bkts-1); \ +} while(0) + +/* The Paul Hsieh hash function */ +#undef get16bits +#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ + || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) +#define get16bits(d) (*((const uint16_t *) (d))) +#endif + +#if !defined (get16bits) +#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ + +(uint32_t)(((const uint8_t *)(d))[0]) ) +#endif +#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \ +do { \ + unsigned char *_sfh_key=(unsigned char*)(key); \ + uint32_t _sfh_tmp, _sfh_len = keylen; \ + \ + int _sfh_rem = _sfh_len & 3; \ + _sfh_len >>= 2; \ + hashv = 0xcafebabe; \ + 
\ + /* Main loop */ \ + for (;_sfh_len > 0; _sfh_len--) { \ + hashv += get16bits (_sfh_key); \ + _sfh_tmp = (uint32_t)(get16bits (_sfh_key+2)) << 11 ^ hashv; \ + hashv = (hashv << 16) ^ _sfh_tmp; \ + _sfh_key += 2*sizeof (uint16_t); \ + hashv += hashv >> 11; \ + } \ + \ + /* Handle end cases */ \ + switch (_sfh_rem) { \ + case 3: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 16; \ + hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)] << 18); \ + hashv += hashv >> 11; \ + break; \ + case 2: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 11; \ + hashv += hashv >> 17; \ + break; \ + case 1: hashv += *_sfh_key; \ + hashv ^= hashv << 10; \ + hashv += hashv >> 1; \ + } \ + \ + /* Force "avalanching" of final 127 bits */ \ + hashv ^= hashv << 3; \ + hashv += hashv >> 5; \ + hashv ^= hashv << 4; \ + hashv += hashv >> 17; \ + hashv ^= hashv << 25; \ + hashv += hashv >> 6; \ + bkt = hashv & (num_bkts-1); \ +} while(0) + +#ifdef HASH_USING_NO_STRICT_ALIASING +/* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads. + * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error. + * MurmurHash uses the faster approach only on CPU's where we know it's safe. 
+ * + * Note the preprocessor built-in defines can be emitted using: + * + * gcc -m64 -dM -E - < /dev/null (on gcc) + * cc -## a.c (where a.c is a simple test file) (Sun Studio) + */ +#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86)) +#define MUR_GETBLOCK(p,i) p[i] +#else /* non intel */ +#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 0x3) == 0) +#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 0x3) == 1) +#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 0x3) == 2) +#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 0x3) == 3) +#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL)) +#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__)) +#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24)) +#define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16)) +#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8)) +#else /* assume little endian non-intel */ +#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24)) +#define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16)) +#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8)) +#endif +#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \ + (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \ + (MUR_PLUS2_ALIGNED(p) ? 
MUR_TWO_TWO(p) : \ + MUR_ONE_THREE(p)))) +#endif +#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) +#define MUR_FMIX(_h) \ +do { \ + _h ^= _h >> 16; \ + _h *= 0x85ebca6b; \ + _h ^= _h >> 13; \ + _h *= 0xc2b2ae35l; \ + _h ^= _h >> 16; \ +} while(0) + +#define HASH_MUR(key,keylen,num_bkts,hashv,bkt) \ +do { \ + const uint8_t *_mur_data = (const uint8_t*)(key); \ + const int _mur_nblocks = (keylen) / 4; \ + uint32_t _mur_h1 = 0xf88D5353; \ + uint32_t _mur_c1 = 0xcc9e2d51; \ + uint32_t _mur_c2 = 0x1b873593; \ + uint32_t _mur_k1 = 0; \ + const uint8_t *_mur_tail; \ + const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+_mur_nblocks*4); \ + int _mur_i; \ + for(_mur_i = -_mur_nblocks; _mur_i; _mur_i++) { \ + _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \ + _mur_k1 *= _mur_c1; \ + _mur_k1 = MUR_ROTL32(_mur_k1,15); \ + _mur_k1 *= _mur_c2; \ + \ + _mur_h1 ^= _mur_k1; \ + _mur_h1 = MUR_ROTL32(_mur_h1,13); \ + _mur_h1 = _mur_h1*5+0xe6546b64; \ + } \ + _mur_tail = (const uint8_t*)(_mur_data + _mur_nblocks*4); \ + _mur_k1=0; \ + switch((keylen) & 3) { \ + case 3: _mur_k1 ^= _mur_tail[2] << 16; \ + case 2: _mur_k1 ^= _mur_tail[1] << 8; \ + case 1: _mur_k1 ^= _mur_tail[0]; \ + _mur_k1 *= _mur_c1; \ + _mur_k1 = MUR_ROTL32(_mur_k1,15); \ + _mur_k1 *= _mur_c2; \ + _mur_h1 ^= _mur_k1; \ + } \ + _mur_h1 ^= (keylen); \ + MUR_FMIX(_mur_h1); \ + hashv = _mur_h1; \ + bkt = hashv & (num_bkts-1); \ +} while(0) +#endif /* HASH_USING_NO_STRICT_ALIASING */ + +/* key comparison function; return 0 if keys equal */ +#define HASH_KEYCMP(a,b,len) memcmp(a,b,len) + +/* iterate over items in a known bucket to find desired item */ +#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \ +do { \ + if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \ + else out=NULL; \ + while (out) { \ + if ((out)->hh.keylen == keylen_in) { \ + if ((HASH_KEYCMP((out)->hh.key,keyptr,keylen_in)) == 0) break; \ + } \ + if ((out)->hh.hh_next) 
DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,(out)->hh.hh_next)); \ + else out = NULL; \ + } \ +} while(0) + +/* add an item to a bucket */ +#define HASH_ADD_TO_BKT(head,addhh) \ +do { \ + head.count++; \ + (addhh)->hh_next = head.hh_head; \ + (addhh)->hh_prev = NULL; \ + if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \ + (head).hh_head=addhh; \ + if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \ + && (addhh)->tbl->noexpand != 1) { \ + HASH_EXPAND_BUCKETS((addhh)->tbl); \ + } \ +} while(0) + +/* remove an item from a given bucket */ +#define HASH_DEL_IN_BKT(hh,head,hh_del) \ + (head).count--; \ + if ((head).hh_head == hh_del) { \ + (head).hh_head = hh_del->hh_next; \ + } \ + if (hh_del->hh_prev) { \ + hh_del->hh_prev->hh_next = hh_del->hh_next; \ + } \ + if (hh_del->hh_next) { \ + hh_del->hh_next->hh_prev = hh_del->hh_prev; \ + } + +/* Bucket expansion has the effect of doubling the number of buckets + * and redistributing the items into the new buckets. Ideally the + * items will distribute more or less evenly into the new buckets + * (the extent to which this is true is a measure of the quality of + * the hash function as it applies to the key domain). + * + * With the items distributed into more buckets, the chain length + * (item count) in each bucket is reduced. Thus by expanding buckets + * the hash keeps a bound on the chain length. This bounded chain + * length is the essence of how a hash provides constant time lookup. + * + * The calculation of tbl->ideal_chain_maxlen below deserves some + * explanation. First, keep in mind that we're calculating the ideal + * maximum chain length based on the *new* (doubled) bucket count. + * In fractions this is just n/b (n=number of items,b=new num buckets). + * Since the ideal chain length is an integer, we want to calculate + * ceil(n/b). 
We don't depend on floating point arithmetic in this + * hash, so to calculate ceil(n/b) with integers we could write + * + * ceil(n/b) = (n/b) + ((n%b)?1:0) + * + * and in fact a previous version of this hash did just that. + * But now we have improved things a bit by recognizing that b is + * always a power of two. We keep its base 2 log handy (call it lb), + * so now we can write this with a bit shift and logical AND: + * + * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) + * + */ +#define HASH_EXPAND_BUCKETS(tbl) \ +do { \ + unsigned _he_bkt; \ + unsigned _he_bkt_i; \ + struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ + UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ + _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ + 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \ + memset(_he_new_buckets, 0, \ + 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + tbl->ideal_chain_maxlen = \ + (tbl->num_items >> (tbl->log2_num_buckets+1)) + \ + ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 
1 : 0); \ + tbl->nonideal_items = 0; \ + for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \ + { \ + _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \ + while (_he_thh) { \ + _he_hh_nxt = _he_thh->hh_next; \ + HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \ + _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \ + if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \ + tbl->nonideal_items++; \ + _he_newbkt->expand_mult = _he_newbkt->count / \ + tbl->ideal_chain_maxlen; \ + } \ + _he_thh->hh_prev = NULL; \ + _he_thh->hh_next = _he_newbkt->hh_head; \ + if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \ + _he_thh; \ + _he_newbkt->hh_head = _he_thh; \ + _he_thh = _he_hh_nxt; \ + } \ + } \ + uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ + tbl->num_buckets *= 2; \ + tbl->log2_num_buckets++; \ + tbl->buckets = _he_new_buckets; \ + tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \ + (tbl->ineff_expands+1) : 0; \ + if (tbl->ineff_expands > 1) { \ + tbl->noexpand=1; \ + uthash_noexpand_fyi(tbl); \ + } \ + uthash_expand_fyi(tbl); \ +} while(0) + + +/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ +/* Note that HASH_SORT assumes the hash handle name to be hh. + * HASH_SRT was added to allow the hash handle name to be passed in. */ +#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) +#define HASH_SRT(hh,head,cmpfcn) \ +do { \ + unsigned _hs_i; \ + unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ + struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ + if (head) { \ + _hs_insize = 1; \ + _hs_looping = 1; \ + _hs_list = &((head)->hh); \ + while (_hs_looping) { \ + _hs_p = _hs_list; \ + _hs_list = NULL; \ + _hs_tail = NULL; \ + _hs_nmerges = 0; \ + while (_hs_p) { \ + _hs_nmerges++; \ + _hs_q = _hs_p; \ + _hs_psize = 0; \ + for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \ + _hs_psize++; \ + _hs_q = (UT_hash_handle*)((_hs_q->next) ? 
\ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + if (! (_hs_q) ) break; \ + } \ + _hs_qsize = _hs_insize; \ + while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \ + if (_hs_psize == 0) { \ + _hs_e = _hs_q; \ + _hs_q = (UT_hash_handle*)((_hs_q->next) ? \ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_qsize--; \ + } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \ + _hs_e = _hs_p; \ + if (_hs_p){ \ + _hs_p = (UT_hash_handle*)((_hs_p->next) ? \ + ((void*)((char*)(_hs_p->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + } \ + _hs_psize--; \ + } else if (( \ + cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \ + DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \ + ) <= 0) { \ + _hs_e = _hs_p; \ + if (_hs_p){ \ + _hs_p = (UT_hash_handle*)((_hs_p->next) ? \ + ((void*)((char*)(_hs_p->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + } \ + _hs_psize--; \ + } else { \ + _hs_e = _hs_q; \ + _hs_q = (UT_hash_handle*)((_hs_q->next) ? \ + ((void*)((char*)(_hs_q->next) + \ + (head)->hh.tbl->hho)) : NULL); \ + _hs_qsize--; \ + } \ + if ( _hs_tail ) { \ + _hs_tail->next = ((_hs_e) ? \ + ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \ + } else { \ + _hs_list = _hs_e; \ + } \ + if (_hs_e) { \ + _hs_e->prev = ((_hs_tail) ? \ + ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \ + } \ + _hs_tail = _hs_e; \ + } \ + _hs_p = _hs_q; \ + } \ + if (_hs_tail){ \ + _hs_tail->next = NULL; \ + } \ + if ( _hs_nmerges <= 1 ) { \ + _hs_looping=0; \ + (head)->hh.tbl->tail = _hs_tail; \ + DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ + } \ + _hs_insize *= 2; \ + } \ + HASH_FSCK(hh,head); \ + } \ +} while (0) + +/* This function selects items from one hash into another hash. + * The end result is that the selected items have dual presence + * in both hashes. There is no copy of the items made; rather + * they are added into the new hash through a secondary hash + * hash handle that must be present in the structure. 
*/ +#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ +do { \ + unsigned _src_bkt, _dst_bkt; \ + void *_last_elt=NULL, *_elt; \ + UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ + ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ + if (src) { \ + for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ + for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ + _src_hh; \ + _src_hh = _src_hh->hh_next) { \ + _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ + if (cond(_elt)) { \ + _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \ + _dst_hh->key = _src_hh->key; \ + _dst_hh->keylen = _src_hh->keylen; \ + _dst_hh->hashv = _src_hh->hashv; \ + _dst_hh->prev = _last_elt; \ + _dst_hh->next = NULL; \ + if (_last_elt_hh) { _last_elt_hh->next = _elt; } \ + if (!dst) { \ + DECLTYPE_ASSIGN(dst,_elt); \ + HASH_MAKE_TABLE(hh_dst,dst); \ + } else { \ + _dst_hh->tbl = (dst)->hh_dst.tbl; \ + } \ + HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ + HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \ + (dst)->hh_dst.tbl->num_items++; \ + _last_elt = _elt; \ + _last_elt_hh = _dst_hh; \ + } \ + } \ + } \ + } \ + HASH_FSCK(hh_dst,dst); \ +} while (0) + +#define HASH_CLEAR(hh,head) \ +do { \ + if (head) { \ + uthash_free((head)->hh.tbl->buckets, \ + (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ + HASH_BLOOM_FREE((head)->hh.tbl); \ + uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + (head)=NULL; \ + } \ +} while(0) + +#define HASH_OVERHEAD(hh,head) \ + (size_t)((((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ + ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ + (sizeof(UT_hash_table)) + \ + (HASH_BLOOM_BYTELEN))) + +#ifdef NO_DECLTYPE +#define HASH_ITER(hh,head,el,tmp) \ +for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL); \ + el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL)) +#else +#define HASH_ITER(hh,head,el,tmp) \ 
+for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); \ + el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL)) +#endif + +/* obtain a count of items in the hash */ +#define HASH_COUNT(head) HASH_CNT(hh,head) +#define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0) + +typedef struct UT_hash_bucket { + struct UT_hash_handle *hh_head; + unsigned count; + + /* expand_mult is normally set to 0. In this situation, the max chain length + * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If + * the bucket's chain exceeds this length, bucket expansion is triggered). + * However, setting expand_mult to a non-zero value delays bucket expansion + * (that would be triggered by additions to this particular bucket) + * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. + * (The multiplier is simply expand_mult+1). The whole idea of this + * multiplier is to reduce bucket expansions, since they are expensive, in + * situations where we know that a particular bucket tends to be overused. + * It is better to let its chain length grow to a longer yet-still-bounded + * value, than to do an O(n) bucket expansion too often. + */ + unsigned expand_mult; + +} UT_hash_bucket; + +/* random signature used only to find hash tables in external analysis */ +#define HASH_SIGNATURE 0xa0111fe1 +#define HASH_BLOOM_SIGNATURE 0xb12220f2 + +typedef struct UT_hash_table { + UT_hash_bucket *buckets; + unsigned num_buckets, log2_num_buckets; + unsigned num_items; + struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ + ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ + + /* in an ideal situation (all buckets used equally), no bucket would have + * more than ceil(#items/#buckets) items. that's the ideal chain length. */ + unsigned ideal_chain_maxlen; + + /* nonideal_items is the number of items in the hash whose chain position + * exceeds the ideal chain maxlen. 
these items pay the penalty for an uneven + * hash distribution; reaching them in a chain traversal takes >ideal steps */ + unsigned nonideal_items; + + /* ineffective expands occur when a bucket doubling was performed, but + * afterward, more than half the items in the hash had nonideal chain + * positions. If this happens on two consecutive expansions we inhibit any + * further expansion, as it's not helping; this happens when the hash + * function isn't a good fit for the key domain. When expansion is inhibited + * the hash will still work, albeit no longer in constant time. */ + unsigned ineff_expands, noexpand; + + uint32_t signature; /* used only to find hash tables in external analysis */ +#ifdef HASH_BLOOM + uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ + uint8_t *bloom_bv; + char bloom_nbits; +#endif + +} UT_hash_table; + +typedef struct UT_hash_handle { + struct UT_hash_table *tbl; + void *prev; /* prev element in app order */ + void *next; /* next element in app order */ + struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ + struct UT_hash_handle *hh_next; /* next hh in bucket order */ + void *key; /* ptr to enclosing struct's key */ + unsigned keylen; /* enclosing struct's key len */ + unsigned hashv; /* result of hash-fcn(key) */ +} UT_hash_handle; + +#endif /* UTHASH_H */ diff --git a/util.c b/util.c new file mode 100644 index 0000000..452ff56 --- /dev/null +++ b/util.c @@ -0,0 +1,4050 @@ +/* + * Copyright 2011-2014 Con Kolivas + * Copyright 2010 Jeff Garzik + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#ifdef HAVE_LIBCURL +#include +#endif +#include +#include +#include +#include +#ifndef WIN32 +#include +# ifdef __linux +# include +# endif +# include +# include +# include +# include +#else +# include +# include +# include +#endif +#include + +#include "miner.h" +#include "elist.h" +#include "compat.h" +#include "util.h" + +#define DEFAULT_SOCKWAIT 60 + +bool successful_connect = false; + +int no_yield(void) +{ + return 0; +} + +int (*selective_yield)(void) = &no_yield; + +unsigned char bit_swap_table[256] = +{ + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 
0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +}; + +static void keep_sockalive(SOCKETTYPE fd) +{ + const int tcp_one = 1; +#ifndef WIN32 + const int tcp_keepidle = 45; + const int tcp_keepintvl = 30; + int flags = fcntl(fd, F_GETFL, 0); + + fcntl(fd, F_SETFL, O_NONBLOCK | flags); +#else + u_long flags = 1; + + ioctlsocket(fd, FIONBIO, &flags); +#endif + + setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); + if (!opt_delaynet) +#ifndef __linux + setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); +#else /* __linux */ + fcntl(fd, F_SETFD, FD_CLOEXEC); + setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); + setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); + setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); + setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); +#endif /* __linux */ + +#ifdef __APPLE_CC__ + setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl)); +#endif /* __APPLE_CC__ */ + +} + +#ifdef WIN32 +/* Generic versions of inet_pton for windows, using different names in case + * it is implemented in ming in the future. */ +#define W32NS_INADDRSZ 4 +#define W32NS_IN6ADDRSZ 16 +#define W32NS_INT16SZ 2 + +static int Inet_Pton4(const char *src, char *dst) +{ + uint8_t tmp[W32NS_INADDRSZ], *tp; + + int saw_digit = 0; + int octets = 0; + *(tp = tmp) = 0; + + int ch; + while ((ch = *src++) != '\0') + { + if (ch >= '0' && ch <= '9') + { + uint32_t n = *tp * 10 + (ch - '0'); + + if (saw_digit && *tp == 0) + return 0; + + if (n > 255) + return 0; + + *tp = n; + if (!saw_digit) + { + if (++octets > 4) + return 0; + saw_digit = 1; + } + } + else if (ch == '.' 
&& saw_digit) + { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } + else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, W32NS_INADDRSZ); + + return 1; +} + +static int Inet_Pton6(const char *src, char *dst) +{ + static const char xdigits[] = "0123456789abcdef"; + uint8_t tmp[W32NS_IN6ADDRSZ]; + + uint8_t *tp = (uint8_t*) memset(tmp, '\0', W32NS_IN6ADDRSZ); + uint8_t *endp = tp + W32NS_IN6ADDRSZ; + uint8_t *colonp = NULL; + + /* Leading :: requires some special handling. */ + if (*src == ':') + { + if (*++src != ':') + return 0; + } + + const char *curtok = src; + int saw_xdigit = 0; + uint32_t val = 0; + int ch; + while ((ch = tolower(*src++)) != '\0') + { + const char *pch = strchr(xdigits, ch); + if (pch != NULL) + { + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + continue; + } + if (ch == ':') + { + curtok = src; + if (!saw_xdigit) + { + if (colonp) + return 0; + colonp = tp; + continue; + } + else if (*src == '\0') + { + return 0; + } + if (tp + W32NS_INT16SZ > endp) + return 0; + *tp++ = (uint8_t) (val >> 8) & 0xff; + *tp++ = (uint8_t) val & 0xff; + saw_xdigit = 0; + val = 0; + continue; + } + if (ch == '.' && ((tp + W32NS_INADDRSZ) <= endp) && + Inet_Pton4(curtok, (char*) tp) > 0) + { + tp += W32NS_INADDRSZ; + saw_xdigit = 0; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) + { + if (tp + W32NS_INT16SZ > endp) + return 0; + *tp++ = (uint8_t) (val >> 8) & 0xff; + *tp++ = (uint8_t) val & 0xff; + } + if (colonp != NULL) + { + int i; + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. 
+ */ + const int n = tp - colonp; + + if (tp == endp) + return 0; + + for (i = 1; i <= n; i++) + { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + + memcpy(dst, tmp, W32NS_IN6ADDRSZ); + + return 1; +} + +int Inet_Pton(int af, const char *src, void *dst) +{ + switch (af) + { + case AF_INET: + return Inet_Pton4(src, dst); + case AF_INET6: + return Inet_Pton6(src, dst); + default: + return -1; + } +} +#endif + +struct tq_ent +{ + void *data; + struct list_head q_node; +}; + +#ifdef HAVE_LIBCURL +struct timeval nettime; + +struct data_buffer +{ + void *buf; + size_t len; +}; + +struct upload_buffer +{ + const void *buf; + size_t len; +}; + +struct header_info +{ + char *lp_path; + int rolltime; + char *reason; + char *stratum_url; + bool hadrolltime; + bool canroll; + bool hadexpire; +}; + +static void databuf_free(struct data_buffer *db) +{ + if (!db) + return; + + free(db->buf); + + memset(db, 0, sizeof(*db)); +} + +static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb, + void *user_data) +{ + struct data_buffer *db = user_data; + size_t len = size * nmemb; + size_t oldlen, newlen; + void *newmem; + static const unsigned char zero = 0; + + oldlen = db->len; + newlen = oldlen + len; + + newmem = realloc(db->buf, newlen + 1); + if (!newmem) + return 0; + + db->buf = newmem; + db->len = newlen; + memcpy(db->buf + oldlen, ptr, len); + memcpy(db->buf + newlen, &zero, 1); /* null terminate */ + + return len; +} + +static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb, + void *user_data) +{ + struct upload_buffer *ub = user_data; + unsigned int len = size * nmemb; + + if (len > ub->len) + len = ub->len; + + if (len) + { + memcpy(ptr, ub->buf, len); + ub->buf += len; + ub->len -= len; + } + + return len; +} + +static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) +{ + struct header_info *hi = user_data; + size_t remlen, slen, ptrlen = size * nmemb; + char *rem, *val = 
NULL, *key = NULL; + void *tmp; + + val = calloc(1, ptrlen); + key = calloc(1, ptrlen); + if (!key || !val) + goto out; + + tmp = memchr(ptr, ':', ptrlen); + if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */ + goto out; + slen = tmp - ptr; + if ((slen + 1) == ptrlen) /* skip key w/ no value */ + goto out; + memcpy(key, ptr, slen); /* store & nul term key */ + key[slen] = 0; + + rem = ptr + slen + 1; /* trim value's leading whitespace */ + remlen = ptrlen - slen - 1; + while ((remlen > 0) && (isspace(*rem))) + { + remlen--; + rem++; + } + + memcpy(val, rem, remlen); /* store value, trim trailing ws */ + val[remlen] = 0; + while ((*val) && (isspace(val[strlen(val) - 1]))) + val[strlen(val) - 1] = 0; + + if (!*val) /* skip blank value */ + goto out; + + if (opt_protocol) + applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val); + + if (!strcasecmp("X-Roll-Ntime", key)) + { + hi->hadrolltime = true; + if (!strncasecmp("N", val, 1)) + applog(LOG_DEBUG, "X-Roll-Ntime: N found"); + else + { + hi->canroll = true; + + /* Check to see if expire= is supported and if not, set + * the rolltime to the default scantime */ + if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) + { + sscanf(val + 7, "%d", &hi->rolltime); + hi->hadexpire = true; + } + else + hi->rolltime = opt_scantime; + applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); + } + } + + if (!strcasecmp("X-Long-Polling", key)) + { + hi->lp_path = val; /* steal memory reference */ + val = NULL; + } + + if (!strcasecmp("X-Reject-Reason", key)) + { + hi->reason = val; /* steal memory reference */ + val = NULL; + } + + if (!strcasecmp("X-Stratum", key)) + { + hi->stratum_url = val; + val = NULL; + } + +out: + free(key); + free(val); + return ptrlen; +} + +static void last_nettime(struct timeval *last) +{ + rd_lock(&netacc_lock); + last->tv_sec = nettime.tv_sec; + last->tv_usec = nettime.tv_usec; + rd_unlock(&netacc_lock); +} + +static void set_nettime(void) +{ + wr_lock(&netacc_lock); + cgtime(&nettime); + 
wr_unlock(&netacc_lock); +} + +#if CURL_HAS_KEEPALIVE +static void keep_curlalive(CURL *curl) +{ + const int tcp_keepidle = 45; + const int tcp_keepintvl = 30; + const long int keepalive = 1; + + curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, keepalive); + curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, tcp_keepidle); + curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, tcp_keepintvl); +} +#else +static void keep_curlalive(CURL *curl) +{ + SOCKETTYPE sock; + + curl_easy_getinfo(curl, CURLINFO_LASTSOCKET, (long *)&sock); + keep_sockalive(sock); +} +#endif + +static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type, + __maybe_unused char *data, size_t size, void *userdata) +{ + struct pool *pool = (struct pool *)userdata; + + switch(type) + { + case CURLINFO_HEADER_IN: + case CURLINFO_DATA_IN: + case CURLINFO_SSL_DATA_IN: + pool->cgminer_pool_stats.net_bytes_received += size; + break; + case CURLINFO_HEADER_OUT: + case CURLINFO_DATA_OUT: + case CURLINFO_SSL_DATA_OUT: + pool->cgminer_pool_stats.net_bytes_sent += size; + break; + case CURLINFO_TEXT: + default: + break; + } + return 0; +} + +json_t *json_web_config(const char *url) +{ + struct data_buffer all_data = {NULL, 0}; + char curl_err_str[CURL_ERROR_SIZE]; + long timeout = 60; + json_error_t err; + json_t *val; + CURL *curl; + int rc; + + memset(&err, 0, sizeof(err)); + + curl = curl_easy_init(); + if (unlikely(!curl)) + quithere(1, "CURL initialisation failed"); + + curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); + + curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); + curl_easy_setopt(curl, CURLOPT_URL, url); + curl_easy_setopt(curl, CURLOPT_ENCODING, ""); + curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); + + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); + curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); + curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); + + val = 
NULL; + rc = curl_easy_perform(curl); + curl_easy_cleanup(curl); + if (rc) + { + applog(LOG_ERR, "HTTP config request of '%s' failed: %s", url, curl_err_str); + goto c_out; + } + + if (!all_data.buf) + { + applog(LOG_ERR, "Empty config data received from '%s'", url); + goto c_out; + } + + val = JSON_LOADS(all_data.buf, &err); + if (!val) + { + applog(LOG_ERR, "JSON config decode of '%s' failed(%d): %s", url, + err.line, err.text); + } + databuf_free(&all_data); + +c_out: + return val; +} + +json_t *json_rpc_call(CURL *curl, const char *url, + const char *userpass, const char *rpc_req, + bool probe, bool longpoll, int *rolltime, + struct pool *pool, bool share) +{ + long timeout = longpoll ? (60 * 60) : 60; + struct data_buffer all_data = {NULL, 0}; + struct header_info hi = {NULL, 0, NULL, NULL, false, false, false}; + char len_hdr[64], user_agent_hdr[128]; + char curl_err_str[CURL_ERROR_SIZE]; + struct curl_slist *headers = NULL; + struct upload_buffer upload_data; + json_t *val, *err_val, *res_val; + bool probing = false; + double byte_count; + json_error_t err; + int rc; + + memset(&err, 0, sizeof(err)); + + /* it is assumed that 'curl' is freshly [re]initialized at this pt */ + + if (probe) + probing = !pool->probed; + curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); + + // CURLOPT_VERBOSE won't write to stderr if we use CURLOPT_DEBUGFUNCTION + curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); + curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); + curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); + + curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); + curl_easy_setopt(curl, CURLOPT_URL, url); + curl_easy_setopt(curl, CURLOPT_ENCODING, ""); + curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); + + /* Shares are staggered already and delays in submission can be costly + * so do not delay them */ + if (!opt_delaynet || share) + curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); + curl_easy_setopt(curl, 
CURLOPT_WRITEDATA, &all_data); + curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb); + curl_easy_setopt(curl, CURLOPT_READDATA, &upload_data); + curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); + curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb); + curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hi); + curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); + if (pool->rpc_proxy) + { + curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, pool->rpc_proxytype); + } + else if (opt_socks_proxy) + { + curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); + curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); + } + if (userpass) + { + curl_easy_setopt(curl, CURLOPT_USERPWD, userpass); + curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); + } + if (longpoll) + keep_curlalive(curl); + curl_easy_setopt(curl, CURLOPT_POST, 1); + + if (opt_protocol) + applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req); + + upload_data.buf = rpc_req; + upload_data.len = strlen(rpc_req); + sprintf(len_hdr, "Content-Length: %lu", + (unsigned long) upload_data.len); + sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE_STRING); + + headers = curl_slist_append(headers, + "Content-type: application/json"); + headers = curl_slist_append(headers, + "X-Mining-Extensions: longpoll midstate rollntime submitold"); + + if (likely(global_hashrate)) + { + char ghashrate[255]; + + sprintf(ghashrate, "X-Mining-Hashrate: %llu", global_hashrate); + headers = curl_slist_append(headers, ghashrate); + } + + headers = curl_slist_append(headers, len_hdr); + headers = curl_slist_append(headers, user_agent_hdr); + headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/ + + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + if (opt_delaynet) + { + /* Don't delay share submission, but still track the nettime */ + if (!share) + { + long long now_msecs, 
last_msecs; + struct timeval now, last; + + cgtime(&now); + last_nettime(&last); + now_msecs = (long long)now.tv_sec * 1000; + now_msecs += now.tv_usec / 1000; + last_msecs = (long long)last.tv_sec * 1000; + last_msecs += last.tv_usec / 1000; + if (now_msecs > last_msecs && now_msecs - last_msecs < 250) + { + struct timespec rgtp; + + rgtp.tv_sec = 0; + rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000; + nanosleep(&rgtp, NULL); + } + } + set_nettime(); + } + + rc = curl_easy_perform(curl); + if (rc) + { + applog(LOG_INFO, "HTTP request failed: %s", curl_err_str); + goto err_out; + } + + if (!all_data.buf) + { + applog(LOG_DEBUG, "Empty data received in json_rpc_call."); + goto err_out; + } + + pool->cgminer_pool_stats.times_sent++; + if (curl_easy_getinfo(curl, CURLINFO_SIZE_UPLOAD, &byte_count) == CURLE_OK) + pool->cgminer_pool_stats.bytes_sent += byte_count; + pool->cgminer_pool_stats.times_received++; + if (curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &byte_count) == CURLE_OK) + pool->cgminer_pool_stats.bytes_received += byte_count; + + if (probing) + { + pool->probed = true; + /* If X-Long-Polling was found, activate long polling */ + if (hi.lp_path) + { + if (pool->hdr_path != NULL) + free(pool->hdr_path); + pool->hdr_path = hi.lp_path; + } + else + pool->hdr_path = NULL; + if (hi.stratum_url) + { + pool->stratum_url = hi.stratum_url; + hi.stratum_url = NULL; + } + } + else + { + if (hi.lp_path) + { + free(hi.lp_path); + hi.lp_path = NULL; + } + if (hi.stratum_url) + { + free(hi.stratum_url); + hi.stratum_url = NULL; + } + } + + *rolltime = hi.rolltime; + pool->cgminer_pool_stats.rolltime = hi.rolltime; + pool->cgminer_pool_stats.hadrolltime = hi.hadrolltime; + pool->cgminer_pool_stats.canroll = hi.canroll; + pool->cgminer_pool_stats.hadexpire = hi.hadexpire; + + val = JSON_LOADS(all_data.buf, &err); + if (!val) + { + applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); + + if (opt_protocol) + applog(LOG_DEBUG, "JSON protocol 
response:\n%s", (char *)(all_data.buf)); + + goto err_out; + } + + if (opt_protocol) + { + char *s = json_dumps(val, JSON_INDENT(3)); + + applog(LOG_DEBUG, "JSON protocol response:\n%s", s); + free(s); + } + + /* JSON-RPC valid response returns a non-null 'result', + * and a null 'error'. + */ + res_val = json_object_get(val, "result"); + err_val = json_object_get(val, "error"); + + if (!res_val ||(err_val && !json_is_null(err_val))) + { + char *s; + + if (err_val) + s = json_dumps(err_val, JSON_INDENT(3)); + else + s = strdup("(unknown reason)"); + + applog(LOG_INFO, "JSON-RPC call failed: %s", s); + + free(s); + + goto err_out; + } + + if (hi.reason) + { + json_object_set_new(val, "reject-reason", json_string(hi.reason)); + free(hi.reason); + hi.reason = NULL; + } + successful_connect = true; + databuf_free(&all_data); + curl_slist_free_all(headers); + curl_easy_reset(curl); + return val; + +err_out: + databuf_free(&all_data); + curl_slist_free_all(headers); + curl_easy_reset(curl); + if (!successful_connect) + applog(LOG_DEBUG, "Failed to connect in json_rpc_call"); + curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); + return NULL; +} +#define PROXY_HTTP CURLPROXY_HTTP +#define PROXY_HTTP_1_0 CURLPROXY_HTTP_1_0 +#define PROXY_SOCKS4 CURLPROXY_SOCKS4 +#define PROXY_SOCKS5 CURLPROXY_SOCKS5 +#define PROXY_SOCKS4A CURLPROXY_SOCKS4A +#define PROXY_SOCKS5H CURLPROXY_SOCKS5_HOSTNAME +#else /* HAVE_LIBCURL */ +#define PROXY_HTTP 0 +#define PROXY_HTTP_1_0 1 +#define PROXY_SOCKS4 2 +#define PROXY_SOCKS5 3 +#define PROXY_SOCKS4A 4 +#define PROXY_SOCKS5H 5 +#endif /* HAVE_LIBCURL */ + +static struct +{ + const char *name; + proxytypes_t proxytype; +} proxynames[] = +{ + { "http:", PROXY_HTTP }, + { "http0:", PROXY_HTTP_1_0 }, + { "socks4:", PROXY_SOCKS4 }, + { "socks5:", PROXY_SOCKS5 }, + { "socks4a:", PROXY_SOCKS4A }, + { "socks5h:", PROXY_SOCKS5H }, + { NULL, 0 } +}; + +const char *proxytype(proxytypes_t proxytype) +{ + int i; + + for (i = 0; proxynames[i].name; i++) + if 
(proxynames[i].proxytype == proxytype) + return proxynames[i].name; + + return "invalid"; +} + +char *get_proxy(char *url, struct pool *pool) +{ + pool->rpc_proxy = NULL; + + char *split; + int plen, len, i; + + for (i = 0; proxynames[i].name; i++) + { + plen = strlen(proxynames[i].name); + if (strncmp(url, proxynames[i].name, plen) == 0) + { + if (!(split = strchr(url, '|'))) + return url; + + *split = '\0'; + len = split - url; + pool->rpc_proxy = malloc(1 + len - plen); + if (!(pool->rpc_proxy)) + quithere(1, "Failed to malloc rpc_proxy"); + + strcpy(pool->rpc_proxy, url + plen); + extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); + pool->rpc_proxytype = proxynames[i].proxytype; + url = split + 1; + break; + } + } + return url; +} + +/* Adequate size s==len*2 + 1 must be alloced to use this variant */ +void __bin2hex(char *s, const unsigned char *p, size_t len) +{ + int i; + static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + for (i = 0; i < (int)len; i++) + { + *s++ = hex[p[i] >> 4]; + *s++ = hex[p[i] & 0xF]; + } + *s++ = '\0'; +} + +/* Returns a malloced array string of a binary value of arbitrary length. 
The + * array is rounded up to a 4 byte size to appease architectures that need + * aligned array sizes */ +char *bin2hex(const unsigned char *p, size_t len) +{ + ssize_t slen; + char *s; + + slen = len * 2 + 1; + if (slen % 4) + slen += 4 - (slen % 4); + s = calloc(slen, 1); + if (unlikely(!s)) + quithere(1, "Failed to calloc"); + + __bin2hex(s, p, len); + + return s; +} + +static const int hex2bin_tbl[256] = +{ + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +}; + +/* Does the reverse of bin2hex but does not allocate any ram */ +bool hex2bin(unsigned char *p, const char *hexstr, size_t len) +{ + int nibble1, nibble2; + unsigned char idx; + bool ret = false; + + while (*hexstr && len) + { + if (unlikely(!hexstr[1])) + { + applog(LOG_ERR, "hex2bin str truncated"); + return ret; + } + + idx = *hexstr++; + nibble1 = hex2bin_tbl[idx]; + idx = *hexstr++; + nibble2 = hex2bin_tbl[idx]; + + if (unlikely((nibble1 < 0) || (nibble2 < 0))) + { + applog(LOG_ERR, "hex2bin scan failed"); + return 
ret; + } + + *p++ = (((unsigned char)nibble1) << 4) | ((unsigned char)nibble2); + --len; + } + + if (likely(len == 0 && *hexstr == 0)) + ret = true; + return ret; +} + +static bool _valid_hex(char *s, const char *file, const char *func, const int line) +{ + bool ret = false; + int i, len; + + if (unlikely(!s)) + { + applog(LOG_ERR, "Null string passed to valid_hex from"IN_FMT_FFL, file, func, line); + return ret; + } + len = strlen(s); + for (i = 0; i < len; i++) + { + unsigned char idx = s[i]; + + if (unlikely(hex2bin_tbl[idx] < 0)) + { + applog(LOG_ERR, "Invalid char 0x%x passed to valid_hex from"IN_FMT_FFL, idx, file, func, line); + return ret; + } + } + ret = true; + return ret; +} + +#define valid_hex(s) _valid_hex(s, __FILE__, __func__, __LINE__) + +static bool _valid_ascii(char *s, const char *file, const char *func, const int line) +{ + bool ret = false; + int i, len; + + if (unlikely(!s)) + { + applog(LOG_ERR, "Null string passed to valid_ascii from"IN_FMT_FFL, file, func, line); + return ret; + } + len = strlen(s); + if (unlikely(!len)) + { + applog(LOG_ERR, "Zero length string passed to valid_ascii from"IN_FMT_FFL, file, func, line); + return ret; + } + for (i = 0; i < len; i++) + { + unsigned char idx = s[i]; + + if (unlikely(idx < 32 || idx > 126)) + { + applog(LOG_ERR, "Invalid char 0x%x passed to valid_ascii from"IN_FMT_FFL, idx, file, func, line); + return ret; + } + } + ret = true; + return ret; +} + +#define valid_ascii(s) _valid_ascii(s, __FILE__, __func__, __LINE__) + +static const int b58tobin_tbl[] = +{ + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, + -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, + -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 
46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 +}; + +/* b58bin should always be at least 25 bytes long and already checked to be + * valid. */ +void b58tobin(unsigned char *b58bin, const char *b58) +{ + uint32_t c, bin32[7]; + int len, i, j; + uint64_t t; + + memset(bin32, 0, 7 * sizeof(uint32_t)); + len = strlen(b58); + for (i = 0; i < len; i++) + { + c = b58[i]; + c = b58tobin_tbl[c]; + for (j = 6; j >= 0; j--) + { + t = ((uint64_t)bin32[j]) * 58 + c; + c = (t & 0x3f00000000ull) >> 32; + bin32[j] = t & 0xffffffffull; + } + } + *(b58bin++) = bin32[0] & 0xff; + for (i = 1; i < 7; i++) + { + *((uint32_t *)b58bin) = htobe32(bin32[i]); + b58bin += sizeof(uint32_t); + } +} + +void address_to_pubkeyhash(unsigned char *pkh, const char *addr) +{ + unsigned char b58bin[25]; + + memset(b58bin, 0, 25); + b58tobin(b58bin, addr); + pkh[0] = 0x76; + pkh[1] = 0xa9; + pkh[2] = 0x14; + memcpy(&pkh[3], &b58bin[1], 20); + pkh[23] = 0x88; + pkh[24] = 0xac; +} + +/* For encoding nHeight into coinbase, return how many bytes were used */ +int ser_number(unsigned char *s, int32_t val) +{ + int32_t *i32 = (int32_t *)&s[1]; + int len; + + if (val < 128) + len = 1; + else if (val < 16512) + len = 2; + else if (val < 2113664) + len = 3; + else + len = 4; + *i32 = htole32(val); + s[0] = len++; + return len; +} + +/* For encoding variable length strings */ +unsigned char *ser_string(char *s, int *slen) +{ + size_t len = strlen(s); + unsigned char *ret; + + ret = malloc(1 + len + 8); // Leave room for largest size + if (unlikely(!ret)) + quit(1, "Failed to malloc ret in ser_string"); + if (len < 253) + { + ret[0] = len; + memcpy(ret + 1, s, len); + *slen = len + 1; + } + else if (len < 0x10000) + { + uint16_t *u16 = (uint16_t *)&ret[1]; + + ret[0] = 253; + *u16 = htobe16(len); + memcpy(ret + 3, s, len); + *slen = len + 3; + } + else + { + /* size_t is only 32 bit on many platforms anyway */ + uint32_t *u32 = (uint32_t *)&ret[1]; + + ret[0] = 254; + *u32 = htobe32(len); + memcpy(ret + 5, s, 
len); + *slen = len + 5; + } + return ret; +} + +bool fulltest(const unsigned char *hash, const unsigned char *target) +{ + uint32_t *hash32 = (uint32_t *)hash; + uint32_t *target32 = (uint32_t *)target; + bool rc = true; + int i; + + for (i = 28 / 4; i >= 0; i--) + { + uint32_t h32tmp = le32toh(hash32[i]); + uint32_t t32tmp = le32toh(target32[i]); + + if (h32tmp > t32tmp) + { + rc = false; + break; + } + if (h32tmp < t32tmp) + { + rc = true; + break; + } + } + + if (opt_debug) + { + unsigned char hash_swap[32], target_swap[32]; + char *hash_str, *target_str; + + swab256(hash_swap, hash); + swab256(target_swap, target); + hash_str = bin2hex(hash_swap, 32); + target_str = bin2hex(target_swap, 32); + + applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s", + hash_str, + target_str, + rc ? "YES (hash <= target)" : + "no (false positive; hash > target)"); + + free(hash_str); + free(target_str); + } + + return rc; +} + +struct thread_q *tq_new(void) +{ + struct thread_q *tq; + + tq = calloc(1, sizeof(*tq)); + if (!tq) + return NULL; + + INIT_LIST_HEAD(&tq->q); + pthread_mutex_init(&tq->mutex, NULL); + pthread_cond_init(&tq->cond, NULL); + + return tq; +} + +void tq_free(struct thread_q *tq) +{ + struct tq_ent *ent, *iter; + + if (!tq) + return; + + list_for_each_entry_safe(ent, iter, &tq->q, q_node) + { + list_del(&ent->q_node); + free(ent); + } + + pthread_cond_destroy(&tq->cond); + pthread_mutex_destroy(&tq->mutex); + + memset(tq, 0, sizeof(*tq)); /* poison */ + free(tq); +} + +static void tq_freezethaw(struct thread_q *tq, bool frozen) +{ + mutex_lock(&tq->mutex); + tq->frozen = frozen; + pthread_cond_signal(&tq->cond); + mutex_unlock(&tq->mutex); +} + +void tq_freeze(struct thread_q *tq) +{ + tq_freezethaw(tq, true); +} + +void tq_thaw(struct thread_q *tq) +{ + tq_freezethaw(tq, false); +} + +bool tq_push(struct thread_q *tq, void *data) +{ + struct tq_ent *ent; + bool rc = true; + + ent = calloc(1, sizeof(*ent)); + if (!ent) + return false; + + ent->data = data; + 
INIT_LIST_HEAD(&ent->q_node); + + mutex_lock(&tq->mutex); + if (!tq->frozen) + { + list_add_tail(&ent->q_node, &tq->q); + } + else + { + free(ent); + rc = false; + } + pthread_cond_signal(&tq->cond); + mutex_unlock(&tq->mutex); + + return rc; +} + +void *tq_pop(struct thread_q *tq, const struct timespec *abstime) +{ + struct tq_ent *ent; + void *rval = NULL; + int rc; + + mutex_lock(&tq->mutex); + if (!list_empty(&tq->q)) + goto pop; + + if (abstime) + rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime); + else + rc = pthread_cond_wait(&tq->cond, &tq->mutex); + if (rc) + goto out; + if (list_empty(&tq->q)) + goto out; +pop: + ent = list_entry(tq->q.next, struct tq_ent, q_node); + rval = ent->data; + + list_del(&ent->q_node); + free(ent); +out: + mutex_unlock(&tq->mutex); + + return rval; +} + +int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg) +{ + cgsem_init(&thr->sem); + + return pthread_create(&thr->pth, attr, start, arg); +} + +void thr_info_cancel(struct thr_info *thr) +{ + if (!thr) + return; + + if (PTH(thr) != 0L) + { + pthread_cancel(thr->pth); + PTH(thr) = 0L; + } + cgsem_destroy(&thr->sem); +} + +void subtime(struct timeval *a, struct timeval *b) +{ + timersub(a, b, b); +} + +void addtime(struct timeval *a, struct timeval *b) +{ + timeradd(a, b, b); +} + +bool time_more(struct timeval *a, struct timeval *b) +{ + return timercmp(a, b, >); +} + +bool time_less(struct timeval *a, struct timeval *b) +{ + return timercmp(a, b, <); +} + +void copy_time(struct timeval *dest, const struct timeval *src) +{ + memcpy(dest, src, sizeof(struct timeval)); +} + +void timespec_to_val(struct timeval *val, const struct timespec *spec) +{ + val->tv_sec = spec->tv_sec; + val->tv_usec = spec->tv_nsec / 1000; +} + +void timeval_to_spec(struct timespec *spec, const struct timeval *val) +{ + spec->tv_sec = val->tv_sec; + spec->tv_nsec = val->tv_usec * 1000; +} + +void us_to_timeval(struct timeval *val, int64_t us) +{ + 
lldiv_t tvdiv = lldiv(us, 1000000);
+
+	val->tv_sec = tvdiv.quot;
+	val->tv_usec = tvdiv.rem;
+}
+
+void us_to_timespec(struct timespec *spec, int64_t us)
+{
+	lldiv_t tvdiv = lldiv(us, 1000000);
+
+	spec->tv_sec = tvdiv.quot;
+	spec->tv_nsec = tvdiv.rem * 1000;
+}
+
+void ms_to_timespec(struct timespec *spec, int64_t ms)
+{
+	lldiv_t tvdiv = lldiv(ms, 1000);
+
+	spec->tv_sec = tvdiv.quot;
+	spec->tv_nsec = tvdiv.rem * 1000000;
+}
+
+void ms_to_timeval(struct timeval *val, int64_t ms)
+{
+	lldiv_t tvdiv = lldiv(ms, 1000);
+
+	val->tv_sec = tvdiv.quot;
+	val->tv_usec = tvdiv.rem * 1000;
+}
+
+static void spec_nscheck(struct timespec *ts)
+{
+	while (ts->tv_nsec >= 1000000000)
+	{
+		ts->tv_nsec -= 1000000000;
+		ts->tv_sec++;
+	}
+	while (ts->tv_nsec < 0)
+	{
+		ts->tv_nsec += 1000000000;
+		ts->tv_sec--;
+	}
+}
+
+void timeraddspec(struct timespec *a, const struct timespec *b)
+{
+	a->tv_sec += b->tv_sec;
+	a->tv_nsec += b->tv_nsec;
+	spec_nscheck(a);
+}
+
+static int __maybe_unused timespec_to_ms(struct timespec *ts)
+{
+	return ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
+}
+
+/* Subtract b from a */
+static void __maybe_unused timersubspec(struct timespec *a, const struct timespec *b)
+{
+	a->tv_sec -= b->tv_sec;
+	a->tv_nsec -= b->tv_nsec;
+	spec_nscheck(a);
+}
+
+/* Case-insensitive strstr. Builds lowercased stack copies of both strings
+ * and returns a pointer into the ORIGINAL haystack (or NULL). */
+char *Strcasestr(char *haystack, const char *needle)
+{
+	char *lowhay, *lowneedle, *ret;
+	int hlen, nlen, i, ofs;
+
+	if (unlikely(!haystack || !needle))
+		return NULL;
+	hlen = strlen(haystack);
+	nlen = strlen(needle);
+	if (!hlen || !nlen)
+		return NULL;
+	/* +1 for the NUL terminator: alloca(hlen) alone left the buffers
+	 * unterminated, so the strstr below read past the end of the
+	 * allocation. */
+	lowhay = alloca(hlen + 1);
+	lowneedle = alloca(nlen + 1);
+	for (i = 0; i < hlen; i++)
+		lowhay[i] = tolower(haystack[i]);
+	lowhay[hlen] = '\0';
+	for (i = 0; i < nlen; i++)
+		lowneedle[i] = tolower(needle[i]);
+	lowneedle[nlen] = '\0';
+	ret = strstr(lowhay, lowneedle);
+	if (!ret)
+		return ret;
+	ofs = ret - lowhay;
+	return haystack + ofs;
+}
+
+char *Strsep(char **stringp, const char *delim)
+{
+	char *ret = *stringp;
+	char *p;
+
+	p = (ret != NULL) ? 
strpbrk(ret, delim) : NULL; + + if (p == NULL) + *stringp = NULL; + else + { + *p = '\0'; + *stringp = p + 1; + } + + return ret; +} + +#ifdef WIN32 +/* Mingw32 has no strsep so create our own custom one */ + +/* Windows start time is since 1601 LOL so convert it to unix epoch 1970. */ +#define EPOCHFILETIME (116444736000000000LL) + +/* These are cgminer specific sleep functions that use an absolute nanosecond + * resolution timer to avoid poor usleep accuracy and overruns. */ + +/* Return the system time as an lldiv_t in decimicroseconds. */ +static void decius_time(lldiv_t *lidiv) +{ + FILETIME ft; + LARGE_INTEGER li; + + GetSystemTimeAsFileTime(&ft); + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + li.QuadPart -= EPOCHFILETIME; + + /* SystemTime is in decimicroseconds so divide by an unusual number */ + *lidiv = lldiv(li.QuadPart, 10000000); +} + +/* This is a cgminer gettimeofday wrapper. Since we always call gettimeofday + * with tz set to NULL, and windows' default resolution is only 15ms, this + * gives us higher resolution times on windows. */ +void cgtime(struct timeval *tv) +{ + lldiv_t lidiv; + + decius_time(&lidiv); + tv->tv_sec = lidiv.quot; + tv->tv_usec = lidiv.rem / 10; +} + +#else /* WIN32 */ +void cgtime(struct timeval *tv) +{ + gettimeofday(tv, NULL); +} + +int cgtimer_to_ms(cgtimer_t *cgt) +{ + return timespec_to_ms(cgt); +} + +/* Subtracts b from a and stores it in res. 
*/ +void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) +{ + res->tv_sec = a->tv_sec - b->tv_sec; + res->tv_nsec = a->tv_nsec - b->tv_nsec; + if (res->tv_nsec < 0) + { + res->tv_nsec += 1000000000; + res->tv_sec--; + } +} +#endif /* WIN32 */ + +#ifdef CLOCK_MONOTONIC /* Essentially just linux */ +void cgtimer_time(cgtimer_t *ts_start) +{ + clock_gettime(CLOCK_MONOTONIC, ts_start); +} + +static void nanosleep_abstime(struct timespec *ts_end) +{ + int ret; + + do + { + ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL); + } + while (ret == EINTR); +} + +/* Reentrant version of cgsleep functions allow start time to be set separately + * from the beginning of the actual sleep, allowing scheduling delays to be + * counted in the sleep. */ +void cgsleep_ms_r(cgtimer_t *ts_start, int ms) +{ + struct timespec ts_end; + + ms_to_timespec(&ts_end, ms); + timeraddspec(&ts_end, ts_start); + nanosleep_abstime(&ts_end); +} + +void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) +{ + struct timespec ts_end; + + us_to_timespec(&ts_end, us); + timeraddspec(&ts_end, ts_start); + nanosleep_abstime(&ts_end); +} +#else /* CLOCK_MONOTONIC */ +#ifdef __MACH__ +#include +#include +void cgtimer_time(cgtimer_t *ts_start) +{ + clock_serv_t cclock; + mach_timespec_t mts; + + host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); + clock_get_time(cclock, &mts); + mach_port_deallocate(mach_task_self(), cclock); + ts_start->tv_sec = mts.tv_sec; + ts_start->tv_nsec = mts.tv_nsec; +} +#elif !defined(WIN32) /* __MACH__ - Everything not linux/macosx/win32 */ +void cgtimer_time(cgtimer_t *ts_start) +{ + struct timeval tv; + + cgtime(&tv); + ts_start->tv_sec = tv->tv_sec; + ts_start->tv_nsec = tv->tv_usec * 1000; +} +#endif /* __MACH__ */ + +#ifdef WIN32 +/* For windows we use the SystemTime stored as a LARGE_INTEGER as the cgtimer_t + * typedef, allowing us to have sub-microsecond resolution for times, do simple + * arithmetic for timer calculations, and use windows' 
own hTimers to get + * accurate absolute timeouts. */ +int cgtimer_to_ms(cgtimer_t *cgt) +{ + return (int)(cgt->QuadPart / 10000LL); +} + +/* Subtracts b from a and stores it in res. */ +void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) +{ + res->QuadPart = a->QuadPart - b->QuadPart; +} + +/* Note that cgtimer time is NOT offset by the unix epoch since we use absolute + * timeouts with hTimers. */ +void cgtimer_time(cgtimer_t *ts_start) +{ + FILETIME ft; + + GetSystemTimeAsFileTime(&ft); + ts_start->LowPart = ft.dwLowDateTime; + ts_start->HighPart = ft.dwHighDateTime; +} + +static void liSleep(LARGE_INTEGER *li, int timeout) +{ + HANDLE hTimer; + DWORD ret; + + if (unlikely(timeout <= 0)) + return; + + hTimer = CreateWaitableTimer(NULL, TRUE, NULL); + if (unlikely(!hTimer)) + quit(1, "Failed to create hTimer in liSleep"); + ret = SetWaitableTimer(hTimer, li, 0, NULL, NULL, 0); + if (unlikely(!ret)) + quit(1, "Failed to SetWaitableTimer in liSleep"); + /* We still use a timeout as a sanity check in case the system time + * is changed while we're running */ + ret = WaitForSingleObject(hTimer, timeout); + if (unlikely(ret != WAIT_OBJECT_0 && ret != WAIT_TIMEOUT)) + quit(1, "Failed to WaitForSingleObject in liSleep"); + CloseHandle(hTimer); +} + +void cgsleep_ms_r(cgtimer_t *ts_start, int ms) +{ + LARGE_INTEGER li; + + li.QuadPart = ts_start->QuadPart + (int64_t)ms * 10000LL; + liSleep(&li, ms); +} + +void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) +{ + LARGE_INTEGER li; + int ms; + + li.QuadPart = ts_start->QuadPart + us * 10LL; + ms = us / 1000; + if (!ms) + ms = 1; + liSleep(&li, ms); +} +#else /* WIN32 */ +static void cgsleep_spec(struct timespec *ts_diff, const struct timespec *ts_start) +{ + struct timespec now; + + timeraddspec(ts_diff, ts_start); + cgtimer_time(&now); + timersubspec(ts_diff, &now); + if (unlikely(ts_diff->tv_sec < 0)) + return; + nanosleep(ts_diff, NULL); +} + +void cgsleep_ms_r(cgtimer_t *ts_start, int ms) +{ + struct timespec 
ts_diff; + + ms_to_timespec(&ts_diff, ms); + cgsleep_spec(&ts_diff, ts_start); +} + +void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) +{ + struct timespec ts_diff; + + us_to_timespec(&ts_diff, us); + cgsleep_spec(&ts_diff, ts_start); +} +#endif /* WIN32 */ +#endif /* CLOCK_MONOTONIC */ + +void cgsleep_ms(int ms) +{ + cgtimer_t ts_start; + + cgsleep_prepare_r(&ts_start); + cgsleep_ms_r(&ts_start, ms); +} + +void cgsleep_us(int64_t us) +{ + cgtimer_t ts_start; + + cgsleep_prepare_r(&ts_start); + cgsleep_us_r(&ts_start, us); +} + +/* Returns the microseconds difference between end and start times as a double */ +double us_tdiff(struct timeval *end, struct timeval *start) +{ + /* Sanity check. We should only be using this for small differences so + * limit the max to 60 seconds. */ + if (unlikely(end->tv_sec - start->tv_sec > 60)) + return 60000000; + return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec); +} + +/* Returns the milliseconds difference between end and start times */ +int ms_tdiff(struct timeval *end, struct timeval *start) +{ + /* Like us_tdiff, limit to 1 hour. 
*/
+	if (unlikely(end->tv_sec - start->tv_sec > 3600))
+		return 3600000;
+	return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000;
+}
+
+/* Returns the seconds difference between end and start times as a double */
+double tdiff(struct timeval *end, struct timeval *start)
+{
+	return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
+}
+
+/* Enable extranonce subscription if the pool url carries a "#xnsub"
+ * suffix, and strip the suffix from the url in place. */
+void check_extranonce_option(struct pool *pool, char * url)
+{
+	char extra_op[16],*extra_op_loc;
+	extra_op_loc = strstr(url,"#");
+	if(extra_op_loc && !pool->extranonce_subscribe)
+	{
+		/* Bounded copy: the previous strcpy overflowed extra_op[16] for
+		 * any "#..." suffix longer than 15 chars. Truncation is safe
+		 * since only the exact string "#xnsub" is acted upon. */
+		snprintf(extra_op, sizeof(extra_op), "%s", extra_op_loc);
+		*extra_op_loc = '\0';
+		if(!strcmp(extra_op,"#xnsub"))
+		{
+			pool->extranonce_subscribe = true;
+			applog(LOG_DEBUG, "Pool %d extranonce subscribing enabled.",pool->pool_no);
+			return;
+		}
+	}
+	return;
+}
+
+bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
+{
+	char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
+	char url_address[256], port[6];
+	int url_len, port_len = 0;
+
+	*sockaddr_url = url;
+	url_begin = strstr(url, "//");
+	if (!url_begin)
+		url_begin = url;
+	else
+		url_begin += 2;
+
+	/* Look for numeric ipv6 entries */
+	ipv6_begin = strstr(url_begin, "[");
+	ipv6_end = strstr(url_begin, "]");
+	if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
+		url_end = strstr(ipv6_end, ":");
+	else
+		url_end = strstr(url_begin, ":");
+	if (url_end)
+	{
+		url_len = url_end - url_begin;
+		port_len = strlen(url_begin) - url_len - 1;
+		if (port_len < 1)
+			return false;
+		port_start = url_end + 1;
+	}
+	else
+		url_len = strlen(url_begin);
+
+	if (url_len < 1)
+		return false;
+
+	/* Get rid of the [] */
+	if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
+	{
+		url_len -= 2;
+		url_begin++;
+	}
+
+	snprintf(url_address, 254, "%.*s", url_len, url_begin);
+
+	if (port_len)
+	{
+		char *slash;
+
+		snprintf(port, 6, "%.*s", port_len, port_start);
+		slash = strchr(port, '/');
+		if (slash)
+			*slash = '\0';
+	}
+	else
+		
strcpy(port, "80"); + + *sockaddr_port = strdup(port); + *sockaddr_url = strdup(url_address); + + return true; +} + +enum send_ret +{ + SEND_OK, + SEND_SELECTFAIL, + SEND_SENDFAIL, + SEND_INACTIVE +}; + +/* Send a single command across a socket, appending \n to it. This should all + * be done under stratum lock except when first establishing the socket */ +static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) +{ + SOCKETTYPE sock = pool->sock; + ssize_t ssent = 0; + + strcat(s, "\n"); + len++; + + while (len > 0 ) + { + struct timeval timeout = {1, 0}; + ssize_t sent; + fd_set wd; + retry: + FD_ZERO(&wd); + FD_SET(sock, &wd); + if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) + { + if (interrupted()) + goto retry; + return SEND_SELECTFAIL; + } +#ifdef __APPLE__ + sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); +#elif WIN32 + sent = send(pool->sock, s + ssent, len, 0); +#else + sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); +#endif + if (sent < 0) + { + if (!sock_blocks()) + return SEND_SENDFAIL; + sent = 0; + } + ssent += sent; + len -= sent; + } + + pool->cgminer_pool_stats.times_sent++; + pool->cgminer_pool_stats.bytes_sent += ssent; + pool->cgminer_pool_stats.net_bytes_sent += ssent; + return SEND_OK; +} + +bool stratum_send(struct pool *pool, char *s, ssize_t len) +{ + enum send_ret ret = SEND_INACTIVE; + + if (opt_protocol) + applog(LOG_DEBUG, "SEND: %s", s); + + mutex_lock(&pool->stratum_lock); + if (pool->stratum_active) + ret = __stratum_send(pool, s, len); + mutex_unlock(&pool->stratum_lock); + + /* This is to avoid doing applog under stratum_lock */ + switch (ret) + { + default: + case SEND_OK: + break; + case SEND_SELECTFAIL: + applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no); + suspend_stratum(pool); + break; + case SEND_SENDFAIL: + applog(LOG_DEBUG, "Failed to send in stratum_send"); + suspend_stratum(pool); + break; + case SEND_INACTIVE: + applog(LOG_DEBUG, "Stratum send failed due to no 
pool stratum_active"); + break; + } + return (ret == SEND_OK); +} + +static bool socket_full(struct pool *pool, int wait) +{ + SOCKETTYPE sock = pool->sock; + struct timeval timeout; + fd_set rd; + + if (unlikely(wait < 0)) + wait = 0; + FD_ZERO(&rd); + FD_SET(sock, &rd); + timeout.tv_usec = 0; + timeout.tv_sec = wait; + if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0) + return true; + return false; +} + +/* Check to see if Santa's been good to you */ +bool sock_full(struct pool *pool) +{ + if (strlen(pool->sockbuf)) + return true; + + return (socket_full(pool, 0)); +} + +static void clear_sockbuf(struct pool *pool) +{ + if (likely(pool->sockbuf)) + strcpy(pool->sockbuf, ""); +} + +static void clear_sock(struct pool *pool) +{ + ssize_t n; + + mutex_lock(&pool->stratum_lock); + do + { + if (pool->sock) + n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0); + else + n = 0; + } + while (n > 0); + mutex_unlock(&pool->stratum_lock); + + clear_sockbuf(pool); +} + +/* Realloc memory to new size and zero any extra memory added */ +void _recalloc(void **ptr, size_t old, size_t new, const char *file, const char *func, const int line) +{ + if (new == old) + return; + *ptr = realloc(*ptr, new); + if (unlikely(!*ptr)) + quitfrom(1, file, func, line, "Failed to realloc"); + if (new > old) + memset(*ptr + old, 0, new - old); +} + +/* Make sure the pool sockbuf is large enough to cope with any coinbase size + * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE + * and zeroing the new memory */ +static void recalloc_sock(struct pool *pool, size_t len) +{ + size_t old, new; + + old = strlen(pool->sockbuf); + new = old + len + 1; + if (new < pool->sockbuf_size) + return; + new = new + (RBUFSIZE - (new % RBUFSIZE)); + // Avoid potentially recursive locking + // applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new); + pool->sockbuf = realloc(pool->sockbuf, new); + if (!pool->sockbuf) + quithere(1, "Failed to realloc pool sockbuf"); + memset(pool->sockbuf 
+ old, 0, new - old); + pool->sockbuf_size = new; +} + +/* Peeks at a socket to find the first end of line and then reads just that + * from the socket and returns that as a malloced char */ +char *recv_line(struct pool *pool) +{ + char *tok, *sret = NULL; + ssize_t len, buflen; + int waited = 0; + + if (!strstr(pool->sockbuf, "\n")) + { + struct timeval rstart, now; + + cgtime(&rstart); + if (!socket_full(pool, DEFAULT_SOCKWAIT)) + { + applog(LOG_DEBUG, "Timed out waiting for data on socket_full"); + goto out; + } + + do + { + char s[RBUFSIZE]; + size_t slen; + ssize_t n; + + memset(s, 0, RBUFSIZE); + n = recv(pool->sock, s, RECVSIZE, 0); + if (!n) + { + applog(LOG_DEBUG, "Socket closed waiting in recv_line"); + suspend_stratum(pool); + break; + } + cgtime(&now); + waited = tdiff(&now, &rstart); + if (n < 0) + { + if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) + { + applog(LOG_DEBUG, "Failed to recv sock in recv_line"); + suspend_stratum(pool); + break; + } + } + else + { + slen = strlen(s); + recalloc_sock(pool, slen); + strcat(pool->sockbuf, s); + } + } + while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n")); + } + + buflen = strlen(pool->sockbuf); + tok = strtok(pool->sockbuf, "\n"); + if (!tok) + { + applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line"); + goto out; + } + sret = strdup(tok); + len = strlen(sret); + + /* Copy what's left in the buffer after the \n, including the + * terminating \0 */ + if (buflen > len + 1) + memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1); + else + strcpy(pool->sockbuf, ""); + + pool->cgminer_pool_stats.times_received++; + pool->cgminer_pool_stats.bytes_received += len; + pool->cgminer_pool_stats.net_bytes_received += len; +out: + if (!sret) + clear_sock(pool); + else if (opt_protocol) + applog(LOG_DEBUG, "RECVD: %s", sret); + return sret; +} + +/* Extracts a string value from a json array with error checking. 
To be used + * when the value of the string returned is only examined and not to be stored. + * See json_array_string below */ +static char *__json_array_string(json_t *val, unsigned int entry) +{ + json_t *arr_entry; + + if (json_is_null(val)) + return NULL; + if (!json_is_array(val)) + return NULL; + if (entry > json_array_size(val)) + return NULL; + arr_entry = json_array_get(val, entry); + if (!json_is_string(arr_entry)) + return NULL; + + return (char *)json_string_value(arr_entry); +} + +/* Creates a freshly malloced dup of __json_array_string */ +static char *json_array_string(json_t *val, unsigned int entry) +{ + char *buf = __json_array_string(val, entry); + + if (buf) + return strdup(buf); + return NULL; +} + +static char *blank_merkle = "0000000000000000000000000000000000000000000000000000000000000000"; + +static bool parse_notify(struct pool *pool, json_t *val) +{ + char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, + *ntime, header[228]; + unsigned char *cb1 = NULL, *cb2 = NULL; + size_t cb1_len, cb2_len, alloc_len; + bool clean, ret = false; + int merkles, i; + json_t *arr; + + arr = json_array_get(val, 4); + if (!arr || !json_is_array(arr)) + goto out; + + merkles = json_array_size(arr); + + job_id = json_array_string(val, 0); + prev_hash = __json_array_string(val, 1); + coinbase1 = json_array_string(val, 2); + coinbase2 = json_array_string(val, 3); + bbversion = __json_array_string(val, 5); + nbit = __json_array_string(val, 6); + ntime = __json_array_string(val, 7); + clean = json_is_true(json_array_get(val, 8)); + + if (!valid_ascii(job_id) || !valid_hex(prev_hash) || !valid_hex(coinbase1) || + !valid_hex(coinbase2) || !valid_hex(bbversion) || !valid_hex(nbit) || + !valid_hex(ntime)) + { + /* Annoying but we must not leak memory */ + free(job_id); + free(coinbase1); + free(coinbase2); + goto out; + } + + cg_wlock(&pool->data_lock); + free(pool->swork.job_id); + pool->swork.job_id = job_id; + snprintf(pool->prev_hash, 65, "%s", 
prev_hash); + cb1_len = strlen(coinbase1) / 2; + cb2_len = strlen(coinbase2) / 2; + snprintf(pool->bbversion, 9, "%s", bbversion); + snprintf(pool->nbit, 9, "%s", nbit); + snprintf(pool->ntime, 9, "%s", ntime); + pool->swork.clean = clean; + alloc_len = pool->coinbase_len = cb1_len + pool->n1_len + pool->n2size + cb2_len; + pool->nonce2_offset = cb1_len + pool->n1_len; + + for (i = 0; i < pool->merkles; i++) + free(pool->swork.merkle_bin[i]); + if (merkles) + { + pool->swork.merkle_bin = realloc(pool->swork.merkle_bin, + sizeof(char *) * merkles + 1); + for (i = 0; i < merkles; i++) + { + char *merkle = json_array_string(arr, i); + + pool->swork.merkle_bin[i] = malloc(32); + if (unlikely(!pool->swork.merkle_bin[i])) + quit(1, "Failed to malloc pool swork merkle_bin"); + if (opt_protocol) + applog(LOG_DEBUG, "merkle %d: %s", i, merkle); + ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32); + free(merkle); + if (unlikely(!ret)) + { + applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify"); + goto out_unlock; + } + } + } + pool->merkles = merkles; + if (clean) + pool->nonce2 = 0; +#if 0 + header_len = strlen(pool->bbversion) + + strlen(pool->prev_hash); + /* merkle_hash */ 32 + + strlen(pool->ntime) + + strlen(pool->nbit) + + /* nonce */ 8 + + /* workpadding */ 96; +#endif + snprintf(header, 225, + "%s%s%s%s%s%s%s", + pool->bbversion, + pool->prev_hash, + blank_merkle, + pool->ntime, + pool->nbit, + "00000000", /* nonce */ + workpadding); + ret = hex2bin(pool->header_bin, header, 112); + if (unlikely(!ret)) + { + applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify"); + goto out_unlock; + } + + cb1 = alloca(cb1_len); + ret = hex2bin(cb1, coinbase1, cb1_len); + if (unlikely(!ret)) + { + applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify"); + goto out_unlock; + } + cb2 = alloca(cb2_len); + ret = hex2bin(cb2, coinbase2, cb2_len); + if (unlikely(!ret)) + { + applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in 
parse_notify"); + goto out_unlock; + } + free(pool->coinbase); + align_len(&alloc_len); + pool->coinbase = calloc(alloc_len, 1); + if (unlikely(!pool->coinbase)) + quit(1, "Failed to calloc pool coinbase in parse_notify"); + memcpy(pool->coinbase, cb1, cb1_len); + memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len); + memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len); + if (opt_debug) + { + char *cb = bin2hex(pool->coinbase, pool->coinbase_len); + + applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb); + free(cb); + } +out_unlock: + cg_wunlock(&pool->data_lock); + + if (opt_protocol) + { + applog(LOG_DEBUG, "job_id: %s", job_id); + applog(LOG_DEBUG, "prev_hash: %s", prev_hash); + applog(LOG_DEBUG, "coinbase1: %s", coinbase1); + applog(LOG_DEBUG, "coinbase2: %s", coinbase2); + applog(LOG_DEBUG, "bbversion: %s", bbversion); + applog(LOG_DEBUG, "nbit: %s", nbit); + applog(LOG_DEBUG, "ntime: %s", ntime); + applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no"); + } + free(coinbase1); + free(coinbase2); + + /* A notify message is the closest stratum gets to a getwork */ + pool->getwork_requested++; + total_getworks++; + if (pool == current_pool()) + opt_work_update = true; +out: + return ret; +} + +static bool parse_version(struct pool *pool, json_t *val) +{ + int i; + for(i = 0; i < json_array_size(val); i++) + { + pool->version[i] = json_integer_value(json_array_get(val, i)); + } +} + +static bool parse_diff(struct pool *pool, json_t *val) +{ + double old_diff, diff; + + diff = json_number_value(json_array_get(val, 0)); + if (diff == 0) + return false; + + cg_wlock(&pool->data_lock); + old_diff = pool->sdiff; + pool->sdiff = diff; + cg_wunlock(&pool->data_lock); + + if (old_diff != diff) + { + int idiff = diff; + + if ((double)idiff == diff) + applog(LOG_NOTICE, "Pool %d difficulty changed to %d", + pool->pool_no, idiff); + else + applog(LOG_NOTICE, "Pool %d difficulty changed to %.1f", + pool->pool_no, diff); + } + else + 
applog(LOG_DEBUG, "Pool %d difficulty set to %f", pool->pool_no, + diff); + + return true; +} + +static bool parse_extranonce(struct pool *pool, json_t *val) +{ + int n2size; + char* nonce1; + + nonce1 = json_array_string(val, 0); + if (!valid_hex(nonce1)) + { + applog(LOG_INFO, "Failed to get valid nonce1 in parse_extranonce"); + goto out; + } + n2size = json_integer_value(json_array_get(val, 1)); + if (n2size < 2 || n2size > 16) + { + applog(LOG_INFO, "Failed to get valid n2size in parse_extranonce"); + free(nonce1); + goto out; + } + + cg_wlock(&pool->data_lock); + pool->nonce1 = nonce1; + pool->n1_len = strlen(nonce1) / 2; + free(pool->nonce1bin); + pool->nonce1bin = calloc(pool->n1_len, 1); + if (unlikely(!pool->nonce1bin)) + quithere(1, "Failed to calloc pool->nonce1bin"); + hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len); + pool->n2size = n2size; + applog(LOG_NOTICE, "Pool %d confirmed mining.extranonce.subscribe with extranonce1 %s extran2size %d", + pool->pool_no, pool->nonce1, pool->n2size); + cg_wunlock(&pool->data_lock); + return true; +out: + return false; +} + +static void __suspend_stratum(struct pool *pool) +{ + clear_sockbuf(pool); + pool->stratum_active = pool->stratum_notify = false; + if (pool->sock) + CLOSESOCKET(pool->sock); + pool->sock = 0; +} + +static bool parse_reconnect(struct pool *pool, json_t *val) +{ + char *sockaddr_url, *stratum_port, *tmp; + char *url, *port, address[256]; + + memset(address, 0, 255); + url = (char *)json_string_value(json_array_get(val, 0)); + if (!url) + url = pool->sockaddr_url; + else + { + char *dot_pool, *dot_reconnect; + dot_pool = strchr(pool->sockaddr_url, '.'); + if (!dot_pool) + { + applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'", + pool->sockaddr_url); + return false; + } + dot_reconnect = strchr(url, '.'); + if (!dot_reconnect) + { + applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", + url); + return false; + } + if (strcmp(dot_pool, 
dot_reconnect)) + { + applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", + pool->sockaddr_url); + return false; + } + } + + port = (char *)json_string_value(json_array_get(val, 1)); + if (!port) + port = pool->stratum_port; + + snprintf(address, 254, "%s:%s", url, port); + + if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) + return false; + + applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address); + + clear_pool_work(pool); + + mutex_lock(&pool->stratum_lock); + __suspend_stratum(pool); + tmp = pool->sockaddr_url; + pool->sockaddr_url = sockaddr_url; + pool->stratum_url = pool->sockaddr_url; + free(tmp); + tmp = pool->stratum_port; + pool->stratum_port = stratum_port; + free(tmp); + mutex_unlock(&pool->stratum_lock); + + return restart_stratum(pool); +} + +static bool send_version(struct pool *pool, json_t *val) +{ + json_t *id_val = json_object_get(val, "id"); + char s[RBUFSIZE]; + int id; + + if (!id_val) + return false; + id = json_integer_value(json_object_get(val, "id")); + + sprintf(s, "{\"id\": %d, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", id); + if (!stratum_send(pool, s, strlen(s))) + return false; + + return true; +} + +static bool send_pong(struct pool *pool, json_t *val) +{ + json_t *id_val = json_object_get(val, "id"); + char s[RBUFSIZE]; + int id; + + if (!id_val) + return false; + id = json_integer_value(json_object_get(val, "id")); + + sprintf(s, "{\"id\": %d, \"result\": \"pong\", \"error\": null}", id); + if (!stratum_send(pool, s, strlen(s))) + return false; + + return true; +} + +static bool show_message(struct pool *pool, json_t *val) +{ + char *msg; + + if (!json_is_array(val)) + return false; + msg = (char *)json_string_value(json_array_get(val, 0)); + if (!msg) + return false; + applog(LOG_NOTICE, "Pool %d message: %s", pool->pool_no, msg); + return true; +} + +bool parse_method(struct pool *pool, char *s) +{ + json_t *val = NULL, *method, 
*err_val, *params;
	json_error_t err;
	bool ret = false;
	char *buf;

	if (!s)
		goto out;

	val = JSON_LOADS(s, &err);
	if (!val)
	{
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	/* A notification must carry a "method"; responses without one are
	 * not handled here. */
	method = json_object_get(val, "method");
	if (!method)
		goto out_decref;
	err_val = json_object_get(val, "error");
	params = json_object_get(val, "params");

	if (err_val && !json_is_null(err_val))
	{
		char *ss;

		/* NOTE(review): this inner err_val test is redundant — the
		 * enclosing condition already guarantees err_val is non-NULL. */
		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss);
		free(ss);
		goto out_decref;
	}

	/* Cast drops jansson's const; buf is only read below. */
	buf = (char *)json_string_value(method);
	if (!buf)
		goto out_decref;

	/* Dispatch on the method name; each handler sets ret and jumps to
	 * the shared cleanup label. */
	if (!strncasecmp(buf, "mining.multi_version", 20))
	{
		pool->support_vil = true;
		applog(LOG_INFO,"Pool support multi version");
		ret = parse_version(pool, params);
		goto out_decref;
	}

	if (!strncasecmp(buf, "mining.notify", 13))
	{
		if (parse_notify(pool, params))
			pool->stratum_notify = ret = true;
		else
			pool->stratum_notify = ret = false;
		goto out_decref;
	}


	if(!strncasecmp(buf, "mining.set_extranonce", 21))
	{
		ret = parse_extranonce(pool, params);
		goto out_decref;
	}

	if (!strncasecmp(buf, "mining.set_difficulty", 21))
	{
		ret = parse_diff(pool, params);
		goto out_decref;
	}

	if (!strncasecmp(buf, "client.reconnect", 16))
	{
		ret = parse_reconnect(pool, params);
		goto out_decref;
	}

	if (!strncasecmp(buf, "client.get_version", 18))
	{
		ret = send_version(pool, val);
		goto out_decref;
	}

	if (!strncasecmp(buf, "client.show_message", 19))
	{
		ret = show_message(pool, params);
		goto out_decref;
	}

	if (!strncasecmp(buf, "mining.ping", 11))
	{
		applog(LOG_INFO, "Pool %d ping", pool->pool_no);
		ret = send_pong(pool, val);
		goto out_decref;
	}
out_decref:
	json_decref(val);
out:
	return ret;
}

/* Send mining.authorize for this pool's credentials and parse the reply.
 * Any queued notifications received first are consumed via parse_method;
 * the first non-method line is treated as the authorize response.
 * Returns true on successful authorisation. */
bool auth_stratum(struct pool *pool)
{
	json_t *val = NULL, *res_val, *err_val;
	char s[RBUFSIZE], *sret = NULL;
	json_error_t err;
	bool ret = false;

	sprintf(s, "{\"id\": %d, \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}",
		swork_id++, pool->rpc_user, pool->rpc_pass);

	if (!stratum_send(pool, s, strlen(s)))
		return ret;

	/* Parse all data in the queue and anything left should be auth */
	while (42)
	{
		sret = recv_line(pool);
		if (!sret)
			return ret;
		if (parse_method(pool, sret))
			free(sret);
		else
			break;
	}

	/* NOTE(review): a NULL val from JSON_LOADS is tolerated below because
	 * json_object_get(NULL, ...) returns NULL and falls into the error
	 * branch; jansson's json_decref also accepts NULL. */
	val = JSON_LOADS(sret, &err);
	free(sret);
	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");

	if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val)))
	{
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");
		applog(LOG_INFO, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss);
		free(ss);

		suspend_stratum(pool);

		goto out;
	}

	ret = true;
	applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no);
	pool->probed = true;
	successful_connect = true;
	/* Optional, fire-and-forget suggestions; failures are ignored. */
	if (opt_suggest_diff)
	{
		sprintf(s, "{\"id\": %d, \"method\": \"mining.suggest_difficulty\", \"params\": [%d]}",
			swork_id++, opt_suggest_diff);
		stratum_send(pool, s, strlen(s));
	}
	if (opt_multi_version)
	{
		sprintf(s, "{\"id\": %d, \"method\": \"mining.multi_version\", \"params\": [%d]}",
			swork_id++, opt_multi_version);
		stratum_send(pool, s, strlen(s));
	}
out:
	json_decref(val);
	return ret;
}

/* Read a single byte from sockd; returns the byte or -1 on error.
 * NOTE(review): a received 0xFF byte is indistinguishable from the -1
 * error return on platforms where char is signed. */
static int recv_byte(int sockd)
{
	char c;

	if (recv(sockd, &c, 1, 0) != -1)
		return c;

	return -1;
}

/* Issue an HTTP CONNECT through the pool's proxy and consume the response
 * headers. http0 selects HTTP/1.0 (no Host header) over HTTP/1.1.
 * Returns true once a 200 status and the terminating blank line are seen. */
static bool http_negotiate(struct pool *pool, int sockd, bool http0)
{
	char buf[1024];
	int i, len;

	if (http0)
	{
		snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.0\r\n\r\n",
			pool->sockaddr_url, pool->stratum_port);
	}
	else
	{
		snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.1\r\nHost: %s:%s\r\n\r\n",
			pool->sockaddr_url, pool->stratum_port, pool->sockaddr_url,
			pool->stratum_port);
	}
	applog(LOG_DEBUG, "Sending proxy %s:%s - %s",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
	send(sockd, buf, strlen(buf), 0);
	/* 12 bytes covers exactly "HTTP/1.x 200" */
	len = recv(sockd, buf, 12, 0);
	if (len <= 0)
	{
		applog(LOG_WARNING, "Couldn't read from proxy %s:%s after sending CONNECT",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
		return false;
	}
	buf[len] = '\0';
	applog(LOG_DEBUG, "Received from proxy %s:%s - %s",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
	if (strcmp(buf, "HTTP/1.1 200") && strcmp(buf, "HTTP/1.0 200"))
	{
		applog(LOG_WARNING, "HTTP Error from proxy %s:%s - %s",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
		return false;
	}

	/* Ignore unwanted headers till we get desired response: slide a
	 * 4-byte window until the blank line (\r\n\r\n) ending the headers. */
	for (i = 0; i < 4; i++)
	{
		buf[i] = recv_byte(sockd);
		if (buf[i] == (char)-1)
		{
			applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s",
			       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
			return false;
		}
	}
	while (strncmp(buf, "\r\n\r\n", 4))
	{
		for (i = 0; i < 3; i++)
			buf[i] = buf[i + 1];
		buf[3] = recv_byte(sockd);
		if (buf[3] == (char)-1)
		{
			applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s",
			       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
			return false;
		}
	}

	applog(LOG_DEBUG, "Success negotiating with %s:%s HTTP proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
	return true;
}

/* Perform a SOCKS5 (RFC 1928) handshake: greeting with no-auth method,
 * then a CONNECT request using the domain-name address type. Returns true
 * when the server replies with success. */
static bool socks5_negotiate(struct pool *pool, int sockd)
{
	unsigned char atyp, uclen;
	unsigned short port;
	char buf[515];
	int i, len;

	buf[0] = 0x05;	/* SOCKS version 5 */
	buf[1] = 0x01;	/* one auth method offered */
	buf[2] = 0x00;	/* method: no authentication */
	applog(LOG_DEBUG, "Attempting to negotiate with %s:%s SOCKS5 proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
	send(sockd, buf, 3, 0);
	if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != buf[2])
	{
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}

	buf[0] = 0x05;	/* version */
	buf[1] = 0x01;	/* CONNECT */
	buf[2] = 0x00;	/* reserved */
	buf[3] = 0x03;	/* address type: domain name */
	len = (strlen(pool->sockaddr_url));
	if (len > 255)
		len = 255;
	uclen = len;
	buf[4] = (uclen & 0xff);
	memcpy(buf + 5, pool->sockaddr_url, len);
	port = atoi(pool->stratum_port);
	buf[5 + len] = (port >> 8);
	buf[6 + len] = (port & 0xff);
	send(sockd, buf, (7 + len), 0);
	if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != 0x00)
	{
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}

	/* Drain the rest of the reply: reserved byte, bound address
	 * (format depends on atyp) and the 2-byte bound port. */
	recv_byte(sockd);
	atyp = recv_byte(sockd);
	if (atyp == 0x01)
	{
		for (i = 0; i < 4; i++)
			recv_byte(sockd);
	}
	else if (atyp == 0x03)
	{
		len = recv_byte(sockd);
		for (i = 0; i < len; i++)
			recv_byte(sockd);
	}
	else
	{
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}
	for (i = 0; i < 2; i++)
		recv_byte(sockd);

	applog(LOG_DEBUG, "Success negotiating with %s:%s SOCKS5 proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
	return true;
}

/* Perform a SOCKS4 (or SOCKS4a when socks4a is true and the host cannot
 * be resolved locally) CONNECT handshake. Returns true on the server's
 * 0x5a "request granted" reply. */
static bool socks4_negotiate(struct pool *pool, int sockd, bool socks4a)
{
	unsigned short port;
	in_addr_t inp;
	char buf[515];
	int i, len;

	buf[0] = 0x04;	/* SOCKS version 4 */
	buf[1] = 0x01;	/* CONNECT */
	port = atoi(pool->stratum_port);
	buf[2] = port >> 8;
	buf[3] = port & 0xff;
	sprintf(&buf[8], "CGMINER");	/* user id field */

	/* See if we've been given an IP address directly to avoid needing to
	 * resolve it. */
	inp = inet_addr(pool->sockaddr_url);
	inp = ntohl(inp);
	if ((int)inp != -1)
		socks4a = false;
	else
	{
		/* Try to extract the IP address ourselves first */
		struct addrinfo servinfobase, *servinfo, hints;

		servinfo = &servinfobase;
		memset(&hints, 0, sizeof(struct addrinfo));
		hints.ai_family = AF_INET; /* IPV4 only */
		if (!getaddrinfo(pool->sockaddr_url, NULL, &hints, &servinfo))
		{
			struct sockaddr_in *saddr_in = (struct sockaddr_in *)servinfo->ai_addr;

			inp = ntohl(saddr_in->sin_addr.s_addr);
			socks4a = false;
			freeaddrinfo(servinfo);
		}
	}

	if (!socks4a)
	{
		if ((int)inp == -1)
		{
			applog(LOG_WARNING, "Invalid IP address specified for socks4 proxy: %s",
			       pool->sockaddr_url);
			return false;
		}
		buf[4] = (inp >> 24) & 0xFF;
		buf[5] = (inp >> 16) & 0xFF;
		buf[6] = (inp >> 8) & 0xFF;
		buf[7] = (inp >> 0) & 0xFF;
		send(sockd, buf, 16, 0);
	}
	else
	{
		/* This appears to not be working but hopefully most will be
		 * able to resolve IP addresses themselves. */
		buf[4] = 0;
		buf[5] = 0;
		buf[6] = 0;
		buf[7] = 1;	/* 0.0.0.1 marks a SOCKS4a hostname request */
		len = strlen(pool->sockaddr_url);
		if (len > 255)
			len = 255;
		memcpy(&buf[16], pool->sockaddr_url, len);
		len += 16;
		buf[len++] = '\0';
		send(sockd, buf, len, 0);
	}

	if (recv_byte(sockd) != 0x00 || recv_byte(sockd) != 0x5a)
	{
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS4 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
		return false;
	}

	/* Discard the remaining 6 bytes of the reply (port + address). */
	for (i = 0; i < 6; i++)
		recv_byte(sockd);

	return true;
}

/* Put fd into non-blocking mode (fcntl on POSIX, ioctlsocket on Windows). */
static void noblock_socket(SOCKETTYPE fd)
{
#ifndef WIN32
	int flags = fcntl(fd, F_GETFL, 0);

	fcntl(fd, F_SETFL, O_NONBLOCK | flags);
#else
	u_long flags = 1;

	ioctlsocket(fd, FIONBIO, &flags);
#endif
}

/* Restore fd to blocking mode. */
static void block_socket(SOCKETTYPE fd)
{
#ifndef WIN32
	int flags = fcntl(fd, F_GETFL, 0);

	fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
#else
	u_long flags = 0;

	ioctlsocket(fd, FIONBIO, &flags);
#endif
}

/* True if the last connect() failure means "in progress" on this platform. */
static bool sock_connecting(void)
{
#ifndef WIN32
	return errno == EINPROGRESS;
#else
	return WSAGetLastError() == WSAEWOULDBLOCK;
#endif
}

/* Resolve the pool (or its proxy), connect non-blocking with a short
 * select timeout per address, run any proxy negotiation, and install the
 * connected socket and receive buffer on the pool. */
static bool setup_stratum_socket(struct pool *pool)
{
	struct addrinfo *servinfo, hints, *p;
	char *sockaddr_url, *sockaddr_port;
	int sockd;

	mutex_lock(&pool->stratum_lock);
	pool->stratum_active = false;
	if (pool->sock)
		CLOSESOCKET(pool->sock);
	pool->sock = 0;
	mutex_unlock(&pool->stratum_lock);

	memset(&hints, 0, sizeof(struct addrinfo));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;

	if (!pool->rpc_proxy && opt_socks_proxy)
	{
		pool->rpc_proxy = opt_socks_proxy;
		extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port);
		pool->rpc_proxytype = PROXY_SOCKS5;
	}

	if (pool->rpc_proxy)
	{
		sockaddr_url = pool->sockaddr_proxy_url;
		sockaddr_port = pool->sockaddr_proxy_port;
	}
	else
	{
		sockaddr_url = pool->sockaddr_url;
		sockaddr_port = pool->stratum_port;
	}
	if (getaddrinfo(sockaddr_url, sockaddr_port, &hints, &servinfo) != 0)
	{
		if (!pool->probed)
		{
			applog(LOG_WARNING, "Failed to resolve (?wrong URL) %s:%s",
			       sockaddr_url, sockaddr_port);
			pool->probed = true;
		}
		else
		{
			applog(LOG_INFO, "Failed to getaddrinfo for %s:%s",
			       sockaddr_url, sockaddr_port);
		}
		return false;
	}

	for (p = servinfo; p != NULL; p = p->ai_next)
	{
		sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol);
		if (sockd == -1)
		{
			applog(LOG_DEBUG, "Failed socket");
			continue;
		}

		/* Iterate non blocking over entries returned by getaddrinfo
		 * to cope with round robin DNS entries, finding the first one
		 * we can connect to quickly. */
		noblock_socket(sockd);
		if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1)
		{
			struct timeval tv_timeout = {1, 0};
			int selret;
			fd_set rw;

			if (!sock_connecting())
			{
				CLOSESOCKET(sockd);
				applog(LOG_DEBUG, "Failed sock connect");
				continue;
			}
retry:
			FD_ZERO(&rw);
			FD_SET(sockd, &rw);
			selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout);
			if (selret > 0 && FD_ISSET(sockd, &rw))
			{
				socklen_t len;
				int err, n;

				/* Check SO_ERROR to learn whether the delayed
				 * connect actually succeeded. */
				len = sizeof(err);
				n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len);
				if (!n && !err)
				{
					applog(LOG_DEBUG, "Succeeded delayed connect");
					block_socket(sockd);
					break;
				}
			}
			if (selret < 0 && interrupted())
				goto retry;
			CLOSESOCKET(sockd);
			applog(LOG_DEBUG, "Select timeout/failed connect");
			continue;
		}
		/* NOTE(review): this logs at LOG_WARNING on every immediate
		 * connect; upstream uses LOG_DEBUG here — likely noise. */
		applog(LOG_WARNING, "Succeeded immediate connect");
		block_socket(sockd);

		break;
	}
	if (p == NULL)
	{
		applog(LOG_INFO, "Failed to connect to stratum on %s:%s",
		       sockaddr_url, sockaddr_port);
		freeaddrinfo(servinfo);
		return false;
	}
	freeaddrinfo(servinfo);

	if (pool->rpc_proxy)
	{
		switch (pool->rpc_proxytype)
		{
		case PROXY_HTTP_1_0:
			if (!http_negotiate(pool, sockd, true))
				return false;
			break;
		case PROXY_HTTP:
			if (!http_negotiate(pool, sockd, false))
				return false;
			break;
		case PROXY_SOCKS5:
case PROXY_SOCKS5H: + if (!socks5_negotiate(pool, sockd)) + return false; + break; + case PROXY_SOCKS4: + if (!socks4_negotiate(pool, sockd, false)) + return false; + break; + case PROXY_SOCKS4A: + if (!socks4_negotiate(pool, sockd, true)) + return false; + break; + default: + applog(LOG_WARNING, "Unsupported proxy type for %s:%s", + pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); + return false; + break; + } + } + + if (!pool->sockbuf) + { + pool->sockbuf = calloc(RBUFSIZE, 1); + if (!pool->sockbuf) + quithere(1, "Failed to calloc pool sockbuf"); + pool->sockbuf_size = RBUFSIZE; + } + + pool->sock = sockd; + keep_sockalive(sockd); + return true; +} + +static char *get_sessionid(json_t *val) +{ + char *ret = NULL; + json_t *arr_val; + int arrsize, i; + + arr_val = json_array_get(val, 0); + if (!arr_val || !json_is_array(arr_val)) + goto out; + arrsize = json_array_size(arr_val); + for (i = 0; i < arrsize; i++) + { + json_t *arr = json_array_get(arr_val, i); + char *notify; + + if (!arr | !json_is_array(arr)) + break; + notify = __json_array_string(arr, 0); + if (!notify) + continue; + if (!strncasecmp(notify, "mining.notify", 13)) + { + ret = json_array_string(arr, 1); + break; + } + } +out: + return ret; +} + +void suspend_stratum(struct pool *pool) +{ + applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no); + + mutex_lock(&pool->stratum_lock); + __suspend_stratum(pool); + mutex_unlock(&pool->stratum_lock); +} + +void extranonce_subscribe_stratum(struct pool *pool) +{ + if(pool->extranonce_subscribe) + { + char s[RBUFSIZE]; + sprintf(s,"{\"id\": %d, \"method\": \"mining.extranonce.subscribe\", \"params\": []}", swork_id++); + applog(LOG_INFO, "Send extranonce.subscribe for stratum pool %d", pool->pool_no); + stratum_send(pool, s, strlen(s)); + } +} + + +bool initiate_stratum(struct pool *pool) +{ + bool ret = false, recvd = false, noresume = false, sockd = false; + char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid; + json_t *val = NULL, 
*res_val, *err_val;
	json_error_t err;
	int n2size;

resend:
	if (!setup_stratum_socket(pool))
	{
		sockd = false;
		goto out;
	}

	sockd = true;

	if (recvd)
	{
		/* Get rid of any crap lying around if we're resending */
		clear_sock(pool);
		sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++);
	}
	else
	{
		/* First attempt: offer the saved sessionid (if any) so the
		 * pool can resume the previous stratum session. */
		if (pool->sessionid)
			sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid);
		else
			sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++);
	}

	if (__stratum_send(pool, s, strlen(s)) != SEND_OK)
	{
		applog(LOG_DEBUG, "Failed to send s in initiate_stratum");
		goto out;
	}

	if (!socket_full(pool, DEFAULT_SOCKWAIT))
	{
		applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum");
		goto out;
	}

	sret = recv_line(pool);
	if (!sret)
		goto out;

	recvd = true;

	val = JSON_LOADS(sret, &err);
	free(sret);
	if (!val)
	{
		applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text);
		goto out;
	}

	res_val = json_object_get(val, "result");
	err_val = json_object_get(val, "error");

	if (!res_val || json_is_null(res_val) ||
	    (err_val && !json_is_null(err_val)))
	{
		char *ss;

		if (err_val)
			ss = json_dumps(err_val, JSON_INDENT(3));
		else
			ss = strdup("(unknown reason)");

		applog(LOG_INFO, "JSON-RPC decode failed: %s", ss);

		free(ss);

		goto out;
	}

	/* Result layout: [subscriptions, extranonce1, extranonce2_size] */
	sessionid = get_sessionid(res_val);
	if (!sessionid)
		applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum");
	nonce1 = json_array_string(res_val, 1);
	if (!valid_hex(nonce1))
	{
		applog(LOG_INFO, "Failed to get valid nonce1 in initiate_stratum");
		free(sessionid);
		goto out;
	}
	n2size = json_integer_value(json_array_get(res_val, 2));
	if (n2size < 2 || n2size > 16)
	{
		applog(LOG_INFO, "Failed to get valid n2size in initiate_stratum");
		free(sessionid);
		free(nonce1);
		goto out;
	}

	/* Publish the negotiated session data under the write lock.
	 * NOTE(review): the previous pool->sessionid/pool->nonce1 strings
	 * are overwritten without being freed here — possible leak on
	 * reconnect; confirm against upstream before changing. */
	cg_wlock(&pool->data_lock);
	pool->sessionid = sessionid;
	pool->nonce1 = nonce1;
	pool->n1_len = strlen(nonce1) / 2;
	free(pool->nonce1bin);
	pool->nonce1bin = calloc(pool->n1_len, 1);
	if (unlikely(!pool->nonce1bin))
		quithere(1, "Failed to calloc pool->nonce1bin");
	hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len);
	pool->n2size = n2size;
	cg_wunlock(&pool->data_lock);

	if (sessionid)
		applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid);

	ret = true;
out:
	if (ret)
	{
		if (!pool->stratum_url)
			pool->stratum_url = pool->sockaddr_url;
		pool->stratum_active = true;
		pool->sdiff = 1;
		if (opt_protocol)
		{
			applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d",
			       pool->pool_no, pool->nonce1, pool->n2size);
		}
		if(pool->extranonce_subscribe)
		{
			sprintf(s,"{\"id\": %d, \"method\": \"mining.extranonce.subscribe\", \"params\": []}", swork_id++);
			stratum_send(pool, s, strlen(s));
		}
	}
	else
	{
		if (recvd && !noresume)
		{
			/* Reset the sessionid used for stratum resuming in case the pool
			 * does not support it, or does not know how to respond to the
			 * presence of the sessionid parameter. */
			cg_wlock(&pool->data_lock);
			free(pool->sessionid);
			free(pool->nonce1);
			pool->sessionid = pool->nonce1 = NULL;
			cg_wunlock(&pool->data_lock);

			applog(LOG_DEBUG, "Failed to resume stratum, trying afresh");
			noresume = true;
			json_decref(val);
			goto resend;
		}
		applog(LOG_DEBUG, "Initiate stratum failed");
		if (sockd)
			suspend_stratum(pool);
	}

	json_decref(val);
	return ret;
}

/* Tear down and fully re-establish the stratum connection (subscribe,
 * authorize, optional extranonce subscribe). Marks the pool dead on
 * failure or resumed on success. */
bool restart_stratum(struct pool *pool)
{
	bool ret = false;

	if (pool->stratum_active)
		suspend_stratum(pool);
	if (!initiate_stratum(pool))
		goto out;
	if (!auth_stratum(pool))
		goto out;
	extranonce_subscribe_stratum(pool);
	ret = true;
out:
	if (!ret)
		pool_died(pool);
	else
		stratum_resumed(pool);
	return ret;
}

/* Record a device problem: stamp the time/reason and bump the matching
 * per-reason counter on the device. */
void dev_error(struct cgpu_info *dev, enum dev_reason reason)
{
	dev->device_last_not_well = time(NULL);
	dev->device_not_well_reason = reason;

	switch (reason)
	{
	case REASON_THREAD_FAIL_INIT:
		dev->thread_fail_init_count++;
		break;
	case REASON_THREAD_ZERO_HASH:
		dev->thread_zero_hash_count++;
		break;
	case REASON_THREAD_FAIL_QUEUE:
		dev->thread_fail_queue_count++;
		break;
	case REASON_DEV_SICK_IDLE_60:
		dev->dev_sick_idle_60_count++;
		break;
	case REASON_DEV_DEAD_IDLE_600:
		dev->dev_dead_idle_600_count++;
		break;
	case REASON_DEV_NOSTART:
		dev->dev_nostart_count++;
		break;
	case REASON_DEV_OVER_HEAT:
		dev->dev_over_heat_count++;
		break;
	case REASON_DEV_THERMAL_CUTOFF:
		dev->dev_thermal_cutoff_count++;
		break;
	case REASON_DEV_COMMS_ERROR:
		dev->dev_comms_error_count++;
		break;
	case REASON_DEV_THROTTLE:
		dev->dev_throttle_count++;
		break;
	}
}

/* Realloc an existing string to fit an extra string s, appending s to it.
*/ +void *realloc_strcat(char *ptr, char *s) +{ + size_t old = 0, len = strlen(s); + char *ret; + + if (!len) + return ptr; + if (ptr) + old = strlen(ptr); + + len += old + 1; + align_len(&len); + + ret = malloc(len); + if (unlikely(!ret)) + quithere(1, "Failed to malloc"); + + if (ptr) + { + sprintf(ret, "%s%s", ptr, s); + free(ptr); + } + else + sprintf(ret, "%s", s); + return ret; +} + +/* Make a text readable version of a string using 0xNN for < ' ' or > '~' + * Including 0x00 at the end + * You must free the result yourself */ +void *str_text(char *ptr) +{ + unsigned char *uptr; + char *ret, *txt; + + if (ptr == NULL) + { + ret = strdup("(null)"); + + if (unlikely(!ret)) + quithere(1, "Failed to malloc null"); + } + + uptr = (unsigned char *)ptr; + + ret = txt = malloc(strlen(ptr)*4+5); // Guaranteed >= needed + if (unlikely(!txt)) + quithere(1, "Failed to malloc txt"); + + do + { + if (*uptr < ' ' || *uptr > '~') + { + sprintf(txt, "0x%02x", *uptr); + txt += 4; + } + else + *(txt++) = *uptr; + } + while (*(uptr++)); + + *txt = '\0'; + + return ret; +} + +void RenameThread(const char* name) +{ + char buf[16]; + + snprintf(buf, sizeof(buf), "cg@%s", name); +#if defined(PR_SET_NAME) + // Only the first 15 characters are used (16 - NUL terminator) + prctl(PR_SET_NAME, buf, 0, 0, 0); +#elif (defined(__FreeBSD__) || defined(__OpenBSD__)) + pthread_set_name_np(pthread_self(), buf); +#elif defined(MAC_OSX) + pthread_setname_np(buf); +#else + // Prevent warnings + (void)buf; +#endif +} + +/* cgminer specific wrappers for true unnamed semaphore usage on platforms + * that support them and for apple which does not. We use a single byte across + * a pipe to emulate semaphore behaviour there. 
*/ +#ifdef __APPLE__ +void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) +{ + int flags, fd, i; + + if (pipe(cgsem->pipefd) == -1) + quitfrom(1, file, func, line, "Failed pipe errno=%d", errno); + + /* Make the pipes FD_CLOEXEC to allow them to close should we call + * execv on restart. */ + for (i = 0; i < 2; i++) + { + fd = cgsem->pipefd[i]; + flags = fcntl(fd, F_GETFD, 0); + flags |= FD_CLOEXEC; + if (fcntl(fd, F_SETFD, flags) == -1) + quitfrom(1, file, func, line, "Failed to fcntl errno=%d", errno); + } +} + +void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) +{ + const char buf = 1; + int ret; + +retry: + ret = write(cgsem->pipefd[1], &buf, 1); + if (unlikely(ret == 0)) + applog(LOG_WARNING, "Failed to write errno=%d" IN_FMT_FFL, errno, file, func, line); + else if (unlikely(ret < 0 && interrupted)) + goto retry; +} + +void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) +{ + char buf; + int ret; +retry: + ret = read(cgsem->pipefd[0], &buf, 1); + if (unlikely(ret == 0)) + applog(LOG_WARNING, "Failed to read errno=%d" IN_FMT_FFL, errno, file, func, line); + else if (unlikely(ret < 0 && interrupted)) + goto retry; +} + +void cgsem_destroy(cgsem_t *cgsem) +{ + close(cgsem->pipefd[1]); + close(cgsem->pipefd[0]); +} + +/* This is similar to sem_timedwait but takes a millisecond value */ +int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) +{ + struct timeval timeout; + int ret, fd; + fd_set rd; + char buf; + +retry: + fd = cgsem->pipefd[0]; + FD_ZERO(&rd); + FD_SET(fd, &rd); + ms_to_timeval(&timeout, ms); + ret = select(fd + 1, &rd, NULL, NULL, &timeout); + + if (ret > 0) + { + ret = read(fd, &buf, 1); + return 0; + } + if (likely(!ret)) + return ETIMEDOUT; + if (interrupted()) + goto retry; + quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); + /* We don't reach here */ + return 0; +} 
+ +/* Reset semaphore count back to zero */ +void cgsem_reset(cgsem_t *cgsem) +{ + int ret, fd; + fd_set rd; + char buf; + + fd = cgsem->pipefd[0]; + FD_ZERO(&rd); + FD_SET(fd, &rd); + do + { + struct timeval timeout = {0, 0}; + + ret = select(fd + 1, &rd, NULL, NULL, &timeout); + if (ret > 0) + ret = read(fd, &buf, 1); + else if (unlikely(ret < 0 && interrupted())) + ret = 1; + } + while (ret > 0); +} +#else +void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) +{ + int ret; + if ((ret = sem_init(cgsem, 0, 0))) + quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); +} + +void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) +{ + if (unlikely(sem_post(cgsem))) + quitfrom(1, file, func, line, "Failed to sem_post errno=%d cgsem=0x%p", errno, cgsem); +} + +void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) +{ +retry: + if (unlikely(sem_wait(cgsem))) + { + if (interrupted()) + goto retry; + quitfrom(1, file, func, line, "Failed to sem_wait errno=%d cgsem=0x%p", errno, cgsem); + } +} + +int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) +{ + struct timespec abs_timeout, ts_now; + struct timeval tv_now; + int ret; + + cgtime(&tv_now); + timeval_to_spec(&ts_now, &tv_now); + ms_to_timespec(&abs_timeout, ms); +retry: + timeraddspec(&abs_timeout, &ts_now); + ret = sem_timedwait(cgsem, &abs_timeout); + + if (ret) + { + if (likely(sock_timeout())) + return ETIMEDOUT; + if (interrupted()) + goto retry; + quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); + } + return 0; +} + +void cgsem_reset(cgsem_t *cgsem) +{ + int ret; + + do + { + ret = sem_trywait(cgsem); + if (unlikely(ret < 0 && interrupted())) + ret = 0; + } + while (!ret); +} + +void cgsem_destroy(cgsem_t *cgsem) +{ + sem_destroy(cgsem); +} +#endif + +/* Provide a completion_timeout helper function for unreliable 
functions that + * may die due to driver issues etc that time out if the function fails and + * can then reliably return. */ +struct cg_completion +{ + cgsem_t cgsem; + void (*fn)(void *fnarg); + void *fnarg; +}; + +void *completion_thread(void *arg) +{ + struct cg_completion *cgc = (struct cg_completion *)arg; + + pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + cgc->fn(cgc->fnarg); + cgsem_post(&cgc->cgsem); + + return NULL; +} + +bool cg_completion_timeout(void *fn, void *fnarg, int timeout) +{ + struct cg_completion *cgc; + pthread_t pthread; + bool ret = false; + + cgc = malloc(sizeof(struct cg_completion)); + if (unlikely(!cgc)) + return ret; + cgsem_init(&cgc->cgsem); + cgc->fn = fn; + cgc->fnarg = fnarg; + + pthread_create(&pthread, NULL, completion_thread, (void *)cgc); + + ret = cgsem_mswait(&cgc->cgsem, timeout); + if (!ret) + { + pthread_join(pthread, NULL); + free(cgc); + } + else + pthread_cancel(pthread); + return !ret; +} + +void _cg_memcpy(void *dest, const void *src, unsigned int n, const char *file, const char *func, const int line) +{ + if (unlikely(n < 1 || n > (1ul << 31))) + { + applog(LOG_ERR, "ERR: Asked to memcpy %u bytes from %s %s():%d", + n, file, func, line); + return; + } + memcpy(dest, src, n); +} + +int cg_timeval_subtract(struct timeval* result, struct timeval* x, struct timeval* y) +{ + int nsec = 0; + if(x->tv_sec > y->tv_sec) + return -1; + + if((x->tv_sec == y->tv_sec) && (x->tv_usec > y->tv_usec)) + return -1; + + result->tv_sec = (y->tv_sec - x->tv_sec); + result->tv_usec = (y->tv_usec - x->tv_usec); + + if(result->tv_usec < 0) + { + result->tv_sec--; + result->tv_usec += 1000000; + } + return 0; +} + +void rev(unsigned char *s, size_t l) +{ + size_t i, j; + unsigned char t; + + for (i = 0, j = l - 1; i < j; i++, j--) + { + t = s[i]; + s[i] = s[j]; + s[j] = t; + } +} + +int check_asicnum(int asic_num, unsigned char nonce) +{ + switch(asic_num) + { + case 1: + return 1; + case 2: + switch(nonce & 0x80) + { + case 
0x80: + return 2; + default: + return 1; + } + case 4: + switch(nonce & 0xC0) + { + case 0xC0: + return 4; + case 0x80: + return 3; + case 0x40: + return 2; + default: + return 1; + } + case 8: + switch(nonce & 0xE0) + { + case 0xE0: + return 8; + case 0xC0: + return 7; + case 0xA0: + return 6; + case 0x80: + return 5; + case 0x60: + return 4; + case 0x40: + return 3; + case 0x20: + return 2; + default : + return 1; + } + case 16: + switch(nonce & 0xF0) + { + case 0xF0: + return 16; + case 0xE0: + return 15; + case 0xD0: + return 14; + case 0xC0: + return 13; + case 0xB0: + return 12; + case 0xA0: + return 11; + case 0x90: + return 10; + case 0x80: + return 9; + case 0x70: + return 8; + case 0x60: + return 7; + case 0x50: + return 6; + case 0x40: + return 5; + case 0x30: + return 4; + case 0x20: + return 3; + case 0x10: + return 2; + default : + return 1; + } + case 32: + switch(nonce & 0xF8) + { + case 0xF8: + return 32; + case 0xF0: + return 31; + case 0xE8: + return 30; + case 0xE0: + return 29; + case 0xD8: + return 28; + case 0xD0: + return 27; + case 0xC8: + return 26; + case 0xC0: + return 25; + case 0xB8: + return 24; + case 0xB0: + return 23; + case 0xA8: + return 22; + case 0xA0: + return 21; + case 0x98: + return 20; + case 0x90: + return 19; + case 0x88: + return 18; + case 0x80: + return 17; + case 0x78: + return 16; + case 0x70: + return 15; + case 0x68: + return 14; + case 0x60: + return 13; + case 0x58: + return 12; + case 0x50: + return 11; + case 0x48: + return 10; + case 0x40: + return 9; + case 0x38: + return 8; + case 0x30: + return 7; + case 0x28: + return 6; + case 0x20: + return 5; + case 0x18: + return 4; + case 0x10: + return 3; + case 0x08: + return 2; + default : + return 1; + } + case 64: + switch(nonce & 0xFC) + { + case 0xFC: + return 64; + case 0xF8: + return 63; + case 0xF4: + return 62; + case 0xF0: + return 61; + case 0xEC: + return 60; + case 0xE8: + return 59; + case 0xE4: + return 58; + case 0xE0: + return 57; + case 0xDC: + 
return 56; + case 0xD8: + return 55; + case 0xD4: + return 54; + case 0xD0: + return 53; + case 0xCC: + return 52; + case 0xC8: + return 51; + case 0xC4: + return 50; + case 0xC0: + return 49; + case 0xBC: + return 48; + case 0xB8: + return 47; + case 0xB4: + return 46; + case 0xB0: + return 45; + case 0xAC: + return 44; + case 0xA8: + return 43; + case 0xA4: + return 42; + case 0xA0: + return 41; + case 0x9C: + return 40; + case 0x98: + return 39; + case 0x94: + return 38; + case 0x90: + return 37; + case 0x8C: + return 36; + case 0x88: + return 35; + case 0x84: + return 34; + case 0x80: + return 33; + case 0x7C: + return 32; + case 0x78: + return 31; + case 0x74: + return 30; + case 0x70: + return 29; + case 0x6C: + return 28; + case 0x68: + return 27; + case 0x64: + return 26; + case 0x60: + return 25; + case 0x5C: + return 24; + case 0x58: + return 23; + case 0x54: + return 22; + case 0x50: + return 21; + case 0x4C: + return 20; + case 0x48: + return 19; + case 0x44: + return 18; + case 0x40: + return 17; + case 0x3C: + return 16; + case 0x38: + return 15; + case 0x34: + return 14; + case 0x30: + return 13; + case 0x2C: + return 12; + case 0x28: + return 11; + case 0x24: + return 10; + case 0x20: + return 9; + case 0x1C: + return 8; + case 0x18: + return 7; + case 0x14: + return 6; + case 0x10: + return 5; + case 0x0C: + return 4; + case 0x08: + return 3; + case 0x04: + return 2; + default : + return 1; + } + default: + return 0; + } +} + +void cg_logwork(struct work *work, unsigned char *nonce_bin, bool ok) +{ + if(opt_logwork_path) + { + char szmsg[1024] = {0}; + unsigned char midstate_tmp[32] = {0}; + unsigned char data_tmp[32] = {0}; + unsigned char hash_tmp[32] = {0}; + char * szworkdata = NULL; + char * szmidstate = NULL; + char * szdata = NULL; + char * sznonce4 = NULL; + char * sznonce5 = NULL; + char * szhash = NULL; + int asicnum = 0; + uint64_t worksharediff = 0; + memcpy(midstate_tmp, work->midstate, 32); + memcpy(data_tmp, work->data+64, 12); + 
memcpy(hash_tmp, work->hash, 32); + rev((void *)midstate_tmp, 32); + rev((void *)data_tmp, 12); + rev((void *)hash_tmp, 32); + szworkdata = bin2hex((void *)work->data, 128); + szmidstate = bin2hex((void *)midstate_tmp, 32); + szdata = bin2hex((void *)data_tmp, 12); + sznonce4 = bin2hex((void *)nonce_bin, 4); + sznonce5 = bin2hex((void *)nonce_bin, 5); + szhash = bin2hex((void *)hash_tmp, 32); + worksharediff = share_ndiff(work); + sprintf(szmsg, "%s %08x midstate %s data %s nonce %s hash %s diff %I64d", ok?"o":"x", work->id, szmidstate, szdata, sznonce5, szhash, worksharediff); + if(strcmp(opt_logwork_path, "screen") == 0) + { + applog(LOG_ERR, szmsg); + } + else + { + applog(LOG_ERR, szmsg); + if(g_logwork_file) + { + sprintf(szmsg, "%s %08x work %s midstate %s data %s nonce %s hash %s diff %I64d", ok?"o":"x", work->id, szworkdata, szmidstate, szdata, sznonce5, szhash, worksharediff); + + fwrite(szmsg, strlen(szmsg), 1, g_logwork_file); + fwrite("\n", 1, 1, g_logwork_file); + fflush(g_logwork_file); + + if(ok) + { + if(g_logwork_asicnum == 1) + { + sprintf(szmsg, "midstate %s data %s nonce %s hash %s", szmidstate, szdata, sznonce4, szhash); + fwrite(szmsg, strlen(szmsg), 1, g_logwork_files[0]); + fwrite("\n", 1, 1, g_logwork_files[0]); + fflush(g_logwork_files[0]); + } + else if(g_logwork_asicnum == 32 || g_logwork_asicnum == 64) + { + sprintf(szmsg, "midstate %s data %s nonce %s hash %s", szmidstate, szdata, sznonce4, szhash); + asicnum = check_asicnum(g_logwork_asicnum, nonce_bin[0]); + fwrite(szmsg, strlen(szmsg), 1, g_logwork_files[asicnum]); + fwrite("\n", 1, 1, g_logwork_files[asicnum]); + fflush(g_logwork_files[asicnum]); + } + + if(opt_logwork_diff) + { + int diffnum = 0; + uint64_t difftmp = worksharediff; + while(1) + { + difftmp = difftmp >> 1; + if(difftmp > 0) + { + diffnum++; + if(diffnum >= 64) + { + break; + } + } + else + { + break; + } + } + applog(LOG_DEBUG, "work diff %I64d diffnum %d", worksharediff, diffnum); + sprintf(szmsg, "midstate %s 
data %s nonce %s hash %s", szmidstate, szdata, sznonce4, szhash); + fwrite(szmsg, strlen(szmsg), 1, g_logwork_diffs[diffnum]); + fwrite("\n", 1, 1, g_logwork_diffs[diffnum]); + fflush(g_logwork_diffs[diffnum]); + } + } + } + } + if(szworkdata) free(szworkdata); + if(szmidstate) free(szmidstate); + if(szdata) free(szdata); + if(sznonce4) free(sznonce4); + if(sznonce5) free(sznonce5); + if(szhash) free(szhash); + } +} + +void cg_logwork_uint32(struct work *work, uint32_t nonce, bool ok) +{ + if(opt_logwork_path) + { + unsigned char nonce_bin[5] = {0}; + memcpy(nonce_bin, &nonce, 4); + cg_logwork(work, nonce_bin, ok); + } +} diff --git a/util.h b/util.h new file mode 100644 index 0000000..14336ac --- /dev/null +++ b/util.h @@ -0,0 +1,181 @@ +#ifndef __UTIL_H__ +#define __UTIL_H__ + +#include + +#if defined(unix) || defined(__APPLE__) + #include + #include + #include + #include + + #define SOCKETTYPE long + #define SOCKETFAIL(a) ((a) < 0) + #define INVSOCK -1 + #define INVINETADDR -1 + #define CLOSESOCKET close + #define INET_PTON inet_pton + + #define SOCKERRMSG strerror(errno) + static inline bool sock_blocks(void) + { + return (errno == EAGAIN || errno == EWOULDBLOCK); + } + static inline bool sock_timeout(void) + { + return (errno == ETIMEDOUT); + } + static inline bool interrupted(void) + { + return (errno == EINTR); + } +#elif defined WIN32 + #include + #include + + #define SOCKETTYPE SOCKET + #define SOCKETFAIL(a) ((int)(a) == SOCKET_ERROR) + #define INVSOCK INVALID_SOCKET + #define INVINETADDR INADDR_NONE + #define CLOSESOCKET closesocket + + int Inet_Pton(int af, const char *src, void *dst); + #define INET_PTON Inet_Pton + + extern char *WSAErrorMsg(void); + #define SOCKERRMSG WSAErrorMsg() + + /* Check for windows variants of the errors as well as when ming + * decides to wrap the error into the errno equivalent. 
*/ + static inline bool sock_blocks(void) + { + return (WSAGetLastError() == WSAEWOULDBLOCK || errno == EAGAIN); + } + static inline bool sock_timeout(void) + { + return (WSAGetLastError() == WSAETIMEDOUT || errno == ETIMEDOUT); + } + static inline bool interrupted(void) + { + return (WSAGetLastError() == WSAEINTR || errno == EINTR); + } + #ifndef SHUT_RDWR + #define SHUT_RDWR SD_BOTH + #endif + + #ifndef in_addr_t + #define in_addr_t uint32_t + #endif +#endif + +#define JSON_LOADS(str, err_ptr) json_loads((str), 0, (err_ptr)) + +#ifdef HAVE_LIBCURL +#include +typedef curl_proxytype proxytypes_t; +#else +typedef int proxytypes_t; +#endif /* HAVE_LIBCURL */ + +/* cgminer locks, a write biased variant of rwlocks */ +struct cglock { + pthread_mutex_t mutex; + pthread_rwlock_t rwlock; +}; + +typedef struct cglock cglock_t; + +/* cgminer specific unnamed semaphore implementations to cope with osx not + * implementing them. */ +#ifdef __APPLE__ +struct cgsem { + int pipefd[2]; +}; + +typedef struct cgsem cgsem_t; +#else +typedef sem_t cgsem_t; +#endif +#ifdef WIN32 +typedef LARGE_INTEGER cgtimer_t; +#else +typedef struct timespec cgtimer_t; +#endif + +int no_yield(void); +int (*selective_yield)(void); +struct thr_info; +struct pool; +enum dev_reason; +struct cgpu_info; +void b58tobin(unsigned char *b58bin, const char *b58); +void address_to_pubkeyhash(unsigned char *pkh, const char *addr); +int ser_number(unsigned char *s, int32_t val); +unsigned char *ser_string(char *s, int *slen); +int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg); +void thr_info_cancel(struct thr_info *thr); +void cgtime(struct timeval *tv); +void subtime(struct timeval *a, struct timeval *b); +void addtime(struct timeval *a, struct timeval *b); +bool time_more(struct timeval *a, struct timeval *b); +bool time_less(struct timeval *a, struct timeval *b); +void copy_time(struct timeval *dest, const struct timeval *src); +void timespec_to_val(struct 
timeval *val, const struct timespec *spec); +void timeval_to_spec(struct timespec *spec, const struct timeval *val); +void us_to_timeval(struct timeval *val, int64_t us); +void us_to_timespec(struct timespec *spec, int64_t us); +void ms_to_timespec(struct timespec *spec, int64_t ms); +void timeraddspec(struct timespec *a, const struct timespec *b); +char *Strcasestr(char *haystack, const char *needle); +char *Strsep(char **stringp, const char *delim); +void cgsleep_ms(int ms); +void cgsleep_us(int64_t us); +void cgtimer_time(cgtimer_t *ts_start); +#define cgsleep_prepare_r(ts_start) cgtimer_time(ts_start) +void cgsleep_ms_r(cgtimer_t *ts_start, int ms); +void cgsleep_us_r(cgtimer_t *ts_start, int64_t us); +int cgtimer_to_ms(cgtimer_t *cgt); +void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res); +double us_tdiff(struct timeval *end, struct timeval *start); +int ms_tdiff(struct timeval *end, struct timeval *start); +double tdiff(struct timeval *end, struct timeval *start); +bool stratum_send(struct pool *pool, char *s, ssize_t len); +bool sock_full(struct pool *pool); +void _recalloc(void **ptr, size_t old, size_t new, const char *file, const char *func, const int line); +#define recalloc(ptr, old, new) _recalloc((void *)&(ptr), old, new, __FILE__, __func__, __LINE__) +char *recv_line(struct pool *pool); +bool parse_method(struct pool *pool, char *s); +void check_extranonce_option(struct pool *pool, char * url); +bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port); +void extranonce_subscribe_stratum(struct pool *pool); +bool auth_stratum(struct pool *pool); +bool initiate_stratum(struct pool *pool); +bool restart_stratum(struct pool *pool); +void suspend_stratum(struct pool *pool); +void dev_error(struct cgpu_info *dev, enum dev_reason reason); +void *realloc_strcat(char *ptr, char *s); +void *str_text(char *ptr); +void RenameThread(const char* name); +void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int 
line); +void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line); +void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line); +int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line); +void cgsem_reset(cgsem_t *cgsem); +void cgsem_destroy(cgsem_t *cgsem); +bool cg_completion_timeout(void *fn, void *fnarg, int timeout); +void _cg_memcpy(void *dest, const void *src, unsigned int n, const char *file, const char *func, const int line); + +#define cgsem_init(_sem) _cgsem_init(_sem, __FILE__, __func__, __LINE__) +#define cgsem_post(_sem) _cgsem_post(_sem, __FILE__, __func__, __LINE__) +#define cgsem_wait(_sem) _cgsem_wait(_sem, __FILE__, __func__, __LINE__) +#define cgsem_mswait(_sem, _timeout) _cgsem_mswait(_sem, _timeout, __FILE__, __func__, __LINE__) +#define cg_memcpy(dest, src, n) _cg_memcpy(dest, src, n, __FILE__, __func__, __LINE__) + +/* Align a size_t to 4 byte boundaries for fussy arches */ +static inline void align_len(size_t *len) +{ + if (*len % 4) + *len += 4 - (*len % 4); +} + +#endif /* __UTIL_H__ */ diff --git a/warn-on-use.h b/warn-on-use.h new file mode 100644 index 0000000..2cdeec3 --- /dev/null +++ b/warn-on-use.h @@ -0,0 +1,109 @@ +/* A C macro for emitting warnings if a function is used. + Copyright (C) 2010-2011 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published + by the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
*/ + +/* _GL_WARN_ON_USE (function, "literal string") issues a declaration + for FUNCTION which will then trigger a compiler warning containing + the text of "literal string" anywhere that function is called, if + supported by the compiler. If the compiler does not support this + feature, the macro expands to an unused extern declaration. + + This macro is useful for marking a function as a potential + portability trap, with the intent that "literal string" include + instructions on the replacement function that should be used + instead. However, one of the reasons that a function is a + portability trap is if it has the wrong signature. Declaring + FUNCTION with a different signature in C is a compilation error, so + this macro must use the same type as any existing declaration so + that programs that avoid the problematic FUNCTION do not fail to + compile merely because they included a header that poisoned the + function. But this implies that _GL_WARN_ON_USE is only safe to + use if FUNCTION is known to already have a declaration. Use of + this macro implies that there must not be any other macro hiding + the declaration of FUNCTION; but undefining FUNCTION first is part + of the poisoning process anyway (although for symbols that are + provided only via a macro, the result is a compilation error rather + than a warning containing "literal string"). Also note that in + C++, it is only safe to use if FUNCTION has no overloads. + + For an example, it is possible to poison 'getline' by: + - adding a call to gl_WARN_ON_USE_PREPARE([[#include ]], + [getline]) in configure.ac, which potentially defines + HAVE_RAW_DECL_GETLINE + - adding this code to a header that wraps the system : + #undef getline + #if HAVE_RAW_DECL_GETLINE + _GL_WARN_ON_USE (getline, "getline is required by POSIX 2008, but" + "not universally present; use the gnulib module getline"); + #endif + + It is not possible to directly poison global variables. 
But it is + possible to write a wrapper accessor function, and poison that + (less common usage, like &environ, will cause a compilation error + rather than issue the nice warning, but the end result of informing + the developer about their portability problem is still achieved): + #if HAVE_RAW_DECL_ENVIRON + static inline char ***rpl_environ (void) { return &environ; } + _GL_WARN_ON_USE (rpl_environ, "environ is not always properly declared"); + # undef environ + # define environ (*rpl_environ ()) + #endif + */ +#ifndef _GL_WARN_ON_USE + +# if 4 < __GNUC__ || (__GNUC__ == 4 && 3 <= __GNUC_MINOR__) +/* A compiler attribute is available in gcc versions 4.3.0 and later. */ +# define _GL_WARN_ON_USE(function, message) \ +extern __typeof__ (function) function __attribute__ ((__warning__ (message))) +# elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING +/* Verify the existence of the function. */ +# define _GL_WARN_ON_USE(function, message) \ +extern __typeof__ (function) function +# else /* Unsupported. */ +# define _GL_WARN_ON_USE(function, message) \ +_GL_WARN_EXTERN_C int _gl_warn_on_use +# endif +#endif + +/* _GL_WARN_ON_USE_CXX (function, rettype, parameters_and_attributes, "string") + is like _GL_WARN_ON_USE (function, "string"), except that the function is + declared with the given prototype, consisting of return type, parameters, + and attributes. + This variant is useful for overloaded functions in C++. _GL_WARN_ON_USE does + not work in this case. */ +#ifndef _GL_WARN_ON_USE_CXX +# if 4 < __GNUC__ || (__GNUC__ == 4 && 3 <= __GNUC_MINOR__) +# define _GL_WARN_ON_USE_CXX(function,rettype,parameters_and_attributes,msg) \ +extern rettype function parameters_and_attributes \ + __attribute__ ((__warning__ (msg))) +# elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING +/* Verify the existence of the function. */ +# define _GL_WARN_ON_USE_CXX(function,rettype,parameters_and_attributes,msg) \ +extern rettype function parameters_and_attributes +# else /* Unsupported. 
*/ +# define _GL_WARN_ON_USE_CXX(function,rettype,parameters_and_attributes,msg) \ +_GL_WARN_EXTERN_C int _gl_warn_on_use +# endif +#endif + +/* _GL_WARN_EXTERN_C declaration; + performs the declaration with C linkage. */ +#ifndef _GL_WARN_EXTERN_C +# if defined __cplusplus +# define _GL_WARN_EXTERN_C extern "C" +# else +# define _GL_WARN_EXTERN_C extern +# endif +#endif diff --git a/windows-build.txt b/windows-build.txt new file mode 100644 index 0000000..2e814d2 --- /dev/null +++ b/windows-build.txt @@ -0,0 +1,348 @@ +###################################################################################### +# # +# Native WIN32 setup and build instructions (on mingw32/Windows): # +# # +###################################################################################### + +(See bottom of file for steps to cross-build for Win32 from Linux.) + +************************************************************************************** +* Introduction * +************************************************************************************** +The following instructions have been tested on both Windows 7 and Windows XP. +Most of what is described below (copying files, downloading files, etc.) can be done +directly in the MinGW MSYS shell; these instructions do not do so because package +versions and links change over time. The best way is to use your browser, go to the +links directly, and see for yourself which versions you want to install. +Winrar was used to do the extracting of archive files in the making of this guide. + +If you think that this documentation was helpful and you wish to donate, you can +do so at the following address. 
12KaKtrK52iQjPdtsJq7fJ7smC32tXWbWr + +************************************************************************************** +* A tip that might help you along the way * +************************************************************************************** +Enable "QuickEdit Mode" in your Command Prompt Window or MinGW Command Prompt +Window (No need to go into the context menu to choose edit-mark/copy/paste): +Right-click on the title bar and click Properties. Under the Options tab, check +the box for "QuickEdit Mode". Alternately, if you want this change to be +permanent on all of your Command Prompt Windows; you can click Defaults instead +of Properties as described above. Now you can drag and select text you want to +copy, right-click to copy the text to the clipboard and right-click once again to +paste it at the desired location. You could for example, copy some text from this +document to the clipboard and right click in your Command Prompt Window to paste +what you copied. + +************************************************************************************** +* Install mingw32 * +************************************************************************************** +Go to this url ==> http://www.mingw.org/wiki/Getting_Started +Click the link that says "Download and run the latest mingw-get-inst version." +Download and run the latest file. Install MinGW in the default directory. +(I downloaded the one labeled "mingw-get-inst-20120426" - note that this could +be a different version later.) +Make sure to check the option for "Download latest repository catalogs". +I just selected all the check boxes (excluding "Fortran Compiler") so that everything +was installed. 
+ +************************************************************************************** +* Run the MSYS shell for the first time to create your user directory * +************************************************************************************** +(Start Icon/keyboard key ==> All Programs ==> MinGW ==> MinGW Shell). +This will create your user directory for you. + +************************************************************************************** +* Install libpdcurses * +************************************************************************************** +Type the lines below to install libpdcurses. +mingw-get install mingw32-libpdcurses +mingw-get install mingw32-pdcurses +Ctrl-D or typing "logout" and pressing the enter key should get you out of the +window. + +************************************************************************************** +* Copy CGMiner source to your MSYS working directory * +************************************************************************************** +Copy CGMiner source code directory into: +\MinGW\msys\1.0\home\(folder with your user name) + +************************************************************************************** +* Install AMD APP SDK, latest version (only if you want GPU mining) * +************************************************************************************** +Note: You do not need to install the AMD APP SDK if you are only using Nvidia GPU's +Go to this url for the latest AMD APP SDK: + http://developer.amd.com/tools/heterogeneous-computing/amd-accelerated-parallel-processing-app-sdk/downloads/ +Go to this url for legacy AMD APP SDK's: + http://developer.amd.com/tools/heterogeneous-computing/amd-accelerated-parallel-processing-app-sdk/downloads/download-archive/ +Download and install whichever version you like best. 
+Copy the folders in \Program Files (x86)\AMD APP\include to \MinGW\include +Copy \Program Files (x86)\AMD APP\lib\x86\libOpenCL.a to \MinGW\lib +Note: If you are on a 32 bit version of windows "Program Files (x86)" will be +"Program Files". +Note2: If you update your APP SDK later you might want to recopy the above files + +************************************************************************************** +* Install AMD ADL SDK, latest version (only if you want GPU monitoring) * +************************************************************************************** +Note: You do not need to install the AMD ADL SDK if you are only using Nvidia GPU's +Go to this url ==> http://developer.amd.com/tools/graphics-development/display-library-adl-sdk/ +Download and unzip the file you downloaded. +Pull adl_defines.h, adl_sdk.h, and adl_structures.h out of the include folder +Put those files into the ADL_SDK folder in your source tree as shown below. +\MinGW\msys\1.0\home\(folder with your user name)\cgminer-x.x.x\ADL_SDK + +************************************************************************************** +* Install GTK-WIN, required for Pkg-config in the next step * +************************************************************************************** +Go to this url ==> http://sourceforge.net/projects/gtk-win/ +Download the file. +After you have downloaded the file Double click/run it and this will install GTK+ +I chose all the selection boxes when I installed. +Copy libglib-2.0-0.dll and intl.dll from \Program Files (x86)\gtk2-runtime\bin to +\MinGW\bin +Note: If you are on a 32 bit version of windows "Program Files (x86)" will be +"Program Files". + +************************************************************************************** +* Install pkg-config * +************************************************************************************** +Go to this url ==> http://www.gtk.org/download/win32.php +Scroll down to where it shows pkg-cfg. 
+Download the file from the tool link. Extract "pkg-config.exe" from bin and place in +your \MinGW\bin directory. +Download the file from the "Dev" link. Extract "pkg.m4" from share\aclocal and place +in your \MingW\share\aclocal directory. + +************************************************************************************** +* Install libcurl * +************************************************************************************** +Go to this url ==> http://curl.haxx.se/download.html#Win32 +At the section where it says "Win32 - Generic", Click on the link that indicates +Win32 2000.XP 7.27.0 libcurl SSL and download it. +The one I downloaded may not be current for you. Choose the latest. +Extract the files that are in the zip (bin, include, and lib) to their respective +locations in MinGW (\MinGW\bin, \MinGW\include, and \MinGW\lib). +Edit the file \MinGW\lib\pkgconfig\libcurl.pc and change "-lcurl" to +"-lcurl -lcurldll". +Ref. http://old.nabble.com/gcc-working-with-libcurl-td20506927.html + +************************************************************************************** +* Build cgminer.exe * +************************************************************************************** +Run the MinGW MSYS shell +(Start Icon/keyboard key ==> All Programs ==> MinGW ==> MinGW Shell). +Change the working directory to your CGMiner project folder. +Example: cd cgminer-2.1.2 [Enter Key] if you are unsure then type "ls -la" +Another way is to type "cd cg" and then press the tab key; It will auto fill. +Type the lines below one at a time. Look for problems after each one before going on +to the next. 
+ + adl.sh (optional - see below) + autoreconf -fvi + CFLAGS="-O2 -msse2" ./configure (additional config options, see below) + make + strip cgminer.exe <== only do this if you are not compiling for debugging + +************************************************************************************** +* Copy files to a build directory/folder * +************************************************************************************** +Make a directory and copy the following files into it. This will be your CGMiner +Folder that you use for mining. Remember the .cl filenames could change on later +releases. If you installed a different version of libcurl then some of those dll's +may be different as well. + cgminer.exe from \MinGW\msys\1.0\home\(username)\cgminer-x.x.x + *.cl from \MinGW\msys\1.0\home\(username)\cgminer-x.x.x + README from \MinGW\msys\1.0\home\(username)\cgminer-x.x.x + libcurl.dll from \MinGW\bin + libidn-11.dll from \MinGW\bin + libeay32.dll from \MinGW\bin + ssleay32.dll from \MinGW\bin + libpdcurses.dll from \MinGW\bin + pthreadGC2.dll from \MinGW\bin + +************************************************************************************** +* Optional - Install Git into MinGW/MSYS * +************************************************************************************** +Go to this url ==> http://code.google.com/p/msysgit/ +Click on the Downloads tab. +Download the latest "Portable" git archive. +Extract the git*.exe files from the bin folder and put them into \MinGW\bin. +Extract the share\git-core folder and place it into \MinGW\share. +After the previous step you should have a folder called \MinGW\share\git-core. +To test if it is working, open a MinGW shell and type the following: + git config -–global core.autocrlf false (note: one time run only) + git clone git://github.com/ckolivas/cgminer.git + +If you simply just want to update the source after you have already cloned, type: + git pull +"git pull" did not work for me. 
Try the following which does the same thing: + git fetch && git merge FETCH_HEAD + +Now you can get the latest source directly from github. + +************************************************************************************** +* Optional - Make a .sh file to automate copying over ADL files * +************************************************************************************** +Make a folder/directory in your home folder and name it ADL_SDK. + (ref: \MinGW\msys\1.0\home\(folder with your user name)\ADL_SDK) +Copy the ADL .h files into that folder/directory. +Open your favorite text editor and type the following into it. + cp -av ../ADL_SDK/*.h ADL_SDK +Save the file as "adl.sh" and then place the file into "\MinGW\msys\1.0\bin". +From now on when your current working directory is the cgminer source directory +You can simply type "adl.sh" and it will place the ADL header files into place +For you. Make sure you never remove the ADL_SDK folder from your home folder. + +************************************************************************************** +* Optional - Install libusb if you need auto USB device detection; required for Ztex * +************************************************************************************** +Go to this url ==> http://git.libusb.org/?p=libusb.git;a=snapshot;h=master;sf=zip +save the file to your local storage. Open the file and copy the libusb* folder to +\MinGW\msys\1.0\home\(your user directory/folder). +Or if you do not want to download the file directly and would like to use git then +Type the following from the MSYS shell in your home folder. +git clone git://git.libusb.org/libusb.git + +Run the MinGW MSYS shell +(Start Icon/keyboard key ==> All Programs ==> MinGW ==> MinGW Shell). +Change the working directory to your libusb project folder. +Example: cd libusb-something [Enter Key] if you are unsure then type "ls -la" +Another way is to type "cd libusb" and then press the tab key; It will auto fill. 
+Type the lines below one at a time. Look for problems after each one before going on +to the next. + +./autogen.sh --disable-debug-log --prefix=/MinGW +make +make install + +You may now exit the MSYS shell. +Ctrl-D or typing "logout" and pressing the enter key should get you out of the +window. + +You will have to copy "libusb-1.0.dll" to your working cgminer binary directory. +You will find "libusb-1.0.dll" in the \MinGW\bin directory/folder. + +Use this method if libusb does not work for you on Ztex. Once someone lets us know +Libusb works instead of libusbx then we will remove the section below this line. +Run the MSYS shell and change into the libusb folder as above. +Type ==> make uninstall +Go to this url ==> http://libusbx.org/ +Click on the "Downloads" tab. +Click on "releases". +Click on the latest version. I downloaded 1.0.14; yours may be newer. +Do not download from the link that says "Looking for the latest version?". +Click on "Windows" +Click on the file and download it. I downloaded libusbx-1.0.12-win.7z. +Extract the the following from the file and place in where directed. +Copy libusb.h from include\libusbx-1.0 to \MinGW\include\libusb-1.0\libusb.h +Copy contents of MinGW32\static \MinGW\lib +Copy contents of MinGW32\dll to \MinGW\lib +You will have to copy "libusb-1.0.dll" to your working cgminer binary directory. 
+ +************************************************************************************** +* Some ./configure options * +************************************************************************************** +--enable-cpumining Build with cpu mining support(default disabled) +--disable-opencl Override detection and disable building with opencl +--disable-adl Override detection and disable building with adl +--enable-bitforce Compile support for BitForce FPGAs(default disabled) +--enable-icarus Compile support for Icarus Board(default disabled) +--enable-bmsc Compile support for BitMain Single Chain(default disabled) +--enable-bitmain Compile support for BitMain Multi Chain(default disabled) +--enable-modminer Compile support for ModMiner FPGAs(default disabled) +--enable-ztex Compile support for Ztex Board(default disabled) +--enable-scrypt Compile support for scrypt litecoin mining (default disabled) +--without-curses Compile support for curses TUI (default enabled) +--without-libudev Autodetect FPGAs using libudev (default enabled) + + + +###################################################################################### +# # +# Cross-compiling for Windows from Linux # +# # +###################################################################################### + +It is possible to cross-compile Windows binaries from Linux. The +process is a bit different to the native steps shown above (it is also +possible to use wine and the native steps, but this is more messing +around, very slow, and not advisable.) + +** Install mingw cross compiler + +On Ubuntu/Debian: + +sudo apt-get install mingw32 + +** create a directory to hold our cross-library dependencies + +We'll create a directory outside the source tree to hold non-system +libraries we depend on. We could put these in +/usr/i586-mingw32msvc/lib or anywhere else, instead (though keeping it +outside /usr means we can set it up without root privileges.) 
+ +IMPORTANT: If you put this directory inside your cgminer directory, +remember 'make distclean' may delete it! + +mkdir -p ../cgminer-win32-deps/lib +cd ../cgminer-win32-deps +mkdir include +mkdir bin + +NB: All following steps assume you are in the "cgminer-win32-deps" directory. Adjust as necessary. + +** pdcurses + +wget http://internode.dl.sourceforge.net/project/pdcurses/pdcurses/3.4/pdc34dllw.zip +unzip /home/gus/Downloads/pdc34dllw.zip +mv *.h include/ +mv pdcurses.lib lib/ +mv pdcurses.dll bin/ + +** pthreads-w32 + +(NB: I found pthreads-w32 2.9.1 doesn't seem to work properly, transfers time out early due to sem_timedwait exiting immediately(?)) + +wget -O lib/libpthread.a ftp://sourceware.org/pub/pthreads-win32/prebuilt-dll-2-8-0-release/lib/libpthreadGC2.a +wget -O include/pthread.h ftp://sourceware.org/pub/pthreads-win32/prebuilt-dll-2-8-0-release/include/pthread.h +wget -O include/sched.h ftp://sourceware.org/pub/pthreads-win32/prebuilt-dll-2-8-0-release/include/sched.h +wget -O include/semaphore.h ftp://sourceware.org/pub/pthreads-win32/prebuilt-dll-2-8-0-release/include/semaphore.h +wget -O lib/libpthread.a ftp://sourceware.org/pub/pthreads-win32/prebuilt-dll-2-8-0-release/lib/libpthreadGC2.a +wget -O bin/pthreadGC2.dll ftp://sourceware.org/pub/pthreads-win32/prebuilt-dll-2-8-0-release/lib/pthreadGC2.dll + +** libcurl + +wget http://curl.haxx.se/gknw.net/7.33.0/dist-w32/curl-7.33.0-devel-mingw32.zip +unzip curl-7.33.0-devel-mingw32.zip +mv curl-7.33.0-devel-mingw32/include/* include/ +mv curl-7.33.0-devel-mingw32/lib/* lib/ +mv curl-7.33.0-devel-mingw32/bin/* bin/ +rm -rf curl-7.33.0-devel-mingw32 + + +** clean up + +rm *.zip + + +** Building cgminer + +Below assumes you're building in a "build-win32" or similar directory +inside the cgminer directory. Fix up the -I and -L paths appropriately +if you're building in-tree or someplace else. 
+ +Configure command: + +CPPFLAGS="-I`pwd`/../../cgminer-win32-deps/include" LDFLAGS="-L`pwd`/../../cgminer-win32-deps/lib -lcurldll" ../autogen.sh --prefix=/usr/local/i586-mingw32 --host=i586-mingw32msvc --build=i686-linux + +^^^ Plus whatever configure arguments you want to add. Note the paths + to cgminer-win32-deps that you may need to change. + +And make: + +make + +After cgminer builds, the next steps are the same as for native +building as given under "Copy files to a build directory/folder" +(DLLs can all be found in the cgminer-win32-deps/bin directory.) +