From cf49e1e5226f8e09f025963c0b7e2b051b3ecdf6 Mon Sep 17 00:00:00 2001
From: Lukas Fülling
Date: Thu, 18 Aug 2016 11:44:27 +0200
Subject: [PATCH 1/4] add gpu selection for opencl/cuda use (which doesn't actually fix #30)

---
 train.lua | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/train.lua b/train.lua
index 2029370..debb3bb 100644
--- a/train.lua
+++ b/train.lua
@@ -16,6 +16,7 @@ cmd:option('--minLR', 0.00001, 'minimum learning rate')
 cmd:option('--saturateEpoch', 20, 'epoch at which linear decayed LR will reach minLR')
 cmd:option('--maxEpoch', 50, 'maximum number of epochs to run')
 cmd:option('--batchSize', 10, 'mini-batch size')
+cmd:option('--gpu', 0, 'Zero-indexed ID of the GPU to use; for CPU mode set --gpu = -1')
 cmd:text()

 options = cmd:parse(arg)
@@ -55,10 +56,12 @@ local minMeanError = nil
 if options.cuda then
   require 'cutorch'
   require 'cunn'
+  cutorch.setDevice(options.gpu + 1)
   model:cuda()
 elseif options.opencl then
   require 'cltorch'
   require 'clnn'
+  cltorch.setDevice(options.gpu + 1)
   model:cl()
 end

@@ -125,7 +128,7 @@ for epoch = 1, options.maxEpoch do
   for i=1, dataset.examplesCount/options.batchSize do
     collectgarbage()
-
+    print(optimState)
     local _,tloss = optim.adam(feval, params, optimState)
     err = tloss[1] -- optim returns a list

From e35ce723bbfcc675a176a1bbad32bb8bffc9a137 Mon Sep 17 00:00:00 2001
From: Lukas Fülling
Date: Mon, 22 Aug 2016 09:53:21 +0200
Subject: [PATCH 2/4] remove debug print

---
 train.lua | 1 -
 1 file changed, 1 deletion(-)

diff --git a/train.lua b/train.lua
index debb3bb..8bc94af 100644
--- a/train.lua
+++ b/train.lua
@@ -128,7 +128,6 @@ for epoch = 1, options.maxEpoch do
   for i=1, dataset.examplesCount/options.batchSize do
     collectgarbage()
-    print(optimState)
     local _,tloss = optim.adam(feval, params, optimState)
     err = tloss[1] -- optim returns a list

From fef13dd457cc7e90c376f6e1b366998251fdd3e2 Mon Sep 17 00:00:00 2001
From: Lukas Fülling
Date: Mon, 22 Aug 2016 09:54:36 +0200
Subject: [PATCH 3/4] fix comment

---
 train.lua | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.lua b/train.lua
index 8bc94af..e7ac20c 100644
--- a/train.lua
+++ b/train.lua
@@ -16,7 +16,7 @@ cmd:option('--minLR', 0.00001, 'minimum learning rate')
 cmd:option('--saturateEpoch', 20, 'epoch at which linear decayed LR will reach minLR')
 cmd:option('--maxEpoch', 50, 'maximum number of epochs to run')
 cmd:option('--batchSize', 10, 'mini-batch size')
-cmd:option('--gpu', 0, 'Zero-indexed ID of the GPU to use; for CPU mode set --gpu = -1')
+cmd:option('--gpu', 0, 'Zero-indexed ID of the GPU to use. Optional.')
 cmd:text()

 options = cmd:parse(arg)

From 26ade0832bcc03ae4922a0edf8648b468aaa3084 Mon Sep 17 00:00:00 2001
From: Lukas Fülling
Date: Mon, 22 Aug 2016 10:40:42 +0200
Subject: [PATCH 4/4] add option table

---
 README.md | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 33a4258..58cd6ee 100644
--- a/README.md
+++ b/README.md
@@ -101,10 +101,15 @@ _(Disclaimer: nonsensical responses have been removed.)_
 th train.lua [-h / options]
 ```

-Use the `--dataset NUMBER` option to control the size of the dataset. Training on the full dataset takes about 5h for a single epoch.
-
 The model will be saved to `data/model.t7` after each epoch if it has improved (error decreased).

+### Options (non-exhaustive)
+- `--opencl` use OpenCL for computation (requires [torch-cl](https://github.com/hughperkins/distro-cl))
+- `--cuda` use CUDA for computation
+- `--gpu [index]` use the GPU with the given zero-indexed ID (e.g. on a 2015 MacBook, `--gpu 0` uses the Intel GPU while `--gpu 1` uses the far more powerful AMD GPU)
+- `--dataset [size]` control the size of the dataset
+- `--maxEpoch [amount]` specify the number of epochs to run
+
 ## Testing

 To load the model and have a conversation:
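A note on the `options.gpu + 1` in both branches of patch 1: the new `--gpu` flag is zero-indexed, while cutorch and cltorch number devices starting at 1. Below is a minimal sketch of that mapping with an added range check; the check and the variable names are illustrative, not part of the patches, and the OpenCL path (cltorch) exposes the same `getDeviceCount`/`setDevice` calls.

```
-- Illustrative sketch, not part of the patches: map the zero-indexed
-- --gpu value onto Torch's 1-indexed device API, with a range check.
require 'cutorch'

local gpu = 0  -- zero-indexed, as parsed from the new --gpu option

local count = cutorch.getDeviceCount()
assert(gpu >= 0 and gpu < count,
  string.format('--gpu %d is out of range, %d device(s) available', gpu, count))

cutorch.setDevice(gpu + 1)  -- cutorch numbers devices from 1, hence the +1
print('using device ' .. cutorch.getDevice())
```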
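For completeness, a hypothetical invocation combining the flags documented in patch 4 (all values illustrative):

```
th train.lua --cuda --gpu 1 --dataset 5000 --maxEpoch 20
```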