diff --git a/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.ipynb b/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.ipynb
index 7e2d11a..afa01ac 100644
--- a/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.ipynb
+++ b/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.ipynb
@@ -1824,45 +1824,12 @@
    "execution_count": null,
    "metadata": {
     "ExecuteTime": {
-     "start_time": "2020-10-13T05:58:42.602Z"
+     "end_time": "2020-10-13T06:00:52.239227Z",
+     "start_time": "2020-10-13T05:59:23.760111Z"
     }
    },
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "90b0f467dd314fb4830cf6a3e19bf5a0",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/plain": [
-       "HBox(children=(FloatProgress(value=0.0, max=10.0), HTML(value='')))"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[1, 10] loss: 0.0537\n",
-      "[1, 20] loss: 0.00856\n",
-      "[1, 30] loss: 0.0023\n",
-      "[1, 40] loss: 0.000716\n",
-      "[1, 50] loss: 0.00039\n",
-      "[1, 60] loss: 0.000216\n",
-      "[1, 70] loss: 0.000143\n"
-     ]
-    }
-   ],
-   "source": [
-    "learning_rate = 1e-3\n",
-    "convnet2 = BetterCNN().to(device)\n",
-    "optimizer = torch.optim.Adam(convnet2.parameters(), lr=learning_rate)\n",
-    "model = train(convnet2, x_train, y_train, criterion, optimizer, n_epochs=10)\n",
-    "test(model, x_test, y_test)"
-   ]
+   "outputs": [],
+   "source": []
   },
   {
    "cell_type": "markdown",
@@ -1876,8 +1843,8 @@
    "execution_count": null,
    "metadata": {
     "ExecuteTime": {
-     "end_time": "2020-10-13T05:57:46.993651Z",
-     "start_time": "2020-10-13T05:57:00.198Z"
+     "end_time": "2020-10-13T06:00:52.240942Z",
+     "start_time": "2020-10-13T05:59:24.354Z"
     }
    },
    "outputs": [],
diff --git a/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.py b/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.py
index fd48d83..fd7fdc0 100644
--- a/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.py
+++ b/notebooks/c02_Intro_to_NN_Part_2/Intro_to_NN_Part_2.py
@@ -433,11 +433,7 @@ def forward(self, x):
 #
 #

-learning_rate = 1e-3
-convnet2 = BetterCNN().to(device)
-optimizer = torch.optim.Adam(convnet2.parameters(), lr=learning_rate)
-model = train(convnet2, x_train, y_train, criterion, optimizer, n_epochs=10)
-test(model, x_test, y_test)
+

 # Finally ! After changing the optimizer, creating a better CNN architecture and train for a couple of epochs we got an accuracy of over 99% on unseen data.
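
For reference, the cell and script block cleared by this diff held the training snippet shown in the removed lines. Below is that snippet reconstructed as plain Python rather than JSON-escaped notebook source, only as a sketch: `BetterCNN`, `train`, `test`, `criterion`, `device`, and the `x_train`/`y_train`/`x_test`/`y_test` tensors are assumed to be defined earlier in the notebook, exactly as the original cell assumed.

```python
import torch

# Sketch of the removed training cell; all names below other than
# learning_rate, convnet2, optimizer, and model come from earlier cells.
learning_rate = 1e-3
convnet2 = BetterCNN().to(device)  # improved CNN moved to the notebook's device (CPU/GPU)
optimizer = torch.optim.Adam(convnet2.parameters(), lr=learning_rate)

# Train for 10 epochs with the notebook's helper, then evaluate on unseen data.
model = train(convnet2, x_train, y_train, criterion, optimizer, n_epochs=10)
test(model, x_test, y_test)
```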