From d70a7a0571bc920270cc91cd6f453b551e29ccc9 Mon Sep 17 00:00:00 2001
From: "alberto.confente"
Date: Mon, 25 Nov 2024 15:22:24 +0900
Subject: [PATCH] fixed dual buffer bug on lamaml experiments

---
 experiments/split_cifar100/lamaml.py      | 15 +++------------
 experiments/split_tiny_imagenet/lamaml.py | 17 ++++-------------
 2 files changed, 7 insertions(+), 25 deletions(-)

diff --git a/experiments/split_cifar100/lamaml.py b/experiments/split_cifar100/lamaml.py
index 42fc510..35e3257 100644
--- a/experiments/split_cifar100/lamaml.py
+++ b/experiments/split_cifar100/lamaml.py
@@ -22,7 +22,7 @@ def lamaml_scifar100(override_args=None):
     args = create_default_args(
         {'cuda': 0, 'n_inner_updates': 5, 'second_order': True,
          'grad_clip_norm': 1.0, 'learn_lr': True, 'lr_alpha': 0.25,
-         'sync_update': False, 'mem_size': 200, 'lr': 0.1,
+         'sync_update': False, 'mem_size': 200, 'buffer_mb_size': 10, 'lr': 0.1,
          'train_mb_size': 10, 'train_epochs': 10, 'seed': None}, override_args
     )
 
@@ -41,16 +41,6 @@ def lamaml_scifar100(override_args=None):
         metrics.accuracy_metrics(epoch=True, experience=True, stream=True),
         loggers=[interactive_logger])
 
-    # Buffer
-    rs_buffer = ReservoirSamplingBuffer(max_size=args.mem_size)
-    replay_plugin = ReplayPlugin(
-        mem_size=args.mem_size,
-        batch_size=args.train_mb_size,
-        batch_size_mem=args.train_mb_size,
-        task_balanced_dataloader=False,
-        storage_policy=rs_buffer
-    )
-
     # Strategy
     model = MTConvCIFAR()
     cl_strategy = LaMAML(
@@ -65,9 +55,10 @@ def lamaml_scifar100(override_args=None):
         sync_update=args.sync_update,
         train_mb_size=args.train_mb_size,
         train_epochs=args.train_epochs,
+        buffer_mb_size=args.buffer_mb_size,
+        max_buffer_size=args.mem_size,
         eval_mb_size=100,
         device=device,
-        plugins=[replay_plugin],
         evaluator=evaluation_plugin,
     )
 
diff --git a/experiments/split_tiny_imagenet/lamaml.py b/experiments/split_tiny_imagenet/lamaml.py
index 31b949a..dda1609 100644
--- a/experiments/split_tiny_imagenet/lamaml.py
+++ b/experiments/split_tiny_imagenet/lamaml.py
@@ -24,8 +24,8 @@ def lamaml_stinyimagenet(override_args=None):
     args = create_default_args(
         {'cuda': 0, 'n_inner_updates': 5, 'second_order': True,
          'grad_clip_norm': 1.0, 'learn_lr': True, 'lr_alpha': 0.4,
-         'sync_update': False, 'mem_size': 400, 'lr': 0.1, 'train_mb_size': 10,
-         'train_epochs': 10, 'seed': None}, override_args
+         'sync_update': False, 'mem_size': 400, 'buffer_mb_size': 10, 'lr': 0.1,
+         'train_mb_size': 10, 'train_epochs': 10, 'seed': None}, override_args
     )
 
     set_seed(args.seed)
@@ -43,16 +43,6 @@ def lamaml_stinyimagenet(override_args=None):
         metrics.accuracy_metrics(epoch=True, experience=True, stream=True),
         loggers=[interactive_logger])
 
-    # Buffer
-    rs_buffer = ReservoirSamplingBuffer(max_size=args.mem_size)
-    replay_plugin = ReplayPlugin(
-        mem_size=args.mem_size,
-        batch_size=args.train_mb_size,
-        batch_size_mem=args.train_mb_size,
-        task_balanced_dataloader=False,
-        storage_policy=rs_buffer
-    )
-
     # Strategy
     model = MTConvTinyImageNet()
     cl_strategy = LaMAML(
@@ -67,9 +57,10 @@ def lamaml_stinyimagenet(override_args=None):
         sync_update=args.sync_update,
         train_mb_size=args.train_mb_size,
         train_epochs=args.train_epochs,
+        buffer_mb_size=args.buffer_mb_size,
+        max_buffer_size=args.mem_size,
         eval_mb_size=100,
         device=device,
-        plugins=[replay_plugin],
         evaluator=evaluation_plugin,
     )