diff --git a/README.md b/README.md
index a53fa76b9..ccfce1e88 100644
--- a/README.md
+++ b/README.md
@@ -56,7 +56,13 @@
 python -m torch.distributed.launch --nproc_per_node=$NGPUS eval.py --model fcn32
 ### Demo
 ```
 cd ./scripts
-python demo.py --model fcn32s_vgg16_voc --input-pic ./datasets/test.jpg
+# For new users:
+python demo.py --model fcn32s_vgg16_voc --input-pic ../tests/test_img.jpg
+# You should add 'test.jpg' yourself before running:
+python demo.py --model fcn32s_vgg16_voc --input-pic ../datasets/test.jpg
+
+
+
 ```
 ```
diff --git a/scripts/demo.py b/scripts/demo.py
index 259080a0a..bc5773307 100644
--- a/scripts/demo.py
+++ b/scripts/demo.py
@@ -24,6 +24,7 @@
                     help='path to the input picture')
 parser.add_argument('--outdir', default='./eval', type=str,
                     help='path to save the predict result')
+parser.add_argument('--local_rank', type=int, default=0)
 
 args = parser.parse_args()
 
@@ -41,7 +42,7 @@ def demo(config):
     image = Image.open(config.input_pic).convert('RGB')
     images = transform(image).unsqueeze(0).to(device)
 
-    model = get_model(args.model, pretrained=True, root=args.save_folder).to(device)
+    model = get_model(args.model, local_rank=args.local_rank, pretrained=True, root=args.save_folder).to(device)
     print('Finished loading model!')
     model.eval()
 