
FileNotFoundError: [Errno 2] No such file or directory: '/home/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl #3

Open
suvanbalu opened this issue Apr 20, 2024 · 2 comments


@suvanbalu

I've followed the steps mentioned in the README, but at the last step,
./run_MUFIN.sh 0,1 PreTrainedMufinMultiModal MM-AmazonTitles-300K MUFIN_pretrained ViT sentencebert -1 0

we get this error:
FileNotFoundError: [Errno 2] No such file or directory: '/home/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'. We don't have any .pkl file in our models folder.
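
For reference, a quick way to confirm there is no pickle anywhere under the models tree (plain pathlib, nothing MUFIN-specific; the directory is taken from the error above):

from pathlib import Path

# hypothetical sanity check, not part of MUFIN: list every .pkl under the models tree
models_dir = Path("/home/scratch/XC/models/MM-AmazonTitles-300K")
pkl_files = sorted(str(p) for p in models_dir.rglob("*.pkl"))
print(pkl_files if pkl_files else "no .pkl files found")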

This is the output of tree $HOME/scratch/XC:

/home/scratch/XC
├── data
│   ├── MM-AmazonTitles-300K
│   │   ├── download_images.ipynb
│   │   ├── filter_labels_test.txt
│   │   ├── filter_labels_train.txt
│   │   ├── img.urls.txt
│   │   ├── raw_data
│   │   │   ├── label.raw.txt
│   │   │   ├── test.raw.txt
│   │   │   └── train.raw.txt
│   │   ├── sentencebert
│   │   │   ├── test.txt.seq.memmap.dat
│   │   │   └── test.txt.seq.memmap.meta
│   │   ├── trn_X_Y.txt
│   │   └── tst_X_Y.txt
│   └── MM-AmazonTitles-300K.zip
├── models
│   └── MM-AmazonTitles-300K
│       ├── MufinMultiModal
│       │   └── v_MUFIN
│       └── PreTrainedMufinMultiModal
│           └── v_MUFIN_pretrained
├── programs
│   ├── CafeXC
│   │   ├── CafeXC.yml
│   │   ├── get_examples.ipynb
│   │   ├── README.md
│   │   ├── tokenize.ipynb
│   │   └── xc
│   │       ├── libs
│   │       │   ├── anns.py
│   │       │   ├── build_data.py
│   │       │   ├── cluster.py
│   │       │   ├── collate_fn.py
│   │       │   ├── convert2onnx.py
│   │       │   ├── custom_dtypes.py
│   │       │   ├── data_base.py
│   │       │   ├── data_img.py
│   │       │   ├── data_lbl.py
│   │       │   ├── dataloader.py
│   │       │   ├── dataparallel.py
│   │       │   ├── dataset.py
│   │       │   ├── data_shorty.py
│   │       │   ├── data_txt.py
│   │       │   ├── diskann.py
│   │       │   ├── dtypes.py
│   │       │   ├── fast_cluster.py
│   │       │   ├── graph.py
│   │       │   ├── loss.py
│   │       │   ├── model_base.py
│   │       │   ├── model.py
│   │       │   ├── model_utils.py
│   │       │   ├── optimizer_utils.py
│   │       │   ├── parameters.py
│   │       │   └── utils.py
│   │       ├── method
│   │       │   └── mufin
│   │       │       ├── dataset.py
│   │       │       ├── model.py
│   │       │       ├── models_encoders.py
│   │       │       └── network.py
│   │       ├── models
│   │       │   ├── custom_transformer.py
│   │       │   ├── models_base.py
│   │       │   ├── models_clf.py
│   │       │   ├── models_emb.py
│   │       │   ├── models_fusion.py
│   │       │   ├── models_img.py
│   │       │   ├── models_txt.py
│   │       │   └── network.py
│   │       └── tools
│   │           ├── analysis.py
│   │           ├── evaluate.py
│   │           ├── extract_eval.py
│   │           ├── get_pre_train.py
│   │           ├── label_features_split.py
│   │           └── tokenize_text.py
│   ├── Miniconda3-latest-Linux-x86_64.sh
│   ├── Miniconda3-latest-Linux-x86_64.sh.1
│   └── MUFIN
│       ├── configs
│       │   ├── LF-AmazonTitles-131K.json
│       │   └── MM-AmazonTitles-300K.json
│       ├── img_db_hash.ipynb
│       ├── LICENSE
│       ├── MUFIN.ipynb
│       ├── MUFIN.pdf
│       ├── mufin.py
│       ├── parameters.py
│       ├── __pycache__
│       │   └── parameters.cpython-39.pyc
│       ├── README.md
│       ├── run_MUFIN.sh
│       ├── run_pretrained.sh
│       └── xc
│           ├── libs
│           │   ├── anns.py
│           │   ├── build_data.py
│           │   ├── cluster.py
│           │   ├── collate_fn.py
│           │   ├── convert2onnx.py
│           │   ├── custom_dtypes.py
│           │   ├── data_base.py
│           │   ├── data_img.py
│           │   ├── data_lbl.py
│           │   ├── dataloader.py
│           │   ├── dataparallel.py
│           │   ├── dataset.py
│           │   ├── data_shorty.py
│           │   ├── data_txt.py
│           │   ├── diskann.py
│           │   ├── dtypes.py
│           │   ├── fast_cluster.py
│           │   ├── graph.py
│           │   ├── loss.py
│           │   ├── model_base.py
│           │   ├── model.py
│           │   ├── model_utils.py
│           │   ├── optimizer_utils.py
│           │   ├── parameters.py
│           │   ├── __pycache__
│           │   │   ├── anns.cpython-39.pyc
│           │   │   ├── cluster.cpython-39.pyc
│           │   │   ├── custom_dtypes.cpython-39.pyc
│           │   │   ├── data_base.cpython-39.pyc
│           │   │   ├── data_img.cpython-39.pyc
│           │   │   ├── data_lbl.cpython-39.pyc
│           │   │   ├── dataloader.cpython-39.pyc
│           │   │   ├── dataparallel.cpython-39.pyc
│           │   │   ├── dataset.cpython-39.pyc
│           │   │   ├── data_shorty.cpython-39.pyc
│           │   │   ├── data_txt.cpython-39.pyc
│           │   │   ├── diskann.cpython-39.pyc
│           │   │   ├── fast_cluster.cpython-39.pyc
│           │   │   ├── loss.cpython-39.pyc
│           │   │   ├── model_base.cpython-39.pyc
│           │   │   ├── model_utils.cpython-39.pyc
│           │   │   ├── optimizer_utils.cpython-39.pyc
│           │   │   ├── parameters.cpython-39.pyc
│           │   │   ├── utils.bin_index-82.py39.1.nbc
│           │   │   ├── utils.bin_index-82.py39.2.nbc
│           │   │   ├── utils.bin_index-82.py39.nbi
│           │   │   └── utils.cpython-39.pyc
│           │   └── utils.py
│           ├── method
│           │   └── mufin
│           │       ├── dataset.py
│           │       ├── model.py
│           │       ├── models_encoders.py
│           │       ├── network.py
│           │       └── __pycache__
│           │           ├── dataset.cpython-39.pyc
│           │           ├── model.cpython-39.pyc
│           │           ├── models_encoders.cpython-39.pyc
│           │           └── network.cpython-39.pyc
│           ├── models
│           │   ├── custom_transformer.py
│           │   ├── models_base.py
│           │   ├── models_clf.py
│           │   ├── models_emb.py
│           │   ├── models_fusion.py
│           │   ├── models_img.py
│           │   ├── models_txt.py
│           │   ├── network.py
│           │   └── __pycache__
│           │       ├── custom_transformer.cpython-39.pyc
│           │       ├── models_base.cpython-39.pyc
│           │       ├── models_clf.cpython-39.pyc
│           │       ├── models_emb.cpython-39.pyc
│           │       ├── models_fusion.cpython-39.pyc
│           │       ├── models_img.cpython-39.pyc
│           │       └── models_txt.cpython-39.pyc
│           └── tools
│               ├── analysis.py
│               ├── evaluate.py
│               ├── extract_eval.py
│               ├── get_pre_train.py
│               ├── label_features_split.py
│               ├── __pycache__
│               │   └── tokenize_text.cpython-39.pyc
│               └── tokenize_text.py
└── results
    └── MM-AmazonTitles-300K
        ├── MufinMultiModal
        │   └── v_MUFIN
        │       ├── log_predict.txt
        │       ├── module1
        │       ├── module2
        │       ├── module3
        │       └── module4
        └── PreTrainedMufinMultiModal
            └── v_MUFIN_pretrained
                ├── log_eval.txt
                ├── log_extract.txt
                ├── log_predict.txt
                ├── module1
                ├── module2
                ├── module3
                └── module4

@anshumitts
Member

Can you post the entire error stack?

@m23csa004

Hi, I'm also facing the same problem.
Here is the full error I'm getting after the last command:

Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='retrain_anns', seed=22, pred_fname='score.npz', extract_x_txt=None, extract_x_shorty=None, extract_x_img=None, extract_y=None, extract_fname='test.npy', filter_labels='filter_labels_test.txt', preload=False, save_all=False, keep_all=False, ranker='XAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=1, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=1023, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.02, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=300, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1)
MufinMultiModal(
(criterian): CustomMarginLoss(m=0.3, num_neg=10, num_pos=1)
(item_encoder): DataParallel(
(module): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
)
)
Loading model..
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 141, in main
retrain_anns(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 84, in retrain_anns
model.retrain(data_dir=params.data_dir, trn_img=params.trn_x_img,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 194, in retrain
self.load(self.params.model_dir, "model.pkl")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 158, in load
self.filter.load(os.path.join(model_dir, f"filter_{fname}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_utils.py", line 35, in load
data = joblib.load(model_path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/joblib/numpy_pickle.py", line 650, in load
with open(filename, 'rb') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='predict', seed=22, pred_fname='score.npz', extract_x_txt='raw_data/test.raw.txt', extract_x_shorty=None, extract_x_img='images/test.img.bin', extract_y='tst_X_Y.txt', extract_fname='module2/test.npz', filter_labels='filter_labels_test.txt', preload=False, save_all=False, keep_all=False, ranker='XAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=2, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=1023, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.02, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=300, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1)
MufinMultiModal(
(criterian): CustomMarginLoss(m=0.3, num_neg=10, num_pos=1)
(item_encoder): DataParallel(
(module): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
)
)
IMG:images/test.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/test.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/test.img.bin not found!!
TXT:raw_data/test.raw.txt(read_full=True)
sentencebert None
Reading: 260536it [00:00, 524173.32it/s]
/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/sentencebert/test.txt.seq.memmap
100%|████████████████████████████████████████████████████████████████████| 8142/8142 [00:32<00:00, 248.26it/s]
TXT:test.txt.seq.memmap(read_full=True)
Loading model..
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 121, in main
predict(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 27, in predict
score_mat = model.predict(
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 127, in predict
self.load(self.params.model_dir, "model.pkl")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 158, in load
self.filter.load(os.path.join(model_dir, f"filter_{fname}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_utils.py", line 35, in load
data = joblib.load(model_path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/joblib/numpy_pickle.py", line 650, in load
with open(filename, 'rb') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'
Overlap is: 586902
/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/tst_X_Y.txt module2/test
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 82, in
main(targets_file, train_label_file, result_dir,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 43, in main
scr_mat = remove_overlap(load_file(score_mat_dir).tolil(), docs, lbls)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 172, in load_file
return sp.load_npz(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/scipy/sparse/matrix_io.py", line 125, in load_npz
with np.load(file, **PICKLE_KWARGS) as loaded:
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module2/test.npz'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='predict', seed=22, pred_fname='score.npz', extract_x_txt='raw_data/train.raw.txt', extract_x_shorty=None, extract_x_img='images/train.img.bin', extract_y='trn_X_Y.txt', extract_fname='module2/train.npz', filter_labels='filter_labels_train.txt', preload=False, save_all=False, keep_all=False, ranker='XAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=2, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=1023, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.02, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=300, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1)
MufinMultiModal(
(criterian): CustomMarginLoss(m=0.3, num_neg=10, num_pos=1)
(item_encoder): DataParallel(
(module): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
)
)
IMG:images/train.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/train.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/train.img.bin not found!!
TXT:raw_data/train.raw.txt(read_full=True)
sentencebert None
Reading: 586781it [00:00, 886107.50it/s]
/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/sentencebert/train.txt.seq.memmap
100%|██████████████████████████████████████████████████████████████████| 18337/18337 [01:13<00:00, 250.03it/s]
TXT:train.txt.seq.memmap(read_full=True)
Loading model..
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 121, in main
predict(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 27, in predict
score_mat = model.predict(
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 127, in predict
self.load(self.params.model_dir, "model.pkl")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 158, in load
self.filter.load(os.path.join(model_dir, f"filter
{fname}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_utils.py", line 35, in load
data = joblib.load(model_path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/joblib/numpy_pickle.py", line 650, in load
with open(filename, 'rb') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'
Overlap is: 119707
/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/trn_X_Y.txt module2/train
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 82, in
main(targets_file, train_label_file, result_dir,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 43, in main
scr_mat = remove_overlap(load_file(score_mat_dir).tolil(), docs, lbls)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 172, in load_file
return sp.load_npz(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/scipy/sparse/matrix_io.py", line 125, in load_npz
with np.load(file, **PICKLE_KWARGS) as loaded:
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module2/train.npz'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='extract_model', seed=22, pred_fname='score.npz', extract_x_txt=None, extract_x_shorty=None, extract_x_img=None, extract_y=None, extract_fname='module3/encoder.pkl', filter_labels='filter_labels_test.txt', preload=False, save_all=False, keep_all=False, ranker='XAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=3, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=1023, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.02, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=300, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1)
MufinMultiModal(
(criterian): CustomMarginLoss(m=0.3, num_neg=10, num_pos=1)
(item_encoder): DataParallel(
(module): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
)
)
Loading model..
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 136, in main
extract_model(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 79, in extract_model
encoder = model.extract_encoder()
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 165, in extract_encoder
self.load(self.params.model_dir, self.params.model_out_name)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 158, in load
self.filter.load(os.path.join(model_dir, f"filter
{fname}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_utils.py", line 35, in load
data = joblib.load(model_path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/joblib/numpy_pickle.py", line 650, in load
with open(filename, 'rb') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='extract', seed=22, pred_fname='score.npz', extract_x_txt='raw_data/test.raw.txt', extract_x_shorty=None, extract_x_img='images/test.img.bin', extract_y=None, extract_fname='module3/test', filter_labels='filter_labels_test.txt', preload=False, save_all=False, keep_all=False, ranker='XAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=3, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=1023, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.02, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=300, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1)
MufinMultiModal(
(criterian): CustomMarginLoss(m=0.3, num_neg=10, num_pos=1)
(item_encoder): DataParallel(
(module): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
)
)
IMG:images/test.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/test.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/test.img.bin not found!!
TXT:raw_data/test.raw.txt(read_full=True)
sentencebert None
TXT:test.txt.seq.memmap(read_full=True)
Loading model..
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 131, in main
extract(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 69, in extract
embeddings = model.extract(data_dir=params.data_dir,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 82, in extract
self.load(self.params.model_dir, "model.pkl")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 158, in load
self.filter.load(os.path.join(model_dir, f"filter
{fname}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_utils.py", line 35, in load
data = joblib.load(model_path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/joblib/numpy_pickle.py", line 650, in load
with open(filename, 'rb') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='extract', seed=22, pred_fname='score.npz', extract_x_txt='raw_data/train.raw.txt', extract_x_shorty=None, extract_x_img='images/train.img.bin', extract_y=None, extract_fname='module3/train', filter_labels='filter_labels_test.txt', preload=False, save_all=False, keep_all=False, ranker='XAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=3, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=1023, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.02, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=300, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1)
MufinMultiModal(
(criterian): CustomMarginLoss(m=0.3, num_neg=10, num_pos=1)
(item_encoder): DataParallel(
(module): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
)
)
IMG:images/train.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/train.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/train.img.bin not found!!
TXT:raw_data/train.raw.txt(read_full=True)
sentencebert None
TXT:train.txt.seq.memmap(read_full=True)
Loading model..
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 131, in main
extract(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 69, in extract
embeddings = model.extract(data_dir=params.data_dir,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 82, in extract
self.load(self.params.model_dir, "model.pkl")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 158, in load
self.filter.load(os.path.join(model_dir, f"filter
{fname}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_utils.py", line 35, in load
data = joblib.load(model_path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/joblib/numpy_pickle.py", line 650, in load
with open(filename, 'rb') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='extract', seed=22, pred_fname='score.npz', extract_x_txt='raw_data/label.raw.txt', extract_x_shorty=None, extract_x_img='images/label.img.bin', extract_y=None, extract_fname='module3/label', filter_labels='filter_labels_test.txt', preload=False, save_all=False, keep_all=False, ranker='XAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=3, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=1023, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.02, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=300, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1)
MufinMultiModal(
(criterian): CustomMarginLoss(m=0.3, num_neg=10, num_pos=1)
(item_encoder): DataParallel(
(module): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
)
)
IMG:images/label.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/label.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/label.img.bin not found!!
TXT:raw_data/label.raw.txt(read_full=True)
sentencebert None
Reading: 303296it [00:00, 1259527.90it/s]
/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/sentencebert/label.txt.seq.memmap
100%|████████████████████████████████████████████████████████████████████| 9478/9478 [00:31<00:00, 297.35it/s]
TXT:label.txt.seq.memmap(read_full=True)
Loading model..
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 131, in main
extract(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 69, in extract
embeddings = model.extract(data_dir=params.data_dir,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 82, in extract
self.load(self.params.model_dir, "model.pkl")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_base.py", line 158, in load
self.filter.load(os.path.join(model_dir, f"filter_{fname}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/model_utils.py", line 35, in load
data = joblib.load(model_path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/joblib/numpy_pickle.py", line 650, in load
with open(filename, 'rb') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
UTILS:TRANSFORM:USING DEFAULT
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model_MufinXAttnRanker.pkl', img_model='Identity', txt_model='Identity', config='configs/MM-AmazonTitles-300K.json', mode='train', seed=22, pred_fname='score.npz', extract_x_txt='module3/test.txt.pretrained', extract_x_shorty=None, extract_x_img='module3/test.img.pretrained', extract_y='tst_X_Y.txt', extract_fname='test_MufinXAttnRanker', filter_labels='filter_labels_test.txt', preload=False, save_all=True, keep_all=False, ranker='MufinXAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=4, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=400, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='module3/label.img.pretrained', lbl_x_txt='module3/label.txt.pretrained', lr=0.05, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=20, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='module3/train.img.pretrained', trn_x_txt='module3/train.txt.pretrained', trn_y='trn_X_Y.txt', tst_x_img='module3/test.img.pretrained', tst_x_txt='module3/test.txt.pretrained', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1, lr_mf_enc=0.01)
MufinXAttnRanker(
(criterian): CosineEmbeddingLoss(m=0.5, pos_wts=1.0)
(item_encoder): DataParallel(
(module): MultiModalRanker(
(attn_encoder): MultiModalEncoder(
(txt_encoder): Identity(
(features): Residual(
(transform): Sequential(
(0): Linear(in_features=192, out_features=192, bias=True)
(1): Dropout(p=0.1, inplace=False)
(2): ReLU()
)
)
(bottle_neck): BottleNeck(
(features): Sequential()
)
)
(img_encoder): Identity(
(features): Residual(
(transform): Sequential(
(0): Linear(in_features=192, out_features=192, bias=True)
(1): Dropout(p=0.1, inplace=False)
(2): ReLU()
)
)
(bottle_neck): BottleNeck(
(features): Sequential()
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
(cross_encoder): RankInstances(
(features): RTDecoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
(label_clf): XAttn(
(features): Embedding(303297, 192, sparse=True)
(weights): Embedding(303297, 2, sparse=True)
(activation): Softmax(dim=-1)
)
)
)
)
Using linear scheudler, 0.01 as weight decay
IMG:module3/train.img.pretrained(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/train.img.pretrained.npz' /home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/train.img.pretrained not found!!
TXT:module3/train.txt.pretrained(read_full=True)
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 116, in main
train(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 17, in train
model.fit(data_dir=params.data_dir, trn_img=params.trn_x_img,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 452, in fit
X_trn = self.half_dataset(data_path, trn_img, trn_txt)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 18, in half_dataset
feat = GroupFts(data_dir, doc_img, doc_txt, _type=mode, rand_k=rand_k,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/dataset.py", line 48, in init
self.TXT = FtsData(data_dir, n_file_txt, _type="txt")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/dataset.py", line 21, in FtsData
return load_txt(data_dir, n_file)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/data_txt.py", line 27, in load_txt
return NPYTXTDataset(root, n_file)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/data_txt.py", line 186, in init
self.vect = load_file(os.path.join(root, f"{n_file}.npy"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 174, in load_file
return np.load(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/train.txt.pretrained.npy'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
UTILS:TRANSFORM:USING DEFAULT
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model_MufinXAttnRanker.pkl', img_model='Identity', txt_model='Identity', config='configs/MM-AmazonTitles-300K.json', mode='predict', seed=22, pred_fname='score.npz', extract_x_txt='module3/test.txt.pretrained', extract_x_shorty=None, extract_x_img='module3/test.img.pretrained', extract_y='tst_X_Y.txt', extract_fname='test_MufinXAttnRanker', filter_labels='filter_labels_test.txt', preload=False, save_all=True, keep_all=False, ranker='MufinXAttnRanker', encoder_init=None, cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=4, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=400, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='module3/label.img.pretrained', lbl_x_txt='module3/label.txt.pretrained', lr=0.05, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=20, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='module3/train.img.pretrained', trn_x_txt='module3/train.txt.pretrained', trn_y='trn_X_Y.txt', tst_x_img='module3/test.img.pretrained', tst_x_txt='module3/test.txt.pretrained', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1, lr_mf_enc=0.01)
MufinXAttnRanker(
(criterian): CosineEmbeddingLoss(m=0.5, pos_wts=1.0)
(item_encoder): DataParallel(
(module): MultiModalRanker(
(attn_encoder): MultiModalEncoder(
(txt_encoder): Identity(
(features): Residual(
(transform): Sequential(
(0): Linear(in_features=192, out_features=192, bias=True)
(1): Dropout(p=0.1, inplace=False)
(2): ReLU()
)
)
(bottle_neck): BottleNeck(
(features): Sequential()
)
)
(img_encoder): Identity(
(features): Residual(
(transform): Sequential(
(0): Linear(in_features=192, out_features=192, bias=True)
(1): Dropout(p=0.1, inplace=False)
(2): ReLU()
)
)
(bottle_neck): BottleNeck(
(features): Sequential()
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
(cross_encoder): RankInstances(
(features): RTDecoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
(label_clf): XAttn(
(features): Embedding(303297, 192, sparse=True)
(weights): Embedding(303297, 2, sparse=True)
(activation): Softmax(dim=-1)
)
)
)
)
IMG:module3/test.img.pretrained(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/test.img.pretrained.npz' /home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/test.img.pretrained not found!!
TXT:module3/test.txt.pretrained(read_full=True)
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 121, in main
predict(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 27, in predict
score_mat = model.predict(
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 341, in predict
X = self.half_dataset(data_path, tst_img, tst_txt)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 18, in half_dataset
feat = GroupFts(data_dir, doc_img, doc_txt, _type=mode, rand_k=rand_k,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/dataset.py", line 48, in init
self.TXT = FtsData(data_dir, n_file_txt, _type="txt")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/dataset.py", line 21, in FtsData
return load_txt(data_dir, n_file)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/data_txt.py", line 27, in load_txt
return NPYTXTDataset(root, n_file)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/data_txt.py", line 186, in init
self.vect = load_file(os.path.join(root, f"{n_file}.npy"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 174, in load_file
return np.load(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/test.txt.pretrained.npy'
Overlap is: 586902
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 82, in
main(targets_file, train_label_file, result_dir,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 52, in main
m2 = _remove_overlap(load_file(m2_score_mat_dir).tolil(), docs, lbls)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 172, in load_file
return sp.load_npz(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/scipy/sparse/_matrix_io.py", line 125, in load_npz
with np.load(file, **PICKLE_KWARGS) as loaded:
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module4/m2_test_MufinXAttnRanker.npz'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model_MufinXAttnRankerpp.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='train', seed=22, pred_fname='score.npz', extract_x_txt='raw_data/test.raw.txt', extract_x_shorty=None, extract_x_img='images/test.img.bin', extract_y='tst_X_Y.txt', extract_fname='test_MufinXAttnRankerpp', filter_labels='filter_labels_test.txt', preload=False, save_all=True, keep_all=False, ranker='MufinXAttnRankerpp', encoder_init='module3/encoder.pkl', cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=4, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=200, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.01, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=20, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1, lr_mf_enc=0.001)
MufinXAttnRankerpp(
(criterian): CosineEmbeddingLoss(m=0.5, pos_wts=1.0)
(item_encoder): DataParallel(
(module): MultiModalRanker(
(attn_encoder): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
(cross_encoder): RankInstances(
(features): RTDecoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
(label_clf): XAttn(
(features): Embedding(303297, 192, sparse=True)
(weights): Embedding(303297, 2, sparse=True)
(activation): Softmax(dim=-1)
)
)
)
)
Using linear scheudler, 0.01 as weight decay
IMG:images/train.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/train.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/train.img.bin not found!!
TXT:raw_data/train.raw.txt(read_full=True)
sentencebert None
TXT:train.txt.seq.memmap(read_full=True)
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 116, in main
train(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 17, in train
model.fit(data_dir=params.data_dir, trn_img=params.trn_x_img,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 454, in fit
S_trn = self.load_ground_truth(shorty_dir, "train.npz", "shorty")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 33, in load_ground_truth
return FtsData(data_dir, lbl_file, _type=_type)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/dataset.py", line 25, in FtsData
return SHORTYDataset(data_dir, n_file)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/data_shorty.py", line 14, in init
self.data = load_file(os.path.join(root, f"{n_file}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 172, in load_file
return sp.load_npz(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/scipy/sparse/_matrix_io.py", line 125, in load_npz
with np.load(file, **PICKLE_KWARGS) as loaded:
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module2/train.npz'
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
No module named 'diskannpy': DiskANN is not installed correctly
Model parameters: Namespace(img_db='images/img.bin', M=100, bucket=1, doc_thresh=1000, n_split=1, freeze_layer=-1, sample_nodes=-1, method='hnsw', not_use_module2=False, graph_init=0.1, weight_decay=0.01, doc_first=True, train_anns=99, data_dir='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K', dataset='MM-AmazonTitles-300K', model_dir='/home/amresh/scratch/XC/models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', result_dir='/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN', emb_dir='random', model_fname='MufinMultiModal', model_out_name='model_MufinXAttnRankerpp.pkl', img_model='ViT', txt_model='sentencebert', config='configs/MM-AmazonTitles-300K.json', mode='predict', seed=22, pred_fname='score.npz', extract_x_txt='raw_data/test.raw.txt', extract_x_shorty=None, extract_x_img='images/test.img.bin', extract_y='tst_X_Y.txt', extract_fname='test_MufinXAttnRankerpp', filter_labels='filter_labels_test.txt', preload=False, save_all=True, keep_all=False, ranker='MufinXAttnRankerpp', encoder_init='module3/encoder.pkl', cosine_margin=0.5, ignore_lbl_imgs=False, validate=True, module=4, A=0.6, B=2.6, num_labels=303296, n_splits=1, accumulate=1, at_least=5, batch_size=200, boosting=False, dropout=0.1, emb_dim=768, embeddings='/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/random', f_name='trn_X_Xf', hard_pos=False, head_dims=2048, ignore_img=False, ignore_txt=False, keep_k=10, lbl_x_img='images/label.img.bin', lbl_x_txt='raw_data/label.raw.txt', lr=0.01, margin=0.3, max_csim=0.9, max_len=32, max_worker_thread=10, min_leaf_sz=32, min_splits=-1, n_layer=1, neg_sample=10, normalize=True, ntypes=2, num_epochs=20, num_vocab=40000, num_workers=7, optim='AdamW', prefetch_factor=1, n_heads=1, project_dim=192, ranker_project_dim=192, re_size=256, sampling=True, top_k=300, trn_x_img='images/train.img.bin', trn_x_txt='raw_data/train.raw.txt', trn_y='trn_X_Y.txt', tst_x_img='images/test.img.bin', tst_x_txt='raw_data/test.raw.txt', tst_y='tst_X_Y.txt', cl_update=5, validate_after=10, verbose=50000, warm_start=20, surrogate_warm=1, ranker_warm=1, multi_pos=4, sample_pos=2, sample_neg=12, lr_mf_clf=0.1, lr_mf_enc=0.001)
MufinXAttnRankerpp(
(criterian): CosineEmbeddingLoss(m=0.5, pos_wts=1.0)
(item_encoder): DataParallel(
(module): MultiModalRanker(
(attn_encoder): MultiModalEncoder(
(txt_encoder): SentenceBert(
apply_pooler=True, device=cuda:0
(features): DistilBertModel(
(embeddings): Embeddings(
(word_embeddings): Embedding(30522, 768, padding_idx=0)
(position_embeddings): Embedding(512, 768)
(LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(dropout): Dropout(p=0.1, inplace=False)
)
(transformer): Transformer(
(layer): ModuleList(
(0-5): 6 x TransformerBlock(
(attention): MultiHeadSelfAttention(
(dropout): Dropout(p=0.1, inplace=False)
(q_lin): Linear(in_features=768, out_features=768, bias=True)
(k_lin): Linear(in_features=768, out_features=768, bias=True)
(v_lin): Linear(in_features=768, out_features=768, bias=True)
(out_lin): Linear(in_features=768, out_features=768, bias=True)
)
(sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(ffn): FFN(
(dropout): Dropout(p=0.1, inplace=False)
(lin1): Linear(in_features=768, out_features=3072, bias=True)
(lin2): Linear(in_features=3072, out_features=768, bias=True)
(activation): GELUActivation()
)
(output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
)
(img_encoder): ModelViT(
apply_pooler=True
(features): ViTModel(
(embeddings): ViTEmbeddings(
(patch_embeddings): ViTPatchEmbeddings(
(projection): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16))
)
(dropout): Dropout(p=0.0, inplace=False)
)
(encoder): ViTEncoder(
(layer): ModuleList(
(0-11): 12 x ViTLayer(
(attention): ViTAttention(
(attention): ViTSelfAttention(
(query): Linear(in_features=768, out_features=768, bias=True)
(key): Linear(in_features=768, out_features=768, bias=True)
(value): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(output): ViTSelfOutput(
(dense): Linear(in_features=768, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
)
(intermediate): ViTIntermediate(
(dense): Linear(in_features=768, out_features=3072, bias=True)
(intermediate_act_fn): GELUActivation()
)
(output): ViTOutput(
(dense): Linear(in_features=3072, out_features=768, bias=True)
(dropout): Dropout(p=0.0, inplace=False)
)
(layernorm_before): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(layernorm_after): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
)
)
)
(layernorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
(pooler): ViTPooler(
(dense): Linear(in_features=768, out_features=768, bias=True)
(activation): Tanh()
)
)
(bottle_neck): BottleNeck(
(features): Sequential(
(0): Linear(in_features=768, out_features=384, bias=True)
(1): Linear(in_features=384, out_features=192, bias=True)
)
)
(transform): Sequential(
(0): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
)
)
(merge_embds): MergeInstances(
(features): RTEncoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
)
(cross_encoder): RankInstances(
(features): RTDecoder(
(layers): ModuleList(
(0): ReverseTransformerBlock(
(attented): MultiHeadAtttention(
(nheads): 1
(plin_q): Linear(in_features=192, out_features=192, bias=True)
(plin_k): Linear(in_features=192, out_features=192, bias=True)
(plin_v): Linear(in_features=192, out_features=192, bias=True)
(attention): QuadraticAttention(
(drop): Dropout(p=0.1, inplace=False)
(activation): Softmax(dim=-1)
)
(plin_o): Linear(in_features=192, out_features=192, bias=True)
(norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)
)
)
)
)
)
(label_clf): XAttn(
(features): Embedding(303297, 192, sparse=True)
(weights): Embedding(303297, 2, sparse=True)
(activation): Softmax(dim=-1)
)
)
)
)
IMG:images/test.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/test.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/test.img.bin not found!!
TXT:raw_data/test.raw.txt(read_full=True)
sentencebert None
TXT:test.txt.seq.memmap(read_full=True)
IMG:images/label.img.bin(keep_k=-1)
[Errno 2] No such file or directory: '/home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/label.img.bin.npz' /home/amresh/scratch/XC/data/MM-AmazonTitles-300K/images/label.img.bin not found!!
TXT:raw_data/label.raw.txt(read_full=True)
sentencebert None
TXT:label.txt.seq.memmap(read_full=True)
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 149, in
main(args.params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 121, in main
predict(model, params)
File "/home/amresh/scratch/XC/programs/MUFIN/mufin.py", line 27, in predict
score_mat = model.predict(
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 343, in predict
S = self.load_ground_truth(shorty_dir, "test.npz", "shorty")
File "/home/amresh/scratch/XC/programs/CafeXC/xc/method/mufin/model.py", line 33, in load_ground_truth
return FtsData(data_dir, lbl_file, _type=_type)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/dataset.py", line 25, in FtsData
return SHORTYDataset(data_dir, n_file)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/data_shorty.py", line 14, in init
self.data = load_file(os.path.join(root, f"{n_file}"))
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 172, in load_file
return sp.load_npz(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/scipy/sparse/_matrix_io.py", line 125, in load_npz
with np.load(file, **PICKLE_KWARGS) as loaded:
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module2/test.npz'
Overlap is: 586902
Traceback (most recent call last):
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 82, in
main(targets_file, train_label_file, result_dir,
File "/home/amresh/scratch/XC/programs/CafeXC/xc/tools/evaluate.py", line 52, in main
m2 = _remove_overlap(load_file(m2_score_mat_dir).tolil(), docs, lbls)
File "/home/amresh/scratch/XC/programs/CafeXC/xc/libs/utils.py", line 172, in load_file
return sp.load_npz(path)
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/scipy/sparse/_matrix_io.py", line 125, in load_npz
with np.load(file, **PICKLE_KWARGS) as loaded:
File "/home/amresh/miniconda3/envs/xc/lib/python3.9/site-packages/numpy/lib/npyio.py", line 405, in load
fid = stack.enter_context(open(os_fspath(file), "rb"))
FileNotFoundError: [Errno 2] No such file or directory: '/home/amresh/scratch/XC/results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module4/m2_test_MufinXAttnRankerpp.npz'
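
For reference, here is a small sanity check I used over the artifacts named in the tracebacks above. This is only a minimal sketch: the path list is copied verbatim from the error messages (the home-directory prefix will differ on other setups), and every one of these files is reported missing on my machine.

```python
import os

# Prefix from my setup; adjust as needed.
PREFIX = "/home/amresh/scratch/XC"

# Artifacts the pipeline tried to load, copied from the tracebacks above.
EXPECTED = [
    "models/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/filter_model.pkl",
    "data/MM-AmazonTitles-300K/images/label.img.bin",
    "results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module2/train.npz",
    "results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module2/test.npz",
    "results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/train.txt.pretrained.npy",
    "results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module3/test.txt.pretrained.npy",
    "results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module4/m2_test_MufinXAttnRanker.npz",
    "results/MM-AmazonTitles-300K/MufinMultiModal/v_MUFIN/module4/m2_test_MufinXAttnRankerpp.npz",
]

# Print which of the expected files actually exist on disk.
for rel in EXPECTED:
    path = os.path.join(PREFIX, rel)
    status = "OK" if os.path.exists(path) else "MISSING"
    print(f"{status:8s} {path}")
```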