Merge pull request #96 from ibm-granite/naming
update links to reflect new naming
wgifford authored Aug 1, 2024
2 parents be059fd + 515cec9 commit 27c684d
Showing 5 changed files with 27 additions and 27 deletions.
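In short, this commit points the notebooks at the renamed Hugging Face checkpoints: ibm/patchtsmixer-etth1-forecasting becomes ibm-granite/granite-timeseries-patchtsmixer, ibm/patchtst-etth1-forecasting becomes ibm-granite/granite-timeseries-patchtst, and ibm/TTM becomes ibm-granite/granite-timeseries-ttm-v1, while the tutorials now clone ibm-granite/granite-tsfm instead of IBM/tsfm. As a minimal sketch of loading the renamed checkpoints (the TinyTimeMixer import path and the `revision` value are assumptions; the notebooks use their own `TTM_MODEL_REVISION`):

```
from transformers import PatchTSMixerForPrediction, PatchTSTForPrediction

# Assumption: TinyTimeMixerForPrediction comes from the granite-tsfm package cloned in the tutorials.
from tsfm_public.models.tinytimemixer import TinyTimeMixerForPrediction

# New model IDs introduced by this commit (old IDs in comments).
patchtsmixer = PatchTSMixerForPrediction.from_pretrained(
    "ibm-granite/granite-timeseries-patchtsmixer"  # was: ibm/patchtsmixer-etth1-forecasting
)
patchtst = PatchTSTForPrediction.from_pretrained(
    "ibm-granite/granite-timeseries-patchtst"  # was: ibm/patchtst-etth1-forecasting
)
ttm = TinyTimeMixerForPrediction.from_pretrained(
    "ibm-granite/granite-timeseries-ttm-v1",  # was: ibm/TTM
    revision="main",  # assumption: substitute the notebook's TTM_MODEL_REVISION
)
```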
6 changes: 3 additions & 3 deletions notebooks/hfdemo/patch_tsmixer_getting_started.ipynb
@@ -301,7 +301,7 @@
"source": [
"## Testing with a `PatchTSMixer` model that was trained on the training part of the `ETTh1` data\n",
"\n",
"A pre-trained model (on `ETTh1` data) is available at [ibm/patchtsmixer-etth1-forecasting](https://huggingface.co/ibm/patchtsmixer-etth1-forecasting)."
"A pre-trained model (on `ETTh1` data) is available at [ibm-granite/granite-timeseries-patchtsmixer](https://huggingface.co/ibm-granite/granite-timeseries-patchtsmixer)."
]
},
{
@@ -322,7 +322,7 @@
"source": [
"print(\"Loading pretrained model\")\n",
"inference_forecast_model = PatchTSMixerForPrediction.from_pretrained(\n",
" \"ibm/patchtsmixer-etth1-forecasting\"\n",
" \"ibm-granite/granite-timeseries-patchtsmixer\"\n",
")\n",
"print(\"Done\")"
]
@@ -885,7 +885,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.10.13"
}
},
"nbformat": 4,
4 changes: 2 additions & 2 deletions notebooks/hfdemo/patch_tst_getting_started.ipynb
@@ -289,7 +289,7 @@
"source": [
"## Testing with a `PatchTST` model that was trained on the training part of the `ETTh1` data\n",
"\n",
"A pre-trained model (on `ETTh1` data) is available at [ibm/patchtst-etth1-forecasting](https://huggingface.co/ibm/patchtst-etth1-forecasting)."
"A pre-trained model (on `ETTh1` data) is available at [ibm-granite/granite-timeseries-patchtst](https://huggingface.co/ibm-granite/granite-timeseries-patchtst)."
]
},
{
@@ -310,7 +310,7 @@
"source": [
"print(\"Loading pretrained model\")\n",
"inference_forecast_model = PatchTSTForPrediction.from_pretrained(\n",
" \"ibm/patchtst-etth1-forecasting\"\n",
" \"ibm-granite/granite-timeseries-patchtst\"\n",
")\n",
"print(\"Done\")"
]
14 changes: 7 additions & 7 deletions notebooks/hfdemo/ttm_getting_started.ipynb
@@ -16,7 +16,7 @@
"\n",
"Note: Alternatively, this notebook can be modified to try the TTM-1024-96 model.\n",
"\n",
"Pre-trained TTM models will be fetched from the [Hugging Face TTM Model Repository](https://huggingface.co/ibm/TTM)."
"Pre-trained TTM models will be fetched from the [Hugging Face TTM Model Repository](https://huggingface.co/ibm-granite/granite-timeseries-ttm-v1)."
]
},
{
@@ -141,12 +141,12 @@
" # Load model\n",
" if prediction_filter_length is None:\n",
" zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\n",
" \"ibm/TTM\", revision=TTM_MODEL_REVISION\n",
" \"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION\n",
" )\n",
" else:\n",
" if prediction_filter_length <= forecast_length:\n",
" zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\n",
" \"ibm/TTM\", revision=TTM_MODEL_REVISION, prediction_filter_length=prediction_filter_length\n",
" \"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, prediction_filter_length=prediction_filter_length\n",
" )\n",
" else:\n",
" raise ValueError(f\"`prediction_filter_length` should be <= `forecast_length\")\n",
@@ -214,22 +214,22 @@
" if \"ett\" in dataset_name:\n",
" if prediction_filter_length is None:\n",
" finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\n",
" \"ibm/TTM\", revision=TTM_MODEL_REVISION, head_dropout=0.7\n",
" \"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, head_dropout=0.7\n",
" )\n",
" elif prediction_filter_length <= forecast_length:\n",
" finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\n",
" \"ibm/TTM\", revision=TTM_MODEL_REVISION, head_dropout=0.7, prediction_filter_length=prediction_filter_length\n",
" \"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, head_dropout=0.7, prediction_filter_length=prediction_filter_length\n",
" )\n",
" else:\n",
" raise ValueError(f\"`prediction_filter_length` should be <= `forecast_length\")\n",
" else:\n",
" if prediction_filter_length is None:\n",
" finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\n",
" \"ibm/TTM\", revision=TTM_MODEL_REVISION,\n",
" \"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION,\n",
" )\n",
" elif prediction_filter_length <= forecast_length:\n",
" finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\n",
" \"ibm/TTM\", revision=TTM_MODEL_REVISION, prediction_filter_length=prediction_filter_length\n",
" \"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, prediction_filter_length=prediction_filter_length\n",
" )\n",
" else:\n",
" raise ValueError(f\"`prediction_filter_length` should be <= `forecast_length\")\n",
10 changes: 5 additions & 5 deletions notebooks/tutorial/ttm_tutorial.ipynb
@@ -16,7 +16,7 @@
"\n",
"Note: Alternatively, this notebook can be modified to try the TTM-1024-96 model.\n",
"\n",
"Pre-trained TTM models will be fetched from the [Hugging Face TTM Model Repository](https://huggingface.co/ibm/TTM)."
"Pre-trained TTM models will be fetched from the [Hugging Face TTM Model Repository](https://huggingface.co/ibm-granite/granite-timeseries-ttm-v1)."
]
},
{
@@ -49,7 +49,7 @@
],
"source": [
"# Clone the ibm/tsfm\n",
"! git clone https://github.com/IBM/tsfm.git"
"! git clone https://github.com/IBM-granite/granite-tsfm.git"
]
},
{
@@ -1014,7 +1014,7 @@
}
],
"source": [
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION)\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION)\n",
"zeroshot_model"
]
},
@@ -1300,7 +1300,7 @@
}
],
"source": [
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, head_dropout=0.7)\n",
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, head_dropout=0.7)\n",
"finetune_forecast_model"
]
},
@@ -1650,7 +1650,7 @@
"### Q 19.\tZero-shot on channel 0 and 2 for etth1\n",
"In your notebook, add `prediction_channel_indices=[0,2]` during model loading to forecast only 0th and 2nd channels. In this case, execute the following code and note the output shape.\n",
"```\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, prediction_channel_indices=[0,2])\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, prediction_channel_indices=[0,2])\n",
"output = zeroshot_model.forward(test_dataset[0]['past_values'].unsqueeze(0), return_loss=False)\n",
"output.prediction_outputs.shape\n",
"```"
20 changes: 10 additions & 10 deletions notebooks/tutorial/ttm_tutorial_with_ans.ipynb
@@ -16,7 +16,7 @@
"\n",
"Note: Alternatively, this notebook can be modified to try the TTM-1024-96 model.\n",
"\n",
"Pre-trained TTM models will be fetched from the [Hugging Face TTM Model Repository](https://huggingface.co/ibm/TTM)."
"Pre-trained TTM models will be fetched from the [Hugging Face TTM Model Repository](https://huggingface.co/ibm-granite/granite-timeseries-ttm-v1)."
]
},
{
@@ -43,7 +43,7 @@
],
"source": [
"# Clone the ibm/tsfm\n",
"! git clone https://github.com/IBM/tsfm.git"
"! git clone https://github.com/IBM-granite/granite-tsfm.git"
]
},
{
Expand Down Expand Up @@ -1009,7 +1009,7 @@
}
],
"source": [
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION)\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION)\n",
"zeroshot_model"
]
},
Expand Down Expand Up @@ -1295,7 +1295,7 @@
}
],
"source": [
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, head_dropout=0.7)\n",
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, head_dropout=0.7)\n",
"finetune_forecast_model"
]
},
Expand Down Expand Up @@ -1694,7 +1694,7 @@
}
],
"source": [
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION)\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION)\n",
"temp_dir = tempfile.mkdtemp()\n",
"# zeroshot_trainer\n",
"zeroshot_trainer = Trainer(\n",
Expand Down Expand Up @@ -1755,7 +1755,7 @@
}
],
"source": [
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, prediction_filter_length=24)\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, prediction_filter_length=24)\n",
"temp_dir = tempfile.mkdtemp()\n",
"# zeroshot_trainer\n",
"zeroshot_trainer = Trainer(\n",
Expand Down Expand Up @@ -1959,7 +1959,7 @@
}
],
"source": [
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, head_dropout=0.7)\n",
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, head_dropout=0.7)\n",
"finetune_forecast_model"
]
},
Expand Down Expand Up @@ -2201,7 +2201,7 @@
"metadata": {},
"outputs": [],
"source": [
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, head_dropout=0.7, loss=\"mae\")"
"finetune_forecast_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, head_dropout=0.7, loss=\"mae\")"
]
},
{
Expand Down Expand Up @@ -2425,7 +2425,7 @@
"### Q 19.\tZero-shot on channel 0 and 2 for etth1\n",
"In your notebook, add `prediction_channel_indices=[0,2]` during model loading to forecast only 0th and 2nd channels. In this case, execute the following code and note the output shape.\n",
"```\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, prediction_channel_indices=[0,2])\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, prediction_channel_indices=[0,2])\n",
"output = zeroshot_model.forward(test_dataset[0]['past_values'].unsqueeze(0), return_loss=False)\n",
"output.prediction_outputs.shape\n",
"```"
@@ -2449,7 +2449,7 @@
}
],
"source": [
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm/TTM\", revision=TTM_MODEL_REVISION, prediction_channel_indices=[0,2])\n",
"zeroshot_model = TinyTimeMixerForPrediction.from_pretrained(\"ibm-granite/granite-timeseries-ttm-v1\", revision=TTM_MODEL_REVISION, prediction_channel_indices=[0,2])\n",
"output = zeroshot_model.forward(test_dataset[0]['past_values'].unsqueeze(0), return_loss=False)\n",
"output.prediction_outputs.shape"
]
