diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml
index be24fce..b5787b5 100644
--- a/.github/workflows/cache.yml
+++ b/.github/workflows/cache.yml
@@ -7,7 +7,7 @@ jobs:
   deploy-runner:
     runs-on: ubuntu-latest
     steps:
-      - uses: iterative/setup-cml@v1
+      - uses: iterative/setup-cml@v2
       - uses: actions/checkout@v4
         with:
           ref: ${{ github.event.pull_request.head.sha }}
@@ -22,12 +22,12 @@ jobs:
             --cloud-region=us-west-2 \
             --cloud-type=p3.2xlarge \
             --labels=cml-gpu \
-            --cloud-hdd-size=40
+            --cloud-hdd-size=100
   cache:
     needs: deploy-runner
     runs-on: [self-hosted, cml-gpu]
     container:
-      image: docker://mmcky/quantecon-lecture-python:cuda-12.1.0-anaconda-2023-09-py311-c
+      image: docker://mmcky/quantecon-lecture-python:cuda-11.8.0-anaconda-2023-09-py311
       options: --gpus all
     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2354c6e..06a9a4d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,7 +4,7 @@ jobs:
   deploy-runner:
     runs-on: ubuntu-latest
    steps:
-      - uses: iterative/setup-cml@v1
+      - uses: iterative/setup-cml@v2
       - uses: actions/checkout@v4
         with:
           ref: ${{ github.event.pull_request.head.sha }}
@@ -19,12 +19,12 @@ jobs:
             --cloud-region=us-west-2 \
             --cloud-type=p3.2xlarge \
             --labels=cml-gpu \
-            --cloud-hdd-size=40
+            --cloud-hdd-size=100
   preview:
     needs: deploy-runner
     runs-on: [self-hosted, cml-gpu]
     container:
-      image: docker://mmcky/quantecon-lecture-python:cuda-12.1.0-anaconda-2023-09-py311-c
+      image: docker://mmcky/quantecon-lecture-python:cuda-11.8.0-anaconda-2023-09-py311
       options: --gpus all
     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 8edc167..e36727b 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -7,7 +7,7 @@ jobs:
   deploy-runner:
     runs-on: ubuntu-latest
     steps:
-      - uses: iterative/setup-cml@v1
+      - uses: iterative/setup-cml@v2
      - uses: actions/checkout@v4
         with:
           ref: ${{ github.event.pull_request.head.sha }}
@@ -22,13 +22,13 @@ jobs:
             --cloud-region=us-west-2 \
             --cloud-type=p3.2xlarge \
             --labels=cml-gpu \
-            --cloud-hdd-size=40
+            --cloud-hdd-size=100
   publish:
     if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
     needs: deploy-runner
     runs-on: [self-hosted, cml-gpu]
     container:
-      image: docker://mmcky/quantecon-lecture-python:cuda-12.1.0-anaconda-2023-09-py311-c
+      image: docker://mmcky/quantecon-lecture-python:cuda-11.8.0-anaconda-2023-09-py311
       options: --gpus all
     steps:
       - name: Checkout
diff --git a/lectures/ar1_bayes.md b/lectures/ar1_bayes.md
index 913e6f9..a38e880 100644
--- a/lectures/ar1_bayes.md
+++ b/lectures/ar1_bayes.md
@@ -13,15 +13,12 @@ kernelspec:
 
 # Posterior Distributions for AR(1) Parameters
 
-We'll begin with some Python imports.
-
-
 ```{code-cell} ipython3
-:tags: [hide-output]
-
-!pip install arviz pymc numpyro jax
+!pip install pymc
 ```
 
+We'll begin with some Python imports.
+
 ```{code-cell} ipython3
 import arviz as az
diff --git a/lectures/ar1_turningpts.md b/lectures/ar1_turningpts.md
index 3aa55a9..4064327 100644
--- a/lectures/ar1_turningpts.md
+++ b/lectures/ar1_turningpts.md
@@ -13,12 +13,6 @@ kernelspec:
 
 # Forecasting an AR(1) Process
 
-```{code-cell} ipython3
-:tags: [hide-output]
-
-!pip install arviz pymc
-```
-
 This lecture describes methods for forecasting statistics that are functions of future values of a univariate autoregressive process.
 
 The methods are designed to take into account two possible sources of uncertainty about these statistics:
diff --git a/lectures/back_prop.md b/lectures/back_prop.md
index 101c894..d9b6e84 100644
--- a/lectures/back_prop.md
+++ b/lectures/back_prop.md
@@ -16,7 +16,6 @@ kernelspec:
 
 ```{code-cell} ipython3
 :tags: [hide-output]
 
-!pip install --upgrade jax jaxlib
 !conda install -y -c plotly plotly plotly-orca retrying
 ```
diff --git a/lectures/bayes_nonconj.md b/lectures/bayes_nonconj.md
index c5b790b..9add465 100644
--- a/lectures/bayes_nonconj.md
+++ b/lectures/bayes_nonconj.md
@@ -41,14 +41,6 @@ The two Python modules are
 
 As usual, we begin by importing some Python code.
 
-
-```{code-cell} ipython3
-:tags: [hide-output]
-
-# install dependencies
-!pip install numpyro pyro-ppl torch jax
-```
-
 ```{code-cell} ipython3
 import numpy as np
 import seaborn as sns
diff --git a/lectures/mix_model.md b/lectures/mix_model.md
index 4846cb7..833d7a4 100644
--- a/lectures/mix_model.md
+++ b/lectures/mix_model.md
@@ -14,14 +14,6 @@ kernelspec:
 
 (likelihood-ratio-process)=
 # Incorrect Models
 
-In addition to what's in Anaconda, this lecture will need the following libraries:
-```{code-cell} ipython
----
-tags: [hide-output]
----
-!pip install numpyro jax
-```
-
 ## Overview
 
 This is a sequel to {doc}`this quantecon lecture `.
diff --git a/lectures/status.md b/lectures/status.md
index 8309f51..116105d 100644
--- a/lectures/status.md
+++ b/lectures/status.md
@@ -20,4 +20,18 @@ This table contains the latest execution statistics.
 
 These lectures are built on `linux` instances through `github actions` and `amazon web services (aws)` to enable access to a `gpu`.
 
 These lectures are built on a [p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)
-that has access to `8 vcpu's`, a `V100 NVIDIA Tesla GPU`, and `61 Gb` of memory.
\ No newline at end of file
+that has access to `8 vcpu's`, a `V100 NVIDIA Tesla GPU`, and `61 Gb` of memory.
+
+You can check the backend used by JAX using:
+
+```{code-cell} ipython3
+import jax
+# Check if JAX is using GPU
+print(f"JAX backend: {jax.devices()[0].platform}")
+```
+
+and the hardware we are running on:
+
+```{code-cell} ipython3
+!nvidia-smi
+```