diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..46dfa71
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,13 @@
+FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
+
+RUN apt-get update && apt-get install -y \
+    python3 \
+    python3-pip
+
+RUN pip3 install torch transformers
+
+WORKDIR /app
+
+COPY run_models.py /app/
+
+CMD ["python3", "run_models.py"]
\ No newline at end of file
diff --git a/img/nosana_logo.png b/img/nosana_logo.png
new file mode 100644
index 0000000..a1c29f6
Binary files /dev/null and b/img/nosana_logo.png differ
diff --git a/run_models.py b/run_models.py
new file mode 100644
index 0000000..2a0bb6d
--- /dev/null
+++ b/run_models.py
@@ -0,0 +1,37 @@
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+models = [
+    "mistralai/Mistral-7B-Instruct-v0.2",  # Mistral
+    "Qwen/Qwen1.5-72B-Chat",  # Qwen 1.5
+    "meta-llama/Llama-2-7b",  # Llama 2
+    "databricks/dbrx-instruct",  # DBRX
+    "01-ai/Yi-34B-200K",  # Yi
+    "xai-org/grok-1",  # Grok
+]
+
+prompts = [
+    "What is the capital of France?",
+    "Explain the concept of artificial intelligence in simple terms.",
+    "Write a short story about a robot who dreams of becoming human.",
+    "What are the benefits of regular exercise?",
+    "How can we reduce our carbon footprint to combat climate change?",
+]
+
+for model_name in models:
+    print(f"Running model: {model_name}")
+    print("---")
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+
+    for prompt in prompts:
+        input_ids = tokenizer.encode(prompt, return_tensors="pt")
+        output = model.generate(input_ids, max_length=100)
+        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+        print(f"Prompt: {prompt}")
+        print(f"Generated text: {generated_text}")
+        print("---")
+
+    print("\n")
\ No newline at end of file