diff --git a/app (3).py b/app (3).py
new file mode 100644
index 000000000..5cefc3787
--- /dev/null
+++ b/app (3).py
@@ -0,0 +1,22 @@
+from flask import Flask, render_template, request, jsonify
+from chat import chatbot
+
+app = Flask(__name__)
+
+
+@app.route("/")
+def hello():
+    return render_template('chat.html')
+
+@app.route("/ask", methods=['POST'])
+def ask():
+
+    message = str(request.form['messageText'])
+
+    bot_response = chatbot(message)
+
+    return jsonify({'status': 'OK', 'answer': bot_response})
+
+
+if __name__ == "__main__":
+    app.run()
diff --git a/chat (1).py b/chat (1).py
new file mode 100644
index 000000000..72e4ec82c
--- /dev/null
+++ b/chat (1).py
@@ -0,0 +1,26 @@
+from peft import AutoPeftModelForCausalLM
+from transformers import GenerationConfig
+from transformers import AutoTokenizer
+import torch
+tokenizer = AutoTokenizer.from_pretrained("Vasanth/mistral-finetuned-alpaca")
+
+model = AutoPeftModelForCausalLM.from_pretrained(
+    "Vasanth/mistral-finetuned-alpaca",
+    low_cpu_mem_usage=True,
+    return_dict=True,
+    torch_dtype=torch.float16,
+    device_map="cuda")
+
+generation_config = GenerationConfig(
+    do_sample=True,
+    top_k=1,
+    temperature=0.1,
+    max_new_tokens=100,
+    pad_token_id=tokenizer.eos_token_id
+)
+
+def chatbot(message):
+    input_str = "###Human: " + message + " ###Assistant: "
+    inputs = tokenizer(input_str, return_tensors="pt").to("cuda")
+    outputs = model.generate(**inputs, generation_config=generation_config)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True).replace(input_str, '')
\ No newline at end of file
diff --git a/config (1).py b/config (1).py
new file mode 100644
index 000000000..9a835b3b5
--- /dev/null
+++ b/config (1).py
@@ -0,0 +1,2 @@
+## OpenAI API key (redacted; load from the environment instead)
+OPENAI_API_KEY = "sk-REDACTED"
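For a quick check of the `/ask` route added in `app (3).py`, here is a minimal client sketch, assuming the app is running on Flask's default development address (`127.0.0.1:5000`) and that the `requests` package is installed; neither assumption comes from the diff itself:

```python
import requests

# Post a message to the /ask endpoint; the form field name must match
# the one read in app (3).py (request.form['messageText']).
resp = requests.post(
    "http://127.0.0.1:5000/ask",
    data={"messageText": "What is the capital of France?"},
)
print(resp.json()["answer"])
```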
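A side note on `chatbot()` in `chat (1).py`: stripping the prompt with `.replace(input_str, '')` can silently fail, because `tokenizer.decode` does not always reproduce the prompt byte-for-byte (whitespace and special-character normalization vary by tokenizer). A sketch of an alternative that slices off the prompt by token count instead; this is not what the diff does, just one way to make the trimming robust:

```python
def chatbot(message):
    input_str = "###Human: " + message + " ###Assistant: "
    inputs = tokenizer(input_str, return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, generation_config=generation_config)
    # generate() returns prompt tokens followed by new tokens, so dropping
    # exactly the prompt's token count leaves only the generated reply;
    # no exact string match against the decoded text is needed.
    prompt_len = inputs["input_ids"].shape[1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
```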
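`config (1).py` originally committed a literal OpenAI key, redacted above; any key that has ever appeared in a commit should be treated as compromised and rotated. A minimal sketch of reading it from the environment instead (the `OPENAI_API_KEY` variable name is the usual convention, not something this diff defines):

```python
import os

# Read the key from the environment rather than hard-coding it in the repo,
# and fail fast if it is missing.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if OPENAI_API_KEY is None:
    raise RuntimeError("Set the OPENAI_API_KEY environment variable")
```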