added deployer config #2

Merged · 7 commits · Jun 28, 2024

Changes from all commits
22 changes: 22 additions & 0 deletions .github/workflows/deploy.yml
@@ -0,0 +1,22 @@
+# Only modify variables that have a comment above them
+# Contact IWG if you wish to alter the template otherwise
+
+name: Deploy
+on:
+  push:
+    branches: main
+  pull_request:
+    branches: ['*']
+
+jobs:
+  deploy:
+    name: Environments
+    uses: arg-tech/deployment-templates/.github/workflows/default-deploy-template.yml@main
+    secrets: inherit
+    with:
+      # Specify the target production server
+      target_production_server_nickname: argand
+      # Define a URL for your app, without the http:// or www prefixes
+      full_app_url: vanilla-am-caasr.amfws.arg.tech
+      # The port that is exposed on localhost (must be the same as in docker-compose.yml)
+      app_port: 5015
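As the inline comment notes, app_port must match the port published in docker-compose.yml; in this PR it also has to agree with the EXPOSE line in the Dockerfile and the port passed to app.run in main.py, all of which move from 5007 to 5015 below. A minimal sketch of one way to keep the value in a single place (hypothetical, not part of this PR):

```python
# Hypothetical sketch, not part of this PR: read the service port from one
# place so deploy.yml, docker-compose.yml, the Dockerfile, and main.py
# cannot drift apart again.
import os

APP_PORT = int(os.environ.get("APP_PORT", "5015"))  # default mirrors app_port above

# main.py would then end with:
#   app.run(host="0.0.0.0", port=APP_PORT, debug=False)
```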
7 changes: 4 additions & 3 deletions Dockerfile
@@ -10,13 +10,14 @@ RUN pip3 install torch
 RUN pip3 install numpy
 RUN pip3 install transformers
 RUN pip3 install Cython
-RUN pip3 install xaif_eval
+RUN pip3 install xaif_eval==0.0.9
 RUN pip3 install scikit-learn
-RUN pip3 install amf-fast-inference
+#RUN pip3 install amf-fast-inference
+RUN pip3 install amf-fast-inference==0.0.3


 COPY . /app
 WORKDIR /app
 RUN pip install -r requirements.txt
-EXPOSE 5007
+EXPOSE 5015
 CMD python ./main.py
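Pinning xaif_eval to 0.0.9 and amf-fast-inference to 0.0.3 keeps image builds reproducible. A hedged sketch of a startup guard that would catch drift from those pins (an assumption, not something this PR adds):

```python
# Hypothetical startup guard, not part of this PR: fail fast if the installed
# dependency versions differ from the pins in the Dockerfile above.
from importlib.metadata import version

EXPECTED = {"xaif_eval": "0.0.9", "amf-fast-inference": "0.0.3"}
for package, pinned in EXPECTED.items():
    installed = version(package)
    if installed != pinned:
        raise RuntimeError(f"{package} is {installed}, expected {pinned}")
```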
2 changes: 1 addition & 1 deletion docker-compose.yml
@@ -4,5 +4,5 @@ services:
   dam:
     build: .
     ports:
-      - "5007:5007"
+      - "5015:5015"

8 changes: 4 additions & 4 deletions main.py
@@ -1,7 +1,7 @@


 from src.caasr import CAASRArgumentStructure
-from transformers import GPT2Tokenizer,pipeline
+from transformers import GPT2Tokenizer,pipeline, AutoModelForSequenceClassification

 from amf_fast_inference import model

@@ -12,12 +12,12 @@

 logging.basicConfig(datefmt='%H:%M:%S', level=logging.DEBUG)


 app = Flask(__name__)
 metrics = PrometheusMetrics(app)
 model_name = "debela-arg/dialogtp-am-medium"
 loader = model.ModelLoader(model_name)
-model = loader.load_model()
+model = loader.load_model()
+#model = AutoModelForSequenceClassification.from_pretrained(model_name)
 tokenizer = GPT2Tokenizer.from_pretrained(model_name)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
@@ -47,5 +47,5 @@ def caasra():
     return info

 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=int("5007"), debug=False)
+    app.run(host="0.0.0.0", port=int("5015"), debug=False)
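The PR keeps the amf_fast_inference ModelLoader path active and leaves the plain transformers loader commented out as a reference. A sketch of how the two paths relate, using only the calls visible in this diff; the try/except fallback wrapper itself is an assumption, not code from this PR:

```python
# Sketch based only on the calls shown in main.py; the try/except fallback
# wrapper is hypothetical and not part of this PR.
import torch
from transformers import AutoModelForSequenceClassification
from amf_fast_inference import model as fast_model

model_name = "debela-arg/dialogtp-am-medium"
try:
    # Optimized path the PR keeps active: amf-fast-inference's ModelLoader.
    classifier = fast_model.ModelLoader(model_name).load_model()
except Exception:
    # Plain transformers path, left commented out in main.py.
    classifier = AutoModelForSequenceClassification.from_pretrained(model_name)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
classifier.to(device)
```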

33 changes: 19 additions & 14 deletions src/caasr.py
@@ -3,7 +3,7 @@
 import pandas as pd
 from itertools import combinations
 import torch.nn.functional as F
-from xaif_eval import xaif
+from xaif_eval import xaif
 import json
 from src.custom_model import MicroAndMacroContextPT2ForTokenClassification
 from src.data_utils import prepare_inputs
@@ -12,7 +12,7 @@

 logging.basicConfig(level=logging.INFO)

 from transformers import pipeline
-from amf_fast_inference import model
+#from amf_fast_inference import model


@@ -119,26 +119,31 @@ def get_argument_structure(self,):
         predicted_relations = []
         propositions = []
         for (p1, p2), relation in zip(combined_texts,predictions):
-            predicted_relations.append((p1,p2,relation))
-            if p1 not in propositions:
-                propositions.append(p1)
-            if p2 not in propositions:
-                propositions.append(p2)
+            if relation in ["CA", "RA",'MA']:
+                predicted_relations.append((p1,p2,relation))
+                if p1 not in propositions:
+                    propositions.append(p1)
+                if p2 not in propositions:
+                    propositions.append(p2)

         generator = ArgumentStructureGenerator()
         refined_structure = generator.generate_argument_structure_from_relations(propositions, predicted_relations)
         logging.info(refined_structure)
         relation_encoder = {0: "None", 1: "RA", 2: "CA", 3: "MA"}
         for conclussion_id, premise_relation_list in refined_structure.items():
             #print(node_id_prpos[conclussion_id], node_id_prpos[premise_relation_list[0]], relation_encoder[premise_relation_list[1]])
-            premise_id,AR_type = premise_relation_list[0], premise_relation_list[1]
-            logging.info(AR_type)
-            if AR_type in ['CA', 'RA']:
-
-                aif.add_component("argument_relation", AR_type, conclussion_id, premise_id)
-
-        return(aif.aif)
+            #premise_id,AR_type = premise_relation_list[0], premise_relation_list[1]
+            premises, relations = premise_relation_list[:len(premise_relation_list)//2], premise_relation_list[len(premise_relation_list)//2:]
+            for premise_id,AR_type in zip (premises, relations):
+                logging.info(AR_type)
+                if AR_type=="MA":
+                    AR_type = "RA"
+                if AR_type in ['CA', 'RA',"MA"]:
+                    logging.info(AR_type)
+
+                    aif.add_component("argument_relation", AR_type, conclussion_id, premise_id)
+
+        return(aif.xaif)

 '''
 if __name__ == "__main__":
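In AIF terms, RA is an inference (support), CA a conflict (attack), and MA a rephrase. The reworked loop assumes generate_argument_structure_from_relations returns, for each conclusion, a flat list whose first half is premise IDs and whose second half is the matching relation labels; it then folds MA into RA before adding the edge. A standalone sketch of that parsing step under the same assumption (the sample IDs are invented):

```python
# Standalone sketch of the list-splitting logic in get_argument_structure;
# the refined_structure sample below is invented for illustration.
refined_structure = {"c1": ["p1", "p2", "RA", "MA"]}  # hypothetical output

for conclusion_id, premise_relation_list in refined_structure.items():
    half = len(premise_relation_list) // 2
    premises, relations = premise_relation_list[:half], premise_relation_list[half:]
    for premise_id, ar_type in zip(premises, relations):
        if ar_type == "MA":  # the PR folds rephrases (MA) into inferences (RA)
            ar_type = "RA"
        if ar_type in ("CA", "RA"):
            print(f"{premise_id} -[{ar_type}]-> {conclusion_id}")
```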
10 changes: 6 additions & 4 deletions src/data_utils.py
@@ -45,7 +45,7 @@ def tokenize_and_concat(propo1, propso2, argument, tokenizer):


 def prepare_inputs(data, tokenizer):
-    # Implementation of prepare_inputs
+    # Implementation of prepare_inputs
     tokenized_results = []
     position_embeddings = []
     micro_labels = []
@@ -54,9 +54,11 @@ def prepare_inputs(data, tokenizer):
     prop_1_texts = data['prop_1']
     prop_2_texts = data['prop_2']
     for prop_1, prop_2, argument in zip(prop_1_texts, prop_2_texts, arguments):
-        tokenized_input,propos = tokenize_and_concat(prop_1, prop_2, argument,tokenizer)
-        tokenized_results.append(tokenized_input)
-        input_data.append(propos)
+
+        if (prop_1 not in ["Good evening both", "Rishi Sunak","Thank you, Rishi Sunak", "THANK YOU", "Rishi Sunak"]) and (prop_2 not in ["Good evening both", "Rishi Sunak","Thank you, Rishi Sunak", "THANK YOU", "Rishi Sunak"]):
+            tokenized_input,propos = tokenize_and_concat(prop_1, prop_2, argument,tokenizer)
+            tokenized_results.append(tokenized_input)
+            input_data.append(propos)
     return tokenized_results, input_data
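The new guard drops proposition pairs where either side is a greeting or speaker tag rather than argumentative content. A sketch of the same filter with the hard-coded list (which repeats "Rishi Sunak") deduplicated into a set; the helper name is hypothetical:

```python
# Hypothetical refactor of the guard added to prepare_inputs: a set removes
# the duplicated "Rishi Sunak" entry and makes the membership test O(1).
NON_ARGUMENTATIVE = {
    "Good evening both",
    "Rishi Sunak",
    "Thank you, Rishi Sunak",
    "THANK YOU",
}

def is_argument_pair(prop_1: str, prop_2: str) -> bool:
    """True when neither proposition is a greeting or speaker tag."""
    return prop_1 not in NON_ARGUMENTATIVE and prop_2 not in NON_ARGUMENTATIVE
```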