-
Notifications
You must be signed in to change notification settings - Fork 18
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
c1751d7
commit e3eab82
Showing
36 changed files
with
2,532 additions
and
0 deletions.
There are no files selected for viewing
91 changes: 91 additions & 0 deletions
91
...ctuality_disinformation_harmful_content/hate_speech/OSACT4SubtaskB_GPT4_FewShot_Arabic.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,91 @@ | ||
from llmebench.datasets import OSACT4SubtaskBDataset | ||
from llmebench.models import OpenAIModel | ||
from llmebench.tasks import HateSpeechTask | ||
|
||
|
||
def metadata():
    """Describe authorship and provenance of this benchmark configuration."""
    info = {
        "author": "Mohamed Bayan Kmainasi, Rakif Khan, Ali Ezzat Shahroor, Boushra Bendou, Maram Hasanain, and Firoj Alam",
        "affiliation": "Arabic Language Technologies, Qatar Computing Research Institute (QCRI), Hamad Bin Khalifa University (HBKU)",
        "model": "GPT-4o-2024-05-22",
        "description": "For a comprehensive analysis and results, refer to our peer-reviewed publication available at [Springer](https://doi.org/10.1007/978-981-96-0576-7_30) or explore the preprint version on [arXiv](https://arxiv.org/abs/2409.07054).",
    }
    return info
|
||
|
||
def config():
    """Wire together the dataset, task, and model used for this benchmark run."""
    model_args = {"max_tries": 3}
    return {
        "dataset": OSACT4SubtaskBDataset,
        "task": HateSpeechTask,
        "model": OpenAIModel,
        "model_args": model_args,
    }
|
||
|
||
def prompt(input_sample, examples):
    """Build the chat messages (system persona + few-shot user turn) for one tweet."""
    base_prompt = (
        'هل تحتوي التغريدة التالية على خطاب كراهية؟ أجب بـ "كراهية" إن احتوت على خطاب كراهية، و أجب بـ "ليس كراهية" إن لم تكن كذلك.'
        " هذه بعض الأمثلة:\n"
    )
    system_message = {
        "role": "system",
        "content": "أنت خبير في تحليل و تصنيف التغريدات.",
    }
    user_message = {
        "role": "user",
        "content": few_shot_prompt(input_sample, base_prompt, examples),
    }
    return [system_message, user_message]
|
||
|
||
def few_shot_prompt(input_sample, base_prompt, examples):
    """Render the few-shot prompt: instructions, labelled examples, target tweet.

    Args:
        input_sample: Tweet text to classify; its label is left blank.
        base_prompt: Task instructions prepended to the examples.
        examples: Iterable of dicts with "input" (tweet text) and "label"
            ("NOT_HS" or "HS") keys.

    Returns:
        The assembled prompt string.
    """
    out_prompt = base_prompt + "\n"
    for example in examples:
        # Found chatgpt confused when using 0 and 1 in the prompt
        label = "ليس كراهية" if example["label"] == "NOT_HS" else "كراهية"
        # Fix: add "\n" between the tweet text and the label marker — previously
        # "التصنيف:" was glued directly to the tweet. The English variant of this
        # config already separates them with a newline.
        out_prompt = (
            out_prompt + "التغريدة: " + example["input"] + "\nالتصنيف: " + label + "\n\n"
        )

    # Append the sentence we want the model to predict for but leave the label blank
    out_prompt = out_prompt + "التغريدة: " + input_sample + "\nالتصنيف:\n"

    return out_prompt
|
||
|
||
def post_process(response):
    """Map the model's free-form reply onto the dataset's binary labels.

    Args:
        response: OpenAI chat-completion response dict; the reply text is read
            from response["choices"][0]["message"]["content"].

    Returns:
        "NOT_HS" when the reply signals absence of hate speech, "HS" when it
        signals presence, or None when no keyword matches (treated upstream as
        an unparseable prediction).
    """
    out = response["choices"][0]["message"]["content"]
    label = out.lower().strip()

    # Negative cues are tested first so phrases such as "not hate speech" are
    # not claimed by the positive "hate speech" substring check below.
    if (
        "ليس" in label
        or "ليس كراهية" in label
        or "لا" in label
        or "no" in label
        or "not" in label
        or "don't" in label
        or "not_hs" in label
        or "not_hatespeech" in label
        or "not_hate_speech" in label
    ):
        return "NOT_HS"
    elif (
        "كراهية" in label
        or "نعم" in label
        or "أجل" in label
        or "yes" in label
        or "contains" in label  # fixed typo: was "contins", which never matched
        or "hs" in label
        or "hatespeech" in label
        or "hate speech" in label
    ):
        return "HS"
    else:
        return None
91 changes: 91 additions & 0 deletions
91
...tuality_disinformation_harmful_content/hate_speech/OSACT4SubtaskB_GPT4_FewShot_English.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,91 @@ | ||
from llmebench.datasets import OSACT4SubtaskBDataset | ||
from llmebench.models import OpenAIModel | ||
from llmebench.tasks import HateSpeechTask | ||
|
||
|
||
def metadata():
    """Describe authorship and provenance of this benchmark configuration."""
    info = {
        "author": "Mohamed Bayan Kmainasi, Rakif Khan, Ali Ezzat Shahroor, Boushra Bendou, Maram Hasanain, and Firoj Alam",
        "affiliation": "Arabic Language Technologies, Qatar Computing Research Institute (QCRI), Hamad Bin Khalifa University (HBKU)",
        "model": "GPT-4o-2024-05-22",
        "description": "For a comprehensive analysis and results, refer to our peer-reviewed publication available at [Springer](https://doi.org/10.1007/978-981-96-0576-7_30) or explore the preprint version on [arXiv](https://arxiv.org/abs/2409.07054).",
    }
    return info
|
||
|
||
def config():
    """Wire together the dataset, task, and model used for this benchmark run."""
    model_args = {"max_tries": 3}
    return {
        "dataset": OSACT4SubtaskBDataset,
        "task": HateSpeechTask,
        "model": OpenAIModel,
        "model_args": model_args,
    }
|
||
|
||
def prompt(input_sample, examples):
    """Build the chat messages (system persona + few-shot user turn) for one tweet."""
    base_prompt = (
        "Does the tweet contain hate speech? Answer by "
        " 'hate speech' if it does, and 'not hate speech' otherwise. Here are some examples:\n"
    )
    messages = [
        {
            "role": "system",
            "content": "You are an expert in tweets analysis and classification.",
        }
    ]
    messages.append(
        {
            "role": "user",
            "content": few_shot_prompt(input_sample, base_prompt, examples),
        }
    )
    return messages
|
||
|
||
def few_shot_prompt(input_sample, base_prompt, examples):
    """Render the few-shot prompt: instructions, labelled examples, then the
    unlabelled target tweet.

    Args:
        input_sample: Tweet text to classify; its label is left blank.
        base_prompt: Task instructions prepended to the examples.
        examples: Iterable of dicts with "input" and "label" ("NOT_HS"/"HS") keys.

    Returns:
        The assembled prompt string.
    """
    parts = [base_prompt, "\n"]
    for demo in examples:
        # Found chatgpt confused when using 0 and 1 in the prompt
        demo_label = "not hate speech" if demo["label"] == "NOT_HS" else "hate speech"
        parts.append("tweet: " + demo["input"] + "\nlabel: " + demo_label + "\n\n")

    # The tweet we want classified goes last, with its label left blank.
    parts.append("tweet: " + input_sample + "\nlabel:\n")

    return "".join(parts)
|
||
|
||
def post_process(response):
    """Map the model's free-form reply onto the dataset's binary labels.

    Args:
        response: OpenAI chat-completion response dict; the reply text is read
            from response["choices"][0]["message"]["content"].

    Returns:
        "NOT_HS" when the reply signals absence of hate speech, "HS" when it
        signals presence, or None when no keyword matches (treated upstream as
        an unparseable prediction).
    """
    out = response["choices"][0]["message"]["content"]
    label = out.lower().strip()

    # Negative cues are tested first so phrases such as "not hate speech" are
    # not claimed by the positive "hate speech" substring check below.
    if (
        "ليس" in label
        or "ليس كراهية" in label
        or "لا" in label
        or "no" in label
        or "not" in label
        or "don't" in label
        or "not_hs" in label
        or "not_hatespeech" in label
        or "not_hate_speech" in label
    ):
        return "NOT_HS"
    elif (
        "كراهية" in label
        or "نعم" in label
        or "أجل" in label
        or "yes" in label
        or "contains" in label  # fixed typo: was "contins", which never matched
        or "hs" in label
        or "hatespeech" in label
        or "hate speech" in label
    ):
        return "HS"
    else:
        return None
85 changes: 85 additions & 0 deletions
85
...actuality_disinformation_harmful_content/hate_speech/OSACT4SubtaskB_GPT4_FewShot_Mixed.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,85 @@ | ||
from llmebench.datasets import OSACT4SubtaskBDataset | ||
from llmebench.models import OpenAIModel | ||
from llmebench.tasks import HateSpeechTask | ||
|
||
|
||
def metadata():
    """Describe authorship and provenance of this benchmark configuration."""
    info = {
        "author": "Mohamed Bayan Kmainasi, Rakif Khan, Ali Ezzat Shahroor, Boushra Bendou, Maram Hasanain, and Firoj Alam",
        "affiliation": "Arabic Language Technologies, Qatar Computing Research Institute (QCRI), Hamad Bin Khalifa University (HBKU)",
        "model": "GPT-4o-2024-05-22",
        "description": "For a comprehensive analysis and results, refer to our peer-reviewed publication available at [Springer](https://doi.org/10.1007/978-981-96-0576-7_30) or explore the preprint version on [arXiv](https://arxiv.org/abs/2409.07054).",
    }
    return info
|
||
|
||
def config():
    """Wire together the dataset, task, and model used for this benchmark run."""
    model_args = {"max_tries": 3}
    return {
        "dataset": OSACT4SubtaskBDataset,
        "task": HateSpeechTask,
        "model": OpenAIModel,
        "model_args": model_args,
    }
|
||
|
||
def prompt(input_sample, examples):
    """Build chat messages: Arabic instructions that request English labels."""
    base_prompt = 'هل تحتوي التغريدة التالية على خطاب كراهية؟ أجب بـ "hate speech" إن احتوت على خطاب كراهية، و أجب بـ "not hate speech" إن لم تكن كذلك.\n'
    messages = [
        {
            "role": "system",
            "content": "أنت خبير في تحليل و تصنيف التغريدات.",
        }
    ]
    messages.append(
        {
            "role": "user",
            "content": few_shot_prompt(input_sample, base_prompt, examples),
        }
    )
    return messages
|
||
|
||
def few_shot_prompt(input_sample, base_prompt, examples):
    """Render the few-shot prompt: instructions, labelled examples, target tweet.

    Args:
        input_sample: Tweet text to classify; its label is left blank.
        base_prompt: Task instructions prepended to the examples.
        examples: Iterable of dicts with "input" (tweet text) and "label"
            ("NOT_HS" or "HS") keys.

    Returns:
        The assembled prompt string.
    """
    out_prompt = base_prompt + "\n"
    for example in examples:
        # Found chatgpt confused when using 0 and 1 in the prompt
        label = "not hate speech" if example["label"] == "NOT_HS" else "hate speech"
        # Fix: add "\n" between the tweet text and the label marker — previously
        # "التصنيف:" was glued directly to the tweet. The English variant of this
        # config already separates them with a newline.
        out_prompt = (
            out_prompt + "التغريدة: " + example["input"] + "\nالتصنيف: " + label + "\n\n"
        )

    # Append the sentence we want the model to predict for but leave the label blank
    out_prompt = out_prompt + "التغريدة: " + input_sample + "\nالتصنيف:\n"

    return out_prompt
|
||
|
||
def post_process(response):
    """Map the model's free-form reply onto the dataset's binary labels.

    Args:
        response: OpenAI chat-completion response dict; the reply text is read
            from response["choices"][0]["message"]["content"].

    Returns:
        "NOT_HS" when the reply signals absence of hate speech, "HS" when it
        signals presence, or None when no keyword matches (treated upstream as
        an unparseable prediction).
    """
    out = response["choices"][0]["message"]["content"]
    label = out.lower().strip()

    # Negative cues are tested first so phrases such as "not hate speech" are
    # not claimed by the positive "hate speech" substring check below.
    if (
        "ليس" in label
        or "ليس كراهية" in label
        or "لا" in label
        or "no" in label
        or "not" in label
        or "don't" in label
        or "not_hs" in label
        or "not_hatespeech" in label
        or "not_hate_speech" in label
    ):
        return "NOT_HS"
    elif (
        "كراهية" in label
        or "نعم" in label
        or "أجل" in label
        or "yes" in label
        or "contains" in label  # fixed typo: was "contins", which never matched
        or "hs" in label
        or "hatespeech" in label
        or "hate speech" in label
    ):
        return "HS"
    else:
        return None
72 changes: 72 additions & 0 deletions
72
...tuality_disinformation_harmful_content/hate_speech/OSACT4SubtaskB_GPT4_ZeroShot_Arabic.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,72 @@ | ||
from llmebench.datasets import OSACT4SubtaskBDataset | ||
from llmebench.models import OpenAIModel | ||
from llmebench.tasks import HateSpeechTask | ||
|
||
|
||
def metadata():
    """Describe authorship and provenance of this benchmark configuration."""
    info = {
        "author": "Mohamed Bayan Kmainasi, Rakif Khan, Ali Ezzat Shahroor, Boushra Bendou, Maram Hasanain, and Firoj Alam",
        "affiliation": "Arabic Language Technologies, Qatar Computing Research Institute (QCRI), Hamad Bin Khalifa University (HBKU)",
        "model": "GPT-4o-2024-05-22",
        "description": "For a comprehensive analysis and results, refer to our peer-reviewed publication available at [Springer](https://doi.org/10.1007/978-981-96-0576-7_30) or explore the preprint version on [arXiv](https://arxiv.org/abs/2409.07054).",
    }
    return info
|
||
|
||
def config():
    """Wire together the dataset, task, and model used for this benchmark run."""
    model_args = {
        "class_labels": ["HS", "NOT_HS"],
        "max_tries": 3,
    }
    return {
        "dataset": OSACT4SubtaskBDataset,
        "task": HateSpeechTask,
        "model": OpenAIModel,
        "model_args": model_args,
    }
|
||
|
||
def prompt(input_sample):
    """Build the zero-shot chat messages (system persona + instruction) for one tweet."""
    instruction = ' هل تحتوي التغريدة التالية على خطاب كراهية؟ أجب بـ "كراهية" إن احتوت على خطاب كراهية، و أجب بـ "ليس كراهية" إن لم تكن كذلك. \n'
    user_content = instruction + "التغريدة: " + input_sample + "\n" + "التصنيف: "
    return [
        {
            "role": "system",
            "content": "أنت خبير في تحليل و تصنيف التغريدات.",
        },
        {
            "role": "user",
            "content": user_content,
        },
    ]
|
||
|
||
def post_process(response):
    """Map the model's free-form reply onto the dataset's binary labels.

    Args:
        response: OpenAI chat-completion response dict; the reply text is read
            from response["choices"][0]["message"]["content"].

    Returns:
        "NOT_HS" when the reply signals absence of hate speech, "HS" when it
        signals presence, or None when no keyword matches (treated upstream as
        an unparseable prediction).
    """
    out = response["choices"][0]["message"]["content"]
    label = out.lower().strip()

    # Negative cues are tested first so phrases such as "not hate speech" are
    # not claimed by the positive "hate speech" substring check below.
    if (
        "ليس" in label
        or "ليس كراهية" in label
        or "لا" in label
        or "no" in label
        or "not" in label
        or "don't" in label
        or "not_hs" in label
        or "not_hatespeech" in label
        or "not_hate_speech" in label
    ):
        return "NOT_HS"
    elif (
        "كراهية" in label
        or "نعم" in label
        or "أجل" in label
        or "yes" in label
        or "contains" in label  # fixed typo: was "contins", which never matched
        or "hs" in label
        or "hatespeech" in label
        or "hate speech" in label
    ):
        return "HS"
    else:
        return None
Oops, something went wrong.