from __future__ import annotations

import string

import pandas as pd
import spacy
from nltk.tokenize import word_tokenize
from spacy.matcher import Matcher
from spacy.util import filter_spans

# Load the small English spaCy pipeline; its POS tagger drives the
# verb-phrase matcher below.
nlp = spacy.load("en_core_web_sm")
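# Setup note: the spaCy model and NLTK tokenizer data are assumed to be
# installed already, e.g. via
#   python -m spacy download en_core_web_sm
#   python -c "import nltk; nltk.download('punkt')"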
# Extract verb phrases from a sentence with a rule-based spaCy Matcher.
def get_verbs(input_sentence):
    # An optional verb, then any adverbs, then any auxiliaries, then at
    # least one verb (e.g. "was running").
    pattern = [
        {"POS": "VERB", "OP": "?"},
        {"POS": "ADV", "OP": "*"},
        {"POS": "AUX", "OP": "*"},
        {"POS": "VERB", "OP": "+"},
    ]
    # Instantiate a Matcher over the pipeline's shared vocabulary.
    matcher = Matcher(nlp.vocab)
    matcher.add("Verb phrase", [pattern])
    doc = nlp(input_sentence)
    # Call the matcher, then keep only the longest non-overlapping spans.
    matches = matcher(doc)
    spans = [doc[start:end] for _, start, end in matches]
    return filter_spans(spans)
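# Illustrative usage (exact spans depend on the model's POS tags):
#   get_verbs("The dog was running and barking loudly.")
#   might return spans like [was running, barking]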
# Get sentence length as the number of NLTK word tokens.
def get_sentence_length(input_sentence):
    return len(word_tokenize(input_sentence))
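# For example, get_sentence_length("Hello, world!") returns 4
# (tokens: "Hello", ",", "world", "!").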
# Filter sentence pairs in the DataFrame: drop pairs that are identical up
# to punctuation, rows with extraction artifacts, and sentences outside a
# 50-300 character range.
def filter_sent_in_df(df):
    print(df.shape)
    print(df.head())

    # Strip ASCII punctuation plus the em dash and curly quotes (which are
    # not in string.punctuation), then collapse leftover whitespace, so that
    # pairs differing by any number of punctuation marks compare equal.
    punct_table = str.maketrans("", "", string.punctuation + "—“”")

    def strip_punct(col):
        return (
            col.str.translate(punct_table)
            .str.replace(r"\s+", " ", regex=True)
            .str.strip()
        )

    df = df[strip_punct(df.sent1) != strip_punct(df.sent2)]
    # Drop rows containing the "( )" artifact left behind in the source data.
    df = df.drop(df[df.sent1.str.contains("( )", regex=False)].index)
    # Keep only pairs where both sentences are 50-300 characters long.
    df = df[df.sent1.str.len() >= 50]
    df = df[df.sent2.str.len() >= 50]
    df = df[df.sent1.str.len() <= 300]
    df = df[df.sent2.str.len() <= 300]
    return df
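# Illustrative input: a DataFrame with at least "sent1", "sent2" and
# "para_probs" columns; the function returns the rows surviving every
# filter, e.g.
#   filtered = filter_sent_in_df(pd.read_csv("pairs.csv"))  # hypothetical path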
def main():
    dataframe = pd.read_csv("paraphrase/figs/top_100000_noequal_bbc.csv")
    df = filter_sent_in_df(dataframe)

    df0 = pd.DataFrame({"para_probs": df.para_probs})

    # Per-sentence features: verb phrases and token counts.
    df1 = pd.DataFrame({"sent1": df.sent1})
    df1["sent1_verbs"] = df1["sent1"].apply(get_verbs)
    df1["sent1_length"] = df1["sent1"].apply(get_sentence_length)

    df2 = pd.DataFrame({"sent2": df.sent2})
    df2["sent2_verbs"] = df2["sent2"].apply(get_verbs)
    df2["sent2_length"] = df2["sent2"].apply(get_sentence_length)

    print("DF 1.......", df1.shape)
    print(df1.head())
    print("DF 2.......", df2.shape)
    print(df2.head())

    # Absolute pairwise differences in verb-phrase count and token count.
    df3 = (df1["sent1_verbs"].str.len() - df2["sent2_verbs"].str.len()).abs()
    df4 = (df1["sent1_length"] - df2["sent2_length"]).abs()
    df3 = pd.DataFrame({"num_verbs_diff": df3})
    df4 = pd.DataFrame({"sent_len_diff": df4})

    df_final = pd.concat([df0, df1, df2, df3, df4], axis=1)
    # df_final.to_csv("paraphrase/figs/filtered_bbc.csv", index=False)
    df_final.to_csv("paraphrase/figs/filtered_bbc_further.csv", index=False)
if __name__ == "__main__":
    main()