abhinav-joshi committed on
Commit e22e877 (parent: a6011ea)

clean codebase

Files changed (6):
  1. dummy.py +0 -15
  2. eval_utils.py +155 -3
  3. evaluation_results.json +0 -38
  4. labels.txt +0 -12
  5. ner_helpers.py +0 -141
  6. uploads.py +41 -91
dummy.py DELETED
@@ -1,15 +0,0 @@
-import json
-
-# load the results json file
-with open("submissions/baseline/results.json") as f:
-    results = json.load(f)
-
-
-# update the results
-with open("submissions/baseline/submission.json") as f:
-    submission = json.load(f)
-
-
-breakpoint()
-# update the results
-results.append(submission[0])
eval_utils.py CHANGED
@@ -13,7 +13,147 @@ from sklearn.metrics import f1_score
 from tqdm import tqdm
 from transformers import AutoTokenizer
 
-from ner_helpers import span2bio
+from transformers import AutoTokenizer
+import re
+import string
+
+
+class TF_Tokenizer:
+    def __init__(self, model_str):
+        tok = AutoTokenizer.from_pretrained(model_str)
+
+    def __call__(self, txt):
+        return self.tok.tokenize(txt)
+
+
+class WS_Tokenizer:
+    def __init__(self):
+        pass
+
+    def __call__(self, txt):
+        return re.findall(r"[{}]|\w+".format(string.punctuation), txt)
+
+
+def convert_spans_to_bio(txt, roles, tokenizer_func):
+    roles = sorted(roles, key=lambda x: x["start"])
+    roles_left = [r["start"] for r in roles]
+
+    ttxt = tokenizer_func(txt)
+
+    c = 0
+    cr = -1
+    prev = "O"
+    troles = []
+    for tok in ttxt:
+        if c >= len(txt):
+            break
+
+        while txt[c] == " ":
+            c += 1
+
+        else:
+            if c in roles_left:  # Start of a new role
+                ind = roles_left.index(c)
+                cr = roles[ind]["end"]
+                prev = "I-" + roles[ind]["label"]
+                troles.append("B-" + roles[ind]["label"])
+            else:
+                if c < cr:  # Assign previous role
+                    troles.append(prev)
+                else:  # Assign 'O'
+                    troles.append("O")
+
+        c += len(tok)
+
+    if len(ttxt) != len(troles):
+        troles += ["O"] * (len(ttxt) - len(troles))
+
+    assert len(ttxt) == len(troles)
+    return troles
+
+
+def convert_bio_to_spans(txt, troles, tokenizer_func):
+    c = 0
+    c2 = 0
+    cr = -1
+    cs = -1
+    prev = "O"
+
+    roles = []
+    ttxt = tokenizer_func(txt)
+
+    if len(ttxt) != len(troles):
+        ttxt = ttxt[: len(troles)]
+
+    for j, tok in enumerate(ttxt):
+        if c >= len(txt):
+            break
+
+        while c < len(txt) and txt[c].isspace():
+            c += 1
+
+        if tok[:2] == "##" or tok == "[UNK]":
+            c += len(tok) - 2 if tok[:2] == "##" else 1
+        else:
+            if troles[j].startswith("B-"):
+                if cs >= cr:
+                    cr = c
+                    if cs >= 0:
+                        roles.append({"start": cs, "end": c2, "label": prev})
+                cs = c
+                prev = troles[j][2:]
+            else:
+                if troles[j] == "O":
+                    if cs >= cr:
+                        cr = c
+                        if cs >= 0:
+                            roles.append({"start": cs, "end": c2, "label": prev})
+            c += len(tok)
+        c2 = c
+
+    if cs >= cr:
+        if cs >= 0:
+            roles.append({"start": cs, "end": c2, "label": prev})
+
+    return roles
+
+
+def span2bio(txt, labels):
+    roles = sorted(labels, key=lambda x: x["label"])
+    roles_left = [r["start"] for r in roles]
+
+    ttxt = re.findall(r"[{}]|\w+".format(string.punctuation), txt)
+
+    c = 0
+    cr = -1
+    prev = "O"
+    troles = []
+    for tok in ttxt:
+        if c >= len(txt):
+            break
+
+        while txt[c] == " ":
+            c += 1
+
+        else:
+            if c in roles_left:  # Start of a new role
+                ind = roles_left.index(c)
+                cr = roles[ind]["end"]
+                prev = "I-" + roles[ind]["label"]
+                troles.append("B-" + roles[ind]["label"])
+            else:
+                if c < cr:  # Assign previous role
+                    troles.append(prev)
+                else:  # Assign 'O'
+                    troles.append("O")
+
+        c += len(tok)
+
+    if len(ttxt) != len(troles):
+        troles += ["O"] * (len(ttxt) - len(troles))
+
+    assert len(ttxt) == len(troles)
+    return ttxt, troles
 
 
 def load_json(file_path):
@@ -76,8 +216,20 @@ def evaluate_cjpe(gold_data, pred_data):
 
 
 def evaluate_lner(gold_data, pred_data, text_data):
-    with open("labels.txt") as f:
-        labels = f.read().strip().split("\n")
+    labels = [
+        "APP",
+        "RESP",
+        "A.COUNSEL",
+        "R.COUNSEL",
+        "JUDGE",
+        "WIT",
+        "AUTH",
+        "COURT",
+        "STAT",
+        "PREC",
+        "DATE",
+        "CASENO",
+    ]
 
     results_per_fold = {}
     for fold in range(1, 4):
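
Two notes on the helpers this commit inlines from ner_helpers.py. First, TF_Tokenizer.__init__ binds the loaded tokenizer to a local variable tok instead of self.tok, so calling a TF_Tokenizer instance would raise AttributeError; WS_Tokenizer and span2bio are unaffected. Second, a minimal usage sketch of span2bio (toy sentence and span, not from the repo), showing how character-level annotations become token-level BIO tags:

# Toy example; assumes eval_utils.py is importable from the working directory.
from eval_utils import span2bio

text = "Judge Sharma heard the appeal"
spans = [{"start": 0, "end": 12, "label": "JUDGE"}]  # "Judge Sharma"

tokens, tags = span2bio(text, spans)
print(tokens)  # ['Judge', 'Sharma', 'heard', 'the', 'appeal']
print(tags)    # ['B-JUDGE', 'I-JUDGE', 'O', 'O', 'O']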
evaluation_results.json DELETED
@@ -1,38 +0,0 @@
-[
-  {
-    "Method": "GPT-5 (2-shot)",
-    "Submitted By": "IL-TUR",
-    "Github Link": "dummy submission",
-    "L-NER": {
-      "strict mF1": "-"
-    },
-    "RR": {
-      "mF1": {
-        "mF1": "0.10"
-      }
-    },
-    "CJPE": {
-      "mF1": "-",
-      "ROUGE-L": "-",
-      "BLEU": "-"
-    },
-    "BAIL": {
-      "mF1": "0.02"
-    },
-    "LSI": {
-      "mF1": "0.26"
-    },
-    "PCR": {
-      "muF1@K": "0.63"
-    },
-    "SUMM": {
-      "ROUGE-L": "-",
-      "BERTSCORE": "-"
-    },
-    "L-MT": {
-      "BLEU": "-",
-      "GLEU": "-",
-      "chrF++": "-"
-    }
-  }
-]
labels.txt DELETED
@@ -1,12 +0,0 @@
-APP
-RESP
-A.COUNSEL
-R.COUNSEL
-JUDGE
-WIT
-AUTH
-COURT
-STAT
-PREC
-DATE
-CASENO
ner_helpers.py DELETED
@@ -1,141 +0,0 @@
-from transformers import AutoTokenizer
-import re
-import string
-
-
-class TF_Tokenizer:
-    def __init__(self, model_str):
-        tok = AutoTokenizer.from_pretrained(model_str)
-
-    def __call__(self, txt):
-        return self.tok.tokenize(txt)
-
-
-class WS_Tokenizer:
-    def __init__(self):
-        pass
-
-    def __call__(self, txt):
-        return re.findall(r"[{}]|\w+".format(string.punctuation), txt)
-
-
-def convert_spans_to_bio(txt, roles, tokenizer_func):
-    roles = sorted(roles, key=lambda x: x["start"])
-    roles_left = [r["start"] for r in roles]
-
-    ttxt = tokenizer_func(txt)
-
-    c = 0
-    cr = -1
-    prev = "O"
-    troles = []
-    for tok in ttxt:
-        if c >= len(txt):
-            break
-
-        while txt[c] == " ":
-            c += 1
-
-        else:
-            if c in roles_left:  # Start of a new role
-                ind = roles_left.index(c)
-                cr = roles[ind]["end"]
-                prev = "I-" + roles[ind]["label"]
-                troles.append("B-" + roles[ind]["label"])
-            else:
-                if c < cr:  # Assign previous role
-                    troles.append(prev)
-                else:  # Assign 'O'
-                    troles.append("O")
-
-        c += len(tok)
-
-    if len(ttxt) != len(troles):
-        troles += ["O"] * (len(ttxt) - len(troles))
-
-    assert len(ttxt) == len(troles)
-    return troles
-
-
-def convert_bio_to_spans(txt, troles, tokenizer_func):
-    c = 0
-    c2 = 0
-    cr = -1
-    cs = -1
-    prev = "O"
-
-    roles = []
-    ttxt = tokenizer_func(txt)
-
-    if len(ttxt) != len(troles):
-        ttxt = ttxt[: len(troles)]
-
-    for j, tok in enumerate(ttxt):
-        if c >= len(txt):
-            break
-
-        while c < len(txt) and txt[c].isspace():
-            c += 1
-
-        if tok[:2] == "##" or tok == "[UNK]":
-            c += len(tok) - 2 if tok[:2] == "##" else 1
-        else:
-            if troles[j].startswith("B-"):
-                if cs >= cr:
-                    cr = c
-                    if cs >= 0:
-                        roles.append({"start": cs, "end": c2, "label": prev})
-                cs = c
-                prev = troles[j][2:]
-            else:
-                if troles[j] == "O":
-                    if cs >= cr:
-                        cr = c
-                        if cs >= 0:
-                            roles.append({"start": cs, "end": c2, "label": prev})
-            c += len(tok)
-        c2 = c
-
-    if cs >= cr:
-        if cs >= 0:
-            roles.append({"start": cs, "end": c2, "label": prev})
-
-    return roles
-
-
-def span2bio(txt, labels):
-    roles = sorted(labels, key=lambda x: x["label"])
-    roles_left = [r["start"] for r in roles]
-
-    ttxt = re.findall(r"[{}]|\w+".format(string.punctuation), txt)
-
-    c = 0
-    cr = -1
-    prev = "O"
-    troles = []
-    for tok in ttxt:
-        if c >= len(txt):
-            break
-
-        while txt[c] == " ":
-            c += 1
-
-        else:
-            if c in roles_left:  # Start of a new role
-                ind = roles_left.index(c)
-                cr = roles[ind]["end"]
-                prev = "I-" + roles[ind]["label"]
-                troles.append("B-" + roles[ind]["label"])
-            else:
-                if c < cr:  # Assign previous role
-                    troles.append(prev)
-                else:  # Assign 'O'
-                    troles.append("O")
-
-        c += len(tok)
-
-    if len(ttxt) != len(troles):
-        troles += ["O"] * (len(ttxt) - len(troles))
-
-    assert len(ttxt) == len(troles)
-    return ttxt, troles
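The deleted file is preserved verbatim in eval_utils.py above, so nothing is lost. As a sanity check on the convert_spans_to_bio / convert_bio_to_spans pair, a toy round trip under the whitespace tokenizer (made-up data, not part of the commit):

# Hypothetical round trip; the helpers now live in eval_utils.py.
from eval_utils import WS_Tokenizer, convert_spans_to_bio, convert_bio_to_spans

tok = WS_Tokenizer()
text = "Judge Sharma heard the appeal"
spans = [{"start": 0, "end": 12, "label": "JUDGE"}]

tags = convert_spans_to_bio(text, spans, tok)
# ['B-JUDGE', 'I-JUDGE', 'O', 'O', 'O']
recovered = convert_bio_to_spans(text, tags, tok)
# [{'start': 0, 'end': 12, 'label': 'JUDGE'}]
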
uploads.py CHANGED
@@ -1,33 +1,38 @@
-from email.utils import parseaddr
-from huggingface_hub import HfApi
 import os
-import datetime
 import json
-import pandas as pd
+import datetime
+from email.utils import parseaddr
+from io import BytesIO
+
+from huggingface_hub import HfApi
 import gradio as gr
 
 from eval_utils import get_evaluation_scores
 
-
+# Constants
 LEADERBOARD_PATH = "Exploration-Lab/IL-TUR-Leaderboard"
 SUBMISSION_FORMAT = "predictions"
-# RESULTS_PATH = "Exploration-Lab/IL-TUR-Leaderboard-results"
 TOKEN = os.environ.get("TOKEN", None)
 YEAR_VERSION = "2024"
 
 api = HfApi(token=TOKEN)
 
 
+# Helper functions for formatting messages
+def format_message(msg, color):
+    return f"<p style='color: {color}; font-size: 20px; text-align: center;'>{msg}</p>"
+
+
 def format_error(msg):
-    return f"<p style='color: red; font-size: 20px; text-align: center;'>{msg}</p>"
+    return format_message(msg, "red")
 
 
 def format_warning(msg):
-    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{msg}</p>"
+    return format_message(msg, "orange")
 
 
 def format_log(msg):
-    return f"<p style='color: green; font-size: 20px; text-align: center;'>{msg}</p>"
+    return format_message(msg, "green")
 
 
 def model_hyperlink(link, model_name):
@@ -35,26 +40,22 @@ def model_hyperlink(link, model_name):
 
 
 def input_verification(method_name, url, path_to_file, organisation, mail):
-    for input in [method_name, url, path_to_file, organisation, mail]:
-        if input == "":
-            return format_warning("Please fill all the fields.")
-
-    # Very basic email parsing
+    """Verify the input fields for submission."""
+    # Check if any field is empty
+    if any(
+        input == "" for input in [method_name, url, path_to_file, organisation, mail]
+    ):
+        return format_warning("Please fill all the fields.")
+
+    # Verify email format
     _, parsed_mail = parseaddr(mail)
-    if not "@" in parsed_mail:
-        return format_warning("Please provide a valid email adress.")
+    if "@" not in parsed_mail:
+        return format_warning("Please provide a valid email address.")
 
+    # Check if file is attached
     if path_to_file is None:
         return format_warning("Please attach a file.")
 
-    # check the required fields
-    required_fields = ["Method", "Submitted By", "url", "organisation", "mail"]
-
-    # Check if the required_fields are not blank
-    for field in required_fields:
-        if field not in locals():
-            raise gr.Error(f"{field} cannot be blank")
-
     return parsed_mail
 
 
@@ -66,98 +67,47 @@ def add_new_eval(
     organisation: str,
     mail: str,
 ):
+    """Add a new evaluation to the leaderboard."""
 
-    parsed_mail = input_verification(
-        method_name,
-        url,
-        path_to_file,
-        organisation,
-        mail,
-    )
-
-    # # load the file
-    # df = pd.read_csv(path_to_file)
-    # submission_df = pd.read_csv(path_to_file)
-
-    # # modify the df to include metadata
-    # df["Method"] = method_name
-    # df["url"] = url
-    # df["organisation"] = organisation
-    # df["mail"] = parsed_mail
-    # df["timestamp"] = datetime.datetime.now()
-
-    # submission_df = pd.read_csv(path_to_file)
-    # submission_df["Method"] = method_name
-    # submission_df["Submitted By"] = organisation
-    # # upload to spaces using the hf api at
-
-    # path_in_repo = f"submissions/{method_name}"
-    # file_name = f"{method_name}-{organisation}-{datetime.datetime.now().strftime('%Y-%m-%d')}.csv"
-
-    # upload the df to spaces
-    import io
+    # Verify input
+    parsed_mail = input_verification(method_name, url, path_to_file, organisation, mail)
+    if parsed_mail.startswith("<p"):  # If it's a warning message
+        return parsed_mail
 
+    # Process submission
     if SUBMISSION_FORMAT == "predictions":
-        # read the submission json file
+        # Read submission and gold data
         with open(path_to_file, "r") as f:
             submission_data = json.load(f)
-
-        # read the gold json file
        with open("submissions/baseline/IL_TUR_eval_gold_small.json", "r") as f:
             gold_data = json.load(f)
 
+        # Get evaluation scores
         submission = get_evaluation_scores(gold_data, submission_data)
-
     else:
-        # read the submission json file
+        # Read submission directly if it's not in predictions format
         with open(path_to_file, "r") as f:
             submission = json.load(f)
 
+    # Update results
     with open("submissions/baseline/results.json", "r") as f:
         results = json.load(f)
-
-    # update the results
     results.append(submission[0])
 
-    leaderboard_buffer = io.BytesIO()
-    # df.to_csv(buffer, index=False)  # Write the DataFrame to a buffer in CSV format
-    # buffer.seek(0)  # Rewind the buffer to the beginning
-
-    # save the results to buffer
-    leaderboard_buffer.write(json.dumps(results).encode())
+    # Prepare buffer for upload
+    leaderboard_buffer = BytesIO(json.dumps(results).encode())
     leaderboard_buffer.seek(0)
 
-    # api.upload_file(
-    #     repo_id=RESULTS_PATH,
-    #     path_in_repo=f"{path_in_repo}/{file_name}",
-    #     path_or_fileobj=buffer,
-    #     token=TOKEN,
-    #     repo_type="dataset",
-    # )
-    # # read the leaderboard
-    # leaderboard_df = pd.read_csv(f"submissions/baseline/baseline.csv")
-
-    # # append the new submission_df csv to the leaderboard
-    # # leaderboard_df = leaderboard_df._append(submission_df)
-    # # leaderboard_df = pd.concat([leaderboard_df, submission_df], ignore_index=True)
-
-    # # save the new leaderboard
-    # # leaderboard_df.to_csv(f"submissions/baseline/baseline.csv", index=False)
-    # leaderboard_buffer = io.BytesIO()
-    # leaderboard_df.to_csv(leaderboard_buffer, index=False)
-    # leaderboard_buffer.seek(0)
-    # with open("submissions/baseline/results.json", "w") as f:
-    #     json.dump(results, f)
-
+    # Upload to Hugging Face
     api.upload_file(
         repo_id=LEADERBOARD_PATH,
-        # path_in_repo=f"submissions/baseline/baseline.csv",
-        path_in_repo=f"submissions/baseline/results.json",
+        path_in_repo="submissions/baseline/results.json",
        path_or_fileobj=leaderboard_buffer,
         token=TOKEN,
         repo_type="space",
     )
 
     return format_log(
-        f"Method {method_name} submitted by {organisation} successfully. \nPlease refresh the leaderboard, and wait a bit to see the score displayed"
+        f"Method {method_name} submitted by {organisation} successfully. \n"
+        "Please refresh the leaderboard, and wait a bit to see the score displayed"
    )
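
The refactored add_new_eval keeps the leaderboard update entirely in memory: the updated results list is serialized into a BytesIO and handed to HfApi.upload_file, which accepts a path or a file-like object. A stripped-down sketch of that pattern (repo id and token are placeholders, not the real leaderboard credentials):

# Sketch of the in-memory JSON upload pattern; repo_id and token are placeholders.
import json
from io import BytesIO

from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # placeholder token
results = [{"Method": "my-method", "Submitted By": "my-org"}]  # toy payload

buffer = BytesIO(json.dumps(results).encode())  # a fresh BytesIO already sits at position 0
api.upload_file(
    repo_id="user/my-space",  # placeholder Space id
    path_in_repo="submissions/baseline/results.json",
    path_or_fileobj=buffer,
    repo_type="space",
)

One caveat survives the cleanup: input_verification returns either an HTML warning string or the parsed email address, and add_new_eval tells them apart with startswith("<p"). That sentinel is fragile; raising an exception or returning an explicit (ok, value) pair would be a safer follow-up.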