alvations committed on
Commit
62d10a3
1 Parent(s): ca8c990

Add my new, shiny module.

Browse files
Files changed (1) hide show
  1. llm_harness_mistral_arc.py +11 -23
llm_harness_mistral_arc.py CHANGED
@@ -2,19 +2,6 @@ import evaluate
2
  import datasets
3
  import lm_eval
4
 
5
- # TODO: Add BibTeX citation
6
- _CITATION = """
7
- """
8
-
9
- # TODO: Add description of the module here
10
- _DESCRIPTION = """
11
- """
12
-
13
-
14
- # TODO: Add description of the arguments of the module here
15
- _KWARGS_DESCRIPTION = """
16
- """
17
-
18
 
19
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
20
  class llm_harness_mistral_arc(evaluate.Metric):
@@ -23,19 +10,20 @@ class llm_harness_mistral_arc(evaluate.Metric):
23
  return evaluate.MetricInfo(
24
  # This is the description that will appear on the modules page.
25
  module_type="metric",
26
- description=_DESCRIPTION,
27
- citation=_CITATION,
28
- inputs_description=_KWARGS_DESCRIPTION,
29
  # This defines the format of each prediction and reference
30
- features={},
31
- # Homepage of the module for documentation
32
- homepage="http://module.homepage",
33
- # Additional links to the codebase or references
34
- codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
35
- reference_urls=["http://path.to.reference.url/new_module"]
 
36
  )
37
 
38
- def _compute(self, pretrained=None, tasks=[]):
39
  outputs = lm_eval.simple_evaluate(
40
  model="hf",
41
  model_args={"pretrained":pretrained},
 
2
  import datasets
3
  import lm_eval
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
7
  class llm_harness_mistral_arc(evaluate.Metric):
 
10
  return evaluate.MetricInfo(
11
  # This is the description that will appear on the modules page.
12
  module_type="metric",
13
+ description="",
14
+ citation="",
15
+ inputs_description="",
16
  # This defines the format of each prediction and reference
17
+ features=[
18
+ datasets.Features(
19
+ {
20
+ "pretrained": datasets.Value("string", id="sequence"),
21
+ "tasks": datasets.Sequence(datasets.Value("string", id="sequence"), id="tasks"),
22
+ }
23
+ )]
24
  )
25
 
26
+ def _compute(self, pretrained, tasks):
27
  outputs = lm_eval.simple_evaluate(
28
  model="hf",
29
  model_args={"pretrained":pretrained},