from dotenv import load_dotenv
import gradio as gr
import json
import html
import logging

import numpy as np

from utils.model import Model
from utils.metric import metric_rouge_score
from pages.summarization_playground import generate_answer, custom_css

load_dotenv()

# Module-level logger (PEP 282 convention) instead of the root logger.
logger = logging.getLogger(__name__)


def display_results(response_list):
    """Build an HTML report for a batch of evaluated summaries.

    Parameters
    ----------
    response_list : list[dict]
        Each item carries 'dialogue', 'summary', 'response' strings and a
        'metric_score' dict containing a 'rouge_score' float.

    Returns
    -------
    str
        HTML markup: the mean ROUGE score followed by one section per
        datapoint with the escaped dialogue, reference summary, and model
        response.
    """
    # NOTE(review): the original HTML tags were stripped when this file was
    # extracted; the markup below is a minimal reconstruction that preserves
    # the visible text and structure — confirm against the intended layout.
    if response_list:
        overall_score = np.mean(
            [r['metric_score']['rouge_score'] for r in response_list]
        )
    else:
        # Guard: np.mean([]) emits a RuntimeWarning and returns NaN.
        overall_score = 0.0

    sections = [f"<div><h2>Overall Score: {overall_score:.2f}</h2></div>"]
    for i, item in enumerate(response_list, 1):
        rouge_score = item['metric_score']['rouge_score']
        # Escape model/user text so it renders literally, and preserve the
        # original line breaks in HTML.
        dialogue = html.escape(item['dialogue']).replace('\n', '<br>')
        summary = html.escape(item['summary']).replace('\n', '<br>')
        response = html.escape(item['response']).replace('\n', '<br>')
        sections.append(
            f"""
            <div>
                <h3>Response {i} (Rouge Score: {rouge_score:.2f})</h3>
                <h4>Dialogue</h4>
                <p>{dialogue}</p>
                <h4>Summary</h4>
                <p>{summary}</p>
                <h4>Response</h4>
                <p>{response}</p>
            </div>
            """
        )
    # str.join instead of repeated += (avoids quadratic concatenation).
    return "".join(sections)


def process(model_selection, prompt, num=10):
    """Generate and ROUGE-score a summary for every test datapoint.

    Parameters
    ----------
    model_selection : str
        Model identifier forwarded to ``generate_answer``.
    prompt : str
        Prompting template; a per-datapoint format instruction is appended.
    num : int, optional
        Currently unused; kept for backward compatibility with existing
        callers.
        NOTE(review): presumably meant to cap the number of datapoints
        processed — confirm intent before wiring it up.

    Returns
    -------
    str
        HTML report produced by ``display_results``.
    """
    response_list = []
    with open("test_samples/test_data.json", "r") as file:
        # json.load parses the stream directly; no intermediate string.
        dataset = json.load(file)

    for i, data in enumerate(dataset):
        # Lazy %-style args: the message is only formatted if emitted.
        logger.info("Start testing datapoint %d", i + 1)
        dialogue = data['dialogue']
        # Renamed from `format` so the builtin is not shadowed.
        output_format = data['format']
        summary = data['summary']
        response = generate_answer(
            dialogue,
            model_selection,
            prompt + f' Output following {output_format} format.',
        )
        rouge_score = metric_rouge_score(response, summary)
        response_list.append(
            {
                'dialogue': dialogue,
                'summary': summary,
                'response': response,
                'metric_score': {'rouge_score': rouge_score},
            }
        )
        logger.info("Complete testing datapoint %d", i + 1)

    return display_results(response_list)


def create_batch_evaluation_interface():
    """Assemble the Gradio Blocks UI for batch evaluation.

    Returns
    -------
    gr.Blocks
        A demo with a model dropdown, a prompt textbox, a submit button,
        and an HTML output wired to ``process``.
    """
    with gr.Blocks(
        theme=gr.themes.Soft(spacing_size="sm", text_size="sm"),
        css=custom_css,
    ) as demo:
        # Typos fixed: "run though" -> "run through", "test_data.josn"
        # -> "test_data.json" (matches the file actually opened in process).
        gr.Markdown(
            "## Here are evaluation setups. It will run through datapoints "
            "in test_data.json to generate and evaluate. "
            "Show results once finished."
        )
        model_dropdown = gr.Dropdown(
            choices=Model.__model_list__,
            label="Choose a model",
            value=Model.__model_list__[0],
        )
        Template_text = gr.Textbox(
            value="""Summarize the following dialogue""",
            label='Input Prompting Template',
            lines=8,
            placeholder='Input your prompts',
        )
        submit_button = gr.Button("✨ Submit ✨")
        output = gr.HTML(label="Results")

        submit_button.click(
            process,
            inputs=[model_dropdown, Template_text],
            outputs=output,
        )

    return demo


if __name__ == "__main__":
    demo = create_batch_evaluation_interface()
    demo.launch()