import os

import gradio as gr
import torch
from transformers import (
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoTokenizer,
    pipeline,
)

# Load the tokenizer and model for the first NER pipeline.
# Note: pipeline() comes from transformers, not gradio.
tokenizer_ext = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_token")
model_ext = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_token")
tokenizer_ext.model_max_length = 512
pipe_ext = pipeline("ner", model=model_ext, tokenizer=tokenizer_ext)

# Load the tokenizer and model for the second NER pipeline.
tokenizer_ais = AutoTokenizer.from_pretrained("AlGe/deberta-v3-large_AIS-token")
model_ais = AutoModelForTokenClassification.from_pretrained("AlGe/deberta-v3-large_AIS-token")
tokenizer_ais.model_max_length = 512
pipe_ais = pipeline("ner", model=model_ais, tokenizer=tokenizer_ais)

# Load the tokenizer and models for the sequence-classification heads.
# These repositories require authentication, so HF_TOKEN must be set.
auth_token = os.environ["HF_TOKEN"]
model1 = AutoModelForSequenceClassification.from_pretrained(
    "AlGe/deberta-v3-large_Int_segment", num_labels=1, token=auth_token
)
tokenizer1 = AutoTokenizer.from_pretrained(
    "AlGe/deberta-v3-large_Int_segment", token=auth_token
)
model2 = AutoModelForSequenceClassification.from_pretrained(
    "AlGe/deberta-v3-large_seq_ext", num_labels=1, token=auth_token
)


# Merge token-level predictions into contiguous entity spans.
def process_ner(text, ner_pipeline):
    output = ner_pipeline(text)
    entities = []
    current_entity = None

    for token in output:
        entity_type = token["entity"][2:]   # strip the "B-"/"I-" prefix
        entity_prefix = token["entity"][:1]

        # Start a new span on the first token, on a type change,
        # or on an explicit "B-" tag of the same type.
        if (
            current_entity is None
            or entity_type != current_entity["entity"]
            or (entity_prefix == "B" and entity_type == current_entity["entity"])
        ):
            if current_entity is not None:
                entities.append(current_entity)
            current_entity = {
                "entity": entity_type,
                "start": token["start"],
                "end": token["end"],
                "score": token["score"],
            }
        else:
            # Extend the current span and keep its best token score.
            current_entity["end"] = token["end"]
            current_entity["score"] = max(current_entity["score"], token["score"])

    if current_entity is not None:
        entities.append(current_entity)

    return {"text": text, "entities": entities}


# Run both regression heads on the same tokenized input and derive a ratio.
def process_classification(text, model1, model2, tokenizer1):
    inputs1 = tokenizer1(text, max_length=512, return_tensors="pt", truncation=True, padding=True)

    with torch.no_grad():
        outputs1 = model1(**inputs1)
        outputs2 = model2(**inputs1)

    prediction1 = outputs1.logits.item()
    prediction2 = outputs2.logits.item()
    score = prediction1 / (prediction2 + prediction1)

    return f"{round(prediction1, 1)}", f"{round(prediction2, 1)}", f"{round(score, 2)}"


# Gradio's fn must be a single callable (not a dict), so run every model once
# per request and return the five outputs in the order declared below.
def process_all(text):
    ner_ext = process_ner(text, pipe_ext)
    ner_ais = process_ner(text, pipe_ais)
    int_count, ext_count, ratio = process_classification(text, model1, model2, tokenizer1)
    return ner_ext, ner_ais, int_count, ext_count, ratio


# Define the Gradio interface.
iface = gr.Interface(
    fn=process_all,
    inputs=gr.Textbox(placeholder="Enter sentence here..."),
    outputs=[
        gr.HighlightedText(label="NER - Extended Sequence Classification"),
        gr.HighlightedText(label="NER - Autobiographical Interview Scoring"),
        gr.Label(label="Internal Detail Count"),
        gr.Label(label="External Detail Count"),
        gr.Label(label="Approximated Internal Detail Ratio"),
    ],
    title="Combined Demo",
    description=(
        "This demo combines two different NER models and two different "
        "sequence classification models. Enter a sentence to see the results."
    ),
    theme="monochrome",
)

# Launch the combined interface.
iface.launch()
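
# A minimal smoke test, assuming the models above load successfully; the sample
# sentence is made up for illustration. To run it instead of the web UI,
# comment out iface.launch() above and uncomment the lines below:
#
#   sample = "I remember walking to the old harbour with my sister."
#   print(process_ner(sample, pipe_ext))
#   print(process_classification(sample, model1, model2, tokenizer1))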