# Gradio sentiment-analysis app: loads a fine-tuned HuggingFace
# sequence-classification model and serves a text -> {label: probability} UI.

# Importing modules
# NOTE(review): TFAutoModelForSequenceClassification and AutoModel are imported
# but never used; kept to preserve the file's import surface.
from transformers import AutoModelForSequenceClassification
from transformers import TFAutoModelForSequenceClassification
from transformers import AutoModel, AutoTokenizer
from transformers import AutoTokenizer, pipeline, AutoConfig
import numpy as np
import gradio as gr
from scipy.special import softmax

# HuggingFace Hub path where the fine-tuned model is placed
model_path = "Henok21/test_trainer"

# Loading the model, its config, and the tokenizer
model = AutoModelForSequenceClassification.from_pretrained(model_path)
config = AutoConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')

# Pipeline wrapper.
# NOTE(review): the name is a typo for "classifier" and the pipeline is never
# used below (inference goes through model/tokenizer directly); the name is
# kept unchanged in case other code references it.
calssifier = pipeline("sentiment-analysis", model, tokenizer=tokenizer)


def preprocess(text):
    """Normalize tweet-style text: mask @mentions as '@user' and URLs as 'http'."""
    new_text = []
    for t in text.split(" "):
        t = '@user' if t.startswith('@') and len(t) > 1 else t
        t = 'http' if t.startswith('http') else t
        new_text.append(t)
    return " ".join(new_text)


# Map class indices to human-readable labels
config.id2label = {0: 'NEGATIVE', 1: 'NEUTRAL', 2: 'POSITIVE'}


def sentiment_analysis(text):
    """Classify `text` and return a dict mapping each label to its probability.

    Bug fixes vs. the original:
    - `output` was read but never computed (NameError on every call); the
      tokenize-and-forward step is now performed.
    - the result dict `d` was never initialized; it is now created locally.
    - `preprocess` was defined but never applied; it is now used.
    """
    text = preprocess(text)
    # Tokenize and run the model; output[0] is the logits tensor,
    # output[0][0] the logits for this single input.
    encoded_input = tokenizer(text, return_tensors='pt')
    output = model(**encoded_input)
    scores = output[0][0].detach().numpy()
    # Logits -> probabilities
    scores = softmax(scores)
    scores = scores.tolist()
    # Rank class indices by descending probability
    ranking = np.argsort(scores)[::-1]
    d = {}
    for i in range(len(scores)):
        label = config.id2label[ranking[i]]
        # Convert the numpy float into a plain float so the dict is
        # JSON-serializable for Gradio.
        d[label] = float(scores[ranking[i]])
    # Return the dictionary as the response content
    return d


# Create and launch the Gradio interface
demo = gr.Interface(fn=sentiment_analysis, inputs="text", outputs="label")
demo.launch(debug=True)