from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, pipeline
# Load the fine-tuned PEFT model once and reuse the pipeline for predictions.

def create_pipeline():
    model = AutoPeftModelForCausalLM.from_pretrained(
        "Moritz-Pfeifer/financial-times-classification-llama-2-7b-v1.3"
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "Moritz-Pfeifer/financial-times-classification-llama-2-7b-v1.3"
    )
    # Instruction prompt: the article is appended in square brackets and the
    # model answers with a single sentiment label.
    prompt = """
            You are given a news article regarding the greater Boston area.
            Analyze the sentiment of the article enclosed in square brackets,
            determine if it is positive, negative or neutral, and return the answer
            as the corresponding sentiment label "positive", "negative", or "neutral".
            """
    pipe = pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=1,   # one token is enough for the label
        temperature=0.1,
    )
    return prompt, pipe


def predict_text(text, pipe, prompt):
    # The fine-tuning format wraps the article in [{...}] followed by " =".
    result = pipe(prompt + "\n[{" + text + "}] =")
    # The pipeline returns the prompt plus the generated token; keep only the
    # text after the final "=".
    answer = result[0]["generated_text"].split("=")[-1]
    if "positive" in answer.lower():
        return "positive"
    elif "negative" in answer.lower():
        return "negative"
    else:
        return "neutral"