from flask import Flask, render_template, request, jsonify
from transformers import pipeline, LlamaTokenizer, LlamaForCausalLM
# LangChain imports for the OpenAI-backed chain defined below
# (the langchain and langchain-openai packages are assumed to be installed)
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI

# Load the LLaMA model and tokenizer
model_name = "huggingface/llama-model"  # Replace with the specific LLaMA model you want to use
tokenizer = LlamaTokenizer.from_pretrained(model_name)
model = LlamaForCausalLM.from_pretrained(model_name)
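# (Note: LLaMA checkpoints are gated on the Hugging Face Hub and can be
# large; make sure you have access and enough memory before loading)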

# Initialize the text generation pipeline
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Load the prompt from the text file
with open('website_text.txt', 'r') as file:
    prompt = file.read()
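# website_text.txt is assumed to hold the Landon Hotel site content that
# grounds the assistant's answers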

hotel_assistant_template = prompt + """
You are the hotel manager of Landon Hotel, named "Mr. Landon".
Your expertise is exclusively in providing information and advice about anything related to Landon Hotel.
This includes any general Landon Hotel related queries.
You do not provide information outside of this scope.
If a question is not about Landon Hotel, respond with, "I can't assist you with that, sorry!"
Question: {question}
Answer:
"""

# Alternative backend: answer with the locally loaded LLaMA model.
# The /chatbot route below uses the LangChain/OpenAI query_llm; swap in
# query_llm_local there to serve answers from the local model instead.
def query_llm_local(question):
    # Create the final prompt by inserting the question into the template
    final_prompt = hotel_assistant_template.format(question=question)

    # Generate a response; max_new_tokens caps only the generated text,
    # so the long prompt is not counted against the limit
    response = llm_pipeline(final_prompt, max_new_tokens=150, do_sample=True)[0]['generated_text']

    # Extract the answer (the text after the "Answer:" marker)
    answer = response.split("Answer:", 1)[-1].strip()

    return answer

# LangChain backend: wrap the template in a PromptTemplate and compose it
# with an OpenAI completion model (the | operator builds an LCEL chain)
hotel_assistant_prompt_template = PromptTemplate(
    input_variables=["question"],
    template=hotel_assistant_template,
)

llm = OpenAI(model='gpt-3.5-turbo-instruct', temperature=0)

llm_chain = hotel_assistant_prompt_template | llm

def query_llm(question):
    # Invoke the chain; the completion model returns the answer as a string
    response = llm_chain.invoke({'question': question})
    return response

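# Flask app: serves the chat page and exposes a JSON chatbot endpoint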
app = Flask(__name__) 

@app.route("/") 
def index(): 
    return render_template("index.html") 

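# POST /chatbot expects a JSON body like {"question": "..."} and returns
# the reply as {"response": "..."}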
@app.route("/chatbot", methods=["POST"]) 
def chatbot(): 
    data = request.get_json() 
    question = data["question"] 
    response = query_llm(question) 
    return jsonify({"response": response}) 

if __name__ == "__main__": 
    app.run(debug=True)
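
# Example request once the server is running (Flask's dev server defaults to
# http://127.0.0.1:5000; the sample question is illustrative):
#   curl -X POST http://127.0.0.1:5000/chatbot \
#        -H "Content-Type: application/json" \
#        -d '{"question": "What are the check-in times at Landon Hotel?"}'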