Alimubariz124 committed
Commit 2141c58 · verified · 1 Parent(s): aacf656

Update main.py

Files changed (1)
  1. main.py +51 -43
main.py CHANGED
@@ -1,44 +1,52 @@
- from flask import Flask, render_template, request, jsonify
- from langchain_openai import OpenAI
- from langchain.prompts import PromptTemplate
-
- prompt = open('website_text.txt', 'r').read()
-
- hotel_assistant_template = prompt + """
- You are the hotel manager of Landon Hotel, named "Mr. Landon".
- Your expertise is exclusively in providing information and advice about anything related to Landon Hotel.
- This includes any general Landon Hotel related queries.
- You do not provide information outside of this scope.
- If a question is not about Landon Hotel, respond with, "I can't assist you with that, sorry!"
- Question: {question}
- Answer:
- """
-
- hotel_assistant_prompt_template = PromptTemplate(
-     input_variables=["question"],
-     template=hotel_assistant_template
- )
-
- llm = OpenAI(model='gpt-3.5-turbo-instruct', temperature=0)
-
- llm_chain = hotel_assistant_prompt_template | llm
-
- def query_llm(question):
-     response = llm_chain.invoke({'question': question})
-     return response
-
- app = Flask(__name__)
-
- @app.route("/")
- def index():
-     return render_template("index.html")
-
- @app.route("/chatbot", methods=["POST"])
- def chatbot():
-     data = request.get_json()
-     question = data["question"]
-     response = query_llm(question)
-     return jsonify({"response": response})
-
- if __name__ == "__main__":
+ from flask import Flask, render_template, request, jsonify
+ from transformers import pipeline, LlamaTokenizer, LlamaForCausalLM
+
+ # Load the LLaMA model and tokenizer
+ model_name = "huggingface/llama-model"  # Replace with the specific LLaMA model you want to use
+ tokenizer = LlamaTokenizer.from_pretrained(model_name)
+ model = LlamaForCausalLM.from_pretrained(model_name)
+
+ # Initialize the text generation pipeline
+ llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+ # Load the prompt from the text file
+ with open('website_text.txt', 'r') as file:
+     prompt = file.read()
+
+ hotel_assistant_template = prompt + """
+ You are the hotel manager of Landon Hotel, named "Mr. Landon".
+ Your expertise is exclusively in providing information and advice about anything related to Landon Hotel.
+ This includes any general Landon Hotel related queries.
+ You do not provide information outside of this scope.
+ If a question is not about Landon Hotel, respond with, "I can't assist you with that, sorry!"
+ Question: {question}
+ Answer:
+ """
+
+ def query_llm(question):
+     # Insert the question into the template (assumes website_text.txt contains no stray "{" or "}")
+     final_prompt = hotel_assistant_template.format(question=question)
+
+     # Generate a response; max_new_tokens bounds only the completion, not the (long) prompt
+     response = llm_pipeline(final_prompt, max_new_tokens=150, do_sample=True)[0]['generated_text']
+
+     # Extract the answer from the generated text (everything after the "Answer:" marker)
+     answer = response.split("Answer:", 1)[-1].strip()
+
+     return answer
+
+ app = Flask(__name__)
+
+ @app.route("/")
+ def index():
+     return render_template("index.html")
+
+ @app.route("/chatbot", methods=["POST"])
+ def chatbot():
+     data = request.get_json()
+     question = data["question"]
+     response = query_llm(question)
+     return jsonify({"response": response})
+
+ if __name__ == "__main__":
      app.run(debug=True)
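
For reference, a minimal sketch of exercising the updated /chatbot endpoint once the app is running (this assumes Flask's default local address http://127.0.0.1:5000 and the third-party requests package; the question text is only illustrative):

import requests

# POST a question to the running Flask app; the endpoint returns JSON of the form {"response": ...}
resp = requests.post(
    "http://127.0.0.1:5000/chatbot",
    json={"question": "What time is check-in at Landon Hotel?"},
)
print(resp.json()["response"])

Sending the payload with json= sets the application/json Content-Type header that request.get_json() expects on the server side.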