from flask import Flask, request
from transformers import AutoTokenizer, pipeline

app = Flask(__name__)

# Load the Code-Llama tokenizer and text-generation pipeline once, outside the
# request handler, so the model is not reloaded on every request
model_name = "codellama/CodeLlama-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = pipeline("text-generation", model=model_name, tokenizer=tokenizer,
                 do_sample=True, temperature=1.0)  # sampling must be enabled for temperature to apply

def answer_code_questions(question):
  """
  Answers code-related questions using Code-Llama as OmniCode.

  Args:
      question (str): The code-related question to be answered.

  Returns:
      str: The answer generated by Code-Llama.
  """

  # Build the prompt using the Llama-2 style <<SYS>> system-prompt format
  # (note: the base CodeLlama-7b-hf checkpoint is not instruction-tuned; the
  # -Instruct variant follows this format more reliably)
  system_message = "<<SYS>>\nYou are a code teaching assistant named OmniCode created by Anusha K.\n<</SYS>>\n"

  # Wrap the system message and the user question in [INST] tags
  prompt = f"[INST] {system_message}\n{question} [/INST]"

  # Generate a response; return_full_text=False returns only the completion
  # rather than echoing the prompt back to the user
  try:
    response = model(prompt, max_new_tokens=512, truncation=True,
                     return_full_text=False)[0]["generated_text"]
    return response.strip()
  except Exception as e:
    print(f"Error during generation: {e}")
    return "I encountered an error while processing your question. Please try rephrasing it or providing more context."

@app.route("/", methods=["POST"])
def answer_question():
  """
  Handles user-submitted questions and returns answers via POST request.
  """
  question = request.form.get("question", "").strip()
  if not question:
    return "Please provide a 'question' form field.", 400
  answer = answer_code_questions(question)
  return answer

if __name__ == "__main__":
  app.run(debug=True)  # Set debug=False for production use
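
# Example client usage (a minimal sketch, assuming the server runs locally on
# Flask's default port 5000; the question text below is purely illustrative):
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:5000/",
#       data={"question": "How do I reverse a list in Python?"},
#   )
#   print(resp.text)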