SrijitMukherjee commited on
Commit
4ba2959
·
verified ·
1 Parent(s): f1fa612

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -13
app.py CHANGED
@@ -1,25 +1,24 @@
1
  import pandas as pd
2
  import streamlit as st
3
- #import ollama
4
- import transformers
5
  import torch
6
- model_id = "meta-llama/Meta-Llama-3-70B"
7
-
8
- pipeline = transformers.pipeline(
9
- "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto"
10
- )
11
 
12
  # Load your CSV file
13
  df = pd.read_csv("your_file.csv")
14
 
15
  # Function to generate responses using the Llama3 model
16
- # def generate_response(question):
17
- # response = ollama.chat(model='llama3', messages=[{'role': 'user', 'content': question}])
18
- # return response['message']['content']
19
-
20
def generate_response(question):
    """Generate a model reply for *question* via the module-level
    transformers text-generation ``pipeline``.

    Parameters
    ----------
    question : str
        The user's prompt text.

    Returns
    -------
    The raw pipeline output (a list of dicts with ``generated_text`` for
    transformers text-generation pipelines — TODO confirm callers expect this).
    """
    # BUG FIX: the original called pipeline(questions) — `questions` is an
    # undefined name (the parameter is `question`), raising NameError on
    # every call.
    response = pipeline(question)
    return response
 
 
 
 
23
 
24
  # Define the functions for solving problems, giving hints, and creating similar problems
25
  def show_problem(exam, year, problem):
 
1
  import pandas as pd
2
  import streamlit as st
3
+ import ollama
4
+ #import transformers
5
  import torch
6
+ #model_id = "meta-llama/Meta-Llama-3-70B"
7
+ # pipeline = transformers.pipeline(
8
+ # "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto"
9
+ # )
 
10
 
11
  # Load your CSV file
12
  df = pd.read_csv("your_file.csv")
13
 
14
  # Function to generate responses using the Llama3 model
 
 
 
 
15
def generate_response(question):
    """Ask the local llama3 model *question* and return the reply text.

    Sends a single-turn chat request through the ollama client
    (assumes an ollama server with the 'llama3' model is reachable —
    TODO confirm deployment provides one).

    Parameters
    ----------
    question : str
        The user's prompt text.

    Returns
    -------
    str
        The assistant message content from the ollama chat response.
    """
    response = ollama.chat(model='llama3', messages=[{'role': 'user', 'content': question}])
    return response['message']['content']
    # NOTE(review): removed the commented-out transformers-pipeline variant
    # that was kept here — it was dead code and still contained the undefined
    # `questions` name bug.
22
 
23
  # Define the functions for solving problems, giving hints, and creating similar problems
24
  def show_problem(exam, year, problem):