SrijitMukherjee committed on
Commit dd23fd2 · verified · 1 Parent(s): e01c07a

Update app.py

Files changed (1)
  1. app.py  +15 -4
app.py CHANGED
@@ -1,15 +1,26 @@
  import pandas as pd
  import streamlit as st
- import ollama
+ #import ollama
+ import transformers
+ import torch
+ model_id = "meta-llama/Meta-Llama-3-70B"
+
+ pipeline = transformers.pipeline(
+     "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto"
+ )

  # Load your CSV file
  df = pd.read_csv("your_file.csv")

  # Function to generate responses using the Llama3 model
- def generate_response(question):
-     response = ollama.chat(model='llama3', messages=[{'role': 'user', 'content': question}])
-     return response['message']['content']
+ # def generate_response(question):
+ #     response = ollama.chat(model='llama3', messages=[{'role': 'user', 'content': question}])
+ #     return response['message']['content']
+
+ def generate_response(question):
+     response = pipeline(question)
+     return response

  # Define the functions for solving problems, giving hints, and creating similar problems
  def show_problem(exam, year, problem):
      problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem"].values[0]
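
For reference, a minimal sketch of how the new transformers-based generate_response would typically be exercised, assuming the same model id as in the commit; the prompt, the max_new_tokens value, and the generated_text extraction are illustrative assumptions, not part of this change. The text-generation pipeline returns a list of dicts, so the plain string usually has to be pulled out of the "generated_text" field rather than returning the raw pipeline output.

import transformers
import torch

# Same model id as in the commit; any smaller text-generation checkpoint
# would behave the same way for local testing.
model_id = "meta-llama/Meta-Llama-3-70B"

pipe = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

def generate_response(question):
    # The pipeline returns a list like [{"generated_text": "..."}], so the
    # string is extracted here; max_new_tokens=256 is an assumed cap.
    outputs = pipe(question, max_new_tokens=256)
    return outputs[0]["generated_text"]

# Hypothetical call with a made-up prompt:
print(generate_response("State the Central Limit Theorem."))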