dbeck22 committed on
Commit 0d5857e · verified · 1 Parent(s): 9e2ae76

Create app.py

Files changed (1)
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
+import streamlit as st
+from llama_cpp import Llama
+
+# Initialize the Llama model
+@st.cache_resource  # Cache the model to avoid reloading it on every run
+def load_llama_model():
+    return Llama.from_pretrained(
+        repo_id="Orenguteng/Llama-3-8B-Lexi-Uncensored-GGUF",
+        filename="Lexi-Llama-3-8B-Uncensored_F16.gguf",
+    )
+
+# Title and description
+st.title("AI Coin Error Detector")
+st.write("This AI uses the Llama model to analyze coins for potential errors.")
+
+# Load the model
+model = load_llama_model()
+
+# User input: an optional coin image and a text description
+uploaded_file = st.file_uploader("Upload a coin image (optional):", type=["jpg", "jpeg", "png"])
+coin_description = st.text_area("Describe the coin (e.g., year, denomination, visible features):")
+
+if st.button("Analyze"):
+    if not coin_description and not uploaded_file:
+        st.error("Please upload an image or provide a description of the coin.")
+    else:
+        # Build the prompt from the user input (note: the image itself is not passed to the text-only model)
+        prompt = "Analyze the following coin for errors:\n"
+        if coin_description:
+            prompt += f"Description: {coin_description}\n"
+        if uploaded_file:
+            prompt += "Image has been uploaded. Please account for its visual features.\n"
+
+        # Run the Llama model
+        response = model.create_chat_completion(
+            messages=[{"role": "user", "content": prompt}]
+        )
+
+        # Display the result
+        st.write("### AI Response:")
+        st.write(response["choices"][0]["message"]["content"])
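For reference, the final line of app.py indexes into the OpenAI-style completion dict that llama-cpp-python's create_chat_completion returns. A rough sketch of that shape is below; the id and content strings are illustrative, and only the choices/message/content path is relied on by the app.

# Approximate shape of the dict returned by create_chat_completion
# (illustrative values; only the fields accessed above matter here)
response = {
    "id": "chatcmpl-...",
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "The coin appears to show..."},
            "finish_reason": "stop",
        }
    ],
}
print(response["choices"][0]["message"]["content"])  # what st.write displays

Assuming streamlit, llama-cpp-python, and huggingface-hub (needed by Llama.from_pretrained to download the GGUF file from the Hub) are installed, the app can be started locally with streamlit run app.py.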