Create 1.py
1.py
ADDED
@@ -0,0 +1,14 @@
+from llama_cpp import Llama
+
+# Load the model (ensure you have the correct model path)
+model = Llama(model_path="path_to_your_model.gguf")
+
+# Example prompt to analyze a coin
+response = model.create_chat_completion(
+    messages=[
+        {"role": "user", "content": "Analyze this coin for potential errors based on the provided image or description."}
+    ]
+)
+
+# Access the response content
+print(response["choices"][0]["message"]["content"])
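Note: the prompt above refers to a "provided image or description", but a GGUF model loaded this way only receives text, so the coin's details need to be passed inside the message itself. Below is a minimal sketch of that pattern, assuming the same placeholder model path; the analyze_coin helper and the sample description are hypothetical additions, not part of the committed file.

from llama_cpp import Llama

# Placeholder path; replace with a real GGUF model file on disk.
model = Llama(model_path="path_to_your_model.gguf")

def analyze_coin(description: str) -> str:
    """Ask the model to flag potential mint errors for a described coin."""
    response = model.create_chat_completion(
        messages=[
            {"role": "system", "content": "You are a numismatics assistant that identifies possible mint errors."},
            {"role": "user", "content": f"Analyze this coin for potential errors: {description}"},
        ]
    )
    # The chat completion mirrors the OpenAI-style response layout.
    return response["choices"][0]["message"]["content"]

# Example usage with a hand-written description (hypothetical data).
print(analyze_coin("1955 Lincoln cent, obverse lettering appears doubled"))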