Update README.md
README.md CHANGED
@@ -32,16 +32,31 @@ This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](http
You can use this model directly with a pipeline for text generation. Here is an example:

```python
import logging

from transformers import pipeline

# Set up logging
logging.basicConfig(filename='model_output.log', level=logging.INFO)

# Load the model using the pipeline API for text generation
model_name = "2nji/llama3-platypus"
generator = pipeline('text-generation', model=model_name)

# Example prompt
prompt = "Hello! How can AI help humans in daily life?"

# Generate response
try:
    responses = generator(prompt, max_length=50)  # Adjust max_length as needed
    response_text = responses[0]['generated_text']
    print("Model response:", response_text)

    # Log the output
    logging.info("Sent prompt: %s", prompt)
    logging.info("Received response: %s", response_text)

except Exception as e:
    logging.error("Error in generating response: %s", str(e))
```
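The call above relies on the model's default generation settings. For more control over decoding, the same `generator` call accepts the standard `transformers` generation arguments; the sketch below shows sampling-based decoding, and the parameter values are illustrative rather than tuned recommendations for this model:

```python
# A sketch of sampling-based decoding; the values below are examples, not tuned settings
responses = generator(
    prompt,
    max_length=100,          # total length, prompt tokens included
    do_sample=True,          # sample from the token distribution instead of greedy decoding
    temperature=0.7,         # lower values make output more deterministic
    top_p=0.9,               # nucleus sampling: restrict to the top 90% of probability mass
    num_return_sequences=2,  # return two candidate completions
)
for r in responses:
    print(r['generated_text'])
```

With `num_return_sequences` greater than 1, the pipeline returns one dict per candidate, so the loop above prints each completion separately.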
## Training and evaluation data