import os

import requests
from transformers import Tool

class TextGenerationTool(Tool):
    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
    )

    inputs = ["text"]
    outputs = ["text"]

    def __call__(self, prompt: str):
        API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
        # The Hugging Face API token is read from the `hf` environment variable.
        headers = {"Authorization": "Bearer " + os.environ["hf"]}

        payload = {
            "inputs": prompt  # Adjust this based on your model's input format
        }

        # Query the Inference API; text-generation models typically return a
        # list of dicts with a "generated_text" field.
        response = requests.post(API_URL, headers=headers, json=payload)
        response.raise_for_status()
        return response.json()

# Alternative: run generation locally with a transformers pipeline instead of the
# Inference API (requires `from transformers import pipeline`).
# text_generator = pipeline("text-generation", model="gpt2")
# generated_text = text_generator(prompt, max_length=500, num_return_sequences=1, temperature=0.7)
# print(generated_text)
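
# Minimal usage sketch (illustrative only): assumes the `hf` environment variable
# holds a valid Hugging Face API token; the exact response shape depends on the
# hosted model.
if __name__ == "__main__":
    tool = TextGenerationTool()
    result = tool("Write a short poem about the sea.")
    print(result)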