ikram committed
Commit 07141a9 · 1 Parent(s): f8f0a7a

Files changed (2):
  1. app.py +40 -23
  2. dockerfile +11 -10
app.py CHANGED
@@ -1,33 +1,50 @@
- import gradio as gr
- import fastapi
- from fastapi import FastAPI
- from transformers import pipeline
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ import openai
+ from fastapi import FastAPI, UploadFile, File
+ import io

  # Initialize FastAPI app
  app = FastAPI()

- # Load Hugging Face model (text generation)
- generator = pipeline("text-generation", model="gpt2")
+ # Function to generate Python visualization code using Hugging Face model
+ def generate_viz_code(prompt: str) -> str:
+     """Generate Python code for visualization based on user prompt."""
+     response = openai.ChatCompletion.create(
+         model="mistralai/Mistral-7B",  # Replace with the actual Hugging Face model
+         messages=[
+             {"role": "system", "content": "You are an AI assistant for data visualization."},
+             {"role": "user", "content": prompt}
+         ]
+     )
+     return response["choices"][0]["message"]["content"]

- # Function to generate text using HF model
- def generate_text(prompt: str):
-     result = generator(prompt, max_length=100, do_sample=True)
-     return result[0]["generated_text"]
+ # Function to handle file upload and visualization
+ @app.post("/visualize")
+ def visualize_data(file: UploadFile = File(...), prompt: str = ""):
+     try:
+         # Read the uploaded Excel file
+         contents = file.file.read()
+         df = pd.read_excel(io.BytesIO(contents))

- # Define Gradio interface
- gui = gr.Interface(
-     fn=generate_text,
-     inputs=gr.Textbox(label="Enter your prompt"),
-     outputs=gr.Textbox(label="Generated Text"),
-     title="Hugging Face Text Generator",
-     description="Enter a prompt and the AI will generate text using GPT-2."
- )
+         # Generate visualization code
+         code = generate_viz_code(prompt)
+         print("Generated Code:\n", code)  # Debug output

- # Integrate Gradio with FastAPI
- app = gr.mount_gradio_app(FastAPI(), gui, path="/")
+         # Execute the generated code
+         exec_globals = {"plt": plt, "sns": sns, "pd": pd, "df": df}
+         exec(code, exec_globals)

- # Uncomment to run standalone FastAPI server
+         # Save the generated plot
+         img_path = "visualization.png"
+         plt.savefig(img_path)
+         plt.close()
+         return {"image_path": img_path}
+     except Exception as e:
+         return {"error": str(e)}
+
+ # Uncomment below to run standalone FastAPI app
  # if __name__ == "__main__":
  #     import uvicorn
- #     uvicorn.run(app, host="0.0.0.0", port=7860)
-
+ #     uvicorn.run(app, host="0.0.0.0", port=8000)
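
Note on the new generate_viz_code: the pre-1.0 openai client's openai.ChatCompletion.create targets the OpenAI API by default, so passing a Hugging Face model id such as "mistralai/Mistral-7B" only works if the client is first pointed at an OpenAI-compatible server hosting that model. A minimal sketch of that configuration, assuming such an endpoint exists; the base URL and token below are placeholders, not part of the commit:

import openai

# Placeholder endpoint and token (assumptions for illustration, not in the commit)
openai.api_base = "https://your-inference-endpoint.example.com/v1"
openai.api_key = "YOUR_API_TOKEN"

# Same call shape as generate_viz_code in the commit
response = openai.ChatCompletion.create(
    model="mistralai/Mistral-7B",  # model id as written in the commit
    messages=[{"role": "user", "content": "Plot a bar chart of column A."}],
)
print(response["choices"][0]["message"]["content"])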
 
dockerfile CHANGED
@@ -1,15 +1,16 @@
- # Use a lightweight Python image
+ # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
  FROM python:3.9

- # Set the working directory
- WORKDIR /app
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"

- # Copy the requirements file and install dependencies
- COPY requirements.txt .
- RUN pip install --no-cache-dir -r requirements.txt
+ WORKDIR /app

- # Copy the app files
- COPY . .
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt

- # Run the FastAPI application
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+ COPY --chown=user . /app
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8080"]
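
With the image built and running, the new /visualize route can be exercised with a multipart upload. A quick test sketch using the requests library, assuming the container is reachable on localhost:8080 (the port in the CMD above); the file name and prompt are examples, not part of the commit:

import requests

# Hypothetical local test: upload an Excel file plus a prompt
# (FastAPI treats the plain str parameter as a query parameter)
with open("data.xlsx", "rb") as f:
    resp = requests.post(
        "http://localhost:8080/visualize",
        params={"prompt": "Plot a histogram of the first numeric column."},
        files={"file": ("data.xlsx", f)},
    )
print(resp.json())  # {"image_path": "visualization.png"} on success

Note that the endpoint returns the server-side path of the saved PNG rather than the image bytes, so visualization.png only exists inside the container.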