from fastapi import FastAPI, Response
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import graphviz
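# Note: the graphviz package is only a wrapper; rendering requires the
# Graphviz system binaries (the `dot` executable) to be installed and on PATH.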

app = FastAPI()

# Initialize the inference client for the AI model
client = InferenceClient("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
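# Note: depending on how the model is hosted, inference calls may require authentication,
# e.g. by passing `token=...` to InferenceClient or setting the HF_TOKEN environment variable.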

class CourseRequest(BaseModel):
    course_name: str

def format_prompt(course_name: str):
    return f"As an expert in education, please generate a detailed roadmap for the course '{course_name}'. Include key topics."

def generate_roadmap(item: CourseRequest) -> str:
    prompt = format_prompt(item.course_name)
    # Stream tokens from the model; `details=True` makes each chunk expose `.token`
    stream = client.text_generation(prompt, max_new_tokens=200, stream=True, details=True)
    output = ""

    for response in stream:
        if not response.token.special:  # Skip special tokens such as end-of-sequence
            output += response.token.text

    return output

def create_diagram(roadmap_text: str) -> graphviz.Digraph:
    dot = graphviz.Digraph()

    # Split the roadmap text into non-empty lines; each line becomes a node
    lines = [line.strip() for line in roadmap_text.split('\n') if line.strip()]
    for i, line in enumerate(lines):
        dot.node(str(i), line)  # Create a node for each topic

        if i > 0:
            dot.edge(str(i - 1), str(i))  # Connect consecutive topics in order

    return dot

@app.post("/generate/")
def generate_roadmap_endpoint(course_request: CourseRequest):
    # A plain `def` lets FastAPI run the blocking inference and rendering
    # calls in its threadpool instead of blocking the event loop.
    roadmap_text = generate_roadmap(course_request)
    diagram = create_diagram(roadmap_text)

    # Render the diagram to a PNG image in /tmp
    diagram_path = "/tmp/roadmap"
    diagram.render(diagram_path, format='png', cleanup=True)

    # Return the rendered image as the HTTP response
    with open(diagram_path + ".png", "rb") as f:
        return Response(content=f.read(), media_type="image/png")