ManojINaik commited on
Commit
fcb6b85
·
verified ·
1 Parent(s): d6697f3

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +45 -30
main.py CHANGED
@@ -1,50 +1,65 @@
1
- from fastapi import FastAPI, Response
2
  from pydantic import BaseModel
3
  from huggingface_hub import InferenceClient
4
- import graphviz
5
 
6
  app = FastAPI()
7
 
8
- # Initialize the inference client for the AI model
9
  client = InferenceClient("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
10
 
 
11
  class CourseRequest(BaseModel):
12
  course_name: str
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
- def format_prompt(course_name: str):
15
- return f"As an expert in education, please generate a detailed roadmap for the course '{course_name}'. Include key topics."
16
 
17
- def generate_roadmap(item: CourseRequest):
18
- prompt = format_prompt(item.course_name)
19
- stream = client.text_generation(prompt, max_new_tokens=200)
20
- output = ""
21
 
22
- for response in stream:
23
- output += response.token.text
24
 
25
- return output
26
 
27
- def create_diagram(roadmap_text: str):
28
- dot = graphviz.Digraph()
29
 
30
- # Split the roadmap text into lines or sections for diagram creation
31
- lines = roadmap_text.split('\n')
32
- for i, line in enumerate(lines):
33
- dot.node(str(i), line.strip()) # Create a node for each topic
34
 
35
- if i > 0:
36
- dot.edge(str(i - 1), str(i)) # Connect nodes sequentially
37
 
38
- return dot
 
 
 
 
 
 
 
39
 
40
- @app.post("/generate/")
41
- async def generate_roadmap_endpoint(course_request: CourseRequest):
42
- roadmap_text = generate_roadmap(course_request)
43
- diagram = create_diagram(roadmap_text)
 
 
 
 
44
 
45
- # Render the diagram to a PNG image
46
- diagram_path = "/tmp/roadmap.png"
47
- diagram.render(diagram_path, format='png', cleanup=True)
 
48
 
49
- with open(diagram_path, "rb") as f:
50
- return Response(content=f.read(), media_type="image/png")
 
 
1
+ from fastapi import FastAPI
2
  from pydantic import BaseModel
3
  from huggingface_hub import InferenceClient
4
+ import uvicorn
5
 
6
  app = FastAPI()
7
 
8
+ # Initialize the InferenceClient with the specified model
9
  client = InferenceClient("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
10
 
11
# Request schema for the roadmap-generation endpoint.
class CourseRequest(BaseModel):
    """Body of a roadmap request: the course name plus optional chat history
    and sampling parameters (all sampling fields have safe defaults)."""

    course_name: str
    # Prior (user, bot) turns; optional so a bare {"course_name": ...} body works.
    history: list = []
    # Sampling controls forwarded to text generation; 0.0 temperature is
    # clamped upward before use by the generation helper.
    temperature: float = 0.0
    max_new_tokens: int = 1048
    top_p: float = 0.15
    repetition_penalty: float = 1.0
19
+
20
# Build the instruction-formatted prompt string for the model.
def format_prompt(course_name, history):
    """Assemble a Llama-chat-style prompt: each past (user, bot) turn wrapped
    in [INST] ... [/INST] markers, followed by the roadmap request itself."""
    pieces = ["<s>"]
    for past_user, past_bot in history:
        pieces.append(f"[INST] {past_user} [/INST] {past_bot} </s> ")
    pieces.append(f"[INST] Generate a roadmap for the course: {course_name} [/INST]")
    return "".join(pieces)
27
+
28
# Run the inference client on the formatted prompt and collect the stream.
def generate(course_request: CourseRequest):
    """Stream a roadmap from the model for the requested course and return
    the concatenated generated text."""
    # Temperature must be strictly positive for sampling; clamp 0.0 upward.
    sampling_args = dict(
        temperature=max(float(course_request.temperature), 1e-2),
        max_new_tokens=course_request.max_new_tokens,
        top_p=float(course_request.top_p),
        repetition_penalty=course_request.repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed keeps output reproducible per prompt
    )

    prompt = format_prompt(course_request.course_name, course_request.history)
    token_stream = client.text_generation(
        prompt, **sampling_args, stream=True, details=True, return_full_text=False
    )

    # Each streamed item carries one token; join their text into the result.
    return "".join(item.token.text for item in token_stream)
57
 
58
# POST endpoint: takes a CourseRequest body, returns the generated roadmap text.
@app.post("/generate-roadmap/")
async def generate_roadmap(course_request: CourseRequest):
    """Generate and return a course roadmap as {"roadmap": <text>}."""
    roadmap_text = generate(course_request)
    return {"roadmap": roadmap_text}
62
 
63
+ # Run the application (uncomment the next two lines if running this as a standalone script)
64
+ # if __name__ == "__main__":
65
+ # uvicorn.run(app, host="0.0.0.0", port=8000)