Rahul-8799 committed
Commit ea78289 · verified · 1 Parent(s): 083b9e0

Upload 6 files
agents/product_manager_agent.py CHANGED
@@ -1,6 +1,8 @@
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 from langchain_core.messages import AIMessage
+import asyncio
+from typing import AsyncGenerator, Dict, Any
 
 MODEL_REPO = "Rahul-8799/product_manager_mistral"
 
@@ -11,15 +13,43 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto"
 )
 
-def run(state: dict) -> dict:
-    """Generates structured product requirements from the user's input prompt."""
-    messages = state["messages"]
-    prompt = messages[-1].content
-
+async def stream_inference(prompt: str) -> AsyncGenerator[str, None]:
+    """Stream the model's output token by token."""
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
-    output_ids = model.generate(input_ids, max_new_tokens=3000)
-    output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+    # Generate tokens one by one
+    for _ in range(100):  # Limit to 100 tokens for streaming demo
+        output_ids = model.generate(
+            input_ids,
+            max_new_tokens=1,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+        # Get the new token
+        new_token = output_ids[0][-1]
+        if new_token == tokenizer.eos_token_id:
+            break
+
+        # Decode and yield the token
+        token_text = tokenizer.decode([new_token])
+        yield token_text
+
+        # Update input_ids for next iteration
+        input_ids = output_ids
+
+        # Small delay to simulate streaming
+        await asyncio.sleep(0.05)
 
+async def run(state: Dict[str, Any]) -> Dict[str, Any]:
+    """Product Manager generates structured product requirements with streaming output."""
+    messages = state["messages"]
+    prompt = messages[-1].content
+
+    # Stream the output
+    output = ""
+    async for token in stream_inference(prompt):
+        output += token
+
     return {
         "messages": [AIMessage(content=output)],
         "chat_log": state["chat_log"] + [{"role": "Product Manager", "content": output}],
agents/software_engineer_agent.py CHANGED
@@ -18,7 +18,7 @@ def run(state: dict) -> dict:
 
     # Enhance the prompt with UI implementation guidelines
     enhanced_prompt = f"""
-    Objective
+    Objective
 
     Generate modern, responsive, and accessible UI code that is visually appealing and adheres to current frontend development best practices.
 
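For context, this hunk sits inside the f-string that run() builds before inference: fixed UI guidelines are prepended to the user's request. A minimal sketch of that pattern; build_enhanced_prompt and user_request are hypothetical names, and only the guideline text shown above appears in the actual file:

def build_enhanced_prompt(user_request: str) -> str:
    # Hypothetical helper: prepend fixed guidelines so the model
    # always sees the same instructions before the user's request.
    return f"""
    Objective

    Generate modern, responsive, and accessible UI code that is visually
    appealing and adheres to current frontend development best practices.

    User request: {user_request}
    """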
agents/ui_designer_agent.py CHANGED
@@ -2,7 +2,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 from langchain_core.messages import AIMessage
 
-MODEL_REPO = "Rahul-8799/product_manager_mistral"
+MODEL_REPO = "Rahul-8799/ui_designer_mistral"  # You'll need to fine-tune this model
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(
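Note: each agent module loads its own tokenizer and full model at import time, so importing several agents in one process keeps a separate Mistral-sized model in memory for each repo. A minimal sketch of a cached shared loader; load_agent_model is a hypothetical helper, not part of this commit:

from functools import lru_cache

from transformers import AutoTokenizer, AutoModelForCausalLM

@lru_cache(maxsize=None)
def load_agent_model(repo_id: str):
    """Load (tokenizer, model) once per repo id and reuse it across agents."""
    tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        repo_id,
        torch_dtype="auto",  # keep the checkpoint's native dtype
        device_map="auto",
    )
    return tokenizer, model

Agents would then call load_agent_model("Rahul-8799/ui_designer_mistral") instead of constructing the objects at module scope, so two agents pointing at the same repo share one copy of the weights.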