MO1990D committed on
Commit
fddbcc4
·
1 Parent(s): d74e218

✅ Fixed generator, Ollama CI/CD, and Dockerfile for deployment on Hugging Face

Browse files
src/marketingCampaignGen/components/model/__pycache__/generator.cpython-310.pyc CHANGED
Binary files a/src/marketingCampaignGen/components/model/__pycache__/generator.cpython-310.pyc and b/src/marketingCampaignGen/components/model/__pycache__/generator.cpython-310.pyc differ
 
src/marketingCampaignGen/components/model/generator.py CHANGED
@@ -1,8 +1,3 @@
1
- # src/marketingCampaignGen/utils/campaign_generator.py
2
-
3
-
4
- # from langchain.llms import Ollama
5
- from unittest.mock import patch
6
  from langchain_community.llms import Ollama
7
  from langchain.prompts import PromptTemplate
8
  from langchain.chains import LLMChain
@@ -11,10 +6,8 @@ from src.marketingCampaignGen.utils.vector_store import get_similar_examples
11
 
12
  class CampaignGenerator:
13
  def __init__(self, model_name="orca-mini"):
14
- # self.llm = Ollama(model=model_name)
15
  self.llm = Ollama(model="mistral:7b-instruct-q4_0")
16
 
17
-
18
  self.template = PromptTemplate(
19
  input_variables=["product_name", "features", "brand", "audience", "tone", "goal", "examples"],
20
  template="""
@@ -37,41 +30,20 @@ Write the campaign in the specified tone and make it persuasive.
37
 
38
  self.chain = LLMChain(llm=self.llm, prompt=self.template)
39
 
40
- # def generate(self, input_data: dict) -> str:
41
- # feature_query = ", ".join(input_data["features"])
42
- # examples = get_similar_examples(feature_query, top_k=2)
43
-
44
- # # Flatten list of documents
45
- # flat_examples = [doc for sublist in examples["documents"] for doc in sublist]
46
- # example_text = "\n\n".join(flat_examples) if flat_examples else "None"
47
-
48
- # response = self.chain.invoke(...).run(
49
- # product_name=input_data["product_name"],
50
- # features=feature_query,
51
- # brand=input_data["brand"],
52
- # audience=input_data["audience"],
53
- # tone=input_data["tone"],
54
- # goal=input_data["goal"],
55
- # # examples=example_text
56
- # )
57
-
58
- # return response
59
- # Adjust path below to the exact module & class where .invoke() is called
60
- @patch("src.marketingCampaignGen.components.model.generator.Ollama.invoke")
61
- def test_generate_campaign(mock_invoke):
62
- mock_invoke.return_value = "Mocked marketing campaign."
63
-
64
- response = client.post("/generate", json=sample_payload)
65
-
66
- print("\n[DEBUG] Status Code:", response.status_code)
67
- try:
68
- print("[DEBUG] Response JSON:", response.json())
69
- except Exception as e:
70
- print("[DEBUG] Response could not be parsed:", e)
71
- print("[DEBUG] Raw Response Text:", response.text)
72
-
73
- assert response.status_code == 200
74
- assert "campaign" in response.json()
75
- assert isinstance(response.json()["campaign"], str)
76
- assert len(response.json()["campaign"]) > 10
77
-
 
 
 
 
 
 
1
  from langchain_community.llms import Ollama
2
  from langchain.prompts import PromptTemplate
3
  from langchain.chains import LLMChain
 
6
 
7
  class CampaignGenerator:
8
  def __init__(self, model_name="orca-mini"):
 
9
  self.llm = Ollama(model="mistral:7b-instruct-q4_0")
10
 
 
11
  self.template = PromptTemplate(
12
  input_variables=["product_name", "features", "brand", "audience", "tone", "goal", "examples"],
13
  template="""
 
30
 
31
  self.chain = LLMChain(llm=self.llm, prompt=self.template)
32
 
33
+ def generate(self, input_data: dict) -> str:
34
+ feature_query = ", ".join(input_data["features"])
35
+ examples = get_similar_examples(feature_query, top_k=2)
36
+
37
+ # Flatten list of documents
38
+ flat_examples = [doc for sublist in examples["documents"] for doc in sublist]
39
+ example_text = "\n\n".join(flat_examples) if flat_examples else "None"
40
+
41
+ return self.chain.run(
42
+ product_name=input_data["product_name"],
43
+ features=feature_query,
44
+ brand=input_data["brand"],
45
+ audience=input_data["audience"],
46
+ tone=input_data["tone"],
47
+ goal=input_data["goal"],
48
+ examples=example_text
49
+ )