MO1990D committed
Commit e991f98 · 1 Parent(s): d300a25

✅ Added CI/CD and Dockerfile for deployment

.github/workflows/deploy.yml CHANGED
@@ -1,5 +1,4 @@
-# .github/workflows/deploy.yml
-name: CI/CD Pipeline
+name: build-and-deploy

 on:
   push:
@@ -11,29 +10,28 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - name: Checkout code
+      - name: Checkout repo
         uses: actions/checkout@v3

       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: "3.10"
+          python-version: '3.10'

       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
           pip install -r requirements.txt
+          pip install pytest

-      - name: Run tests
-        run: |
-          echo "✅ Add Pytest or other checks here"
-
-      - name: Build Docker image
-        run: docker build -t llm-campaign-app .
+      - name: Run unit tests
+        run: pytest tests/

-      # Optional: Push to DockerHub/GHCR (needs secrets configured)
-      # - name: Push Docker Image
-      #   run: |
-      #     echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login -u "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
-      #     docker tag llm-campaign-app your-dockerhub-user/llm-campaign-app
-      #     docker push your-dockerhub-user/llm-campaign-app
+      - name: Deploy to Hugging Face Spaces (example)
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          git config --global user.email "[email protected]"
+          git config --global user.name "Your Name"
+          git remote set-url origin https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME
+          git push origin main
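Note on the new deploy step: the workflow exposes HF_TOKEN to the step, but a plain `git push` over HTTPS generally only authenticates against a Hugging Face Space when the credentials are embedded in the remote URL (e.g. https://YOUR_USERNAME:[email protected]/spaces/YOUR_USERNAME/YOUR_SPACE_NAME). A minimal alternative sketch using the huggingface_hub client is shown below; the script name, Space id, and folder path are placeholders, not values taken from this repo.

# deploy_to_space.py -- illustrative sketch only, not part of this commit.
# Assumes HF_TOKEN is exported by the workflow step (secrets.HF_TOKEN) and
# that YOUR_USERNAME/YOUR_SPACE_NAME is replaced with the real Space id.
import os

from huggingface_hub import HfApi


def deploy(folder: str = ".") -> None:
    api = HfApi(token=os.environ["HF_TOKEN"])
    # Upload the working tree to the Space; skip caches and git metadata.
    api.upload_folder(
        folder_path=folder,
        repo_id="YOUR_USERNAME/YOUR_SPACE_NAME",
        repo_type="space",
        commit_message="CI deploy",
        ignore_patterns=["__pycache__/*", ".git/*"],
    )


if __name__ == "__main__":
    deploy()

In the workflow, this would replace the git commands with a single `python deploy_to_space.py` line.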
__pycache__/app.cpython-310.pyc CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
 
__pycache__/app.cpython-38.pyc ADDED
Binary file (1.37 kB).
 
app.py CHANGED
@@ -22,7 +22,7 @@ def read_root():
 def generate_campaign(data: ProductRequest):
     try:
         predictor = LLMPredictionPipeline()
-        result = predictor.predict(data.dict())
+        result = predictor.predict(data.model_dump())


         return {"campaign": result}
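The app.py change switches from Pydantic v1's deprecated `.dict()` to the v2 API `.model_dump()`, which returns the validated fields as a plain dict. A minimal sketch of that call follows; the field list is inferred from the sample payload in tests/test_generate.py, since the real ProductRequest definition is not shown in this hunk.

# Sketch only -- the actual ProductRequest lives in app.py; fields are inferred
# from the sample payload used in tests/test_generate.py.
from typing import List

from pydantic import BaseModel


class ProductRequest(BaseModel):
    product_name: str
    features: List[str]
    brand: str
    audience: str
    tone: str
    goal: str


req = ProductRequest(
    product_name="EcoBottle",
    features=["BPA-free", "Insulated", "Leak-proof"],
    brand="EcoFlow",
    audience="Eco-conscious travelers",
    tone="enthusiastic",
    goal="Drive conversions for the summer collection",
)
print(req.model_dump())  # Pydantic v2 replacement for the deprecated .dict()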
requirements.txt CHANGED
@@ -14,3 +14,6 @@ chromadb
 sentence-transformers
 langchain
 langchain-community
+pytest
+httpx
+ollama
src/marketingCampaignGen/components/model/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (202 Bytes).
 
src/marketingCampaignGen/components/model/__pycache__/generator.cpython-310.pyc CHANGED
Binary files a/src/marketingCampaignGen/components/model/__pycache__/generator.cpython-310.pyc and b/src/marketingCampaignGen/components/model/__pycache__/generator.cpython-310.pyc differ
 
src/marketingCampaignGen/components/model/__pycache__/predictor.cpython-38.pyc ADDED
Binary file (1.2 kB).
 
src/marketingCampaignGen/components/model/generator.py CHANGED
@@ -1,15 +1,18 @@
 # src/marketingCampaignGen/utils/campaign_generator.py


-from langchain.llms import Ollama
+# from langchain.llms import Ollama
+from langchain_community.llms import Ollama
 from langchain.prompts import PromptTemplate
 from langchain.chains import LLMChain
 from src.marketingCampaignGen.utils.vector_store import get_similar_examples


 class CampaignGenerator:
-    def __init__(self, model_name="mistral"):
-        self.llm = Ollama(model=model_name)
+    def __init__(self, model_name="orca-mini"):
+        # self.llm = Ollama(model=model_name)
+        self.llm = Ollama(model="mistral:7b-instruct-q4_0")
+

         self.template = PromptTemplate(
             input_variables=["product_name", "features", "brand", "audience", "tone", "goal", "examples"],
@@ -41,16 +44,16 @@ Write the campaign in the specified tone and make it persuasive.
         flat_examples = [doc for sublist in examples["documents"] for doc in sublist]
         example_text = "\n\n".join(flat_examples) if flat_examples else "None"

-        response = self.chain.run(
-            product_name=input_data["product_name"],
-            features=feature_query,
-            brand=input_data["brand"],
-            audience=input_data["audience"],
-            tone=input_data["tone"],
-            goal=input_data["goal"],
-            examples=example_text
-        )
-
-        return response
-
+        # Now pass ALL required fields to the template using `.invoke()` (not `.run()`)
+        response = self.chain.invoke({
+            "product_name": input_data["product_name"],
+            "features": feature_query,
+            "brand": input_data["brand"],
+            "audience": input_data["audience"],
+            "tone": input_data["tone"],
+            "goal": input_data["goal"],
+            "examples": example_text
+        })
+
+        return response["text"]

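Two things change in generator.py: the Ollama wrapper now comes from langchain_community, and the chain is called with `.invoke()` instead of the deprecated `.run()`. Unlike `.run()`, `.invoke()` takes a single dict of inputs and returns a dict, so the generated text has to be read from the "text" key. Note that after this change the `model_name` argument is effectively unused, since the model id is hard-coded. A standalone sketch of the invoke pattern, assuming a local Ollama server with mistral:7b-instruct-q4_0 already pulled:

# Standalone sketch of the chain.invoke() pattern used above; requires a
# running Ollama server with the mistral:7b-instruct-q4_0 model pulled.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Ollama

llm = Ollama(model="mistral:7b-instruct-q4_0")
template = PromptTemplate(
    input_variables=["product_name", "tone"],
    template="Write a {tone} one-line slogan for {product_name}.",
)
chain = LLMChain(llm=llm, prompt=template)

# .invoke() returns a dict (the inputs plus an output key); the generated
# string is under "text", which is why predict() now returns response["text"].
result = chain.invoke({"product_name": "EcoBottle", "tone": "enthusiastic"})
print(result["text"])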
src/marketingCampaignGen/utils/__pycache__/vector_store.cpython-310.pyc CHANGED
Binary files a/src/marketingCampaignGen/utils/__pycache__/vector_store.cpython-310.pyc and b/src/marketingCampaignGen/utils/__pycache__/vector_store.cpython-310.pyc differ
 
src/marketingCampaignGen/utils/__pycache__/vector_store.cpython-38.pyc CHANGED
Binary files a/src/marketingCampaignGen/utils/__pycache__/vector_store.cpython-38.pyc and b/src/marketingCampaignGen/utils/__pycache__/vector_store.cpython-38.pyc differ
 
src/marketingCampaignGen/utils/vector_store.py CHANGED
@@ -1,7 +1,7 @@
 # src/marketingCampaignGen/utils/vector_store.py

 import chromadb
-from sentence_transformers import SentenceTransformer
+# from sentence_transformers import SentenceTransformer
 from chromadb.utils import embedding_functions

 CHROMA_PATH = "chroma_db"
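Dropping the direct sentence_transformers import works because chromadb can build the same embedder through chromadb.utils.embedding_functions. A sketch of that pattern is below; the collection name and embedding model are assumptions, since the rest of vector_store.py is not part of this hunk.

# Sketch only -- the collection name and model name are placeholders, not
# values taken from this repo's vector_store.py.
import chromadb
from chromadb.utils import embedding_functions

CHROMA_PATH = "chroma_db"

# chromadb instantiates sentence-transformers itself, so no direct import is needed.
ef = embedding_functions.SentenceTransformerEmbeddingFunction(
    model_name="all-MiniLM-L6-v2"
)
client = chromadb.PersistentClient(path=CHROMA_PATH)
collection = client.get_or_create_collection(
    name="campaign_examples", embedding_function=ef
)

# Retrieval in the style of get_similar_examples()
results = collection.query(query_texts=["insulated water bottle"], n_results=3)
print(results["documents"])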
tests/__pycache__/test_generate.cpython-310-pytest-8.3.5.pyc ADDED
Binary file (2.49 kB).
 
tests/__pycache__/test_generate.cpython-38-pytest-7.0.1.pyc ADDED
Binary file (2.32 kB).
 
tests/test_generate.py ADDED
@@ -0,0 +1,36 @@
+import pytest
+from fastapi.testclient import TestClient
+import sys
+import os
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+from app import app
+
+
+# tests/test_generate.py
+
+client = TestClient(app)
+
+sample_payload = {
+    "product_name": "EcoBottle",
+    "features": ["BPA-free", "Insulated", "Leak-proof"],
+    "brand": "EcoFlow",
+    "audience": "Eco-conscious travelers",
+    "tone": "enthusiastic",
+    "goal": "Drive conversions for the summer collection",
+}
+
+def test_generate_campaign():
+    response = client.post("/generate", json=sample_payload)
+
+    print("\n[DEBUG] Status Code:", response.status_code)
+    try:
+        print("[DEBUG] Response JSON:", response.json())
+    except Exception as e:
+        print("[DEBUG] Response could not be parsed:", e)
+        print("[DEBUG] Raw Response Text:", response.text)
+
+    assert response.status_code == 200
+    assert "campaign" in response.json()
+    assert isinstance(response.json()["campaign"], str)
+    assert len(response.json()["campaign"]) > 10
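tests/test_generate.py drives the real /generate endpoint end to end, so in CI it needs a reachable Ollama server (hence the ollama entry added to requirements.txt). A possible way to keep the endpoint test green without a model server is to stub the predictor, as in the sketch below; it assumes app.py imports LLMPredictionPipeline into its own namespace, which the predictor = LLMPredictionPipeline() call suggests.

# tests/test_generate_mocked.py -- optional sketch, not part of this commit.
import os
import sys

from fastapi.testclient import TestClient

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import app as app_module
from app import app

client = TestClient(app)


def test_generate_campaign_mocked(monkeypatch):
    # Replace the real predict() so no Ollama server is required in CI.
    def fake_predict(self, data):
        return f"Mock campaign for {data['product_name']}"

    monkeypatch.setattr(app_module.LLMPredictionPipeline, "predict", fake_predict)

    response = client.post("/generate", json={
        "product_name": "EcoBottle",
        "features": ["BPA-free"],
        "brand": "EcoFlow",
        "audience": "Eco-conscious travelers",
        "tone": "enthusiastic",
        "goal": "Drive conversions",
    })

    assert response.status_code == 200
    assert "Mock campaign" in response.json()["campaign"]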