text
stringlengths 23
44.3k
| id
stringlengths 10
84
| metadata
dict |
---|---|---|
"""empty message
Revision ID: 6408613d0565
Revises: b74c85aaf1d5
Create Date: 2024-10-22 09:21:41.789712
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision: str = '6408613d0565'
down_revision: Union[str, None] = 'b74c85aaf1d5'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create ``cloud_cost_reports`` (per-user, per-cloud cost snapshots)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('cloud_cost_reports',
        sa.Column('id', sa.String(length=32), nullable=False),
        sa.Column('user_id', sa.String(length=32), nullable=False),
        sa.Column('cloud_id', sa.String(length=32), nullable=False),
        # Raw report payload; JSONB so the document can be queried/indexed.
        sa.Column('resource_json', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
        # DB-side default timestamp (now()) so inserts need not supply it.
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    """Drop the ``cloud_cost_reports`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('cloud_cost_reports')
# ### end Alembic commands ### | migrations/versions/6408613d0565.py/0 | {
"file_path": "migrations/versions/6408613d0565.py",
"repo_id": "migrations"
} |
"""empty message
Revision ID: b74c85aaf1d5
Revises: 325ce6b01196
Create Date: 2024-10-16 05:07:47.396688
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'b74c85aaf1d5'
down_revision: Union[str, None] = '325ce6b01196'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create ``activity_tracking`` plus its two lookup indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('activity_tracking',
        sa.Column('id', sa.String(length=32), nullable=False),
        sa.Column('workflow_id', sa.String(length=32), nullable=False),
        sa.Column('user_id', sa.String(length=32), nullable=False),
        sa.Column('resource_name', sa.String(length=128), nullable=False),
        sa.Column('resource_type', sa.String(length=128), nullable=False),
        sa.Column('activity_type', sa.String(length=64), nullable=False),
        # NOTE(review): the three timestamps below are stored as strings, not
        # DateTime — presumably pre-formatted values from the caller; this is
        # an applied migration, so it must not be rewritten retroactively.
        sa.Column('created_at', sa.String(length=256), nullable=False),
        sa.Column('started_at', sa.String(length=256), nullable=True),
        sa.Column('completed_at', sa.String(length=256), nullable=True),
        sa.Column('summary', sa.Text(), nullable=False),
        sa.Column('email', sa.String(length=255), nullable=False),
        sa.Column('status', sa.String(length=128), nullable=False),
        sa.Column('chat_id', sa.String(length=32), nullable=False),
        sa.Column('action_id', sa.String(length=32), nullable=False),
        sa.Column('profile_id', sa.String(length=32), nullable=True),
        sa.Column('fe_request_data', sa.JSON(), nullable=True),
        # Deleting a profile cascades to its activity rows.
        sa.ForeignKeyConstraint(['profile_id'], ['profiles.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_activity_tracking_activity_type'), 'activity_tracking', ['activity_type'], unique=False)
    op.create_index(op.f('ix_activity_tracking_resource_type'), 'activity_tracking', ['resource_type'], unique=False)
    # ### end Alembic commands ###


def downgrade() -> None:
    """Drop both indexes, then the table (reverse order of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_activity_tracking_resource_type'), table_name='activity_tracking')
    op.drop_index(op.f('ix_activity_tracking_activity_type'), table_name='activity_tracking')
    op.drop_table('activity_tracking')
    # ### end Alembic commands ###
| migrations/versions/b74c85aaf1d5_.py/0 | {
"file_path": "migrations/versions/b74c85aaf1d5_.py",
"repo_id": "migrations"
} |
"""empty message
Revision ID: d1e6bfdc5a8f
Revises:
Create Date: 2024-07-07 10:50:16.678579
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision: str = 'd1e6bfdc5a8f'
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the initial schema: ``profiles``, ``chats`` and ``messages``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('profiles',
        sa.Column('id', sa.String(length=32), nullable=False),
        sa.Column('user_id', sa.String(length=36), nullable=False),
        sa.Column('project_id', sa.String(), nullable=True),
        sa.Column('name', sa.String(length=50), nullable=False),
        sa.Column('email', sa.String(length=255), nullable=False),
        sa.Column('is_admin', sa.Boolean(), nullable=False),
        # Stored as JSONB — assumes the key payload is structured; verify shape at the app layer.
        sa.Column('api_key', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('chats',
        sa.Column('id', sa.String(length=32), nullable=False),
        sa.Column('title', sa.String(length=200), nullable=False),
        sa.Column('chat_type', sa.Enum('QnA', 'Action', name='chat_type_enum'), nullable=True),
        sa.Column('is_visible', sa.Boolean(), nullable=False),
        sa.Column('metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
        sa.Column('profile_id', sa.String(length=32), nullable=True),
        sa.ForeignKeyConstraint(['profile_id'], ['profiles.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('messages',
        sa.Column('id', sa.String(length=32), nullable=False),
        sa.Column('text', sa.Text(), nullable=False),
        sa.Column('type', sa.Enum('Assistant', 'Human', name='message_type_enum'), nullable=False),
        sa.Column('sent_at', sa.TIMESTAMP(), nullable=False),
        sa.Column('msg_category', sa.Enum('QnA', 'Action', name='message_category'), nullable=True),
        sa.Column('chat_id', sa.String(length=32), nullable=False),
        # Deleting a chat removes its messages.
        sa.ForeignKeyConstraint(['chat_id'], ['chats.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    """Drop the three tables in dependency order (children first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('messages')
    op.drop_table('chats')
    op.drop_table('profiles')
    # ### end Alembic commands ###
| migrations/versions/d1e6bfdc5a8f_.py/0 | {
"file_path": "migrations/versions/d1e6bfdc5a8f_.py",
"repo_id": "migrations"
} |
"""empty message
Revision ID: fe71e01fc7b4
Revises: 8498935c3938
Create Date: 2024-09-20 11:40:14.212698
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'fe71e01fc7b4'
down_revision: Union[str, None] = '8498935c3938'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable JSON ``appearance`` column to ``profiles``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('profiles', sa.Column('appearance', sa.JSON(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:
    """Remove the ``appearance`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('profiles', 'appearance')
    # ### end Alembic commands ###
| migrations/versions/fe71e01fc7b4_.py/0 | {
"file_path": "migrations/versions/fe71e01fc7b4_.py",
"repo_id": "migrations"
} |
"""empty message
Revision ID: 933e5e67d032
Revises: fe71e01fc7b4
Create Date: 2024-09-03 04:48:28.262520
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '933e5e67d032'
down_revision: Union[str, None] = 'fe71e01fc7b4'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add non-nullable ``confirmation_stage`` boolean to ``chats``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): NOT NULL is added without a server_default; this fails on a
    # table that already has rows — presumably `chats` was empty when this ran.
    op.add_column('chats', sa.Column('confirmation_stage', sa.Boolean(), nullable=False))
    # ### end Alembic commands ###


def downgrade() -> None:
    """Remove the ``confirmation_stage`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('chats', 'confirmation_stage')
    # ### end Alembic commands ###
| migrations/versions/933e5e67d032_.py/0 | {
"file_path": "migrations/versions/933e5e67d032_.py",
"repo_id": "migrations"
} |
"""empty message
Revision ID: 9bd8c5c53135
Revises: d7053de67323
Create Date: 2024-07-20 15:10:14.021639
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '9bd8c5c53135'
down_revision: Union[str, None] = 'd7053de67323'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add ``messages.is_visible`` (defaults true) and ``profiles.api_key_status``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # server_default='true' lets the NOT NULL column be added to a populated table.
    op.add_column('messages', sa.Column('is_visible', sa.Boolean(), nullable=False, server_default='true'))
    op.add_column('profiles', sa.Column('api_key_status', sa.String(36), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:
    """Drop both columns added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('messages', 'is_visible')
    op.drop_column('profiles', 'api_key_status')
    # ### end Alembic commands ###
| migrations/versions/9bd8c5c53135_.py/0 | {
"file_path": "migrations/versions/9bd8c5c53135_.py",
"repo_id": "migrations"
} |
"""empty message
Revision ID: 8498935c3938
Revises: 9bd8c5c53135
Create Date: 2024-08-19 06:19:17.497652
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '8498935c3938'
down_revision: Union[str, None] = '9bd8c5c53135'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable timezone-aware ``last_updated_at`` to ``profiles``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('profiles', sa.Column('last_updated_at', sa.TIMESTAMP(timezone=True), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:
    """Remove the ``last_updated_at`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('profiles', 'last_updated_at')
    # ### end Alembic commands ###
| migrations/versions/8498935c3938_.py/0 | {
"file_path": "migrations/versions/8498935c3938_.py",
"repo_id": "migrations"
} |
"""Add onboarding column to profiles
Revision ID: cbe4893ba969
Revises: 4a431d838915
Create Date: 2024-09-30 13:03:58.832243
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'cbe4893ba969'
down_revision: Union[str, None] = '4a431d838915'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the ``onboardingstatus`` enum type and add ``profiles.onboarding``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # The type is created explicitly so add_column can reference it; new
    # profiles start at 'app_tour' via the server default.
    op.execute("CREATE TYPE onboardingstatus AS ENUM ('app_tour', 'action_tour', 'onboarded')")
    op.add_column('profiles', sa.Column('onboarding', sa.Enum('app_tour', 'action_tour', 'onboarded', name='onboardingstatus'), nullable=False, server_default='app_tour'))
    # ### end Alembic commands ###


def downgrade() -> None:
    """Drop the column first, then the enum type (reverse order of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('profiles', 'onboarding')
    op.execute("DROP TYPE onboardingstatus")
    # ### end Alembic commands ###
| migrations/versions/cbe4893ba969_.py/0 | {
"file_path": "migrations/versions/cbe4893ba969_.py",
"repo_id": "migrations"
} |
"""empty message
Revision ID: d7053de67323
Revises: d1e6bfdc5a8f
Create Date: 2024-07-09 11:52:47.696197
"""
from typing import Sequence, Union
from sqlalchemy.dialects import postgresql
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = 'd7053de67323'
down_revision: Union[str, None] = 'd1e6bfdc5a8f'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create ``actions``, link it to ``messages``, and drop ``chats.metadata``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('actions',
        sa.Column('id', sa.String(length=32), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=False),
        sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
        sa.Column('json_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.add_column('messages', sa.Column('action_id', sa.String(length=32), nullable=True))
    # Messages may now exist without a chat (action-only messages).
    op.alter_column('messages', 'chat_id',
        existing_type=sa.VARCHAR(length=32),
        nullable=True)
    op.drop_column('chats', 'metadata')
    # Constraint is explicitly named "action_id" so downgrade can drop it.
    op.create_foreign_key("action_id", 'messages', 'actions', ['action_id'], ['id'], ondelete='CASCADE')
    # ### end Alembic commands ###


def downgrade() -> None:
    """Undo upgrade(): restore chat_id NOT NULL, chats.metadata, drop actions."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint("action_id", 'messages', type_='foreignkey')
    op.alter_column('messages', 'chat_id',
        existing_type=sa.VARCHAR(length=32),
        nullable=False)
    op.drop_column('messages', 'action_id')
    op.add_column('chats', sa.Column('metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=True))
    op.drop_table('actions')
    # ### end Alembic commands ###
| migrations/versions/d7053de67323_.py/0 | {
"file_path": "migrations/versions/d7053de67323_.py",
"repo_id": "migrations"
} |
# Lint the whole repository with Ruff on pushes/PRs targeting main,
# plus on-demand via workflow_dispatch.
name: Linting Ruff
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  workflow_dispatch:
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.11"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install ruff
      - name: Run Ruff
        # --output-format=github annotates PR diffs with inline findings.
        run: ruff check --output-format=github .
| .github/workflows/github_ci.yml/0 | {
"file_path": ".github/workflows/github_ci.yml",
"repo_id": ".github"
} |
name: Build and Push cloud-whisper-custom-bot Docker Image to GCP Artifact Registry
on:
push:
branches: ["main"]
workflow_dispatch:
jobs:
build-push-artifact:
permissions:
id-token: write
contents: read
runs-on: ubuntu-latest
steps:
- name: "Checkout"
uses: "actions/checkout@v3"
- name: Get short commit_id
run: echo "COMMIT_SHORT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
- name: Get the branch name
run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV
- id: 'auth'
name: 'Authenticate to Google Cloud'
uses: 'google-github-actions/auth@v1'
with:
create_credentials_file: true
workload_identity_provider: 'projects/${{ secrets.PROJECT_ID }}/locations/global/workloadIdentityPools/${{ secrets.IDENTITY_POOL }}/providers/${{ secrets.IDENTITY_PROVIDER }}'
service_account: '${{ secrets.SERVICE_ACCOUNT }}'
- name: "Set up Cloud SDK"
uses: "google-github-actions/setup-gcloud@v1"
- name: "Docker auth"
run: |-
gcloud auth configure-docker us-east1-docker.pkg.dev --quiet
- name: Build image
run: docker build --tag ${{ secrets.ARTIFACTS_REPOSITORY }}:${{ env.BRANCH_NAME }}_${{ env.COMMIT_SHORT_SHA }} .
- name: Push image
run: docker push ${{ secrets.ARTIFACTS_REPOSITORY }}:${{ env.BRANCH_NAME }}_${{ env.COMMIT_SHORT_SHA }}
- name: Access cloud-whisper-deployment repository
uses: actions/checkout@v3
with:
repository: Wanclouds/cloud-whisper-deployment
ref: 'feat/cutom-bot-chart' # Change branch in cloud-whisper-deployment repo at 2 places in the workflow, once deployment PR is merged
token: ${{ secrets.PAT_GITHUB }}
- name: setup git config
run: |
git config user.name "adminwanclouds"
git config user.email "[email protected]"
yq -i eval '.images.backendimagetag= "${{ secrets.ARTIFACTS_REPOSITORY }}:${{ env.BRANCH_NAME }}_${{ env.COMMIT_SHORT_SHA }}"' cloud-whisper-custom-bot/values-production.yaml
git add .
git commit -m "update image tag of cloud-whisper-custom-bot with latest commit ID ${{ env.BRANCH_NAME}}_${{ env.COMMIT_SHORT_SHA }}"
git push origin feat/cutom-bot-chart
notify-slack:
runs-on: ubuntu-latest
needs: build-push-artifact
if: always()
steps:
- name: Notify github workflows status to Slack
uses: 8398a7/action-slack@v3
with:
status: ${{ needs.build-push-artifact.result }}
fields: repo,message,commit,author,eventName,ref,workflow,job,took
channel: '#ai-wanclouds-github-workflows'
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
| .github/workflows/github-actions-commit-id.yml/0 | {
"file_path": ".github/workflows/github-actions-commit-id.yml",
"repo_id": ".github"
} |
FROM nvidia/cuda:12.0.1-cudnn8-devel-ubuntu22.04

# SECURITY FIX: a real Hugging Face token was previously hard-coded in an ENV
# line here, baking the secret into every image layer and into version control.
# The leaked token must be revoked; pass a token at build time instead:
#   docker build --build-arg HF_TOKEN=... .
ARG HF_TOKEN=""
ENV HF_TOKEN=${HF_TOKEN}

ADD . /mistral
WORKDIR /mistral

RUN apt-get update && apt-get install -y \
    curl \
    wget \
    gnupg \
    cmake \
    lsb-release \
    build-essential \
    dumb-init \
    python3.11 \
    python3-pip \
    && pip3 install --upgrade pip

RUN pip install -r /mistral/requirements.txt
# Make the launch script executable for all users.
RUN chmod 755 /mistral/scripts/model.sh

ENV PYTHONPATH=/mistral
# dumb-init reaps zombies and forwards signals to the uvicorn process.
ENTRYPOINT ["dumb-init", "--"]
CMD ["/bin/bash", "-c", "/mistral/scripts/model.sh"]
| mistral/Dockerfile/0 | {
"file_path": "mistral/Dockerfile",
"repo_id": "mistral"
} |
# Compose definition for the Mistral inference API (requires an NVIDIA GPU).
services:
  mistral_web:
    container_name: mistral_web
    restart: always
    build:
      context: .
      dockerfile: Dockerfile
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU for this container.
            - driver: nvidia
              count: 1
              capabilities: [ gpu ]
    environment:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: all
      # Hugging Face token is injected at deploy time; kept empty in VCS.
      HF_AUTH_TOKEN: ""
    ports:
      # host 9090 -> container 8008 (uvicorn port, see scripts/model.sh)
      - "9090:8008"
    networks:
      # NOTE(review): "mistraL_web" (capital L) looks like a typo, but it
      # matches the network definition below, so compose resolves it fine.
      mistraL_web:
    volumes:
      # Bind-mount the source tree for live reload during development.
      - .:/mistral
networks:
mistraL_web: | mistral/docker-compose.yml/0 | {
"file_path": "mistral/docker-compose.yml",
"repo_id": "mistral"
} |
accelerate==0.24.0
transformers==4.36.2
fastapi==0.104.1
bitsandbytes==0.41.1
torch==2.2.0
peft==0.4.0
pydantic
uvicorn==0.24.0.post1
scipy==1.11.1
pydantic_settings
sse-starlette==1.6.5
starlette==0.27.0
loguru==0.7.2
| mistral/requirements.txt/0 | {
"file_path": "mistral/requirements.txt",
"repo_id": "mistral"
} |
from app.core.config import settings
from app.schema import Chat
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()
# Shorthand for the model settings singleton loaded in app.core.config.
model = settings.model


# NOTE(review): a GET endpoint with a request body is unusual — many HTTP
# clients cannot send one; consider POST (interface change, so not made here).
@app.get("/chat")
async def chat(chat: Chat):
    """Generate a completion for the chat; streams chunks when ``chat.stream`` is set."""
    prompt = model.get_prompt(chat.system_prompt, chat.messages)
    print(prompt)
    if not chat.stream:
        return settings.model.run_generation(prompt, stopping_criteria=chat.stopping_criteria)
    return StreamingResponse(settings.model.run_generation_stream(prompt))
| mistral/app/chatbot.py/0 | {
"file_path": "mistral/app/chatbot.py",
"repo_id": "mistral"
} |
from typing import List
from pydantic_settings import BaseSettings
from pydantic import AnyHttpUrl
from app.model import MistralModelSettings


class Settings(BaseSettings):
    """Application settings; constructing it also initializes the Mistral model."""
    URL_PREFIX: str = "/v1/whisper"
    # Origins allowed for CORS: local frontend (3000) and the API itself (8008).
    BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = [
        "http://localhost:3000",
        "http://localhost:8008"
    ]
    model: MistralModelSettings = MistralModelSettings()

    def __init__(self):
        super().__init__()
        # NOTE(review): heavy side effect in a constructor — this downloads and
        # loads model weights the moment this module is imported; confirm intended.
        self.model.init()


# Module-level singleton; importing app.core.config triggers the model load above.
settings = Settings()
| mistral/app/config.py/0 | {
"file_path": "mistral/app/config.py",
"repo_id": "mistral"
} |
from typing import List, Optional

from pydantic import BaseModel, validator


class Message(BaseModel):
    """One chat turn: role ('user' or 'assistant', enforced by Chat) plus its text."""
    role: str
    content: str


class Chat(BaseModel):
    """Request payload for the /chat endpoint."""
    messages: List[Message]
    # Optional system prompt folded into the first [INST] span by the model.
    # Fixed: was annotated plain `str` with a None default.
    system_prompt: Optional[str] = None
    stream: bool = False
    # Substrings that end generation early. Pydantic deep-copies field defaults
    # per instance, so the mutable [] default is safe here.
    stopping_criteria: list = []

    # NOTE(review): `validator` is the pydantic-v1 API (deprecated in v2);
    # migrate to `field_validator` when the project is fully on v2.
    @validator("messages")
    def validate_messages(cls, v):
        """Enforce that messages start with role 'user' and strictly alternate."""
        allowed_roles = ["user", "assistant"]
        for index, message in enumerate(v):
            # Removed leftover debug print that dumped every message to stdout.
            if message.role not in allowed_roles:
                raise ValueError("Role must be one of {}".format(allowed_roles))
            if index == 0 and message.role != "user":
                raise ValueError("First message must have role 'user'")
            elif index % 2 == 0 and message.role != "user":
                raise ValueError("Even-numbered messages must have role 'user'")
            elif index % 2 == 1 and message.role != "assistant":
                raise ValueError("Odd-numbered messages must have role 'assistant'")
        return v
| mistral/app/schema.py/0 | {
"file_path": "mistral/app/schema.py",
"repo_id": "mistral"
} |
import os
import typing
from threading import Thread

import loguru
import torch
from app.schema import Message
from peft import PeftConfig, PeftModel
from pydantic_settings import BaseSettings
from transformers import (AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer)


class MistralModelSettings(BaseSettings):
    """Loads a 4-bit quantized Mistral-7B-Instruct base model plus a PEFT
    adaptor and exposes prompt building and (streaming) generation."""

    HF_NAME: str = "mistralai/Mistral-7B-Instruct-v0.2"
    HF_AUTH_TOKEN: str = os.environ.get("HF_AUTH_TOKEN", None)
    HF_ADAPTOR_NAME: str = "Wanclouds/mistral_qna_wanclouds"
    # Populated by init(); typing.Any because HF objects aren't pydantic types.
    mistral_model: typing.Any = None
    tokenizer: typing.Any = None

    def __init__(self):
        super().__init__()

    def init(self):
        """Load model + tokenizer. Heavy: downloads weights and allocates GPU memory."""
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16
        )
        # Fixed: these calls previously re-hardcoded the model/adaptor names as
        # string literals, silently ignoring the HF_NAME / HF_ADAPTOR_NAME
        # settings fields; use the fields so the names live in one place.
        config = PeftConfig.from_pretrained(self.HF_ADAPTOR_NAME)
        self.mistral_model = AutoModelForCausalLM.from_pretrained(
            self.HF_NAME, quantization_config=bnb_config, device_map="auto")
        self.tokenizer = AutoTokenizer.from_pretrained(
            config.base_model_name_or_path, add_bos_token=True, trust_remote_code=True)
        self.mistral_model = PeftModel.from_pretrained(self.mistral_model, self.HF_ADAPTOR_NAME)
        # Mistral ships no dedicated pad token; reuse EOS for padding.
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.mistral_model = self.mistral_model.eval()

    def get_prompt(self, system_prompt: str, messages: typing.List[Message]) -> str:
        """Render messages into Mistral's ``[INST] ... [/INST]`` chat format.

        The optional system prompt is folded into the first [INST] span.
        Assumes messages alternate user/assistant starting with 'user'
        (enforced by app.schema.Chat's validator).
        """
        text = '[INST] '
        if system_prompt:
            text = f'{text}{system_prompt.strip()} '
        for index, message in enumerate(messages):
            role, content = message.role, message.content
            if role == 'user':
                text = f'{text}{content.strip()}'
            elif role == 'assistant':
                # Close the open user turn, emit the reply, open the next turn.
                text = f'{text} [/INST]{content.strip()}</s> [INST] '
        text = f'{text} [/INST]'
        return text

    def run_generation_stream(
        self, prompt: str, tokens=2000
    ):
        """Yield generated text chunks as they are produced (generator)."""
        temperature: float = 0.001
        top_p: float = 0.95
        top_k: int = 50
        inputs = self.tokenizer([prompt], return_tensors='pt').to('cuda')
        generate_kwargs = dict(
            inputs,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_new_tokens=tokens,
            num_beams=1,
            repetition_penalty=1.2
        )
        streamer = TextIteratorStreamer(
            self.tokenizer,
            timeout=10.,
            skip_prompt=True,
            skip_special_tokens=True
        )
        generate_kwargs['streamer'] = streamer
        # generate() blocks, so it runs in a worker thread while this
        # generator drains the streamer as chunks arrive.
        thread = Thread(target=self.mistral_model.generate, kwargs=generate_kwargs)
        thread.start()
        for word in streamer:
            # Stop streaming if the model starts echoing a new instruction marker.
            if '[INST]' in word:
                return
            yield word

    def run_generation(
        self, prompt: str, tokens=2000, stopping_criteria: typing.List = None
    ):
        """Generate a full completion for *prompt* and return only the new text.

        stopping_criteria: optional substrings; generation halts as soon as any
        of them appears in the decoded output.
        """
        def custom_stopping_criteria(input_ids, scores):
            # Decode what has been generated so far and stop once any
            # requested stop-substring appears in it.
            generated_text = self.tokenizer.decode(input_ids[0], skip_special_tokens=True)
            return any(stop_item in generated_text for stop_item in stopping_criteria)

        # Fixed: the old log line iterated `[stopping_criteria]` (the list
        # wrapped in another list) and logged an unevaluated generator object.
        loguru.logger.info(stopping_criteria)
        temperature: float = 0.001
        top_p: float = 0.95
        top_k: int = 50
        inputs = self.tokenizer([prompt], return_tensors='pt').to('cuda')
        generate_kwargs = dict(
            inputs,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_new_tokens=tokens,
            num_beams=1,
            repetition_penalty=1.2,
            stopping_criteria=[custom_stopping_criteria] if stopping_criteria else None
        )
        result = self.mistral_model.generate(**generate_kwargs)
        result = self.tokenizer.decode(result[0], skip_special_tokens=True)
        # The decoded output echoes the prompt; strip it. partition() guards
        # against the decoder normalising the prompt text, which made the old
        # `result.split(prompt)[1]` raise IndexError.
        _, sep, completion = result.partition(prompt)
        return (completion if sep else result).strip()
| mistral/app/model.py/0 | {
"file_path": "mistral/app/model.py",
"repo_id": "mistral"
} |
#!/bin/bash
# Launch the Mistral FastAPI app (app/chatbot.py) with auto-reload on port 8008.
set -x
APP_DIR="/mistral"
cd "$APP_DIR" || exit 1
uvicorn app.chatbot:app --reload --host 0.0.0.0 --port 8008 || {
    echo 'Failed to start the Application'
    exit 1
}
| mistral/scripts/model.sh/0 | {
"file_path": "mistral/scripts/model.sh",
"repo_id": "mistral"
} |
#!/usr/bin/env bash
# Start the Celery beat scheduler after clearing any stale pid file.
# -f: the pid file may not exist on a clean start; plain `rm` printed a
# spurious "No such file or directory" error in that case.
rm -f celerybeat.pid
celery -A app.redis_scheduler.celery_app beat --loglevel=info
| scripts/beat.sh/0 | {
"file_path": "scripts/beat.sh",
"repo_id": "scripts"
} |
#!/bin/bash
# Dev entrypoint: apply DB migrations, then start the API with auto-reload.
set -x
APP_DIR="/CloudWhisperCustomBot"
cd "$APP_DIR" || exit 1
# Add a "make upgrade" command here
# Apply all pending Alembic migrations before serving traffic.
alembic upgrade head || {
    echo 'Failed to run make upgrade'
    exit 1
}
uvicorn app.main:app --reload --host 0.0.0.0 --port 8008 || {
    echo 'Failed to start the Application'
    exit 1
}
| scripts/dev.sh/0 | {
"file_path": "scripts/dev.sh",
"repo_id": "scripts"
} |
#!/usr/bin/env bash
# Celery worker dedicated to the redis_queue (discovery jobs).
# Fixed: --loglevel was passed twice with the same value; kept one occurrence.
celery -A app.redis_scheduler.celery_app worker -Q redis_queue --loglevel=info -c 5 --without-gossip --without-mingle --without-heartbeat --max-tasks-per-child=1 -Ofair
| scripts/discovery_worker.sh/0 | {
"file_path": "scripts/discovery_worker.sh",
"repo_id": "scripts"
} |
#!/usr/bin/env bash
# General-purpose Celery worker (default queue).
# Fixed: --loglevel appeared twice with conflicting values (INFO then info);
# the last occurrence won, so the effective level 'info' is kept.
celery -A app.redis_scheduler.celery_app worker --loglevel=info -c 5 --without-gossip --without-mingle --without-heartbeat --max-tasks-per-child=1 -Ofair
| scripts/worker.sh/0 | {
"file_path": "scripts/worker.sh",
"repo_id": "scripts"
} |
worker_processes 1;
user nobody nogroup;
pid /tmp/nginx.pid;
error_log /tmp/nginx.error.log;
events {
worker_connections 1024; # increase if you have lots of clients
accept_mutex off; # set to 'on' if nginx worker_processes > 1
}
error_log /dev/stdout info;
http {
include mime.types;
default_type application/octet-stream;
access_log /tmp/nginx.access.log combined;
access_log /dev/stdout;
sendfile off;
server_tokens off;
upstream app_server {
server unix:/tmp/gunicorn.sock fail_timeout=0;
}
server {
# if no Host match, close the connection to prevent host spoofing
listen 8008 default_server;
return 444;
}
server {
listen 80 default_server;
listen 443 ssl; # comment this line for local deployment
ssl_certificate /etc/ssl/certs/server.pem;
ssl_certificate_key /etc/ssl/certs/my-server.key.pem;
ssl_session_cache builtin:1000 shared:SSL:10m;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
ssl_prefer_server_ciphers on;
keepalive_timeout 5;
underscores_in_headers on;
location /v1/users/ {
client_max_body_size 4M;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
send_timeout 600s;
proxy_redirect off;
proxy_pass https://vpc-stage.wanclouds.net;
proxy_ssl_server_name on;
proxy_ssl_verify_depth 2;
proxy_request_buffering off;
}
# Use regex to match both /v1/ibm/clouds and /v1/ibm/clouds/<cloud_id>
location ~ ^/v1/ibm/clouds(/[^/]+)?$ {
client_max_body_size 4M;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
send_timeout 600s;
proxy_redirect off;
proxy_pass https://vpc-stage.wanclouds.net/v1/ibm/clouds$1;
proxy_ssl_server_name on;
proxy_ssl_verify_depth 2;
proxy_request_buffering off;
}
# Use regex to match both /v1/softlayer/accounts and /v1/softlayer/accounts/<account_id>
location ~ ^/v1/softlayer/accounts(/[^/]+)?$ {
client_max_body_size 4M;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
send_timeout 600s;
proxy_redirect off;
proxy_pass https://vpc-stage.wanclouds.net/v1/softlayer/accounts$1;
proxy_ssl_server_name on;
proxy_ssl_verify_depth 2;
proxy_request_buffering off;
}
location ~ ^/v1/ibm/workflows(/[^/]+)?$ {
client_max_body_size 4M;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
send_timeout 600s;
proxy_redirect off;
proxy_pass https://vpc-stage.wanclouds.net/v1/ibm/workflows$1;
proxy_ssl_server_name on;
proxy_ssl_verify_depth 2;
proxy_request_buffering off;
}
location /v1/users/api_key {
client_max_body_size 4M;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
send_timeout 600s;
proxy_redirect off;
proxy_pass https://vpc-stage.wanclouds.net/v1/users/api_key;
proxy_ssl_server_name on;
proxy_ssl_verify_depth 2;
proxy_request_buffering off;
}
location /v1/whisper/ {
client_max_body_size 4M;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
send_timeout 600s;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://cloud_whisper_web:8008;
proxy_ssl_verify_depth 2;
proxy_request_buffering off;
}
location / {
client_max_body_size 4M;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
send_timeout 600s;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://cloud_whisper_fe:3000; #http://localhost:3000; for deploying locally and remove fe container
proxy_ssl_verify_depth 2;
proxy_request_buffering off;
}
location /v1/whisper/websockets/ {
proxy_pass http://cloud_whisper_web:8008;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_buffering off;
proxy_set_header Host $host;
}
}
}
| nginx/nginx.conf/0 | {
"file_path": "nginx/nginx.conf",
"repo_id": "nginx"
} |
Subsets and Splits