date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | facebookresearch/holotorch | holotorch~ComponentWrapper~SourceFactory.py | ########################################################
# Copyright (c) 2022 Meta Platforms, Inc. and affiliates
#
# Holotorch is an optimization framework for differentiable wave-propagation written in PyTorch
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
#
# Contact:
# florianschiffers (at) gmail.com
# ocossairt ( at ) fb.com
#
########################################################
import torch
from holotorch.LightSources.Source import Source
from holotorch.LightSources.CoherentSource import CoherentSource
from holotorch.Spectra.SpacingContainer import SpacingContainer
from holotorch.utils.Enumerators import *
from holotorch.utils.units import *
from holotorch.Spectra.WavelengthContainer import WavelengthContainer
from holotorch.ComponentWrapper import PARAM_SOURCE
import holotorch.utils.Dimensions as Dimensions
import holotorch.Spectra as Spectra
def create_source(source : PARAM_SOURCE) -> Source:
source_type = source.source_type
color_flag = source.color_flag
spectrum_type = source.spectrum_type
source_dim = Dimensions.HW(
height = source.height,
width = source.width,
)
if spectrum_type is ENUM_SPECTRUM.NO_SPECTRUM:
source = create_no_spectrum_source(
color_flag = color_flag,
source_dimension = source_dim,
grid_spacing = source.grid_spacing,
wavelengths = source.wavelengths
)
else:
source = create_source_from_param(
source_type = source_type,
color_flag = color_flag,
spectrum_type = spectrum_type,
source_dimension = source_dim,
grid_spacing = source.grid_spacing,
num_modes_per_center_wavelength = source.num_modes_per_center_wavelength,
focal_length_collimating_lens = source.f_col,
# Diameter of the source
source_diameter = source.source_diameter,
spatial_coherence_sampling_type = source.spatial_coherence_sampling_type,
temporal_coherence_sampling_type = source.temporal_coherence_sampling_type,
wavelengths = source.wavelengths,
amplitudes = source.amplitudes,
# Standard deviation of the Gaussian amplitude
sigma = source.sigma
)
return source
def create_no_spectrum_source(
color_flag : ENUM_SENSOR_TYPE,
source_dimension : Dimensions.HW,
grid_spacing : float,
wavelengths
) -> Source:
if not torch.is_tensor(wavelengths):
wavelengths = torch.tensor(wavelengths)
# NOTE: make sure that grid spacing always has TCD dimensions
grid_spacing = SpacingContainer(
spacing=torch.tensor([grid_spacing]).expand(1,1,2),
tensor_dimension= Dimensions.TCD(1,1,2)
)
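# The number of "time" channels follows the sensor type: three wavelengths for a Bayer (RGB) sensor, one for a monochromatic sensor.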
if color_flag == ENUM_SENSOR_TYPE.BAYER:
n_time = 3
elif color_flag == ENUM_SENSOR_TYPE.MONOCHROMATIC:
n_time = 1
else:
raise NotImplementedError("This color flag does not exist. Please implement it or set a supported one.")
tensor_dimension = Dimensions.T(n_time = n_time)
wavelengths = WavelengthContainer(
wavelengths = wavelengths,
tensor_dimension = tensor_dimension,
center_wavelength=wavelengths
)
coherent_size = Dimensions.TCHW(
n_time=wavelengths.tensor_dimension.time,
n_channel=1,
height = source_dimension.height,
width = source_dimension.width,
)
source = CoherentSource(
tensor_dimension=coherent_size,
wavelengths=wavelengths,
grid_spacing=grid_spacing
)
return source
def create_source_from_param(
amplitudes,
sigma,
spectrum_type : ENUM_SPECTRUM,
color_flag : ENUM_SENSOR_TYPE,
source_type : ENUM_SOURCE_TYPE,
source_dimension : Dimensions.HW,
grid_spacing : float,
num_modes_per_center_wavelength : int,
wavelengths,
source_diameter : float,
focal_length_collimating_lens : float,
spatial_coherence_sampling_type : ENUM_SPATIAL_COHERENCE_SAMPLER = ENUM_SPATIAL_COHERENCE_SAMPLER.PLANE_WAVES,
temporal_coherence_sampling_type : ENUM_TEMPORAL_COHERENCE_SAMPLER = ENUM_TEMPORAL_COHERENCE_SAMPLER.UNIFORM,
) -> Source:
if spectrum_type is ENUM_SPECTRUM.STANFORD_LED:
spectrum = Spectra.SpectrumRGB.stanford_LED()
elif spectrum_type is ENUM_SPECTRUM.STANFORD_SLED:
spectrum = Spectra.SpectrumRGB.stanford_sLED()
elif spectrum_type is ENUM_SPECTRUM.PARAMETERIZED:
spectrum = Spectra.SpectrumRGB.get_analytic(
center_wavelengths=wavelengths,
amplitudes=amplitudes,
sigma_center_wavelengths=sigma
)
else:
raise NotImplementedError("This spectrum type has not been implemented.")
# NOTE: make sure that grid spacing always has TCD dimensions
grid_spacing = SpacingContainer(
spacing=torch.tensor([grid_spacing]).expand(1,1,2),
tensor_dimension= Dimensions.TCD(1,1,2)
)
# This works because stanford LED exports these
center_wavelengths = spectrum.center_wavelengths
sigma_wavelengths = spectrum.sigma_wavelengths
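# A monochromatic sensor only uses the first center wavelength (and its sigma) of the spectrum.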
if color_flag is ENUM_SENSOR_TYPE.MONOCHROMATIC:
center_wavelengths = center_wavelengths[0]
sigma_wavelengths = sigma_wavelengths[0]
if center_wavelengths.ndim == 0:
center_wavelengths = torch.tensor([center_wavelengths])
sigma_wavelengths = torch.tensor([sigma_wavelengths])
if source_type == ENUM_SOURCE_TYPE.COHERENT:
tensor_dimension = Dimensions.T(n_time = len(center_wavelengths))
wavelengths = WavelengthContainer(
wavelengths=center_wavelengths,
tensor_dimension=tensor_dimension,
)
coherent_size = Dimensions.TCHW(
n_time=wavelengths.tensor_dimension.time,
n_channel=1,
height = source_dimension.height,
width = source_dimension.width,
)
source = CoherentSource(
tensor_dimension = coherent_size,
wavelengths = wavelengths,
grid_spacing = grid_spacing
)
elif source_type == ENUM_SOURCE_TYPE.PARTIALLY_COHERENT:
from holotorch.LightSources.PartialCoherentSource import PartialCoherentSource
source = PartialCoherentSource(
tensor_dimension = source_dimension,
spectrum = spectrum,
num_modes_per_center_wavelength = num_modes_per_center_wavelength,
grid_spacing = grid_spacing,
source_diameter = source_diameter,
focal_length_collimating_lens = focal_length_collimating_lens,
spatial_coherence_sampling_type = spatial_coherence_sampling_type,
temporal_coherence_sampling_type = temporal_coherence_sampling_type,
)
return source | [] |
2024-01-10 | dhpitt/nytautobrief | nyt_interface.py | # Pull headlines
import argparse
import os
from pynytimes import NYTAPI
import openai
def retrieve_and_summarize(section="science", length=5):
nyt = NYTAPI(os.getenv("NYT_API_KEY"), parse_dates=True)
openai.api_key = os.getenv("OPENAI_API_KEY")
top_stories = nyt.top_stories(section=section)
blurb = ""
for story in top_stories[2:]:
blurb += (
story["title"]
+ ". "
+ story["abstract"]
+ " "
+ story["multimedia"][0]["caption"]
)
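# Ask the legacy OpenAI Completions API for the summary; budget roughly 50 tokens per requested sentence and use temperature 0 for repeatable output.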
text = openai.Completion.create(
model="text-davinci-003",
prompt=f"Summarize the following into {length} sentences for a layperson who is a science enthusiast: "
+ blurb,
max_tokens=50 * int(length),
temperature=0,
)
return text["choices"][0]["text"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="nyt_section_summarizer",
description="Pick a section: science, politics, sports. Get a 5-sentence summary.",
epilog="Requires the NYT_API_KEY and OPENAI_API_KEY environment variables to be set.",
)
parser.add_argument("-s", "--section", required=False, default="science")
parser.add_argument("-l", "--length", required=False, default=5)
args = parser.parse_args()
print(retrieve_and_summarize(args.section, args.length))
| [
"Summarize the following into PLACEHOLDER sentences for a layperson who is a science enthusiast: "
] |
2024-01-10 | 0x0918/Masamune | embedding~faissoor.py | import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
import json
from langchain.document_loaders import JSONLoader
def faiss_embed_with_metadata():
# List all the files in the `results` folder
results = os.listdir("../results")
resulting_docs = []
resulting_metadata = []
# define the embeddings
embeddings = OpenAIEmbeddings()
# for each file, load the json and add to the FAISS index
for file in results:
# load the json file, so that it's parsed and can be checked for the fields
parsed_file = json.load(
open("../results/" + file, "r")
)
to_be_schema = ""
# NOTE: for codearena, we've temporarily patched "body" / "title" by putting the title in the body
if file == "hacklabs_findings.json":
to_be_schema = ".[].title"
else:
to_be_schema = ".[].body"
# load the json to embed. we're essentially embedding either the title or the body, depending on the relevance of the field
loader = JSONLoader(
file_path = "../results/" + file,
jq_schema = to_be_schema
)
documents = loader.load()
parsed_metadata = []
for i in range(len(parsed_file)):
parsed_metadata.append({
# include each key only if it exists in this entry; otherwise store None
"title": parsed_file[i]["title"] if "title" in parsed_file[i] else None,
"labels": parsed_file[i]["labels"] if "labels" in parsed_file[i] else None,
"html_url": parsed_file[i]["html_url"] if "html_url" in parsed_file[i] else None,
"target": parsed_file[i]["target"] if "target" in parsed_file[i] else None,
"body": parsed_file[i]["body"] if "body" in parsed_file[i] else None
}
)
resulting_docs += [doc.page_content for doc in documents]
resulting_metadata += parsed_metadata
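# Split the collected texts into ~1000-character chunks with 100-character overlap, carrying each finding's metadata onto its chunks.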
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
docs = text_splitter.create_documents(
resulting_docs,
metadatas = resulting_metadata
)
# build the FAISS index from all chunked documents
vectorstore = FAISS.from_documents(
documents = docs,
embedding = embeddings,
)
# save the index
vectorstore.save_local("faiss_merged_extrametadata")
def query_stuff():
# embeddings
embeddings = OpenAIEmbeddings()
# load the index
vectorstore = FAISS.load_local("faiss_merged_extrametadata", embeddings)
# query the index
query = "impair a loan"
results = vectorstore.similarity_search(
query = query,
k = 10,
)
for i, res in enumerate(results):
print(str(i) + ". " + res.page_content + "\n\n")
# print()
if __name__ == "__main__":
# faiss_embed_with_metadata()
query_stuff()
| [] |
2024-01-10 | halilagin/LLMs-on-kubernetes | llms~llm-0001~llm_agent~query_agent.py | from rag.embed import get_embedding_model
import numpy as np
import psycopg2
from pgvector.psycopg2 import register_vector
import os
import openai
import time
from rag.generate import prepare_response
from rag.utils import get_credentials
from model import Query, Answer
def get_sources_and_context(query, embedding_model, num_chunks):
embedding = np.array(embedding_model.embed_query(query))
with psycopg2.connect(os.environ["DB_CONNECTION_STRING"]) as conn:
register_vector(conn)
with conn.cursor() as cur:
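# "<=>" is pgvector's cosine-distance operator, so the closest chunks are returned first.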
cur.execute("SELECT * FROM document ORDER BY embedding <=> %s LIMIT %s", (embedding, num_chunks))
rows = cur.fetchall()
context = [{"text": row[1]} for row in rows]
sources = [row[2] for row in rows]
return sources, context
class QueryAgent:
def __init__(self, embedding_model_name="thenlper/gte-base",
llm="meta-llama/Llama-2-70b-chat-hf", temperature=0.0,
max_context_length=4096, system_content="", assistant_content=""):
# Embedding model
self.embedding_model = get_embedding_model(
embedding_model_name=embedding_model_name,
model_kwargs={"device": "cuda"},
encode_kwargs={"device": "cuda", "batch_size": 100})
# LLM
self.llm = llm
self.temperature = temperature
self.context_length = max_context_length - len(system_content + assistant_content)
self.system_content = system_content
self.assistant_content = assistant_content
def __call__(self, query, num_chunks=5, stream=True) -> Answer:
# Get sources and context
sources, context = get_sources_and_context(
query=query,
embedding_model=self.embedding_model,
num_chunks=num_chunks)
# Generate response
user_content = f"query: {query}, context: {context}"
answer = generate_response(
llm=self.llm,
temperature=self.temperature,
stream=stream,
system_content=self.system_content,
assistant_content=self.assistant_content,
user_content=user_content[: self.context_length])
# Result
result = Answer(
question = query,
sources = sources,
answer = answer,
llm = self.llm,
)
return result
def generate_response(
llm, temperature=0.0, stream=True,
system_content="", assistant_content="", user_content="",
max_retries=3, retry_interval=60):
"""Generate response from an LLM."""
retry_count = 0
api_base, api_key = get_credentials(llm=llm)
while retry_count < max_retries:
try:
response = openai.ChatCompletion.create(
model=llm,
temperature=temperature,
stream=stream,
api_base=api_base,
api_key=api_key,
messages=[
{"role": "system", "content": system_content},
{"role": "assistant", "content": assistant_content},
{"role": "user", "content": user_content},
],
)
return prepare_response(response=response, stream=stream)
except Exception as e:
print(f"Exception: {e}")
time.sleep(retry_interval) # default is per-minute rate limits
retry_count += 1
return ""
| [] |
2024-01-10 | westfish/PaddleMIX | ppdiffusers~ppdiffusers~pipelines~stable_diffusion_xl~pipeline_stable_diffusion_xl_inpaint.py | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import paddle
import PIL
from ...image_processor import VaeImageProcessor
from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention_processor import (
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
is_invisible_watermark_available,
logging,
randn_tensor,
replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionXLPipelineOutput
if is_invisible_watermark_available():
from .watermark import StableDiffusionXLWatermarker
import paddlenlp
logger = logging.get_logger(__name__)
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import paddle
>>> from ppdiffusers import StableDiffusionXLInpaintPipeline
>>> from ppdiffusers.utils import load_image
>>> pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-base-1.0",
... paddle_dtype=paddle.float16,
... variant="fp16",
... use_safetensors=True,
... )
>>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
>>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
>>> init_image = load_image(img_url).convert("RGB")
>>> mask_image = load_image(mask_url).convert("RGB")
>>> prompt = "A majestic tiger sitting on a bench"
>>> image = pipe(
... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80
... ).images[0]
```
"""
# Copied from ppdiffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(axis=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(axis=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
def mask_pil_to_paddle(mask, height, width):
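# Convert PIL images or numpy arrays (or lists of either) into a paddle mask tensor; PIL inputs are resized to (width, height), converted to grayscale, and scaled to [0, 1].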
if isinstance(mask, (PIL.Image.Image, np.ndarray)):
mask = [mask]
if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
mask = np.concatenate([np.array(m.convert("L"))[(None), (None), :] for m in mask], axis=0)
mask = mask.astype(np.float32) / 255.0
elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
mask = np.concatenate([m[(None), (None), :] for m in mask], axis=0)
mask = paddle.to_tensor(data=mask)
return mask
def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
"""
Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
converted to ``paddle.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
``image`` and ``1`` for the ``mask``.
The ``image`` will be converted to ``paddle.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
binarized (``mask > 0.5``) and cast to ``paddle.float32`` too.
Args:
image (Union[np.array, PIL.Image, paddle.Tensor]): The image to inpaint.
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
``paddle.Tensor`` or a ``batch x channels x height x width`` ``paddle.Tensor``.
mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
``paddle.Tensor`` or a ``batch x 1 x height x width`` ``paddle.Tensor``.
Raises:
ValueError: ``paddle.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``paddle.Tensor`` mask
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
TypeError: ``mask`` is a ``paddle.Tensor`` but ``image`` is not
(or the other way around).
Returns:
tuple[paddle.Tensor]: The pair (mask, masked_image) as ``paddle.Tensor`` with 4
dimensions: ``batch x channels x height x width``.
"""
if image is None:
raise ValueError("`image` input cannot be undefined.")
if mask is None:
raise ValueError("`mask_image` input cannot be undefined.")
if isinstance(image, paddle.Tensor):
if not isinstance(mask, paddle.Tensor):
mask = mask_pil_to_paddle(mask, height, width)
if image.ndim == 3:
image = image.unsqueeze(axis=0)
# Batch and add channel dim for single mask
if mask.ndim == 2:
mask = mask.unsqueeze(axis=0).unsqueeze(axis=0)
# Batch single mask or add channel dim
if mask.ndim == 3:
# Single batched mask, no channel dim or single mask not batched but channel dim
if mask.shape[0] == 1:
mask = mask.unsqueeze(axis=0)
# Batched masks no channel dim
else:
mask = mask.unsqueeze(axis=1)
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
# assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
# Check image is in [-1, 1]
# if image.min() < -1 or image.max() > 1:
# raise ValueError("Image should be in [-1, 1] range")
# Check mask is in [0, 1]
if mask.min() < 0 or mask.max() > 1:
raise ValueError("Mask should be in [0, 1] range")
# Binarize mask
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# Image as float32
image = image.cast(dtype="float32")
elif isinstance(mask, paddle.Tensor):
raise TypeError(f"`mask` is a paddle.Tensor but `image` (type: {type(image)} is not")
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
# resize all images w.r.t. the passed height and width
image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
image = [np.array(i.convert("RGB"))[(None), :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[(None), :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = paddle.to_tensor(data=image).cast(dtype="float32") / 127.5 - 1.0
mask = mask_pil_to_paddle(mask, height, width)
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
if image.shape[1] == 4:
# images are in latent space and thus can't be masked;
# set masked_image to None. We assume that the checkpoint
# is not an inpainting checkpoint. TODO(Yiyi): clean this up later
masked_image = None
else:
masked_image = image * (mask < 0.5)
# n.b. ensure backwards compatibility as old function does not return image
if return_image:
return mask, masked_image, image
return mask, masked_image
class StableDiffusionXLInpaintPipeline(
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
):
"""
Pipeline for text-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
- *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion XL uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
text_encoder_2 ([` CLIPTextModelWithProjection`]):
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
specifically the
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
tokenizer_2 (`CLIPTokenizer`):
Second Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
_optional_components = ["tokenizer", "text_encoder"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: paddlenlp.transformers.CLIPTextModel,
text_encoder_2: paddlenlp.transformers.CLIPTextModelWithProjection,
tokenizer: paddlenlp.transformers.CLIPTokenizer,
tokenizer_2: paddlenlp.transformers.CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
requires_aesthetics_score: bool = False,
force_zeros_for_empty_prompt: bool = True,
add_watermarker: Optional[bool] = None,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
if add_watermarker:
self.watermark = StableDiffusionXLWatermarker()
else:
self.watermark = None
# Copied from ppdiffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(
self,
prompt: str,
prompt_2: Optional[str] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt: Optional[str] = None,
negative_prompt_2: Optional[str] = None,
prompt_embeds: Optional[paddle.Tensor] = None,
negative_prompt_embeds: Optional[paddle.Tensor] = None,
pooled_prompt_embeds: Optional[paddle.Tensor] = None,
negative_pooled_prompt_embeds: Optional[paddle.Tensor] = None,
lora_scale: Optional[float] = None,
):
"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
prompt_2 = prompt_2 or prompt
# textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
prompts = [prompt, prompt_2]
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pd",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
if (
untruncated_ids.shape[-1] >= text_input_ids.shape[-1]
and not paddle.equal_all(x=text_input_ids, y=untruncated_ids).item()
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
f"The following part of your input was truncated because CLIP can only handle sequences up to {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(text_input_ids, output_hidden_states=True)
# We are always interested only in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = paddle.concat(x=prompt_embeds_list, axis=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = paddle.zeros_like(x=prompt_embeds)
negative_pooled_prompt_embeds = paddle.zeros_like(x=pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
negative_prompt_2 = negative_prompt_2 or negative_prompt
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt, negative_prompt_2]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`."
)
else:
uncond_tokens = [negative_prompt, negative_prompt_2]
negative_prompt_embeds_list = []
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pd"
)
negative_prompt_embeds = text_encoder(uncond_input.input_ids, output_hidden_states=True)
# We are always interested only in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = paddle.concat(x=negative_prompt_embeds_list, axis=-1)
prompt_embeds = prompt_embeds.cast(dtype=self.text_encoder_2.dtype)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.tile(repeat_times=[1, num_images_per_prompt, 1])
prompt_embeds = prompt_embeds.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.cast(dtype=self.text_encoder_2.dtype)
negative_prompt_embeds = negative_prompt_embeds.tile(repeat_times=[1, num_images_per_prompt, 1])
negative_prompt_embeds = negative_prompt_embeds.reshape([batch_size * num_images_per_prompt, seq_len, -1])
pooled_prompt_embeds = pooled_prompt_embeds.tile(repeat_times=[1, num_images_per_prompt]).reshape(
[bs_embed * num_images_per_prompt, -1]
)
if do_classifier_free_guidance:
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.tile(
repeat_times=[1, num_images_per_prompt]
).reshape([bs_embed * num_images_per_prompt, -1])
return (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
prompt_2,
height,
width,
strength,
callback_steps,
negative_prompt=None,
negative_prompt_2=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (
callback_steps is None
or callback_steps is not None
and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two."
)
elif prompt_2 is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
f"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}."
)
def prepare_latents(
self,
batch_size,
num_channels_latents,
height,
width,
dtype,
generator,
latents=None,
image=None,
timestep=None,
is_strength_max=True,
add_noise=True,
return_noise=False,
return_image_latents=False,
):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if (image is None or timestep is None) and not is_strength_max:
raise ValueError(
"Since strength < 1, initial latents are to be initialised as a combination of Image + Noise. However, either the image or the noise timestep has not been provided."
)
if image.shape[1] == 4:
image_latents = image.cast(dtype=dtype)
elif return_image_latents or latents is None and not is_strength_max:
image = image.cast(dtype=dtype)
image_latents = self._encode_vae_image(image=image, generator=generator)
if latents is None and add_noise:
noise = randn_tensor(shape, generator=generator, dtype=dtype)
# if strength is 1. then initialise the latents to noise, else initialise to image + noise
latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
# if pure noise then scale the initial latents by the Scheduler's init sigma
latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
elif add_noise:
noise = latents
latents = noise * self.scheduler.init_noise_sigma
else:
noise = randn_tensor(shape, generator=generator, dtype=dtype)
latents = image_latents
outputs = (latents,)
if return_noise:
outputs += (noise,)
if return_image_latents:
outputs += (image_latents,)
return outputs
def _encode_vae_image(self, image: paddle.Tensor, generator: paddle.Generator):
dtype = image.dtype
if self.vae.config.force_upcast:
image = image.astype(dtype="float32")
self.vae.to(dtype="float32")
if isinstance(generator, list):
image_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
for i in range(image.shape[0])
]
image_latents = paddle.concat(x=image_latents, axis=0)
else:
image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
if self.vae.config.force_upcast:
self.vae.to(dtype=dtype)
image_latents = image_latents.cast(dtype)
image_latents = self.vae.config.scaling_factor * image_latents
return image_latents
def prepare_mask_latents(
self, mask, masked_image, batch_size, height, width, dtype, generator, do_classifier_free_guidance
):
# resize the mask to latents shape as we concatenate the mask to the latents
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
# and half precision
mask = paddle.nn.functional.interpolate(
x=mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
)
mask = mask.cast(dtype=dtype)
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
if mask.shape[0] < batch_size:
if not batch_size % mask.shape[0] == 0:
raise ValueError(
f"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number of masks that you pass is divisible by the total requested batch size."
)
mask = mask.tile(repeat_times=[batch_size // mask.shape[0], 1, 1, 1])
mask = paddle.concat(x=[mask] * 2) if do_classifier_free_guidance else mask
masked_image_latents = None
if masked_image is not None:
masked_image = masked_image.cast(dtype=dtype)
masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
if masked_image_latents.shape[0] < batch_size:
if not batch_size % masked_image_latents.shape[0] == 0:
raise ValueError(
f"The passed images and the required batch size don't match. Images are supposed to be duplicated to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed. Make sure the number of images that you pass is divisible by the total requested batch size."
)
masked_image_latents = masked_image_latents.tile(
repeat_times=[batch_size // masked_image_latents.shape[0], 1, 1, 1]
)
masked_image_latents = (
paddle.concat(x=[masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
)
# align the dtype to prevent errors when concatenating it with the latent model input
masked_image_latents = masked_image_latents.cast(dtype=dtype)
return mask, masked_image_latents
def get_timesteps(self, num_inference_steps, strength, denoising_start=None):
# get the original timestep using init_timestep
if denoising_start is None:
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
else:
t_start = 0
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
# Strength is irrelevant if we directly request a timestep to start at;
# that is, strength is determined by the denoising_start instead.
if denoising_start is not None:
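# Map the denoising_start fraction onto the training-timestep scale and keep only the timesteps below that cutoff (the later denoising steps).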
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- denoising_start * self.scheduler.config.num_train_timesteps
)
)
timesteps = list(filter(lambda ts: ts < discrete_timestep_cutoff, timesteps))
return paddle.to_tensor(data=timesteps), len(timesteps)
return timesteps, num_inference_steps - t_start
# Copied from ppdiffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
def _get_add_time_ids(
self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype
):
if self.config.requires_aesthetics_score:
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
else:
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.weight.shape[0]
if (
expected_add_embed_dim > passed_add_embed_dim
and expected_add_embed_dim - passed_add_embed_dim == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
)
elif (
expected_add_embed_dim < passed_add_embed_dim
and passed_add_embed_dim - expected_add_embed_dim == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
)
elif expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = paddle.to_tensor(data=[add_time_ids], dtype=dtype)
add_neg_time_ids = paddle.to_tensor(data=[add_neg_time_ids], dtype=dtype)
return add_time_ids, add_neg_time_ids
# Copied from ppdiffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
def upcast_vae(self):
dtype = self.vae.dtype
self.vae.to(dtype="float32")
use_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
),
)
if use_xformers:
self.vae.post_quant_conv.to(dtype=dtype)
self.vae.decoder.conv_in.to(dtype=dtype)
self.vae.decoder.mid_block.to(dtype=dtype)
@paddle.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: Union[paddle.Tensor, PIL.Image.Image] = None,
mask_image: Union[paddle.Tensor, PIL.Image.Image] = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 1.0,
num_inference_steps: int = 50,
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
latents: Optional[paddle.Tensor] = None,
prompt_embeds: Optional[paddle.Tensor] = None,
negative_prompt_embeds: Optional[paddle.Tensor] = None,
pooled_prompt_embeds: Optional[paddle.Tensor] = None,
negative_pooled_prompt_embeds: Optional[paddle.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
):
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
used in both text-encoders
image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
be masked out with `mask_image` and repainted according to `prompt`.
mask_image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be `(B, H, W, 1)`.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
strength (`float`, *optional*, defaults to 1.):
Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
`strength`. The number of denoising steps depends on the amount of noise initially added. When
`strength` is 1, added noise will be maximum and the denoising process will run for the full number of
iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
integer, the value of `strength` will be ignored.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
denoising_start (`float`, *optional*):
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
denoising_end (`float`, *optional*):
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
completed before it is intentionally prematurely terminated. As a result, the returned sample will
still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
negative_prompt_2 (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`paddle.Generator`, *optional*):
One or a list of paddle generator(s) to make generation deterministic.
latents (`paddle.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in ppdiffusers.cross_attention.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
`original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
For most cases, `target_size` should be set to the desired height and width of the generated image. If
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
aesthetic_score (`float`, *optional*, defaults to 6.0):
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
Part of SDXL's micro-conditioning as explained in section 2.2 of
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
simulate an aesthetic score of the generated image by influencing the negative text condition.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
self.check_inputs(
prompt,
prompt_2,
height,
width,
strength,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. set timesteps
def denoising_value_valid(dnv):
return type(denoising_end) == float and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps)
timesteps, num_inference_steps = self.get_timesteps(
num_inference_steps, strength, denoising_start=denoising_start if denoising_value_valid(denoising_start) else None
)
# check that number of inference steps is not < 1 - as this doesn't make sense
if num_inference_steps < 1:
raise ValueError(
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
)
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
latent_timestep = timesteps[:1].tile(repeat_times=[batch_size * num_images_per_prompt])
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
is_strength_max = strength == 1.0
# 5. Preprocess mask and image
mask, masked_image, init_image = prepare_mask_and_masked_image(
image, mask_image, height, width, return_image=True
)
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
num_channels_unet = self.unet.config.in_channels
return_image_latents = num_channels_unet == 4
add_noise = True if denoising_start is None else False
latents_outputs = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
generator,
latents,
image=init_image,
timestep=latent_timestep,
is_strength_max=is_strength_max,
add_noise=add_noise,
return_noise=True,
return_image_latents=return_image_latents,
)
if return_image_latents:
latents, noise, image_latents = latents_outputs
else:
latents, noise = latents_outputs
# 7. Prepare mask latent variables
mask, masked_image_latents = self.prepare_mask_latents(
mask,
masked_image,
batch_size * num_images_per_prompt,
height,
width,
prompt_embeds.dtype,
generator,
do_classifier_free_guidance,
)
# 8. Check that sizes of mask, masked image and latents match
if num_channels_unet == 9:
# default case for runwayml/stable-diffusion-inpainting
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} + `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image} = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of `pipeline.unet` or your `mask_image` or `image` input."
)
elif num_channels_unet != 4:
raise ValueError(
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
)
# 8.1 Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
        # 9. Recompute the output height and width from the latent shape
height, width = latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 10. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
dtype=prompt_embeds.dtype,
)
add_time_ids = add_time_ids.tile(repeat_times=[batch_size * num_images_per_prompt, 1])
if do_classifier_free_guidance:
prompt_embeds = paddle.concat(x=[negative_prompt_embeds, prompt_embeds], axis=0)
add_text_embeds = paddle.concat(x=[negative_pooled_prompt_embeds, add_text_embeds], axis=0)
add_neg_time_ids = add_neg_time_ids.tile(repeat_times=[batch_size * num_images_per_prompt, 1])
add_time_ids = paddle.concat(x=[add_neg_time_ids, add_time_ids], axis=0)
# 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
if (
denoising_end is not None
and denoising_start is not None
and denoising_value_valid(denoising_end)
and denoising_value_valid(denoising_start)
and denoising_start >= denoising_end
):
raise ValueError(
f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {denoising_end} when using type float."
)
elif denoising_end is not None and denoising_value_valid(denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- denoising_end * self.scheduler.config.num_train_timesteps
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = paddle.concat(x=[latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # concat latents, mask, masked_image_latents in the channel dimension
if num_channels_unet == 9:
latent_model_input = paddle.concat(x=[latent_model_input, mask, masked_image_latents], axis=1)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(chunks=2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if num_channels_unet == 4:
init_latents_proper = image_latents[:1]
init_mask = mask[:1]
if i < len(timesteps) - 1:
noise_timestep = timesteps[i + 1]
init_latents_proper = self.scheduler.add_noise(
init_latents_proper, noise, paddle.to_tensor(data=[noise_timestep])
)
                    latents = (1 - init_mask) * init_latents_proper + init_mask * latents
                # call the callback, if provided
if i == len(timesteps) - 1 or i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0:
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
# make sure the VAE is in float32 mode, as it overflows in float16
if (self.vae.dtype == paddle.float16 or self.vae.dtype == "float16") and self.vae.config.force_upcast:
self.upcast_vae()
latents = latents.cast(next(iter(self.vae.post_quant_conv.parameters())).dtype)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
return StableDiffusionXLPipelineOutput(images=latents)
# apply watermark if available
if self.watermark is not None:
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
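# Usage sketch for the call above (illustrative only; the pipeline class name and the
# checkpoint id are assumptions, since they are defined outside this excerpt):
#   pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0", paddle_dtype=paddle.float16
#   )
#   result = pipe(prompt="a red sports car", image=init_image, mask_image=mask_image, strength=0.8)
#   result.images[0].save("inpainted.png")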
| [
"[]",
"[PLACEHOLDER, PLACEHOLDER]",
"False"
] |
2024-01-10 | westfish/PaddleMIX | ppdiffusers~ppdiffusers~pipelines~stable_diffusion~pipeline_stable_diffusion.py | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Union
import paddle
from packaging import version
from paddlenlp.transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from ...configuration_utils import FrozenDict
from ...image_processor import VaeImageProcessor
from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import deprecate, logging, randn_tensor, replace_example_docstring
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker
logger = logging.get_logger(__name__)
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import paddle
>>> from ppdiffusers import StableDiffusionPipeline
>>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", paddle_dtype=paddle.float16)
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt).images[0]
```
"""
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(axis=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(axis=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
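# Illustrative sketch (not part of the pipeline): how `rescale_noise_cfg` is typically
# combined with classifier-free guidance; the guidance values below are assumptions.
#   noise_pred_uncond, noise_pred_text = noise_pred.chunk(chunks=2)
#   noise_pred = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
#   noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=0.7)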
class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin):
"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer ([`~transformers.CLIPTokenizer`]):
A `CLIPTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
about a model's potential harms.
feature_extractor ([`~transformers.CLIPImageProcessor`]):
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
):
super().__init__()
if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
deprecation_message = f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset` should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure to update the config accordingly as leaving `steps_offset` might led to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
deprecation_message = f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`. `clip_sample` should be set to False in the configuration file. Please make sure to update the config accordingly as not setting `clip_sample` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["clip_sample"] = False
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
version.parse(unet.config._diffusers_version).base_version
) < version.parse("0.9.0.dev0")
is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = """The configuration file of the unet has set the default `sample_size` to smaller than 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the following:
- CompVis/stable-diffusion-v1-4
- CompVis/stable-diffusion-v1-3
- CompVis/stable-diffusion-v1-2
- CompVis/stable-diffusion-v1-1
- runwayml/stable-diffusion-v1-5
- runwayml/stable-diffusion-inpainting
you should change 'sample_size' to 64 in the configuration file. Please make sure to update the config accordingly as leaving `sample_size=32` in the config might lead to incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for the `unet/config.json` file"""
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.register_to_config(requires_safety_checker=requires_safety_checker)
def enable_vae_slicing(self):
"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
def disable_vae_slicing(self):
"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
def enable_vae_tiling(self):
"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
self.vae.enable_tiling()
def disable_vae_tiling(self):
"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def _encode_prompt(
self,
prompt,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[paddle.Tensor] = None,
negative_prompt_embeds: Optional[paddle.Tensor] = None,
lora_scale: Optional[float] = None,
):
"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pd",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pd").input_ids
if (
untruncated_ids.shape[-1] >= text_input_ids.shape[-1]
and not paddle.equal_all(x=text_input_ids, y=untruncated_ids).item()
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
f"The following part of your input was truncated because CLIP can only handle sequences up to {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask
else:
attention_mask = None
prompt_embeds = self.text_encoder(text_input_ids, attention_mask=attention_mask)
prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.cast(dtype=self.text_encoder.dtype)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.tile(repeat_times=[1, num_images_per_prompt, 1])
prompt_embeds = prompt_embeds.reshape([bs_embed * num_images_per_prompt, seq_len, -1])
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} != {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`: {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
            # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pd"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(uncond_input.input_ids, attention_mask=attention_mask)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.cast(dtype=self.text_encoder.dtype)
negative_prompt_embeds = negative_prompt_embeds.tile(repeat_times=[1, num_images_per_prompt, 1])
negative_prompt_embeds = negative_prompt_embeds.reshape([batch_size * num_images_per_prompt, seq_len, -1])
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = paddle.concat(x=[negative_prompt_embeds, prompt_embeds])
return prompt_embeds
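    # Shape note (a sketch, not extra behaviour): with classifier-free guidance the returned
    # tensor is the concatenation [negative_prompt_embeds, prompt_embeds] along the batch
    # axis, i.e. (2 * batch_size * num_images_per_prompt, seq_len, hidden_dim); without
    # guidance it is (batch_size * num_images_per_prompt, seq_len, hidden_dim).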
def run_safety_checker(self, image, dtype):
if self.safety_checker is None:
has_nsfw_concept = None
else:
if paddle.is_tensor(x=image):
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
else:
feature_extractor_input = self.image_processor.numpy_to_pil(image)
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pd")
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.cast(dtype)
)
return image, has_nsfw_concept
def decode_latents(self, latents):
warnings.warn(
"The decode_latents method is deprecated and will be removed in a future version. Please use VaeImageProcessor instead",
FutureWarning,
)
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents, return_dict=False)[0]
image = (image / 2 + 0.5).clip(min=0, max=1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().transpose(perm=[0, 2, 3, 1]).astype(dtype="float32").numpy()
return image
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (
callback_steps is None
or callback_steps is not None
and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
f"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds` {negative_prompt_embeds.shape}."
)
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch size of {batch_size}. Make sure the batch size matches the length of the generators."
)
        if latents is None:
            latents = randn_tensor(shape, generator=generator, dtype=dtype)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents
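    # Example (a sketch, assuming the default SD 1.5 configuration): for a 512x512 request
    # with vae_scale_factor=8 and num_channels_latents=4, this returns latents of shape
    # (batch_size, 4, 64, 64) — the caller passes batch_size * num_images_per_prompt —
    # already scaled by `scheduler.init_noise_sigma`.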
@paddle.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
latents: Optional[paddle.Tensor] = None,
prompt_embeds: Optional[paddle.Tensor] = None,
negative_prompt_embeds: Optional[paddle.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
):
"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`paddle.Generator` or `List[paddle.Generator]`, *optional*):
                A `paddle.Generator` to make generation deterministic.
latents (`paddle.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`paddle.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that calls every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds = self._encode_prompt(
prompt,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
# 7. Denoising loop
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = paddle.concat(x=[latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(chunks=2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or i + 1 > num_warmup_steps and (i + 1) % self.scheduler.order == 0:
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [(not has_nsfw) for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
if not return_dict:
return image, has_nsfw_concept
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| [] |
2024-01-10 | Lassehhansen/Arxiv_search | src~claim_search_openai.py | import json
import re
import openai
import time
from tqdm import tqdm
import pandas as pd
from typing import List, Tuple
def create_keyword_pattern(keywords):
"""
Create a regex pattern for keyword matching.
"""
pattern = r'(?:(?<=\W)|(?<=^))(' + '|'.join(map(re.escape, keywords)) + r')(?=\W|$)'
return re.compile(pattern, re.IGNORECASE)
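# Usage sketch (illustrative keywords):
#   pattern = create_keyword_pattern(["bias", "fairness"])
#   bool(pattern.search("Reporting bias in clinical NLP"))  # -> True (case-insensitive, word-bounded)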
def get_context_windows(text, keywords, window_size):
"""
Extract context windows around keywords in the text, ensuring that overlapping
windows are merged into one.
"""
context_windows = []
keyword_pattern = create_keyword_pattern(keywords)
matches = list(keyword_pattern.finditer(text))
merged_matches = []
i = 0
while i < len(matches):
current_match = matches[i]
start_pos = current_match.start()
end_pos = current_match.end()
# Check if the next matches are within the window size
while i + 1 < len(matches) and matches[i + 1].start() - end_pos <= window_size * 2:
i += 1
end_pos = matches[i].end()
merged_matches.append((start_pos, end_pos))
i += 1
for start_pos, end_pos in merged_matches:
words = text.split()
start_word_pos = len(text[:start_pos].split()) - 1
end_word_pos = len(text[:end_pos].split())
context_words = words[max(0, start_word_pos - window_size):min(len(words), end_word_pos + window_size)]
context_str = ' '.join(context_words)
context_windows.append(context_str)
return context_windows
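# Usage sketch (illustrative values): keyword hits that are close together are merged
# into a single window of surrounding words.
#   get_context_windows("a b c bias d e f g", ["bias"], window_size=2)
#   # -> ['a b c bias d e']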
def process_with_gpt(context_window, model, system_prompt, openai_api_key, delay=1.0):
"""
Processes a context window with the GPT model to extract relevant information based on the given prompt.
Parameters:
- context_window (str): The context window text to be processed.
- model (str): The OpenAI GPT model to be used.
- system_prompt (str): The prompt to be used for querying the GPT model.
- openai_api_key (str): The API key for OpenAI.
- delay (float): Delay between API calls in seconds to manage rate limits.
Returns:
- str: The GPT response as a string.
"""
openai.api_key = openai_api_key
retry_delay = 1 # Initial delay in seconds for retries
max_retries = 5 # Maximum number of retries
for attempt in range(max_retries):
try:
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": context_window}
]
)
return response.choices[0].message.content
except openai.error.RateLimitError as e:
print(f"Rate limit reached, retrying in {retry_delay} seconds...")
time.sleep(retry_delay)
retry_delay *= 2 # Exponential backoff
except Exception as e:
print(f"Failed processing context window with error: {e}")
return f"Error: {str(e)}"
time.sleep(delay) # Delay between API calls
return "Error: Rate limit retries exceeded"
| [] |
2024-01-10 | Soullesskid/MLSPF | News_Data~GPTscore.py | import openai
import pandas as pd
import requests
from tqdm import tqdm
import time
def score_title(title):
message = {
"role": "user",
"content": f"Please rate the following article titles on a scale of -1 to 1, with higher scores being more positive and a score of 0 being emotionally neutral,you only need the score and no other information: \"{title}\""
}
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[message]
)
# return response
score_flt = float(response.choices[0].message.content.strip())
return score_flt
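# Usage sketch (illustrative; requires `openai.api_key` to be set first, as done below).
# The returned values here are hypothetical examples of what the model might produce;
# the prompt asks the model for a single float in [-1, 1]:
#   score_title("Company reports record quarterly profits")    # e.g. 0.8
#   score_title("Factory closure puts thousands out of work")  # e.g. -0.7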
api_key = "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"
# czy's API key. For demonstration purposes only; please do not use this API key extensively.
openai.api_key = api_key
input_file = "your_output_csv_file.csv"
output_file = "your_output_csv_file.csv"
df = pd.read_csv(input_file)
# If the "scoregpt" column does not exist, create a new empty column
if "scoregpt" not in df.columns:
df["scoregpt"] = None
i = 0
y = 0
for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing titles"):
    i += 1
    y += 1
    # Only score rows after the first 50 (e.g. to resume a previously scored file).
    if y > 50:
        title = row["Title"]
        score = score_title(title)
        time.sleep(15)  # Frequent requests will be actively rejected by the API
        df.at[idx, "scoregpt"] = score
    # Checkpoint the CSV every 5 rows so progress is not lost on interruption.
    if i % 5 == 0:
        df.to_csv(output_file, index=False)
df.to_csv(output_file, index=False)
print(f"Emotion scores have been saved to '{output_file}'.")
| [
"Please rate the following article titles on a scale of -1 to 1, with higher scores being more positive and a score of 0 being emotionally neutral,you only need the score and no other information: \"PLACEHOLDER\""
] |
2024-01-10 | maarikamarkus/image-description-generator | image_description_generator.py | import io
import json
import os
import base64
import openai
import requests
from dotenv import load_dotenv
from google.cloud import vision
from PIL import ExifTags, Image
from google.oauth2 import service_account
load_dotenv()
GOOGLE_API_KEY = os.environ.get('google_api_key')
OPENAI_API_KEY = os.environ.get('openai_api_key')
GOOGLE_CREDENTIALS = os.environ.get('google_credentials')
credentials = service_account.Credentials.from_service_account_info(
json.loads(base64.b64decode(GOOGLE_CREDENTIALS))
)
def get_img_keywords(file_name):
client = vision.ImageAnnotatorClient(credentials=credentials)
file_name = os.path.abspath(file_name)
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.label_detection(image=image)
labels = response.label_annotations
labels = list(map(lambda x : x.description.lower(), labels))
return labels
def get_img_gps_location(path):
img = Image.open(path)
exif_data = img._getexif()
if exif_data is None:
return (None, None)
exif = {
ExifTags.TAGS[k]: v
for k, v in exif_data.items()
if k in ExifTags.TAGS
}
if 'GPSInfo' not in exif:
return (None, None)
gps_info = exif['GPSInfo']
    if gps_info is None:
return (None, None)
lat, lng = to_decimal(gps_info[2]), to_decimal(gps_info[4])
if gps_info[1] == 'S':
lat *= -1
elif gps_info[3] == 'W':
lng *= -1
return (lat, lng)
def to_decimal(x):
deg, mins, secs = x
return float(deg + mins/60 + secs/60/60)
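# Worked example (illustrative values): EXIF stores coordinates as (degrees, minutes, seconds),
# so to_decimal((59, 26, 13.2)) = 59 + 26/60 + 13.2/3600 ≈ 59.437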
def get_address(path):
lat, lng = get_img_gps_location(path)
if lat is None or lng is None:
return None
r = requests.get(f'https://maps.googleapis.com/maps/api/geocode/json?latlng={lat},{lng}&key={GOOGLE_API_KEY}').json()
if r['status'] != "OK":
return None
    # Take the country and locality name from the geocoding result.
return [comp['long_name'] for comp in r['results'][0]['address_components'] \
if 'country' in comp['types'] or 'locality' in comp['types']]
def get_img_desc(img_path):
img_keywords = get_img_keywords(img_path)
loc_keywords = get_address(img_path)
openai.api_key = OPENAI_API_KEY
if loc_keywords is None:
prompt = f"Describe picture using keywords: {', '.join(img_keywords)}"
else:
prompt = f"Describe picture taken at {', '.join(loc_keywords)} using keywords: {', '.join(img_keywords)}"
response = openai.Completion.create(
engine="davinci-instruct-beta-v3",
prompt=prompt,
max_tokens=100
)
desc = response['choices'][0]['text'].strip()
return desc
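# Usage sketch (illustrative path; requires the Google/OpenAI credentials configured above):
#   print(get_img_desc("photos/IMG_1234.jpg"))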
| [
", "
] |
2024-01-10 | LukeHackett12/MODRL | tensorflow_agents~mario_baseline.py | from nes_py.wrappers import JoypadSpace
import gym_super_mario_bros
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, RIGHT_ONLY
import gym
from gym.spaces import Box
from gym.wrappers import FrameStack, GrayScaleObservation, ResizeObservation
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from typing import NamedTuple
from collections import namedtuple, deque
import random
import datetime
import time
import tensorflow as tf
from tensorflow import keras, Tensor
class Transition(NamedTuple):
currStates: Tensor
actions: Tensor
rewards: Tensor
nextStates: Tensor
dones: Tensor
class DQNAgent:
def __init__(self, stateShape, actionSpace, numPicks, memorySize, sync=100000, burnin=100, alpha=0.0001, epsilon=1, epsilon_decay=0.99999975, epsilon_min=0.01, gamma=0.9):
self.numPicks = numPicks
self.replayMemory = deque(maxlen=memorySize)
self.stateShape = stateShape
self.actionSpace = actionSpace
self.step = 0
self.sync = sync
self.burnin = burnin
self.alpha = alpha
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.epsilon_min = epsilon_min
self.gamma = gamma
self.trainNetwork = self.createNetwork(
stateShape, actionSpace.n, self.alpha)
self.targetNetwork = self.createNetwork(
stateShape, actionSpace.n, self.alpha)
self.targetNetwork.set_weights(self.trainNetwork.get_weights())
def createNetwork(self, n_input, n_output, learningRate):
model = keras.models.Sequential()
model.add(keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=n_input))
model.add(keras.layers.Conv2D(32, kernel_size=8, strides=4, activation='relu'))
model.add(keras.layers.Conv2D(64, kernel_size=4, strides=2, activation='relu'))
model.add(keras.layers.Conv2D(64, kernel_size=3, strides=1, activation='relu'))
model.add(keras.layers.Flatten())
        # hidden layer uses ReLU; a linear activation here would collapse the two dense layers into one linear map
        model.add(keras.layers.Dense(512, activation='relu'))
        model.add(keras.layers.Dense(n_output, activation='linear'))
        model.compile(loss=keras.losses.Huber(), optimizer=keras.optimizers.Adam(learning_rate=learningRate))
print(model.summary())
return model
def trainDQN(self):
if len(self.replayMemory) <= self.numPicks or len(self.replayMemory) <= self.burnin:
return 0
samples = random.sample(self.replayMemory, self.numPicks)
batch = Transition(*zip(*samples))
        currStates, actions, rewards, nextStates, dones = batch
        currStates = np.squeeze(np.array(currStates))
        Q_currents = self.trainNetwork(currStates, training=False).numpy()
        nextStates = np.squeeze(np.array(nextStates))
        Q_futures = self.targetNetwork(nextStates, training=False).numpy().max(axis=1)
        rewards = np.array(rewards).reshape(self.numPicks,).astype(float)
        actions = np.array(actions).reshape(self.numPicks,).astype(int)
        dones = np.array(dones).reshape(self.numPicks,).astype(float)
        # Bellman update; terminal transitions (done=1) contribute only the immediate reward
        Q_currents[np.arange(self.numPicks), actions] = rewards + (1 - dones) * Q_futures * self.gamma
        # train_on_batch returns the scalar loss directly (not a History object)
        loss = self.trainNetwork.train_on_batch(currStates, Q_currents)
        return loss
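    # The update in trainDQN follows the standard DQN target (a sketch of the math, not new
    # behaviour): for each sampled transition (s, a, r, s', done),
    #   target(s, a) = r + (1 - done) * gamma * max_a' Q_target(s', a')
    # The online/train network is regressed toward this target with the Huber loss compiled
    # above, while the target network is only refreshed every `sync` steps (see selectAction).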
def selectAction(self, state):
self.step += 1
if self.step % self.sync == 0:
self.targetNetwork.set_weights(self.trainNetwork.get_weights())
q = -100000
if np.random.rand(1) < self.epsilon:
action = np.random.randint(0, 3)
else:
preds = np.squeeze(self.trainNetwork(
state, training=False).numpy(), axis=0)
action = np.argmax(preds)
q = preds[action]
        # Decay epsilon toward epsilon_min (multiplicative schedule); without this the
        # agent would keep acting fully at random since epsilon starts at 1.
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)
        return action, q
def addMemory(self, memory):
self.replayMemory.append(memory)
def save(self, ep):
save_path = (
f"./mario_{int(ep)}.chkpt"
)
self.trainNetwork.save(save_path)
print(f"MarioNet saved to {save_path} done!")
class SkipFrame(gym.Wrapper):
def __init__(self, env, skip):
"""Return only every `skip`-th frame"""
super().__init__(env)
self._skip = skip
def step(self, action):
"""Repeat action, and sum reward"""
total_reward = 0.0
done = False
for i in range(self._skip):
# Accumulate reward and repeat the same action
obs, reward, done, info = self.env.step(action)
total_reward += reward
if done:
break
return obs, total_reward, done, info
# Taken from OpenAI baselines: https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class MarioBaseline(object):
def __init__(self, episodes):
self.current_episode = 0
self.episodes = episodes
self.episode_score = []
self.episode_qs = []
self.episode_distance = []
self.episode_loss = []
self.fig, self.ax = plt.subplots(2, 2)
self.fig.canvas.draw()
plt.show(block=False)
self.env = gym_super_mario_bros.make('SuperMarioBros-v0')
# Apply Observation Wrappers
self.env = GrayScaleObservation(self.env)
self.env = ResizeObservation(self.env, 84)
# Apply Control Wrappers
self.env = JoypadSpace(self.env, SIMPLE_MOVEMENT)
self.env = NoopResetEnv(self.env)
# Apply Frame Wrappers
self.env = SkipFrame(self.env, 4)
self.env = FrameStack(self.env, 4)
self.agent = DQNAgent(stateShape=(84, 84, 4),
actionSpace=self.env.action_space, numPicks=32, memorySize=100000)
def train(self):
for _ in range(self.episodes):
self.episode()
self.plot()
self.current_episode += 1
self.env.close()
def episode(self):
done = False
rewardsSum = 0
qSum = 0
qActions = 1
lossSum = 0
state = np.array(self.env.reset()).transpose(3, 1, 2, 0)
maxDistance = -1000000
lastX = 0
while not done:
action, q = self.agent.selectAction(state)
if q != -100000:
qSum += q
qActions += 1
obs, reward, done, info = self.env.step(action)
self.env.render()
if info['x_pos'] < lastX:
reward -= 1
if info['flag_get']:
reward += 10
            if info['x_pos'] > maxDistance:
                maxDistance = info['x_pos']
            lastX = info['x_pos']
nextState = np.array(obs).transpose(3, 1, 2, 0)
rewardsSum = np.add(rewardsSum, reward)
self.agent.addMemory((state, action, reward, nextState, done))
loss = self.agent.trainDQN()
state = nextState
lossSum += loss
if self.current_episode % 200 == 0:
self.agent.save(self.current_episode)
print("now epsilon is {}, the reward is {} with loss {} in episode {}".format(
self.agent.epsilon, rewardsSum, lossSum, self.current_episode))
self.episode_score.append(rewardsSum)
self.episode_qs.append(qSum/qActions)
self.episode_distance.append(maxDistance)
self.episode_loss.append(lossSum)
def plot(self):
self.ax[0][0].title.set_text('Training Score')
self.ax[0][0].set_xlabel('Episode')
self.ax[0][0].set_ylabel('Score')
self.ax[0][0].plot(self.episode_score, 'b')
self.ax[0][1].title.set_text('Training Distance')
self.ax[0][1].set_xlabel('Episode')
self.ax[0][1].set_ylabel('Distance')
self.ax[0][1].plot(self.episode_distance, 'g')
self.ax[1][0].title.set_text('Training Loss')
self.ax[1][0].set_xlabel('Episode')
self.ax[1][0].set_ylabel('Loss')
self.ax[1][0].plot(self.episode_loss, 'r')
self.ax[1][1].title.set_text('Training Q Vals')
self.ax[1][1].set_xlabel('Episode')
self.ax[1][1].set_ylabel('Qs')
self.ax[1][1].plot(self.episode_qs, 'c')
self.fig.canvas.draw()
plt.show(block=False)
plt.pause(.001)
| [] |
2024-01-10 | LukeHackett12/MODRL | agentRunner.py | import argparse
from tensorflow_agents.mountain_car_model_tester import MountainCarModelTester
from tensorflow_agents.mountain_car_mo_dqn import MultiObjectiveMountainCarDQN
from tensorflow_agents.mountain_car_mo_ddqn import MultiObjectiveMountainCarDDQN
from tensorflow_agents.mountain_car_mo_pddqn import MultiObjectiveMountainCarPDDQN
from tensorflow_agents.mountain_car_mo_wpddqn import MultiObjectiveWMountainCar
from tensorflow_agents.mountain_car_graphical_ddqn import MountainCarGraphicalDDQN
from tensorflow_agents.mountain_car_open_ai import OpenAIMountainCar
from tensorflow_agents.deep_sea_baseline_dqn import DeepSeaTreasureBaselineDQN
from tensorflow_agents.deep_sea_baseline_ddqn import DeepSeaTreasureBaselineDDQN
from tensorflow_agents.deep_sea_graphical_pddqn import DeepSeaTreasureGraphicalPDDQN
from tensorflow_agents.deep_sea_graphical_ddqn import DeepSeaTreasureGraphicalDDQN
from tensorflow_agents.deep_sea_graphical_dqn import DeepSeaTreasureGraphicalDQN
from tensorflow_agents.deep_sea_mo_wdqn import DeepSeaWAgent
from tensorflow_agents.deep_sea_graphical_wpddqn import MultiObjectiveDeepSeaW
from tensorflow_agents.mario_baseline import MarioBaseline
parser = argparse.ArgumentParser(description='Run the selected agent model for a game')
parser.add_argument("-a", "--agentArg", required=True)
args = parser.parse_args()
agentArg = args.agentArg
if agentArg == 'mountain_car_mo_dqn':
agent = MultiObjectiveMountainCarDQN(1001)
elif agentArg == 'mountain_car_mo_ddqn':
agent = MultiObjectiveMountainCarDDQN(1001)
elif agentArg == 'mountain_car_mo_pddqn':
agent = MultiObjectiveMountainCarPDDQN(1001)
elif agentArg == 'mountain_car_mo_wpddqn':
agent = MultiObjectiveWMountainCar(5000)
elif agentArg == 'mountain_car_graphical_ddqn':
agent = MountainCarGraphicalDDQN(5000)
elif agentArg == 'mountain_car_open_ai':
agent = OpenAIMountainCar(2000)
elif agentArg == 'deep_sea_baseline_ddqn':
agent = DeepSeaTreasureBaselineDDQN(350)
elif agentArg == 'deep_sea_graphical_pddqn':
agent = DeepSeaTreasureGraphicalPDDQN(301)
elif agentArg == 'deep_sea_baseline_dqn':
agent = DeepSeaTreasureBaselineDQN(300)
elif agentArg == 'deep_sea_mo_wdqn':
agent = DeepSeaWAgent(2000)
elif agentArg == 'deep_sea_graphical_ddqn':
agent = DeepSeaTreasureGraphicalDDQN(1501)
elif agentArg == 'deep_sea_graphical_dqn':
agent = DeepSeaTreasureGraphicalDQN(2001)
elif agentArg == 'deep_sea_graphical_wpddqn':
agent = MultiObjectiveDeepSeaW(301)
elif agentArg == 'mario_baseline':
agent = MarioBaseline(2000)
agent.train()
'''
agentArg = MountainCarModelTester("./mountain_car_wnet_54540.chkpt")
agentArg.test()
'''
| [] |
2024-01-10 | LukeHackett12/MODRL | mario_tf.py | import torch
from torch import FloatTensor, LongTensor, BoolTensor
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms as T
import datetime
import random
from collections import namedtuple, deque
from typing import NamedTuple
import numpy as np
from gym.wrappers import FrameStack
from gym.spaces import Box
import gym
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, RIGHT_ONLY
import gym_super_mario_bros
from nes_py.wrappers import JoypadSpace
import math
from copy import deepcopy
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
class Transition(NamedTuple):
currStates: FloatTensor
actions: LongTensor
rewards: FloatTensor
nextStates: FloatTensor
dones: LongTensor
class DQN(nn.Module):
def __init__(self, input_shape, num_actions):
super(DQN, self).__init__()
self._input_shape = input_shape
self._num_actions = num_actions
self.features = nn.Sequential(
nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU()
)
self.fc = nn.Sequential(
nn.Linear(self.feature_size, 512),
nn.ReLU(),
nn.Linear(512, num_actions)
)
def forward(self, x):
x = self.features(x).view(x.size()[0], -1)
return self.fc(x)
@property
def feature_size(self):
x = self.features(torch.zeros(1, *self._input_shape))
return x.view(1, -1).size(1)
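# Shape sanity check (a sketch; the (4, 84, 84) input and the 7-action SIMPLE_MOVEMENT space
# match the wrappers used later in this file, but the snippet itself is only illustrative):
#   net = DQN((4, 84, 84), 7)
#   out = net(torch.zeros(1, 4, 84, 84))  # -> tensor of shape (1, 7)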
class DQNAgent:
def __init__(self, stateShape, actionSpace, numPicks, memorySize, sync=1000, burnin=100, alpha=0.00025, epsilon=1, epsilon_decay=100000, epsilon_min=0.01, gamma=0.95, checkpoint=None):
self.numPicks = numPicks
self.replayMemory = deque(maxlen=memorySize)
self.stateShape = stateShape
self.actionSpace = actionSpace
self.step = 0
self.sync = sync
self.burnin = burnin
self.alpha = alpha
self.epsilon_start = epsilon
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.epsilon_min = epsilon_min
self.gamma = gamma
if checkpoint != None:
self.trainNetwork = torch.load(checkpoint).to(device)
else:
self.trainNetwork = DQN(stateShape, actionSpace.n).to(device)
self.targetNetwork = DQN(stateShape, actionSpace.n).to(device)
self.targetNetwork.load_state_dict(self.trainNetwork.state_dict())
        # Optimize the online/train network; the target network is only updated by syncing weights
        self.optimizer = optim.Adam(self.trainNetwork.parameters(), self.alpha)
self.lossfn = torch.nn.SmoothL1Loss()
def trainDQN(self):
if len(self.replayMemory) <= self.numPicks or len(self.replayMemory) <= self.burnin:
return 0
#indices = np.random.choice([i for i in range(len(self.replayMemory))], self.numPicks, replace=False)
samples = random.sample(self.replayMemory, self.numPicks)
batch = Transition(*zip(*samples))
currStates, actions, rewards, nextStates, done = batch
rewards = torch.stack(rewards).squeeze().to(device)
actions = torch.stack(actions).squeeze().to(device)
done = torch.stack(done).squeeze().to(device)
currStates = torch.stack(currStates).to(device)
nextStates = torch.stack(nextStates).to(device)
Q_currents = self.trainNetwork(currStates)[np.arange(self.numPicks), actions]
        Q_futures = self.targetNetwork(nextStates).max(1).values.detach()
Q_currents_improved = rewards + (1-done) * Q_futures * self.gamma
loss = self.lossfn(Q_currents, Q_currents_improved)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
def selectAction(self, state):
self.step += 1
        q_value = -100000
        if np.random.rand(1) < self.epsilon:
            action = np.random.randint(0, 3)
        else:
            state = torch.FloatTensor(np.float32(state)).squeeze().unsqueeze(0).to(device)
            preds = self.trainNetwork(state)
            action = torch.argmax(preds, axis=1).item()
            q_value = torch.max(preds, axis=1).values.item()
        return action, q_value
def addMemory(self, state, action, reward, next_state, done):
self.replayMemory.append((state, action, reward, next_state, done))
def save(self, ep):
save_path = (
f"./mario_torch_{int(ep)}.chkpt"
)
torch.save(self.trainNetwork, save_path)
print(f"MarioNet saved to {save_path} done!")
class GrayScaleObservation(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
obs_shape = self.observation_space.shape[:2]
self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)
def permute_orientation(self, observation):
# permute [H, W, C] array to [C, H, W] tensor
observation = np.transpose(observation, (2, 0, 1))
observation = torch.tensor(observation.copy(), dtype=torch.float)
return observation
def observation(self, observation):
observation = self.permute_orientation(observation)
transform = T.Grayscale()
observation = transform(observation)
return observation
class ResizeObservation(gym.ObservationWrapper):
def __init__(self, env, shape):
super().__init__(env)
if isinstance(shape, int):
self.shape = (shape, shape)
else:
self.shape = tuple(shape)
obs_shape = self.shape + self.observation_space.shape[2:]
self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)
def observation(self, observation):
transforms = T.Compose(
[T.Resize(self.shape), T.Normalize(0, 255)]
)
observation = transforms(observation).squeeze(0)
return observation
class SkipFrame(gym.Wrapper):
def __init__(self, env, skip):
"""Return only every `skip`-th frame"""
super().__init__(env)
self._skip = skip
def step(self, action):
"""Repeat action, and sum reward"""
total_reward = 0.0
done = False
for i in range(self._skip):
# Accumulate reward and repeat the same action
obs, reward, done, info = self.env.step(action)
total_reward += reward
if done:
break
return obs, total_reward, done, info
# Taken from OpenAI baselines: https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class MarioBaseline(object):
def __init__(self, episodes, checkpoint, current_episode, epsilon):
self.current_episode = current_episode
self.episodes = episodes
self.episode_score = []
self.episode_qs = []
self.episode_distance = []
self.episode_loss = []
self.env = gym_super_mario_bros.make('SuperMarioBros-1-1-v0')
self.env = JoypadSpace(self.env, SIMPLE_MOVEMENT)
# Apply Frame Wrappers
self.env = SkipFrame(self.env, 4)
self.env = GrayScaleObservation(self.env)
self.env = ResizeObservation(self.env, 84)
self.env = FrameStack(self.env, 4)
self.agent = DQNAgent(stateShape=(4, 84, 84), actionSpace=self.env.action_space, numPicks=32, memorySize=20000, epsilon=epsilon, checkpoint=checkpoint)
def train(self):
for _ in range(self.episodes):
self.episode()
self.current_episode += 1
self.env.close()
def episode(self):
done = False
rewardsSum = 0
qSum = 0
qActions = 1
lossSum = 0
state = np.array(self.env.reset())
maxDistance = -1000000
while not done:
action, q = self.agent.selectAction(state)
'''
if q != -100000:
qSum += q
qActions += 1
'''
obs, reward, done, info = self.env.step(action)
if info['x_pos'] > maxDistance:
maxDistance = info['x_pos']
next_state = np.array(obs)
rewardsSum = np.add(rewardsSum, reward)
self.agent.addMemory(FloatTensor(state), LongTensor([action]), FloatTensor([reward]), FloatTensor(next_state), LongTensor([done]))
loss = self.agent.trainDQN()
state = next_state
lossSum += loss
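            # Sync the target network with the online network every `sync` steps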
if self.agent.step % self.agent.sync == 0:
self.agent.targetNetwork.load_state_dict(self.agent.trainNetwork.state_dict())
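            # Exponentially decay epsilon from epsilon_start toward epsilon_min over epsilon_decay steps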
self.agent.epsilon = self.agent.epsilon_min + (self.agent.epsilon_start - self.agent.epsilon_min) * math.exp(-1 * ((self.agent.step + 1) / self.agent.epsilon_decay))
        if self.current_episode % 200 == 0:
            self.agent.save(self.current_episode)
        print("now epsilon is {}, the reward is {} with loss {} in episode {}, step {}, dist {}".format(
            self.agent.epsilon, rewardsSum, lossSum, self.current_episode, self.agent.step, maxDistance))
self.episode_score.append(rewardsSum)
self.episode_qs.append(qSum/qActions)
self.episode_distance.append(maxDistance)
self.episode_loss.append(lossSum)
agent = MarioBaseline(10000, None, 0, 1)
agent.train() | [] |
2024-01-10 | LukeHackett12/MODRL | tensorflow_agents~mario_w.py | from tensorboard.plugins.hparams import api as hp
from tensorflow import keras, Tensor
import tensorflow as tf
import datetime
import random
from collections import namedtuple, deque
from typing import NamedTuple
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from scipy.interpolate import interp1d
from enum import Enum
from gym.wrappers import FrameStack, GrayScaleObservation, ResizeObservation
from gym.spaces import Box
import gym
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT, RIGHT_ONLY
import gym_super_mario_bros
from nes_py.wrappers import JoypadSpace
import argparse
import math
from copy import deepcopy
from replay_buffer_policy import PrioritizedReplayBuffer
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--checkpoint", required=False)
parser.add_argument("-e", "--episode", required=False)
parser.add_argument("-x", "--epsilon", required=False)
# %matplotlib inline
GROUP_NUM = 20
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_dir, histogram_freq=1)
class PolEnum(Enum):
xpos = 0
time = 1
coin = 2
deaths = 3
random = 4
class Transition(NamedTuple):
currStates: Tensor
actions: Tensor
rewards: Tensor
nextStates: Tensor
dones: Tensor
class DQNAgent:
def __init__(self, stateShape, actionSpace, numPicks, memorySize, numRewards, sync=1000, burnin=10000, alpha=0.0001, epsilon=1, epsilon_decay=100000, epsilon_min=0.01, gamma=0.95, checkpoint=None):
self.numPicks = numPicks
self.stateShape = stateShape
self.actionSpace = actionSpace
self.numRewards = numRewards
self.step = 0
self.sync = sync
self.burnin = burnin
self.alpha = alpha
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.epsilon_min = epsilon_min
self.gamma = gamma
self.walpha = 0.01
self.delay = 1.1
self.train_network = self.createNetwork(
stateShape, (actionSpace.n), self.alpha
)
self.target_network = self.createNetwork(
stateShape, (actionSpace.n), self.alpha
)
# Store network weights for each policy
self.policy_train_weights = [
deepcopy(self.train_network.get_weights())
] * self.numRewards
self.policy_target_weights = [
deepcopy(self.train_network.get_weights())
] * self.numRewards
# Individual replay buffers for policies and for w net
self.replayMemory = []
for i in range(self.numRewards):
self.replayMemory.append(PrioritizedReplayBuffer(memorySize, 0.6))
# Create and store network weights for W-values
self.w_train_network = self.createNetwork(stateShape, numRewards, self.alpha)
self.wnet_train_weights = [
deepcopy(self.w_train_network.get_weights())
] * self.numRewards
def createNetwork(self, n_input, n_output, learningRate):
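        # Nature-DQN-style conv torso (32/64/64 filters) on channels-first (4, 84, 84) frame stacks, with pixels rescaled to [0, 1]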
model = keras.models.Sequential()
model.add(keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=n_input))
model.add(keras.layers.Conv2D(32, kernel_size=8, strides=4, activation='relu', data_format="channels_first"))
model.add(keras.layers.Conv2D(64, kernel_size=4, strides=2, activation='relu', data_format="channels_first"))
model.add(keras.layers.Conv2D(64, kernel_size=3, strides=1, activation='relu', data_format="channels_first"))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(512, activation='linear'))
model.add(keras.layers.Dense(n_output, activation='linear'))
model.compile(loss=keras.losses.Huber(), optimizer=keras.optimizers.Adam(lr=learningRate))
return model
def trainDQN(self):
if len(self.replayMemory[0]) <= self.numPicks or len(self.replayMemory[0]) <= self.burnin:
return [(0, 0)] * self.numRewards
agentsLoss = []
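        # Anneal the prioritized-replay importance-sampling exponent (beta) from 0.4 toward 1.0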
        beta = min(1.0, 0.4 + self.step * (1.0 - 0.4) / 1000000)
for i in range(self.numRewards):
samples = self.replayMemory[i].sample(self.numPicks, beta)
(
currStates,
actions,
policies,
rewards,
nextStates,
dones,
weights,
indices,
) = samples
currStates = np.array(currStates)
nextStates = np.array(nextStates)
rewards = (
np.array(rewards)
.reshape(
self.numPicks,
)
.astype(float)
)
actions = (
np.array(actions)
.reshape(
self.numPicks,
)
.astype(int)
)
policies = (
np.array(policies)
.reshape(
self.numPicks,
)
.astype(int)
)
dones = np.array(dones).astype(bool)
notDones = (~dones).astype(float)
dones = dones.astype(float)
self.train_network.set_weights(self.policy_train_weights[i])
Q_currents_all = self.train_network(currStates, training=False).numpy()
self.target_network.set_weights(self.policy_target_weights[i])
Q_futures_all = (
self.target_network(nextStates, training=False).numpy().max(axis=1)
)
Q_currents = np.copy(Q_currents_all)
Q_futures = np.copy(Q_futures_all)
# Q-Learning
Q_currents[np.arange(self.numPicks), actions] = (
rewards * dones + (rewards + Q_futures * self.gamma) * notDones
)
lossQ = self.train_network.train_on_batch(currStates, Q_currents)
self.policy_train_weights[i] = deepcopy(self.train_network.get_weights())
# PER Update
prios = (np.abs(lossQ) * weights) + 1e-5
self.replayMemory[i].update_priorities(indices, prios)
lossW = 0
# Leave in exploration actions for now, can remove with "policy[p] != -1"
inverted_policy_mask = np.array(
[p for p in range(self.numPicks) if policies[p] != i]
)
if len(inverted_policy_mask) > 0:
# W-Learning
self.w_train_network.set_weights(self.wnet_train_weights[i])
currStatesNP = currStates[inverted_policy_mask]
policiesNP = policies[inverted_policy_mask]
rewardNP = rewards[inverted_policy_mask]
donesNP = dones[inverted_policy_mask]
notDonesNP = notDones[inverted_policy_mask]
Q_currents_np = Q_currents_all[inverted_policy_mask].max(axis=1)
Q_futures_np = Q_futures_all[inverted_policy_mask]
w_train = self.w_train_network(currStatesNP, training=False).numpy()
# maybe (Q_currents_not_policy - ((rewardNP * dones) + (self.gamma * Q_futures_not_policy) * notDonesNP)) * walpha^delay ?
w_train[np.arange(len(inverted_policy_mask)), policiesNP] = (
(1 - self.walpha)
* w_train[np.arange(len(inverted_policy_mask)), policiesNP]
) + (
(self.walpha ** self.delay)
* (
Q_currents_np
- (
(rewardNP * donesNP)
+ (self.gamma * Q_futures_np) * notDonesNP
)
)
)
lossW = self.w_train_network.train_on_batch(currStatesNP, w_train)
self.wnet_train_weights[i] = self.w_train_network.get_weights()
agentsLoss.append((lossQ, lossW))
return agentsLoss
def selectAction(self, state):
self.step += 1
state = np.expand_dims(np.array(state), 0)
if self.step % self.sync == 0:
self.policy_target_weights = deepcopy(self.policy_train_weights)
emptyPolicies = [0] * self.numRewards
policy, qs, ws = (-1, -1, emptyPolicies)
random = True
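        # Two-level epsilon-greedy: explore with a random action, otherwise possibly a random policy before consulting the learned W-values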
if np.random.rand(1) < self.epsilon:
action = np.random.randint(0, 3)
else:
ws = []
if np.random.rand(1) < self.epsilon:
policy = np.random.randint(0, self.numRewards)
else:
for i in range(self.numRewards):
self.w_train_network.set_weights(self.wnet_train_weights[i])
w_val = self.w_train_network(state, training=False).numpy()[0]
ws.append(w_val[np.argmax(w_val)])
random = False
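                # Exploit: select the policy with the highest predicted W-value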
policy = np.argmax(ws)
self.train_network.set_weights(self.policy_train_weights[policy])
pred = np.squeeze(self.train_network(state, training=False).numpy(), axis=0)
action = np.argmax(pred)
qs = np.max(pred)
return action, policy, qs, ws, random
def addMemory(self, state, action, policy, reward, nextState, done):
for i in range(self.numRewards):
self.replayMemory[i].add(state, action, policy, reward[i], nextState, done)
def save(self, ep):
save_path = (
f"./mario_w_{int(ep)}.chkpt"
)
weights = []
for i in range(self.numRewards):
train_w = self.policy_train_weights[i]
            target_w = self.policy_target_weights[i]
w_w = self.wnet_train_weights[i]
weights.append([train_w, target_w, w_w])
with open(save_path, "wb") as f:
pickle.dump(weights, f)
print(f"MarioNet saved to {save_path} done!")
class SkipFrame(gym.Wrapper):
def __init__(self, env, skip):
"""Return only every `skip`-th frame"""
super().__init__(env)
self._skip = skip
def step(self, action):
"""Repeat action, and sum reward"""
total_reward = 0.0
done = False
for i in range(self._skip):
# Accumulate reward and repeat the same action
obs, reward, done, info = self.env.step(action)
total_reward += reward
if done:
break
return obs, total_reward, done, info
# Taken from OpenAI baselines: https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class MarioBaseline(object):
def __init__(self, episodes, checkpoint, current_episode, epsilon):
self.current_episode = current_episode
self.episodes = episodes
self.episode_score = []
self.episode_qs = []
self.episode_distance = []
self.episode_loss = []
self.episode_policies = []
self.fig, self.ax = plt.subplots(1, 2, figsize=(12, 4))
self.fig.canvas.draw()
self.env = gym_super_mario_bros.make('SuperMarioBros-1-1-v0')
# Apply Observation Wrappers
self.env = GrayScaleObservation(self.env)
self.env = ResizeObservation(self.env, 84)
# Apply Control Wrappers
self.env = JoypadSpace(self.env, SIMPLE_MOVEMENT)
self.env = NoopResetEnv(self.env)
# Apply Frame Wrappers
self.env = SkipFrame(self.env, 4)
self.env = FrameStack(self.env, 4)
self.agent = DQNAgent(stateShape=(4, 84, 84), actionSpace=self.env.action_space, numPicks=32, memorySize=20000, numRewards=4, epsilon=epsilon, checkpoint=checkpoint)
def train(self):
for _ in range(self.episodes):
self.episode()
self.current_episode += 1
self.env.close()
def episode(self):
done = False
rewardsSum = 0
qSum = 0
qActions = 1
lossSum = 0
policies = [0] * (4 + 1)
lossSums = [0] * (4)
state = np.array(self.env.reset())
maxDistance = -1000000
lastX = 0
lastT = 0
lastC = 0
while not done:
action, policy, qs, ws, random = self.agent.selectAction(state)
policies[policy] += 1
obs, _, done, info = self.env.step(action)
#self.env.render()
if info['x_pos'] > maxDistance:
maxDistance = info['x_pos']
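            # Decompose the environment feedback into separate reward signals: x-position progress, time, coins, and death penalty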
rewardX = info['x_pos'] - lastX
lastX = info['x_pos']
rewardT = info['time'] - lastT
if rewardT > 0: rewardT = 0
lastT = info['time']
rewardC = info['coins'] - lastC
lastC = info['coins']
rewardD = self.env.unwrapped._death_penalty
next_state = np.array(obs)
rewardsSum = np.add(rewardsSum, rewardX)
rewardsSum = np.add(rewardsSum, rewardT)
rewardsSum = np.add(rewardsSum, rewardC)
rewardsSum = np.add(rewardsSum, rewardD)
self.agent.addMemory(state, action, policy, [rewardX, rewardT, rewardC, rewardD], next_state, done)
loss = self.agent.trainDQN()
state = next_state
lossSums = [lossSums[i] + loss[i][0] for i in range(len(lossSums))]
self.agent.epsilon = self.agent.epsilon_min + (1 - self.agent.epsilon_min) * math.exp(-1 * ((self.agent.step + 1) / self.agent.epsilon_decay))
print("now epsilon is {}, the reward is {} with loss {} in episode {}, step {}, dist {}".format(
self.agent.epsilon, rewardsSum, lossSums, self.current_episode, self.agent.step, maxDistance))
self.episode_score.append(rewardsSum)
self.episode_policies.append(policies)
if self.current_episode % 200 == 0:
self.agent.save(self.current_episode)
self.plot()
def plot(self):
spline_x = np.linspace(0, self.current_episode, num=self.current_episode)
ep_scores = np.array(self.episode_score)
ep_groups = [
ep_scores[i * GROUP_NUM : (i + 1) * GROUP_NUM]
for i in range((len(ep_scores) + GROUP_NUM - 1) // GROUP_NUM)
]
# Pad for weird numpy error for now
ep_groups[-1] = np.append(
ep_groups[-1], [np.mean(ep_groups[-1])] * (GROUP_NUM - len(ep_groups[-1]))
)
x_groups = [i * GROUP_NUM for i in range(len(ep_groups))]
self.ax[0].clear()
if len(x_groups) > 5:
ep_avgs = np.mean(ep_groups, 1)
avg_spl = interp1d(
x_groups, ep_avgs, kind="cubic", fill_value="extrapolate"
)
ep_std = np.std(ep_groups, 1)
std_spl = interp1d(x_groups, ep_std, kind="cubic", fill_value="extrapolate")
self.ax[0].plot(spline_x, avg_spl(spline_x), lw=0.7, c="blue")
self.ax[0].fill_between(
spline_x,
avg_spl(spline_x) - std_spl(spline_x),
avg_spl(spline_x) + std_spl(spline_x),
alpha=0.5,
facecolor="red",
interpolate=True,
)
self.ax[0].title.set_text("Training Score")
self.ax[0].set_xlabel("Episode")
self.ax[0].set_ylabel("Score")
policies = np.transpose(self.episode_policies)
colors = pl.cm.jet(np.linspace(0, 1, len(policies) * 2))
self.ax[1].clear()
self.ax[1].title.set_text("Policy Choices")
for i, policy in enumerate(policies):
if len(x_groups) > 5:
ep_groups = [
policy[i * GROUP_NUM : (i + 1) * GROUP_NUM]
for i in range((len(policy) + GROUP_NUM - 1) // GROUP_NUM)
]
# Pad for weird numpy error for now
ep_groups[-1] = np.append(
ep_groups[-1],
[np.mean(ep_groups[-1])] * (GROUP_NUM - len(ep_groups[-1])),
)
x_groups = [i * GROUP_NUM for i in range(len(ep_groups))]
ep_avgs = np.mean(ep_groups, 1)
avg_spl = interp1d(
x_groups, ep_avgs, kind="cubic", fill_value="extrapolate"
)
ep_std = np.std(ep_groups, 1)
std_spl = interp1d(
x_groups, ep_std, kind="cubic", fill_value="extrapolate"
)
self.ax[1].plot(
spline_x,
avg_spl(spline_x),
lw=0.7,
c=colors[i],
label="{} policy".format(PolEnum(i).name),
)
self.ax[1].fill_between(
spline_x,
avg_spl(spline_x) - std_spl(spline_x),
avg_spl(spline_x) + std_spl(spline_x),
alpha=0.5,
facecolor=colors[-1 - i],
interpolate=True,
)
self.ax[1].legend()
self.fig.canvas.draw()
plt.savefig("mario_w_pddqn_{}.png".format(self.current_episode))
args = parser.parse_args()
checkpoint = args.checkpoint
current_episode = args.episode
epsilon = args.epsilon
if current_episode is None:
current_episode = 0
else:
current_episode = int(current_episode)
if epsilon is None:
epsilon = 1
else:
epsilon = float(epsilon)
agent = MarioBaseline(10000, checkpoint, current_episode, epsilon)
agent.train()
| [] |
2024-01-10 | ilyamk/Discord-AI-Chatbot | bot_utilities~ai_utils.py | import aiohttp
import io
from datetime import datetime
import time
import random
from urllib.parse import quote
from bot_utilities.config_loader import load_current_language, config
import openai
import os
from dotenv import find_dotenv, load_dotenv
import json
from langchain.agents import initialize_agent, AgentType, Tool
from langchain.chains import LLMMathChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.prompts import MessagesPlaceholder
from langchain.schema import SystemMessage
from langchain.memory import ConversationBufferWindowMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain import PromptTemplate
from bs4 import BeautifulSoup
from pydantic import Field
from langchain.prompts import ChatPromptTemplate
import requests
load_dotenv(find_dotenv())
openai.api_key = os.getenv("OPENAI_API_KEY")
load_dotenv()
current_language = load_current_language()
internet_access = config['INTERNET_ACCESS']
# openai.api_key = os.getenv('CHIMERA_GPT_KEY')
# openai.api_base = "https://api.naga.ac/v1"
def sdxl(prompt):
response = openai.Image.create(
model="sdxl",
prompt=prompt,
n=1, # images count
size="1024x1024"
)
return response['data'][0]["url"]
def knowledge_retrieval(query):
# Define the data to be sent in the request
data = {
"params":{
"query":query
},
"project": "feda14180b9d-4ba2-9b3c-6c721dfe8f63"
}
# Convert Python object to JSON string
data_json = json.dumps(data)
# Send the POST request
response = requests.post("https://api-1e3042.stack.tryrelevance.com/latest/studios/6eba417b-f592-49fc-968d-6b63702995e3/trigger_limited", data=data_json)
# Check the response status code
if response.status_code == 200:
return response.json()["output"]["answer"]
else:
print(f"HTTP request failed with status code {response.status_code}")
def summary(content):
llm = ChatOpenAI(temperature = 0, model = "gpt-3.5-turbo-16k-0613")
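    # Map-reduce summarization: split the content into chunks, summarize each, then combine the partial summaries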
text_splitter = RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size = 10000, chunk_overlap=500)
docs = text_splitter.create_documents([content])
map_prompt = """
Write a summary of the following text:
"{text}"
SUMMARY:
"""
map_prompt_template = PromptTemplate(template=map_prompt, input_variables=["text"])
summary_chain = load_summarize_chain(
llm=llm,
chain_type='map_reduce',
map_prompt = map_prompt_template,
combine_prompt = map_prompt_template,
verbose = True
)
output = summary_chain.run(input_documents=docs,)
return output
def scrape_website(url: str):
#scrape website, and also will summarize the content based on objective if the content is too large
#objective is the original objective & task that user give to the agent, url is the url of the website to be scraped
print("Scraping website...")
# Define the headers for the request
headers = {
'Cache-Control': 'no-cache',
'Content-Type': 'application/json',
}
# Define the data to be sent in the request
data = {
"url": url
}
# Convert Python object to JSON string
data_json = json.dumps(data)
# Send the POST request
response = requests.post("https://chrome.browserless.io/content?token=2db344e9-a08a-4179-8f48-195a2f7ea6ee", headers=headers, data=data_json)
# Check the response status code
if response.status_code == 200:
soup = BeautifulSoup(response.content, "html.parser")
text = soup.get_text()
print("CONTENTTTTTT:", text)
if len(text) > 10000:
output = summary(text)
return output
else:
return text
else:
print(f"HTTP request failed with status code {response.status_code}")
def search(query):
"""
    Searches DuckDuckGo for the given query and returns the results.
    Args:
        query (str): The search query.
    Returns:
        dict or str: The parsed JSON results, or an error message string if the request fails.
    Raises:
        None
"""
endpoint = "https://ddg-api.herokuapp.com/search"
params = {
'query': query, # Replace with your search query
'limit': 5 # Replace with your desired limit
}
# Make the GET request
response = requests.get(endpoint, params=params)
# Check if the request was successful
if response.status_code == 200:
results = response.json()
return results
else:
        return "Didn't get any results"
def research(query):
system_message = SystemMessage(
content="""You are a world class researcher, who can do detailed research on any topic and produce facts based results;
you do not make things up, you will try as hard as possible to gather facts & data to back up the research
Please make sure you complete the objective above with the following rules:
1/ You will always searching for internal knowledge base first to see if there are any relevant information
2/ If the internal knowledge doesnt have good result, then you can go search online
3/ While search online:
a/ You will try to collect as many useful details as possible
b/ If there are url of relevant links & articles, you will scrape it to gather more information
c/ After scraping & search, you should think "is there any new things i should search & scraping based on the data I collected to increase research quality?" If answer is yes, continue; But don't do this more than 3 iteratins
4/ You should not make things up, you should only write facts & data that you have gathered
5/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research
6/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research"""
)
agent_kwargs = {
"system_message": system_message,
}
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Knowledge_retrieval",
func=knowledge_retrieval,
description="Use this to get our internal knowledge base data for curated information, always use this first before searching online"
),
Tool(
name = "Google_search",
func = search,
description = "Always use this to answer questions about current events, data, or terms that you don't really understand. You should ask targeted questions"
),
Tool(
name = "Scrape_website",
func = scrape_website,
description = "Use this to load content from a website url"
),
]
agent = initialize_agent(
tools,
llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=False,
agent_kwargs=agent_kwargs,
)
results = agent.run(query)
return results
def trigger_github_weekly_trending_repo_scrape():
url = "https://api.browse.ai/v2/robots/0c0f94bf-207a-4660-8ade-238cd778bb25/tasks"
payload = {"inputParameters":
{"originUrl": "https://github.com/trending"}
}
headers = {"Authorization": "Bearer ec2cc08b-3343-47c9-9dd3-dc5d40d4aa3b:dead067b-d485-496d-a3e0-4902339f6cfe"}
response = requests.request("POST", url, json=payload, headers=headers)
print("id: ", response.json()["result"]["id"], "is :", response.text)
return response.json()["result"]["id"]
def retrieve_github_weekly_trending_repo(task_id):
url = f"https://api.browse.ai/v2/robots/0c0f94bf-207a-4660-8ade-238cd778bb25/tasks/{task_id}"
headers = {"Authorization": "Bearer ec2cc08b-3343-47c9-9dd3-dc5d40d4aa3b:dead067b-d485-496d-a3e0-4902339f6cfe"}
response = requests.request("GET", url, headers=headers)
return response.json()
def get_github_weekly_trending_repo():
task_id = trigger_github_weekly_trending_repo_scrape()
while True:
time.sleep(5)
response = retrieve_github_weekly_trending_repo(task_id)
# print(response)
if response["statusCode"] == 200:
if response["result"]["status"] == "successful":
repos = response["result"]["capturedLists"]
return repos
elif response["result"]["status"] == "failed":
return "failed to get data"
elif response["statusCode"] in {400, 401, 403, 404, 500, 503}:
return response["messageCode"]
def filter_ai_github_repos(repos):
model = ChatOpenAI()
prompt_template = """
{repos}
Above is the list of scraped trending github repos this week,
can you help me filter out ones that is related to AI, knowledge graph, computer vision, large language model?
The report should be in certain format:
"🚀 Daily trending AI projects:
**coqui-ai / TTS**
- 🌟 3,952 stars this week | 18,952 total stars
- 📖 a deep learning toolkit for Text-to-Speech, battle-tested in research and production
- 🌐 https://github.com/coqui-ai/TTS
**tldraw / tldraw**
- 🌟 2,196 stars this week | 20,812 total stars
- 📖 a very good whiteboard
- 🌐 https://github.com/yoheinakajima/instagraph
...."
if there is no any relevant repo, you can just say "Looks like no new interesting AI project today, let me know if I missed any pls!"
"""
prompt = ChatPromptTemplate.from_template(prompt_template)
chain = prompt | model
results = chain.invoke({"repos": repos})
return results.content
def generate_trending_git_report():
repos = get_github_weekly_trending_repo()
filtered_repos = filter_ai_github_repos(repos)
return filtered_repos
async def fetch_models():
return openai.Model.list()
agents = {}
def create_agent(id, user_name, ai_name, instructions):
system_message = SystemMessage(
content=instructions
)
agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
"system_message": system_message,
}
    memory = ConversationBufferWindowMemory(memory_key="memory", return_messages=True, ai_prefix=ai_name, human_prefix=user_name)
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
tools = [
Tool(
name = "research",
func = research,
description = "Always use this to answer questions about current events, data, or terms that you don't really understand. You should ask targeted questions"
),
Tool(
name = "Scrape_website",
func = scrape_website,
description = "Use this to load content from a website url"
),
]
agent = initialize_agent(
tools,
llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=agent_kwargs,
memory=memory
)
agents[id] = agent
return agent
def generate_response(instructions, user_input):
id = user_input["id"]
message = user_input["message"]
if id not in agents:
user_name = user_input["user_name"]
ai_name = user_input["ai_name"]
agent = create_agent(id, user_name, ai_name, instructions)
else:
agent = agents[id]
print(message)
response = agent.run(message)
return response
def generate_response_old(instructions, search, history):
if search is not None:
search_results = search
elif search is None:
search_results = "Search feature is disabled"
messages = [
{"role": "system", "name": "instructions", "content": instructions},
*history,
{"role": "system", "name": "search_results", "content": search_results},
]
response = openai.ChatCompletion.create(
model=config['GPT_MODEL'],
messages=messages
)
message = response.choices[0].message.content
return message
def generate_gpt4_response(prompt):
messages = [
{"role": "system", "name": "admin_user", "content": prompt},
]
response = openai.ChatCompletion.create(
model='gpt-4',
messages=messages
)
message = response.choices[0].message.content
return message
async def poly_image_gen(session, prompt):
seed = random.randint(1, 100000)
image_url = f"https://image.pollinations.ai/prompt/{prompt}?seed={seed}"
async with session.get(image_url) as response:
image_data = await response.read()
image_io = io.BytesIO(image_data)
return image_io
# async def fetch_image_data(url):
# async with aiohttp.ClientSession() as session:
# async with session.get(url) as response:
# return await response.read()
async def dall_e_gen(model, prompt, size, num_images):
response = openai.Image.create(
model=model,
prompt=prompt,
n=num_images,
size=size,
)
imagefileobjs = []
for image in response["data"]:
image_url = image["url"]
async with aiohttp.ClientSession() as session:
async with session.get(image_url) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
imagefileobjs.append(img_file_obj)
return imagefileobjs
async def generate_image_prodia(prompt, model, sampler, seed, neg):
print("\033[1;32m(Prodia) Creating image for :\033[0m", prompt)
start_time = time.time()
async def create_job(prompt, model, sampler, seed, neg):
if neg is None:
negative = "(nsfw:1.5),verybadimagenegative_v1.3, ng_deepnegative_v1_75t, (ugly face:0.8),cross-eyed,sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, bad anatomy, DeepNegative, facing away, tilted head, {Multiple people}, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worstquality, low quality, normal quality, jpegartifacts, signature, watermark, username, blurry, bad feet, cropped, poorly drawn hands, poorly drawn face, mutation, deformed, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, extra fingers, fewer digits, extra limbs, extra arms,extra legs, malformed limbs, fused fingers, too many fingers, long neck, cross-eyed,mutated hands, polar lowres, bad body, bad proportions, gross proportions, text, error, missing fingers, missing arms, missing legs, extra digit, extra arms, extra leg, extra foot, repeating hair, nsfw, [[[[[bad-artist-anime, sketch by bad-artist]]]]], [[[mutation, lowres, bad hands, [text, signature, watermark, username], blurry, monochrome, grayscale, realistic, simple background, limited palette]]], close-up, (swimsuit, cleavage, armpits, ass, navel, cleavage cutout), (forehead jewel:1.2), (forehead mark:1.5), (bad and mutated hands:1.3), (worst quality:2.0), (low quality:2.0), (blurry:2.0), multiple limbs, bad anatomy, (interlocked fingers:1.2),(interlocked leg:1.2), Ugly Fingers, (extra digit and hands and fingers and legs and arms:1.4), crown braid, (deformed fingers:1.2), (long fingers:1.2)"
else:
negative = neg
url = 'https://api.prodia.com/generate'
params = {
'new': 'true',
'prompt': f'{quote(prompt)}',
'model': model,
'negative_prompt': f"{negative}",
'steps': '100',
'cfg': '9.5',
'seed': f'{seed}',
'sampler': sampler,
'upscale': 'True',
'aspect_ratio': 'square'
}
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
data = await response.json()
return data['job']
job_id = await create_job(prompt, model, sampler, seed, neg)
url = f'https://api.prodia.com/job/{job_id}'
headers = {
'authority': 'api.prodia.com',
'accept': '*/*',
}
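    # Poll the Prodia job endpoint until generation succeeds, then download the finished image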
async with aiohttp.ClientSession() as session:
while True:
async with session.get(url, headers=headers) as response:
json = await response.json()
if json['status'] == 'succeeded':
async with session.get(f'https://images.prodia.xyz/{job_id}.png?download=1', headers=headers) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
duration = time.time() - start_time
print(f"\033[1;34m(Prodia) Finished image creation\n\033[0mJob id : {job_id} Prompt : ", prompt, "in", duration, "seconds.")
return img_file_obj
| [
"\n Write a summary of the following text:\n \"{text}\"\n SUMMARY:\n ",
"\n {repos} \n Above is the list of scraped trending github repos this week, \n can you help me filter out ones that is related to AI, knowledge graph, computer vision, large language model?\n\n The report should be in certain format:\n \"🚀 Daily trending AI projects:\n\n **coqui-ai / TTS**\n - 🌟 3,952 stars this week | 18,952 total stars\n - 📖 a deep learning toolkit for Text-to-Speech, battle-tested in research and production\n - 🌐 https://github.com/coqui-ai/TTS\n\n **tldraw / tldraw**\n - 🌟 2,196 stars this week | 20,812 total stars\n - 📖 a very good whiteboard\n - 🌐 https://github.com/yoheinakajima/instagraph\n\n ....\"\n\n if there is no any relevant repo, you can just say \"Looks like no new interesting AI project today, let me know if I missed any pls!\"\n ",
"You are a world class researcher, who can do detailed research on any topic and produce facts based results; \n you do not make things up, you will try as hard as possible to gather facts & data to back up the research\n \n Please make sure you complete the objective above with the following rules:\n 1/ You will always searching for internal knowledge base first to see if there are any relevant information\n 2/ If the internal knowledge doesnt have good result, then you can go search online\n 3/ While search online:\n a/ You will try to collect as many useful details as possible\n b/ If there are url of relevant links & articles, you will scrape it to gather more information\n c/ After scraping & search, you should think \"is there any new things i should search & scraping based on the data I collected to increase research quality?\" If answer is yes, continue; But don't do this more than 3 iteratins\n 4/ You should not make things up, you should only write facts & data that you have gathered\n 5/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research\n 6/ In the final output, You should include all reference data & links to back up your research; You should include all reference data & links to back up your research"
] |
2024-01-10 | zzachw/deepchem | deepchem~rl~tests~test_a3c.py | from flaky import flaky
import deepchem as dc
from deepchem.models.tensorgraph.optimizers import Adam, PolynomialDecay
from tensorflow.keras.layers import Input, Dense, GRU, Reshape, Softmax
import numpy as np
import tensorflow as tf
import unittest
from nose.plugins.attrib import attr
class TestA3C(unittest.TestCase):
@flaky
def test_roulette(self):
"""Test training a policy for the roulette environment."""
# This is modeled after the Roulette-v0 environment from OpenAI Gym.
# The player can bet on any number from 0 to 36, or walk away (which ends the
# game). The average reward for any bet is slightly negative, so the best
# strategy is to walk away.
class RouletteEnvironment(dc.rl.Environment):
def __init__(self):
super(RouletteEnvironment, self).__init__([(1,)], 38)
self._state = [np.array([0])]
def step(self, action):
if action == 37:
self._terminated = True # Walk away.
return 0.0
wheel = np.random.randint(37)
if wheel == 0:
if action == 0:
return 35.0
return -1.0
if action != 0 and wheel % 2 == action % 2:
return 1.0
return -1.0
def reset(self):
self._terminated = False
env = RouletteEnvironment()
# This policy just learns a constant probability for each action, and a constant for the value.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value'])
def create_model(self, **kwargs):
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(**kwargs)
self.action = tf.Variable(np.ones(env.n_actions, np.float32))
self.value = tf.Variable([0.0], tf.float32)
def call(self, inputs, **kwargs):
prob = tf.nn.softmax(tf.reshape(self.action, (-1, env.n_actions)))
return (prob, self.value)
return TestModel()
# Optimize it.
a3c = dc.rl.A3C(
env,
TestPolicy(),
max_rollout_length=20,
optimizer=Adam(learning_rate=0.001))
a3c.fit(100000)
# It should have learned that the expected value is very close to zero, and that the best
# action is to walk away.
action_prob, value = a3c.predict([[0]])
assert -0.5 < value[0] < 0.5
assert action_prob.argmax() == 37
assert a3c.select_action([[0]], deterministic=True) == 37
# Verify that we can create a new A3C object, reload the parameters from the first one, and
# get the same result.
new_a3c = dc.rl.A3C(env, TestPolicy(), model_dir=a3c._model.model_dir)
new_a3c.restore()
action_prob2, value2 = new_a3c.predict([[0]])
assert value2 == value
# Do the same thing, only using the "restore" argument to fit().
new_a3c = dc.rl.A3C(env, TestPolicy(), model_dir=a3c._model.model_dir)
new_a3c.fit(0, restore=True)
action_prob2, value2 = new_a3c.predict([[0]])
assert value2 == value
def test_recurrent_states(self):
"""Test a policy that involves recurrent layers."""
# The environment just has a constant state.
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((10,), 10)
self._state = np.random.random(10)
def step(self, action):
self._state = np.random.random(10)
return 0.0
def reset(self):
pass
# The policy includes a single recurrent layer.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value', 'rnn_state'],
[np.zeros(10)])
def create_model(self, **kwargs):
state = Input(shape=(10,))
rnn_state = Input(shape=(10,))
reshaped = Reshape((1, 10))(state)
gru, rnn_final_state = GRU(
10, return_state=True, return_sequences=True)(
reshaped, initial_state=rnn_state)
output = Softmax()(Reshape((10,))(gru))
value = dc.models.layers.Variable([0.0])([])
return tf.keras.Model(
inputs=[state, rnn_state], outputs=[output, value, rnn_final_state])
# We don't care about actually optimizing it, so just run a few rollouts to make
# sure fit() doesn't crash, then check the behavior of the GRU state.
env = TestEnvironment()
a3c = dc.rl.A3C(env, TestPolicy())
a3c.fit(100)
# On the first call, the initial state should be all zeros.
prob1, value1 = a3c.predict(
env.state, use_saved_states=True, save_states=False)
# It should still be zeros since we didn't save it last time.
prob2, value2 = a3c.predict(
env.state, use_saved_states=True, save_states=True)
# It should be different now.
prob3, value3 = a3c.predict(
env.state, use_saved_states=True, save_states=False)
# This should be the same as the previous one.
prob4, value4 = a3c.predict(
env.state, use_saved_states=True, save_states=False)
# Now we reset it, so we should get the same result as initially.
prob5, value5 = a3c.predict(
env.state, use_saved_states=False, save_states=True)
assert np.array_equal(prob1, prob2)
assert np.array_equal(prob1, prob5)
assert np.array_equal(prob3, prob4)
assert not np.array_equal(prob2, prob3)
@attr('slow')
def test_hindsight(self):
"""Test Hindsight Experience Replay."""
# The environment is a plane in which the agent moves by steps until it reaches a randomly
# positioned goal. No reward is given until it reaches the goal. That makes it very hard
# to learn by standard methods, since it may take a very long time to receive any feedback
# at all. Using hindsight makes it much easier.
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((4,), 4)
self.moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def reset(self):
self._state = np.concatenate([[0, 0], np.random.randint(-50, 50, 2)])
self._terminated = False
self.count = 0
def step(self, action):
new_state = self._state.copy()
new_state[:2] += self.moves[action]
self._state = new_state
self.count += 1
reward = 0
if np.array_equal(new_state[:2], new_state[2:]):
self._terminated = True
reward = 1
elif self.count == 1000:
self._terminated = True
return reward
def apply_hindsight(self, states, actions, goal):
new_states = []
rewards = []
goal_pos = goal[:2]
for state, action in zip(states, actions):
new_state = state.copy()
new_state[2:] = goal_pos
new_states.append(new_state)
pos_after_action = new_state[:2] + self.moves[action]
if np.array_equal(pos_after_action, goal_pos):
rewards.append(1)
else:
rewards.append(0)
return new_states, rewards
# A simple policy with two hidden layers.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_prob', 'value'])
def create_model(self, **kwargs):
state = Input(shape=(4,))
dense1 = Dense(6, activation=tf.nn.relu)(state)
dense2 = Dense(6, activation=tf.nn.relu)(dense1)
output = Dense(4, activation=tf.nn.softmax, use_bias=False)(dense2)
value = Dense(1)(dense2)
return tf.keras.Model(inputs=state, outputs=[output, value])
# Optimize it.
env = TestEnvironment()
learning_rate = PolynomialDecay(
initial_rate=0.0005, final_rate=0.0002, decay_steps=2000000)
a3c = dc.rl.A3C(
env,
TestPolicy(),
use_hindsight=True,
optimizer=Adam(learning_rate=learning_rate))
a3c.fit(2000000)
# Try running it a few times and see if it succeeds.
pass_count = 0
for i in range(5):
env.reset()
while not env.terminated:
env.step(a3c.select_action(env.state))
if np.array_equal(env.state[:2], env.state[2:]):
pass_count += 1
assert pass_count >= 3
def test_continuous(self):
"""Test A3C on an environment with a continous action space."""
# The state consists of two numbers: a current value and a target value.
# The policy just needs to learn to output the target value (or at least
# move toward it).
class TestEnvironment(dc.rl.Environment):
def __init__(self):
super(TestEnvironment, self).__init__((2,), action_shape=(1,))
def reset(self):
target = np.random.uniform(-50, 50)
self._state = np.array([0, target])
self._terminated = False
self.count = 0
def step(self, action):
target = self._state[1]
dist = np.abs(target - action[0])
old_dist = np.abs(target - self._state[0])
new_state = np.array([action[0], target])
self._state = new_state
self.count += 1
reward = old_dist - dist
self._terminated = (self.count == 10)
return reward
# A simple policy with no hidden layers.
class TestPolicy(dc.rl.Policy):
def __init__(self):
super(TestPolicy, self).__init__(['action_mean', 'action_std', 'value'])
def create_model(self, **kwargs):
class TestModel(tf.keras.Model):
def __init__(self):
super(TestModel, self).__init__(**kwargs)
self.mean = Dense(1, kernel_initializer='zeros')
self.std = tf.constant([10.0])
self.value = Dense(1)
def call(self, inputs, **kwargs):
return (self.mean(inputs), self.std, self.value(inputs))
return TestModel()
# Optimize it.
env = TestEnvironment()
learning_rate = PolynomialDecay(
initial_rate=0.005, final_rate=0.0005, decay_steps=25000)
a3c = dc.rl.A3C(
env,
TestPolicy(),
discount_factor=0,
optimizer=Adam(learning_rate=learning_rate))
a3c.fit(25000)
# Try running it and see if it reaches the target
env.reset()
while not env.terminated:
env.step(a3c.select_action(env.state, deterministic=True))
distance = np.abs(env.state[0] - env.state[1])
tolerance = max(1.0, 0.1 * np.abs(env.state[1]))
assert distance < tolerance
| [] |
2024-01-10 | zzachw/deepchem | contrib~rl~test_mcts.py | from flaky import flaky
import deepchem as dc
from deepchem.models.tensorgraph.layers import Reshape, Variable, SoftMax, GRU, Dense
from deepchem.models.tensorgraph.optimizers import Adam, PolynomialDecay
import numpy as np
import tensorflow as tf
import unittest
from nose.plugins.attrib import attr
class TestMCTS(unittest.TestCase):
@flaky
def test_roulette(self):
"""Test training a policy for the roulette environment."""
# This is modeled after the Roulette-v0 environment from OpenAI Gym.
# The player can bet on any number from 0 to 36, or walk away (which ends the
# game). The average reward for any bet is slightly negative, so the best
# strategy is to walk away.
class RouletteEnvironment(dc.rl.Environment):
def __init__(self):
super(RouletteEnvironment, self).__init__([(1,)], 38)
self._state = [np.array([0])]
def step(self, action):
if action == 37:
self._terminated = True # Walk away.
return 0.0
wheel = np.random.randint(37)
if wheel == 0:
if action == 0:
return 35.0
return -1.0
if action != 0 and wheel % 2 == action % 2:
return 1.0
return -1.0
def reset(self):
self._terminated = False
env = RouletteEnvironment()
# This policy just learns a constant probability for each action, and a constant for the value.
class TestPolicy(dc.rl.Policy):
def create_layers(self, state, **kwargs):
action = Variable(np.ones(env.n_actions))
output = SoftMax(
in_layers=[Reshape(in_layers=[action], shape=(-1, env.n_actions))])
value = Variable([0.0])
return {'action_prob': output, 'value': value}
# Optimize it.
mcts = dc.rl.MCTS(
env,
TestPolicy(),
max_search_depth=5,
n_search_episodes=200,
optimizer=Adam(learning_rate=0.005))
mcts.fit(10, steps_per_iteration=50, epochs_per_iteration=50)
# It should have learned that the expected value is very close to zero, and that the best
# action is to walk away.
action_prob, value = mcts.predict([[0]])
assert -0.5 < value[0] < 0.5
assert action_prob.argmax() == 37
assert mcts.select_action([[0]], deterministic=True) == 37
# Verify that we can create a new MCTS object, reload the parameters from the first one, and
# get the same result.
new_mcts = dc.rl.MCTS(env, TestPolicy(), model_dir=mcts._graph.model_dir)
new_mcts.restore()
action_prob2, value2 = new_mcts.predict([[0]])
assert value2 == value
# Do the same thing, only using the "restore" argument to fit().
new_mcts = dc.rl.MCTS(env, TestPolicy(), model_dir=mcts._graph.model_dir)
new_mcts.fit(0, restore=True)
action_prob2, value2 = new_mcts.predict([[0]])
assert value2 == value
| [] |
2024-01-10 | fuyu-quant/IBLM | src~iblm~iblboosting~iblboosting.py | from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import re
import numpy as np
import warnings
warnings.filterwarnings('ignore')
class IBLBoosting():
def __init__(
self,
llm_model_name,
params
):
self.llm_model_name = llm_model_name
self.llm_model = OpenAI(temperature=0, model_name = self.llm_model_name)
#self.llm_model = llm_model,
self.columns_name = params['columns_name']
self.model_code = None
def fit(self, x, y, model_name, file_path=None):
print("> Start of model creating.")
df = x.copy()
df['target'] = y
# Determine whether binary or multivalued classification is used
if len(df['target'].unique()) == 2:
task_type = 'binary classification'
output_code = 'y = 1 / (1 + np.exp(-y))'
        else:
            task_type = 'multi-class classification'
            # Assumed default: no sigmoid squashing for multi-class, so leave the output line empty to keep output_code defined
            output_code = ''
# Obtaining data types
data_type = ', '.join(df.dtypes.astype(str))
# Create a string dataset
dataset = []
for index, row in df.iterrows():
row_as_str = [str(item) for item in row.tolist()]
dataset.append(','.join(row_as_str))
dataset_str = '\n'.join(dataset)
# column name
if self.columns_name:
col_name = ', '.join(df.columns.astype(str))
col_option = ''
else:
# serial number
df.columns = range(df.shape[1])
col_name = ', '.join(df.columns.astype(str))
col_option = 'df.columns = range(df.shape[1])'
create_prompt = """
Please create your code in compliance with all of the following conditions. Output should be code only. Do not enclose the output in ``python ``` or the like.
・Analyze the large amount of data below and create a {task_type_} code to accurately predict "target".
------------------
{dataset_str_}
------------------
・Each data type is as follows. If necessary, you can change the data type.
・Create code that can make predictions about new data based on logic from large amounts of input data without using machine learning models.
・If input is available, the column names below should also be used to help make decisions when creating the predictive model. Column Name:{col_name_}
・Create a code like the following. Do not change the input or output format.
・If {col_option_} is not blank, add it after 'df = x.copy()'.
・You do not need to provide examples.
------------------
import numpy as np
def predict(x):
df = x.copy()
output = []
for index, row in df.iterrows():
# Feature creation and data preprocessing
{output_code_}
output.append(y)
output = np.array(output)
return output
""".format(
task_type_ = task_type,
dataset_str_ = dataset_str,
model_name_ = model_name,
col_name_ = col_name,
col_option_ = col_option,
output_code_ = output_code
)
#print(create_prompt)
with get_openai_callback() as cb:
model_code = self.llm_model(create_prompt)
print(cb)
# Save to File
if file_path != None:
with open(file_path + f'{model_name}.py', mode='w') as file:
file.write(model_code)
self.model_code = model_code
return model_code
def predict(self, x):
if self.model_code is None:
raise Exception("You must train the model before predicting!")
code = self.model_code
# = re.search(r'def (\w+)', function_string).group(1)
#code = self.model_code + '\n'# + f'model = model({x})'
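        # Execute the LLM-generated code so its predict() function becomes available in the module globals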
exec(code, globals())
#model = namespace["code"]
y = predict(x)
return y
def interpret(self):
if self.model_code is None:
raise Exception("You must train the model before interpreting!")
interpret_prompt = """
Refer to the code below and explain how you are going to process the data and make predictions.
The only part to explain is the part where the data is processed.
Do not explain df = x.copy().
Please output the data in bulleted form.
Please tell us what you can say based on the whole process.
------------------
{model_code_}
""".format(
model_code_ = self.model_code
)
with get_openai_callback() as cb:
output = self.llm_model(interpret_prompt)
print(cb)
return output | [
"\n Refer to the code below and explain how you are going to process the data and make predictions.\n The only part to explain is the part where the data is processed.\n Do not explain df = x.copy().\n Please output the data in bulleted form.\n Please tell us what you can say based on the whole process.\n ------------------\n {model_code_}\n ",
"\n Please create your code in compliance with all of the following conditions. Output should be code only. Do not enclose the output in ``python ``` or the like.\n ・Analyze the large amount of data below and create a PLACEHOLDER code to accurately predict \"target\".\n ------------------\n PLACEHOLDER\n ------------------\n ・Each data type is as follows. If necessary, you can change the data type.\n ・Create code that can make predictions about new data based on logic from large amounts of input data without using machine learning models.\n ・If input is available, the column names below should also be used to help make decisions when creating the predictive model. Column Name:PLACEHOLDER\n ・Create a code like the following. Do not change the input or output format.\n ・If PLACEHOLDER is not blank, add it after 'df = x.copy()'.\n ・You do not need to provide examples.\n ------------------\n import numpy as np\n\n def predict(x):\n df = x.copy()\n\n output = []\n for index, row in df.iterrows():\n\n\n # Feature creation and data preprocessing\n\n\n PLACEHOLDER\n output.append(y)\n\n output = np.array(output)\n \n return output\n "
] |
2024-01-10 | fuyu-quant/IBLM | src~iblm~iblmodel~iblmodel.py | from langchain.callbacks import get_openai_callback
import numpy as np
import pandas as pd
from importlib import resources
from ..utils import preprocessing
import warnings
warnings.filterwarnings('ignore')
class IBLModel():
def __init__(
self,
llm_model,
params
):
self.llm_model = llm_model
self.columns_name = params['columns_name']
self.objective = params['objective']
self.code_model = None
def fit(self, x, y, prompt = None, model_name = None, file_path = None):
df = x.copy()
df['target'] = y
# Obtaining data types
data_type = ', '.join(df.dtypes.astype(str))
# Create a string dataset
dataset_str = preprocessing.text_converter(df)
# column name
col_name, col_option = preprocessing.columns_name(self.columns_name, df)
# create prompt
        if prompt is None:
if self.objective == 'regression':
with resources.open_text('iblm.iblmodel.prompt', 'regression.txt') as file:
prompt = file.read()
elif self.objective == 'classification':
with resources.open_text('iblm.iblmodel.prompt', 'classification_2.txt') as file:
prompt = file.read()
create_prompt = prompt.format(
dataset_str_ = dataset_str,
data_type_ = data_type,
col_name_ = col_name,
col_option_ = col_option
)
code_model = self.llm_model(create_prompt)
# prompt modification
modification_prompt = preprocessing.prompt_modification(code_model)
code_model = self.llm_model(modification_prompt)
self.code_model = code_model
# Save to File
preprocessing.save_codemodel(file_path, model_name, self.code_model)
return self.code_model
def predict(self, x):
if self.code_model is None:
raise Exception("You must train the model before predicting!")
code = self.code_model
exec(code, globals())
y = predict(x)
return y
def interpret(self):
if self.code_model is None:
raise Exception("You must train the model before interpreting!")
interpret_prompt = preprocessing.interpret_codemodel(self.code_model)
output = self.llm_model(interpret_prompt)
return output | [] |
2024-01-10 | nerds-odd-e/doughnut | training_data~upload.py | import openai
import os
openai.api_key = os.getenv("OPENAI_API_TOKEN")
response = openai.File.create(
file=open("evaluationData.jsonl", "rb"),
purpose='fine-tune'
)
# Extract the file ID from the response
file_id = response['id']
file_name = response['filename']
# Print or use the file ID and filename
print(f"File ID: {file_id}")
print(f"File Name: {file_name}")
| [] |
2024-01-10 | nerds-odd-e/doughnut | training_data~job_monitor.py | import openai
import os
openai.api_key = os.getenv("OPENAI_API_TOKEN")
print(openai.FineTuningJob.list(limit=10))
| [] |
2024-01-10 | emilschleder/DaMedSum | Evaluation~rouge_evaluation_gpt.py | from openai import OpenAI
from evaluate import load
import pandas as pd
# Load the CSV file
df = pd.read_csv('evaluation/data/rouge_summaries_test.csv')
# Initialize the Rouge metric
rouge = load("rouge")
# Initialize the OpenAI API
client = OpenAI(api_key='<TOKEN>')
model = "gpt-3.5-turbo"
# Function to generate summary using ChatGPT 3.5-turbo
def generate_summary_chatgpt(text):
response = client.chat.completions.create(
model = model,
messages=[ { "role": "user",
"content": f"Skriv et resume af den følgende tekst på dansk. Resumeet må ikke være længere end maksimalt 60 ord:\n\n{text}"} ]
)
return response.choices[0].message.content
# Initialize the list for the results
results = []
# Generate summaries for each text in the dataset
for index, row in df.iterrows():
text, true_summary = row['text'], row['summary']
generated_summary = generate_summary_chatgpt(text)
print(f"Generated summary for text{index+1}... = {generated_summary}")
print(len(generated_summary))
scores = rouge.compute(predictions=[generated_summary], references=[true_summary])
# Save the scores and the generated summary
results.append({
"Model": model,
"TextID": f'text{index}',
"ROUGE-1": scores["rouge1"] * 100,
"ROUGE-2": scores["rouge2"] * 100,
"ROUGE-L": scores["rougeL"] * 100,
"ROUGE-Lsum": scores["rougeLsum"] * 100,
"summary": generated_summary,
"text_length": len(text),
"summary_length": len(generated_summary),
})
# Convert results to a DataFrame
results_df = pd.DataFrame(results)
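# Average the ROUGE metrics across all evaluated texts, grouped by model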
df_means = results_df.groupby('Model')[['ROUGE-1', 'ROUGE-2', 'ROUGE-L', 'ROUGE-Lsum']].mean()
# Print the results
print(df_means)
print(results_df)
# Save the results in csv files
results_df.to_csv('evaluation/results/all/summary_evaluation_results_gpt.csv', index=False)
df_means.to_csv('evaluation/results/mean/summary_evaluation_results_gpt_means.csv', index=False) | [
"Skriv et resume af den følgende tekst på dansk. Resumeet må ikke være længere end maksimalt 60 ord:\n\nPLACEHOLDER"
] |
2024-01-10 | gprocunier/heat-gpt | heat-gpt.py | #!/usr/bin/env python3
import openai
import argparse
import textwrap
import os
# Set your API key here
openai.api_key = 'your-api-key'
def interact(max_width):
print("Welcome to heat-gpt")
use_git_repo = input("Would you like to store your templates in a local project folder? (Y/N) ")
if use_git_repo.lower() == "y":
repo_path = input("Please enter the local path to your project folder: ")
save_path = repo_path
else:
repo_path = None
save_path = os.getcwd() # Save in the current working directory
print("Templates will be stored in the current folder.")
# Define the prompt
prompt_prefix = '''
The following question denoted by "QUESTION: " is to be interpreted as a task for an OpenStack heat template,
and the desired output must be in the form of a heat template.
For example, if the question was "Create a VM with 2 CPUs and 4GB of RAM", the output might be:
heat_template_version: wallaby
description: create an instance
resources:
my_vm:
type: OS::Nova::Server
properties:
flavor: m1.small
image: cirros
networks:
- network: private
Ensure all responses are complete and functional templates with the appropriate heat headers and sections.
Omit all but the required elements of the template.
Now, QUESTION:
'''
while True:
try:
print('Please enter your prompt')
message = input('> ')
full_prompt = f'{prompt_prefix.strip()} {message}'
while True:
response = openai.Completion.create(
engine="text-davinci-003",
prompt=full_prompt,
max_tokens=2000 # Increase the max_tokens limit
)
response_text = response.choices[0].text.strip()
# Check if the response is likely to be YAML
if ':' in response_text and '\n' in response_text:
# Don't wrap the text
print(response_text)
else:
# Wrap the text as before
wrapper = textwrap.TextWrapper(width=max_width)
formatted_response = wrapper.fill(text=response_text)
print(formatted_response)
# Ask the user if they accept the suggestion
decision = input('Do you accept this suggestion? (Y/N/R) ')
if decision.lower() == 'y':
# Ask the user for a name for the heat template
filename = input('Please enter a name for the heat template: ')
# Remove any existing file extension from filename
filename = os.path.splitext(filename)[0]
# Add .yaml extension
filename += '.yaml'
# Write the response to a file
file_path = os.path.join(save_path, filename) if repo_path else filename
with open(file_path, 'w') as f:
f.write(response_text)
print(f'Template saved as {file_path}')
break
elif decision.lower() == 'n':
# Go back to the main prompt
break
elif decision.lower() == 'r':
# Regenerate the answer
continue
else:
print("Invalid response. Please enter Y, N, or R.")
except KeyboardInterrupt:
print('\nExiting...')
break
except EOFError:
print('\nExiting...')
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Interact with ChatGPT.')
parser.add_argument('--max-width', type=int, default=80,
help='max width of the output in characters')
args = parser.parse_args()
interact(args.max_width)
| [
"\n The following question denoted by \"QUESTION: \" is to be interpreted as a task for an OpenStack heat template,\n and the desired output must be in the form of a heat template.\n\n For example, if the question was \"Create a VM with 2 CPUs and 4GB of RAM\", the output might be:\n\n heat_template_version: wallaby\n\n description: create an instance\n\n resources:\n my_vm:\n type: OS::Nova::Server\n properties:\n flavor: m1.small\n image: cirros\n networks:\n - network: private\n\n\n Ensure all responses are complete and functional templates with the appropriate heat headers and sections.\n Omit all but the required elements of the template.\n\n Now, QUESTION:\n "
] |
2024-01-10 | coditamar/pr-agent | pr_agent~tools~pr_similar_issue.py | import copy
import json
import logging
from enum import Enum
from typing import List, Tuple
import pinecone
import openai
import pandas as pd
from pydantic import BaseModel, Field
from pr_agent.algo import MAX_TOKENS
from pr_agent.algo.token_handler import TokenHandler
from pr_agent.config_loader import get_settings
from pr_agent.git_providers import get_git_provider
from pinecone_datasets import Dataset, DatasetMetadata
MODEL = "text-embedding-ada-002"
class PRSimilarIssue:
def __init__(self, issue_url: str, args: list = None):
if get_settings().config.git_provider != "github":
raise Exception("Only github is supported for similar issue tool")
self.cli_mode = get_settings().CONFIG.CLI_MODE
self.max_issues_to_scan = get_settings().pr_similar_issue.max_issues_to_scan
self.issue_url = issue_url
self.git_provider = get_git_provider()()
repo_name, issue_number = self.git_provider._parse_issue_url(issue_url.split('=')[-1])
self.git_provider.repo = repo_name
self.git_provider.repo_obj = self.git_provider.github_client.get_repo(repo_name)
self.token_handler = TokenHandler()
repo_obj = self.git_provider.repo_obj
repo_name_for_index = self.repo_name_for_index = repo_obj.full_name.lower().replace('/', '-').replace('_/', '-')
index_name = self.index_name = "codium-ai-pr-agent-issues"
# assuming pinecone api key and environment are set in secrets file
try:
api_key = get_settings().pinecone.api_key
environment = get_settings().pinecone.environment
except Exception:
if not self.cli_mode:
repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1])
issue_main = self.git_provider.repo_obj.get_issue(original_issue_number)
issue_main.create_comment("Please set pinecone api key and environment in secrets file")
raise Exception("Please set pinecone api key and environment in secrets file")
# check if index exists, and if repo is already indexed
run_from_scratch = False
upsert = True
pinecone.init(api_key=api_key, environment=environment)
if not index_name in pinecone.list_indexes():
run_from_scratch = True
upsert = False
else:
if get_settings().pr_similar_issue.force_update_dataset:
upsert = True
else:
pinecone_index = pinecone.Index(index_name=index_name)
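                # the "example_issue_<repo>" sentinel is written by _update_index_with_issues;
                # if it is present, this repo has already been indexed and a full re-index is skipped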
res = pinecone_index.fetch([f"example_issue_{repo_name_for_index}"]).to_dict()
if res["vectors"]:
upsert = False
if run_from_scratch or upsert: # index the entire repo
logging.info('Indexing the entire repo...')
logging.info('Getting issues...')
issues = list(repo_obj.get_issues(state='all'))
logging.info('Done')
self._update_index_with_issues(issues, repo_name_for_index, upsert=upsert)
else: # update index if needed
pinecone_index = pinecone.Index(index_name=index_name)
issues_to_update = []
issues_paginated_list = repo_obj.get_issues(state='all')
counter = 1
for issue in issues_paginated_list:
if issue.pull_request:
continue
issue_str, comments, number = self._process_issue(issue)
issue_key = f"issue_{number}"
id = issue_key + "." + "issue"
res = pinecone_index.fetch([id]).to_dict()
is_new_issue = True
for vector in res["vectors"].values():
if vector['metadata']['repo'] == repo_name_for_index:
is_new_issue = False
break
if is_new_issue:
counter += 1
issues_to_update.append(issue)
else:
break
if issues_to_update:
logging.info(f'Updating index with {counter} new issues...')
self._update_index_with_issues(issues_to_update, repo_name_for_index, upsert=True)
else:
logging.info('No new issues to update')
async def run(self):
logging.info('Getting issue...')
repo_name, original_issue_number = self.git_provider._parse_issue_url(self.issue_url.split('=')[-1])
issue_main = self.git_provider.repo_obj.get_issue(original_issue_number)
issue_str, comments, number = self._process_issue(issue_main)
openai.api_key = get_settings().openai.key
logging.info('Done')
logging.info('Querying...')
res = openai.Embedding.create(input=[issue_str], engine=MODEL)
embeds = [record['embedding'] for record in res['data']]
pinecone_index = pinecone.Index(index_name=self.index_name)
res = pinecone_index.query(embeds[0],
top_k=5,
filter={"repo": self.repo_name_for_index},
include_metadata=True).to_dict()
relevant_issues_number_list = []
relevant_comment_number_list = []
score_list = []
for r in res['matches']:
issue_number = int(r["id"].split('.')[0].split('_')[-1])
if original_issue_number == issue_number:
continue
if issue_number not in relevant_issues_number_list:
relevant_issues_number_list.append(issue_number)
if 'comment' in r["id"]:
relevant_comment_number_list.append(int(r["id"].split('.')[1].split('_')[-1]))
else:
relevant_comment_number_list.append(-1)
score_list.append(str("{:.2f}".format(r['score'])))
logging.info('Done')
logging.info('Publishing response...')
similar_issues_str = "### Similar Issues\n___\n\n"
for i, issue_number_similar in enumerate(relevant_issues_number_list):
issue = self.git_provider.repo_obj.get_issue(issue_number_similar)
title = issue.title
url = issue.html_url
if relevant_comment_number_list[i] != -1:
url = list(issue.get_comments())[relevant_comment_number_list[i]].html_url
similar_issues_str += f"{i + 1}. **[{title}]({url})** (score={score_list[i]})\n\n"
if get_settings().config.publish_output:
response = issue_main.create_comment(similar_issues_str)
logging.info(similar_issues_str)
logging.info('Done')
def _process_issue(self, issue):
header = issue.title
body = issue.body
number = issue.number
if get_settings().pr_similar_issue.skip_comments:
comments = []
else:
comments = list(issue.get_comments())
issue_str = f"Issue Header: \"{header}\"\n\nIssue Body:\n{body}"
return issue_str, comments, number
def _update_index_with_issues(self, issues_list, repo_name_for_index, upsert=False):
logging.info('Processing issues...')
corpus = Corpus()
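        # sentinel record marking this repo as indexed; __init__ checks for it before re-indexing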
example_issue_record = Record(
id=f"example_issue_{repo_name_for_index}",
text="example_issue",
metadata=Metadata(repo=repo_name_for_index)
)
corpus.append(example_issue_record)
counter = 0
for issue in issues_list:
if issue.pull_request:
continue
counter += 1
if counter % 100 == 0:
logging.info(f"Scanned {counter} issues")
if counter >= self.max_issues_to_scan:
logging.info(f"Scanned {self.max_issues_to_scan} issues, stopping")
break
issue_str, comments, number = self._process_issue(issue)
issue_key = f"issue_{number}"
username = issue.user.login
created_at = str(issue.created_at)
if len(issue_str) < 8000 or \
self.token_handler.count_tokens(issue_str) < MAX_TOKENS[MODEL]: # fast reject first
issue_record = Record(
id=issue_key + "." + "issue",
text=issue_str,
metadata=Metadata(repo=repo_name_for_index,
username=username,
created_at=created_at,
level=IssueLevel.ISSUE)
)
corpus.append(issue_record)
if comments:
for j, comment in enumerate(comments):
comment_body = comment.body
num_words_comment = len(comment_body.split())
if num_words_comment < 10 or not isinstance(comment_body, str):
continue
if len(comment_body) < 8000 or \
self.token_handler.count_tokens(comment_body) < MAX_TOKENS[MODEL]:
comment_record = Record(
id=issue_key + ".comment_" + str(j + 1),
text=comment_body,
metadata=Metadata(repo=repo_name_for_index,
username=username, # use issue username for all comments
created_at=created_at,
level=IssueLevel.COMMENT)
)
corpus.append(comment_record)
df = pd.DataFrame(corpus.dict()["documents"])
logging.info('Done')
logging.info('Embedding...')
openai.api_key = get_settings().openai.key
list_to_encode = list(df["text"].values)
try:
res = openai.Embedding.create(input=list_to_encode, engine=MODEL)
embeds = [record['embedding'] for record in res['data']]
except:
embeds = []
logging.error('Failed to embed entire list, embedding one by one...')
for i, text in enumerate(list_to_encode):
try:
res = openai.Embedding.create(input=[text], engine=MODEL)
embeds.append(res['data'][0]['embedding'])
except:
embeds.append([0] * 1536)
df["values"] = embeds
meta = DatasetMetadata.empty()
meta.dense_model.dimension = len(embeds[0])
ds = Dataset.from_pandas(df, meta)
logging.info('Done')
api_key = get_settings().pinecone.api_key
environment = get_settings().pinecone.environment
if not upsert:
logging.info('Creating index from scratch...')
ds.to_pinecone_index(self.index_name, api_key=api_key, environment=environment)
else:
logging.info('Upserting index...')
namespace = ""
batch_size: int = 100
concurrency: int = 10
pinecone.init(api_key=api_key, environment=environment)
ds._upsert_to_index(self.index_name, namespace, batch_size, concurrency)
logging.info('Done')
class IssueLevel(str, Enum):
ISSUE = "issue"
COMMENT = "comment"
class Metadata(BaseModel):
repo: str
username: str = Field(default="@codium")
created_at: str = Field(default="01-01-1970 00:00:00.00000")
level: IssueLevel = Field(default=IssueLevel.ISSUE)
class Config:
use_enum_values = True
class Record(BaseModel):
id: str
text: str
metadata: Metadata
class Corpus(BaseModel):
documents: List[Record] = Field(default=[])
def append(self, r: Record):
self.documents.append(r)
| [] |
2024-01-10 | coditamar/pr-agent | pr_agent~algo~ai_handler.py | import logging
import os
import litellm
import openai
from litellm import acompletion
from openai.error import APIError, RateLimitError, Timeout, TryAgain
from retry import retry
from pr_agent.config_loader import get_settings
OPENAI_RETRIES = 5
class AiHandler:
"""
This class handles interactions with the OpenAI API for chat completions.
It initializes the API key and other settings from a configuration file,
and provides a method for performing chat completions using the OpenAI ChatCompletion API.
"""
def __init__(self):
"""
Initializes the OpenAI API key and other settings from a configuration file.
Raises a ValueError if the OpenAI key is missing.
"""
try:
openai.api_key = get_settings().openai.key
litellm.openai_key = get_settings().openai.key
if get_settings().get("litellm.use_client"):
litellm_token = get_settings().get("litellm.LITELLM_TOKEN")
assert litellm_token, "LITELLM_TOKEN is required"
os.environ["LITELLM_TOKEN"] = litellm_token
litellm.use_client = True
self.azure = False
if get_settings().get("OPENAI.ORG", None):
litellm.organization = get_settings().openai.org
if get_settings().get("OPENAI.API_TYPE", None):
if get_settings().openai.api_type == "azure":
self.azure = True
litellm.azure_key = get_settings().openai.key
if get_settings().get("OPENAI.API_VERSION", None):
litellm.api_version = get_settings().openai.api_version
if get_settings().get("OPENAI.API_BASE", None):
litellm.api_base = get_settings().openai.api_base
if get_settings().get("ANTHROPIC.KEY", None):
litellm.anthropic_key = get_settings().anthropic.key
if get_settings().get("COHERE.KEY", None):
litellm.cohere_key = get_settings().cohere.key
if get_settings().get("REPLICATE.KEY", None):
litellm.replicate_key = get_settings().replicate.key
if get_settings().get("REPLICATE.KEY", None):
litellm.replicate_key = get_settings().replicate.key
if get_settings().get("HUGGINGFACE.KEY", None):
litellm.huggingface_key = get_settings().huggingface.key
if get_settings().get("HUGGINGFACE.API_BASE", None):
litellm.api_base = get_settings().huggingface.api_base
except AttributeError as e:
raise ValueError("OpenAI key is required") from e
@property
def deployment_id(self):
"""
Returns the deployment ID for the OpenAI API.
"""
return get_settings().get("OPENAI.DEPLOYMENT_ID", None)
@retry(exceptions=(APIError, Timeout, TryAgain, AttributeError, RateLimitError),
tries=OPENAI_RETRIES, delay=2, backoff=2, jitter=(1, 3))
async def chat_completion(self, model: str, system: str, user: str, temperature: float = 0.2):
"""
Performs a chat completion using the OpenAI ChatCompletion API.
Retries in case of API errors or timeouts.
Args:
model (str): The model to use for chat completion.
temperature (float): The temperature parameter for chat completion.
system (str): The system message for chat completion.
user (str): The user message for chat completion.
Returns:
tuple: A tuple containing the response and finish reason from the API.
Raises:
TryAgain: If the API response is empty or there are no choices in the response.
APIError: If there is an error during OpenAI inference.
Timeout: If there is a timeout during OpenAI inference.
TryAgain: If there is an attribute error during OpenAI inference.
"""
try:
deployment_id = self.deployment_id
if get_settings().config.verbosity_level >= 2:
logging.debug(
f"Generating completion with {model}"
f"{(' from deployment ' + deployment_id) if deployment_id else ''}"
)
response = await acompletion(
model=model,
deployment_id=deployment_id,
messages=[
{"role": "system", "content": system},
{"role": "user", "content": user}
],
temperature=temperature,
azure=self.azure,
force_timeout=get_settings().config.ai_timeout
)
except (APIError, Timeout, TryAgain) as e:
logging.error("Error during OpenAI inference: ", e)
raise
except (RateLimitError) as e:
logging.error("Rate limit error during OpenAI inference: ", e)
raise
except (Exception) as e:
logging.error("Unknown error during OpenAI inference: ", e)
raise TryAgain from e
if response is None or len(response["choices"]) == 0:
raise TryAgain
resp = response["choices"][0]['message']['content']
finish_reason = response["choices"][0]["finish_reason"]
print(resp, finish_reason)
return resp, finish_reason
| [] |
2024-01-10 | 5l1v3r1/AgentBench | src~agents~api_agents~claude_agents.py | import anthropic
from src.agent import Agent
import os
import json
import sys
import time
import re
import math
import random
import datetime
import argparse
import requests
from typing import List, Callable
import dataclasses
from copy import deepcopy
class Claude(Agent):
def __init__(self, api_args=None, **config):
if not api_args:
api_args = {}
api_args = deepcopy(api_args)
self.key = api_args.pop("key", None) or os.getenv('Claude_API_KEY')
api_args["model"] = api_args.pop("model", None)
if not self.key:
raise ValueError("Claude API KEY is required, please assign api_args.key or set OPENAI_API_KEY environment variable.")
if not api_args["model"]:
raise ValueError("Claude model is required, please assign api_args.model.")
self.api_args = api_args
if not self.api_args.get("stop_sequences"):
self.api_args["stop_sequences"] = [anthropic.HUMAN_PROMPT]
        super().__init__(**config)
def inference(self, history: List[dict]) -> str:
prompt = ""
for message in history:
if message["role"] == "user":
prompt += anthropic.HUMAN_PROMPT + message["content"]
else:
prompt += anthropic.AI_PROMPT + message["content"]
prompt += anthropic.AI_PROMPT
c = anthropic.Client(self.key)
resp = c.completion(
prompt=prompt,
**self.api_args
)
return resp
| [
"content"
] |
2024-01-10 | AKapich/Football_Clustering_App | app~clustering_functions.py | import openai
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from yellowbrick.cluster import KElbowVisualizer
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
import process4positions as p4p
def basic_clustering(df, position, age_lower_threshold=16, age_upper_threshold=43, minute_threshold=540, n_components=2, ):
df = df[df['Age'] >= f'{str(age_lower_threshold)}-000'][df['Age'] <= f'{str(age_upper_threshold)}-000']
df = df[df['Minutes'] >= minute_threshold]
df = df[df['DetailedPosition'].isin(p4p.position_name_dict[position])]
# dataframe before processing and limiting amount of columns just to the ones used during clustering
raw_df = deepcopy(df)
if position !='Goalkeeper':
p4p.calculate_new_metrics(raw_df)
df = p4p.process(df, position)
df.fillna(0, inplace=True)
scaler = StandardScaler()
scaled_data = scaler.fit_transform(df)
# REASONABLE TO USE PIPELINE
pca = PCA()
tsne = TSNE(n_components=n_components, random_state = 42)
pipeline = Pipeline([('pca', pca), ('tsne', tsne)])
reduced_data = pipeline.fit_transform(scaled_data)
#n_clusters = p4p.play_patterns_dict[position]
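    # pick the number of clusters with the elbow (distortion) method over k = 2..11 instead of a fixed per-position count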
visualizer = KElbowVisualizer(KMeans(random_state=42, n_init=10), k=(2,12), metric='distortion')
visualizer.fit(reduced_data)
n_clusters = visualizer.elbow_value_
kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
cluster_labels = kmeans.fit_predict(reduced_data)
# Join clusters and the df
cluster_labels = pd.Series(cluster_labels)
cluster_labels.index = raw_df.index
clustered_df = pd.concat([cluster_labels, raw_df], axis=1)
clustered_df.columns = ['Cluster']+list(raw_df.columns)
clustered_df['Cluster'] = clustered_df['Cluster']+1
return clustered_df
def beeswarm_comparison(clustered_df, metric, cluster2highlight):
fig, ax = plt.subplots(figsize=(10, 2.5))
    plot_df = clustered_df[['Cluster', metric]].copy()  # copy to avoid SettingWithCopyWarning when stripping '%' below
if plot_df[metric].dtype == 'object':
plot_df[metric] = plot_df[metric].apply(lambda x: float(str(x).replace('%', '')))
palette = dict(zip(clustered_df['Cluster'].unique(), ['#fafafa']*len(clustered_df['Cluster'].unique())))
palette[cluster2highlight] = '#ff4b4b'
sns.swarmplot(data=plot_df, x=metric, hue='Cluster', palette=palette, ax=ax, legend=None)
fig.set_facecolor('#0e1117')
ax.set_facecolor('#0e1117')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_color('#fafafa')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis='x', colors='#fafafa')
plt.suptitle(f'{metric}', color='#fafafa', size=16, fontweight="bold", family="monospace", ha='center')
ax.yaxis.grid(True, linestyle='--', alpha=0.7)
return fig
def chatgpt_call(prompt, model="gpt-3.5-turbo"):
response = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": prompt}],
temperature=0,
)
return response.choices[0].message["content"]
def generate_AI_analysis(df_cluster, position):
cluster_description = df_cluster.describe()
prompt = f"""
You are a football analyst. Your task is to describe the player profile for a cluster of players. \
Their position is {position}. \
You will be given a dataframe summarizing the metrics used for analysis within our cluster.\
If not stated differently, the stats are per 90 minutes. In Your analysis take into consideration AS MANY stats as You can. \
YOU MUSTN'T MENTION THE NUMBERS DIRECTLY, focus on the style of play that summary numbers indicate. \
Make sure Your analysis ends with paragraph describing THE STYLE of play for our cluster. \
The dataframe will delimited by 3 backticks, i.e. ```.
```{cluster_description}```
"""
return chatgpt_call(prompt) | [
"\n You are a football analyst. Your task is to describe the player profile for a cluster of players. Their position is PLACEHOLDER. You will be given a dataframe summarizing the metrics used for analysis within our cluster. If not stated differently, the stats are per 90 minutes. In Your analysis take into consideration AS MANY stats as You can. YOU MUSTN'T MENTION THE NUMBERS DIRECTLY, focus on the style of play that summary numbers indicate. Make sure Your analysis ends with paragraph describing THE STYLE of play for our cluster. The dataframe will delimited by 3 backticks, i.e. ```.\n\n ```PLACEHOLDER```\n \n "
] |
2024-01-10 | IanYangChina/DRL_Implementation | drl_implementation~agent~utils~segment_tree.py | """
The segment tree implementation from OpenAI baseline GitHub repo:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/segment_tree.py
This is used in the prioritized replay buffer.
"""
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max)
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
        allows sampling indexes according to the discrete
probability efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
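# Example usage (illustrative sketch, assuming `random` is imported):
#   tree = SumSegmentTree(capacity=8)
#   tree[3] = 0.5                                # priorities live in the leaves
#   mass = random.random() * tree.sum()          # total priority as the normaliser
#   sampled_idx = tree.find_prefixsum_idx(mass)  # O(log n) proportional sampling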
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
| [] |
2024-01-10 | fovi-llc/wiki3ai | packages~research-assistant~research_assistant~writer.py | from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import ConfigurableField
MODEL_NAME = 'gemini-pro'
WRITER_SYSTEM_PROMPT = "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text." # noqa: E501
# Report prompts from https://github.com/assafelovic/gpt-researcher/blob/master/gpt_researcher/master/prompts.py
RESEARCH_REPORT_TEMPLATE = """Information:
--------
{research_summary}
--------
Using the above information, answer the following question or topic: "{question}" in a detailed report -- \
The report should focus on the answer to the question, should be well structured, informative, \
in depth, with facts and numbers if available and a minimum of 1,200 words.
You should strive to write the report as long as you can using all relevant and necessary information provided.
You must write the report with markdown syntax.
You MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.
Write all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each.
You must write the report in apa format.
Please do your best, this is very important to my career.""" # noqa: E501
RESOURCE_REPORT_TEMPLATE = """Information:
--------
{research_summary}
--------
Based on the above information, generate a bibliography recommendation report for the following question or topic: "{question}". \
The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. \
Focus on the relevance, reliability, and significance of each source. \
Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. \
Include relevant facts, figures, and numbers whenever available. \
The report should have a minimum length of 1,200 words.
Please do your best, this is very important to my career.""" # noqa: E501
OUTLINE_REPORT_TEMPLATE = """Information:
--------
{research_summary}
--------
Using the above information, generate an outline for a research report in Markdown syntax for the following question or topic: "{question}". \
The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. \
The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. \
Use appropriate Markdown syntax to format the outline and ensure readability.
Please do your best, this is very important to my career.""" # noqa: E501
model = ChatGoogleGenerativeAI(model=MODEL_NAME, convert_system_message_to_human=True, temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
("system", WRITER_SYSTEM_PROMPT),
("user", RESEARCH_REPORT_TEMPLATE),
# ("user", f'{WRITER_SYSTEM_PROMPT}\n\n{RESEARCH_REPORT_TEMPLATE}'),
]
).configurable_alternatives(
ConfigurableField("report_type"),
default_key="research_report",
resource_report=ChatPromptTemplate.from_messages(
[
("system", WRITER_SYSTEM_PROMPT),
("user", RESOURCE_REPORT_TEMPLATE),
# ("user", f'{WRITER_SYSTEM_PROMPT}\n\n{RESOURCE_REPORT_TEMPLATE}'),
]
),
outline_report=ChatPromptTemplate.from_messages(
[
("system", WRITER_SYSTEM_PROMPT),
("user", OUTLINE_REPORT_TEMPLATE),
# ("user", f'{WRITER_SYSTEM_PROMPT}\n\n{OUTLINE_REPORT_TEMPLATE}'),
]
),
)
chain = prompt | model | StrOutputParser()
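# Illustrative: a non-default template can be selected at run time via the configurable field, e.g.
#   chain.with_config(configurable={"report_type": "resource_report"})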
| [
"Information: \n--------\n{research_summary}\n--------\n\nBased on the above information, generate a bibliography recommendation report for the following question or topic: \"{question}\". The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. Focus on the relevance, reliability, and significance of each source. Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. Include relevant facts, figures, and numbers whenever available. The report should have a minimum length of 1,200 words.\n\nPlease do your best, this is very important to my career.",
"[('system', 'You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.'), ('user', 'Information: \\n--------\\n{research_summary}\\n--------\\n\\nUsing the above information, answer the following question or topic: \"{question}\" in a detailed report -- The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available and a minimum of 1,200 words.\\n\\nYou should strive to write the report as long as you can using all relevant and necessary information provided.\\nYou must write the report with markdown syntax.\\nYou MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.\\nWrite all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each.\\nYou must write the report in apa format.\\nPlease do your best, this is very important to my career.')]",
"Information: \n--------\n{research_summary}\n--------\n\nUsing the above information, generate an outline for a research report in Markdown syntax for the following question or topic: \"{question}\". The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. Use appropriate Markdown syntax to format the outline and ensure readability.\n\nPlease do your best, this is very important to my career.",
"Information: \n--------\n{research_summary}\n--------\n\nUsing the above information, answer the following question or topic: \"{question}\" in a detailed report -- The report should focus on the answer to the question, should be well structured, informative, in depth, with facts and numbers if available and a minimum of 1,200 words.\n\nYou should strive to write the report as long as you can using all relevant and necessary information provided.\nYou must write the report with markdown syntax.\nYou MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.\nWrite all used source urls at the end of the report, and make sure to not add duplicated sources, but only one reference for each.\nYou must write the report in apa format.\nPlease do your best, this is very important to my career.",
"[('system', 'You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.'), ('user', 'Information: \\n--------\\n{research_summary}\\n--------\\n\\nUsing the above information, generate an outline for a research report in Markdown syntax for the following question or topic: \"{question}\". The outline should provide a well-structured framework for the research report, including the main sections, subsections, and key points to be covered. The research report should be detailed, informative, in-depth, and a minimum of 1,200 words. Use appropriate Markdown syntax to format the outline and ensure readability.\\n\\nPlease do your best, this is very important to my career.')]",
"research_report",
"report_type",
"You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.",
"{question}",
"[('system', 'You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.'), ('user', 'Information: \\n--------\\n{research_summary}\\n--------\\n\\nBased on the above information, generate a bibliography recommendation report for the following question or topic: \"{question}\". The report should provide a detailed analysis of each recommended resource, explaining how each source can contribute to finding answers to the research question. Focus on the relevance, reliability, and significance of each source. Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax. Include relevant facts, figures, and numbers whenever available. The report should have a minimum length of 1,200 words.\\n\\nPlease do your best, this is very important to my career.')]"
] |
2024-01-10 | Gentopia-AI/Gentopia | gentopia~assembler~agent_assembler.py | import os
from typing import Union, Dict, Optional
from gentopia.prompt import PromptTemplate
from gentopia.agent.base_agent import BaseAgent
from gentopia.assembler.config import Config
from gentopia.llm import OpenAIGPTClient
from gentopia.utils.util import check_huggingface
if check_huggingface():
from gentopia.llm import HuggingfaceLLMClient
from gentopia.llm.base_llm import BaseLLM
from gentopia.llm.llm_info import TYPES
from gentopia.manager.base_llm_manager import BaseLLMManager
from gentopia.memory.api import MemoryWrapper
from gentopia.memory.api import create_memory
from gentopia.model.agent_model import AgentType
from gentopia.model.param_model import OpenAIParamModel, HuggingfaceParamModel
from gentopia.tools import *
from gentopia.tools import BaseTool
from gentopia.tools.basetool import ToolMetaclass
class AgentAssembler:
"""
This class is responsible for assembling an agent instance from a configuration file or dictionary and its dependencies.
:param file: A path to a configuration file.
:type file: str, optional
:param config: A configuration dictionary.
:type config: dict, optional
"""
def __init__(self, file=None, config=None):
"""
Constructor method.
Initializes an instance of the AgentAssembler class.
:param file: A path to a configuration file.
:type file: str, optional
:param config: A configuration dictionary.
:type config: dict, optional
"""
if file is not None:
self.config = Config.from_file(file)
elif config is not None:
self.config = Config.from_dict(config)
self.plugins: Dict[str, Union[BaseAgent, BaseTool]] = dict()
self.manager: Optional[BaseLLMManager] = None
def get_agent(self, config=None):
"""
This method returns an agent instance based on the provided configuration.
:param config: A configuration dictionary.
:type config: dict, optional
:raises AssertionError: If the configuration is None.
:return: An agent instance.
:rtype: BaseAgent
"""
if config is None:
config = self.config
assert config is not None
auth = config.get('auth', {})
self._set_auth_env(auth)
# Agent config
name = config.get('name')
_type = AgentType(config.get('type'))
version = config.get('version', "")
description = config.get('description', "")
AgentClass = AgentType.get_agent_class(_type)
prompt_template = self._get_prompt_template(config.get('prompt_template'))
agent = AgentClass(
name=name,
type=_type,
version=version,
description=description,
target_tasks=config.get('target_tasks', []),
llm=self._get_llm(config['llm']),
prompt_template=prompt_template,
plugins=self._parse_plugins(config.get('plugins', [])),
memory=self._parse_memory(config.get('memory', [])) # initialize memory
)
return agent
def _parse_memory(self, obj) -> MemoryWrapper:
"""
This method parses the memory configuration and returns a memory wrapper instance.
:param obj: A configuration dictionary containing memory parameters.
:type obj: dict
:return: A memory wrapper instance.
:rtype: MemoryWrapper
"""
if obj == []:
return None
memory_type = obj["memory_type"] # memory_type: ["pinecone"]
return create_memory(memory_type, obj['conversation_threshold'], obj['reasoning_threshold'], **obj["params"]) # params of memory. Different memories may have different params
def _get_llm(self, obj) -> Union[BaseLLM, Dict[str, BaseLLM]]:
"""
This method returns a language model manager (LLM) instance based on the provided configuration.
:param obj: A configuration dictionary or string.
:type obj: dict or str
:raises AssertionError: If the configuration is not a dictionary or string.
:raises ValueError: If the specified LLM is not supported.
:return: An LLM instance or dictionary of LLM instances.
:rtype: Union[BaseLLM, Dict[str, BaseLLM]]
"""
assert isinstance(obj, dict) or isinstance(obj, str)
if isinstance(obj, dict) and ("model_name" not in obj):
return {
# list(item.keys())[0]: self._parse_llm(list(item.values())[0]) for item in obj
key: self._parse_llm(obj[key]) for key in obj
}
else:
return self._parse_llm(obj)
def _parse_llm(self, obj) -> BaseLLM:
"""
This method parses the Language Model Manager (LLM) configuration and returns an LLM instance.
:param obj: A configuration dictionary or string.
:type obj: dict or str
:raises ValueError: If the specified LLM is not supported.
:return: An LLM instance.
:rtype: BaseLLM
"""
if isinstance(obj, str):
name = obj
model_param = dict()
else:
name = obj['model_name']
model_param = obj.get('params', dict())
llm = None
if TYPES.get(name, None) == "OpenAI":
# key = obj.get('key', None)
params = OpenAIParamModel(**model_param)
llm = OpenAIGPTClient(model_name=name, params=params)
elif TYPES.get(name, None) == "Huggingface":
try:
import torch
except ImportError:
raise ImportError("Huggingface LLM requires PyTorch to be installed.")
device = obj.get('device', 'gpu' if torch.cuda.is_available() else 'cpu')
params = HuggingfaceParamModel(**model_param)
llm = HuggingfaceLLMClient(model_name=name, params=params, device=device)
if llm is None:
raise ValueError(f"LLM {name} is not supported currently.")
if self.manager is None:
return llm
return self.manager.get_llm(name, params, cls=HuggingfaceLLMClient, device=device)
def _get_prompt_template(self, obj):
"""
This method returns a prompt template instance based on the provided configuration.
:param obj: A configuration dictionary or prompt template instance.
:type obj: dict or PromptTemplate
:raises AssertionError: If the configuration is not a dictionary or prompt template instance.
:return: A prompt template instance.
:rtype: PromptTemplate
"""
assert isinstance(obj, dict) or isinstance(obj, PromptTemplate)
if isinstance(obj, dict):
return {
key: self._parse_prompt_template(obj[key]) for key in obj
}
else:
ans = self._parse_prompt_template(obj)
return ans
def _parse_prompt_template(self, obj):
"""
This method parses the prompt template configuration and returns a prompt template instance.
:param obj: A configuration dictionary or prompt template instance.
:type obj: dict or PromptTemplate
:raises AssertionError: If the configuration is not a dictionary or prompt template instance.
:return: A prompt template instance.
:rtype: PromptTemplate
"""
assert isinstance(obj, dict) or isinstance(obj, PromptTemplate)
if isinstance(obj, PromptTemplate):
return obj
input_variables = obj['input_variables']
template = obj['template']
template_format = obj.get('template_format', 'f-string')
validate_template = bool(obj.get('validate_template', True))
return PromptTemplate(input_variables=input_variables, template=template, template_format=template_format,
validate_template=validate_template)
def _parse_plugins(self, obj):
"""
This method parses the plugin configuration and returns a list of plugin instances.
:param obj: A list of plugin configuration dictionaries or custom class
:type obj: list
:raises AssertionError: If the configuration is not a list.
:return: A list of plugin instances.
:rtype: list
"""
assert isinstance(obj, list)
result = []
for plugin in obj:
# If referring to a tool class then directly load it
if issubclass(plugin.__class__, ToolMetaclass):
result.append(plugin())
continue
# Directly invoke already loaded plugin
if plugin.get('name', "") in self.plugins:
_plugin = self.plugins[plugin['name']]
result.append(_plugin)
continue
# Agent as plugin
if plugin.get('type', "") in AgentType.__members__:
agent = self.get_agent(plugin)
result.append(agent)
self.plugins[plugin['name']] = agent
# Tool as plugin
else:
params = plugin.get('params', dict())
tool = load_tools(plugin['name'])(**params)
result.append(tool)
self.plugins[plugin['name']] = tool
return result
def _set_auth_env(self, obj):
"""
This method sets environment variables for authentication.
:param obj: A dictionary containing authentication information.
:type obj: dict
"""
for key in obj:
os.environ[key] = obj.get(key)
| [
"f-string",
"validate_template",
"template_format",
"prompt_template"
] |
2024-01-10 | Gentopia-AI/Gentopia | gentopia~model~agent_model.py | from __future__ import annotations
from enum import Enum
from pydantic import BaseModel
#from gentopia.agent.base_agent import BaseAgent
class AgentType(Enum):
"""
Enumerated type for agent types.
"""
openai = "openai"
react = "react"
rewoo = "rewoo"
vanilla = "vanilla"
openai_memory = "openai_memory"
@staticmethod
def get_agent_class(_type: AgentType):
"""
Get agent class from agent type.
:param _type: agent type
:return: agent class
"""
if _type == AgentType.react:
from gentopia.agent.react import ReactAgent
return ReactAgent
elif _type == AgentType.rewoo:
from gentopia.agent.rewoo import RewooAgent
return RewooAgent
elif _type == AgentType.vanilla:
from gentopia.agent.vanilla import VanillaAgent
return VanillaAgent
elif _type == AgentType.openai:
from gentopia.agent.openai import OpenAIFunctionChatAgent
return OpenAIFunctionChatAgent
elif _type == AgentType.openai_memory:
from gentopia.agent.openai_memory import OpenAIMemoryChatAgent
return OpenAIMemoryChatAgent
else:
raise ValueError(f"Unknown agent type: {_type}")
class AgentOutput(BaseModel):
"""
Pydantic model for agent output.
"""
output: str
cost: float
token_usage: int | [] |
2024-01-10 | evanmschultz/LLM_Experiments | FreeCodeCampLLM_DevCoures~python_replit.py | from langchain.agents.agent_toolkits import create_python_agent
from langchain.tools.python.tool import PythonREPLTool
from langchain.python import PythonREPL
from langchain.llms.openai import OpenAI
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
import os
agent_executor = create_python_agent(
llm=OpenAI(temperature=0.5, max_tokens=2000),
tool=PythonREPLTool(),
verbose=True,
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
agent_executor.run("Give me a binary search algorithm and test with 5 examples?")
| [] |
2024-01-10 | evanmschultz/LLM_Experiments | FreeCodeCampLLM_DevCoures~internet_browsing_Arxiv_1.py | from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools, initialize_agent, AgentType, AgentExecutor
import os
# If the parser is erroring out, remember to set temperature to a higher value!!!
llm = ChatOpenAI(temperature=0.3)
tools: list = load_tools(["arxiv"])
agent_chain: AgentExecutor = initialize_agent(
tools=tools,
llm=llm,
max_iterations=5,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
)
agent_chain.run(
"What does the 'Attention is All You Need' paper introduce?",
)
| [] |
2024-01-10 | evanmschultz/LLM_Experiments | FreeCodeCampLLM_DevCoures~YouTube_Chainlit.py | from langchain import OpenAI, LLMMathChain, SerpAPIWrapper
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.chat_models import ChatOpenAI
from langchain.tools import YouTubeSearchTool
import os
import chainlit
from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools, initialize_agent, AgentType
import os
@chainlit.on_chat_start
def start():
tool = YouTubeSearchTool()
tools: list[Tool] = [
Tool(
name="Search",
func=tool.run,
description="Useful for when you need to give links to youtube videos. Remember to put https://youtube.com/ in front of every link to complete it",
)
]
agent: AgentExecutor = initialize_agent(
tools=tools,
llm=OpenAI(temperature=0),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
chainlit.user_session.set("agent", agent)
@chainlit.on_message
async def main(message):
agent: AgentExecutor = chainlit.user_session.get("agent") # type: ignore
callbacks = chainlit.LangchainCallbackHandler(stream_final_answer=True)
await chainlit.make_async(agent.run)(message, callbacks=[callbacks])
| [] |
2024-01-10 | evanmschultz/LLM_Experiments | FreeCodeCampLLM_DevCoures~CLI_GPT.py | from langchain.tools import ShellTool
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain.agents import AgentType, AgentExecutor
import os
shell_tool = ShellTool()
llm = ChatOpenAI(temperature=0, model="gpt-4")
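# escape curly braces in the tool's args schema so they are not treated as prompt-template variables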
shell_tool.description = shell_tool.description + f"args {shell_tool.args}".replace(
"{", "{{"
).replace("}", "}}")
agent: AgentExecutor = initialize_agent(
tools=[shell_tool],
llm=llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent.run(
"Create a text file called empty and inside it, add code that trains a basic convolutional neural network for 4 epochs"
)
| [] |
2024-01-10 | evanmschultz/LLM_Experiments | FreeCodeCampLLM_DevCoures~YouTube_Search.py | from langchain.tools import YouTubeSearchTool
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType, AgentExecutor
from langchain.llms import OpenAI
from langchain import LLMMathChain, SerpAPIWrapper
import os
tool = YouTubeSearchTool()
tools: list[Tool] = [
Tool(
name="Search",
func=tool.run,
description="Useful for when you need to give links to youtube videos. Remember to put https://youtube.com/ in front of every link to complete it",
)
]
agent: AgentExecutor = initialize_agent(
tools=tools,
llm=OpenAI(temperature=0),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent.run("How do you avoid over dependency in python class construction?")
| [] |
2024-01-10 | evanmschultz/LLM_Experiments | FreeCodeCampLLM_DevCoures~internet_browsing_Arxiv_2.py | from langchain import OpenAI, LLMMathChain, SerpAPIWrapper
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.chat_models import ChatOpenAI
import os
import chainlit
from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools, initialize_agent, AgentType
import os
@chainlit.on_chat_start
def start():
llm = ChatOpenAI(temperature=0.5, streaming=True)
tools: list = load_tools(["arxiv"])
agent_chain: AgentExecutor = initialize_agent(
tools=tools,
llm=llm,
max_iterations=10,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
handle_parsing_errors=True,
)
chainlit.user_session.set("agent", agent_chain)
@chainlit.on_message
async def main(message):
agent: AgentExecutor = chainlit.user_session.get("agent") # type: ignore
callbacks = chainlit.LangchainCallbackHandler(stream_final_answer=True)
await chainlit.make_async(agent.run)(message, callbacks=[callbacks])
| [] |
2024-01-10 | evanmschultz/LLM_Experiments | FreeCodeCampLLM_DevCoures~human_as_a_tool.py | from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType, AgentExecutor
import os
llm = ChatOpenAI(temperature=0.5)
math_llm = OpenAI(temperature=0.5)
tools: list = load_tools(
["human", "llm-math"],
llm=math_llm,
)
agent_chain: AgentExecutor = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent_chain.run("If I have 5 apples and eat 2, how many do I have left?")
| [] |
2024-01-10 | ltJustWorks/codejam_propaganda_tracker | backend~fact_checker~fact_checker_app~utils~gpt_test.py | from openai import OpenAI
client = OpenAI()
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
{"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."}
]
)
print(completion.choices[0].message) | [
"Compose a poem that explains the concept of recursion in programming.",
"You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."
] |
2024-01-10 | realrohilbansal/file-retrieval-bot | enquiry.py | from langchain.document_loaders import TextLoader # loads text documents
from langchain.text_splitter import CharacterTextSplitter # splits text into chunks
from langchain.embeddings import HuggingFaceInstructEmbeddings as embd # embeds text into vectors
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS # vector database
from langchain.chains import RetrievalQAWithSourcesChain # question answering chain
# takes in a file and returns a list of chunks
def chunking(file):
loader = TextLoader(file)
data = loader.load()
splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1600,
chunk_overlap=100
)
chunks = splitter.split_documents(data)
return chunks
# takes in a list of chunks and returns a vector database with chunks embedded as vectors
def embedder(chunks):
embeddings = OpenAIEmbeddings()
vectordb = FAISS.from_documents(chunks, embeddings)
return vectordb
# takes in a vector database and a question and returns the answer
def query(vectordb, llm, question):
chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever = vectordb.as_retriever() )
result = chain({'question': question}, return_only_outputs=True)
return result['answer'] | [] |
2024-01-10 | conatsu/langchain-test | src~chatbot_engine.py | from typing import List
import langchain
from langchain.agents import AgentType, initialize_agent
from langchain.agents.agent_toolkits import VectorStoreInfo, VectorStoreToolkit
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.tools import BaseTool
langchain.verbose = True
def create_index() -> VectorStoreIndexWrapper:
loader = DirectoryLoader("./src/", glob="**/*.py")
return VectorstoreIndexCreator().from_loaders([loader])
def create_tools(index: VectorStoreIndexWrapper) -> List[BaseTool]:
vectorstore_info = VectorStoreInfo(
vectorstore=index.vectorstore,
name="langchain-test source code",
description="Source code of application named langchain-test",
)
toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
return toolkit.get_tools()
def chat(
message: str, history: ChatMessageHistory, index: VectorStoreIndexWrapper
) -> str:
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
tools = create_tools(index)
memory = ConversationBufferMemory(
chat_memory=history, memory_key="chat_history", return_messages=True
)
agent_chain = initialize_agent(
tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, memory=memory
)
return agent_chain.run(input=message)
| [] |
2024-01-10 | conatsu/langchain-test | src~slack_app.py | import os
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from dotenv import load_dotenv
from chatbot_engine import chat, create_index
from langchain.memory import ChatMessageHistory
load_dotenv()
index = create_index()
# Initialize the app with the bot token and Socket Mode handler
app = App(token=os.environ.get("SLACK_BOT_TOKEN"))
def fetch_history(channel: str) -> ChatMessageHistory:
bot_user_id = app.client.auth_test()["user_id"]
conversation_history = app.client.conversations_history(
channel=channel, limit=3)
history = ChatMessageHistory()
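    # Slack returns messages newest-first, so iterate in reverse to rebuild chronological order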
for message in reversed(conversation_history["messages"]):
text = message["text"]
if message["user"] == bot_user_id:
history.add_ai_message(text)
else:
history.add_user_message(text)
return history
@app.event("app_mention")
def handle_mention(event, say):
channel = event["channel"]
history = fetch_history(channel)
message = event["text"]
bot_message = chat(message, history, index)
say(bot_message)
# Start the app
if __name__ == "__main__":
app_env = os.environ.get("APP_ENV", "production")
if app_env == "production":
app.start(port=int(os.environ.get("PORT", 3000)))
else:
SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
| [] |
2024-01-10 | qmeng222/Spotify-Playlist-Wizard | app_polished.py | import argparse
import datetime
import logging
import os
import json
import openai
import spotipy
from dotenv import load_dotenv
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
parser = argparse.ArgumentParser(description="Simple command line utility")
parser.add_argument("-p", type=str, help="The prompt to describing the playlist.")
parser.add_argument("-n", type=int, default="12", help="The number of songs to be added.")
parser.add_argument("-envfile", type=str, default=".env", required=False, help='A dotenv file with your environment variables: "SPOTIFY_CLIENT_ID", "SPOTIFY_CLIENT_SECRET", "OPENAI_API_KEY"')
args = parser.parse_args()
load_dotenv(args.envfile)
if any([x not in os.environ for x in ("SPOTIFY_CLIENT_ID", "SPOTIFY_CLIENT_SECRET", "OPENAI_API_KEY")]):
raise ValueError("Error: missing environment variables. Please check your env file.")
if args.n not in range(1,50):
raise ValueError("Error: n should be between 0 and 50")
openai.api_key = os.environ["OPENAI_API_KEY"]
playlist_prompt = args.p
count = args.n
playlist = get_playlist(playlist_prompt, count)
add_songs_to_spotify(playlist_prompt, playlist)
def get_playlist(prompt, count=8):
example_json = """
[
{"song": "Everybody Hurts", "artist": "R.E.M."},
{"song": "Nothing Compares 2 U", "artist": "Sinead O'Connor"},
{"song": "Tears in Heaven", "artist": "Eric Clapton"},
{"song": "Hurt", "artist": "Johnny Cash"},
{"song": "Yesterday", "artist": "The Beatles"}
]
"""
messages = [
{"role": "system", "content": """You are a helpful playlist generating assistant.
You should generate a list of songs and their artists according to a text prompt.
Your should return a JSON array, where each element follows this format: {"song": <song_title>, "artist": <artist_name>}
"""
},
{"role": "user", "content": "Generate a playlist of 5 songs based on this prompt: super super sad songs"},
{"role": "assistant", "content": example_json},
{"role": "user", "content": f"Generate a playlist of {count} songs based on this prompt: {prompt}"},
]
response = openai.ChatCompletion.create(
messages=messages,
model="gpt-4",
max_tokens=400
)
playlist = json.loads(response["choices"][0]["message"]["content"])
return playlist
def add_songs_to_spotify(playlist_prompt, playlist):
# Sign up as a developer and register your app at https://developer.spotify.com/dashboard/applications
# Step 1. Create an Application.
# Step 2. Copy your Client ID and Client Secret.
spotipy_client_id = os.environ["SPOTIFY_CLIENT_ID"] # Use your Spotify API's keypair's Client ID
spotipy_client_secret = os.environ["SPOTIFY_CLIENT_SECRET"] # Use your Spotify API's keypair's Client Secret
    # Step 3. Click `Edit Settings`, add `http://localhost:9999` as a "Redirect URI"
spotipy_redirect_url = "http://localhost:9999" # Your browser will return page not found at this step. We'll grab the URL and paste back in to our console
# Step 4. Click `Users and Access`. Add your Spotify account to the list of users (identified by your email address)
# Spotipy Documentation
# https://spotipy.readthedocs.io/en/2.22.1/#getting-started
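    # Added note (not in the original file): on the first run, SpotifyOAuth opens a
    # browser window for consent and caches the resulting token locally (a ".cache"
    # file by default), so subsequent runs reuse it without prompting again.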
sp = spotipy.Spotify(
auth_manager=spotipy.SpotifyOAuth(
client_id=spotipy_client_id,
client_secret=spotipy_client_secret,
redirect_uri=spotipy_redirect_url,
scope="playlist-modify-private",
)
)
current_user = sp.current_user()
assert current_user is not None
track_uris = []
for item in playlist:
artist, song = item["artist"], item["song"]
# https://developer.spotify.com/documentation/web-api/reference/#/operations/search
advanced_query = f"artist:({artist}) track:({song})"
basic_query = f"{song} {artist}"
for query in [advanced_query, basic_query]:
log.debug(f"Searching for query: {query}")
search_results = sp.search(q=query, limit=10, type="track") # , market=market)
if not search_results["tracks"]["items"] or search_results["tracks"]["items"][0]["popularity"] < 20:
continue
else:
good_guess = search_results["tracks"]["items"][0]
print(f"Found: {good_guess['name']} [{good_guess['id']}]")
# print(f"FOUND USING QUERY: {query}")
track_uris.append(good_guess["id"])
break
else:
print(f"Queries {advanced_query} and {basic_query} returned no good results. Skipping.")
created_playlist = sp.user_playlist_create(
current_user["id"],
public=False,
name=f"{playlist_prompt} ({datetime.datetime.now().strftime('%c')})",
)
sp.user_playlist_add_tracks(current_user["id"], created_playlist["id"], track_uris)
print("\n")
print(f"Created playlist: {created_playlist['name']}")
print(created_playlist["external_urls"]["spotify"])
if __name__ == "__main__":
main()
| [
"You are a helpful playlist generating assistant. \n You should generate a list of songs and their artists according to a text prompt.\n Your should return a JSON array, where each element follows this format: {\"song\": <song_title>, \"artist\": <artist_name>}\n ",
"Generate a playlist of 5 songs based on this prompt: super super sad songs",
"\n [\n {\"song\": \"Everybody Hurts\", \"artist\": \"R.E.M.\"},\n {\"song\": \"Nothing Compares 2 U\", \"artist\": \"Sinead O'Connor\"},\n {\"song\": \"Tears in Heaven\", \"artist\": \"Eric Clapton\"},\n {\"song\": \"Hurt\", \"artist\": \"Johnny Cash\"},\n {\"song\": \"Yesterday\", \"artist\": \"The Beatles\"}\n ]\n ",
"Generate a playlist of PLACEHOLDER songs based on this prompt: PLACEHOLDER"
] |
2024-01-10 | codeaudit/vel | vel~rl~env~wrappers~env_normalize.py | import gym
import numpy as np
from vel.openai.baselines.common.running_mean_std import RunningMeanStd
class EnvNormalize(gym.Wrapper):
"""
Single environment normalization based on VecNormalize from OpenAI baselines
"""
def __init__(self, env, normalize_observations=True, normalize_returns=True,
clip_observations=10., clip_rewards=10., gamma=0.99, epsilon=1e-8):
super().__init__(env)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if normalize_observations else None
self.ret_rms = RunningMeanStd(shape=()) if normalize_returns else None
self.clipob = clip_observations
self.cliprew = clip_rewards
self.ret = 0.0
self.gamma = gamma
self.epsilon = epsilon
def step(self, action):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.env.step(action)
self.ret = self.ret * self.gamma + rews
obs = self._filter_observation(obs)
if self.ret_rms:
self.ret_rms.update(np.array([self.ret]))
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
return obs, rews, news, infos
def _filter_observation(self, obs):
if self.ob_rms:
self.ob_rms.update(obs[None])
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs.astype(np.float32)
else:
return obs
def reset(self):
"""
Reset all environments
"""
obs = self.env.reset()
return self._filter_observation(obs)
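# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal illustration of wrapping a single environment. The environment id
# ("CartPole-v1") is an illustrative assumption, and the snippet targets the
# classic 4-tuple gym step API that this wrapper itself expects.
if __name__ == "__main__":
    env = EnvNormalize(gym.make("CartPole-v1"))
    obs = env.reset()
    for _ in range(5):
        # Observations and rewards come back normalized and clipped.
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()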
| [] |
2024-01-10 | Ale-Mildiner/Tesis | gpt-test.py | #%%
import openai
import os
import requests
import numpy as np
import pandas as pd
key = ""
openai.api_key = key  # NOTE: 'key' is expected to hold the actual API key
#response = openai.Completion.create(model="text-davinci-003", prompt="Say this is a test", temperature=0, max_tokens=7)
url = 'https://api.openai.com/v1/chat/completions'
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer '
}
data = {
'model': 'gpt-3.5-turbo',
'messages': [
{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': 'I want the answer be a list of python. Sperate in charaters the flowing sentence "Hi how are you?"'}
]
}
# Send the request to the API
response = requests.post(url, headers=headers, json=data)
result = response.json()
# Get the model's response
response_text = result['choices'][0]['message']['content']
print(response_text)
#%%
import numpy as np
import pandas as pd
path = "d:/Facultad/Tesis/"
base = pd.read_csv(path+'Corpus_medios_nac.csv', nrows = 10)
notas = base['nota']
notas = list(notas)
def generate_chat_response(prompt, messages):
openai.api_key = '' # Replace with your actual API key
# Format the messages as per OpenAI's chat format
formatted_messages = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
for idx, msg in enumerate(messages):
formatted_messages.append({'role': 'user', 'content': msg})
if idx < len(messages) - 1:
formatted_messages.append({'role': 'assistant', 'content': ''})
    # Generate a response from the chat model (chat-formatted messages require
    # the ChatCompletion endpoint rather than the Completion endpoint)
    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=formatted_messages,
        temperature=0.7,
        max_tokens=50,
        n=1,
        stop=None,
    )
    # Extract the reply from the response and return it
    reply = response['choices'][0]['message']['content'].strip()
return reply
def generate_2(message):
url = 'https://api.openai.com/v1/chat/completions'
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer '
}
data = {
'model': 'gpt-3.5-turbo',
'messages': [
{'role': 'system', 'content': 'You are a helpful assistant.'},
            {'role': 'user', 'content': message}  # use the function parameter rather than the module-level variable
]
}
    # Send the request to the API
response = requests.post(url, headers=headers, json=data)
result = response.json()
    # Get the model's response
response_text = result['choices'][0]['message']['content']
return response_text
prompt = "Chat with the assistant:"
text = notas[0]
messege = f"Extract the qouted phreasis in this text and give me the answer in a list of python with the qouted phrases: {text}"
response = generate_2(messege)
print(response)
#%%
# The embeddings request below was pasted as a raw shell command; it is not
# valid Python, so it is kept commented out for reference only.
# curl https://api.openai.com/v1/embeddings \
#   -H "Content-Type: application/json" \
#   -H "Authorization: Bearer " \
#   -d '{
#     "input": "Your text string goes here",
#     "model": "text-embedding-ada-002"
#   }' | [
"You are a helpful assistant.",
"Chat with the assistant:",
"I want the answer be a list of python. Sperate in charaters the flowing sentence \"Hi how are you?\""
] |
2024-01-10 | daveshap/Raven_MVP | svc_iterator.py | import re
import os
import openai
import requests
import json
from time import sleep
from functions import *
import urllib3
default_sleep = 5
urllib3.disable_warnings()
open_ai_api_key = read_file('openaiapikey.txt')
openai.api_key = open_ai_api_key
last_msg = {'time':0.0}
iterated_actions = list()
context_action_limit = 15
def make_prompt(context, action, cof1, cof2, cof3):
prompt = read_file('base_iterator_prompt.txt')
prompt = prompt.replace('<<CONTEXT>>', context)
prompt = prompt.replace('<<ACTION>>', action)
prompt = prompt.replace('<<COF1>>', cof1)
prompt = prompt.replace('<<COF2>>', cof2)
prompt = prompt.replace('<<COF3>>', cof3)
return prompt
def query_gpt3(context, action, cof1, cof2, cof3):
prompt = make_prompt(context, action, cof1, cof2, cof3)
response = openai.Completion.create(
engine='davinci',
prompt=prompt,
temperature=0.5,
max_tokens=1000,
top_p=1,
frequency_penalty=0.3,
presence_penalty=0.3,
stop=['<<END>>', 'CONTEXT:', 'ACTION:', 'NEW4:'])
return response['choices'][0]['text'].strip().splitlines()
def post_actions(actions, context):
for action in actions:
try:
action = action.strip()
if action == '':
continue
action = re.sub('NEW\d+:', '', action)
payload = dict()
payload['msg'] = action.strip()
payload['irt'] = context['mid']
payload['ctx'] = context['mid']
payload['key'] = 'action.idea'
payload['sid'] = 'action.iterator'
result = nexus_post(payload)
except Exception as oops:
print('ERROR in ITERATOR/POST_ACTIONS:', oops)
def get_cof_evalutions(action):
try:
irt = nexus_get(irt=action['mid'])
cof1 = [i for i in irt if i['sid'] == 'cof1.evaluation'][0]
cof2 = [i for i in irt if i['sid'] == 'cof2.evaluation'][0]
cof3 = [i for i in irt if i['sid'] == 'cof3.evaluation'][0]
return cof1, cof2, cof3
except Exception as oops:
return None, None, None
def query_nexus():
global last_msg
try:
actions = nexus_get(key='action.idea')
actions = [i for i in actions if i['mid'] not in iterated_actions]
for action in actions:
total_actions = nexus_get(ctx=action['ctx'], key='action.idea')
if len(total_actions) > context_action_limit:
continue # if there are a lot of actions for this context already, skip ahead
cof1, cof2, cof3 = get_cof_evalutions(action) # TODO prioritize higher scoring actions
if cof1 and cof2 and cof3: # TODO support multiple COF services
context = nexus_get(mid=action['ctx'])
#print('CONTEXT:', context)
#print('COF1:', cof1)
#print('COF2:', cof2)
#print('COF3:', cof3)
iterations = query_gpt3(context[0]['msg'], action['msg'], cof1['msg'], cof2['msg'], cof3['msg'])
post_actions(iterations, context[0])
iterated_actions.append(action['mid'])
except Exception as oops:
print('ERROR in ITERATOR/QUERY_NEXUS:', oops)
if __name__ == '__main__':
print('Starting Iterator')
while True:
query_nexus()
sleep(default_sleep)
| [
"<<ACTION>>",
"<<CONTEXT>>",
"base_iterator_prompt.txt"
] |
2024-01-10 | daveshap/Raven_MVP | svc_actions.py | import re
import os
import openai
import requests
import json
from time import sleep
from functions import *
import urllib3
default_sleep = 1
urllib3.disable_warnings()
open_ai_api_key = read_file('openaiapikey.txt')
openai.api_key = open_ai_api_key
last_msg = {'time':0.0}
def make_prompt(context):
prompt = read_file('base_action_prompt.txt')
return prompt.replace('<<CONTEXT>>', context)
def query_gpt3(context):
prompt = make_prompt(context)
response = openai.Completion.create(
engine='davinci',
#engine='curie',
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0.7,
presence_penalty=0.7,
stop=['ACTION4:', 'CONTEXT:', 'INSTRUCTIONS:', '<<END>>'])
return response['choices'][0]['text'].strip().splitlines()
def post_actions(actions, context):
for action in actions:
try:
action = action.strip()
if action == '':
continue
action = re.sub('ACTION\d+:', '', action)
#print('ACTION:', action)
payload = dict()
payload['msg'] = action.strip()
payload['irt'] = context['mid']
payload['ctx'] = context['mid']
payload['key'] = 'action.idea'
payload['sid'] = 'action.generator'
result = nexus_post(payload)
#print(result)
except Exception as oops:
print('ERROR in ACTIONS/POST_ACTIONS:', oops)
def query_nexus():
global last_msg
try:
stream = nexus_get(key='context', start=last_msg['time'])
for context in stream:
if context['time'] <= last_msg['time']:
continue
#print('CONTEXT:', context['msg'])
if context['time'] > last_msg['time']:
last_msg = context
actions = query_gpt3(context['msg'])
#print('ALL ACTIONS:', actions)
post_actions(actions, context)
except Exception as oops:
print('ERROR in ACTIONS/QUERY_NEXUS:', oops)
if __name__ == '__main__':
print('Starting Action Generator')
while True:
query_nexus()
sleep(default_sleep)
| [
"base_action_prompt.txt"
] |
2024-01-10 | daveshap/Raven_MVP | svc_questions.py | import re
import os
import openai
import requests
import json
from time import sleep
from functions import *
import urllib3
default_sleep = 1
urllib3.disable_warnings()
open_ai_api_key = read_file('openaiapikey.txt')
openai.api_key = open_ai_api_key
last_msg = {'time':0.0}
def make_prompt(context):
#prompt = read_file('base_questions_prompt_02.txt')
prompt = read_file('base_questions_prompt.txt')
return prompt.replace('<<CONTEXT>>', context)
def query_gpt3(context):
prompt = make_prompt(context)
response = openai.Completion.create(
#engine='davinci',
engine='curie-instruct-beta',
#engine='curie',
prompt=prompt,
temperature=0.5,
max_tokens=1000,
top_p=1,
frequency_penalty=0.3,
presence_penalty=0.3,
stop=['QUESTION4:', 'CONTEXT:', 'INSTRUCTIONS:', '<<END>>'])
return response['choices'][0]['text'].strip().splitlines()
def post_actions(actions, context):
for action in actions:
try:
action = action.strip()
if action == '':
continue
action = re.sub('QUESTION\d+:', '', action)
#print('ACTION:', action)
payload = dict()
payload['msg'] = action.strip()
payload['irt'] = context['mid']
payload['ctx'] = context['mid']
payload['key'] = 'question'
payload['sid'] = 'question.generator'
result = nexus_post(payload)
#print(result)
except Exception as oops:
            print('ERROR in QUESTIONS/POST_ACTIONS:', oops)
def query_nexus():
global last_msg
try:
stream = nexus_get(key='context', start=last_msg['time'])
for context in stream:
if context['time'] <= last_msg['time']:
continue
#print('CONTEXT:', context['msg'])
if context['time'] > last_msg['time']:
last_msg = context
actions = query_gpt3(context['msg'])
#print('ALL ACTIONS:', actions)
post_actions(actions, context)
except Exception as oops:
        print('ERROR in QUESTIONS/QUERY_NEXUS:', oops)
if __name__ == '__main__':
    print('Starting Question Generator')
while True:
query_nexus()
sleep(default_sleep)
| [
"base_questions_prompt.txt"
] |
2024-01-10 | daveshap/Raven_MVP | svc_encyclopedia.py | import re
import os
import openai
import requests
import json
from time import sleep
from raven_functions import *
from solr_functions import *
import urllib3
default_sleep = 1
urllib3.disable_warnings()
open_ai_api_key = read_file('openaiapikey.txt')
openai.api_key = open_ai_api_key
last_msg = {'time':0.0}
#default_engine = 'davinci'
default_engine = 'curie-instruct-beta'
def query_gpt3(prompt):
response = openai.Completion.create(
engine=default_engine,
prompt=prompt,
temperature=0.5,
max_tokens=100,
top_p=1,
frequency_penalty=0.3,
presence_penalty=0.3,
stop=['PASSAGE:', 'QUERIES:', 'INSTRUCTIONS:', '<<END>>'])
return response['choices'][0]['text']
def post_articles(articles, context):
for article in articles:
try:
# TODO massage article
payload = dict()
#print('POST ARTICLE:', article['title'])
payload['msg'] = str(article['title']) + ' : ' + str(article['text'])
payload['irt'] = context['mid']
payload['ctx'] = context['mid']
payload['key'] = 'encyclopedia.article'
payload['sid'] = 'encyclopedia.wiki'
result = nexus_post(payload)
#print(result)
except Exception as oops:
print('ERROR in ENCYCLOPEDIA/POST_ARTICLES:', oops)
def get_search_queries(text):
prompt = read_file('prompt_search_query.txt')
prompt = prompt.replace('<<PASSAGE>>', text)
results = query_gpt3(prompt).split(',')
return [i.strip() for i in results]
def query_nexus():
global last_msg
try:
stream = nexus_get(key='context', start=last_msg['time'])
for context in stream:
if context['time'] <= last_msg['time']:
continue
if context['time'] > last_msg['time']:
last_msg = context
queries = get_search_queries(context['msg'])
#print('QUERIES:', queries)
articles = list()
for query in queries:
result = solr_search(query)
articles += result['response']['docs']
post_articles(articles, context)
except Exception as oops:
print('ERROR in ENCYCLOPEDIA/QUERY_NEXUS:', oops)
if __name__ == '__main__':
print('Starting Encyclopedia Service')
while True:
query_nexus()
sleep(default_sleep)
| [
"prompt_search_query.txt",
"<<PASSAGE>>"
] |
2024-01-10 | daveshap/Raven_MVP | svc_cof1.py | import re
import os
import openai
import requests
import json
from time import sleep
from functions import *
import urllib3
default_sleep = 1
urllib3.disable_warnings()
open_ai_api_key = read_file('openaiapikey.txt')
openai.api_key = open_ai_api_key
last_msg = {'time':0.0}
def make_prompt(context, action):
prompt = read_file('base_cof1_prompt.txt')
return prompt.replace('<<CONTEXT>>', context).replace('<<ACTION>>', action)
def query_gpt3(context, action):
prompt = make_prompt(context, action)
response = openai.Completion.create(
engine='davinci',
#engine='curie',
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0.7,
presence_penalty=0.7,
stop=['<<END>>', 'CONTEXT:', 'ACTION:', 'INSTRUCTIONS:'])
return response['choices'][0]['text']
def query_nexus():
global last_msg
try:
stream = nexus_get(key='action.idea', start=last_msg['time'])
for action in stream:
if action['time'] <= last_msg['time']:
continue
#print('ACTION:', action['msg'])
if action['time'] > last_msg['time']:
last_msg = action
context = nexus_get(mid=action['irt'])
evaluation = query_gpt3(context[0]['msg'], action['msg'])
evaluation = re.sub('\s+', ' ', evaluation).strip()
#print('EVAL:', evaluation)
if 'positive EXPLAIN:' in evaluation:
evaluation = evaluation.replace('positive EXPLAIN:', '').strip()
payload = {'msg': evaluation, 'irt': action['mid'], 'key': 'cof1.positive', 'sid': 'cof1.evaluation', 'ctx': action['ctx']}
else:
evaluation = evaluation.replace('negative EXPLAIN:', '').strip()
payload = {'msg': evaluation, 'irt': action['mid'], 'key': 'cof1.negative', 'sid': 'cof1.evaluation', 'ctx': action['ctx']}
result = nexus_post(payload)
#print(result)
except Exception as oops:
print('ERROR in COF1/QUERY_NEXUS:', oops)
if __name__ == '__main__':
print('Starting COF1')
while True:
query_nexus()
# TODO look for metacognitive messages (aka HALT and RESUME)
sleep(default_sleep)
| [
"base_cof1_prompt.txt"
] |
2024-01-10 | daveshap/Raven_MVP | svc_cof2.py | import re
import os
import openai
import requests
import json
from time import sleep
from functions import *
import urllib3
default_sleep = 1
urllib3.disable_warnings()
open_ai_api_key = read_file('openaiapikey.txt')
openai.api_key = open_ai_api_key
last_msg = {'time':0.0}
def make_prompt(context, action):
prompt = read_file('base_cof2_prompt.txt')
return prompt.replace('<<CONTEXT>>', context).replace('<<ACTION>>', action)
def query_gpt3(context, action):
prompt = make_prompt(context, action)
response = openai.Completion.create(
engine='davinci',
#engine='curie',
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0.7,
presence_penalty=0.7,
stop=['<<END>>', 'CONTEXT:', 'ACTION:', 'INSTRUCTIONS:'])
return response['choices'][0]['text']
def query_nexus():
global last_msg
try:
stream = nexus_get(key='action.idea', start=last_msg['time'])
for action in stream:
if action['time'] <= last_msg['time']:
continue
#print('ACTION:', action['msg'])
if action['time'] > last_msg['time']:
last_msg = action
context = nexus_get(mid=action['irt'])
evaluation = query_gpt3(context[0]['msg'], action['msg'])
evaluation = re.sub('\s+', ' ', evaluation).strip()
#print('EVAL:', evaluation)
if 'positive EXPLAIN:' in evaluation:
evaluation = evaluation.replace('positive EXPLAIN:', '').strip()
payload = {'msg': evaluation, 'irt': action['mid'], 'key': 'cof2.positive', 'sid': 'cof2.evaluation', 'ctx': action['ctx']}
else:
evaluation = evaluation.replace('negative EXPLAIN:', '').strip()
payload = {'msg': evaluation, 'irt': action['mid'], 'key': 'cof2.negative', 'sid': 'cof2.evaluation', 'ctx': action['ctx']}
result = nexus_post(payload)
#print(result)
except Exception as oops:
print('ERROR in COF2/QUERY_NEXUS:', oops)
if __name__ == '__main__':
print('Starting COF2')
while True:
query_nexus()
# TODO look for metacognitive messages (aka HALT and RESUME)
sleep(default_sleep)
| [
"base_cof2_prompt.txt"
] |
2024-01-10 | daveshap/Raven_MVP | svc_cof3.py | import re
import os
import openai
import requests
import json
from time import sleep
from functions import *
import urllib3
default_sleep = 1
urllib3.disable_warnings()
open_ai_api_key = read_file('openaiapikey.txt')
openai.api_key = open_ai_api_key
last_msg = {'time':0.0}
def make_prompt(context, action):
prompt = read_file('base_cof3_prompt.txt')
return prompt.replace('<<CONTEXT>>', context).replace('<<ACTION>>', action)
def query_gpt3(context, action):
prompt = make_prompt(context, action)
response = openai.Completion.create(
engine='davinci',
#engine='curie',
prompt=prompt,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0.7,
presence_penalty=0.7,
stop=['<<END>>', 'CONTEXT:', 'ACTION:', 'INSTRUCTIONS:'])
return response['choices'][0]['text']
def query_nexus():
global last_msg
try:
stream = nexus_get(key='action.idea', start=last_msg['time'])
for action in stream:
if action['time'] <= last_msg['time']:
continue
#print('ACTION:', action['msg'])
if action['time'] > last_msg['time']:
last_msg = action
context = nexus_get(mid=action['irt'])
evaluation = query_gpt3(context[0]['msg'], action['msg'])
evaluation = re.sub('\s+', ' ', evaluation).strip()
#print('EVAL:', evaluation)
if 'positive EXPLAIN:' in evaluation:
evaluation = evaluation.replace('positive EXPLAIN:', '').strip()
payload = {'msg': evaluation, 'irt': action['mid'], 'key': 'cof3.positive', 'sid': 'cof3.evaluation', 'ctx': action['ctx']}
else:
evaluation = evaluation.replace('negative EXPLAIN:', '').strip()
payload = {'msg': evaluation, 'irt': action['mid'], 'key': 'cof3.negative', 'sid': 'cof3.evaluation', 'ctx': action['ctx']}
result = nexus_post(payload)
#print(result)
except Exception as oops:
print('ERROR in COF3/QUERY_NEXUS:', oops)
if __name__ == '__main__':
print('Starting COF3')
while True:
query_nexus()
# TODO look for metacognitive messages (aka HALT and RESUME)
# use 'ctx' field to halt all actions related to given context
sleep(default_sleep)
| [
"base_cof3_prompt.txt"
] |
2024-01-10 | noah-blanchard/py-vocal-assistant | todo_manager.py | from openai_agent import OpenAIAgent
from speech_processing import SpeechProcessing
from command_processing import CommandProcessing
class TodoManager:
def __init__(self):
self.openai_agent = OpenAIAgent()
self.tasks = ["Buy milk", "Buy chocolate", "Go play football"]
self.speech_processor = SpeechProcessing()
self.command_processor = CommandProcessing()
def handle_command(self, command):
label = self.openai_agent.get_todo_command_label(command)
print(f"Label: {label}, Command: {command}")
if label == "add":
self.add_to_todo_list(command)
elif label == "list":
self.get_todo_list()
elif label == "remove":
self.remove_from_todo_list(command)
else:
self.speech_processor.speak("I couldn't understand your command! Please try again.")
def add_to_todo_list(self, item):
todo = self.openai_agent.generated_todo(item)
print(f"Todo to be added: {todo}")
if todo:
self.tasks.append(todo)
self.speech_processor.speak(f"Succesfully added '{todo}' to your todo list !")
def get_todo_list(self):
self.speech_processor.queue("Here's what's in your todo list!")
for index, task in enumerate(self.tasks):
self.speech_processor.queue(f"{index + 1}: {task}", False)
self.speech_processor.runAndWait()
def remove_from_todo_list(self, command):
task = self.openai_agent.recognize_todo(self.tasks, command)
print(command, task)
if task not in self.tasks:
self.speech_processor.speak("I couldn't find the specified task in your to-do list. Please try again.")
else:
self.speech_processor.speak(f"Do you want to remove '{task}' from your to-do list ?")
decision = self.speech_processor.listen()
decision = self.command_processor.get_approve_deny(decision)
print(decision)
if decision == "approve":
self.tasks.remove(task)
self.speech_processor.speak(f"I have removed '{task}' from your to-do list.")
else:
self.speech_processor.speak(f"Okay! I won't remove '{task}' from your to-do list.")
| [] |
2024-01-10 | noah-blanchard/py-vocal-assistant | trivia_agent.py | import requests
import random
from speech_processing import SpeechProcessing
from command_processing import CommandProcessing
from openai_agent import OpenAIAgent
class TriviaAgent:
def __init__(self):
self.base_url = "https://the-trivia-api.com/v2/questions"
self.speech_processor = SpeechProcessing()
self.command_processor = CommandProcessing()
self.openai_agent = OpenAIAgent()
def handle_command(self, command):
self.speech_processor.speak("Do you want me to ask you a trivia question ?")
decision = self.speech_processor.listen()
decision = self.command_processor.get_approve_deny(decision)
if decision == "approve":
self.speech_processor.speak("Ok! Let's play trivia !")
self.start_trivia()
else:
self.speech_processor.speak("Ok, it is totally fine, let me know if you need help for anything.")
def start_trivia(self):
question = self.get_question()
possible_answers = []
possible_answers.append(question["correct"])
possible_answers.extend(question["incorrect"])
random.shuffle(possible_answers)
self.speech_processor.queue(f"The category of the question is : {question['category']}")
self.speech_processor.queue(f"Here's the question : {question['question']}", rephrase=False)
for index, answer in enumerate(possible_answers):
self.speech_processor.queue(f"{index + 1}: {answer}", rephrase=False)
self.speech_processor.queue("What is your answer ?")
self.speech_processor.runAndWait()
self.get_and_check_answer(question["correct"])
def get_and_check_answer(self, correct_answer):
print("correct answer : ", correct_answer)
answer = self.speech_processor.listen()
verdict = self.openai_agent.check_trivia_answer(correct_answer, answer)
print("verdict : ", verdict)
if verdict.lower() == "correct":
self.speech_processor.speak("Congratulations! This is the right answer to the trivia question.")
else:
self.speech_processor.speak("This isn't the right answer to the trivia question. You can try a new one if you want.")
def get_question(self, limit=1):
try:
params = {
"limit": limit
}
response = requests.get(self.base_url, params=params)
if response.status_code == 200:
data = response.json()[0]
question_data = {
"category": data["category"],
"question": data["question"]["text"],
"correct": data["correctAnswer"],
"incorrect": data["incorrectAnswers"]
}
return question_data
except Exception as e:
print("There was an error retrieving a question : ", e)
return None | [] |
2024-01-10 | noah-blanchard/py-vocal-assistant | command_processing.py | from openai_agent import OpenAIAgent
class CommandProcessing:
def __init__(self):
self.openai_agent = OpenAIAgent()
def handle_command(self, command):
return self.openai_agent.get_command_label(command)
def get_approve_deny(self, command):
return self.openai_agent.get_approve_deny(command) | [] |
2024-01-10 | noah-blanchard/py-vocal-assistant | speech_processing.py | import speech_recognition as sr
import pyttsx3 as tts
from openai_agent import OpenAIAgent
import time
from pygame import mixer
class SpeechProcessing:
def __init__(self):
self.recognizer = sr.Recognizer()
self.tts_engine = tts.init()
self.openai_agent = OpenAIAgent()
self.sound_file = "listen_sound.mp3"
mixer.init()
self.tts_engine.setProperty("voice", "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0")
self.tts_engine.setProperty("rate", 178)
def play_sound(self):
mixer.music.load(self.sound_file)
mixer.music.play()
def listen_for_wakeword(self):
wakeword = "hello my friend"
with sr.Microphone() as source:
self.recognizer.adjust_for_ambient_noise(source, duration=1)
print("Waiting for wake word...")
try:
audio = self.recognizer.listen(source, timeout=5)
text = self.recognizer.recognize_google(audio)
if text.lower() == wakeword:
print("Wake word detected.")
return True
except sr.WaitTimeoutError:
pass
except sr.UnknownValueError:
pass
except sr.RequestError:
print("Couldn't request results from the Google Speech Recognition service")
except Exception as e:
print(f"There was an error: {e}")
return False
def listen(self, timeout=5):
with sr.Microphone() as source:
self.recognizer.adjust_for_ambient_noise(source, duration=1)
self.play_sound()
print("Listening...")
audio = None
try:
audio = self.recognizer.listen(source, timeout)
except sr.WaitTimeoutError:
print("Listening timed out while waiting for phrase to start")
return ""
text = ""
try:
print("Recognizing...")
text = self.recognizer.recognize_google(audio)
print(f"User said: {text}")
except sr.UnknownValueError:
print("Google Speech could not recognize audio")
except sr.RequestError:
print("Couldn't request results from the Google Speech Recognition service")
except Exception:
print("There was an error")
return text
def speak(self, text, rephrase=True):
self.queue(text, rephrase)
self.runAndWait()
def queue(self, text, rephrase=True):
if rephrase:
self.tts_engine.say(self.openai_agent.rephrase(text))
else:
self.tts_engine.say(text)
def runAndWait(self):
self.tts_engine.runAndWait() | [] |
2024-01-10 | noah-blanchard/py-vocal-assistant | weather_agent.py | import requests
from dotenv import load_dotenv
import os
from speech_processing import SpeechProcessing
from openai_agent import OpenAIAgent
load_dotenv()
class WeatherAgent:
def __init__(self):
self.api_key = os.getenv("WEATHER_API_KEY")
self.base_url = "http://api.weatherapi.com/v1/current.json"
self.openai_agent = OpenAIAgent()
self.speech_processor = SpeechProcessing()
def handle_command(self, command):
location = self.openai_agent.extract_information("location", command)
if location == "none":
self.get_location()
else:
weather_data = self.get_weather(location)
self.process_weather(weather_data)
def get_location(self):
self.speech_processor.speak("Please specify a location for me to give you the current weather.")
location = self.speech_processor.listen()
location = self.openai_agent.extract_information("location", location)
if location and location != "none":
weather_data = self.get_weather(location)
self.process_weather(weather_data)
else:
self.speech_processor.speak("I can't find the specified location. Please try again.")
def process_weather(self, data):
if data:
weather_message = f"Currently in {data['location']}, the weather condition is : {data['condition']}, and the temperature is : {data['temperature']} degrees."
self.speech_processor.speak(weather_message)
else:
self.speech_processor.speak("I couldn't retrieve the weather informations. Please try again.")
def get_weather(self, location):
params = {
'key': self.api_key,
'q': location,
'aqi': 'no'
}
response = requests.get(self.base_url, params=params)
if response.status_code != 200:
return None
data = response.json()
weather_data = {
'location': data['location']['name'],
'condition': data['current']['condition']['text'],
'temperature': data['current']['temp_c']
}
return weather_data | [] |
2024-01-10 | dsvolk/aidebates | src~gradio~stream.py | from collections.abc import Generator
from queue import Empty, Queue
from threading import Thread
from typing import Dict
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from .callbacks import QueueCallback
def stream(prompt_template: ChatPromptTemplate, llm_params: Dict, invoke_params: Dict) -> Generator:
q: Queue = Queue()
job_done = object()
chain = prompt_template | ChatOpenAI(callbacks=[QueueCallback(q)], **llm_params)
    # Create a function to call - this will run in a thread
def task():
_ = chain.invoke(invoke_params)
q.put(job_done)
# Create a thread and start the function
t = Thread(target=task)
t.start()
content = ""
# Get each new token from the queue and yield for our generator
while True:
try:
next_token = q.get(True, timeout=1)
if next_token is job_done:
break
content += next_token
yield content
except Empty:
continue
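# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal illustration of consuming the queue-based streaming generator above,
# e.g. from a Gradio callback. The model name, temperature and the example
# "motion" prompt are illustrative assumptions; OPENAI_API_KEY must be set.
if __name__ == "__main__":
    demo_template = ChatPromptTemplate.from_messages(
        [("system", "Give one short argument in favour of the motion: {motion}")]
    )
    demo_llm_params = {"model_name": "gpt-3.5-turbo", "temperature": 0.7}
    for partial_text in stream(demo_template, demo_llm_params, {"motion": "AI debates are useful"}):
        # Each yielded value is the accumulated response so far.
        print(partial_text)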
| [] |
2024-01-10 | dsvolk/aidebates | src~debates~round.py | from typing import Dict
from langchain.memory import ChatMessageHistory
from langchain.prompts import ChatPromptTemplate
from src.gradio import stream
def _make_prompt_template(prompt: str, history: ChatMessageHistory) -> ChatPromptTemplate:
return ChatPromptTemplate.from_messages([("system", prompt)] + history.messages) # type: ignore
class DebateRound:
def __init__(self, motion: str, gov_prompt: str, opp_prompt: str, llm_params: Dict):
self.motion = motion
self.gov_prompt = gov_prompt
self.opp_prompt = opp_prompt
self.gov_history = ChatMessageHistory()
self.opp_history = ChatMessageHistory()
self.llm_params = llm_params
def gov_speech(self):
prompt_template = _make_prompt_template(self.gov_prompt, self.gov_history)
content = ""
for content in stream(prompt_template, self.llm_params, {"motion": self.motion}):
yield content
self.gov_history.add_ai_message(content)
self.opp_history.add_user_message(content)
def opp_speech(self):
prompt_template = _make_prompt_template(self.opp_prompt, self.opp_history)
content = ""
for content in stream(prompt_template, self.llm_params, {"motion": self.motion}):
yield content
self.gov_history.add_user_message(content)
self.opp_history.add_ai_message(content)
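# --- Hedged usage sketch (added; not part of the original file) ---
# Illustrates one government/opposition exchange. The motion, prompts and model
# parameters are illustrative assumptions rather than repository values, and
# OPENAI_API_KEY must be set for the underlying calls to succeed.
if __name__ == "__main__":
    demo_round = DebateRound(
        motion="This house believes AI debates are useful",
        gov_prompt="You argue FOR the motion: {motion}",
        opp_prompt="You argue AGAINST the motion: {motion}",
        llm_params={"model_name": "gpt-3.5-turbo", "temperature": 0.7},
    )
    gov_text = ""
    for gov_text in demo_round.gov_speech():
        pass  # each yielded value is the accumulated speech so far
    print("GOV:", gov_text)
    opp_text = ""
    for opp_text in demo_round.opp_speech():
        pass
    print("OPP:", opp_text)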
| [] |
2024-01-10 | dsvolk/aidebates | src~chains~debator.py | from __future__ import annotations
import logging
import string
from typing import Final, List, Optional, Tuple
import pandas as pd
# from langchain.base_language import BaseLanguageModel
# from langchain.chains.base import Chain
from langchain.callbacks import get_openai_callback
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chat_models.openai import ChatOpenAI
from langchain.prompts.prompt import PromptTemplate
from src.chains.basic import BasicChain
# from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun
smart_translator_role_prompts = (
(
"idiomatic_translation",
"""Translate this sentence into Russian.
Q: {input}
A:""",
),
(
"words_translation",
"""List all the words and collocation of intermediate and advanced levels and translate them into Russian. Do not translate simple words. Do not include the translation of the original text.
Example:
Q: Human Self and its nature is held as fundamentally unqualified, faultless, beautiful, blissful, ethical, compassionate and good.
A:
human - человеческий
Self - Я
is held as - считается
unqualified - неквалифицированный
faultless - безупречный
beautiful - красивый
blissful - блаженный
ethical - этичный
compassionate - сострадательный
Q: {input}
A:""",
),
(
"synonyms",
"""You act as an English tutor who teaches English to a Russian speaker. Your student, whose level is beginner to intermediate, asks you to look at the text and find difficult words phrasal verbs and provide simpler synonyms for them. Don't provide synonyms for simple or intermediate words and do not provide synonyms which are not simple. Only provide very close synonyms. Do not provide synonyms for specialized terms, proper names and numerals. Do not provide synonyms for collocation.
Example:
Q: The conference was abruptly called off due to unforeseen circumstances that occurred at the last minute.
A:
call off - synonym: cancel
occur - synonym: happen
Q: {input}
A:""",
),
(
"transcription",
"""In the given text choose 20% of the words of the intermediate and advanced level where with the most irregular spelling, along with their transcription.
Example:
Q: Salvation theory occupies a place of special significance in many religions.In the academic field of religious studies, soteriology is understood by scholars as representing a key theme in a number of different religions and is often studied in a comparative context; that is, comparing various ideas about what salvation is and how it is obtained.
A:
soteriology - [soʊtəriˈɑlədʒi]
occupy - [ˈɒkjʊpaɪ]
significance - [sɪɡˈnɪfɪkəns]
Q: {input}
A:""",
),
)
smart_translator_role_llm_params: Final[dict] = {
"model": "gpt-3.5-turbo",
"temperature": 0.0,
}
class DebatorRole(object):
def __init__(
self,
role_key: str = "smart-translator",
prompts: Tuple[
Tuple[str, str], Tuple[str, str], Tuple[str, str], Tuple[str, str]
] = smart_translator_role_prompts,
role_name: str = "Smart Translator",
pretty_name: str = "🌐 Smart Translator",
commands: List[str] = None,
description: str = "",
hidden: bool = False,
chains: Optional[Tuple[BasicChain, BasicChain, BasicChain, BasicChain]] = None,
):
self.role_key = role_key
self.prompts = prompts
self.role_name = role_name
self.pretty_name = pretty_name
self.commands = commands or ["st", "smarttranslator"]
self.description = description
self.hidden = hidden
if chains is None:
self.chains = (
BasicChain(
prompt=PromptTemplate.from_template(smart_translator_role_prompts[0][1]),
llm=ChatOpenAI(**smart_translator_role_llm_params),
),
BasicChain(
prompt=PromptTemplate.from_template(smart_translator_role_prompts[1][1]),
llm=ChatOpenAI(**smart_translator_role_llm_params),
),
BasicChain(
prompt=PromptTemplate.from_template(smart_translator_role_prompts[2][1]),
llm=ChatOpenAI(**smart_translator_role_llm_params),
),
BasicChain(
prompt=PromptTemplate.from_template(smart_translator_role_prompts[3][1]),
llm=ChatOpenAI(**smart_translator_role_llm_params),
),
)
else:
self.chains = chains
def __str__(self):
return self.role_key
@classmethod
def _parse_smart_translator_responses(cls, responses: List[str]) -> str:
"""Parse a protocol string from NVC and return a string with the message."""
# from pprint import pprint
# pprint(responses)
output = responses[0]
# group 1: words
words = None
try:
words = pd.DataFrame(
[
[line.split(" - ")[0].strip(string.punctuation), line.split(" - ")[1].strip(string.punctuation)]
for line in responses[1].strip(string.punctuation).split("\n")
],
columns=["word", "translation"],
)
except Exception as e:
logging.warning(f"Failed to parse words: {e}")
# group 2: synonyms
synonyms = None
try:
synonyms = pd.DataFrame(
[
[
line.split(" - synonym: ")[0].strip(string.punctuation),
line.split(" - synonym: ")[1].strip(string.punctuation),
]
for line in responses[2].strip(string.punctuation).split("\n")
],
columns=["word", "synonym"],
)
except Exception as e:
logging.warning(f"Failed to parse synonyms: {e}")
# group 3: transcription
transcription = None
try:
transcription = pd.DataFrame(
[
[line.split(" - ")[0].strip(string.punctuation), line.split(" - ")[1].strip(string.punctuation)]
for line in responses[3].strip(string.punctuation).split("\n")
],
columns=["word", "transcription"],
)
except Exception as e:
logging.warning(f"Failed to parse transcription: {e}")
if words is None:
return output.strip()
else:
# merge all dataframes on word
df = words
for df_to_merge in [synonyms, transcription]:
if df_to_merge is not None:
df = df.merge(df_to_merge, on="word", how="outer")
df = df.dropna(subset=["word", "translation"])
df = df.drop_duplicates()
def row_to_text(row):
text = row["word"]
if ("transcription" in row) and (pd.notnull(row["transcription"])):
text += f" - {row['transcription']}"
if ("translation" in row) and (pd.notnull(row["translation"])):
text += f" - {row['translation']}"
if ("synonym" in row) and (pd.notnull(row["synonym"])):
text += f" (синоним: {row['synonym']})"
return text
df["text"] = df.apply(row_to_text, axis=1)
output += "\n\n" + "\n".join(df["text"].tolist())
return output.strip()
async def predict(self, text: str):
with get_openai_callback() as cb:
responses = [
await chain.arun({"input": text}, callbacks=[StdOutCallbackHandler()]) for chain in self.chains
]
logging.info(f"LLM response: {responses}")
try:
parsed_response = self._parse_smart_translator_responses(responses)
except Exception as e:
logging.warning(f"Failed to parse LLM response: {e}")
parsed_response = "\n\n".join(responses)
return {
"text": parsed_response,
"n_prompt_tokens": cb.prompt_tokens,
"n_completion_tokens": cb.completion_tokens,
}
| [
"L",
"(('idiomatic_translation', 'Translate this sentence into Russian.\\n\\nQ: {input}\\nA:'), ('words_translation', 'List all the words and collocation of intermediate and advanced levels and translate them into Russian. Do not translate simple words. Do not include the translation of the original text.\\n\\nExample:\\nQ: Human Self and its nature is held as fundamentally unqualified, faultless, beautiful, blissful, ethical, compassionate and good.\\nA:\\nhuman - человеческий\\nSelf - Я\\nis held as - считается\\nunqualified - неквалифицированный\\nfaultless - безупречный\\nbeautiful - красивый\\nblissful - блаженный\\nethical - этичный\\ncompassionate - сострадательный\\n\\nQ: {input}\\nA:'), ('synonyms', \"You act as an English tutor who teaches English to a Russian speaker. Your student, whose level is beginner to intermediate, asks you to look at the text and find difficult words phrasal verbs and provide simpler synonyms for them. Don't provide synonyms for simple or intermediate words and do not provide synonyms which are not simple. Only provide very close synonyms. Do not provide synonyms for specialized terms, proper names and numerals. Do not provide synonyms for collocation.\\n\\nExample:\\nQ: The conference was abruptly called off due to unforeseen circumstances that occurred at the last minute.\\nA:\\ncall off - synonym: cancel\\noccur - synonym: happen\\n\\nQ: {input}\\nA:\"), ('transcription', 'In the given text choose 20% of the words of the intermediate and advanced level where with the most irregular spelling, along with their transcription.\\n\\nExample:\\nQ: Salvation theory occupies a place of special significance in many religions.In the academic field of religious studies, soteriology is understood by scholars as representing a key theme in a number of different religions and is often studied in a comparative context; that is, comparing various ideas about what salvation is and how it is obtained.\\nA:\\nsoteriology - [soʊtəriˈɑlədʒi]\\noccupy - [ˈɒkjʊpaɪ]\\nsignificance - [sɪɡˈnɪfɪkəns]\\n\\nQ: {input}\\nA:'))"
] |
2024-01-10 | IQ-SCM/InternGPT | iGPT~controllers~ConversationBot.py | import inspect
import re
import os
import numpy as np
import uuid
import shutil
import whisper
from PIL import Image
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
from ..models import *
from iGPT.models.utils import gen_new_name
INTERN_CHAT_PREFIX = """InternGPT is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. InternGPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
InternGPT is able to process and understand large amounts of text and images. As a language model, InternGPT can not directly read images, but it has a list of tools to finish different visual tasks. Each image will have a file name formed as "image/xxx.png", and InternGPT can invoke different tools to indirectly understand pictures. When talking about images, InternGPT is very strict to the file name and will never fabricate nonexistent files. When using tools to generate new image files, InternGPT is also known that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. InternGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated.
Human may provide new figures to InternGPT with a description. The description helps InternGPT to understand this image, but InternGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, InternGPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics.
TOOLS:
------
InternGPT has access to the following tools:"""
INTERN_CHAT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
INTERN_CHAT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if it does not exist.
You will remember to provide the image file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Since InternGPT is a text language model, InternGPT must use tools to observe images rather than imagination.
The thoughts and observations are only visible for InternGPT, InternGPT should remember to repeat important information in the final response for Human.
Thought: Do I need to use a tool? {agent_scratchpad} Let's think step by step.
"""
INTERN_CHAT_PREFIX_CN = """InternGPT 旨在能够协助完成范围广泛的文本和视觉相关任务,从回答简单的问题到提供对广泛主题的深入解释和讨论。 InternGPT 能够根据收到的输入生成类似人类的文本,使其能够进行听起来自然的对话,并提供连贯且与手头主题相关的响应。
InternGPT 能够处理和理解大量文本和图像。作为一种语言模型,InternGPT 不能直接读取图像,但它有一系列工具来完成不同的视觉任务。每张图片都会有一个文件名,格式为“image/xxx.png”,InternGPT可以调用不同的工具来间接理解图片。在谈论图片时,InternGPT 对文件名的要求非常严格,绝不会伪造不存在的文件。在使用工具生成新的图像文件时,InternGPT也知道图像可能与用户需求不一样,会使用其他视觉问答工具或描述工具来观察真实图像。 InternGPT 能够按顺序使用工具,并且忠于工具观察输出,而不是伪造图像内容和图像文件名。如果生成新图像,它将记得提供上次工具观察的文件名。
Human 可能会向 InternGPT 提供带有描述的新图形。描述帮助 InternGPT 理解这个图像,但 InternGPT 应该使用工具来完成以下任务,而不是直接从描述中想象。有些工具将会返回英文描述,但你对用户的聊天应当采用中文。
总的来说,InternGPT 是一个强大的可视化对话辅助工具,可以帮助处理范围广泛的任务,并提供关于范围广泛的主题的有价值的见解和信息。
工具列表:
------
InternGPT 可以使用这些工具:"""
INTERN_CHAT_FORMAT_INSTRUCTIONS_CN = """用户使用中文和你进行聊天,但是工具的参数应当使用英文。如果要调用工具,你必须遵循如下格式:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
当你不再需要继续调用工具,而是对观察结果进行总结回复时,你必须使用如下格式:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
INTERN_CHAT_SUFFIX_CN = """你对文件名的正确性非常严格,而且永远不会伪造不存在的文件。
开始!
因为InternGPT是一个文本语言模型,必须使用工具去观察图片而不是依靠想象。
推理想法和观察结果只对InternGPT可见,需要记得在最终回复时把重要的信息重复给用户,你只能给用户返回中文句子。我们一步一步思考。在你使用工具时,工具的参数只能是英文。
聊天历史:
{chat_history}
新输入: {input}
Thought: Do I need to use a tool? {agent_scratchpad}
"""
def cut_dialogue_history(history_memory, keep_last_n_words=500):
if history_memory is None or len(history_memory) == 0:
return history_memory
tokens = history_memory.split()
n_tokens = len(tokens)
print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
if n_tokens < keep_last_n_words:
return history_memory
paragraphs = history_memory.split('\n')
last_n_tokens = n_tokens
while last_n_tokens >= keep_last_n_words:
last_n_tokens -= len(paragraphs[0].split(' '))
paragraphs = paragraphs[1:]
return '\n' + '\n'.join(paragraphs)
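# Added note (not in the original file): the trimming above drops whole
# "\n"-separated exchanges from the start of the buffer until fewer than
# keep_last_n_words space-separated tokens remain, so it never cuts a line in half.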
class ConversationBot:
def __init__(self, load_dict):
print(f"Initializing InternGPT, load_dict={load_dict}")
if 'HuskyVQA' not in load_dict:
raise ValueError("You have to load ImageCaptioning as a basic function for iGPT")
if 'SegmentAnything' not in load_dict:
raise ValueError("You have to load SegmentAnything as a basic function for iGPT")
if 'ImageOCRRecognition' not in load_dict:
raise ValueError("You have to load ImageOCRRecognition as a basic function for iGPT")
self.models = {}
self.audio_model = whisper.load_model("small").to('cuda:0')
# Load Basic Foundation Models
for class_name, device in load_dict.items():
self.models[class_name] = globals()[class_name](device=device)
# Load Template Foundation Models
for class_name, module in globals().items():
if getattr(module, 'template_model', False):
template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
loaded_names = set([type(e).__name__ for e in self.models.values()])
if template_required_names.issubset(loaded_names):
self.models[class_name] = globals()[class_name](
**{name: self.models[name] for name in template_required_names})
self.tools = []
for instance in self.models.values():
for e in dir(instance):
if e.startswith('inference'):
func = getattr(instance, e)
self.tools.append(Tool(name=func.name, description=func.description, func=func))
def init_agent(self):
memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
llm = OpenAI(temperature=0)
agent = initialize_agent(
self.tools,
llm,
agent="conversational-react-description",
verbose=True,
memory=memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': INTERN_CHAT_PREFIX, 'format_instructions': INTERN_CHAT_FORMAT_INSTRUCTIONS,
'suffix': INTERN_CHAT_SUFFIX}, )
user_state = [{'agent': agent, 'memory': memory}]
return user_state
def find_latest_image(self, file_list):
res = None
prev_mtime = None
for file_item in file_list:
file_path = os.path.basename(file_item[0])
if not os.path.exists(file_item[0]):
continue
if res is None:
res = file_item[0]
ms = int(file_path.split('_')[0][3:]) * 0.001
prev_mtime = int(os.path.getmtime(file_item[0])) + ms
else:
ms = int(file_path.split('_')[0][3:]) * 0.001
cur_mtime = int(os.path.getmtime(file_item[0])) + ms
# cur_mtime = cur_mtime + ms
if cur_mtime > prev_mtime:
prev_mtime = cur_mtime
res = file_item[0]
return res
def run_task(self, use_voice, text, audio_path, state, user_state):
if use_voice:
state, _, user_state = self.run_audio(audio_path, state, user_state)
else:
state, _, user_state = self.run_text(text, state, user_state)
return state, state, user_state
def find_param(self, msg, keyword, excluded=False):
p1 = re.compile(f'(image/[-\\w]*.(png|mp4))')
p2 = re.compile(f'(image/[-\\w]*{keyword}.(png|mp4))')
if keyword == None or len(keyword) == 0:
out_filenames = p1.findall(msg)
elif not excluded:
out_filenames = p2.findall(msg)
elif excluded:
all_files = p1.findall(msg)
excluded_files = p2.findall(msg)
out_filenames = set(all_files) - set(excluded_files)
res = self.find_latest_image(out_filenames)
return res
def rectify_action(self, inputs, history_msg, agent):
print('Rectify the action.')
print(inputs)
func = None
func_name = None
func_inputs = None
res = None
if 'extract' in inputs.lower() or 'save' in inputs.lower():
cls = self.models.get('ExtractMaskedAnything', None)
if cls is not None:
func = cls.inference
mask_path = self.find_param(inputs, 'mask')
if mask_path is None:
mask_path = self.find_param(history_msg, 'mask')
img_path = self.find_parent(mask_path, history_msg+inputs)
if img_path is None:
img_path = self.find_param(history_msg+inputs, 'mask', excluded=True)
func_inputs = f'{img_path},{mask_path}'
func_name = 'ExtractMaskedAnything'
elif 'generate' in inputs.lower() or 'beautify' in inputs.lower():
# print('*' * 40)
cls = self.models.get('ImageText2Image', None)
if cls is not None:
func = cls.inference
img_path = self.find_param(inputs, '')
if img_path is None:
img_path = self.find_param(history_msg, '')
# img_path = self.find_param(history_msg, 'raw')
prompt = inputs.strip()
func_inputs = f'{img_path},{prompt}'
func_name = 'ImageText2Image'
elif 'describe' in inputs.lower() or 'introduce' in inputs.lower():
cls = self.models.get('HuskyVQA', None)
func_name = 'HuskyVQA'
if cls is not None and 'mask' in inputs.lower():
prompt = inputs.strip()
func = cls.inference_by_mask
mask_path = self.find_param(inputs, 'mask')
if mask_path is None:
mask_path = self.find_param(history_msg, 'mask')
img_path = self.find_parent(mask_path, history_msg+inputs)
if img_path is None:
img_path = self.find_param(history_msg+inputs, 'mask', excluded=True)
func_inputs = f'{img_path},{mask_path},{prompt}'
elif cls is not None:
prompt = inputs.strip()
func = cls.inference
img_path = self.find_param(inputs, 'mask', excluded=True)
if img_path is None:
img_path = self.find_param(history_msg, 'mask', excluded=True)
func_inputs = f'{img_path}'
elif 'image' in inputs.lower() or 'figure' in inputs.lower() or 'picture' in inputs.lower():
cls = self.models.get('HuskyVQA', None)
func_name = 'HuskyVQA'
if cls is not None:
func = cls.inference
img_path = self.find_param(inputs, 'mask', excluded=True)
if img_path is None:
img_path = self.find_param(history_msg, 'mask', excluded=True)
prompt = inputs.strip()
func_inputs = f'{img_path},{prompt}'
else:
def only_chat(inputs):
res = agent(f"You can use history message to finish the user's request without using any tools. {inputs}")
res = res['output'].replace("\\", "/")
return res
func_name = 'ChatGPT'
func_inputs = inputs
func = only_chat
print(f'{func_name}: {func_inputs}')
return_res = None
if func is None:
res = f"I have tried to use the tool: \"{func_name}\" to acquire the results, but it is not sucessfully loaded."
else:
return_res = func(func_inputs)
if os.path.exists(return_res):
res = f"I have used the tool: \"{func_name}\" with the inputs: \"{func_inputs}\" to get the results. The result image is named {return_res}."
else:
res = return_res
print(f"I have used the tool: \"{func_name}\" to obtain the results. The Inputs: \"{func_inputs}\". Result: {return_res}.")
return res
def check_illegal_files(self, file_list):
illegal_files = []
for file_item in file_list:
if not os.path.exists(file_item[0]):
illegal_files.append(file_item[0])
return illegal_files
def find_parent(self, cur_path, history_msg):
if cur_path is None:
return None
root_path = os.path.dirname(cur_path)
name = os.path.basename(cur_path)
name = name.split('.')[0]
parent_name = name.split('_')[1]
# p1 = re.compile(f'(image/[-\\w]*.(png|mp4))')
p = re.compile(f'(image/{parent_name}[-\\w]*.(png|mp4))')
out_filenames = p.findall(history_msg)
if len(out_filenames) > 0:
out_filenames = out_filenames[0][0]
else:
out_filenames = None
all_file_items = os.listdir(f'{root_path}')
for item in all_file_items:
if item.startswith(parent_name):
out_filenames = os.path.join(root_path, item)
# out_filenames = item
break
print(f'{cur_path}, parent path: {out_filenames}')
return out_filenames
def get_suggested_inputs(self, inputs, history_msg):
image_path = self.find_param(history_msg+inputs, 'mask', excluded=True)
mask_path = self.find_param(history_msg+inputs, 'mask')
if image_path is None or mask_path is None:
return inputs
prompt_template2 = f"If the tool only needs image_path, image_path might be {image_path}. If the tool only needs mask_path, mask_path might be {mask_path}. "
image_path = self.find_parent(mask_path, history_msg)
if image_path is None:
image_path = self.find_param(history_msg+inputs, 'mask', excluded=True)
prompt_template1 = f"If the tool needs both image_path and mask_path as inputs, image_path might be {image_path} and mask_path might be {mask_path}. "
prompt_template3 = 'In other cases, you could refer to history message to finish the action. '
# prompt_template4 = 'Please finish my request using or not using tools. '
# prompt_template4 = 'If you understand, say \"Received\". \n'
new_inputs = prompt_template1 + prompt_template2 + prompt_template3 + inputs
print(f'Processed by get_suggested_inputs, prompt: {new_inputs}')
return new_inputs
def check_response(self, response):
pattern = re.compile('(image/[-\\w]*.(png|mp4))')
# img_pattern = re.compile('(image/[-\\w]*.(png|mp4))')
file_items = pattern.findall(response)
image_path = ''
mask_path = ''
for item in file_items:
if len(image_path) == 0 and '_image.' in item[0]:
image_path = item[0]
elif len(mask_path) == 0 and '_mask.' in item[0]:
mask_path = item[0]
if len(image_path) == 0 or len(mask_path) == 0:
return True
res = self.find_param(response, '')
if res == image_path:
return True
img_idx = response.find(image_path)
mask_idx = response.find(mask_path)
# if self.find_parent(mask_path) != image_path or \
# mask_idx < img_idx:
# return False
if mask_idx < img_idx:
return False
return True
def exec_simple_action(self, inputs, history_msg):
print('Execute the simple action without ChatGPT.')
print('history_msg: ', history_msg + inputs)
print('inputs: ', inputs)
func = None
func_name = None
func_inputs = None
res = None
new_inputs = inputs.replace('ReplaceMaskedAnything', 'placeholder')
if 'remove' in inputs.lower() or 'erase' in inputs.lower():
cls = self.models.get('LDMInpainting', None)
if cls is not None:
func = cls.inference
mask_path = self.find_param(inputs, 'mask')
if mask_path is None:
mask_path = self.find_param(history_msg, 'mask')
if mask_path is None:
                    return 'I cannot find the mask_path. Please check that you have successfully operated on the input image.'
img_path = self.find_parent(mask_path, history_msg+inputs)
if img_path is None:
img_path = self.find_param(history_msg+inputs, 'mask', excluded=True)
func_inputs = f'{img_path},{mask_path}'
func_name = 'RemoveMaskedAnything'
elif 'replace' in new_inputs.lower():
cls = self.models.get('ReplaceMaskedAnything', None)
if cls is not None:
func = cls.inference
mask_path = self.find_param(inputs, 'mask')
if mask_path is None:
mask_path = self.find_param(history_msg, 'mask')
if mask_path is None:
                    return 'I cannot find the mask_path. Please check that you have successfully operated on the input image.'
img_path = self.find_parent(mask_path, history_msg+inputs)
if img_path is None:
img_path = self.find_param(history_msg+inputs, 'mask', excluded=True)
if img_path is None:
                    return 'I cannot find the image_path. Please check that you have successfully uploaded an input image.'
func_inputs = f'{img_path},{mask_path},{inputs}'
func_name = 'ReplaceMaskedAnything'
print(f'{func_name}: {func_inputs}')
if func is None:
return None
return_res = func(func_inputs)
res = f"I have used the tool: \"{func_name}\" with the inputs: \"{func_inputs}\" to get the results. The result image is named {return_res}."
print(res)
return res
def exec_agent(self, inputs, agent):
# pattern = re.compile('(image/[-\\w]*.(png|mp4))')
response = agent({"input": inputs})['output']
response = response.replace("\\", "/")
nonsense_words = 'I do not need to use a tool'
if nonsense_words in response.split('.')[0] and len(response.split('.')) > 1:
response = '.'.join(response.split('.')[1:])
if not self.check_response(response):
raise RuntimeError('Arguments are not matched.')
return response
def find_result_path(self, inputs):
        pattern = re.compile('(image/[-\\w]*\\.(png|mp4))')
out_filenames = pattern.findall(inputs)
illegal_files = self.check_illegal_files(out_filenames)
if len(illegal_files) > 0:
raise FileNotFoundError(f'{illegal_files} do (does) not exist.')
res = self.find_latest_image(out_filenames)
return res
def run_text(self, text, state, user_state):
text = text.strip()
if text is None or len(text) == 0:
state += [(None, 'Please input text.')]
return state, state, user_state
agent = user_state[0]['agent']
agent.memory.buffer = cut_dialogue_history(agent.memory.buffer, keep_last_n_words=500)
history_msg = agent.memory.buffer[:]
# pattern = re.compile('(image/[-\\w]*.(png|mp4))')
        try:
            response = self.exec_simple_action(text, history_msg)
if response is None:
# inputs = self.get_suggested_inputs(text, history_msg)
inputs = text
response = self.exec_agent(inputs, agent)
else:
                agent.memory.buffer += f'\nHuman: {text}\n' + f'AI: {response}'
res = self.find_result_path(response)
except Exception as err1:
print(f'Error in line {err1.__traceback__.tb_lineno}: {err1}')
try:
response = self.rectify_action(text, history_msg, agent)
res = self.find_result_path(response)
agent.memory.buffer += f'\nHuman: {text}\n' + f'AI: {response}'
except Exception as err2:
print(f'Error in line {err2.__traceback__.tb_lineno}: {err2}')
state += [(text, 'Sorry, something went wrong inside the ChatGPT. Please check whether your image, video and message have been uploaded successfully.')]
return state, state, user_state
if res is not None and agent.memory.buffer.count(res) <= 1:
state = state + [(text, response + f' `{res}` is as follows: ')]
state = state + [(None, (res, ))]
else:
state = state + [(text, response)]
print(f"\nProcessed run_text, Input text: {text}\nCurrent state: {state}\n"
f"Current Memory: {agent.memory.buffer}")
return state, state, user_state
def run_audio(self, audio_path, state, user_state):
print(f'audio_path = {audio_path}')
if audio_path is None or not os.path.exists(audio_path):
state += [(None, 'No audio input. Please stop recording first and then send the audio.')]
            return state, state, user_state
if self.audio_model is None:
self.audio_model = whisper.load_model("small").to('cuda:0')
text = self.audio_model.transcribe(audio_path)["text"]
res = self.run_text(text, state, user_state)
print(f"\nProcessed run_audio, Input transcribed audio: {text}\nCurrent state: {state}\n"
f"Current Memory: {user_state[0]['agent'].memory.buffer}")
return res[0], res[1], res[2]
def upload_audio(self, audio_path, state, user_state):
print(f'audio_path = {audio_path}')
if audio_path is None or not os.path.exists(audio_path):
state += [(None, 'No audio input. Please upload audio file.')]
            return state, state, user_state
user_state = self.clear_user_state(False, user_state)
audio_name = os.path.basename(audio_path)
# vid_name = gen_new_name(vid_name, '', vid_name.split('.')[-1])
new_audio_path = os.path.join('./image/', audio_name)
new_audio_path = gen_new_name(new_audio_path, 'audio', audio_name.split('.')[-1])
shutil.copy(audio_path, new_audio_path)
user_state[0]['audio_path'] = new_audio_path
Human_prompt = f'\nHuman: provide an audio file named {new_audio_path}. You should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
AI_prompt = f"Received audio: {new_audio_path} "
# self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
user_state[0]['agent'].memory.buffer += Human_prompt + 'AI: ' + AI_prompt
state = state + [((new_audio_path, ), AI_prompt)]
# print('exists = ', os.path.exists("./tmp_files/1e7f_f4236666_tmp.mp4"))
print(f"\nProcessed upload_video, Input Audio: `{new_audio_path}`\nCurrent state: {state}\n"
f"Current Memory: {user_state[0]['agent'].memory.buffer}")
return state, state, user_state
def upload_image(self, image, state, user_state):
# [txt, click_img, state, user_state], [chatbot, txt, state, user_state]
# self.reset()
print('upload an image')
if image is None or image.get('image', None) is None:
return state, state, user_state
user_state = self.clear_user_state(False, user_state)
img = image['image']
image_filename = os.path.join('image', f"{str(uuid.uuid4())[:6]}.png")
image_filename = gen_new_name(image_filename, 'image')
img.save(image_filename, "PNG")
user_state[0]['image_path'] = image_filename
img = img.convert('RGB')
image_caption = self.models['HuskyVQA'].inference_captioning(image_filename)
# description = 'Debug'
user_state[0]['image_caption'] = image_caption
ocr_res = None
user_state[0]['ocr_res'] = []
if 'ImageOCRRecognition' in self.models.keys():
ocr_res = self.models['ImageOCRRecognition'].inference(image_filename)
ocr_res_raw = self.models['ImageOCRRecognition'].readtext(image_filename)
if ocr_res is not None and len(ocr_res) > 0:
Human_prompt = f'\nHuman: provide a image named {image_filename}. The description is: {image_caption} OCR result is: {ocr_res}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
user_state[0]['ocr_res'] = ocr_res_raw
else:
Human_prompt = f'\nHuman: provide a image named {image_filename}. The description is: {image_caption} This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
AI_prompt = "Received. "
# self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + ' AI: ' + AI_prompt
user_state[0]['agent'].memory.buffer += Human_prompt + 'AI: ' + AI_prompt
state = state + [(f"*{image_filename}*", AI_prompt)]
print(f"\nProcessed upload_image, Input image: {image_filename}\nCurrent state: {state}\n"
f"Current Memory: {user_state[0]['agent'].memory.buffer}")
return state, state, user_state
def upload_video(self, video_path, state, user_state):
# self.reset()
print('upload a video')
user_state = self.clear_user_state(False, user_state)
vid_name = os.path.basename(video_path)
# vid_name = gen_new_name(vid_name, '', vid_name.split('.')[-1])
new_video_path = os.path.join('./image/', vid_name)
new_video_path = gen_new_name(new_video_path, 'video', vid_name.split('.')[-1])
shutil.copy(video_path, new_video_path)
user_state[0]['video_path'] = new_video_path
if "VideoCaption" in self.models.keys():
description = self.models['VideoCaption'].inference(new_video_path)
else:
description = 'A video.'
user_state[0]['video_caption'] = description
Human_prompt = f'\nHuman: provide a video named {new_video_path}. The description is: {description}. This information helps you to understand this video, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
AI_prompt = f"Received video: {new_video_path} "
# self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
user_state[0]['agent'].memory.buffer += Human_prompt + 'AI: ' + AI_prompt
state = state + [((new_video_path, ), AI_prompt)]
# print('exists = ', os.path.exists("./tmp_files/1e7f_f4236666_tmp.mp4"))
print(f"\nProcessed upload_video, Input video: `{new_video_path}`\nCurrent state: {state}\n"
f"Current Memory: {user_state[0]['agent'].memory.buffer}")
return state, state, user_state
def blend_mask(self, img, mask):
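        # Overlay the mask on the image as a translucent green highlight and return the blended array.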
mask = mask.astype(np.uint8)
transparency_ratio = mask.astype(np.float32) / 255 / 3
transparency_ratio = transparency_ratio[:, :, np.newaxis]
mask = mask[:, :, np.newaxis]
mask[mask != 0] = 255
mask= mask.repeat(3, axis=2)
mask[:,:,0] = 0
mask[:,:,2] = 0
new_img_arr = img * (1 - transparency_ratio) + mask * transparency_ratio
new_img_arr = np.clip(new_img_arr, 0, 255).astype(np.uint8)
# print(new_img_arr.shape)
return new_img_arr
def process_seg(self, image, state, user_state):
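        # Segment the region sketched by the user with SegmentAnything, accumulate it into the stored mask, and show a blended preview.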
Human_prompt="Please process this image based on given mask."
if image is None or \
user_state[0].get('image_path', None) is None or \
not os.path.exists(user_state[0]['image_path']):
AI_prompt = "Please upload an image for processing."
state += [(Human_prompt, AI_prompt)]
return None, state, state, user_state
if 'SegmentAnything' not in self.models.keys():
state += [(None, 'Please load the segmentation tool.')]
return image['image'], state, state, user_state
img = Image.open(user_state[0]['image_path']).convert('RGB')
print(f'user_state[0][\'image_path\'] = {user_state[0]["image_path"]}')
img = np.array(img, dtype=np.uint8)
mask = image['mask'].convert('L')
mask = np.array(mask, dtype=np.uint8)
if mask.sum() == 0:
AI_prompt = "You can click the image and ask me some questions."
state += [(Human_prompt, AI_prompt)]
return image['image'], state, state, user_state
# if 'SegmentAnything' in self.models.keys():
# self.models['SegmentAnything'].clicked_region = mask
if user_state[0].get('features', None) is None:
user_state[0]['features'] = self.models['SegmentAnything'].get_image_embedding(img)
res_mask = self.models['SegmentAnything'].segment_by_mask(mask, user_state[0]['features'])
if user_state[0].get('seg_mask', None) is not None:
res_mask = np.logical_or(user_state[0]['seg_mask'], res_mask)
res_mask = res_mask.astype(np.uint8)*255
user_state[0]['seg_mask'] = res_mask
new_img_arr = self.blend_mask(img, res_mask)
new_img = Image.fromarray(new_img_arr)
res_mask_img = Image.fromarray(res_mask).convert('RGB')
res_mask_path = gen_new_name(user_state[0]['image_path'], 'mask')
res_mask_img.save(res_mask_path)
AI_prompt = f"Received. The mask_path is named {res_mask_path}."
user_state[0]['agent'].memory.buffer += '\nHuman: ' + Human_prompt + '\nAI: ' + AI_prompt
# state = state + [(Human_prompt, f"*{AI_prompt}*")]
        state = state + [(Human_prompt, f'Received. The segmented figure named `{res_mask_path}` is as follows: ')]
state = state + [(None, (res_mask_path, ))]
print(f"\nProcessed run_image, Input image: `{user_state[0]['image_path']}`\nCurrent state: {state}\n"
f"Current Memory: {user_state[0]['agent'].memory.buffer}")
return new_img, state, state, user_state
def process_ocr(self, image, state, user_state):
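        # Report any OCR text that falls inside the region sketched by the user.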
Human_prompt="Please process this image based on given mask."
if image is None or \
user_state[0].get('image_path', None) is None or \
not os.path.exists(user_state[0]['image_path']):
AI_prompt = "Please upload an image for processing."
state += [(Human_prompt, AI_prompt)]
return None, state, state, user_state
uploaded_image_filename = user_state[0]['image_path']
img = np.array(image['image'])
# img[:100+int(time.time() % 50),:100, :] = 0
img = Image.fromarray(img)
# img = image['image'].convert('RGB')
mask = image['mask'].convert('L')
# mask.save(f'test_{int(time.time()) % 1000}.png')
mask = np.array(mask, dtype=np.uint8)
if mask.sum() == 0:
AI_prompt = "You can click the image and ask me some questions."
state += [(Human_prompt, AI_prompt)]
return image['image'], state, state, user_state
chosen_ocr_res = None
if 'ImageOCRRecognition' in self.models.keys():
# self.models['ImageOCRRecognition'].clicked_region = mask
chosen_ocr_res = self.models['ImageOCRRecognition'].get_ocr_by_mask(mask, user_state[0]['ocr_res'])
else:
            state += [(Human_prompt, 'ImageOCRRecognition is not loaded.')]
if chosen_ocr_res is not None and len(chosen_ocr_res) > 0:
AI_prompt = f'OCR result: {chosen_ocr_res}'
# self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + ' AI: ' + AI_prompt
else:
AI_prompt = 'I didn\'t find any optical characters at given location.'
state = state + [(Human_prompt, AI_prompt)]
user_state[0]['agent'].memory.buffer += '\nHuman: ' + Human_prompt + '\nAI: ' + AI_prompt
print(f"\nProcessed process_ocr, Input image: {uploaded_image_filename}\nCurrent state: {state}\n"
f"Current Memory: {user_state[0]['agent'].memory.buffer}")
return image['image'], state, state, user_state
def process_save(self, image, state, user_state):
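        # Save the raw mask drawn on the canvas to disk and record it in the conversation.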
if image is None:
return None, state, state, user_state
uploaded_image_filename = user_state[0]['image_path']
mask_image = image['mask'].convert('RGB')
# mask = np.array(mask, dtype=np.uint8)
# mask_image = Image.fromarray(mask).convert('RGB')
random_name = os.path.join('image', f"{str(uuid.uuid4())[:6]}.png")
mask_image_name = gen_new_name(random_name, 'rawmask')
mask_image.save(mask_image_name, "PNG")
Human_prompt="Please save the given mask."
if np.array(mask_image, dtype=np.uint8).sum() == 0:
AI_prompt = "I can not find the mask. Please operate on the image at first."
state += [(Human_prompt, AI_prompt)]
            return image['image'], state, state, user_state
AI_prompt = f'The saved mask is named {mask_image_name}: '
state = state + [(Human_prompt, AI_prompt)]
state = state + [(None, (mask_image_name, ))]
user_state[0]['agent'].memory.buffer = user_state[0]['agent'].memory.buffer + Human_prompt + ' AI: ' + AI_prompt
print(f"\nProcessed process_ocr, Input image: {uploaded_image_filename}\nCurrent state: {state}\n"
f"Current Memory: {user_state[0]['agent'].memory.buffer}")
return image['image'], state, state, user_state
    def clear_user_state(self, clear_memory, user_state):
        new_user_state = [{}]
        new_user_state[0]['agent'] = user_state[0]['agent']
        new_user_state[0]['memory'] = user_state[0]['memory']
        if clear_memory:
            new_user_state[0]['memory'].clear()
return new_user_state
| [
"Please save the given mask.",
"\nHuman: provide a video named PLACEHOLDER. The description is: PLACEHOLDER. This information helps you to understand this video, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n",
"If the tool only needs image_path, image_path might be PLACEHOLDER. If the tool only needs mask_path, mask_path might be PLACEHOLDER. ",
"Received. ",
"Received video: PLACEHOLDER ",
"Please upload an image for processing.",
"You can click the image and ask me some questions.",
"\nHuman: provide an audio file named PLACEHOLDER. You should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n",
"In other cases, you could refer to history message to finish the action. ",
"I can not find the mask. Please operate on the image at first.",
"\nHuman: provide a image named PLACEHOLDER. The description is: PLACEHOLDER This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n",
"\nHuman: provide a image named PLACEHOLDER. The description is: PLACEHOLDER OCR result is: PLACEHOLDER. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n",
"Received audio: PLACEHOLDER ",
"Received. The mask_path is named PLACEHOLDER.",
"If the tool needs both image_path and mask_path as inputs, image_path might be PLACEHOLDER and mask_path might be PLACEHOLDER. ",
"Please process this image based on given mask.",
"OCR result: PLACEHOLDER",
"The saved mask is named PLACEHOLDER: ",
"I didn't find any optical characters at given location."
] |
2024-01-10 | paperbottle11/cscapstone | webgen2~flask_app.py | from flask import Flask, send_from_directory, redirect, request, render_template
from base64 import b64decode
import os
import cv2
import numpy as np
import openai
import json
from pydantic import BaseModel
import time
import shutil
def byte_image_to_numpy(byte_image):
np_array = np.frombuffer(byte_image, np.uint8)
img = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
return img
def show(img, wait=0):
if type(img) is bytes:
img = byte_image_to_numpy(img)
cv2.imshow("img", img)
cv2.waitKey(wait)
with open("../config.txt", "r") as f:
api_key = f.read().strip()
openai.api_key = api_key
f.close()
def generateImage(prompt, debug=False):
if debug:
print("Debug mode is on, skipping image generation")
return open("test.png", "rb").read()
response = openai.Image.create(
prompt=prompt,
n=1,
size="256x256",
response_format="b64_json",
)
image_data = b64decode(response["data"][0]["b64_json"])
return image_data
class WebsiteAIResponse(BaseModel):
html: str
image_names: list[str]
image_prompts: list[str]
def generate(userRequest, model="gpt-3.5-turbo-0613", messages=None):
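    # Ask the chat model for website HTML plus matching lists of image names and image prompts via a forced function call.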
msg = [
{"role": "system", "content": "You are a machine that generates a website with HTML."},
{"role": "user", "content": f"The request is: {userRequest}. Create HTML code with all of the content in English that would be in the request. The output must be valid json text without any unescaped double quotes and no newline characters. Be informative. Use between one and three images. In the backend, make corresponding lists of image file names and of detailed descriptions for each image name. Position these images in the website where they logically make sense. The folder for images is called \"images\". Use bootstrap CSS classes to style the html. Do not link a bootstrap stylesheet."}
]
if messages is not None:
msg.extend(messages)
response = openai.ChatCompletion.create(
model=model,
messages=msg,
functions=[
{
"name": "create_website",
"description": "Create a website based on the given request and create image prompts for the images in the website",
"parameters": WebsiteAIResponse.model_json_schema()
}
],
function_call={"name": "create_website"}
)
with open("json.json", "w") as f:
f.write(response.choices[0]["message"]["function_call"]["arguments"])
f.close()
try:
output = json.loads(response.choices[0]["message"]["function_call"]["arguments"].strip().encode())
except json.decoder.JSONDecodeError:
return "failed", [], []
return output["html"], output["image_names"], output["image_prompts"]
app = Flask(__name__, static_folder="static")
@app.route('/', methods=['GET'])
def index():
return redirect('/home')
global lastQuery, app_root, generations_count, view_number, project_number, project_path, projects_count
lastQuery = ""
app_root = os.path.dirname(os.path.abspath(__file__))
projects_count = len([entry for entry in os.listdir(app_root) if os.path.isdir(os.path.join(app_root, entry)) and entry.startswith("generations")])
project_number = projects_count - 1 if projects_count > 0 else 0
project_path = os.path.join(app_root, f"generations{project_number}")
generations_count = 0
if os.path.exists(project_path):
generations_count = len([entry for entry in os.listdir(project_path) if os.path.isfile(os.path.join(project_path, entry)) and entry.startswith("baseHTML")])
view_number = generations_count - 1 if generations_count > 0 else 0
def processHTML(html, current_view=view_number):
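    # Inject stylesheet links, shared scripts, and the floating revision/feedback menu into the generated page HTML.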
if current_view == 0: prevView = 0
else: prevView = current_view - 1
if current_view == generations_count - 1: nextView = current_view
else: nextView = current_view + 1
insertIdx = html.find("<head>")
element = "\n<link rel='stylesheet' href='css/{{stylesheet}}'>"
html = html[:insertIdx+6] + element + html[insertIdx+6:]
html = html[:insertIdx+6+len(element)] + "\n<link rel='stylesheet' href='css/genstyle.css'>\n<script src='https://ajax.googleapis.com/ajax/libs/jquery/3.7.1/jquery.min.js'></script>\n<script src='https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js'></script>\n" + html[insertIdx+6+len(element):]
insertIdx = html.find("<body>")
html = html[:insertIdx+6] + f"\n<div class='floating-menu'>\n<div id='stylesheetlistcontainer'><p>Stylesheets:</p><div id='stylesheetlist'>\n<a class='button' href='/lastgen?sheet=cerulean.css&project={'{{project_num}}'}&view={'{{view}}'}'>Cerulean</a>\n<a class='button' href='/lastgen?sheet=cosmo.css&project={'{{project_num}}'}&view={'{{view}}'}'>Cosmo</a>\n<a class='button' href='/lastgen?sheet=darkly.css&project={'{{project_num}}'}&view={'{{view}}'}'>Darkly</a>\n<a class='button' href='/lastgen?sheet=journal.css&project={'{{project_num}}'}&view={'{{view}}'}'>Journal</a>\n<a class='button' href='/lastgen?sheet=lux.css&project={'{{project_num}}'}&view={'{{view}}'}'>Lux</a>\n<a class='button' href='/lastgen?sheet=quartz.css&project={'{{project_num}}'}&view={'{{view}}'}'>Quartz</a>\n<a class='button' href='/lastgen?sheet=united.css&project={'{{project_num}}'}&view={'{{view}}'}'>United</a></div>\n</div>\n<form action='/lastgen' onsubmit='hidesubmit();'>\n<p>Last feedback: \"{'{{view_feedback}}'}\"</p>\n<textarea rows='3' name='feedback' placeholder='enter feedback'></textarea>\n<div class='submitbutton'>\n<button id='submitbutton' type='submit'>Submit</button><div class='loader' id='hiddenDiv'>\n</div>\n</div>\n<input type='hidden' name='sheet' value='{'{{stylesheet}}'}'>\n<input type='hidden' name='view' value='{'{{view}}'}'>\n<input type='hidden' name='project' value='{'{{project_num}}'}'>\n</form>\n<div class='menu-section'>\n<div id='view-nav'>\n<a class='button' href='/lastgen?sheet={'{{stylesheet}}'}&view={prevView}&project={'{{project_num}}'}'><</a>\n<div id='generation-view'>\n<p>Revision: #{current_view} (made from Revision #{'{{source_num}}'})</p>\n<p>Total Generations: {generations_count}</p>\n</div>\n<a class='button' href='/lastgen?sheet={'{{stylesheet}}'}&view={nextView}&project={'{{project_num}}'}'>></a>\n</div>\n<a class='button home' href='/home'>Home</a>\n</div>\n</div>\n<script>\nlet showButton = document.getElementById('submitbutton');\nlet hiddenDiv = document.getElementById('hiddenDiv');\nshowButton.onclick = function() {{hiddenDiv.style.display = 'block';}};\nfunction hidesubmit() {{let button = document.getElementById('submitbutton');button.disabled = true; }}</script>" + html[insertIdx+6:]
return html
@app.route('/home', methods=['GET'])
def home():
global lastQuery, app_root, generations_count, view_number, project_number, project_path, projects_count
if request.method == 'GET' and "request" in request.args:
if request.args["request"] == "": return redirect('/home')
elif request.args["request"] == lastQuery:
stylesheet = "journal.css"
if request.method == 'GET' and "sheet" in request.args: stylesheet=request.args["sheet"]
return redirect(f"/lastgen?sheet={stylesheet}")
else:
print("Request: " + request.args["request"])
print("Image Generation:", "off" if "imagegen" not in request.args else "on")
startTime = time.time()
project_number = projects_count
project_path = os.path.join(app_root, f"generations{project_number}")
if os.path.exists(project_path):
confirm = input("Project already exists. Do you want to continue? (y/n) ")
if confirm.lower() == "n": return redirect('/home')
os.makedirs(os.path.join(project_path, "images"))
projects_count += 1
print("generating website")
userRequest = request.args["request"]
startTextTime = time.time()
model = "gpt-3.5-turbo-0613"
# model = "gpt-4-1106-preview"
html, image_names, image_prompts = generate(userRequest, model=model)
textTimeElapsed = time.time() - startTextTime
print("text generation time: " + str(textTimeElapsed))
if html == "failed":
print("Failed to generate HTML due to JSON error")
try:
shutil.rmtree(project_path, ignore_errors=True)
except Exception as e:
print(f'Failed to delete directory: {e}')
projects_count -= 1
return redirect('/error')
generations_count = 0
view_number = 0
with open(os.path.join(project_path, f"baseHTML{generations_count}.html"), "w") as f:
f.write(html)
f.close()
generations_count += 1
html = processHTML(html)
# Writes the generated site to the generated folder
with open("templates/view.html", "wb") as f:
f.write(html.encode())
f.close()
# Generates images for each image prompt
print("generating images")
imageStartTime = time.time()
debug = "imagegen" not in request.args
for name, prompt in zip(image_names, image_prompts):
img = generateImage(prompt, debug=debug)
with open(os.path.join(project_path, "images", name), "wb") as f:
f.write(img)
f.close()
imageTimeElapsed = time.time() - imageStartTime
print("image generation time: " + str(imageTimeElapsed))
# Save the current query as the last query
lastQuery = request.args["request"]
print("serving generated site")
totalTimeElapsed = time.time() - startTime
print(totalTimeElapsed)
with open("time.txt", "a") as f:
f.write(f"{totalTimeElapsed},{textTimeElapsed},{imageTimeElapsed},{userRequest},{len(html)},model:{model},imagegen:{not debug}\n")
f.close()
with open(os.path.join(project_path, "log.json"), "w") as f:
json.dump({str(view_number): [lastQuery, 0]}, f)
f.close()
stylesheet = "journal.css"
if request.method == 'GET' and "sheet" in request.args: stylesheet=request.args["sheet"]
return redirect(f"/lastgen?sheet={stylesheet}&view={view_number}")
projects = []
projects_count = len([entry for entry in os.listdir(app_root) if os.path.isdir(os.path.join(app_root, entry)) and entry.startswith("generations")])
for i in range(projects_count):
path = os.path.join(app_root, f"generations{i}")
if os.path.exists(path):
log = os.path.join(path, "log.json")
if os.path.exists(log):
with open(log, "r") as f:
project = json.load(f)
f.close()
projects.append(project["0"][0])
else:
projects.append("Not Found.")
return render_template("index.html", projects=projects)
@app.route('/error')
def error():
return send_from_directory(app.static_folder, "error.html")
@app.route('/generated/<path:filename>')
def web_gen_assets(filename):
return send_from_directory("static/generated", filename)
@app.route('/css/<path:filename>')
def css_assets(filename):
return send_from_directory("static/css", filename)
@app.route('/images/<path:filename>')
def img_gen_assets(filename):
return send_from_directory(os.path.join(project_path, "images"), filename)
@app.route('/lastgen', methods=['GET'])
def lastgen():
global lastQuery, app_root, generations_count, view_number, project_number, project_path, projects_count
stylesheet = "journal.css"
if request.method == 'GET':
if "sheet" in request.args: stylesheet=request.args["sheet"]
if "project" in request.args: project_number = int(request.args["project"])
else: project_number = projects_count - 1
project_path = os.path.join(app_root, f"generations{project_number}")
generations_count = 0
if os.path.exists(project_path):
generations_count = len([entry for entry in os.listdir(project_path) if os.path.isfile(os.path.join(project_path, entry)) and entry.startswith("baseHTML")])
view_number = generations_count - 1 if generations_count > 0 else 0
if "view" in request.args: view = int(request.args["view"])
else: view = view_number
if "feedback" in request.args:
if request.args["feedback"] == "": return redirect(f"/lastgen?sheet={stylesheet}&view={view}&project={project_number}")
try:
with open(os.path.join(project_path, f"baseHTML{view}.html"), "r") as f:
html = f.read()
f.close()
except FileNotFoundError:
with open("templates/view.html", "r") as f:
html = f.read()
f.close()
newMsgs = [{"role": "assistant", "content": html},
{'role': 'user', 'content': "The following feedback are changes that need to made to the code. Add, remove, and change the code as needed. The output should be valid json text. Use bootstrap CSS when needed. The feedback is: " + request.args["feedback"]}
]
print("processing feedback:", request.args["feedback"] if "feedback" in request.args else "")
startTime = time.time()
newhtml, image_names, image_prompts = generate(lastQuery, messages=newMsgs)
totalTimeElapsed = time.time() - startTime
if newhtml == "failed":
print("Failed to generate HTML due to JSON error")
return redirect('/error')
view_number += 1
generations_count += 1
with open(os.path.join(project_path, f"baseHTML{view_number}.html"), "w") as f:
f.write(newhtml)
f.close()
outhtml = processHTML(newhtml)
with open("templates/view.html", "wb") as f:
f.write(outhtml.encode())
f.close()
with open(os.path.join(project_path, "log.json"), "r") as f:
view_feedback = json.load(f)
f.close()
view_feedback[str(view_number)] = [request.args["feedback"], view]
with open(os.path.join(project_path, "log.json"), "w") as f:
json.dump(view_feedback, f)
f.close()
with open("feedbackTimes.txt", "a") as f:
f.write(f"{totalTimeElapsed},{request.args['feedback']},{len(newhtml) - len(html)}\n")
f.close()
return redirect(f"/lastgen?sheet={stylesheet}&view={view_number}&project={project_number}")
else:
with open(os.path.join(project_path, f"baseHTML{view}.html"), "r") as f:
html = f.read()
f.close()
html = processHTML(html, current_view=view)
with open("templates/view.html", "wb") as f:
f.write(html.encode())
f.close()
view_feedback = "Not Found."
source_num = "Not Found."
log_path = os.path.join(project_path, "log.json")
if os.path.exists(log_path):
with open(log_path, "r") as f:
log = json.load(f)
if str(view) in log:
view_feedback, source_num = log[str(view)]
f.close()
return render_template("view.html", stylesheet=stylesheet, view_feedback=view_feedback, view=view, source_num=source_num, project_num=project_number)
if __name__ == "__main__":
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(host="0.0.0.0", port=80) | [
"You are a machine that generates a website with HTML.",
"The request is: PLACEHOLDER. Create HTML code with all of the content in English that would be in the request. The output must be valid json text without any unescaped double quotes and no newline characters. Be informative. Use between one and three images. In the backend, make corresponding lists of image file names and of detailed descriptions for each image name. Position these images in the website where they logically make sense. The folder for images is called \"images\". Use bootstrap CSS classes to style the html. Do not link a bootstrap stylesheet.",
"The following feedback are changes that need to made to the code. Add, remove, and change the code as needed. The output should be valid json text. Use bootstrap CSS when needed. The feedback is: "
] |
2024-01-10 | odgaard/ayo_transcription | script.py | import os
import openai
import pandas as pd
openai.api_key = "sk-dummykey"
def get_transcription(audio_file):
with open(audio_file, "rb") as file:
#transcription = {"text": "test"}
transcription = openai.Audio.transcribe("whisper-1", file)
total_text = transcription["text"]
#total_text = " ".join([segment["text"] for segment in transcription['segments']])
return total_text
def main(args):
# Load the existing CSV file or initialize a new one
csv_path = os.path.join(args.folder, 'transcriptions.csv')
if os.path.isfile(csv_path):
transcriptions_df = pd.read_csv(csv_path)
else:
transcriptions_df = pd.DataFrame(columns=['id', 'transcription'])
# Get the set of existing ids
existing_ids = set(transcriptions_df['id'])
# Process each audio file in the folder
for filename in os.listdir(args.folder):
if filename.endswith(".wav") or filename.endswith(".mp3"): # or whatever your audio file extensions are
print(f"Processing {filename}")
# Extract id from the filename
file_id = int(filename.split('.')[0])
# Skip the file if its id already exists in the csv
if file_id in existing_ids:
continue
# Transcribe the file and add the transcription to the DataFrame
file_path = os.path.join(args.folder, filename)
transcription = get_transcription(file_path)
new_row = pd.DataFrame({
'id': [file_id],
'transcription': [transcription]
})
transcriptions_df = pd.concat([transcriptions_df, new_row], ignore_index=True)
# Save the updated DataFrame to the CSV file
print("Writing csv file")
transcriptions_df.to_csv(csv_path, index=False)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument('--folder', type=str, required=True, help="Folder containing audio files")
args = parser.parse_args()
main(args)
| [] |
2024-01-10 | harshita1611/TeachGPT | builder.py | import logging
from langchain.document_loaders import DirectoryLoader, PDFMinerLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import Chroma
from config import (
PERSIST_DIRECTORY,
MODEL_DIRECTORY,
SOURCE_DIR,
EMBEDDING_MODEL,
DEVICE_TYPE,
CHROMA_SETTINGS,
)
def load_docs(directory: str = SOURCE_DIR):
loader = DirectoryLoader(directory, glob="**/*.pdf", use_multithreading=True, loader_cls=PDFMinerLoader)
docs = loader.load()
logging.info(f"Loaded {len(docs)} documents from {directory}")
return docs
def split_docs(documents,chunk_size=1000,chunk_overlap=200):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(documents)
logging.info(f"Split {len(docs)} documents into chunks")
return docs
def builder():
logging.info("Building the database")
documents = load_docs()
docs = split_docs(documents)
embeddings = HuggingFaceInstructEmbeddings(
model_name=EMBEDDING_MODEL,
model_kwargs={"device": DEVICE_TYPE},
cache_folder=MODEL_DIRECTORY,
)
db = Chroma.from_documents(
docs,
embeddings,
persist_directory=PERSIST_DIRECTORY,
client_settings=CHROMA_SETTINGS,
)
logging.info(f"Loaded Documents to Chroma DB Successfully")
if __name__ == '__main__':
builder() | [] |
2024-01-10 | langchain-ai/prompt-eval-recommendation | eval_suggestions.py | """
Module to suggest evals for the user to run based on prompt deltas.
"""
import difflib
import hashlib
import json
from typing import Callable, Optional
from copy import deepcopy
from dotenv import load_dotenv
import logging
import re
from langchain.adapters.openai import convert_openai_messages
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
from langchain.callbacks.manager import trace_as_chain_group
llm = ChatOpenAI(model="gpt-4")
load_dotenv()
# import os
# BASE_URL = os.getenv("AZURE_API_BASE")
# API_KEY = os.getenv("AZURE_API_KEY")
# DEPLOYMENT_NAME = "gpt-4"
# llm = AzureChatOpenAI(
# openai_api_base=BASE_URL,
# openai_api_version=os.getenv("AZURE_API_VERSION"),
# deployment_name=DEPLOYMENT_NAME,
# openai_api_key=API_KEY,
# openai_api_type="azure",
# )
def show_diff(template_1: str, template_2: str):
diff = difflib.ndiff(template_1.splitlines(), template_2.splitlines())
return "\n".join(diff)
def show_readable_diff(template_1: str, template_2: str):
diff = list(
difflib.ndiff(template_1.splitlines(), template_2.splitlines())
)
# Processing the diff list for readable format
added_lines = []
deleted_lines = []
for line in diff:
if line.startswith("- "):
deleted_lines.append(line)
if line.startswith("+ "):
added_lines.append(line)
output = []
if deleted_lines:
output.append("Deleted lines:")
output.extend(deleted_lines)
if added_lines:
output.append("Added lines:")
output.extend(added_lines)
return "\n".join(output)
CATEGORIZE_TEMPLATE = """I need your assistance in identifying and categorizing any new instructions I've added to my prompt template, that add requirements for LLM pipeline responses to satisfy.
**First Prompt Template**:
```
{template_1}
```
**Second Prompt Template** (Updated):
```
{template_2}
```
**Changes Highlighted**:
```
{diff}
```
Please focus your analysis on the newly added instructions in the updated prompt template. Use the categories listed below to describe the changes::
- **Structural**:
- **Format Instruction**: Have any new specifications been added regarding the expected response format, such as a list, JSON, Markdown, or HTML?
- **Example Demonstration**: Are there any new examples provided to demonstrate the desired response format, including specific headers, keys, or structures?
- **Prompt Rephrasing (not a new instruction)**: Has the prompt been rephrased slightly to clarify the task, maintaining the same overall semantic meaning?
- **Content**:
- **Workflow Description**: Have more detailed steps on how to perform the task been newly added?
- **Data Placeholders**: Have any new data sources or context been inserted in placeholders for the LLM to consider?
- **Quantity Instruction**: Have there been new specifications added regarding the number of items of a certain type in the response, such as “at least”, “at most”, or an exact number?
- **Inclusion**: Are there new keywords that every future LLM response should now include?
- **Exclusion**: Have any new keywords been specified that should be excluded from all future LLM responses?
- **Qualitative Assessment**: Are there new qualitative criteria for assessing good responses, including specific requirements for length, tone, or style?
**Expected Output Structure**:
```json
{{
"Structural": {{
"FormatInstruction": "Describe new format specifications (if any)",
"ExampleDemonstration": "Describe new example structure (if any)",
"PromptRephrasing": "Change description (if any)"
}},
"Content": {{
"WorkflowDescription": "Describe added workflow steps (if any)",
"DataPlaceholders": "Describe added data sources or context (if any)",
"QuantityInstruction": "Describe new item quantity specifications (if any)",
"Inclusion": "State new keywords for LLM to include in all responses (if any)",
"Exclusion": "State new keywords for LLM to exclude from all responses (if any)",
"QualitativeAssessment": "Describe new qualitative criteria of a good LLM response (if any)"
}}
}}
```
Please fill out this structure based on your analysis of the newly added instructions. For any categories without changes, please write "No change." Remember, at this stage, we are focusing on identifying additions to the prompt template, not deletions."""
SUGGEST_EVAL_TEMPLATE = """Please use this JSON structure, detailing the newly added instructions to the LLM prompt template, to design at least one evaluation function for each applicable change.
**Requirements and Guidelines:**
1. Limit the use of libraries in your functions to json, numpy, pandas, re, and other standard Python libraries.
2. For complex evaluations where pattern matching or logical checks are insufficient, you may use the `ask_expert` function. This function sends a specific yes-or-no question to a human expert and returns a boolean value.
3. All evaluation functions should return a binary True or False value.
4. All evaluation functions should have a descriptive name and comment explaining the purpose of the function.
5. When creating functions for QualitativeAssesment prompt additions, target the specific criteria added to the prompt rather than creating generic evaluations. For instance, if the criteria specifies a "concise response", the function might check the length of the response and decide whether it's concise. Create a different function for each qualitative criteria, even if there are multiple criteria in the same prompt edit.
6. Use the following template for each function, only accepting a formatted LLM prompt (filled with values in the placeholders) and response as arguments:
**Function Signature**:
```python
def evaluation_function_name(prompt: str, response: str) -> bool:
# Your implementation here
```
**Evaluation Categories and Example Functions:**
Below are examples of functions for each type of change you might encounter, based on the JSON structure provided:
{example_evals}
**Important Notes:**
- If writing a conditional based on keywords in the formatted prompt, make sure the keywords aren't always present in the prompt template. For instance, if the prompt template always contains the word "wedding", don't write a function that checks if the response contains the word "wedding"--use a phrase like "my wedding" to check in the conditional.
- Customize the provided function templates based on the actual criteria specified in the given JSON output of changes. You'll need to adjust the specifics (like the exact phrases or counts) based on the actual criteria I've added to my prompts. Make sure each function has a descriptive name and comment explaining the purpose of the function.
- Do not create evaluation functions for changes categorized under "PromptRephrasing" or "DataPlaceholders".
- Ensure that each function serves as a standalone validation tool to run on every response to the prompt. Each function should be correct and complete, and should not rely on other non-library functions to run."""
EXAMPLE_EVALS = {
"FormatInstruction": """**Format Instruction**:
- If the desired format is JSON:
```python
def evaluate_json_format(prompt: str, response: str) -> bool:
try:
json.loads(response)
return True
except json.JSONDecodeError:
return False
```
- If the desired format is a list:
```python
def evaluate_list_format(prompt: str, response: str) -> bool:
# Check if response starts with a bullet point or number
return response.startswith("- ") or response.startswith("1. ")
```
""",
"ExampleDemonstration": """**Example Demonstration**:
```python
def check_example_demonstration(prompt: str, response: str) -> bool:
# Suppose the example demonstration is markdown and has specific headers
# of "First Header" and "Second Header"
return "# First Header" in response and "# Second Header" in response
```
""",
"QuantityInstruction": """**Quantity Instruction**:
```python
def evaluate_num_distinct_words(prompt: str, response: str) -> bool:
# Suppose responses should contain at least 3 distinct words
distinct_word_count = len(set(response.split()))
return distinct_word_count >= 3
```
""",
"Inclusion": """**Inclusion**:
```python
def check_includes_color(prompt: str, response: str) -> bool:
# Suppose the response should include some color in the rainbow
colors = ["red", "orange", "yellow", "green", "blue", "purple", "indigo"]
return any(color in response for color in colors)
```
""",
"Exclusion": """**Exclusion**:
```python
def check_excludes_white(prompt: str, response: str) -> bool:
# Suppose the prompt template instructs not to include the color
# white for queries related to wedding dresses for guests
if "my wedding" in prompt:
return "white" not in response.lower()
else:
return True
```
""",
"QualitativeAssessment": """**Qualitative Assessment**:
- If the desired length is concise:
```python
def evaluate_concise(prompt: str, response: str) -> bool:
# Suppose the response should be less than 50 characters
return len(response) < 50
```
- If the desired tone is positive:
```python
def evaluate_tone(prompt: str, response: str) -> bool:
return ask_expert(f"Is the tone of the response \{response\} positive?")
```
""",
}
RENDER_DIFF_TEMPLATE = """Please use this JSON structure, detailing the newly added instructions to the LLM prompt template, to render the second prompt template with the changes highlighted. You should return the same second prompt template, but wrap each identified change based on the JSON structure of changes in its tag. Make sure each change has opening and closing tags (e.g., <FormatInstruction></FormatInstruction>). Category tags should not be nested. Your answer should start with <FormattedPromptTemplate> and end with </FormattedPromptTemplate>"""
# Hash all prompts into a single string
combined_content = (
CATEGORIZE_TEMPLATE
+ SUGGEST_EVAL_TEMPLATE
+ RENDER_DIFF_TEMPLATE
+ str(EXAMPLE_EVALS)
).encode()
hash_object = hashlib.sha256(combined_content)
PIPELINE_PROMPT_HASH = hash_object.hexdigest()
def get_suggest_eval_prompt(changes_flagged):
# See which keys have been flagged
examples_to_include = [
f"{str(i)}. {EXAMPLE_EVALS[key]}"
for i, key in enumerate(changes_flagged)
if key in EXAMPLE_EVALS
]
# Format the prompt
examples_to_include_str = "\n".join(examples_to_include)
return SUGGEST_EVAL_TEMPLATE.format(example_evals=examples_to_include_str)
async def suggest_evals(
template_1: str,
template_2: str,
source: str,
characterize_callback: Optional[Callable[[str], None]] = None,
eval_prediction_callback: Optional[Callable[[str], None]] = None,
):
"""Suggest evals for the user to run based on prompt deltas."""
with trace_as_chain_group(
"suggest_evals",
inputs={
"template_1": template_1,
"template_2": template_2,
},
tags=[source, PIPELINE_PROMPT_HASH],
) as cb:
# If the templates are the same, return []
if template_1 == template_2:
# Send callback that there is no change
characterize_callback("Prompt templates are the same.")
cb.on_chain_end(
{
"eval_functions": [],
"messages": [],
"rendered_diff": None,
}
)
return [], [], None
template_1_pretty = template_1 if template_1 != "" else "Empty string"
template_2_pretty = template_2 if template_2 != "" else "Empty string"
diff = show_readable_diff(template_1, template_2)
messages = [
{
"content": "You are an expert in Python and prompting large language models (LLMs). You are assisting me, a prompt engineer, build and monitor an LLM pipeline. An LLM pipeline accepts a prompt, with some instructions, and uses an LLM to generate a response to the prompt. A prompt engineer writes a prompt template, with placeholders, that will get formatted with different variables at pipeline runtime. Typically as prompt engineers test a pipeline, we observe that some responses are not good, so we add new instructions to the prompt template to prevent against these failure modes.",
"role": "system",
},
{
"content": CATEGORIZE_TEMPLATE.format(
template_1=template_1_pretty,
template_2=template_2_pretty,
diff=diff,
),
"role": "user",
},
]
# First characterize the deltas
try:
lc_messages = convert_openai_messages(messages)
char_response = llm.astream(lc_messages, {"callbacks": cb})
logging.debug("Determining prompt deltas...")
collected_messages = []
# iterate through the stream of events
async for chunk in char_response:
if characterize_callback:
characterize_callback(chunk.content)
collected_messages.append(chunk.content)
logging.info("")
reply = "".join(collected_messages)
except Exception as e:
logging.error(f"Error getting deltas: {e}")
cb.on_chain_end(
{
"eval_functions": [],
"messages": messages,
"rendered_diff": None,
}
)
return [], messages, None
# Parse the reply's json from ```json ... ```
reply_json = None
try:
pattern = r"```json(.*?)```"
matches = re.findall(pattern, reply, re.DOTALL)
reply_json = json.loads(matches[0])
except Exception as e:
logging.error(f"Error parsing json: {e}")
messages.append({"content": reply, "role": "assistant"})
cb.on_chain_end(
{
"eval_functions": [],
"messages": messages,
"rendered_diff": None,
}
)
return [], messages, None
# Look for any changes
changes_made = []
for key in reply_json.get("Structural", {}):
if not isinstance(reply_json["Structural"][key], str):
continue
if reply_json["Structural"][key].lower() != "no change":
changes_made.append(key)
for key in reply_json.get("Content", {}):
if not isinstance(reply_json["Content"][key], str):
continue
if reply_json["Content"][key].lower() != "no change":
changes_made.append(key)
# Remove promptrephrasing and dataorcontextaddition
if "PromptRephrasing" in changes_made:
changes_made.remove("PromptRephrasing")
# if "WorkflowDescription" in changes_made:
# changes_made.remove("WorkflowDescription")
if "DataPlaceholders" in changes_made:
changes_made.remove("DataPlaceholders")
# Render the diff
try:
diff_render_messages = deepcopy(messages)
diff_render_messages.append(
{"content": RENDER_DIFF_TEMPLATE, "role": "user"}
)
diff_render_response = await llm.ainvoke(
convert_openai_messages(diff_render_messages)
)
diff_render_response = diff_render_response.content
# Extract whatever is in ```<FormattedPromptTemplate> tags
pattern = (
r"<FormattedPromptTemplate>(.*?)</FormattedPromptTemplate>"
)
matches = re.findall(pattern, diff_render_response, re.DOTALL)
diff_render_response = matches[0]
except Exception as e:
logging.error(f"Error rendering diff: {e}")
diff_render_response = None
# If there are no changes, return []
if not changes_made:
cb.on_chain_end(
{
"eval_functions": [],
"messages": messages,
"rendered_diff": diff_render_response,
}
)
return [], messages, diff_render_response
messages.append(
{
"content": reply,
"role": "assistant",
}
)
# See if there are any deltas that bring upon evals
eval_functions = []
# Then extract the evals
messages.append(
{"content": get_suggest_eval_prompt(changes_made), "role": "user"}
)
logging.debug("Generating evals...")
lc_messages = convert_openai_messages(messages)
eval_response_stream = llm.astream(lc_messages, {"callbacks": cb})
eval_response = []
async for chunk in eval_response_stream:
if eval_prediction_callback:
eval_prediction_callback(chunk.content)
eval_response.append(chunk.content)
        if eval_prediction_callback:
            eval_prediction_callback(None)
eval_response_content = "".join(eval_response)
messages.append(
{"content": eval_response_content, "role": "assistant"}
)
# Look for the evals in the response as any instance of ```python ```
pattern = r"^\s*```python\s+(.*?def.*?)(?=\n\s*```)" # match any def with leading whitespace
matches = re.findall(
pattern, eval_response_content, re.DOTALL | re.MULTILINE
)
        # Wrap each matched function as an eval function entry
for match in matches:
match = match.strip()
try:
# Replace `ask_expert` with a call to an llm function
needs_llm = False
function_str = match
if "ask_expert" in function_str:
function_str = function_str.replace(
"ask_expert", "ask_llm"
)
needs_llm = True
# Add the function to the list of eval functions
eval_functions.append(
{"code": function_str, "needs_llm": needs_llm}
)
except:
logging.error(f"Error parsing code: {match}")
cb.on_chain_end(
{
"eval_functions": eval_functions,
"messages": messages,
"rendered_diff": diff_render_response,
},
)
return eval_functions, messages, diff_render_response
if __name__ == "__main__":
import asyncio
    ef, m, rendered = asyncio.run(
        suggest_evals(
            "You are a helpful agent",
            "You are a helpful agent. Respond in less than 3 words",
            source="test",  # arbitrary tag recorded in the tracing metadata
        )
    )
    print(ef, m)
| [
"You are an expert in Python and prompting large language models (LLMs). You are assisting me, a prompt engineer, build and monitor an LLM pipeline. An LLM pipeline accepts a prompt, with some instructions, and uses an LLM to generate a response to the prompt. A prompt engineer writes a prompt template, with placeholders, that will get formatted with different variables at pipeline runtime. Typically as prompt engineers test a pipeline, we observe that some responses are not good, so we add new instructions to the prompt template to prevent against these failure modes.",
"Please use this JSON structure, detailing the newly added instructions to the LLM prompt template, to design at least one evaluation function for each applicable change.\n\n**Requirements and Guidelines:**\n\n1. Limit the use of libraries in your functions to json, numpy, pandas, re, and other standard Python libraries.\n2. For complex evaluations where pattern matching or logical checks are insufficient, you may use the `ask_expert` function. This function sends a specific yes-or-no question to a human expert and returns a boolean value.\n3. All evaluation functions should return a binary True or False value.\n4. All evaluation functions should have a descriptive name and comment explaining the purpose of the function.\n5. When creating functions for QualitativeAssesment prompt additions, target the specific criteria added to the prompt rather than creating generic evaluations. For instance, if the criteria specifies a \"concise response\", the function might check the length of the response and decide whether it's concise. Create a different function for each qualitative criteria, even if there are multiple criteria in the same prompt edit.\n6. Use the following template for each function, only accepting a formatted LLM prompt (filled with values in the placeholders) and response as arguments:\n\n**Function Signature**:\n```python\ndef evaluation_function_name(prompt: str, response: str) -> bool:\n # Your implementation here\n```\n\n**Evaluation Categories and Example Functions:**\n\nBelow are examples of functions for each type of change you might encounter, based on the JSON structure provided:\n\n{example_evals}\n\n**Important Notes:**\n\n- If writing a conditional based on keywords in the formatted prompt, make sure the keywords aren't always present in the prompt template. For instance, if the prompt template always contains the word \"wedding\", don't write a function that checks if the response contains the word \"wedding\"--use a phrase like \"my wedding\" to check in the conditional. \n- Customize the provided function templates based on the actual criteria specified in the given JSON output of changes. You'll need to adjust the specifics (like the exact phrases or counts) based on the actual criteria I've added to my prompts. Make sure each function has a descriptive name and comment explaining the purpose of the function.\n- Do not create evaluation functions for changes categorized under \"PromptRephrasing\" or \"DataPlaceholders\".\n- Ensure that each function serves as a standalone validation tool to run on every response to the prompt. Each function should be correct and complete, and should not rely on other non-library functions to run.",
"I need your assistance in identifying and categorizing any new instructions I've added to my prompt template, that add requirements for LLM pipeline responses to satisfy.\n\n**First Prompt Template**: \n```\n{template_1}\n```\n\n**Second Prompt Template** (Updated):\n```\n{template_2}\n```\n\n**Changes Highlighted**:\n```\n{diff}\n```\n\nPlease focus your analysis on the newly added instructions in the updated prompt template. Use the categories listed below to describe the changes::\n\n- **Structural**:\n - **Format Instruction**: Have any new specifications been added regarding the expected response format, such as a list, JSON, Markdown, or HTML?\n - **Example Demonstration**: Are there any new examples provided to demonstrate the desired response format, including specific headers, keys, or structures?\n - **Prompt Rephrasing (not a new instruction)**: Has the prompt been rephrased slightly to clarify the task, maintaining the same overall semantic meaning?\n\n- **Content**:\n - **Workflow Description**: Have more detailed steps on how to perform the task been newly added?\n - **Data Placeholders**: Have any new data sources or context been inserted in placeholders for the LLM to consider?\n - **Quantity Instruction**: Have there been new specifications added regarding the number of items of a certain type in the response, such as “at least”, “at most”, or an exact number?\n - **Inclusion**: Are there new keywords that every future LLM response should now include?\n - **Exclusion**: Have any new keywords been specified that should be excluded from all future LLM responses?\n - **Qualitative Assessment**: Are there new qualitative criteria for assessing good responses, including specific requirements for length, tone, or style?\n\n**Expected Output Structure**:\n\n```json\n{{\n \"Structural\": {{\n \"FormatInstruction\": \"Describe new format specifications (if any)\",\n \"ExampleDemonstration\": \"Describe new example structure (if any)\",\n \"PromptRephrasing\": \"Change description (if any)\"\n }},\n \"Content\": {{\n \"WorkflowDescription\": \"Describe added workflow steps (if any)\",\n \"DataPlaceholders\": \"Describe added data sources or context (if any)\",\n \"QuantityInstruction\": \"Describe new item quantity specifications (if any)\",\n \"Inclusion\": \"State new keywords for LLM to include in all responses (if any)\",\n \"Exclusion\": \"State new keywords for LLM to exclude from all responses (if any)\",\n \"QualitativeAssessment\": \"Describe new qualitative criteria of a good LLM response (if any)\"\n }}\n}}\n```\n\nPlease fill out this structure based on your analysis of the newly added instructions. For any categories without changes, please write \"No change.\" Remember, at this stage, we are focusing on identifying additions to the prompt template, not deletions.",
"Empty string",
"Please use this JSON structure, detailing the newly added instructions to the LLM prompt template, to render the second prompt template with the changes highlighted. You should return the same second prompt template, but wrap each identified change based on the JSON structure of changes in its tag. Make sure each change has opening and closing tags (e.g., <FormatInstruction></FormatInstruction>). Category tags should not be nested. Your answer should start with <FormattedPromptTemplate> and end with </FormattedPromptTemplate>"
] |
2024-01-10 | ChenLight-s/Auto-GPT | scripts~llm_utils.py | import openai
from config import Config
cfg = Config()
openai.api_key = cfg.openai_api_key
# Overly simple abstraction until we create something better
def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None) -> str:
"""Create a chat completion using the OpenAI API"""
if cfg.use_azure:
response = openai.ChatCompletion.create(
deployment_id=cfg.azure_chat_deployment_id,
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens
)
return response.choices[0].message["content"]
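# A minimal usage sketch (hypothetical messages; the model name normally comes from the
# caller's config rather than being hard-coded):
#
# messages = [
#     {"role": "system", "content": "You are a helpful assistant."},
#     {"role": "user", "content": "Summarize the project goals."},
# ]
# reply = create_chat_completion(messages, model="gpt-3.5-turbo", max_tokens=256)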
| [] |
2024-01-10 | cablepull/TaskWeaver | project~plugins~sql_pull_data.py | from operator import itemgetter
import pandas as pd
from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnableMap
from langchain.utilities import SQLDatabase
from taskweaver.plugin import Plugin, register_plugin
@register_plugin
class SqlPullData(Plugin):
def __call__(self, query: str):
api_type = self.config.get("api_type", "azure")
if api_type == "azure":
model = AzureChatOpenAI(
azure_endpoint=self.config.get("api_base"),
openai_api_key=self.config.get("api_key"),
openai_api_version=self.config.get("api_version"),
azure_deployment=self.config.get("deployment_name"),
temperature=0,
verbose=True,
)
elif api_type == "openai":
model = ChatOpenAI(
openai_api_key=self.config.get("api_key"),
model_name=self.config.get("deployment_name"),
temperature=0,
verbose=True,
)
else:
raise ValueError("Invalid API type. Please check your config file.")
template = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}
Question: {question}
SQL Query:"""
prompt = ChatPromptTemplate.from_template(template)
db = SQLDatabase.from_uri(self.config.get("sqlite_db_path"))
def get_schema(_):
return db.get_table_info()
inputs = {
"schema": RunnableLambda(get_schema),
"question": itemgetter("question"),
}
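# LCEL pipeline sketch: fill {schema} and {question}, call the chat model with a stop sequence
# so generation ends before a "SQLResult:" line, then parse the reply down to a plain SQL string.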
sql_response = RunnableMap(inputs) | prompt | model.bind(stop=["\nSQLResult:"]) | StrOutputParser()
sql = sql_response.invoke({"question": query})
result = db._execute(sql, fetch="all")
df = pd.DataFrame(result)
if len(df) == 0:
return df, (
f"I have generated a SQL query based on `{query}`.\nThe SQL query is {sql}.\n" f"The result is empty."
)
else:
return df, (
f"I have generated a SQL query based on `{query}`.\nThe SQL query is {sql}.\n"
f"There are {len(df)} rows in the result.\n"
f"The first {min(5, len(df))} rows are:\n{df.head(min(5, len(df))).to_markdown()}"
)
| [
"Based on the table schema below, write a SQL query that would answer the user's question:\n {schema}\n\n Question: {question}\n SQL Query:"
] |
2024-01-10 | codeghees/weblm | weblm~controller.py | import heapq
import itertools
import json
import csv
import math
import os
import re
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from typing import Any, Dict, DefaultDict, List, Tuple, Union
import cohere
import numpy as np
MAX_SEQ_LEN = 2000
MAX_NUM_ELEMENTS = 100
TYPEABLE = ["input", "select"]
CLICKABLE = ["link", "button"]
MODEL = "xlarge"
help_msg = """Welcome to WebLM!
The goal of this project is to build a system that takes an objective from the user and operates a browser to carry it out.
For example:
- book me a table for 2 at bar isabel next wednesday at 7pm
- i need a flight from SF to London on Oct 15th nonstop
- buy me more whitening toothpaste from amazon and send it to my apartment
WebLM learns to carry out tasks *by demonstration*. That means you'll need to guide it and correct it when it goes astray. Over time, the more people use it and the more tasks it's used for, the better WebLM will become and the less it will rely on user input.
To control the system:
- You can see what the model sees at each step by looking at the list of elements the model can interact with
- show: You can also see a picture of the browser window by typing `show`
- success: When the model has succeeded at the task you set out (or gotten close enough), you can teach the model by typing `success` and it will save its actions to use in future interactions
- cancel: If the model is failing or you made a catastrophic mistake you can type `cancel` to kill the session
- help: Type `help` to show this message
Every time you use WebLM it will improve. If you want to contribute to the project and help us build it, join the Discord (https://discord.com/invite/co-mmunity) or send an email to [email protected]"""
prompt_template = """Given:
(1) an objective that you are trying to achieve
(2) the URL of your current web page
(3) a simplified text description of what's visible in the browser window
Your commands are:
click X - click on element X.
type X "TEXT" - type the specified text into input X
Here are some examples:
$examples
Present state:
$state
Next Command:"""
state_template = """Objective: $objective
Current URL: $url
Current Browser Content:
------------------
$browser_content
------------------
Previous actions:
$previous_commands"""
prioritization_template = """Here are the most relevant elements on the webpage (links, buttons, selects and inputs) to achieve the objective below:
Objective: $objective
URL: $url
Relevant elements:
{element}"""
user_prompt_end = ("\n\t(success) the goal is accomplished"
"\n\t(cancel) terminate the session"
"\nType a choice and then press enter:")
user_prompt_1 = ("Given web state:\n{state}"
"\n\nI have to choose between `clicking` and `typing` here."
"\n**I think I should{action}**"
"\n\t(y) proceed with this action"
"\n\t(n) do the other action" + user_prompt_end)
user_prompt_2 = ("Given state:\n{self._construct_state(url, pruned_elements)}"
"\n\nSuggested command: {cmd}.\n\t(y) accept and continue"
"\n\t(s) save example, accept, and continue"
"\n{other_options}"
"\n\t(enter a new command) type your own command to replace the model's suggestion" + user_prompt_end)
user_prompt_3 = ("Given state:\n{self._construct_state(url, pruned_elements)}"
"\n\nSuggested command: {self._cmd}.\n\t(y) accept and continue"
"\n\t(s) save example, accept, and continue"
"\n\t(enter a new command) type your own command to replace the model's suggestion" + user_prompt_end)
def _fn(x):
if len(x) == 3:
option, prompt, self = x
return_likelihoods = "ALL"
elif len(x) == 4:
option, prompt, self, return_likelihoods = x
while True:
try:
if len(self.co.tokenize(prompt)) > 2048:
prompt = truncate_left(self.co.tokenize, prompt)
return (self.co.generate(prompt=prompt, max_tokens=0, model=MODEL,
return_likelihoods=return_likelihoods).generations[0].likelihood, option)
except cohere.error.CohereError as e:
print(f"Cohere fucked up: {e}")
continue
def truncate_left(tokenize, prompt, *rest_of_prompt, limit=2048):
i = 0
chop_size = 5
print(f"WARNING: truncating sequence of length {len(tokenize(prompt + ''.join(rest_of_prompt)))} to length {limit}")
while len(tokenize(prompt + "".join(rest_of_prompt))) > limit:
prompt = prompt[i * chop_size:]
i += 1
return prompt
def split_list_by_separators(l: List[Any], separator_sequences: List[List[Any]]) -> List[List[Any]]:
"""Split a list by a subsequence.
split_list_by_separators(range(7), [[2, 3], [5]]) == [[0, 1], [4], [6]]
"""
split_list: List[List[Any]] = []
tmp_seq: List[Any] = []
i = 0
while i < len(l):
item = l[i]
# if this item may be part of one of the separator_sequences
if any(item == x[0] for x in separator_sequences):
for s in filter(lambda x: item == x[0], separator_sequences):
# if we've found a matching subsequence
if l[i:i + len(s)] == s:
if len(tmp_seq) != 0:
split_list.append(tmp_seq)
tmp_seq = []
i += len(s)
break
else:
i += 1
else:
tmp_seq.append(item)
i += 1
if len(tmp_seq) != 0:
split_list.append(tmp_seq)
return split_list
class Prompt:
def __init__(self, prompt: str) -> None:
self.prompt = prompt
def __str__(self) -> str:
return self.prompt
class Command:
def __init__(self, cmd: str) -> None:
self.cmd = cmd
def __str__(self) -> str:
return self.cmd
class DialogueState(Enum):
Unset = None
Action = "pick action"
ActionFeedback = "action from feedback"
Command = "suggest command"
CommandFeedback = "command from feedback"
class Controller:
"""A Cohere-powered controller that takes in a browser state and produces and action.
The basic outline of this Controller's strategy is:
1. receive page content from browser
2. prioritise elements on page based on how relevant they are to the objective
3. look up similar states from the past
4. choose between clicking and typing
5. choose what element to click or what element to type in
"""
def __init__(self, co: cohere.Client, objective: str):
"""
Args:
co (cohere.Client): a Cohere Client
objective (str): the objective to accomplish
"""
self.co = co
self.objective = objective
self.previous_commands: List[str] = []
self.moments: List[Tuple[str, str]] = []
self.user_responses: DefaultDict[str, int] = defaultdict(int)
self.reset_state()
def is_running(self):
return self._step != DialogueState.Unset
def reset_state(self):
self._step = DialogueState.Unset
self._action = None
self._cmd = None
self._chosen_elements: List[Dict[str, str]] = []
self._prioritized_elements = None
self._prioritized_elements_hash = None
def success(self):
for state, command in self.moments:
self._save_example(state, command)
def choose(self,
template: str,
options: List[Dict[str, str]],
return_likelihoods: str = "ALL",
topk: int = 1) -> List[Tuple[int, Dict[str, str]]]:
"""Choose the most likely continuation of `prompt` from a set of `options`.
Args:
template (str): a string template with keys that match the dictionaries in `options`
options (List[Dict[str, str]]): the options to be chosen from
Returns:
str: the most likely option from `options`
"""
num_options = len(options)
with ThreadPoolExecutor(num_options) as pp:
_lh = pp.map(
_fn,
zip(options, [template.format(**option) for option in options], [self] * num_options,
[return_likelihoods] * num_options))
return sorted(_lh, key=lambda x: x[0], reverse=True)[:topk]
def choose_element(self,
template: str,
options: List[Dict[str, str]],
group_size: int = 10,
topk: int = 1) -> List[Dict[str, str]]:
"""A hacky way of choosing the most likely option, while staying within sequence length constraints
Algo:
1. chunk `options` into groups of `group_size`
2. within each group perform a self.choose to get the topk elements (we'll have num_groups*topk elements after this)
3. flatten and repeat recursively until the number of options is down to topk
Args:
template (str): the prompt template with f-string style template tags
options (List[Dict[str, str]]): a list of dictionaries containing key-value replacements of the template tags
group_size (int, optional): The size of each group of options to select from. Defaults to 10.
topk (int, optional): The topk most likely options to return. Defaults to 1.
Returns:
List[Dict[str, str]]: The `topk` most likely elements in `options` according to the model
"""
num_options = len(options)
num_groups = int(math.ceil(num_options / group_size))
if num_options == 0:
raise Exception()
choices = []
for i in range(num_groups):
group = options[i * group_size:(i + 1) * group_size]
template_tmp = template.replace("elements", "\n".join(item["elements"] for item in group))
options_tmp = [{"id": item["id"]} for item in group]
choice = [x[1] for x in self.choose(template_tmp, options_tmp, topk=topk)]
chosen_elements = []
for x in choice:
chosen_elements.append(list(filter(lambda y: y["id"] == x["id"], group))[0])
choices.extend(chosen_elements)
if len(choices) <= topk:
return choices
else:
return self.choose_element(template, choices, group_size, topk)
def gather_examples(self, state: str, topk: int = 5) -> List[str]:
"""Simple semantic search over a file of past interactions to find the most similar ones."""
with open("examples.json", "r") as fd:
examples = json.load(fd)
if len(examples) == 0:
return []
embeds, examples = zip(*examples)
embeds = np.array(embeds)
embedded_state = np.array(self.co.embed(texts=[state], truncate="RIGHT").embeddings[0])
scores = np.einsum("i,ji->j", embedded_state,
embeds) / (np.linalg.norm(embedded_state) * np.linalg.norm(embeds, axis=1))
ind = np.argsort(scores)[-topk:]
examples = np.array(examples)[ind]
return examples
def _construct_prev_cmds(self) -> str:
return "\n".join(
f"{i+1}. {x}" for i, x in enumerate(self.previous_commands)) if self.previous_commands else "None"
def _construct_state(self, url: str, page_elements: List[str]) -> str:
state = state_template
state = state.replace("$objective", self.objective)
state = state.replace("$url", url[:100])
state = state.replace("$previous_commands", self._construct_prev_cmds())
return state.replace("$browser_content", "\n".join(page_elements))
def _construct_prompt(self, state: str, examples: List[str]) -> str:
prompt = prompt_template
prompt = prompt.replace("$examples", "\n\n".join(examples))
return prompt.replace("$state", state)
def _save_example(self, state: str, command: str):
example = ("Example:\n" f"{state}\n" f"Next Command: {command}\n" "----")
print(f"Example being saved:\n{example}")
with open("examples.json", "r") as fd:
embeds_examples = json.load(fd)
embeds, examples = zip(*embeds_examples)
embeds, examples = list(embeds), list(examples)
if example in examples:
print("example already exists")
return
examples.append(example)
embeds.append(self.co.embed(texts=[example]).embeddings[0])
embeds_examples = list(zip(embeds, examples))
with open("examples_tmp.json", "w") as fd:
json.dump(embeds_examples, fd)
os.replace("examples_tmp.json", "examples.json")
def _construct_responses(self):
keys_to_save = ["y", "n", "s", "command", "success", "cancel"]
responses_to_save = defaultdict(int)
for key, value in self.user_responses.items():
if key in keys_to_save:
responses_to_save[key] = value
elif key not in keys_to_save and key:
responses_to_save["command"] += 1
self.user_responses = responses_to_save
print(f"Responses being saved:\n{dict(responses_to_save)}")
def save_responses(self):
keys_to_save = ["y", "n", "s", "command", "success", "cancel"]
# Check if data file already exists
responses_filepath = "responses.csv"
if os.path.isfile(responses_filepath):
print("File exists")
with open(responses_filepath, "a+") as fd:
wr = csv.writer(fd, quoting=csv.QUOTE_ALL)
wr.writerow([self.user_responses[key] for key in keys_to_save])
else:
print("No data available")
with open(responses_filepath, "w+") as fd:
wr = csv.writer(fd, quoting=csv.QUOTE_ALL)
wr.writerow(keys_to_save)
wr.writerow([self.user_responses[key] for key in keys_to_save])
def _shorten_prompt(self, url, elements, examples, *rest_of_prompt, target: int = MAX_SEQ_LEN):
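# Strategy sketch: trim the prompt down to `target` tokens by first dropping the earliest
# retrieved examples (keeping at least MIN_EXAMPLES), then dropping elements from the end of
# the list (keeping at least MIN_ELEMENTS), and only as a last resort truncating from the left.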
state = self._construct_state(url, elements)
prompt = self._construct_prompt(state, examples)
tokenized_prompt = self.co.tokenize(prompt + "".join(rest_of_prompt))
tokens = tokenized_prompt.token_strings
split_tokens = split_list_by_separators(tokens,
[['EX', 'AMP', 'LE'], ["Example"], ["Present", " state", ":", "\n"]])
example_tokens = split_tokens[1:-1]
length_of_examples = list(map(len, example_tokens))
state_tokens = split_tokens[-1]
state_tokens = list(
itertools.chain.from_iterable(
split_list_by_separators(state_tokens, [['----', '----', '----', '----', '--', '\n']])[1:-1]))
state_tokens = split_list_by_separators(state_tokens, [["\n"]])
length_of_elements = list(map(len, state_tokens))
length_of_prompt = len(tokenized_prompt)
def _fn(i, j):
state = self._construct_state(url, elements[:len(elements) - i])
prompt = self._construct_prompt(state, examples[j:])
return state, prompt
MIN_EXAMPLES = 1
i, j = (0, 0)
while (length_of_prompt - sum(length_of_examples)) + sum(
length_of_examples[j:]) > target and j < len(examples) - MIN_EXAMPLES:
j += 1
print(f"num examples: {len(examples) - j}")
state, prompt = _fn(i, j)
if len(self.co.tokenize(prompt + "".join(rest_of_prompt))) <= target:
return state, prompt
MIN_ELEMENTS = 7
while (length_of_prompt - sum(length_of_examples[:j]) - sum(length_of_elements)) + sum(
length_of_elements[:len(length_of_elements) - i]) > target and i < len(elements) - MIN_ELEMENTS:
i += 1
print(f"num elements: {len(length_of_elements) - i}")
state, prompt = _fn(i, j)
# last resort: start cutting off the beginning of the prompt
if len(self.co.tokenize(prompt + "".join(rest_of_prompt))) > target:
prompt = truncate_left(self.co.tokenize, prompt, *rest_of_prompt, limit=target)
return state, prompt
def _generate_prioritization(self, page_elements: List[str], url: str):
prioritization = prioritization_template
prioritization = prioritization.replace("$objective", self.objective)
prioritization = prioritization.replace("$url", url)
self._prioritized_elements = self.choose(prioritization, [{
"element": x
} for x in page_elements],
topk=len(page_elements))
self._prioritized_elements = [x[1]["element"] for x in self._prioritized_elements][:MAX_NUM_ELEMENTS]
self._prioritized_elements_hash = hash(frozenset(page_elements))
self._step = DialogueState.Action
print(self._prioritized_elements)
def pick_action(self, url: str, page_elements: List[str], response: str = None):
# this strategy for action selection does not work very well, TODO improve this
if self._step not in [DialogueState.Action, DialogueState.ActionFeedback]:
return
state = self._construct_state(url, self._prioritized_elements)
examples = self.gather_examples(state)
prompt = self._construct_prompt(state, examples)
if self._step == DialogueState.Action:
action = " click"
if any(y in x for y in TYPEABLE for x in page_elements):
state, prompt = self._shorten_prompt(url, self._prioritized_elements, examples, target=MAX_SEQ_LEN)
action = self.choose(prompt + "{action}", [
{
"action": " click",
},
{
"action": " type",
},
], topk=2)
# if the model is confident enough, just assume the suggested action is correct
if (action[0][0] - action[1][0]) / -action[1][0] > 1.:
action = action[0][1]["action"]
else:
action = action[0][1]["action"]
self._action = action
self._step = DialogueState.ActionFeedback
return Prompt(eval(f'f"""{user_prompt_1}"""'))
self._action = action
self._step = DialogueState.Command
elif self._step == DialogueState.ActionFeedback:
if response == "y":
pass
elif response == "n":
if "click" in self._action:
self._action = " type"
elif "type" in self._action:
self._action = " click"
elif response == "examples":
examples = "\n".join(examples)
return Prompt(f"Examples:\n{examples}\n\n" "Please respond with 'y' or 'n'")
else:
return Prompt("Please respond with 'y' or 'n'")
self._step = DialogueState.Command
def _get_cmd_prediction(self, prompt: str, chosen_element: str) -> str:
if "type" in self._action:
text = None
while text is None:
try:
num_tokens = 20
if len(self.co.tokenize(prompt)) > 2048 - num_tokens:
print(f"WARNING: truncating sequence of length {len(self.co.tokenize(prompt))}")
prompt = truncate_left(self.co.tokenize,
prompt,
self._action,
chosen_element,
limit=2048 - num_tokens)
print(len(self.co.tokenize(prompt + self._action + chosen_element)))
text = max(self.co.generate(prompt=prompt + self._action + chosen_element,
model=MODEL,
temperature=0.5,
num_generations=5,
max_tokens=num_tokens,
stop_sequences=["\n"],
return_likelihoods="GENERATION").generations,
key=lambda x: x.likelihood).text
print(text)
except cohere.error.CohereError as e:
print(f"Cohere fucked up: {e}")
continue
else:
text = ""
return (self._action + chosen_element + text).strip()
def generate_command(self, url: str, pruned_elements: List[str], response: str = None):
state = self._construct_state(url, pruned_elements)
examples = self.gather_examples(state)
prompt = self._construct_prompt(state, examples)
if self._step == DialogueState.Command:
if len(pruned_elements) == 1:
chosen_element = " " + " ".join(pruned_elements[0].split(" ")[:2])
self._chosen_elements = [{"id": chosen_element}]
else:
state = self._construct_state(url, ["$elements"])
prompt = self._construct_prompt(state, examples)
state, prompt = self._shorten_prompt(url, ["$elements"], examples, self._action)
group_size = 20
self._chosen_elements = self.choose_element(
prompt + self._action + "{id}",
list(map(lambda x: {
"id": " " + " ".join(x.split(" ")[:2]),
"elements": x
}, pruned_elements)),
group_size,
topk=5)
print(self._chosen_elements)
chosen_element = self._chosen_elements[0]["id"]
state = self._construct_state(url, pruned_elements)
prompt = self._construct_prompt(state, examples)
state, prompt = self._shorten_prompt(url, pruned_elements, examples, self._action, chosen_element)
cmd = self._get_cmd_prediction(prompt, chosen_element)
self._cmd = cmd
self._step = DialogueState.CommandFeedback
other_options = "\n".join(
f"\t({i+2}){self._action}{x['id']}" for i, x in enumerate(self._chosen_elements[1:]))
return Prompt(eval(f'f"""{user_prompt_2}"""'))
elif self._step == DialogueState.CommandFeedback:
if response == "examples":
examples = "\n".join(examples)
return Prompt(f"Examples:\n{examples}\n\n" "Please respond with 'y' or 's'")
elif response == "prompt":
chosen_element = self._chosen_elements[0]["id"]
state, prompt = self._shorten_prompt(url, pruned_elements, examples, self._action, chosen_element)
return Prompt(f"{prompt}\n\nPlease respond with 'y' or 's'")
elif response == "recrawl":
return Prompt(eval(f'f"""{user_prompt_3}"""'))
elif response == "elements":
return Prompt("\n".join(str(d) for d in self._chosen_elements))
if re.match(r'\d+', response):
chosen_element = self._chosen_elements[int(response) - 1]["id"]
state, prompt = self._shorten_prompt(url, pruned_elements, examples, self._action, chosen_element)
self._cmd = self._get_cmd_prediction(prompt, chosen_element)
if "type" in self._action:
return Prompt(eval(f'f"""{user_prompt_3}"""'))
elif response != "y" and response != "s":
self._cmd = response
cmd_pattern = r"(click|type) (link|button|input|select) [\d]+( \"\w+\")?"
if not re.match(cmd_pattern, self._cmd):
return Prompt(f"Invalid command '{self._cmd}'. Must match regex '{cmd_pattern}'. Try again...")
if response == "s":
self._save_example(state=self._construct_state(url, pruned_elements[:50]), command=self._cmd)
self.moments.append((self._construct_state(url, pruned_elements), self._cmd))
self.previous_commands.append(self._cmd)
cmd = Command(self._cmd.strip())
self.reset_state()
return cmd
def step(self, url: str, page_elements: List[str], response: str = None) -> Union[Prompt, Command]:
self._step = DialogueState.Action if self._step == DialogueState.Unset else self._step
if self._prioritized_elements is None or self._prioritized_elements_hash != hash(frozenset(page_elements)):
self._generate_prioritization(page_elements, url)
self.user_responses[response] += 1
self._construct_responses()
action_or_prompt = self.pick_action(url, page_elements, response)
if isinstance(action_or_prompt, Prompt):
return action_or_prompt
if "click" in self._action:
pruned_elements = list(filter(lambda x: any(x.startswith(y) for y in CLICKABLE),
self._prioritized_elements))
elif "type" in self._action:
pruned_elements = list(filter(lambda x: any(x.startswith(y) for y in TYPEABLE), self._prioritized_elements))
return self.generate_command(url, pruned_elements, response)
| [
"\n\n",
"\n",
"Objective: $objective\nCurrent URL: $url\nCurrent Browser Content:\n------------------\n$browser_content\n------------------\nPrevious actions:\n$previous_commands",
"1",
"elements",
"Given web state:\n{state}\n\nI have to choose between `clicking` and `typing` here.\n**I think I should{action}**\n\t(y) proceed with this action\n\t(n) do the other actionPLACEHOLDER",
"Given:\n (1) an objective that you are trying to achieve\n (2) the URL of your current web page\n (3) a simplified text description of what's visible in the browser window\n\nYour commands are:\n click X - click on element X.\n type X \"TEXT\" - type the specified text into input X\n\nHere are some examples:\n\n$examples\n\nPresent state:\n$state\nNext Command:",
"Given state:\n{self._construct_state(url, pruned_elements)}\n\nSuggested command: {cmd}.\n\t(y) accept and continue\n\t(s) save example, accept, and continue\n{other_options}\n\t(enter a new command) type your own command to replace the model's suggestionPLACEHOLDER",
"\n\t(success) the goal is accomplished\n\t(cancel) terminate the session\nType a choice and then press enter:",
"Here are the most relevant elements on the webpage (links, buttons, selects and inputs) to achieve the objective below:\nObjective: $objective\nURL: $url\nRelevant elements:\n{element}",
"Given state:\n{self._construct_state(url, pruned_elements)}\n\nSuggested command: {self._cmd}.\n\t(y) accept and continue\n\t(s) save example, accept, and continue\n\t(enter a new command) type your own command to replace the model's suggestionPLACEHOLDER"
] |
2024-01-10 | codeghees/weblm | weblm~interface.py | import asyncio
import chunk
import os
import traceback
from typing import Dict, Tuple
import cohere
import discord
from discord import Embed, File
from discord.ext import commands
from playwright.async_api import async_playwright
from .controller import Command, Controller, Prompt, help_msg
from .crawler import AsyncCrawler
co = cohere.Client(os.environ.get("COHERE_KEY"))
MSG_LEN_LIMIT = 1800
def chunk_message_for_sending(msg):
chunks = []
tmp_chunk = ""
for line in msg.split("\n"):
if len(tmp_chunk + line) > MSG_LEN_LIMIT:
chunks.append(tmp_chunk)
tmp_chunk = line
else:
tmp_chunk += "\n" + line
if tmp_chunk != "":
chunks.append(tmp_chunk)
return chunks
class MyClient(discord.Client):
def __init__(self, playwright, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.sessions: Dict[int, Tuple[AsyncCrawler, Controller]] = {}
self.playwright = playwright
async def on_ready(self):
"""Initializes bot"""
print(f"We have logged in as {self.user}")
for guild in self.guilds:
print(f"{self.user} is connected to the following guild:\n" f"{guild.name}(id: {guild.id})")
async def find_session(self, id, message):
print(message.clean_content)
objective = message.clean_content.removeprefix("weblm ")
if id not in self.sessions:
print("did not find session")
crawler, controller = (AsyncCrawler(self.playwright), Controller(co, objective))
await crawler._init_browser()
print("browser inited")
self.sessions[id] = (crawler, controller)
await crawler.go_to_page("google.com")
print("crawler on page")
crawler, controller = self.sessions[id]
return (crawler, controller)
async def respond_to_message(self, message):
print(message.clean_content)
objective = message.clean_content.removeprefix("weblm ")
crawler, controller = await self.find_session(message.id, message)
if objective == "cancel":
del self.sessions[message.id]
return
elif objective == "help":
msg = await message.channel.send(help_msg)
await msg.edit(suppress=True)
return
elif objective == "success":
controller.success()
del self.sessions[message.channel.starter_message.id]
msg = await message.channel.send("🎉🎉🎉")
await msg.edit(suppress=True)
return
while True:
content = await crawler.crawl()
async with message.channel.typing():
if not controller.is_running():
print("Controller not yet running")
response = controller.step(crawler.page.url, content)
else:
response = controller.step(crawler.page.url, content, response=objective)
print(response)
if isinstance(response, Command):
print("running command", response)
await crawler.run_cmd(str(response))
elif isinstance(response, Prompt):
thread = await message.create_thread(name=objective)
for chunk in chunk_message_for_sending(str(response)):
msg = await thread.send(chunk)
await msg.edit(suppress=True)
return
async def respond_in_thread(self, message):
if message.channel.starter_message.id not in self.sessions:
print("Session not running, killing")
await message.channel.send("This session is dead please begin a new one in #web-lm.")
return
print(message.clean_content)
objective = message.clean_content.removeprefix("weblm ")
crawler, controller = await self.find_session(message.channel.starter_message.id, message)
if objective == "cancel":
del self.sessions[message.channel.starter_message.id]
return
elif objective == "success":
controller.success()
del self.sessions[message.channel.starter_message.id]
msg = await message.channel.send("🎉🎉🎉")
await msg.edit(suppress=True)
return
elif objective == "help":
msg = await message.channel.send(help_msg)
await msg.edit(suppress=True)
return
elif objective == "show":
path = await crawler.screenshot()
await message.channel.send(file=discord.File(path))
return
while True:
content = await crawler.crawl()
print("AIDAN", content)
async with message.channel.typing():
if not controller.is_running():
print("Controller not yet running")
response = controller.step(crawler.page.url, content)
else:
response = controller.step(crawler.page.url, content, response=objective)
print(response)
if isinstance(response, Command):
print("running command", response)
await crawler.run_cmd(str(response))
elif isinstance(response, Prompt):
for chunk in chunk_message_for_sending(str(response)):
msg = await message.channel.send(chunk)
await msg.edit(suppress=True)
return
async def respond_to_dm(self, message):
print(message.clean_content)
objective = message.clean_content.removeprefix("weblm ")
crawler, controller = await self.find_session(message.author.id, message)
if objective == "cancel":
del self.sessions[message.author.id]
return
elif objective == "success":
controller.success()
del self.sessions[message.author.id]
msg = await message.channel.send("🎉🎉🎉")
await msg.edit(suppress=True)
return
elif objective == "help":
msg = await message.channel.send(help_msg)
await msg.edit(suppress=True)
return
elif objective == "show":
path = await crawler.screenshot()
await message.channel.send(file=discord.File(path))
return
while True:
content = await crawler.crawl()
print("AIDAN", content)
async with message.channel.typing():
if not controller.is_running():
print("Controller not yet running")
response = controller.step(crawler.page.url, content)
else:
response = controller.step(crawler.page.url, content, response=objective)
print(response)
if isinstance(response, Command):
print("running command", response)
await crawler.run_cmd(str(response))
elif isinstance(response, Prompt):
for chunk in chunk_message_for_sending(str(response)):
msg = await message.channel.send(chunk)
await msg.edit(suppress=True)
return
async def on_message(self, message):
try:
print(message)
if isinstance(message.channel, discord.DMChannel) and message.author != self.user:
await self.respond_to_dm(message)
elif isinstance(message.channel, discord.TextChannel) and message.channel.id in [
1026557845308723212, 1032611829186306048
] and message.author != self.user and message.clean_content.startswith("weblm "):
await self.respond_to_message(message)
elif isinstance(message.channel, discord.Thread) and message.channel.parent.id in [
1026557845308723212, 1032611829186306048
] and message.author != self.user:
await self.respond_in_thread(message)
except Exception:
print(f"Exception caught:\n{traceback.format_exc()}")
async def main():
intents = discord.Intents.all()
async with async_playwright() as playwright:
async with MyClient(playwright, intents=intents) as client:
try:
await client.start(os.environ.get("DISCORD_KEY"))
except Exception as e:
print(f"Exception caught: {e}")
if __name__ == "__main__":
asyncio.run(main())
| [] |
2024-01-10 | Flagro/SummaryForEveryone | summaryforeveryone~utils~text_spliiters.py | from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import SpacyTextSplitter
from langchain.text_splitter import NLTKTextSplitter
def get_text_splitter(separator="\n\n", chunk_size=1024, text_splitter_name="character"):
if text_splitter_name == "spacy":
return SpacyTextSplitter(
chunk_size=chunk_size,
chunk_overlap=0,
separator=separator,
pipeline="sentencizer"
)
elif text_splitter_name == "character":
return CharacterTextSplitter.from_tiktoken_encoder(
separator=separator,
chunk_size=chunk_size // 10,
chunk_overlap=0,
)
elif text_splitter_name == "nltk":
return NLTKTextSplitter(
chunk_size=chunk_size,
chunk_overlap=0,
separator=separator
)
else:
raise ValueError(f"Unknown text splitter name: {text_splitter_name}")
| [] |
2024-01-10 | Flagro/SummaryForEveryone | summaryforeveryone~utils~models~yandexgpt.py | from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain_core.load.serializable import Serializable
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.utils import enforce_stop_tokens
from typing import Any, List, Mapping, Optional
import json
import requests
class _BaseCustomYandexGPT(Serializable):
iam_token: str = ""
api_key: str = ""
x_folder_id: str = ""
finetuned_model_id: str = ""
model_name: str = "general"
temperature: Optional[float] = 0.7
max_tokens: int = 7400
stop: Optional[List[str]] = None
url: str = "https://llm.api.cloud.yandex.net/llm/v1alpha/instruct"
@property
def _llm_type(self) -> str:
return "yandex_gpt"
class YandexCustomGPT(_BaseCustomYandexGPT, LLM):
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
"model_name": self.model_name,
"temperature": self.temperature,
"finetuned_model_id": self.finetuned_model_id,
"max_tokens": self.max_tokens,
"stop": self.stop,
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
if self.finetuned_model_id or not self.api_key:
headers = {"Authorization": f"Bearer {self.iam_token}", "x-folder-id": f"{self.x_folder_id}"}
else:
headers = {"Authorization": f"Api-Key {self.api_key}"}
json_body = {
"model": "general",
"request_text": prompt,
"generation_options": {
"max_tokens": self.max_tokens,
"temperature": self.temperature
}
}
if self.finetuned_model_id:
json_body["instruction_uri"] = f"ds://{self.finetuned_model_id}"
result = requests.post(
url=self.url,
headers=headers,
json=json_body
)
text = json.loads(result.text)['result']['alternatives'][0]['text']
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
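# A minimal usage sketch (hypothetical credentials; `api_key` is a Yandex Cloud API key and
# `x_folder_id` identifies the target folder):
#
# llm = YandexCustomGPT(api_key="<api-key>", x_folder_id="<folder-id>", temperature=0.3)
# summary = llm("Summarize the following text: ...")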
| [] |
2024-01-10 | glinft/sfbot | model~openai~chatgpt_model.py | # encoding:utf-8
from model.model import Model
from config import model_conf, common_conf_val
from common import const
from common import log
from common.redis import RedisSingleton
from common.word_filter import WordFilter
import openai
import os
import time
import json
import re
import requests
import base64
import random
import hashlib
import tiktoken
from datetime import datetime
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from urllib.parse import urlparse, urlunparse
user_session = dict()
md5sum_pattern = r'^[0-9a-f]{32}$'
faiss_store_root= "/opt/faiss/"
def calculate_md5(text):
md5_hash = hashlib.md5()
md5_hash.update(text.encode('utf-8'))
return md5_hash.hexdigest()
def get_org_bot(input_string):
parts = input_string.split(':')
org_part = ":".join(parts[:2])
bot_part = ":".join(parts[2:])
return org_part, bot_part
def get_org_id(string):
pattern = r'org:(\d+)'
match = re.search(pattern, string)
orgid = 0
if match:
orgid = int(match.group(1))
return orgid
def get_bot_id(string):
pattern = r'bot:(\d+)'
match = re.search(pattern, string)
botid = 0
if match:
botid = int(match.group(1))
return botid
def get_unique_by_key(data, key):
seen = set()
unique_list = [d for d in data if d.get(key) not in seen and not seen.add(d.get(key))]
return unique_list
def num_tokens_from_string(string):
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = len(encoding.encode(string))
return num_tokens
def num_tokens_from_messages(messages):
encoding = tiktoken.get_encoding("cl100k_base")
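# These constants follow OpenAI's published token-accounting rules for chat models of the
# gpt-3.5-turbo family: every message carries ~4 framing tokens, a "name" field replaces the
# role token (hence -1), and 3 extra tokens prime the assistant's reply.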
tokens_per_message = 4
tokens_per_name = -1
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3
return num_tokens
def remove_url_query(url):
parsed_url = urlparse(url)
clean_url = urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, '', '', ''))
return clean_url
def is_image_url(url):
image_extensions = ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.svg', '.webp']
lower_url = url.lower()
return any(lower_url.endswith(ext) for ext in image_extensions)
def is_video_url(url):
video_extensions = ['.mp4', '.webm', '.ogv']
lower_url = url.lower()
return any(lower_url.endswith(ext) for ext in video_extensions)
def increase_hit_count(fid, category, url=''):
gqlurl = 'http://127.0.0.1:5000/graphql'
gqlfunc = 'increaseHitCount'
headers = { "Content-Type": "application/json", }
query = f"mutation {gqlfunc} {{ {gqlfunc}( id:{fid}, category:\"{category}\", url:\"{url}\" ) }}"
gqldata = { "query": query, "variables": {}, }
try:
gqlresp = requests.post(gqlurl, json=gqldata, headers=headers)
log.info(f"GQL/{gqlfunc}: #{fid} {gqlresp.status_code} {query}")
log.debug(f"GQL/{gqlfunc}: #{fid} {gqlresp.json()}")
except Exception as e:
log.exception(e)
def send_query_notification(rid, str1, str2):
gqlurl = 'http://127.0.0.1:5000/graphql'
gqlfunc = 'notiSfbotNotification'
headers = { "Content-Type": "application/json", }
chatstr = f"{str1}\n\n{str2}"
content = base64.b64encode(chatstr.encode('utf-8')).decode('utf-8')
query = f"mutation {gqlfunc} {{ {gqlfunc}( id:{rid}, content:\"{content}\" ) }}"
gqldata = { "query": query, "variables": {}, }
try:
gqlresp = requests.post(gqlurl, json=gqldata, headers=headers)
log.info(f"GQL/{gqlfunc}: #{rid} {gqlresp.status_code} {query}")
log.debug(f"GQL/{gqlfunc}: #{rid} {gqlresp.json()}")
except Exception as e:
log.exception(e)
def run_word_filter(text, org_id):
wftool = WordFilter()
wfdict,_ = wftool.load_words(0)
if int(org_id)>0:
wfdict_org,_ = wftool.load_words(org_id)
wfdict.update(wfdict_org)
filted_text = wftool.replace_sensitive_words(text, wfdict)
return filted_text
# OpenAI对话模型API (可用)
class ChatGPTModel(Model):
def __init__(self):
openai.api_key = model_conf(const.OPEN_AI).get('api_key')
api_base = model_conf(const.OPEN_AI).get('api_base')
if api_base:
openai.api_base = api_base
proxy = model_conf(const.OPEN_AI).get('proxy')
if proxy:
openai.proxy = proxy
log.info("[CHATGPT] api_base={} proxy={}".format(
api_base, proxy))
def select_gpt_service(self, vendor='default'):
if vendor == 'azure':
openai.api_key = model_conf(const.OPEN_AI).get('azure_api_key')
openai.api_base = model_conf(const.OPEN_AI).get('azure_api_base')
openai.api_type = 'azure'
openai.api_version = '2023-05-15'
else:
openai.api_key = model_conf(const.OPEN_AI).get('api_key')
openai.api_base = model_conf(const.OPEN_AI).get('api_base')
def reply(self, query, context=None):
# acquire reply content
if not context or not context.get('type') or context.get('type') == 'TEXT':
log.info("[CHATGPT] query={}".format(query))
from_user_id = context['from_user_id']
from_org_id = context['from_org_id']
from_org_id, from_chatbot_id = get_org_bot(from_org_id)
user_flag = context['userflag']
nres = int(context.get('res','0'))
fwd = int(context.get('fwd','0'))
character_id = context.get('character_id')
character_desc = context.get('character_desc')
temperature = context['temperature']
website = context.get('website','undef')
email = context.get('email','undef')
sfmodel = context.get('sfmodel','undef')
if isinstance(sfmodel, str) and (sfmodel == 'undef' or sfmodel == ''):
sfmodel = None
clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
if query in clear_memory_commands:
log.info('[CHATGPT] reset session: {}'.format(from_user_id))
Session.clear_session(from_user_id)
return 'Session is reset.'
orgnum = str(get_org_id(from_org_id))
botnum = str(get_bot_id(from_chatbot_id))
myredis = RedisSingleton(password=common_conf_val('redis_password', ''))
teammode = int(context.get('teammode','0'))
teambotkeep = int(context.get('teambotkeep','0'))
teamid = int(context.get('teamid','0'))
teambotid = int(context.get('teambotid','0'))
if teammode == 1:
if teambotkeep == 1 and teambotid == 0:
teambotkeep = 0
if teambotkeep == 0:
newteambot, newteam = self.find_teambot(user_flag, from_org_id, from_chatbot_id, teamid, query)
if newteambot > 0:
teamid = newteam
teambotid = newteambot
else:
if teambotid == 0:
teammode = 0
else:
if teamid == 0 and teambotid > 0:
teambot_pattern = "sfteam:org:{}:team:*:bot:{}".format(orgnum,teambotid)
keys_matched = myredis.redis.keys(teambot_pattern)
for key in keys_matched:
teambot_key=key.decode()
teamid=int(teambot_key.split(':')[4])
if teammode == 1:
teambot_key = "sfteam:org:{}:team:{}:bot:{}".format(orgnum,teamid,teambotid)
log.info("[CHATGPT] key={} query={}".format(teambot_key,query))
if myredis.redis.exists(teambot_key):
teambot_name = myredis.redis.hget(teambot_key, 'name').decode().strip()
teambot_desc = myredis.redis.hget(teambot_key, 'desc').decode().strip()
teambot_prompt = myredis.redis.hget(teambot_key, 'prompt').decode().strip()
teambot_model = myredis.redis.hget(teambot_key, 'model')
else:
teammode = 0
if teammode == 0:
teamid = 0
teambotid = 0
if teammode == 1:
teambot_instruction = (
f"You are {teambot_name}.\n{teambot_desc}.\n"
"You only provide factual answers to queries, and do not try to make up an answer.\n"
"Do not try to answer the queries that are irrelevant to your functionality and responsibility, just reject them politely.\n"
"Your functionality and responsibility are described below, separated by 3 backticks.\n\n"
f"```\n{teambot_prompt}\n```\n"
)
character_id = f"x{teambotid}"
character_desc = teambot_instruction
log.info("[CHATGPT] teambot character id={} desc={}".format(character_id,character_desc))
if sfmodel is None and teambot_model is not None:
sfmodel = teambot_model.decode().strip()
else:
sfbot_key = "sfbot:org:{}:bot:{}".format(orgnum,botnum)
sfbot_model = myredis.redis.hget(sfbot_key, 'model')
if sfmodel is None and sfbot_model is not None:
sfmodel = sfbot_model.decode().strip()
commands = []
query_embedding = openai.Embedding.create(input=query, model="text-embedding-ada-002")["data"][0]['embedding']
atcs = myredis.ft_search(embedded_query=query_embedding,
vector_field="text_vector",
hybrid_fields=myredis.create_hybrid_field1(orgnum, user_flag, "category", "atc"),
k=3)
if len(atcs) > 0:
for i, atc in enumerate(atcs):
if float(atc.vector_score) > 0.15:
break
cid = myredis.redis.hget(atc.id, 'id').decode()
csf = 1.0 - float(atc.vector_score)
commands.append({'id':cid,'category':"actionTransformer",'score':csf})
new_query, hitdocs, refurls, similarity, use_faiss = Session.build_session_query(query, from_user_id, from_org_id, from_chatbot_id, user_flag, character_desc, character_id, website, email, fwd)
if new_query is None:
return 'Sorry, I have no idea about what you said.'
log.info("[CHATGPT] session query={}".format(new_query))
if new_query[-1]['role'] == 'assistant':
reply_message = new_query.pop()
reply_content = reply_message['content']
logid = Session.save_session(query, reply_content, from_user_id, from_org_id, from_chatbot_id, 0, 0, 0, similarity, use_faiss)
reply_content = run_word_filter(reply_content, get_org_id(from_org_id))
reply_content+='\n```sf-json\n'
reply_content+=json.dumps({'logid':logid})
reply_content+='\n```\n'
return reply_content
# if context.get('stream'):
# # reply in stream
# return self.reply_text_stream(query, new_query, from_user_id)
reply_content, logid = self.reply_text(new_query, query, sfmodel, from_user_id, from_org_id, from_chatbot_id, similarity, temperature, use_faiss, 0)
reply_embedding = openai.Embedding.create(input=reply_content, model="text-embedding-ada-002")["data"][0]['embedding']
docs = myredis.ft_search(embedded_query=reply_embedding,
vector_field="text_vector",
hybrid_fields=myredis.create_hybrid_field2(orgnum, botnum, user_flag, "category", "kb"),
k=1)
score = 0.0
if len(docs) > 0:
score = 1.0 - float(docs[0].vector_score)
qnts = myredis.ft_search(embedded_query=query_embedding, vector_field="text_vector", hybrid_fields=myredis.create_hybrid_field(orgnum, "category", "qnt"), k=3)
if len(qnts) > 0:
for i, qnt in enumerate(qnts):
log.info(f"{i}) {qnt.id} {qnt.orgid} {qnt.category} {qnt.vector_score}")
if float(qnt.vector_score) > 0.2:
break
rid = myredis.redis.hget(qnt.id, 'id').decode()
send_query_notification(rid, query, reply_content)
resources = []
if nres > 0:
resources = Session.get_resources(reply_content, from_user_id, from_org_id)
reply_content = Session.insert_resource_to_reply(reply_content, from_user_id, from_org_id)
reply_content = run_word_filter(reply_content, get_org_id(from_org_id))
reply_content+='\n```sf-json\n'
reply_content+=json.dumps({'docs':hitdocs,'pages':refurls,'resources':resources,'commands':commands,'score':score,'logid':logid,'teammode':teammode,'teamid':teamid,'teambotid':teambotid})
reply_content+='\n```\n'
#log.debug("[CHATGPT] user={}, query={}, reply={}".format(from_user_id, new_query, reply_content))
return reply_content
elif context.get('type', None) == 'IMAGE_CREATE':
return self.create_img(query, 0)
def find_teambot(self, user_flag, org_id, chatbot_id, team_id, query):
myredis = RedisSingleton(password=common_conf_val('redis_password', ''))
orgnum = get_org_id(org_id)
botnum = get_bot_id(chatbot_id)
team_info = '# Team Information\n'
team_keys = []
team_pattern = "sfteam:org:{}:team:*:data".format(orgnum)
keys_matched = myredis.redis.keys(team_pattern)
for key in keys_matched:
team_keys.append(key.decode())
if team_id > 0:
team_key = "sfteam:org:{}:team:{}:data".format(orgnum,team_id)
if team_key in team_keys:
team_keys.clear()
team_keys.append(team_key)
for key in team_keys:
team_desc = myredis.redis.hget(key, 'team_desc').decode()
team_publ = 1
fpub = myredis.redis.hget(key, 'public')
if fpub is not None:
team_publ = int(fpub.decode())
if team_publ == 1:
team_info += team_desc+'\n'
else:
if user_flag == 'internal':
team_info += team_desc+'\n'
if len(team_info) < 20:
log.info("[CHATGPT] find_teambot: No available team {}/{}".format(org_id,user_flag))
return 0, 0
sys_msg = (
"You are a contact-center manager, and you try to dispatch the user query to the most suitable team/agent.\n"
"You only provide factual answers to queries, and do not try to make up an answer.\n"
"The functionality and responsibility of teams are described below in markdown format.\n\n"
f"```markdown\n{team_info}\n```\n"
)
usr_msg = (
"Here is user query.\n"
f"```\n{query}\n```\n\n"
"Reply the dispatchment in json format with 2 keys named team_id and agent_id.\n"
"If you have no idea about how to dispatch based on the given team information, simply return team_id=0 and agent_id=0.\n"
"The answer should be only json string and nothing else.\n"
)
msgs = [{'role':'system','content':sys_msg},{'role':'user','content':usr_msg}]
try:
use_azure = True if orgnum==4 else False
response = openai.ChatCompletion.create(
api_base=(model_conf(const.OPEN_AI).get('azure_api_base') if use_azure else None),
api_key=(model_conf(const.OPEN_AI).get('azure_api_key') if use_azure else None),
api_type=("azure" if use_azure else None),
api_version=("2023-05-15" if use_azure else None),
engine=("base" if use_azure else None),
model=model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo",
messages=msgs,
temperature=0.1,
frequency_penalty=0.0,
presence_penalty=0.0,
)
reply_content = response.choices[0]['message']['content']
reply_usage = response['usage']
log.info("[CHATGPT] find_teambot: result={} usage={}".format(reply_content,json.dumps(reply_usage)))
dispatch = json.loads(reply_content)
return int(dispatch['agent_id']), int(dispatch['team_id'])
except Exception as e:
log.exception(e)
return 0, 0
def reply_text(self, query, qtext, qmodel, user_id, org_id, chatbot_id, similarity, temperature, use_faiss=False, retry_count=0):
try:
try:
temperature = float(temperature)
if temperature < 0.0 or temperature > 1.0:
raise ValueError()
except ValueError:
temperature = model_conf(const.OPEN_AI).get("temperature", 0.75)
orgnum = get_org_id(org_id)
use_azure = True if orgnum==4 else False
response = openai.ChatCompletion.create(
api_base=(model_conf(const.OPEN_AI).get('azure_api_base') if use_azure else None),
api_key=(model_conf(const.OPEN_AI).get('azure_api_key') if use_azure else None),
api_type=("azure" if use_azure else None),
api_version=("2023-05-15" if use_azure else None),
engine=("base" if use_azure else None), # Azure deployment Name
model=qmodel or model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo", # name of the chat model
messages=query,
temperature=temperature, # sampling temperature in [0,1]; higher values pick candidate tokens more randomly and make replies less deterministic; use either this or top_p (higher for creative tasks, lower for precise ones)
#max_tokens=4096, # maximum number of tokens in the reply, counting both input and output
#top_p=model_conf(const.OPEN_AI).get("top_p", 0.7), # nucleus sampling: 0.7 means only tokens in the top 70% of probability mass are considered; use either this or temperature
frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0), # in [-2,2]; higher values penalize repeated wording and favor more varied content
presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0) # in [-2,2]; higher values encourage tokens that are not in the input, producing more varied content
)
reply_content = response.choices[0]['message']['content']
used_tokens = response['usage']['total_tokens']
prompt_tokens = response['usage']['prompt_tokens']
completion_tokens = response['usage']['completion_tokens']
log.debug(response)
log.info("[CHATGPT] usage={}", json.dumps(response['usage']))
log.info("[CHATGPT] reply={}", reply_content)
logid = Session.save_session(qtext, reply_content, user_id, org_id, chatbot_id, used_tokens, prompt_tokens, completion_tokens, similarity, use_faiss)
return reply_content, logid
except openai.error.RateLimitError as e:
# rate limit exception
log.warn(e)
if retry_count < 1:
time.sleep(5)
log.warn("[CHATGPT] RateLimit exceed, retry {} attempts".format(retry_count+1))
return self.reply_text(query, qtext, qmodel, user_id, org_id, chatbot_id, similarity, temperature, use_faiss, retry_count+1)
else:
return "You're asking too quickly, please take a break before asking me again.", None
except openai.error.APIConnectionError as e:
log.warn(e)
log.warn("[CHATGPT] APIConnection failed")
return "I can't connect to the service, please try again later.", None
except openai.error.Timeout as e:
log.warn(e)
log.warn("[CHATGPT] Timeout")
return "I haven't received the message, please try again later.", None
except openai.error.ServiceUnavailableError as e:
log.warn(e)
log.warn("[CHATGPT] Service Unavailable")
return "The server is overloaded or not ready yet.", None
except Exception as e:
# unknown exception
log.exception(e)
Session.clear_session(user_id)
return "Oops, something wrong, please ask me again.", None
async def reply_text_stream(self, query, context, retry_count=0):
try:
log.info("[CHATGPT] query={}".format(query))
from_user_id = context['from_user_id']
from_org_id = context['from_org_id']
from_org_id, from_chatbot_id = get_org_bot(from_org_id)
user_flag = context['userflag']
nres = int(context.get('res','0'))
fwd = int(context.get('fwd','0'))
character_id = context.get('character_id')
character_desc = context.get('character_desc')
temperature = context['temperature']
website = context.get('website','undef')
email = context.get('email','undef')
sfmodel = context.get('sfmodel','undef')
if isinstance(sfmodel, str) and (sfmodel == 'undef' or sfmodel == ''):
sfmodel = None
new_query, hitdocs, refurls, similarity, use_faiss = Session.build_session_query(query, from_user_id, from_org_id, from_chatbot_id, user_flag, character_desc, character_id, website, email, fwd)
if new_query is None:
yield True,'Sorry, I have no idea about what you said.'
log.info("[CHATGPT] session query={}".format(new_query))
if new_query[-1]['role'] == 'assistant':
reply_message = new_query.pop()
reply_content = reply_message['content']
logid = Session.save_session(query, reply_content, from_user_id, from_org_id, from_chatbot_id, 0, 0, 0, similarity, use_faiss)
reply_content = run_word_filter(reply_content, get_org_id(from_org_id))
reply_content+='\n```sf-json\n'
reply_content+=json.dumps({'logid':logid})
reply_content+='\n```\n'
yield True,reply_content
try:
temperature = float(temperature)
if temperature < 0.0 or temperature > 1.0:
raise ValueError()
except ValueError:
temperature = model_conf(const.OPEN_AI).get("temperature", 0.75)
orgnum = str(get_org_id(from_org_id))
botnum = str(get_bot_id(from_chatbot_id))
myredis = RedisSingleton(password=common_conf_val('redis_password', ''))
sfbot_key = "sfbot:org:{}:bot:{}".format(orgnum,botnum)
sfbot_model = myredis.redis.hget(sfbot_key, 'model')
if sfmodel is None and sfbot_model is not None:
sfmodel = sfbot_model.decode().strip()
res = openai.ChatCompletion.create(
model=sfmodel or model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo", # name of the chat model
messages=new_query,
temperature=temperature, # sampling temperature in [0,1]; higher values pick candidate tokens more randomly and make replies less deterministic; use either this or top_p (higher for creative tasks, lower for precise ones)
#max_tokens=4096, # maximum number of tokens in the reply, counting both input and output
#top_p=model_conf(const.OPEN_AI).get("top_p", 0.7), # nucleus sampling: 0.7 means only tokens in the top 70% of probability mass are considered; use either this or temperature
frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0), # in [-2,2]; higher values penalize repeated wording and favor more varied content
presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0), # in [-2,2]; higher values encourage tokens that are not in the input, producing more varied content
stream=True
)
full_response = ""
for chunk in res:
log.debug(chunk)
if (chunk["choices"][0]["finish_reason"]=="stop"):
break
chunk_message = chunk['choices'][0]['delta'].get("content")
if(chunk_message):
full_response+=chunk_message
yield False,full_response
prompt_tokens = num_tokens_from_messages(new_query)
completion_tokens = num_tokens_from_string(full_response)
used_tokens = prompt_tokens + completion_tokens
logid = Session.save_session(query, full_response, from_user_id, from_org_id, from_chatbot_id, used_tokens, prompt_tokens, completion_tokens, similarity, use_faiss)
resources = []
if nres > 0:
resources = Session.get_resources(full_response, from_user_id, from_org_id)
full_response = run_word_filter(full_response, get_org_id(from_org_id))
full_response+='\n```sf-json\n'
full_response+=json.dumps({'docs':hitdocs,'pages':refurls,'resources':resources,'logid':logid})
full_response+='\n```\n'
#log.debug("[CHATGPT] user={}, query={}, reply={}".format(from_user_id, new_query, full_response))
yield True,full_response
except openai.error.RateLimitError as e:
# rate limit exception
log.warn(e)
if retry_count < 1:
time.sleep(5)
log.warn("[CHATGPT] RateLimit exceed, retry {} attempts".format(retry_count+1))
yield True, self.reply_text_stream(query, context, retry_count+1)
else:
yield True, "You're asking too quickly, please take a break before asking me again."
except openai.error.APIConnectionError as e:
log.warn(e)
log.warn("[CHATGPT] APIConnection failed")
yield True, "I can't connect to the service, please try again later."
except openai.error.Timeout as e:
log.warn(e)
log.warn("[CHATGPT] Timeout")
yield True, "I haven't received the message, please try again later."
except openai.error.ServiceUnavailableError as e:
log.warn(e)
log.warn("[CHATGPT] Service Unavailable")
yield True, "The server is overloaded or not ready yet."
except Exception as e:
# unknown exception
log.exception(e)
Session.clear_session(from_user_id)
yield True, "Oops, something wrong, please ask me again."
def create_img(self, query, retry_count=0):
try:
log.info("[OPEN_AI] image_query={}".format(query))
response = openai.Image.create(
prompt=query, # image description
n=1, # number of images to generate per request
size="256x256" # image size; available options are 256x256, 512x512, 1024x1024
)
image_url = response['data'][0]['url']
log.info("[OPEN_AI] image_url={}".format(image_url))
return [image_url]
except openai.error.RateLimitError as e:
log.warn(e)
if retry_count < 1:
time.sleep(5)
log.warn("[OPEN_AI] ImgCreate RateLimit exceed, retry {} attempts".format(retry_count+1))
return self.create_img(query, retry_count+1)
else:
return "You're asking too quickly, please take a break before asking me again."
except Exception as e:
log.exception(e)
return None
class Session(object):
@staticmethod
def build_session_query(query, user_id, org_id, chatbot_id='bot:0', user_flag='external', character_desc=None, character_id=None, website=None, email=None, fwd=0):
'''
build query with conversation history
e.g. [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
:param query: query content
:param user_id: from user id
:return: query content with conversation history
'''
config_prompt = common_conf_val("input_prompt", "")
session = user_session.get(user_id, [])
faiss_id = user_id
if isinstance(website, str) and website != 'undef' and len(website) > 0:
log.info("[FAISS] try to search data of website:{}".format(website))
faiss_id = calculate_md5('website:'+re.sub(r'https?://','',website.lower()))
elif isinstance(email, str) and email != 'undef' and len(email) > 0:
log.info("[FAISS] try to search data of email:{}".format(email))
faiss_id = calculate_md5(email.lower())
if re.match(md5sum_pattern, faiss_id):
log.info("[FAISS] try to load local store {}".format(faiss_id))
if re.match(md5sum_pattern, faiss_id) and os.path.exists(f"{faiss_store_root}{faiss_id}"):
faiss_store_path = f"{faiss_store_root}{faiss_id}"
mykey = model_conf(const.OPEN_AI).get('api_key')
embeddings = OpenAIEmbeddings(openai_api_key=mykey)
dbx = FAISS.load_local(faiss_store_path, embeddings)
log.info("[FAISS] local store loaded")
similarity = 0.0
docs = dbx.similarity_search_with_score(query, k=3)
log.info("[FAISS] semantic search done")
if len(docs) == 0:
log.info("[FAISS] semantic search: None")
return None, [], [], similarity, True
similarity = float(docs[0][1])
'''
if len(docs) > 0 and similarity < 0.6:
log.info(f"[FAISS] semantic search: score:{similarity} < threshold:0.6")
return None, [], [], similarity, True
'''
system_prompt = 'You are answering the question just like you are the owner or partner of the company described in the context.'
if isinstance(character_desc, str) and character_desc != 'undef' and len(character_desc) > 0:
system_prompt = character_desc
system_prompt += '\nIf you don\'t know the answer, just say you don\'t know. DO NOT try to make up an answer.'
system_prompt += '\nIf the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.'
system_prompt += '\nIf you are unclear about the question, politely respond that you need a clearer and more detailed description.'
system_prompt += f"\n{config_prompt}\n```"
for doc, score in docs:
log.info("[FAISS] {} {}".format(score, json.dumps(doc.metadata)))
'''
if score < 0.6:
break
'''
system_prompt += '\n' + doc.page_content
system_prompt += '\n```\n'
log.info("[FAISS] prompt={}".format(system_prompt))
if len(session) > 0 and session[0]['role'] == 'system':
session.pop(0)
session = []
system_item = {'role': 'system', 'content': system_prompt}
session.insert(0, system_item)
user_session[user_id] = session
user_item = {'role': 'user', 'content': query}
session.append(user_item)
return session, [], [], similarity, True
orgnum = get_org_id(org_id)
qnaorg = "(0|{})".format(orgnum)
botnum = str(get_bot_id(chatbot_id))
if isinstance(character_id, str) and (character_id[0] == 'c' or character_id[0] == 'x' or character_id[0] == 't'):
botnum += " | {}".format(character_id)
refurls = []
hitdocs = []
qna_output = None
myquery = openai.Embedding.create(input=query, model="text-embedding-ada-002")["data"][0]['embedding']
myredis = RedisSingleton(password=common_conf_val('redis_password', ''))
qnas = myredis.ft_search(embedded_query=myquery, vector_field="title_vector", hybrid_fields=myredis.create_hybrid_field(qnaorg, "category", "qa"))
if len(qnas) > 0 and float(qnas[0].vector_score) < 0.15:
qna = qnas[0]
log.info(f"Q/A: {qna.id} {qna.orgid} {qna.category} {qna.vector_score}")
try:
qnatext = myredis.redis.hget(qna.id, 'text').decode()
answers = json.loads(qnatext)
if len(answers)>0:
qna_output = random.choice(answers)
fid = myredis.redis.hget(qna.id, 'id').decode()
increase_hit_count(fid, 'qa', '')
except json.JSONDecodeError as e:
pass
except Exception as e:
pass
log.info("[RDSFT] org={} {} {}".format(org_id, orgnum, qnaorg))
similarity = 0.0
docs = myredis.ft_search(embedded_query=myquery,
vector_field="text_vector",
hybrid_fields=myredis.create_hybrid_field2(str(orgnum), botnum, user_flag, "category", "kb"))
if len(docs) > 0:
similarity = 1.0 - float(docs[0].vector_score)
threshold = float(common_conf_val('similarity_threshold', 0.7))
if similarity < threshold:
docs = []
system_prompt = 'You are a helpful AI customer support agent. Use the following pieces of context to answer the customer inquiry.'
orgnum = str(get_org_id(org_id))
botnum = str(get_bot_id(chatbot_id))
sfbot_key = "sfbot:org:{}:bot:{}".format(orgnum,botnum)
sfbot_char_desc = myredis.redis.hget(sfbot_key, 'character_desc')
if sfbot_char_desc is not None:
sfbot_char_desc = sfbot_char_desc.decode()
if len(sfbot_char_desc) > 0:
system_prompt = sfbot_char_desc
if isinstance(character_desc, str) and character_desc != 'undef' and len(character_desc) > 0:
system_prompt = character_desc
if fwd > 0:
log.info("[CHATGPT] prompt(onlyfwd)={}".format(system_prompt))
if len(session) > 0 and session[0]['role'] == 'system':
session.pop(0)
system_item = {'role': 'system', 'content': system_prompt}
session.insert(0, system_item)
user_session[user_id] = session
user_item = {'role': 'user', 'content': query}
session.append(user_item)
return session, [], [], similarity, False
if isinstance(character_id, str) and character_id.startswith('x'):
log.info("[CHATGPT] teambot character id={} add context".format(character_id))
else:
system_prompt += '\nIf you don\'t know the answer, just say you don\'t know. DO NOT try to make up an answer.'
system_prompt += '\nIf the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.'
system_prompt += '\nIf you are unclear about the question, politely respond that you need a clearer and more detailed description.'
if len(docs) == 0 and qna_output is None:
log.info("[CHATGPT] prompt(nodoc)={}".format(system_prompt))
if len(session) > 0 and session[0]['role'] == 'system':
session.pop(0)
system_item = {'role': 'system', 'content': system_prompt}
session.insert(0, system_item)
user_session[user_id] = session
user_item = {'role': 'user', 'content': query}
session.append(user_item)
return session, [], [], similarity, False
system_prompt += f"\n{config_prompt}\n```"
if qna_output is not None:
system_prompt += '\n' + qna_output
for i, doc in enumerate(docs):
log.info(f"{i}) {doc.id} {doc.orgid} {doc.category} {doc.vector_score}")
system_prompt += '\n' + myredis.redis.hget(doc.id, 'text').decode()
if float(doc.vector_score) < 0.15:
urlhit = ''
docurl = myredis.redis.hget(doc.id, 'source')
if docurl is not None:
urlhit = docurl.decode()
dockey = myredis.redis.hget(doc.id, 'dkey')
if dockey is not None:
dockey = dockey.decode()
dockeyparts = dockey.split(":")
fct = dockeyparts[1]
fid = dockeyparts[2]
if fct == 'file':
dfname = myredis.redis.hget(doc.id, 'filename')
if dfname is not None:
dfname = dfname.decode()
hitdocs.append({'id':fid,'category':fct,'url':urlhit,'filename':dfname,'key':f"{fid};{urlhit}"})
if float(doc.vector_score) < 0.2:
docurl = myredis.redis.hget(doc.id, 'source')
if docurl is None:
continue
urlkey = myredis.redis.hget(doc.id, 'refkey')
if urlkey is None:
continue
urltitle = None
try:
docurl = docurl.decode()
urlkey = urlkey.decode()
urlmeta = json.loads(myredis.redis.lindex(urlkey, 0).decode())
urltitle = urlmeta['title']
except json.JSONDecodeError as e:
print("Error decoding JSON:", urlkey, str(e))
except Exception as e:
print("Error URL:", urlkey, str(e))
log.info(f"{i}) {doc.id} URL={docurl} Title={urltitle}")
refurls.append({'url': docurl, 'title': urltitle})
system_prompt += '\n```\n'
log.info("[CHATGPT] prompt={}".format(system_prompt))
refurls = get_unique_by_key(refurls, 'url')
hitdocs = get_unique_by_key(hitdocs, 'key')
hitdocs = [{k: v for k, v in d.items() if k != 'key'} for d in hitdocs]
for doc in hitdocs:
increase_hit_count(doc['id'], doc['category'], doc['url'])
if len(session) > 0 and session[0]['role'] == 'system':
session.pop(0)
system_item = {'role': 'system', 'content': system_prompt}
session.insert(0, system_item)
user_session[user_id] = session
user_item = {'role': 'user', 'content': query}
session.append(user_item)
return session, hitdocs, refurls, similarity, False
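# Descriptive note on the value returned above: build_session_query yields a 5-tuple
# (session_messages, hitdocs, refurls, similarity, use_faiss). After the 'key' field is
# stripped, hitdocs entries look like {'id': ..., 'category': 'file', 'url': ..., 'filename': ...}
# and refurls entries look like {'url': ..., 'title': ...}; the FAISS and no-doc branches
# earlier return empty lists for both.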
@staticmethod
def save_session(query, answer, user_id, org_id, chatbot_id, used_tokens=0, prompt_tokens=0, completion_tokens=0, similarity=0.0, use_faiss=False):
max_tokens = model_conf(const.OPEN_AI).get('conversation_max_tokens')
max_history_num = model_conf(const.OPEN_AI).get('max_history_num', None)
if not max_tokens or max_tokens > 4000:
# default value
max_tokens = 1000
session = user_session.get(user_id)
if session:
# append conversation
gpt_item = {'role': 'assistant', 'content': answer}
session.append(gpt_item)
if used_tokens > max_tokens and len(session) >= 3:
# pop first conversation (TODO: more accurate calculation)
session.pop(1)
session.pop(1)
if max_history_num is not None:
while len(session) > max_history_num * 2 + 1:
session.pop(1)
session.pop(1)
if use_faiss:
return None
if used_tokens > 0:
myredis = RedisSingleton(password=common_conf_val('redis_password', ''))
botkey = "sfbot:{}:{}".format(org_id,chatbot_id)
momkey = 'stat_'+datetime.now().strftime("%Y%m")
momqty = myredis.redis.hget(botkey, momkey)
if momqty is None:
myredis.redis.hset(botkey, momkey, 1)
else:
momqty = int(momqty.decode())
myredis.redis.hset(botkey, momkey, momqty+1)
gqlurl = 'http://127.0.0.1:5000/graphql'
gqlfunc = 'createChatHistory'
headers = { "Content-Type": "application/json", }
orgnum = get_org_id(org_id)
botnum = get_bot_id(chatbot_id)
question = base64.b64encode(query.encode('utf-8')).decode('utf-8')
answer = base64.b64encode(answer.encode('utf-8')).decode('utf-8')
xquery = f"""mutation {gqlfunc} {{ {gqlfunc}( chatHistory:{{ tag:"{user_id}",organizationId:{orgnum},sfbotId:{botnum},question:"{question}",answer:"{answer}",similarity:{similarity},promptTokens:{prompt_tokens},completionTokens:{completion_tokens},totalTokens:{used_tokens}}}){{ id tag }} }}"""
gqldata = { "query": xquery, "variables": {}, }
try:
gqlresp = requests.post(gqlurl, json=gqldata, headers=headers)
log.info("[HISTORY] response: {} {}".format(gqlresp.status_code, gqlresp.text.strip()))
if gqlresp.status_code != 200:
return None
chatlog = json.loads(gqlresp.text)
return chatlog['data']['createChatHistory']['id']
except Exception as e:
log.exception(e)
return None
@staticmethod
def clear_session(user_id):
user_session[user_id] = []
@staticmethod
def get_resources(query, user_id, org_id):
orgnum = get_org_id(org_id)
resorg = "(0|{})".format(orgnum)
myquery = openai.Embedding.create(input=query, model="text-embedding-ada-002")["data"][0]['embedding']
myredis = RedisSingleton(password=common_conf_val('redis_password', ''))
ress = myredis.ft_search(embedded_query=myquery, vector_field="text_vector", hybrid_fields=myredis.create_hybrid_field(resorg, "category", "res"), k=5)
if len(ress) == 0:
return []
resources = []
for i, res in enumerate(ress):
resurl = myredis.redis.hget(res.id, 'url')
resnam = myredis.redis.hget(res.id, 'title')
vscore = 1.0 - float(res.vector_score)
if resurl is not None:
resurl = resurl.decode()
resnam = resnam.decode()
resources.append({'url':resurl,'name':resnam,'score':vscore})
resources = get_unique_by_key(resources, 'url')
return resources
@staticmethod
def get_top_resource(query, user_id, org_id, pos=0):
orgnum = get_org_id(org_id)
resorg = "(0|{})".format(orgnum)
myquery = openai.Embedding.create(input=query, model="text-embedding-ada-002")["data"][0]['embedding']
myredis = RedisSingleton(password=common_conf_val('redis_password', ''))
ress = myredis.ft_search(embedded_query=myquery, vector_field="text_vector", hybrid_fields=myredis.create_hybrid_field(resorg, "category", "res"), k=1, offset=pos)
if len(ress) == 0:
return None
res0 = ress[0]
if float(res0.vector_score) > 0.25:
return None
resurl = myredis.redis.hget(res0.id, 'url')
if resurl is None:
return None
resurl = resurl.decode()
resname = myredis.redis.hget(res0.id, 'title')
vscore = 1.0 - float(res0.vector_score)
if resname is not None:
resname = resname.decode()
urlnoq = remove_url_query(resurl)
restype = 'unknown'
if is_image_url(urlnoq):
restype = 'image'
elif is_video_url(urlnoq):
restype = 'video'
topres = {'rid':res0.id, 'url':resurl,'name':resname,'type':restype,'score':vscore}
return topres
@staticmethod
def insert_resource_to_reply(text, user_id, org_id):
resrids=set()
paragraphs = text.split("\n\n")
for i, paragraph in enumerate(paragraphs):
if len(paragraph) < 50:
continue
found = False
for j in range(10):
resource = Session.get_top_resource(paragraph, user_id, org_id, j)
if resource is None:
found = False
break
resrid = resource['rid']
if resrid not in resrids:
found = True
resrids.add(resrid)
break
if not found:
continue
resurl = resource['url']
resname = resource['name']
restype = resource['type']
if restype == 'image':
imagetag = f"\n\n<img src=\"{resurl}\" alt=\"{resname}\" width=\"600\">\n\n\n"
paragraphs[i] = paragraphs[i] + imagetag
elif restype == 'video':
videotag = f"\n\n<video width=\"600\" controls><source src=\"{resurl}\" type=\"video/mp4\">Your browser does not support the video tag.</video>\n\n\n"
paragraphs[i] = paragraphs[i] + videotag
modified_text = "\n\n".join(paragraphs)
return modified_text
| [
"\n",
"You are a helpful AI customer support agent. Use the following pieces of context to answer the customer inquiry.",
"\nPLACEHOLDER",
"\nIf the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.",
"You are answering the question just like you are the owner or partner of the company described in the context.",
"input_prompt",
"\n```\n",
"\nIf you are unclear about the question, politely respond that you need a clearer and more detailed description.",
"\nIf you don't know the answer, just say you don't know. DO NOT try to make up an answer.",
"\nPLACEHOLDER\n```",
"prompt_tokens"
] |
2024-01-10 | monomadic/config | openai~html2md.py | import json
import openai
import os
import argparse
openai.api_key = os.getenv('OPENAI_API_KEY')
model = os.getenv('GPT_MODEL', "gpt-3.5-turbo")
def refactor_code(code_file=None):
if code_file is not None:
with open(code_file, 'r') as file:
code = file.read()
response = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": "You are an assistant that converts html to markdown. Don't include any explanations in your responses. Do not shorten the resulting markdown for brevity. Include yaml frontmatter if possible."
},
{
"role": "user",
"content": code
}
]
)
return response.choices[0].message['content']
def main():
parser = argparse.ArgumentParser(description='Convert html to markdown using OpenAI GPT-3.5-turbo.')
parser.add_argument('input', type=str, help='The code file to refactor.')
args = parser.parse_args()
print(refactor_code(code_file=args.input))
if __name__ == "__main__":
main()
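# Assumed invocation (not part of the original file): with OPENAI_API_KEY exported and
# GPT_MODEL optionally overriding the default "gpt-3.5-turbo":
#   python html2md.py page.html > page.md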
| [
"You are an assistant that converts html to markdown. Don't include any explanations in your responses. Do not shorten the resulting markdown for brevity. Include yaml frontmatter if possible."
] |
2024-01-10 | monomadic/config | openai~refactor.py | import json
import openai
import os
import argparse
openai.api_key = os.getenv('OPENAI_API_KEY')
model = os.getenv('GPT_MODEL', "gpt-3.5-turbo")
def refactor_code(code_file=None):
if code_file is not None:
with open(code_file, 'r') as file:
code = file.read()
response = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": "You are an assistant that refactors ghidra decompiler output to make it more readable. Don't include any explanations in your responses. Do not format the result as markdown, format it as regular code."
},
{
"role": "user",
"content": code
}
]
)
return response.choices[0].message['content']
def main():
parser = argparse.ArgumentParser(description='Refactor ghidra decompiler output using OpenAI GPT-3.5-turbo.')
parser.add_argument('input', type=str, help='The code file to refactor.')
args = parser.parse_args()
print(refactor_code(code_file=args.input))
if __name__ == "__main__":
main()
| [
"You are an assistant that refactors ghidra decompiler output to make it more readable. Don't include any explanations in your responses. Do not format the result as markdown, format it as regular code."
] |
2024-01-10 | monomadic/config | openai~format-markdown.py | import openai
import os
openai.api_key = os.getenv('OPENAI_API_KEY')
model_engine = "text-davinci-002"
# Extract text from the PDF using a PDF extraction tool
# NOTE: `text` must be supplied by an upstream PDF-extraction step; it is not defined in this script.
pdf_text = text
batch_size = 1024
# Split the PDF text into chunks of the desired size
text_chunks = [pdf_text[i:i+batch_size] for i in range(0, len(pdf_text), batch_size)]
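# Note: the split above is by character count rather than tokens, so max_tokens=len(chunk)
# in the completion call below is only a rough budget, not an exact token limit.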
# Initialize an empty list to store the generated text
generated_text = []
for chunk in text_chunks:
prompt = "Write a Markdown version of the PDF using the following text, keep the text intact and no suggestions should be added:\n\n" + chunk
completion = openai.Completion.create(engine=model_engine, prompt=prompt, max_tokens=len(chunk), n=1,stop=None,temperature=0)
generated_text.append(completion.choices[0].text)
# Combine the generated text into a single string
full_text = "".join(generated_text)
# Split the text into paragraphs
paragraphs = full_text.split("\n\n")
# Insert the images into the appropriate position in the list of paragraphs
for i, paragraph in enumerate(paragraphs):
if i == 2: # Insert image after the third paragraph
paragraphs.insert(i+1, "")
# Combine the paragraphs into a single string
formatted_text = "\n\n".join(paragraphs)
# Use Markdown syntax to format the message
formatted_message = "# PDF Text\n\n" + formatted_text
# Save the message to a file
with open("pdf_text.md", "w") as f:
f.write(formatted_message)
| [
"Write a Markdown version of the PDF using the following text, keep the text intact and no suggestions should be added:\n\nPLACEHOLDER"
] |
2024-01-10 | k-farruh/llm_solution | llm_service.py | import json
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import AnalyticDB
import os
import logging
import time
import requests
import sys
import argparse
from dotenv import dotenv_values
environment_file = '/etc/environmentadb'
dotenv_values(environment_file)
class LLMService:
def __init__(self, cfg) -> None:
self.cfg = cfg
self.vector_db = self.connect_adb()
def post_to_llm_eas(self, query_prompt):
# url = os.environ.get('PAI_ENDPOINT')
# author_key = os.environ.get('PAI_ACCESS_TOKEN')
url = self.cfg['EASCfg']['url']
author_key = self.cfg['EASCfg']['token']
headers = {
"Authorization": author_key,
'Accept': "*/*",
"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"
}
resp = requests.post(
url=url,
data=query_prompt.encode('utf8'),
headers=headers,
timeout=10000,
)
return resp.text
def connect_adb(self):
connection_string = AnalyticDB.connection_string_from_db_params(
host=self.cfg['ADBCfg']['PG_HOST'],
database=self.cfg['ADBCfg']['PG_DATABASE'],
user=self.cfg['ADBCfg']['PG_USER'],
password=self.cfg['ADBCfg']['PG_PASSWORD'],
# host=os.environ.get('PG_HOST'),
# database=os.environ.get('PG_DATABASE'),
# user=os.environ.get('PG_USER'),
# password=os.environ.get('PG_PASSWORD'),
driver='psycopg2cffi',
port=5432,
)
embedding_model = self.cfg['embedding']['embedding_model']
model_dir = self.cfg['embedding']['model_dir']
embed = HuggingFaceEmbeddings(model_name=os.path.join(model_dir, embedding_model), model_kwargs={'device': 'cpu'})
vector_db = AnalyticDB(
embedding_function=embed,
embedding_dimension=self.cfg['embedding']['embedding_dimension'],
connection_string=connection_string,
# pre_delete_collection=self.is_delete,
)
return vector_db
def upload_custom_knowledge(self):
docs_dir = self.cfg['create_docs']['docs_dir']
docs = DirectoryLoader(docs_dir, glob=self.cfg['create_docs']['glob'], show_progress=True).load()
text_splitter = CharacterTextSplitter(chunk_size=int(self.cfg['create_docs']['chunk_size']), chunk_overlap=self.cfg['create_docs']['chunk_overlap'])
docs = text_splitter.split_documents(docs)
start_time = time.time()
self.vector_db.add_documents(docs)
end_time = time.time()
print("Insert into AnalyticDB Success. Cost time: {} s".format(end_time - start_time))
def upload_file_knowledge(self, file):
# Check the file extension
if file.lower().endswith('.pdf'):
# Load PDF file
docs = PyPDFLoader(file).load()
elif file.lower().endswith(('.md', '.txt', '.html')):
# Load text file
docs = TextLoader(file).load()
else:
# Unsupported file format
raise ValueError("Unsupported file format")
text_splitter = CharacterTextSplitter(chunk_size=int(self.cfg['create_docs']['chunk_size']), chunk_overlap=self.cfg['create_docs']['chunk_overlap'])
docs = text_splitter.split_documents(docs)
start_time = time.time()
self.vector_db.add_documents(docs)
end_time = time.time()
print("Insert into AnalyticDB Success. Cost time: {} s".format(end_time - start_time))
def create_user_query_prompt(self, query):
docs = self.vector_db.similarity_search(query, k=int(self.cfg['query_topk']))
context_docs = ""
for idx, doc in enumerate(docs):
context_docs += "-----\n\n"+str(idx+1)+".\n"+doc.page_content
context_docs += "\n\n-----\n\n"
user_prompt_template = self.cfg['prompt_template']
user_prompt_template = user_prompt_template.format(context=context_docs, question=query)
return user_prompt_template
def user_query(self, query):
user_prompt_template = self.create_user_query_prompt(query)
print("Post user query to EAS-Llama 2")
start_time = time.time()
ans = self.post_to_llm_eas(user_prompt_template)
end_time = time.time()
print("Get response from EAS-Llama 2. Cost time: {} s".format(end_time - start_time))
print(ans)
return ans
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Command line argument parser')
parser.add_argument('--config', type=str, help='json configuration file input', default='config.json')
parser.add_argument('--upload', help='Upload knowledge base', default=False)
parser.add_argument('--query', help='user request query')
args = parser.parse_args()
if args.config:
if not args.upload and not args.query:
print('Not any operation is set.')
else:
if os.path.exists(args.config):
with open(args.config) as f:
cfg = json.load(f)
solver = LLMService(cfg)
if args.upload:
print('Uploading custom files to ADB.')
solver.upload_custom_knowledge()
if args.query:
user_prompt_template = solver.create_user_query_prompt(args.query)
answer = solver.user_query(args.query)
print("The answer is: ", answer)
else:
print(f"{args.config} does not exist.")
else:
print("The config json file must be set.")
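# A hedged sketch of the config.json structure this service expects (key names are taken
# from the cfg lookups above; the values shown are illustrative assumptions only):
# {
#   "EASCfg": {"url": "<EAS endpoint>", "token": "<EAS token>"},
#   "ADBCfg": {"PG_HOST": "...", "PG_DATABASE": "...", "PG_USER": "...", "PG_PASSWORD": "..."},
#   "embedding": {"embedding_model": "...", "model_dir": "...", "embedding_dimension": 1536},
#   "create_docs": {"docs_dir": "docs/", "glob": "**/*.txt", "chunk_size": 200, "chunk_overlap": 0},
#   "query_topk": 4,
#   "prompt_template": "... {context} ... {question} ..."
# }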
# python main.py --config config.json --query "Tell me about Machine Learning PAI" | [
"prompt_template"
] |
2024-01-10 | nghiempt/sr-dps-server | helper_function~get_label.py | import asyncio
import ssl
import csv
import json
import openai
import os
from dotenv import load_dotenv
class CHATGPT_GENERATOR:
@staticmethod
def get_prompt(ds_content, pp_content):
prompt = """Let's compare and analyze the information between Data Safety and Privacy Policy to clarify 3 issues: which information is incorrect, which information is incomplete and which information is inconsistent. Notes when classifying: Incomplete: Data Safety provides information but is not as complete as the Privacy Policy provides. Incorrect: Data Safety does not provide that information, but the Privacy Policy mentions it. Inconsistency: Data Safety is provided but its description is inconsistent with the Privacy Policy information provided. Note: always gives me the result (0 or 1, 1 is yes, 0 is no) in the form below: {"label" : { "incorrect": (0 or 1), "incomplete": (0 or 1), "inconsistent": (0 or 1) }, "label_description" " { "incorrect": "explaination", "incomplete": "explaination", "inconsistent": "explaination" } } . Please in the answer, just give me the json only and in English. Below is information for 2 parts: Data Safety: """ + ds_content + """, Privacy Policy: """ + pp_content
return prompt
@staticmethod
def get_completion(prompt):
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.chat.completions.create(
model=os.getenv("GPT_4"),
messages=[
{"role": "system", "content": "You are an assistant who analyzes and evaluates the correct, complete, and consistency between the Data Safety information provided compared to the information provided by the Privacy Policy of applications on the Google Play Store."},
{"role": "user", "content": prompt}
]
)
assistant_reply = response.choices[0].message.content
return assistant_reply
class DATASET_GENERATOR:
@staticmethod
def loop_csv(input_csv_path, output_csv_path, chatgpt_generator):
with open(input_csv_path, "r", newline="", encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
new_data = []
for index, row in enumerate(reader):
try:
print("\n_____________ Run times App ID: " + row['app_id'] + "_____________")
prompt = chatgpt_generator.get_prompt(row['app_data_safety'], row['app_privacy_policy'])
result = chatgpt_generator.get_completion(prompt)
new_row = [row['app_id'], result]
new_data.append(new_row)
except Exception as e:
print(e)
new_row = [row['app_id'], "ERROR"]
new_data.append(new_row)
print("~~~~~~~~~~~~~~ ERROR ~~~~~~~~~~~~~~\n")
with open(output_csv_path, 'w', newline='', encoding='utf-8') as csvfile:
csv_writer = csv.writer(csvfile)
csv_writer.writerow(['app_id', 'label_content'])
csv_writer.writerows(new_data)
if __name__ == "__main__":
chatgpt_generator = CHATGPT_GENERATOR()
input_csv_path = "../dataset/formated_data/dataset.csv"
output_csv_path = "../dataset/formated_data/label.csv"
DATASET_GENERATOR().loop_csv(input_csv_path, output_csv_path, chatgpt_generator)
| [
"app_privacy_policy",
"You are an assistant who analyzes and evaluates the correct, complete, and consistency between the Data Safety information provided compared to the information provided by the Privacy Policy of applications on the Google Play Store.",
"Let's compare and analyze the information between Data Safety and Privacy Policy to clarify 3 issues: which information is incorrect, which information is incomplete and which information is inconsistent. Notes when classifying: Incomplete: Data Safety provides information but is not as complete as the Privacy Policy provides. Incorrect: Data Safety does not provide that information, but the Privacy Policy mentions it. Inconsistency: Data Safety is provided but its description is inconsistent with the Privacy Policy information provided. Note: always gives me the result (0 or 1, 1 is yes, 0 is no) in the form below: {\"label\" : { \"incorrect\": (0 or 1), \"incomplete\": (0 or 1), \"inconsistent\": (0 or 1) }, \"label_description\" \" { \"incorrect\": \"explaination\", \"incomplete\": \"explaination\", \"inconsistent\": \"explaination\" } } . Please in the answer, just give me the json only and in English. Below is information for 2 parts: Data Safety: PLACEHOLDER, Privacy Policy: PLACEHOLDER",
"app_data_safety"
] |
2024-01-10 | nghiempt/sr-dps-server | helper_function~make_final_dataset_only_prompt.py | import asyncio
import ssl
from urllib.request import urlopen
from bs4 import BeautifulSoup
import csv
import json
import openai
import os
from dotenv import load_dotenv
from newspaper import Article
class READ_DATA_SAFETY:
@staticmethod
def load_html_content(url):
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
return soup
@staticmethod
def indent_content(tag):
tag_content = tag.get_text(separator='\n')
if tag_content.strip() == '· Optional':
return str(tag.contents[0])
if (tag_content.strip() != '· Optional') & (tag.name != 'h3'):
return '\n data-section: ' + str(tag.contents[0])
tag_name = tag.name
if tag_name == 'h3':
return '\n\t category: ' + str(tag.contents[0])
elif tag_name == 'h2':
return str(tag.contents[0]) + '\n'
else:
return ''
@staticmethod
def filter_content(soup):
global title
title = soup.find('div', class_='ylijCc').get_text(separator='\n')
content = ""
tags = soup.find_all(['h2', 'h3', 'h4'])
div_tag = soup.find_all('div', class_='FnWDne')
i = 0
j = 0
for i in range(len(tags)):
if tags[i].contents:
span_tag = tags[i].find('span')
if span_tag:
span_text = span_tag.get_text(separator='\n')
if span_text.strip() != '· Optional':
content += '\n\t\tdata-type: ' + span_text
content += '\n\t\tpurpose: ' + \
div_tag[j].get_text(
separator='\n')
j += 1
span_tag.decompose()
if tags[i].contents:
content += READ_DATA_SAFETY.indent_content(tags[i])
return content
@staticmethod
async def scrape_link(url):
loop = asyncio.get_event_loop()
soup = await loop.run_in_executor(None, READ_DATA_SAFETY.load_html_content, url)
text = READ_DATA_SAFETY.filter_content(soup)
return text
@staticmethod
def formated_data_v0(content):
# content_data = ''
# lines = content.splitlines()
# for i in range(len(lines)):
# category = data_section = data_type = purpose = optional = ''
# if lines[i].startswith(' data-section:') and not lines[i].startswith(' data-section: Security practices'):
# data_section = lines[i].replace(' data-section: ', '')
# content_data += data_section + ": "
# for j in range(i + 1, len(lines)):
# if lines[j].startswith('\t category: '):
# category = lines[j].replace('\t category: ', '')
# content_data = content_data + category+"("
# for k in range(j + 1, len(lines)):
# if lines[k].startswith('\t\tdata-type: '):
# data_type = lines[k].replace(
# '\t\tdata-type: ', '')
# content_data = content_data + data_type+", "
# elif lines[k].startswith('\t\tpurpose: '):
# purpose = lines[k].replace('\t\tpurpose: ', '')
# if purpose.endswith(' · Optional'):
# optional = True
# purpose = purpose.replace(
# ' · Optional', '')
# else:
# optional = False
# purpose = purpose.replace(', ', ' - ')
# elif lines[j].startswith('\t category: '):
# content_data = content_data[:-2] + "), "
# break
# elif lines[j].startswith(' data-section:'):
# content_data = content_data[:-2] + "; "
# break
# elif lines[i].startswith(' data-section: Security practices'):
# data_section = lines[i].replace(' data-section: ', '')
# content_data += data_section + ": "
# for j in range(i + 1, len(lines)):
# if lines[j].startswith('\t category: '):
# category = lines[j].replace('\t category: ', '')
# content_data = content_data + category + ", "
# elif lines[j].startswith(' data-section:'):
# content_data = content_data[:-2] + "; "
# break
# content_data = content_data[:-2]
# sections = content_data.split(';')
# result_dict = {}
# for section in sections:
# parts = section.split(':')
# key = parts[0].strip()
# value = parts[1].strip() if len(parts) > 1 else None
# result_dict[key] = value
return ""
@staticmethod
def formated_data(content):
result_json = {}
lines = content.splitlines()
current_section = None
for line in lines:
if line.startswith(' data-section: No data shared'):
current_section = "data_shared"
result_json[current_section] = []
elif line.startswith(' data-section: No data collected'):
current_section = "data_collected"
result_json[current_section] = []
elif line.startswith(' data-section:'):
current_section = line.replace(' data-section: ', '').strip()
current_section = current_section.replace(' ', '_').lower()
result_json[current_section] = []
elif line.startswith('\t category: '):
category = line.replace('\t category: ', '').strip()
current_data = {"category": category, "sub_info": []}
for sub_line in lines[lines.index(line) + 1:]:
if sub_line.startswith('\t\tdata-type: '):
data_type = sub_line.replace('\t\tdata-type: ', '').strip()
elif sub_line.startswith('\t\tpurpose: '):
purpose = sub_line.replace('\t\tpurpose: ', '').strip()
optional = purpose.endswith(' · Optional')
purpose = purpose.replace(' · Optional', '').replace(', ', ' - ')
current_data["sub_info"].append({
"data_type": data_type,
"purpose": purpose,
"optional": optional
})
elif sub_line.startswith('\t category: '):
break
result_json[current_section].append(current_data)
return result_json
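# Hedged illustration of the dict returned above (section keys come from the scraped
# "data-section" headings, lower-cased with spaces replaced by underscores):
#   {"data_shared": [{"category": "...", "sub_info": [
#        {"data_type": "...", "purpose": "...", "optional": False}]}],
#    "data_collected": [...], "security_practices": [...]}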
@staticmethod
def formated_data_string_only(content):
content_data = ''
lines = content.splitlines()
for i in range(len(lines)):
category = data_section = data_type = purpose = ''
if lines[i].startswith(' data-section: No data shared'):
data_section = lines[i].replace(' data-section: ', '')
content_data += "[Data shared: " + data_section + "] - "
elif lines[i].startswith(' data-section: No data collected'):
data_section = lines[i].replace(' data-section: ', '')
content_data += "[Data collected: " + data_section + "] - "
elif lines[i].startswith(' data-section:') and not lines[i].startswith(' data-section: Security practices'):
data_section = lines[i].replace(' data-section: ', '')
content_data += "[" + data_section + ": "
for j in range(i + 1, len(lines)):
if lines[j].startswith('\t category: '):
category = lines[j].replace('\t category: ', '')
content_data = content_data + category + "("
for k in range(j + 1, len(lines)):
if lines[k].startswith('\t\tdata-type: '):
data_type = lines[k].replace('\t\tdata-type: ', '')
content_data = content_data + data_type + " · <"
elif lines[k].startswith('\t\tpurpose: '):
purpose = lines[k].replace('\t\tpurpose: ', '').replace(", ", " - ")
if purpose.endswith(' · Optional'):
purpose = purpose.replace(" · Optional", "")
content_data += purpose + "> · Optional, "
else:
content_data += purpose + ">, "
elif lines[j].startswith('\t category: '):
content_data = content_data[:-2] + "), "
break
elif lines[j].startswith(' data-section:'):
content_data = content_data[:-2] + "] - "
break
elif lines[i].startswith(' data-section: Security practices'):
data_section = lines[i].replace(' data-section: ', '')
content_data += "[" + data_section + ": "
for j in range(i + 1, len(lines)):
if lines[j].startswith('\t category: '):
category = lines[j].replace('\t category: ', '')
content_data = content_data + category + ", "
content_data = content_data[:-2] + "] - "
content_data = content_data[:-3]
return content_data
@staticmethod
def generate_result(user_url):
preprocess_datasafety = asyncio.run(
READ_DATA_SAFETY().scrape_link(user_url))
content_data_safety = READ_DATA_SAFETY().formated_data(
preprocess_datasafety)
return content_data_safety
class READ_PRIVACY_POLICY:
def __init__(self):
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
@staticmethod
def get_completion(prompt, model="gpt-4"):
messages = [{"role": "user", "content": prompt}]
response = openai.chat.completions.create(
model=model,
messages=messages,
temperature=0.9,
)
return response.choices[0].message.content
@staticmethod
def remove_empty_lines(content):
lines = content.split('\n')
cleaned_lines = [line.strip() for line in lines if line.strip()]
return '\n'.join(cleaned_lines)
@staticmethod
def check_valid_token_prompt(prompt):
print(len(prompt))
return len(prompt) <= 8000
@staticmethod
def generate_result(url):
try:
article = Article(url)
article.download()
article.parse()
text = READ_PRIVACY_POLICY.remove_empty_lines(article.text)
prompt = 'Help me to find the origin text about 3 things: type/purpose of data the app shared with others, type/purpose of data the app collected and Security Practices in the text below in this json format: {"data_shared" : "a string", "data_collected": "a string", "security_practices" : "a string"} . Please in the answer, just give me the json only and in English: \n'
check_valid_token_prompt = READ_PRIVACY_POLICY.check_valid_token_prompt(
prompt + text)
if check_valid_token_prompt:
response = READ_PRIVACY_POLICY.get_completion(prompt + text)
return response
else:
return "No provide sharing information section"
except Exception as e:
print(f"An exception occurred: {e}")
return "An error occurred during processing"
@staticmethod
def generate_result_string_only(url):
try:
article = Article(url)
article.download()
article.parse()
text = READ_PRIVACY_POLICY.remove_empty_lines(article.text)
prompt = 'Help me to find the origin text about 3 things: type/purpose of data the app shared with others, type/purpose of data the app collected and Security Practices in the text below in this json format: {"data_shared" : "a string", "data_collected": "a string", "security_practices" : "a string"} . Please in the answer, just give me the json only and in English: \n'
check_valid_token_prompt = READ_PRIVACY_POLICY.check_valid_token_prompt(
prompt + text)
if check_valid_token_prompt:
response = READ_PRIVACY_POLICY.get_completion(prompt + text)
if response.startswith("{"):
data_dict = json.loads(response)
data_shared = data_dict["data_shared"]
data_collected = data_dict["data_collected"]
security_practices = data_dict["security_practices"]
formatted_output = f"[Data shared: {data_shared}] - [Data Collected: {data_collected}] - [Security practices: {security_practices}]"
print(formatted_output)
return formatted_output
else:
return response
else:
return "No provide sharing information section"
except Exception as e:
print(f"An exception occurred: {e}")
return "An error occurred during processing"
class MAKE_PROMPT:
@staticmethod
def get_prompt(id, ds_content, pp_content):
json_prompt = '''
Let's compare and analyze the information between Data Safety and Privacy Policy to clarify 3 issues: which information is incorrect, which information is incomplete and which information is inconsistent.\n\nNotes when classifying:\n+ Incomplete: Data Safety provides information but is not as complete as the Privacy Policy provides.\n+ Incorrect: Data Safety does not provide that information, but the Privacy Policy mentions it.\n+ Inconsistency: Data Safety is provided but its description is inconsistent with the Privacy Policy information provided.\n\nNote: always gives me the result (0 or 1) in the form below:\nIncomplete: 0 or 1 (1 is yes, 0 is no)\nIncorrect: 0 or 1 (1 is yes, 0 is no)\nInconsistency: 0 or 1 (1 is yes, 0 is no)\n\nBelow is information for 2 parts:
Data Safety - Share section: ''' + ds_content + '''
Privacy Policy - Share section: ''' + pp_content + '''
'''
return json_prompt
class JSON_MAKER:
@staticmethod
def remove_empty_lines(content):
lines = content.split('\n')
cleaned_lines = [line.strip() for line in lines if line.strip()]
return '\n'.join(cleaned_lines)
@staticmethod
def get_dataset(prompt, completion, package_name, json_file_path):
with open(json_file_path, 'r') as json_file:
existing_data = json.load(json_file)
newdata = {
"prompt": JSON_MAKER().remove_empty_lines(prompt),
"completion": JSON_MAKER().remove_empty_lines(completion),
"package_name": package_name,
}
existing_data.append(newdata)
with open(json_file_path, 'w') as json_file:
json.dump(existing_data, json_file, indent=4)
@staticmethod
def loop_csv(csv_path, json_path, step_5, step_6, step_7):
with open(csv_path, "r", newline="", encoding="utf-8") as csvfile:
reader = csv.reader(csvfile)
next(reader)
for index, row in enumerate(reader):
print("\n_____________ Run times " +
row[0] + " <" + row[2] + "> " + "_____________")
content_5 = step_5.generate_result(row[7])
content_6 = step_6.generate_result(row[8])
content_7 = step_7.get_prompt(row[0], content_5, content_6)
content_8 = ""
JSON_MAKER.get_dataset(content_7, content_8, row[2], json_path)
print("~~~~~~~~~~~~~~ Success ~~~~~~~~~~~~~~\n")
if __name__ == "__main__":
step_5 = READ_DATA_SAFETY()
step_6 = READ_PRIVACY_POLICY()
step_7 = MAKE_PROMPT()
csv_path = ""
json_path = ""
JSON_MAKER().loop_csv(csv_path, json_path, step_5, step_6, step_7)
| [
"Help me to find the origin text about 3 things: type/purpose of data the app shared with others, type/purpose of data the app collected and Security Practices in the text below in this json format: {\"data_shared\" : \"a string\", \"data_collected\": \"a string\", \"security_practices\" : \"a string\"} . Please in the answer, just give me the json only and in English: \n",
"\n Let's compare and analyze the information between Data Safety and Privacy Policy to clarify 3 issues: which information is incorrect, which information is incomplete and which information is inconsistent.\n\nNotes when classifying:\n+ Incomplete: Data Safety provides information but is not as complete as the Privacy Policy provides.\n+ Incorrect: Data Safety does not provide that information, but the Privacy Policy mentions it.\n+ Inconsistency: Data Safety is provided but its description is inconsistent with the Privacy Policy information provided.\n\nNote: always gives me the result (0 or 1) in the form below:\nIncomplete: 0 or 1 (1 is yes, 0 is no)\nIncorrect: 0 or 1 (1 is yes, 0 is no)\nInconsistency: 0 or 1 (1 is yes, 0 is no)\n\nBelow is information for 2 parts:\n Data Safety - Share section: PLACEHOLDER\n Privacy Policy - Share section: PLACEHOLDER\n "
] |
2024-01-10 | joshkyh/collagis_serp | Python~google_maps~azure_openai.py | import os
import openai
from tqdm import tqdm
import pandas as pd
from azure.identity import DefaultAzureCredential
from pprint import pprint
credential = DefaultAzureCredential()
token = credential.get_token("https://cognitiveservices.azure.com/.default",
tenant_id='ef12d42b-60e3-4161-87ee-98ebcc73eae8')
pprint(dir(token))
openai.api_type = "azure"
openai.api_key = os.getenv("DEV_OAIAPI")
openai.api_base = "https://dev-oaiapi.openai.azure.com/"
openai.api_version = "2023-05-15" # subject to change
# Move get_embedding to the global scope
def get_embedding(text_to_embed):
embedding = openai.Embedding.create(
input=text_to_embed, deployment_id="text-embedding-ada-002"
)["data"][0]["embedding"]
return embedding
emb = get_embedding(text_to_embed='Hi there!')
def get_ada_embeddings():
# Get the reviews
reviews = pd.read_parquet('data/google_maps_reviews.parquet', engine='pyarrow')
# Get non-null reviews
reviews = reviews[reviews['snippet'].notnull()]
# Get Quartile 1 character length
reviews['snippet'].str.len().quantile(0.25)
reviews['snippet'].str.len().quantile(0.50)
reviews['snippet'].str.len().quantile(0.75)
# Get OPENAI KEY
openai.api_key = os.environ.get("OPENAI_API_KEY")
# Initialize tqdm with pandas
tqdm.pandas()
# Apply the function and show a progress bar
reviews["embedding"] = reviews["snippet"].astype(str).progress_apply(get_embedding)
# Write the reviews to a parquet file
reviews.to_parquet('data/google_maps_reviews_with_embeddings.parquet', engine='pyarrow')
if __name__ == '__main__':
get_ada_embeddings()
| [] |
2024-01-10 | chrisTORTUS/chatOsler | osler.py | #!/usr/bin/env python3
from tkinter import *
import intents
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import torch
import os
import base64
from azure.ai.ml import MLClient
from azure.identity import DefaultAzureCredential
import json
from google.cloud import vision
from PIL import ImageGrab, Image, ImageDraw
import pandas as pd
import numpy as np
import time
import pyautogui
import openai
from PIL import Image, ImageTk
import cv2
# from picovoice import Picovoice
# from pvrecorder import PvRecorder
import pyperclip
import json
import re
import sys
from google.cloud import speech
import pyaudio
from six.moves import queue
import sounddevice as sd
from scipy.io.wavfile import write
# import mutagen
# from mutagen.wave import WAVE
import eyed3
import requests
import epic_screens
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import torch
import threading
import subprocess
from dotenv import load_dotenv
from gpt import ask_gpt
import gpt_prompts
from mpyg321.MPyg123Player import MPyg123Player # or MPyg321Player if you installed mpg321
from threading import Thread
import threading
from picovoice import Picovoice
from pvrecorder import PvRecorder
import platform
import pvleopard
load_dotenv()
openai.api_key = os.environ['OPENAI_API_KEY']
# Audio recording parameters
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
LEOPARD = pvleopard.create(access_key=os.environ['PICOVOICE_ACCESS_KEY'], model_path='./picovoice/transcribeUserResponse-leopard-v1.2.0-23-06-26--13-50-08.pv')
PLAYER = MPyg123Player()
def update_chat_text(msg):
txt.insert(END, "\n" + msg)
class MicrophoneStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk):
self._rate = rate
self._chunk = chunk
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1,
rate=self._rate,
input=True,
frames_per_buffer=self._chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b"".join(data)
class PicovoiceThread(Thread):
def __init__(self, label, access_key):
super().__init__()
# Picovoice access key
self._access_key = access_key
# tkinter gui
self._label = label
self._width = 350
self._height = 350
# speech recognition variables
self._is_ready = False
self._stop = False
self._is_stopped = False
@staticmethod
def _keyword_path():
'''
Method to retrieve Porcupine wake word.
'''
if platform.system() == "Darwin":
return os.path.join(
os.path.dirname(__file__),
"./picovoice/Hey-Osler_en_mac_v2_2_0.ppn")
elif platform.system() == 'Windows':
return os.path.join(
os.path.dirname(__file__),
"./picovoice/hey-osler_en_windows_v2_1_0.ppn")
else:
raise ValueError("unsupported platform '%s'" % platform.system())
@staticmethod
def _context_path():
'''
Method to retrieve Rhino context file (speech-to-intent).
'''
if platform.system() == "Darwin":
return os.path.join(
os.path.dirname(__file__),
"./picovoice/epic_en_mac_v2_2_0.rhn")
elif platform.system() == 'Windows':
return os.path.join(
os.path.dirname(__file__),
"./picovoice/Clinical-Demo_en_mac_v2_1_0.rhn")
else:
raise ValueError("unsupported platform '%s'" % platform.system())
def match_intent(self, utterance):
model = SentenceTransformer("sentence-transformers/multi-qa-mpnet-base-dot-v1")
intents_ls = [
intents.START_CONSULTATION_NOTE,
intents.TRANSCRIBE_CONSULTATION,
# intents.SUMMARISE_CONSULTATION,
intents.PLACE_ORDERS,
intents.FILE_DIAGNOSES
# intents.ANSWER_QUESTIONS,
# intents.WRITE_LETTER,
# intents.QUERY_MEDS,
# intents.QUERY_ORDERS,
]
intent_embeddings = model.encode(intents_ls)
utterance_embeddings = model.encode(utterance)
cos_scores = cosine_similarity(utterance_embeddings.reshape(1, -1), intent_embeddings)
cos_scores_torch = torch.from_numpy(cos_scores)
cos_max = torch.max(cos_scores_torch).item()
cos_argmax = torch.argmax(cos_scores_torch, dim=1)
cos = cos_argmax[0].item()
intent = intents_ls[cos]
print(f"Intent matched: {intent}")
return intent, cos_max
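# Minimal usage sketch (the utterance below is hypothetical):
#   intent, score = self.match_intent("start a consultation note for patient one two three")
# match_intent returns the closest entry from intents_ls plus its cosine-similarity score;
# _wake_word_callback below only acts on the intent when score > 0.6.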
def _wake_word_callback(self):
img = Image.open("demo_screenshots/osler_awake_smaller.png")
osler_awake = ImageTk.PhotoImage(img)
self._label.configure(image=osler_awake)
self._label.image = osler_awake
# receive and transcribe the user utterance. Records for 6 seconds currently.
recorder.stop()
self.get_user_utterance()
actor.user_utterance_text = self.leopard_transcribe()
# update the chat interface with the user command
update_chat_text("You -> " + actor.user_utterance_text)
# match the utterance to an intent
intent, score = self.match_intent(actor.user_utterance_text)
print(score)
# perform the action if the match score is above a cosine-similarity threshold (currently 0.6)
if float(score) > 0.6:
# update the chat interface with the interpreted intent
update_chat_text("OSLER -> It looks like you asked me to perform the task: " + intent)
# extract mrn from utterance
if intent == intents.START_CONSULTATION_NOTE:
actor.global_mrn = actor.extract_mrn_from_utterance(actor.user_utterance_text)
print('mrn: ', actor.global_mrn)
self.osler_thinking()
actor.act(intent)
else:
# update the chat interface with message reporting unsupported command
update_chat_text("OSLER -> This request is not currently supported.")
#play the audio file
PLAYER.play_song("no_matched_intent.wav")
time.sleep(5)
# resume the recorder
recorder.start()
self.osler_sleeping()
def osler_thinking(self):
img = Image.open("demo_screenshots/osler_thinking_smaller.png")
osler_thinking = ImageTk.PhotoImage(img)
self._label.configure(image=osler_thinking)
self._label.image = osler_thinking
def osler_sleeping(self):
img = Image.open("demo_screenshots/osler_sleep_smaller.png")
osler_sleeping = ImageTk.PhotoImage(img)
self._label.configure(image=osler_sleeping)
self._label.image = osler_sleeping
def perform_action(self, intent):
self.osler_thinking()
recorder.stop()
actor.act(intent)
recorder.start()
self.osler_sleeping()
def get_user_utterance(self):
fs = 44100 # Sample rate
seconds = 6 # Duration of recording
myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)
sd.wait() # Wait until recording is finished
write('user_utterance.wav', fs, myrecording) # Save as WAV file
def leopard_transcribe(self):
transcript, words = LEOPARD.process_file('user_utterance.wav')
print(transcript)
for word in words:
print(
"{word=\"%s\" start_sec=%.2f end_sec=%.2f confidence=%.2f}"
% (word.word, word.start_sec, word.end_sec, word.confidence))
return transcript
def _inference_callback(self, inference):
pass
def run(self):
pv = None
global recorder
recorder = None
global actor
actor = Actor()
try:
pv = Picovoice(
access_key=self._access_key,
keyword_path=self._keyword_path(),
porcupine_sensitivity=0.75,
wake_word_callback=self._wake_word_callback,
context_path=self._context_path(),
inference_callback=self._inference_callback)
print(pv.context_info)
recorder = PvRecorder(device_index=-1, frame_length=pv.frame_length)
recorder.start()
self._is_ready = True
while not self._stop:
pcm = recorder.read()
pv.process(pcm)
finally:
if recorder is not None:
recorder.delete()
if pv is not None:
pv.delete()
self._is_stopped = True
def is_ready(self):
return self._is_ready
def stop(self):
self._stop = True
def is_stopped(self):
return self._is_stopped
class Actor:
def __init__(self) -> None:
self.consultation_transcript = ""
self.transcript_summary = ""
self.consultation_entities = {'orders': [{'name': 'X-ray of upper chest', 'reason': 'Patient has been experiencing chest pain'}, {'name': 'MRI of stomach', 'reason': "Investigation related to patient's chest pain"}, {'name': '24-hour RVCG', 'reason': 'Further investigation of reported chest pain'}], 'medicine': [{'name': 'Blood thinners', 'dosage': 'Unspecified', 'reason': 'Chest pain'}, {'name': 'Ibuprofen', 'dosage': 'Unspecified', 'reason': 'Chest pain'}], 'visit_diagnoses': []}
# self.consultation_entities = {}
self.mrn_flag = False
self.user_utterance_text = ''
self.patient_mrn_str = ''
self.patient_mrn_digits = '111'
self.med_hx = ''
self.letters_hx = ''
self.global_mrn = ''
self.consultation_done = False
def get_element_center(self, loc):
'''
Method to get the center of element's bounding box
'''
corner_x, corner_y = loc[0], loc[1]
width, height = loc[2], loc[3]
x, y = corner_x/2 + width/4, corner_y/2 + height/4
return x, y
def click_screenshot(self, screenshot, confidence=0.8):
'''
Method to click on a matching screenshot.
'''
# loc = pyautogui.locateOnScreen(root_path + f"demo_screenshots/{screenshot}", confidence=confidence)
loc = pyautogui.locateOnScreen(f"demo_screenshots/{screenshot}")
if loc is None:
print('cant find it!')
return 0
# raise Exception("Matching image not found on screen.")
x, y = self.get_element_center(loc)
print(f"Mouse click at: {x, y}")
pyautogui.click(x, y)
return 1
def activate_application(self, app_name):
applescript_code = f'''
tell application "{app_name}"
activate
end tell
'''
process = subprocess.Popen(['osascript', '-e', applescript_code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
output, error = process.communicate()
if error:
print("Error executing AppleScript:", error)
return
def new_tab(self):
'''
Opens a new tab.
'''
pyautogui.hotkey("ctrl", "t")
def type_string(self, char_string, interval=0.2):
'''
Types a given string.
'''
pyautogui.write(char_string, interval=interval)
def press_key(self, key, presses=1):
'''
Presses a given key.
'''
pyautogui.press(key, presses=presses)
def press_command(self, command):
'''
Performs a given hotkey command.
'''
if command == "copy":
pyautogui.hotkey("ctrl", "c")
elif command == "paste":
pyautogui.hotkey("ctrl", "v")
elif command == "tab_back":
pyautogui.hotkey("alt", "tab")
else:
raise Exception(f"Command {command} not recognized.")
def scroll(self, offset):
'''
Vertical scrolling.
'''
pyautogui.scroll(offset)
def _wake_word_callback(self):
img = Image.open("demo_screenshots/osler_awake_smaller.png")
osler_awake = ImageTk.PhotoImage(img)
self._label.configure(image=osler_awake)
self._label.image = osler_awake
def listen_print_loop(self, responses):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = " " * (num_chars_printed - len(transcript))
if not result.is_final:
# sys.stdout.write(transcript + overwrite_chars + "\r")
# sys.stdout.flush()
# num_chars_printed = len(transcript)
pass
else:
# print(transcript + overwrite_chars)
output = transcript + overwrite_chars
self.consultation_transcript += output
output = output.lower()
if "stop recording" in output:
break
pyperclip.copy(transcript + overwrite_chars)
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
# Exit recognition if any of the transcribed phrases could be
# one of our keywords.
if re.search(r"\b(exit|quit)\b", transcript, re.I):
print("Exiting..")
break
num_chars_printed = 0
def transcribe(self):
# See http://g.co/cloud/speech/docs/languagesv
# for a list of supported languages.
language_code = "en-US" # a BCP-47 language tag
client = speech.SpeechClient()
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=language_code,
model='medical_conversation'
)
streaming_config = speech.StreamingRecognitionConfig(
config=config, interim_results=True
)
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
requests = (
speech.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator
)
responses = client.streaming_recognize(streaming_config, requests)
# Now, put the transcription responses to use.
self.listen_print_loop(responses)
def match_screen(self):
# get text representation of current screen
current_screen = ""
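# NOTE: the screen text is left empty in this method; the module-level parse_screen() and match_screen() functions defined later in this file perform the actual screen capture and matching.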
model = SentenceTransformer("sentence-transformers/multi-qa-mpnet-base-dot-v1")
screens_ls = [
epic_screens.PATIENT_LOOKUP,
epic_screens.SCHEDULE,
epic_screens.PATIENT_PAGE
]
epic_embeddings = model.encode(screens_ls)
screen_embeddings = model.encode(current_screen)
cos_scores = cosine_similarity(screen_embeddings.reshape(1, -1), epic_embeddings)
cos_scores_torch = torch.from_numpy(cos_scores)
cos_max = torch.max(cos_scores_torch).item()
cos_argmax = torch.argmax(cos_scores_torch, dim=1)
cos = cos_argmax[0].item()
print(cos_scores)
intent = screens_ls[cos]
print(f"Intent matched: {intent}")
def act(self, intent):
if intent == intents.START_CONSULTATION_NOTE:
self.new_consultation_mrn()
elif intent == intents.TRANSCRIBE_CONSULTATION:
self.transcribe_consultation()
self.consultation_done = True
elif intent == intents.WRITE_LETTER:
self.write_referral()
elif intent == intents.PLACE_ORDERS:
self.place_orders()
elif intent == intents.FILE_DIAGNOSES:
self.file_diagnoses()
elif intent == intents.LIST_ABILITIES:
self.list_abilities()
# elif intent == intents.ANSWER_QUESTIONS:
# self.ask_general_consultation_question()
# elif intent == intents.QUERY_ORDERS:
# self.query_orders()
# elif intent == intents.QUERY_MEDS:
# self.query_meds()
else:
raise ValueError("unsupported intent '%s'" % intent)
# update to sleeping mode after task done
picovoice_thread.osler_sleeping()
def get_user_voice_response(self):
fs = 44100 # Sample rate
seconds = 6 # Duration of recording
myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)
sd.wait() # Wait until recording is finished
write('user_response.wav', fs, myrecording) # Save as WAV file
def leopard_transcribe(self):
transcript, words = LEOPARD.process_file('user_response.wav')
print(transcript)
for word in words:
print(
"{word=\"%s\" start_sec=%.2f end_sec=%.2f confidence=%.2f}"
% (word.word, word.start_sec, word.end_sec, word.confidence))
return transcript
def str_to_digit(self, nstr):
digit = ''
is_digit = True
if nstr == 'zero':
digit = '0'
elif nstr == 'one':
digit = '1'
elif nstr == 'two':
digit = '2'
elif nstr == 'three':
digit = '3'
elif nstr == 'four':
digit = '4'
elif nstr == 'five':
digit = '5'
elif nstr == 'six':
digit = '6'
elif nstr == 'seven':
digit = '7'
elif nstr == 'eight':
digit = '8'
elif nstr == 'nine':
digit = '9'
else:
print('error converting string to digit')
is_digit = False
return digit, is_digit
def convert_string_to_num(self, num_str):
num_str_ls = num_str.split(' ')
digits_str = ''
for word in num_str_ls:
# str_to_digit returns a (digit, is_digit) tuple, so unpack it before concatenating
digit, is_digit = self.str_to_digit(word)
if is_digit:
digits_str += digit
return digits_str
def extract_mrn_from_utterance(self, utterance_str):
str_ls = utterance_str.split(' ')
mrn = ''
for s in str_ls:
digit, is_digit = self.str_to_digit(s)
if is_digit:
mrn += digit
return mrn
def extract_mrn_from_text(self, utterance_str):
str_ls = utterance_str.split(' ')
mrn = ''
for s in str_ls:
if s.isdigit():
mrn = s
return mrn
def ask_general_consultation_question(self):
# play the audio file of the question
PLAYER.play_song("ask_general_consultation_question.wav")
time.sleep(2)
# record the user response and write to a wav audio file
self.get_user_voice_response()
# use picovoice leopard to transcribe the audio response file
question = self.leopard_transcribe()
# combine the question with the consultation transcript
question_about_consultation_prompt = 'INSTRUCTION: You are a medical doctor who has just performed a consultation and is provided with a transcript of the consultation. Answer a question about the consultation as accurately as possible. The consultation transcript and question about it will follow\n'
question_about_consultation_prompt += '\nCONSULTATION TRANSCRIPT: \n' + self.consultation_transcript
question_about_consultation_prompt += '\nQUESTION ABOUT CONSULTATION: \n' + question + '?\n\n'
response=openai.Completion.create(
model="text-davinci-003",
prompt=question_about_consultation_prompt,
max_tokens=2500,
temperature=0
)
answer = json.loads(str(response))
answer = answer['choices'][0]['text']
# print the answer
print(answer)
# create the audio file from the text
# Language in which you want to convert
language = 'en'
# Pass the text and language to gTTS; slow=False produces speech at normal speed
myobj = gTTS(text=answer, lang=language, slow=False)
# Save the generated audio (gTTS writes MP3 data even though this filename ends in .wav)
myobj.save("consulation_answer.wav")
# get the length of the audio file to know how long to sleep while playing
audio = eyed3.load("consulation_answer.wav")
length_in_seconds = int(audio.info.time_secs)
#play the audio file
PLAYER.play_song("consulation_answer.wav")
time.sleep(length_in_seconds + 1)
def extract_letters(self):
time.sleep(2)
pyautogui.keyDown('ctrl')
pyautogui.press('space')
pyautogui.keyUp('ctrl')
time.sleep(1)
pyperclip.copy('chart review')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
time.sleep(1)
pyautogui.press('down')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(2)
self.click_screenshot("letters.png")
time.sleep(2)
self.click_screenshot("recent_letters.png")
time.sleep(1)
letters = ''
for i in range(5):
pyautogui.press("enter")
time.sleep(2)
pyautogui.click()
time.sleep(1)
pyautogui.keyDown('command')
pyautogui.press('a')
pyautogui.keyUp('command')
time.sleep(1)
pyautogui.keyDown('command')
pyautogui.press('c')
pyautogui.keyUp('command')
letters += pyperclip.paste()
time.sleep(1)
pyautogui.keyDown('option')
pyautogui.keyDown('command')
pyautogui.press('left')
pyautogui.keyUp('option')
pyautogui.keyUp('command')
time.sleep(2)
pyautogui.press('down')
time.sleep(1)
self.letters_hx = letters
def glance_patient_search_results(self):
# telling the user that a glance is being done
txt.insert(END, "\n" + "OSLER -> Analysing the screen...")
parsed_screen = parse_screen()
sys_instr = gpt_prompts.patient_lookup_outcome
url = "https://api.openai.com/v1/chat/completions"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + openai.api_key
}
conversation = [{"role": "system", "content": sys_instr}]
conversation.append({"role": "user", "content": parsed_screen})
payload = {
"model": "gpt-4-32k",
"messages": conversation,
"temperature": 0,
"max_tokens": 1
# "stop": "\n"
}
response = requests.post(url, headers=headers, json=payload)
if response.status_code == 200:
suggested_command = response.json()["choices"][0]["message"]["content"]
usage = response.json()["usage"]
return suggested_command, usage
else:
print(f"Error: {response.status_code} - {response.text}")
# return an explicit empty result so the caller's tuple unpacking does not fail silently
return None, None
def new_consultation_mrn(self):
while True:
# screenshot and parse current screen
parsed_screen = parse_screen()
current_screen = match_screen(parsed_screen)
txt.insert(END, "\n" + "OSLER -> The current epic screen is: " + current_screen)
self.activate_application('Citrix Viewer')
if current_screen == 'schedule':
# press f10 for search activities bar
pyautogui.press('f10')
time.sleep(2)
# search for write note activity
pyperclip.copy('write')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
time.sleep(2)
# press enter to select write note activity
pyautogui.press('enter')
time.sleep(2)
if current_screen == 'patient_lookup':
print('global_mrn: ', self.global_mrn)
pyperclip.copy(self.global_mrn)
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
time.sleep(2)
pyautogui.press('enter')
time.sleep(1)
# at this point there are three different possible outcomes so need to use UIED to check and handle
mrn_search_outcome, usage = self.glance_patient_search_results()
print('mrn search outcome: ', mrn_search_outcome)
if mrn_search_outcome == '1':
txt.insert(END, "\n" + "OSLER -> Great! This MRN matches exactly one patient")
pyautogui.press('enter')
time.sleep(2)
pyautogui.press('enter')
time.sleep(8)
elif mrn_search_outcome == '2':
txt.insert(END, "\n" + "OSLER -> Sorry, this MRN matches more than one patient.")
break
elif mrn_search_outcome == '3':
txt.insert(END, "\n" + "OSLER -> Sorry, this MRN does not match any patient. Please try again.")
break
else:
print('error with processing the result from glancing')
if current_screen == 'chart_review':
# ctrl space
pyautogui.keyDown('ctrl')
pyautogui.press('space')
pyautogui.keyUp('ctrl')
time.sleep(2)
# search for write note activity
pyperclip.copy('write note')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
time.sleep(2)
# select write note activity
pyautogui.press('down')
time.sleep(1)
pyautogui.press('enter')
time.sleep(2)
pyautogui.press('enter')
time.sleep(5)
if current_screen == 'documentation':
# use the accept button as a unique marker to check if note is already opened
if not pyautogui.locateOnScreen("demo_screenshots/accept.png", confidence=0.7, grayscale=True):
self.click_screenshot('create_note.png', confidence=0.6)
time.sleep(2)
time.sleep(2)
pyautogui.press('f3')
time.sleep(2)
# release the function button
pyautogui.keyUp('fn')
time.sleep(1)
# add smart text medicines and problem list
pyautogui.write('.med', interval=0.1)
time.sleep(1)
pyautogui.press('enter')
time.sleep(1)
# add smart text medicines and problem list
pyautogui.write('.diagprobap', interval=0.1)
time.sleep(1)
pyautogui.press('enter')
time.sleep(1)
# copying the patient medical history and medications and saving to memory
pyautogui.keyDown('command')
pyautogui.press('a')
pyautogui.keyUp('command')
time.sleep(1)
pyautogui.keyDown('command')
pyautogui.press('c')
pyautogui.keyUp('command')
time.sleep(0.5)
pyautogui.press('right')
self.med_hx = pyperclip.paste()
break
def transcribe_consultation(self):
# activate Epic window
self.activate_application('Citrix Viewer')
# add header
pyperclip.copy('\n\n--------- Consultation Transcription ---------\n\n')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
self.transcribe()
# stop recording banner
pyperclip.copy('\n\n--------- Recording Stopped ---------\n\n')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
self.summarise_transcription()
self.consultation_entities = ask_gpt(self.consultation_transcript)
print('extracted metadata from consultation')
print(str(self.consultation_entities))
def place_orders(self):
# check if consultation has been done
if not self.consultation_done:
update_chat_text("OSLER -> You have not yet performed a consultation so this request is invalid")
return
# bring Epic window to the front
self.activate_application('Citrix Viewer')
orders_list = self.get_orders_from_gpt_call(self.consultation_entities)
for order in orders_list:
pyautogui.keyDown('command')
pyautogui.press('o')
pyautogui.keyUp('command')
time.sleep(1)
pyperclip.copy(order)
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(1)
pyautogui.press('enter')
pyautogui.keyDown('option')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
pyautogui.keyUp('option')
time.sleep(1)
pyautogui.press('escape')
def list_abilities(self):
abilities_msg = '''OSLER -> Hi! I'm Osler, your personal AI digital healthcare assistant. I can help you with the following:
\n- Starting a new consultation note
\n- Transcribing a consultation
\n- Placing orders mentioned in the consultation
\n- Filing diagnoses mentioned in the consultation
\n- Answering general questions about the consultation
'''
update_chat_text(abilities_msg)
def file_diagnoses(self):
# check if consultation has been done
if not self.consultation_done:
update_chat_text("OSLER -> You have not yet performed a consultation so this request is invalid")
return
diagnosis_list = self.get_diagnoses_from_gpt_call(self.consultation_entities)
# bring Epic window to the front
self.activate_application('Citrix Viewer')
pyautogui.keyDown('command')
pyautogui.press('g')
pyautogui.keyUp('command')
time.sleep(1)
for diagnosis in diagnosis_list:
pyperclip.copy(diagnosis)
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
time.sleep(1)
pyautogui.press('enter')
time.sleep(1)
pyautogui.press('enter')
time.sleep(1)
pyautogui.press('escape')
def summarise_transcription(self):
url = "https://api.openai.com/v1/chat/completions"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + openai.api_key
}
SOAP_user_msg_template = """
MEDICAL HISTORY:
------------------
$medical_history
------------------
CONSULTATION TRANSCRIPT:
------------------
$consultation_transcript
------------------
"""
system_instruction = '''
You are a medical office assistant drafting documentation for a physician. You will be provided with a MEDICAL HISTORY and a CONSULTATION TRANSCRIPT. DO NOT ADD any content that isn't specifically mentioned in the CONSULTATION TRANSCRIPT or the MEDICAL HISTORY. From the attached transcript and medical history, generate a SOAP note based on the template format below for the physician to review; include all the relevant information and do not include any information that isn't explicitly mentioned in the transcript. If nothing is mentioned, just return [NOT MENTIONED].
Template for Clinical SOAP Note Format:
Subjective: The “history” section
- HPI: include any mentioned symptom dimensions, a chronological narrative of the patient's complaints, and information obtained from other sources (always identify the source if not the patient).
- Pertinent past medical history.
- Pertinent review of systems mentioned, for example, “Patient has not had any stiffness or loss
of motion of other joints.”
- Current medications mentioned (list with daily dosages).
Objective: The physical exam and laboratory data section
- Vital signs including oxygen saturation when indicated.
- Focussed physical exam.
- All pertinent labs, x-rays, etc. completed at the visit.
Assessment / Problem List: Your assessment of the patient's problems
- Assessment: A one sentence description of the patient and major problem
- Problem list: A numerical list of problems identified
- All listed problems need to be supported by findings in the subjective and objective areas above. Try to take the assessment of the major problem to the highest level of diagnosis that you can, for example, “low back sprain caused by radiculitis involving left 5th LS nerve root.”
- Any differential diagnoses mentioned in the transcript; if none, just leave this blank as DIFFERENTIAL DIAGNOSIS:
Plan: Any plan for the patient mentioned in the transcript
- Divide the diagnostic and treatment plans for each differential diagnosis.
- Your treatment plan should include: patient education, pharmacotherapy if any, and other therapeutic procedures. You must also address plans for follow-up (next scheduled visit, etc.)
Please provide your response in a bullet point list for each heading.'''
user_message = SOAP_user_msg_template
user_message = user_message.replace("$medical_history", self.med_hx)
user_message = user_message.replace("$consultation_transcript", self.consultation_transcript)
conversation = [{"role": "system", "content": system_instruction}]
conversation.append({"role": "user", "content": user_message})
payload = {
"model": "gpt-4",
"messages": conversation,
"temperature": 0,
"max_tokens": 500
# "stop": "\n"
}
response = requests.post(url, headers=headers, json=payload)
if response.status_code == 200:
suggested_command = response.json()["choices"][0]["message"]["content"]
usage = response.json()["usage"]
# return suggested_command, usage
else:
print(f"Error: {response.status_code} - {response.text}")
# bail out: without a successful response there is no summary to paste below
return
# write consultation summary to notes
pyperclip.copy(suggested_command)
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
def summarise_transcription1(self):
# add header
pyperclip.copy('\n\n--------- Consultation Summary ---------\n\n')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
# get GPT consultation summary
meta_consultation_summarisation = 'INSTRUCTION: Summarise the below MEDICAL HISTORY and CONSULTATION TRANSCRIPT between patient and doctor into short notes, under the following headings: 1. Detailed summary of the patient symptoms 2. Medicines 3. Allergies 4. Family History 5. Social History 6. Examination findings 7. Impression 8. Plan\n'
meta_consultation_summarisation += 'MEDICAL HISTORY: \n' + self.med_hx
meta_consultation_summarisation += '\nCONSULTATION TRANSCRIPT: \n' + self.consultation_transcript + '\n\n'
response=openai.Completion.create(
model="text-davinci-003",
prompt=meta_consultation_summarisation,
max_tokens=2500,
temperature=0
)
consultation_summary = json.loads(str(response))
consultation_summary = consultation_summary['choices'][0]['text']
self.transcript_summary = consultation_summary
# write consultation summary to notes
pyperclip.copy(consultation_summary)
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
def get_orders_from_gpt_call(self, response):
orders_ls = []
for order in response['orders']:
orders_ls.append(order['name'])
return orders_ls
def get_diagnoses_from_gpt_call(self, response):
diagnoses_ls = []
for diagnosis in response['visit_diagnoses']:
diagnoses_ls.append(diagnosis['name'])
return diagnoses_ls
def get_meds_from_gpt_call(self, response):
meds_ls = []
for med in response['medicine']:
meds_ls.append(med['name'])
return meds_ls
def speak_orders_list(self, orders_list):
# The text to be converted into audio
text = 'The orders I got from the consultation were '
for i in range(len(orders_list)):
text += orders_list[i]
if i < len(orders_list) - 1:
text += ' and '
return text
def speak_meds_list(self, meds_list):
# The text to be converted into audio
text = 'The medicines I got from the consultation were '
for i in range(len(meds_list)):
text += meds_list[i]
if i < len(meds_list) - 1:
text += ' and '
return text
def query_orders(self):
# get the list of orders extracted from the consultation
orders_list = self.get_orders_from_gpt_call(self.consultation_entities)
# convert the list of orders into the text to speak
audio_text = self.speak_orders_list(orders_list)
# create the audio file from the text
# Language in which you want to convert
language = 'en'
# Pass the text and language to gTTS; slow=False produces speech at normal speed
myobj = gTTS(text=audio_text, lang=language, slow=False)
# Save the generated audio (gTTS writes MP3 data even though this filename ends in .wav)
myobj.save("extracted_orders_list.wav")
# get the length of the audio file to know how long to sleep while playing
audio = eyed3.load("extracted_orders_list.wav")
length_in_seconds = int(audio.info.time_secs)
#play the audio file
PLAYER.play_song("extracted_orders_list.wav")
time.sleep(length_in_seconds + 1)
def query_meds(self):
# get the list of orders extracted from the consultation
meds_list = self.get_meds_from_gpt_call(self.consultation_entities)
# convert the list of orders into the text to speak
audio_text = self.speak_meds_list(meds_list)
# create the audio file from the text
# Language in which you want to convert
language = 'en'
# Pass the text and language to gTTS; slow=False produces speech at normal speed
myobj = gTTS(text=audio_text, lang=language, slow=False)
# Save the generated audio (gTTS writes MP3 data even though this filename ends in .wav)
myobj.save("extracted_meds_list.wav")
# get the length of the audio file to know how long to sleep while playing
audio = eyed3.load("extracted_meds_list.wav")
length_in_seconds = int(audio.info.time_secs)
#play the audio file
PLAYER.play_song("extracted_meds_list.wav")
time.sleep(length_in_seconds + 1)
def write_referral(self):
self.activate_application('Citrix Viewer')
# press f10 for search activities bar
pyautogui.press('f10')
time.sleep(2)
# search for write note activity
pyperclip.copy('letter')
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
pyautogui.press('enter')
time.sleep(3)
# input MRN 111
pyperclip.copy(self.global_mrn)
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
time.sleep(3)
# press enter 3 times
pyautogui.press('enter')
time.sleep(3)
pyautogui.press('enter')
time.sleep(3)
pyautogui.press('enter')
time.sleep(8)
# select clinic letter
self.click_screenshot("select_clinic_letter.png", confidence=0.6)
time.sleep(3)
# add recipient as patient 1
pyautogui.keyDown('command')
pyautogui.keyDown('option')
pyautogui.press('1')
pyautogui.keyUp('command')
pyautogui.keyUp('option')
time.sleep(3)
#play the letter pending audio file
PLAYER.play_song("letter_pending.wav")
time.sleep(4)
# get GPT to write referral letter
referral_letter_prompt = "Write a letter to the patient's GP including all of the following information: the patient's background medical history, medications, a summary of the consultation and a plan:\n\n"
referral_letter_prompt += self.transcript_summary
response=openai.Completion.create(
model="text-davinci-003",
prompt=referral_letter_prompt,
max_tokens=1500,
temperature=0
)
referral_letter = json.loads(str(response))
print(referral_letter['choices'][0]['text'])
pyautogui.press('tab', presses=10, interval=0.2)
time.sleep(1)
pyperclip.copy(referral_letter['choices'][0]['text'])
pyautogui.keyDown('command')
pyautogui.press('v')
pyautogui.keyUp('command')
def init_ml_client(subscription_id, resource_group, workspace):
return MLClient(
DefaultAzureCredential(), subscription_id, resource_group, workspace
)
ml_client = init_ml_client(
"af5d9edb-37c3-40a4-a58f-5b97efbbac8d",
"hello-rg",
"osler-perception"
)
def read_image(path_to_image):
with open(path_to_image, "rb") as f:
return f.read()
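# Send a base64-encoded screenshot to the Azure ML online endpoint (UIED detector) and return the detected UI elements parsed from the JSON response.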
def predict_image_object_detection_sample(
ml_client,
endpoint_name,
deployment_name,
path_to_image
):
request_json = {
"image" : base64.encodebytes(read_image(path_to_image)).decode("utf-8")
}
request_fn = "request.json"
with open(request_fn, "w") as request_f:
json.dump(request_json, request_f)
response = ml_client.online_endpoints.invoke(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
request_file=request_fn
)
detections = json.loads(response)
return detections
def detect_text(path):
"""Detects text in the file."""
client = vision.ImageAnnotatorClient(credentials=credentials)
with open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
# print('Texts:')
# for text in texts:
# # print(f'\n"{text.description}"')
# vertices = ([f'({vertex.x},{vertex.y})'
# for vertex in text.bounding_poly.vertices])
# # print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
return response
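# Render each detected element as an HTML-like tag, e.g. <button id=3>text</button>, so the parsed screen can be passed to the LLM as plain text.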
# not including bboxes just yet
def html_from_UIE(df_row, idx):
elem_type = df_row['displayNames']
bbox = df_row['bboxes']
inner_text = df_row['predicted text']
html = f"""<{elem_type} id={idx}>{inner_text}</{elem_type}>"""
return html
def df_to_html(df):
s = ''
for index, row in df.iterrows():
s += html_from_UIE(row, index) + '\n'
return s
def bb_intersection_over_minArea(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the overlap ratio by dividing the intersection area
# by the area of the smaller of the two boxes
# (intersection over minimum area, not a true IoU)
iou = interArea / min(boxAArea, boxBArea)
# return the intersection over union value
return iou
def strls2str(strls):
s = ''
for elem in strls:
s += elem + ' '
return s[:-1]
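# Merge OCR text into the UIED detections: for each detected element box, collect the OCR words whose bounding boxes overlap it by more than 0.8 of the smaller box's area.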
def add_text_to_uie(response, ocr):
conf_threshold = 0
i = 0
ids = []
texts = []
labels = []
bboxes = []
for detection in response["boxes"]:
if detection["score"] < conf_threshold:
continue
text = []
box = detection["box"]
x_min, y_min, x_max, y_max = (
box["topX"],
box["topY"],
box["bottomX"],
box["bottomY"]
)
uie_box = [
x_min * 1280, y_min * 1080, x_max * 1280, y_max * 1080
]
for annotation in ocr.text_annotations[1:]:
top_left = annotation.bounding_poly.vertices[0]
bottom_right = annotation.bounding_poly.vertices[2]
ocr_box = [top_left.x, top_left.y, bottom_right.x, bottom_right.y]
iou = bb_intersection_over_minArea(uie_box, ocr_box)
if iou > 0.8:
text.append(annotation.description)
text = strls2str(text)
ids.append(i)
texts.append(text)
labels.append(detection["label"])
bboxes.append([x_min, y_min, x_max, y_max])
i += 1
response_df = pd.DataFrame.from_dict({
"displayNames": labels,
"bboxes": bboxes,
"predicted text": texts
})
return response_df
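# Screen-parsing pipeline: screenshot -> UIED element detection -> Google OCR -> merged dataframe -> HTML-like text for the LLM prompt.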
def parse_screen():
print('parsing screen...')
current_screen = ImageGrab.grab() # Take the screenshot
screen_size = current_screen.size
current_screen = current_screen.resize((RESIZE_WIDTH,RESIZE_HEIGHT))
current_screen.save('current_screen.png')
# send screenshot to UIED model to get UIEs
# print('sending screenshot to tortus UIED model...')
response = predict_image_object_detection_sample(
ml_client,
endpoint_name="uied",
deployment_name="yolov5",
path_to_image="current_screen.png"
)
# send screenshot to Google OCR to get text
# print('sending screenshot to google OCR...')
ocr = detect_text('current_screen.png')
# merge OCR with UIEs
# print('merging OCR and UIED...')
merged_df = add_text_to_uie(response, ocr)
merged_df.to_csv('uied.csv')
# convert to LLM template format
# print('converting to LLM template format from dataframe...')
llm_format = df_to_html(merged_df)
return llm_format
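# Match a user utterance to the closest supported intent by cosine similarity of sentence embeddings; returns the best intent and its score.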
def match_intent(utterance):
model = SentenceTransformer("sentence-transformers/multi-qa-mpnet-base-dot-v1")
intent_ls = [
intents.START_CONSULTATION_NOTE,
intents.TRANSCRIBE_CONSULTATION,
intents.SUMMARISE_CONSULTATION,
intents.PLACE_ORDERS,
intents.FILE_DIAGNOSES,
intents.ANSWER_QUESTIONS,
intents.WRITE_LETTER,
intents.QUERY_MEDS,
intents.QUERY_ORDERS,
intents.LIST_ABILITIES
]
intent_embeddings = model.encode(intent_ls)
utterance_embeddings = model.encode(utterance)
cos_scores = cosine_similarity(utterance_embeddings.reshape(1, -1), intent_embeddings)
cos_scores_torch = torch.from_numpy(cos_scores)
cos_max = torch.max(cos_scores_torch).item()
cos_argmax = torch.argmax(cos_scores_torch, dim=1)
cos = cos_argmax[0].item()
intent = intent_ls[cos]
return intent, cos_max
def match_screen(current_screen):
model = SentenceTransformer("sentence-transformers/multi-qa-mpnet-base-dot-v1")
screens_ls = [
epic_screens.PATIENT_LOOKUP,
epic_screens.SCHEDULE,
epic_screens.CHART_REVIEW,
epic_screens.DOCUMENTATION
]
screen_labels = ['patient_lookup', 'schedule', 'chart_review', 'documentation']
epic_embeddings = model.encode(screens_ls)
screen_embeddings = model.encode(current_screen)
cos_scores = cosine_similarity(screen_embeddings.reshape(1, -1), epic_embeddings)
cos_scores_torch = torch.from_numpy(cos_scores)
cos_max = torch.max(cos_scores_torch).item()
cos_argmax = torch.argmax(cos_scores_torch, dim=1)
cos = cos_argmax[0].item()
intent = screens_ls[cos]
screen_name = screen_labels[cos]
return screen_name
# Send function
def send():
msg = "You -> " + e.get()
txt.insert(END, "\n" + msg)
user = e.get().lower()
e.delete(0, END)
# Run the rest(user) function asynchronously using a thread
threading.Thread(target=msg2task, args=(user,)).start()
# rest(user)
def msg2task(user_msg):
# match the user command to intents
intent, score = match_intent(user_msg)
print(score)
if float(score) > 0.6:
# if matched intent is starting a new consult note, attempt extract mrn from user message
if intent == intents.START_CONSULTATION_NOTE:
actor.global_mrn = actor.extract_mrn_from_text(user_msg)
print('mrn: ', actor.global_mrn)
# display matched intent to user
osler_message = "It looks like you asked me to perform the task: "
txt.insert(END, "\n" + "OSLER -> " + osler_message + intent)
# e.delete(0, END)
# perform task
picovoice_thread.osler_thinking()
actor.act(intent)
else:
# display matched intent to user
txt.insert(END, "\n" + "OSLER -> This request is not currently supported.")
# GUI
root = Tk()
root.title("OSLER")
BG_GRAY = "#ABB2B9"
BG_COLOR = "#17202A"
TEXT_COLOR = "#EAECEE"
FONT = "Helvetica 14"
FONT_BOLD = "Helvetica 13 bold"
RESIZE_WIDTH = 1280
RESIZE_HEIGHT = 1080
# DEVICE_SIZE = (1440, 900)
DEVICE_SIZE = (1791, 1119)
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file('./tortus-374118-e15fd1ca5b60.json')
ACCESS_KEY = os.environ['PICOVOICE_ACCESS_KEY']
# to prevent the huggingface tokenizer parallelisation error
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# set google speech-to-text application credentials
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/Users/christophertan/Desktop/osler1/tortus-374118-e15fd1ca5b60.json"
actor = Actor()
global_mrn = ''
# labe1 = Label(root, bg=BG_COLOR, fg=TEXT_COLOR, text="OSLER", font=FONT_BOLD, pady=10, width=20, height=1).grid(
# row=0)
img1 = ImageTk.PhotoImage(file="./demo_screenshots/osler_sleep_smaller.png")
label = Label(root, bg=BG_COLOR, image=img1)
label.grid(row=0)
txt = Text(root, bg=BG_COLOR, fg=TEXT_COLOR, font=FONT, width=60)
txt.grid(row=1, column=0, columnspan=2)
scrollbar = Scrollbar(txt)
scrollbar.place(relheight=1, relx=0.974)
e = Entry(root, bg="#2C3E50", fg=TEXT_COLOR, font=FONT, width=55)
e.grid(row=2, column=0)
send_button = Button(root, text="Send", font=FONT_BOLD, bg=BG_GRAY,
command=send).grid(row=2, column=1)
picovoice_thread = PicovoiceThread(label, ACCESS_KEY)
picovoice_thread.start()
while not picovoice_thread.is_ready():
pass
root.mainloop() | [
"INSTRUCTION: You are a medical doctor who has just performed a consultation and is provided with a transcript of the consultation. Answer a question about the consultation as accurately as possible. The consultation transcritp and question about it will follow\n",
"Write a letter to the patients GP including all of the following information, include the patients background medical history, medications, a summary of the consultation and a plan:\n\n",
"\n\t\tMEDICAL HISTORY:\n\t\t------------------\n\t\t$medical_history\n\t\t------------------\n\n\t\tCONSULTATION TRANSCRIPT:\n\t\t------------------\n\t\t$consultation_transcript\n\t\t------------------\n\t\t",
"\nQUESTION ABOUT CONSULTATION: \nPLACEHOLDER?\n\n",
"\nCONSULTATION TRANSCRIPT: \n"
] |
2024-01-10 | chrisTORTUS/chatOsler | latency_breakdown.py | #!/usr/bin/env python3
#
# natbot.py
#
# Set OPENAI_API_KEY to your API key, and then run this from a terminal.
#
import time
import ast
from sys import argv, exit, platform
import openai
import os
import json
import requests
from dotenv import load_dotenv
import pyautogui
from PIL import ImageGrab, Image, ImageDraw
import pandas as pd
import numpy as np
from azure.ai.ml import MLClient
from azure.identity import DefaultAzureCredential
from google.cloud import vision
from gpt import ask_gpt
from io import StringIO
import random
import base64
load_dotenv()
count=0
# read the API key from the environment (loaded via dotenv above) rather than hardcoding a secret in source
openai.api_key = os.getenv("OPENAI_API_KEY")
system_instruction = """You are a computer controlling agent called OSLER.
You will be passed text representations of computer screens of a system called Epic - these will have various buttons, text and image items that one might find on a computer screen.
You will be given a task that you are trying to achieve - for example “open a new patient window on Epic” and it is your job to provide the step by step clicks of each button to complete this task.
You will be given the history of the current task you are trying to achieve, including the previous screen information and the previous buttons you have already pressed, and whether or not the screen changed after you performed that action. If you clicked a button and the screen didn't change, this is a failure. If you typed a small amount of text, and the screen didn't change, this is okay. If you type a large amount of text, and the screen changed, this is success and you can stop.
DO NOT TRY THE SAME THING TWICE IF IT IS NOT WORKING. TRY SOMETHING DIFFERENT. Make three attempts to do something but give up if all three are failures. When giving up just report [FAIL] and add your rationale as normal.
If there is an Accept button visible, always press that first.
You can interact with the screen by either clicking the mouse or typing.
You can also interact the screen by pressing specific combination of computer keys on the keyboard.
You can also respond to queries that the user might ask based on the information provided, e.g summarising the medical conditions or tests visible on the screen
You can use each of these tools by using the following commands where X is either the element on the screen you want to interact with or Y is text you want to type into a field. If you want to use a keyboard shortcut use the [PRESS] command and choose from the list below.
In general you should prioritise using [PRESS] to navigate, rather than [CLICK] as it is more reliable.
1. [CLICK] [X] - NEVER CLICK ON ANY ELEMENT YOU CANNOT SEE
2. [DOUBLECLICK] [X]
3. [TYPESUBMIT] - this is to type something, and then press Enter afterwards automatically to submit it
4. [WRITE] [Y] - this is to type a block of text
5. [PRESS] [Z]
6. [DONE] - when you think that the task is complete, close the workflow by using this command. For example, if you are searching for a patient and find their name and MRN displayed on the page, you are done searching.
In general if you ever get stuck try [PRESS][Enter] first.
Your task is to either return the next best action to take to achieve the task goal or the information requested, and include a description of what you are seeing on the screen and your reasoning as to why you've chosen that action.
Included in your memory is a list of successful previous tasks and the action sequences taken to achieve them.
Always return the response in the same format
Next Action To Take:
Rationale:
Information Requested: (if any)
The format of the screen content is highly simplified; all formatting elements are stripped.
Interactive elements such as links, inputs, buttons are represented like this:
<link id=1>text</link>
<button id=2>text</button>
<input id=3>text</input>
Images are rendered as their alt text like this:
<img id=4 alt=""/>
Text looks like this:
<text id=63>Give ONE capsule ( 1.25 mg ) ONC</text>
Here are some examples:
EXAMPLE 1:
==================================================
CURRENT BROWSER CONTENT:
------------------
<link id=1>About</link>
<link id=2>Store</link>
<link id=3>Gmail</link>
<link id=4>Images</link>
<link id=5>(Google apps)</link>
<link id=6>Sign in</link>
<img id=7 alt="(Google)"/>
<input id=8 alt="Search"></input>
<button id=9>(Search by voice)</button>
<button id=10>(Google Search)</button>
<button id=11>(I'm Feeling Lucky)</button>
<link id=12>Advertising</link>
<link id=13>Business</link>
<link id=14>How Search works</link>
<link id=15>Carbon neutral since 2007</link>
<link id=16>Privacy</link>
<link id=17>Terms</link>
<text id=18>Settings</text>
------------------
TASK: Find a 2 bedroom house for sale in Anchorage AK for under $750k
HISTORY: [CLICK][10] - Google Search. SCREEN CHANGED
Next Action To Take: [TYPE] [Anchorage Redfin]
Rationale: I am on the google home page. The action I want to take is to search for "anchorage redfin" in the google search bar
==================================================
EXAMPLE 2:
==================================================
CURRENT BROWSER CONTENT:
------------------
<link id=1>About</link>
<link id=2>Store</link>
<link id=3>Gmail</link>
<link id=4>Images</link>
<link id=5>(Google apps)</link>
<link id=6>Sign in</link>
<img id=7 alt="(Google)"/>
<input id=8 alt="Search"></input>
<button id=9>(Search by voice)</button>
<button id=10>(Google Search)</button>
<button id=11>(I'm Feeling Lucky)</button>
<link id=12>Advertising</link>
<link id=13>Business</link>
<link id=14>How Search works</link>
<link id=15>Carbon neutral since 2007</link>
<link id=16>Privacy</link>
<link id=17>Terms</link>
<text id=18>Settings</text>
------------------
TASK: Send an email to Dave
HISTORY: [CLICK][Gmail] SCREEN CHANGED
Next Action To Take: [CLICK] [3] - Gmail
Rationale: I am on the Google Home Page. The action I want to take is to send an email to Dave, so I need to click on the Gmail button.
==================================================
[Keyboard Shortcuts:
CTRL+Space Opens the menu bar if outside a patient
F10 Opens the menu bar if outside a patient
F3 FIND A WORD ON THE PAGE
F11 minimizes all windows
CTRL+1 Search for patient
CTRL+4 Telephone Call
CTRL+7 Sign my visits
CTRL+8 Slicer Dicer
CTRL+9 Remind me
CTRL+W Closes the workspace
CTRL+D Opens the More Activities Menu
CTRL+O Opens an order
CTRL+H Replace
CTRL+F Find
CTRL+- ZOOM OUT/IN
TAB MOVES CURSOR THROUGH BUTTONS
When typing
Home Moves cursor to start of line of text
End Moves cursor to end of line of text
CTRL+Home Moves cursor to start of all text
SHIFT+ Highlights to those positions
CTRL+End Moves cursor to end of all text
esc close menu
F3 Pops out window into a fullscreen
CTRL+ALT+O Review orders signed
On calendar free text boxes relative date shortcuts
T today
M month
Y year
Write Note Select Encounter
Add order Order menu
If anything is underlined press Alt + LETTER UNDERLINE
CTRL+SHIFT+1 Schedule
CTRL+SHIFT+2 Patient Lists
CTRL+SHIFT+3 Learning zone
CTRL+SHIFT+4 My messages
CTRL+SHIFT+5 Secure Chat
CTRL+SHIFT+P Problem List
CTRL+SHIFT+H History
Stop Mousing Around!
Give these keyboard shortcuts a try and save time!
Action Shortcut
Open Chart Search CTRL+Space
Log out CTRL+ALT+L
Secure CTRL+ALT+S
Close Workspace / Patient CTRL+W
Close Activity CTRL+Q
Toggle Workspace CTRL+Tab
Home Workspace CTRL+ALT+1
Second Workspace CTRL+ALT+2
Nth Workspace CTRL+ALT+number
Epic Button ALT
More Activities CTRL+D
Toolbar Actions ALT+T
Open Help Desk Report CTRL+ALT+SHIFT+H
What Time Is It Epic?
Have Epic quickly enter dates and times using shortcuts!
Time Shortcut Example
N for Now N is the time right now
T for Today T-1 is yesterday
W for Week W-2 is 14 days ago
M for Month M-1 is this day last month
MB for Month Begin MB-1 is the first day of last month
ME for Month End ME-1 is the last day of last month
Y for Year Y-40 is this day forty years ago
CTRL+O Add an Order
CTRL+SHIFT+A Allergies
CTRL+R Chart Review
CTRL+SPACE Chart Search
CTRL+SHIFT + G Diagnoses
CTRL+SHIFT + H History
CTRL+SHIFT + O Meds and Orders
F8 Move to Next Section
F7 Move to Previous Section
F9 Open or Close Current Section
CTRL+SHIFT+I Patient Instructions
CTRL+SHIFT+P Problem List
F5 Refresh Navigator
CTRL+S Sign Orders/Visit
CTRL+SHIFT+M My note
CTRL+SHIFT+E Sign everything it and close
CTRL+SHIFT+F Inform others
CTRL+SHIFT+Y Correspondence
CTRL+R Chart Review Page
CTRL+F Find
CTRL+G Adds a Diagnosis quickly
TAB Move down fields
SHIFT+TAB Move up fields]
ALT+A - Accept
ALT+C Cancel
In Epic blood results are called 'Labs'
==============
Here are some suggestions of previous successful actions (but do not follow them to the letter if they don't appear on the screen):
1. Task: ["Open up patient [X]"], Action Sequence: [1. [CLICK][Patient Lookup],[TYPESUBMIT][X],[PRESS][ENTER],[DONE],+-[CLICK,[OPEN CHART]]]
2. Task: ["Open up the chart for patient [X]"], Action Sequence: [1. [CLICK][Patient Lookup],[TYPESUBMIT][X],[PRESS][ENTER],[DONE], +-[CLICK,[OPEN CHART]]
3. Task: ["Write a new note for patient [X]"], Action Sequence: First open up the record for patient as per 2. Then [1. [CLICK][Write Note],you will see an encounter page sometimes, you need to [TYPESUBMIT][Domanski],then [PRESS][Enter],[TYPESUBMIT][.diagprobap],[TYPESUBMIT][.medscurrent], [DONE]]
==============
This is everything you've tried previously, DONE means successfully, FAIL means it failed.
$logs
"""
input_template = """
CURRENT BROWSER CONTENT:
------------------
$browser_content
------------------
TASK: $objective
HISTORY: $previous_action
"""
def read_image(path_to_image):
with open(path_to_image, "rb") as f:
return f.read()
def predict_image_object_detection_sample(
ml_client,
endpoint_name,
deployment_name,
path_to_image
):
request_json = {
"image" : base64.encodebytes(read_image(path_to_image)).decode("utf-8")
}
request_fn = "request.json"
with open(request_fn, "w") as request_f:
json.dump(request_json, request_f)
response = ml_client.online_endpoints.invoke(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
request_file=request_fn
)
detections = json.loads(response)
return detections
from PIL import Image
import imagehash
def get_image_dhash(image_path):
# Open the image
image = Image.open(image_path)
# Compute the dhash
dhash = imagehash.dhash(image)
return dhash
def detect_text(path):
"""Detects text in the file."""
client = vision.ImageAnnotatorClient(credentials=credentials)
with open(path, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
# print('Texts:')
# for text in texts:
# # print(f'\n"{text.description}"')
# vertices = ([f'({vertex.x},{vertex.y})'
# for vertex in text.bounding_poly.vertices])
# # print('bounds: {}'.format(','.join(vertices)))
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
return response
def bb_intersection_over_minArea(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the overlap ratio by dividing the intersection area
# by the area of the smaller of the two boxes
# (intersection over minimum area, not a true IoU)
iou = interArea / min(boxAArea, boxBArea)
# return the intersection over union value
return iou
def strls2str(strls):
s = ''
for elem in strls:
s += elem + ' '
return s[:-1]
def add_text_to_uie(response, ocr):
response_df = pd.DataFrame(data=dict(response.predictions[0]))
ls = []
for index, row in response_df.iterrows():
d = {'id': index, 'text': []}
uie_box = [row['bboxes'][0] * 1280, row['bboxes'][1] * 1080, row['bboxes'][2] * 1280, row['bboxes'][3] * 1080]
# uie_box = [row['left'] * 2, row['top'] * 2, row['right'] * 2, row['lower'] * 2]
# calculate the overlap against all other text on the screen
for annotation in ocr.text_annotations[1:]:
top_left = annotation.bounding_poly.vertices[0]
bottom_right = annotation.bounding_poly.vertices[2]
ocr_box = [top_left.x, top_left.y, bottom_right.x, bottom_right.y]
iou = bb_intersection_over_minArea(uie_box, ocr_box)
if iou > 0.8:
d['text'].append(annotation.description)
#ls.append(d)
text_string = strls2str(d['text'])
ls.append(text_string)
response_df['predicted text'] = ls
return response_df
# not including bboxes just yet
def html_from_UIE(df_row, idx):
elem_type = df_row['displayNames']
bbox = df_row['bboxes']
inner_text = df_row['predicted text']
html = f"""<{elem_type} id={idx}>{inner_text}</{elem_type}>"""
return html
def df_to_html(df):
s = ''
for index, row in df.iterrows():
s += html_from_UIE(row, index) + '\n'
return s
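# Build the chat payload (system instruction plus the screen/task/history message) and call the OpenAI chat completions API, returning the suggested action text and token usage.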
def get_gpt_command(objective, browser_content,previous_action):
# Now df_str is a string representation of the DataFrame
url = "https://api.openai.com/v1/chat/completions"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + openai.api_key
}
user_message = input_template
user_message = user_message.replace("$browser_content", browser_content)
user_message = user_message.replace("$objective", objective)
user_message = user_message.replace("$previous_action", previous_action)
#print(user_message)
conversation = [{"role": "system", "content": system_instruction}]
conversation.append({"role": "user", "content": user_message})
payload = {
"model": "gpt-4-32k",
"messages": conversation,
"temperature": 0,
"max_tokens": 1000
# "stop": "\n"
}
response = requests.post(url, headers=headers, json=payload)
if response.status_code == 200:
suggested_command = response.json()["choices"][0]["message"]["content"]
usage = response.json()["usage"]
return suggested_command, usage
else:
print(f"Error: {response.status_code} - {response.text}")
# return an explicit empty result so callers that unpack two values fail at the call site rather than with an opaque NoneType error
return None, None
# def take_action(action_string, elems_df):
# cmd = cmd.split("\n")[0]
# if cmd.startswith("SCROLL UP"):
# _crawler.scroll("up")
# elif cmd.startswith("SCROLL DOWN"):
# _crawler.scroll("down")
# elif cmd.startswith("CLICK"):
# commasplit = cmd.split(",")
# id = commasplit[0].split(" ")[1]
# pyautogui.click(x=100, y=200)
# _crawler.click(id)
# elif cmd.startswith("TYPE"):
# spacesplit = cmd.split(" ")
# id = spacesplit[1]
# text = spacesplit[2:]
# text = " ".join(text)
# # Strip leading and trailing double quotes
# text = text[1:-1]
# if cmd.startswith("TYPESUBMIT"):
# text += '\n'
# _crawler.type(id, text)
# time.sleep(2)
# takes bbox in [x0, y0, x1, y1] format
def get_center_of_bbox(bbox):
center = [0.5 * (bbox[0] + bbox[2]), 0.5 * (bbox[1] + bbox[3])]
return center
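# Parse the model's suggested action via the ask_gpt function-calling helper and execute it with pyautogui: click the detected element, type text, press keys, or stop.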
def take_action(action_string, elems_df, device_size):
cmd = ask_gpt(action_string)
action_list=cmd['action']
action_dict =action_list[0]
global action_type
global steps
action_type=action_dict['type']
action_ID=action_dict['ID']
steps.append(action_dict)
if action_type == 'CLICK':
id= action_ID
row = elems_df.iloc[int(id)]
#print(row)
norm_bbox = ast.literal_eval(row['bboxes'])
#print(norm_bbox)
device_bbox = [norm_bbox[0] * device_size[0], norm_bbox[1] * device_size[1], norm_bbox[2] * device_size[0], norm_bbox[3] * device_size[1]]
center = get_center_of_bbox(device_bbox)
#
pyautogui.moveTo(center[0], center[1], 0.1)
time.sleep(0.2)
pyautogui.doubleClick()
# _crawler.click(id)
#
pyautogui.moveTo(center[0], center[1], 0.1)
time.sleep(0.2)
pyautogui.doubleClick()
# _crawler.click(id)
elif action_type == 'WRITE':
pyautogui.typewrite(action_ID)
elif action_type =='PRESS':
keys=action_ID.split('+')
if len(keys) == 1:
pyautogui.press(action_ID)
else:
pyautogui.hotkey(*keys)
elif action_type =='TYPESUBMIT':
pyautogui.typewrite(action_ID)
pyautogui.press('enter')
elif action_type == 'DONE':
print('All done')
elif action_type == 'FAIL':
print("I couldn't make this happen, sorry")
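# NOTE: this second definition of add_text_to_uie overrides the DataFrame-based version defined earlier in this file.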
def add_text_to_uie(response, ocr):
conf_threshold = 0
i = 0
ids = []
texts = []
labels = []
bboxes = []
for detection in response["boxes"]:
if detection["score"] < conf_threshold:
continue
text = []
box = detection["box"]
x_min, y_min, x_max, y_max = (
box["topX"],
box["topY"],
box["bottomX"],
box["bottomY"]
)
uie_box = [
x_min * 1280, y_min * 1080, x_max * 1280, y_max * 1080
]
for annotation in ocr.text_annotations[1:]:
top_left = annotation.bounding_poly.vertices[0]
bottom_right = annotation.bounding_poly.vertices[2]
ocr_box = [top_left.x, top_left.y, bottom_right.x, bottom_right.y]
iou = bb_intersection_over_minArea(uie_box, ocr_box)
if iou > 0.8:
text.append(annotation.description)
text = strls2str(text)
ids.append(i)
texts.append(text)
labels.append(detection["label"])
bboxes.append([x_min, y_min, x_max, y_max])
i += 1
response_df = pd.DataFrame.from_dict({
"displayNames": labels,
"bboxes": bboxes,
"predicted text": texts
})
return response_df
def parse_screen():
task_label=random.randint(111111,999999)
# keep the previous screenshot for change detection; skip the rename on the first run when no screenshot exists yet
if os.path.exists('current_screen.png'):
os.rename('current_screen.png', 'previous_screen.png')
print('parsing screen...')
current_screen = ImageGrab.grab() # Take the screenshot
screen_size = current_screen.size
current_screen = current_screen.resize((RESIZE_WIDTH,RESIZE_HEIGHT))
current_screen.save('current_screen.png')
filename=str(task_label)+'-'+str((count+1))+'.png'
current_screen.save(filename)
dhash_step=get_image_dhash('current_screen.png')
dhash_hex = str(dhash_step)
before_UIED = time.time()
# send screenshot to UIED model to get UIEs
# print('sending screenshot to tortus UIED model...')
response = predict_image_object_detection_sample(
ml_client,
endpoint_name="uied",
deployment_name="yolov5",
path_to_image="current_screen.png"
)
after_UIED = time.time()
time_dict['UIED_times'].append(after_UIED - before_UIED)
# send screenshot to Google OCR to get text
# print('sending screenshot to google OCR...')
ocr = detect_text('current_screen.png')
after_OCR = time.time()
time_dict['OCR_times'].append(after_OCR - after_UIED)
# merge OCR with UIEs
# print('merging OCR and UIED...')
merged_df = add_text_to_uie(response, ocr)
merged_df.to_csv('uied.csv')
# convert to LLM template format
# print('converting to LLM template format from dataframe...')
llm_format = df_to_html(merged_df)
return llm_format
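# The two alternative parsers below run the UIED call and the OCR call concurrently (threads / asyncio executor) to reduce screen-parsing latency.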
import asyncio
from concurrent.futures import ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=2)
import threading
def parse_screen_threaded():
task_label = random.randint(111111, 999999)
if os.path.exists('current_screen.png'):
os.rename('current_screen.png', 'previous_screen.png')
print('parsing screen...')
current_screen = ImageGrab.grab() # Take the screenshot
screen_size = current_screen.size
current_screen = current_screen.resize((RESIZE_WIDTH,RESIZE_HEIGHT))
current_screen.save('current_screen.png')
filename = str(task_label)+'-'+str((count+1))+'.png'
current_screen.save(filename)
dhash_step = get_image_dhash('current_screen.png')
dhash_hex = str(dhash_step)
before_UIED = time.time()
# Use a list to store the results of the threads
results = [None, None]
# Define the functions for the threads
def predict_image():
results[0] = predict_image_object_detection_sample(
ml_client,
endpoint_name="uied",
deployment_name="yolov5",
path_to_image="current_screen.png"
)
def detect():
results[1] = detect_text('current_screen.png')
# Create the threads
thread1 = threading.Thread(target=predict_image)
thread2 = threading.Thread(target=detect)
# Start the threads
thread1.start()
thread2.start()
# Wait for both threads to finish
thread1.join()
thread2.join()
response, ocr = results
after_UIED = time.time()
time_dict['UIED_times'].append(after_UIED - before_UIED)
after_OCR = time.time()
time_dict['OCR_times'].append(after_OCR - after_UIED)
# merge OCR with UIEs
merged_df = add_text_to_uie(response, ocr)
merged_df.to_csv('uied.csv')
# convert to LLM template format
llm_format = df_to_html(merged_df)
return llm_format
async def parse_screen_async():
task_label = random.randint(111111, 999999)
if os.path.exists('current_screen.png'):
os.rename('current_screen.png', 'previous_screen.png')
print('parsing screen...')
current_screen = ImageGrab.grab() # Take the screenshot
screen_size = current_screen.size
current_screen = current_screen.resize((RESIZE_WIDTH, RESIZE_HEIGHT))
current_screen.save('current_screen.png')
filename = str(task_label) + '-' + str((count + 1)) + '.png'
current_screen.save(filename)
dhash_step = get_image_dhash('current_screen.png')
dhash_hex = str(dhash_step)
before_UIED = time.time()
loop = asyncio.get_event_loop()
# send screenshot to UIED model to get UIEs
predict_task = loop.run_in_executor(executor, predict_image_object_detection_sample, ml_client, "uied", "yolov5", "current_screen.png")
# send screenshot to Google OCR to get text
ocr_task = loop.run_in_executor(executor, detect_text, 'current_screen.png')
response, ocr = await asyncio.gather(predict_task, ocr_task)
after_UIED = time.time()
time_dict['UIED_times'].append(after_UIED - before_UIED)
after_OCR = time.time()
time_dict['OCR_times'].append(after_OCR - after_UIED)
# merge OCR with UIEs
merged_df = add_text_to_uie(response, ocr)
merged_df.to_csv('uied.csv')
# convert to LLM template format
llm_format = df_to_html(merged_df)
return llm_format
# initialise google ai
def init_ml_client(subscription_id, resource_group, workspace):
return MLClient(
DefaultAzureCredential(), subscription_id, resource_group, workspace
)
ml_client = init_ml_client(
"af5d9edb-37c3-40a4-a58f-5b97efbbac8d",
"hello-rg",
"osler-perception"
)
RESIZE_WIDTH = 1280
RESIZE_HEIGHT = 1080
DEVICE_SIZE = (1770, 1107)
# authenticate google vision API for OCR
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file('tortus-374118-e15fd1ca5b60.json')
# intialise dictionary to store experiment results
time_dict = {'run_id': [],'total_times': [], 'parse_screen_times': [], 'first_image_hash_times': [], 'second_image_hash_times': [], 'hamming_distance_times': [], 'gpt4_times': [], 'func_call_times': [], 'UIED_times': [], 'OCR_times': [], 'miscellaneous': []}
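# Benchmark loop: run the screenshot -> hash -> GPT-4 -> function-call pipeline ten times and record per-stage latencies.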
for i in range(10):
time_dict['run_id'].append(i)
start_time = time.time()
# take a screenshot of the current screen
# llm_format = parse_screen()
llm_format = parse_screen_threaded()
time_after_parse = time.time()
screen_parsing_time = time_after_parse - start_time
time_dict['parse_screen_times'].append(screen_parsing_time)
# print("----------------\n" + llm_format + "\n----------------\n")
# Compute the dhash of the two images
image1_dhash = get_image_dhash('current_screen.png')
time_after_first_hash = time.time()
first_image_hashing_time = time_after_first_hash - time_after_parse
time_dict['first_image_hash_times'].append(first_image_hashing_time)
image2_dhash = get_image_dhash('previous_screen.png')
time_after_second_hash = time.time()
second_image_hashing_time = time_after_second_hash - time_after_first_hash
time_dict['second_image_hash_times'].append(second_image_hashing_time)
# Compute the Hamming distance between the two hashes
hamming_distance = image1_dhash - image2_dhash
time_after_hamming_distance = time.time()
hamming_distance_time = time_after_hamming_distance - time_after_second_hash
time_dict['hamming_distance_times'].append(hamming_distance_time)
# send text representation of screen in LLM format to GPT4 to get the action string
print('calling GPT4...')
gpt_response, usage = get_gpt_command("start a new note", llm_format, "placeholder")
print('gpt response: ', gpt_response)
time_after_gpt_call = time.time()
gpt_call_time = time_after_gpt_call - time_after_hamming_distance
time_dict['gpt4_times'].append(gpt_call_time)
# parse the GPT4 response
response_elems = gpt_response.split(":")
#for elems in response_elems:
#print("item "+ elems)
action_description = response_elems[1]
gpt_cmd = response_elems[-1].strip()
# make the gpt function call in take_action function
cmd = ask_gpt(gpt_response)
time_after_func_call = time.time()
function_call_time = time_after_func_call - time_after_gpt_call
time_dict['func_call_times'].append(function_call_time)
# take_action(gpt_response, merged_df, DEVICE_SIZE)
final_time = time.time()
total_time = final_time - start_time
time_dict['total_times'].append(total_time)
miscellaneous_time = total_time - screen_parsing_time - first_image_hashing_time - second_image_hashing_time - gpt_call_time - function_call_time
time_dict['miscellaneous'].append(miscellaneous_time)
print(time_dict)
# convert time_dict to pandas dataframe
time_df = pd.DataFrame(time_dict)
print(time_df)
# save dataframe to csv
time_df.to_csv('latency_breakdown.csv')
# calculate mean times
mean_times = time_df.mean()
print(mean_times)
| [
"\nCURRENT BROWSER CONTENT:\n------------------\n$browser_content\n------------------\n\nTASK: $objective\nHISTORY: $previous_action\n\n"
] |
2024-01-10 | dawoshi/Tianchi-LLM-QA | bm25_retriever.py | #!/usr/bin/env python
# coding: utf-8
from langchain.retrievers import BM25Retriever
from langchain.schema import Document
from pdf_parse import DataProcess
import jieba
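# Wraps LangChain's BM25Retriever: documents are tokenised with jieba for retrieval, while the original text is kept so full passages can be returned.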
class BM25(object):
def __init__(self, documents):
docs = []
full_docs = []
for idx, line in enumerate(documents):
line = line.strip("\n").strip()
if(len(line)<5):
continue
tokens = " ".join(jieba.cut_for_search(line))
# docs.append(Document(page_content=tokens, metadata={"id": idx, "cate":words[1],"pageid":words[2]}))
docs.append(Document(page_content=tokens, metadata={"id": idx}))
# full_docs.append(Document(page_content=words[0], metadata={"id": idx, "cate":words[1], "pageid":words[2]}))
words = line.split("\t")
full_docs.append(Document(page_content=words[0], metadata={"id": idx}))
self.documents = docs
self.full_documents = full_docs
self.retriever = self._init_bm25()
# Initialize the BM25 knowledge base
def _init_bm25(self):
return BM25Retriever.from_documents(self.documents)
# Get the top-k documents and their scores for a query
def GetBM25TopK(self, query, topk):
self.retriever.k = topk
query = " ".join(jieba.cut_for_search(query))
ans_docs = self.retriever.get_relevant_documents(query)
ans = []
for line in ans_docs:
ans.append(self.full_documents[line.metadata["id"]])
return ans
if __name__ == "__main__":
# bm2.5
dp = DataProcess(pdf_path = "/root/autodl-tmp/codes/data/train_a.pdf")
dp.ParseBlock(max_seq = 1024)
dp.ParseBlock(max_seq = 512)
print(len(dp.data))
dp.ParseAllPage(max_seq = 256)
dp.ParseAllPage(max_seq = 512)
print(len(dp.data))
dp.ParseOnePageWithRule(max_seq = 256)
dp.ParseOnePageWithRule(max_seq = 512)
print(len(dp.data))
data = dp.data
bm25 = BM25(data)
res = bm25.GetBM25TopK("座椅加热", 6)  # query: "seat heating"
print(res)
| [] |
2024-01-10 | dawoshi/Tianchi-LLM-QA | faiss_retriever.py | #!/usr/bin/env python
# coding: utf-8
from langchain.schema import Document
from langchain.vectorstores import Chroma,FAISS
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from pdf_parse import DataProcess
import torch
# from bm25_retriever import BM25
class FaissRetriever(object):
def __init__(self, model_path, data):
self.embeddings = HuggingFaceEmbeddings(
model_name = model_path,
model_kwargs = {"device":"cuda"}
)
docs = []
for idx, line in enumerate(data):
line = line.strip("\n").strip()
words = line.split("\t")
docs.append(Document(page_content=words[0], metadata={"id": idx}))
self.vector_store = FAISS.from_documents(docs, self.embeddings)
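# The embedding model is only needed while building the FAISS index; drop it and empty the CUDA cache to free GPU memory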
del self.embeddings
torch.cuda.empty_cache()
def GetTopK(self, query, k):
context = self.vector_store.similarity_search_with_score(query, k=k)
return context
def GetvectorStore(self):
return self.vector_store
if __name__ == "__main__":
base = "/root/autodl-tmp/codes"
model_name=base + "/pre_train_model/m3e-large" #text2vec-large-chinese
dp = DataProcess(pdf_path = base + "/data/train_a.pdf")
dp.ParseBlock(max_seq = 1024)
dp.ParseBlock(max_seq = 512)
print(len(dp.data))
dp.ParseAllPage(max_seq = 256)
dp.ParseAllPage(max_seq = 512)
print(len(dp.data))
dp.ParseOnePageWithRule(max_seq = 256)
dp.ParseOnePageWithRule(max_seq = 512)
print(len(dp.data))
data = dp.data
faissretriever = FaissRetriever(model_name, data)
# bm25 = BM25(data)
faiss_ans = faissretriever.GetTopK("如何预防新冠肺炎", 6)  # query: "how to prevent COVID-19"
print(faiss_ans)
faiss_ans = faissretriever.GetTopK("交通事故如何处理", 6)  # query: "how to handle a traffic accident"
print(faiss_ans)
faiss_ans = faissretriever.GetTopK("吉利集团的董事长是谁", 6)  # query: "who is the chairman of Geely Group"
print(faiss_ans)
faiss_ans = faissretriever.GetTopK("吉利汽车语音组手叫什么", 6)  # query: "what is Geely's voice assistant called"
print(faiss_ans)
# bm25_ans = bm25.GetBM25TopK("座椅加热", 6)  # query: "seat heating"
# ans = reRank(6, bm25_ans, faiss_ans)
| [] |
2024-01-10 | afonsosamarques/action-robust-decision-transformer | codebase~baselines~arrl_sgld~action_noise.py | import numpy as np
# Taken from OpenAI baselines - baselines/ddpg/noise.py
class ActionNoise(object):
def reset(self):
pass
class NormalActionNoise(ActionNoise):
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def __call__(self):
return np.random.normal(self.mu, self.sigma)
def __repr__(self):
return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
class OrnsteinUhlenbeckActionNoise(ActionNoise):
def __init__(self, mu, sigma, theta=.15, dt=1e-2, x0=None):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dt = dt
self.x0 = x0
self.reset()
def __call__(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
self.x_prev = x
return x
def reset(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)
def __repr__(self):
return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
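# Minimal usage sketch (illustrative only, not part of the original file): OU noise is
# typically added to a deterministic policy's action to get temporally correlated
# exploration, e.g.
#   noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(act_dim), sigma=0.2 * np.ones(act_dim))
#   noise.reset()                           # at the start of each episode
#   noisy_action = policy_action + noise()  # act_dim / policy_action are placeholders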
| [] |
2024-01-10 | gilgamesh7/openai_example_applications | categorise_movies.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
movie_to_categorise = input("Enter name of movie for categorisation : ")
categorisation_prompt = f"Enter name of movie for categorisation as Action , Thriller , Drama , Horror , Western , Science Fiction , Drama , Crime , Comedy , Romance , Adventure, Slasher :\n\n{movie_to_categorise}\n\n1."
response = openai.Completion.create(
model="text-davinci-003",
prompt=categorisation_prompt,
temperature=0,
max_tokens=64,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
print(f'1.{response["choices"][0]["text"]}')
| [
"Enter name of movie for categorisation as Action , Thriller , Drama , Horror , Western , Science Fiction , Drama , Crime , Comedy , Romance , Adventure, Slasher :\n\nPLACEHOLDER\n\n1."
] |
2024-01-10 | gilgamesh7/openai_example_applications | mood_to_colour.py | import seaborn as sns
from PIL import Image, ImageDraw
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
mood_description = input("Describe a mood or situation you want a mood for (Eg: colour like a blue sky at dusk) : ")
mood_prompt = f"The CSS code for a color like {mood_description}:\n\nbackground-color: #"
response = openai.Completion.create(
model="text-davinci-003",
prompt=mood_prompt,
temperature=0,
max_tokens=64,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=[";"]
)
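# The model continues the prompt after "background-color: #", so the completion text is just the hex digits (generation stops at ";")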
palette = f"#{response['choices'][0]['text']}"
# Render a 100x100 swatch filled with the suggested colour
swatch = Image.new(mode="RGB", size=(100, 100), color=palette)
swatch.show() | [
"The CSS code for a color like PLACEHOLDER:\n\nbackground-color: #"
] |
2024-01-10 | gilgamesh7/openai_example_applications | parse_unstructured_data.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
unstructured_data_prompt = """
A table summarizing the crime in New Zealand Cities :
How Safe Is New Zealand: New Zealand Crime Rates
Making a move to beautiful New Zealand and unsure of what the New Zealand crime rates are like? You first need to know about the origins of criminal law and the judiciary system in New Zealand. The New Zealand criminal law is rooted in English Criminal law and incorporated in legislation in 1893. The Crimes Act 1961 and its modifications define the majority of felonies that result in imprisonment in New Zealand. This act addressed criminal law, crime statistics, the origin and aspects of crime, prosecution, penalty, and public views of offences.
In terms of enforcing bodies, the New Zealand Police is the principal administrative body. The New Zealand Police Service oversees decreasing crime and improving public security. The land, sea, and air operating workforce trainees are up to western standards, and you can be assured that misbehaviours such as bullying and extortion involving police officers are not primary concerns. The New Zealand Bill of Rights Act 1990 guarantees detainees several rights, including access to fair trial assistance.
Crime rate in New Zealand – an overview
Financial crises, underemployment, natural calamities, additional security, evolving political shifts, greater enforcement, and numerous heritage and cultural changes have all influenced crime rates. A person’s socio-economic life alone influences their ability to commit a crime. Someone going through a financial crisis or suffering from poor mental health is more likely to commit a crime. By building up a reliable socio-economic system in a country, you (kind of) guarantee a crime-free zone for the citizens. New Zealand, a developed country, provides a high quality of life to its people. Thereby, it comes as no surprise that it has been in the top ranking of the global peace index for so many years now.
The Institute for Economics and Peace publishes a study known as the Global Peace Index, which assesses the relative peacefulness of international locations. According to the Global Peace Index, New Zealand is ranked the second safest country worldwide in 2021 and has been part of the top five since 2008.
As per the statistics by Numbeo, New Zealand has a crime index of 43.03 and a safety index of 56.97. Hence, it is generally safe to roam around in broad daylight comparative to at night. Other crimes such as theft, racism, corruption, bribery & assault have low to moderate rate indexes. The crime rate, however, had an exponential increase in the past three years. The data provided comes from individual public experience over the span of three years.
Here are the statistics records from 2020 by the New Zealand Police. Do note that data for 2021 is yet not available.
The most common types of crimes determined by the New Zealand Crime and Safety Survey (NZCASS) are sexual assault, minor violence, and burglary. The murder/ homicide rate has been fluctuating over the past few years. The laws take serious action against such felonies and can be punishable for up to 20 years of imprisonment. The good news, however, is that weapon assault is rare in New Zealand (especially when compared against other developed nations such as the USA) and charges for firearm felonies constitute a minute percentage of the crime rates in New Zealand.
When compared in respect to the total number of crimes reported to the United States, the statistics as given by Nation Master show the following:
Country
Total crime reports
Rank
New Zealand
427,230
25th in the world
United States of America
11.88 million
1st in the world
(28 times more than New Zealand.)
Statistics based on 2015 average.
The violent crimes, including the rape and homicide index of New Zealand, is low compared to the United States, so we can say it’s safer for women, elders, and children. Being charged for an offence in New Zealand triggers a set of actions and makes you liable in a court of law for further legal proceedings where you submit your plea.
On the bright side, crime levels actually plummeted when New Zealand went into Covid-19 level 4 lockdown in 2020. Many offences remained at lower-than-usual rates by the end of 2020. In January 2020, the number to victims recorded by police was 28,342 – the highest since the data set began in mid 2014. However, this number soon plunged to 12,323 during lockdown in April – the lowest on record.
In case of any emergency, whether police, fire, or Ambulance, dial 111. Calling 111 does not require you to have credit and you can call it for free. Of course, make sure you have a valid emergency for calling!
This article will cover in-depth the crime rate in major cities of New Zealand. It will also give you an idea of if and how these crime rates might affect you. Keep scrolling to read the rest if you are looking forward to booking a flight to New Zealand or deciding on permanent residence.
Let’s cover the statistics for the five major cities in New Zealand, including the Capital, Wellington.
New Zealand crime rates: Wellington
<img decoding="async" src="https://images.unsplash.com/photo-1589871973318-9ca1258faa5d?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1740&q=80" alt="New zealand sunset city skyline"/>
Photo credit: Sulthan Auliya / Unsplash
Wellington is the capital of New Zealand, and whilst it is a little city, it is a city rich in natural beauty and sights to see, especially given Wellington’s geographical status of being a valley bordered by hillside regions. In fact, all of its best sights are nearby one another!
The Crime Index of Wellington is 28.21, and the safety index is 71.79. The level of crime is low and didn’t increase as much in the past three years. According to Numbeo Wellington, the safety index of walking alone during daylight is 87.11 (Very High), and the safety index of walking alone during the night is 60.78 (High).
The statistic also reveals the overall quality of life to be 194.76, which makes it a suitable place to live as you don’t have to worry as much about theft, assault, or being a victim of hate crimes.
Robbery and shoplifting may be the only noticeable offences in Wellington. Moreover, given that the city is the capital and hub to a diverse population, sexual assault cases mainly focused on the area surrounding the Cuba-Courtenay precinct do occur at late night.
Wellington Police Station Address: 41 Victoria Street, Wellington Central, Wellington 6011, New ZealandTel: 105Opening hours: Open 24 hours (Daily)
New Zealand crime rates: Auckland
Auckland, also known as Tamaki Makaurau, is the most populous city of New Zealand. A blend of natural wonders and urban adventures that sprawls over volcanoes and around twin port cities, it is a multi-cultural melting pot of cuisine, entertainment, history, and artistry.
The Crime Index of Auckland is 45.58, and its safety index is 54.42. One would naturally presume, looking at the statistic given, Auckland loses out on the safety index as compared to Wellington, the country’s capital. According to Numbeo Auckland, the safety index for walking alone during daylight is 71.85 (a relatively high statistic) and the safety index for walking alone during the night is 42.97, which is moderate.
The Auckland Quality of Life Index is 164.19, which may be a little less compared to Wellington but nonetheless on the broader spectrum remains a high standard. The safety and security of Auckland makes this city a wonderful choice to relocate to – as long as you are able to manage the high living costs that come along with it.
Auckland Police Station Address: 13-15 College Hill, Hargreaves Street, Freemans Bay, Auckland 1011, New ZealandTel: +64 9-302 6400Opening hours: Open 24 hours (Daily)
New Zealand crime rates: Christchurch
<img decoding="async" src="https://images.unsplash.com/photo-1460853039135-e25ff9d63405?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1548&q=80" alt="new zealand scenic ocean view "/>
Photo credit: Edward Manson / Unsplash
Christchurch has its location on the South Island, unlike Wellington and Auckland, which are both found on the North Island. The Christchurch Crime Index is 39.71, and its safety index is 60.29. Its safety index lies somewhere between Wellington and Auckland. Like Auckland, the safety index for walking alone during daylight is 75.21, which is high, and the safety index for walking alone during the night is 44.53, which is moderate. The quality of life index of Christchurch is 186.73, which places it just below Wellington and above Auckland.
Christchurch Police StationAddress: 40 Lichfield Street, Christchurch Central City, Christchurch 8011, New ZealandTel: +64 3-363 7400Opening hours: 7 a.m. to 9 p.m. (Mon. to Fri.); 8:30 a.m. to 6 p.m. (Sat. + Sun.)
New Zealand crime rates: Rotorua
Rotorua is famous for its boiling mud pools, erupting geysers, thermal springs, intriguing pristine lakes, as well as ancient woodlands. Maori culture, which dominates New Zealand society, is prevalent in Rotorua and be seen affecting everything from cuisine to traditions and speech.
According to Numbeo Rotorua, the city has a Crime Index of 50.61 and a safety index of 49.39. Despite being one of the prime tourist attractions in the country, Rotorua has the highest crime rate out of any city in New Zealand. However, in the bigger picture of comparison with other cities worldwide, you can see how safe Rotorua is comparatively. Major cities like Chicago and London all have higher Crime Indexes as compared to Rotorua.
Rotorua Police StationAddress: 1190/1214 Fenton Street, Rotorua 3010, New ZealandTel: +64 7-349 9554Opening hours: 8 a.m. to 4 p.m. (Daily)
New Zealand crime rates: Queenstown
<img decoding="async" src="https://images.unsplash.com/photo-1593755673003-8ca8dbf906c2?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1740&q=80" alt="new zealand city crowded "/>
Photo credit: Sulthan Auliya / Unsplash
Queenstown is a tourist hotspot in Otago, New Zealand’s South Island, recognised for its enterprise tourism, specifically venture and skiing. Due to the city’s large tourist appeal, it’s a little costly when compared to other cities in New Zealand.
According to Numbeo Queenstown, it has a Crime Index of 20.22 and a safety Index of 79.78. Unfortunately, there has been a definite upswing in sexual violence and assaults over the last year in the city, with an estimate of at least three cases a month in Queenstown. The majority of sexual crimes are being committed when the victim is drunk or using drugs, often after a night out or at kick-ons, when friends put their intoxicated mate to bed. Queenstown police have launched a ‘Don’t Guess the Yes’ campaign, aimed squarely at tackling the perpetrators of sex crimes.
Queenstown Police Station Address: 11 Camp Street, Queenstown 9300, New ZealandTel: +64 3-441 1600Opening hours: 8 a.m. to 5 p.m. (Mon. to Fri.); 9 a.m. to 5 p.m. (Sat. + Sun.)
Feeling ready to make the big move?
Head over here to begin your journeyOr start ticking off your relocation checklist here
Still unsure?
Learn more about New Zealand here Explore more destinations here \n\n
| City | Crime Index | Safety Index | Police Station Address |
"""
response = openai.Completion.create(
model="text-davinci-003",
prompt=unstructured_data_prompt,
temperature=0,
max_tokens=100,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
print(f'{response["choices"][0]["text"]}')
| [
"\nA table summarizing the crime in New Zealand Cities :\nHow Safe Is New Zealand: New Zealand Crime Rates\nMaking a move to beautiful New Zealand and unsure of what the New Zealand crime rates are like? You first need to know about the origins of criminal law and the judiciary system in New Zealand. The New Zealand criminal law is rooted in English Criminal law and incorporated in legislation in 1893. The Crimes Act 1961 and its modifications define the majority of felonies that result in imprisonment in New Zealand. This act addressed criminal law, crime statistics, the origin and aspects of crime, prosecution, penalty, and public views of offences.\nIn terms of enforcing bodies, the New Zealand Police is the principal administrative body. The New Zealand Police Service oversees decreasing crime and improving public security. The land, sea, and air operating workforce trainees are up to western standards, and you can be assured that misbehaviours such as bullying and extortion involving police officers are not primary concerns. The New Zealand Bill of Rights Act 1990 guarantees detainees several rights, including access to fair trial assistance.\nCrime rate in New Zealand – an overview\nFinancial crises, underemployment, natural calamities, additional security, evolving political shifts, greater enforcement, and numerous heritage and cultural changes have all influenced crime rates. A person’s socio-economic life alone influences their ability to commit a crime. Someone going through a financial crisis or suffering from poor mental health is more likely to commit a crime. By building up a reliable socio-economic system in a country, you (kind of) guarantee a crime-free zone for the citizens. New Zealand, a developed country, provides a high quality of life to its people. Thereby, it comes as no surprise that it has been in the top ranking of the global peace index for so many years now.\nThe Institute for Economics and Peace publishes a study known as the Global Peace Index, which assesses the relative peacefulness of international locations. According to the Global Peace Index, New Zealand is ranked the second safest country worldwide in 2021 and has been part of the top five since 2008.\nAs per the statistics by Numbeo, New Zealand has a crime index of 43.03 and a safety index of 56.97. Hence, it is generally safe to roam around in broad daylight comparative to at night. Other crimes such as theft, racism, corruption, bribery & assault have low to moderate rate indexes. The crime rate, however, had an exponential increase in the past three years. The data provided comes from individual public experience over the span of three years.\nHere are the statistics records from 2020 by the New Zealand Police. Do note that data for 2021 is yet not available.\nThe most common types of crimes determined by the New Zealand Crime and Safety Survey (NZCASS) are sexual assault, minor violence, and burglary. The murder/ homicide rate has been fluctuating over the past few years. The laws take serious action against such felonies and can be punishable for up to 20 years of imprisonment. 
The good news, however, is that weapon assault is rare in New Zealand (especially when compared against other developed nations such as the USA) and charges for firearm felonies constitute a minute percentage of the crime rates in New Zealand.\nWhen compared in respect to the total number of crimes reported to the United States, the statistics as given by Nation Master show the following:\nCountry\nTotal crime reports\nRank\nNew Zealand\n427,230\n25th in the world\nUnited States of America\n11.88 million\n1st in the world \n\n(28 times more than New Zealand.)\nStatistics based on 2015 average.\nThe violent crimes, including the rape and homicide index of New Zealand, is low compared to the United States, so we can say it’s safer for women, elders, and children. Being charged for an offence in New Zealand triggers a set of actions and makes you liable in a court of law for further legal proceedings where you submit your plea.\nOn the bright side, crime levels actually plummeted when New Zealand went into Covid-19 level 4 lockdown in 2020. Many offences remained at lower-than-usual rates by the end of 2020. In January 2020, the number to victims recorded by police was 28,342 – the highest since the data set began in mid 2014. However, this number soon plunged to 12,323 during lockdown in April – the lowest on record.\nIn case of any emergency, whether police, fire, or Ambulance, dial 111. Calling 111 does not require you to have credit and you can call it for free. Of course, make sure you have a valid emergency for calling!\nThis article will cover in-depth the crime rate in major cities of New Zealand. It will also give you an idea of if and how these crime rates might affect you. Keep scrolling to read the rest if you are looking forward to booking a flight to New Zealand or deciding on permanent residence.\nLet’s cover the statistics for the five major cities in New Zealand, including the Capital, Wellington.\nNew Zealand crime rates: Wellington\n\n<img decoding=\"async\" src=\"https://images.unsplash.com/photo-1589871973318-9ca1258faa5d?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1740&q=80\" alt=\"New zealand sunset city skyline\"/>\nPhoto credit: Sulthan Auliya / Unsplash\nWellington is the capital of New Zealand, and whilst it is a little city, it is a city rich in natural beauty and sights to see, especially given Wellington’s geographical status of being a valley bordered by hillside regions. In fact, all of its best sights are nearby one another!\nThe Crime Index of Wellington is 28.21, and the safety index is 71.79. The level of crime is low and didn’t increase as much in the past three years. According to Numbeo Wellington, the safety index of walking alone during daylight is 87.11 (Very High), and the safety index of walking alone during the night is 60.78 (High).\nThe statistic also reveals the overall quality of life to be 194.76, which makes it a suitable place to live as you don’t have to worry as much about theft, assault, or being a victim of hate crimes.\nRobbery and shoplifting may be the only noticeable offences in Wellington. 
Moreover, given that the city is the capital and hub to a diverse population, sexual assault cases mainly focused on the area surrounding the Cuba-Courtenay precinct do occur at late night.\nWellington Police Station Address: 41 Victoria Street, Wellington Central, Wellington 6011, New ZealandTel: 105Opening hours: Open 24 hours (Daily)\nNew Zealand crime rates: Auckland\nAuckland, also known as Tamaki Makaurau, is the most populous city of New Zealand. A blend of natural wonders and urban adventures that sprawls over volcanoes and around twin port cities, it is a multi-cultural melting pot of cuisine, entertainment, history, and artistry.\nThe Crime Index of Auckland is 45.58, and its safety index is 54.42. One would naturally presume, looking at the statistic given, Auckland loses out on the safety index as compared to Wellington, the country’s capital. According to Numbeo Auckland, the safety index for walking alone during daylight is 71.85 (a relatively high statistic) and the safety index for walking alone during the night is 42.97, which is moderate.\nThe Auckland Quality of Life Index is 164.19, which may be a little less compared to Wellington but nonetheless on the broader spectrum remains a high standard. The safety and security of Auckland makes this city a wonderful choice to relocate to – as long as you are able to manage the high living costs that come along with it.\nAuckland Police Station Address: 13-15 College Hill, Hargreaves Street, Freemans Bay, Auckland 1011, New ZealandTel: +64 9-302 6400Opening hours: Open 24 hours (Daily)\nNew Zealand crime rates: Christchurch\n\n<img decoding=\"async\" src=\"https://images.unsplash.com/photo-1460853039135-e25ff9d63405?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1548&q=80\" alt=\"new zealand scenic ocean view \"/>\nPhoto credit: Edward Manson / Unsplash\nChristchurch has its location on the South Island, unlike Wellington and Auckland, which are both found on the North Island. The Christchurch Crime Index is 39.71, and its safety index is 60.29. Its safety index lies somewhere between Wellington and Auckland. Like Auckland, the safety index for walking alone during daylight is 75.21, which is high, and the safety index for walking alone during the night is 44.53, which is moderate. The quality of life index of Christchurch is 186.73, which places it just below Wellington and above Auckland.\nChristchurch Police StationAddress: 40 Lichfield Street, Christchurch Central City, Christchurch 8011, New ZealandTel: +64 3-363 7400Opening hours: 7 a.m. to 9 p.m. (Mon. to Fri.); 8:30 a.m. to 6 p.m. (Sat. + Sun.)\nNew Zealand crime rates: Rotorua\nRotorua is famous for its boiling mud pools, erupting geysers, thermal springs, intriguing pristine lakes, as well as ancient woodlands. Maori culture, which dominates New Zealand society, is prevalent in Rotorua and be seen affecting everything from cuisine to traditions and speech.\nAccording to Numbeo Rotorua, the city has a Crime Index of 50.61 and a safety index of 49.39. Despite being one of the prime tourist attractions in the country, Rotorua has the highest crime rate out of any city in New Zealand. However, in the bigger picture of comparison with other cities worldwide, you can see how safe Rotorua is comparatively. Major cities like Chicago and London all have higher Crime Indexes as compared to Rotorua.\nRotorua Police StationAddress: 1190/1214 Fenton Street, Rotorua 3010, New ZealandTel: +64 7-349 9554Opening hours: 8 a.m. to 4 p.m. 
(Daily)\nNew Zealand crime rates: Queenstown\n\n<img decoding=\"async\" src=\"https://images.unsplash.com/photo-1593755673003-8ca8dbf906c2?ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&ixlib=rb-1.2.1&auto=format&fit=crop&w=1740&q=80\" alt=\"new zealand city crowded \"/>\nPhoto credit: Sulthan Auliya / Unsplash\nQueenstown is a tourist hotspot in Otago, New Zealand’s South Island, recognised for its enterprise tourism, specifically venture and skiing. Due to the city’s large tourist appeal, it’s a little costly when compared to other cities in New Zealand.\nAccording to Numbeo Queenstown, it has a Crime Index of 20.22 and a safety Index of 79.78. Unfortunately, there has been a definite upswing in sexual violence and assaults over the last year in the city, with an estimate of at least three cases a month in Queenstown. The majority of sexual crimes are being committed when the victim is drunk or using drugs, often after a night out or at kick-ons, when friends put their intoxicated mate to bed. Queenstown police have launched a ‘Don’t Guess the Yes’ campaign, aimed squarely at tackling the perpetrators of sex crimes.\nQueenstown Police Station Address: 11 Camp Street, Queenstown 9300, New ZealandTel: +64 3-441 1600Opening hours: 8 a.m. to 5 p.m. (Mon. to Fri.); 9 a.m. to 5 p.m. (Sat. + Sun.)\nFeeling ready to make the big move?\nHead over here to begin your journeyOr start ticking off your relocation checklist here\nStill unsure?\nLearn more about New Zealand here Explore more destinations here \n\n\n| City | Crime Index | Safety Index | Police Station Address |\n"
] |
2024-01-10 | gilgamesh7/openai_example_applications | movie_to_emoji.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
text_to_convert_to_emoji = input("Enter movie name : ")
response = openai.Completion.create(
model = "text-davinci-003",
prompt = f"Convert movie titles into emoji.\n\nBack to the Future: 👨👴🚗🕒 \nBatman: 🤵🦇 \nTransformers: 🚗🤖 \n{text_to_convert_to_emoji}: ",
temperature=0.8,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["\n"]
)
print(response["choices"][0]["text"]) | [
"Convert movie titles into emoji.\n\nBack to the Future: 👨👴🚗🕒 \nBatman: 🤵🦇 \nTransformers: 🚗🤖 \nPLACEHOLDER: "
] |
2024-01-10 | gilgamesh7/openai_example_applications | classify_sentinemnt.py | import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
reviews_list_prompt="""
"Classify the sentiment in these reviews for the movie Last Action Hero:\n\n
1. If there's one thing going for "Last Action Hero" (not counting Arnold Schwarzenegger, of course), it's the construction of it all. Even if this intermixing of kid fantasy (meet PG-13 hero Austin O'Brien) and adult shoot'em-up, Hollywood insider jokes and cheap Arnold puns, doesn't completely bowl you over, it's clever and intriguing.
At least, mostly. This Other Big Summer Movie, this rival to "Jurassic Park," is not seamless. Structurally, it can be a little haphazard and messy. The humor sometimes hits, sometimes misses. It'll depend on what mood the audience is in. A 7:30 crowd could roll in the aisles over this; but the 9:45 folks might sit through this stunned. It's that kind of movie. But if "Action Hero" has its dead spots, it can also be funny -- very funny. Take Arnold playing Hamlet . . . .
But first, the story. Eleven-year-old Danny (O'Brien) has only one respite from dangerous New York City life and school: To watch his favorite movie hero, Jack Slater. Thanks to friendly, old projectionist Nick (Robert Prosky), he can slip into the theater day or night to watch Slater in celluloid action.
Slater (played by Maria Shriver's muscular Austrian sidekick) is an L. A. cop who chews a mean cigar, walks unharmed through gunfire and always gets his man. Naturally he does this with enormous infrastructural damage, vehicular destruction, stunt heroics and gratuitous loss of life. His screen life is a swaggering, fictional blast. And watching alone in the theater, Danny drinks it all in.
Things get interesting when projectionist Nick gives Danny a magic movie ticket -- given to him years ago by Harry Houdini. The old man, who has never dared use it, finally tears it in half and hands the stub to Danny. Clutching the ticket, Danny finds himself transported into Slater's world. In the midst of a frenetic car chase, Slater is shocked to find an 11-year-old sitting in the back of his car.
First, it's Danny's turn to be in Slater's world. A veteran of Slater's movies, he knows every cliche, every heroic one-liner and all of Slater's history. A hero-and-the-kid story develops, as Danny helps Slater contend with his latest villains (played by Anthony Quinn and Charles Dance). But all hell breaks loose when Benedict (Dance) gets hold of the magic ticket and enters Danny's world. Now, Danny brings Slater into the real world, and the movie hero discovers it really hurts when you punch through glass.
At this point, director John "Die Hard" McTiernan and his team of scriptwriters (a conspiracy of four, collectively responsible for "Lethal Weapon," "The Last Boy Scout" and "The Adventures of Ford Fairlane") have their multidimensional work cut out for them. Whether these convolutions are too involved for summer audiences remains to be seen. But story momentum is often lost. The drama doesn't keep you gripped the way "Terminator II: Judgment Day" did -- or that far superior dimension-fusion fantasy, "Back to the Future."
"Action Hero," with multiple cameos from Sharon Stone, Hammer, Jean-Claude Van Damme and a myriad others, has a quorum of good moments -- and more than enough of Arnold -- to satisfy the crowd. As for those smart-alecky lines, they come at you like frenzied bullets, hoping desperately to connect. Some are better than others. "Could I speak to the drug dealer of the house?" asks Slater sweetly, when villain Dance opens his mansion door. Danny also has an amusing daydream at the beginning, when he pictures Slater as Hamlet in an action-movie version of the play: "Something's rotten in the State of Denmark," says the announcer in the imaginary movie trailer. "And Hamlet is taking out the trash!" Perhaps that was the movie Arnold should have done.
2. Trying to be everything to everybody rarely, if ever, works, and Last Action Hero is a textbook case in point. Borrowing heavily from Woody Allen's Purple Rose of Cairo, professional action director McTiernan has crafted a massive juggernaut of a film, overflowing with breathtaking stunts, witty one-liners, and well-chosen cameos. It's a film with so much promise, you just want to hunt down everybody involved when you realize it doesn't deliver the goods. Twelve-year-old New Yorker Danny Madigan (O'Brien) is a movie buff engrossed in the “Jack Slater” series of action films. Slater, a fictional cross between Bruce Willis, Van Damme, Sly Stallone, and others, is the sort of film character Schwarzenegger was born to play -- it's hard to imagine anyone else in the role. When Danny receives a magic ticket at a private screening of Jack Slater IV, he finds himself catapulted through the screen and into Slater's “reality,” a place where all the women are California knockouts, the good guys always win, and you can't say four-letter words because “this movie's PG-13.” Director McTiernan uses this alternate world as a means to poke fun at Schwarzenegger's rugged persona and that of action films in general. In-jokes flow like Tinseltown dollars here -- in one brief shot, Danny spies both Sharon Stone and an evil-looking Robert Patrick (Schwarzenegger's robotic foe in last summer's Terminator 2) exiting the opulent LAPD headquarters just as he's walking in with Slater. And upon first seeing Slater's friend Practice, played by F. Murray Abraham, Danny exclaims, “You'd better keep your eye on that guy. He killed Mozart.” When villainous killer Benedict (Charles Dance, in perhaps the film's best role) manages to get his hands on Danny's magic ticket, the film jumps back to the “real” world climax, and to the NYC premiere of Jack Slater IV, complete with cameos from the “real” Schwarzenegger and wife Maria Shriver, Chevy Chase, Jean-Claude Van Damme, et al. Really, it's not as confusing as it sounds, and, unfortunately, it's also not nearly as good as it sounds, either. Slater/Schwarzenegger's quips wear thin in the film's first half-hour, and even McTiernan's Player-esque stabs at skewering his own stock-in-trade fall flat way too often. At the nearly-packed screening I attended, there seemed to be far more babies crying than people laughing in the audience -- a sure sign that something's amiss. The film tries so very hard to be The Movie of Summer '93 that it almost makes you sick for what could have been, what should have been, and, in the end, what it is: soulless sound and fury -- action in a vacuum.
3. It's not a bomb at all. A dud is more like it - Last Action Hero isn't interesting enough to be explosively bad. For all the inflated pyrotechnics on the screen, the picture seems consistently grey and almost pitiably small.
4. The script is weak and unrelenting. The stunts are unspectacular. The special effects are nothing you haven't seen before. But worst of all, there's the spectacle of Schwarzenegger glorying in the wonder of Schwarzenegger.
5. Last Action Hero starts out mostly nuts, and winds up mostly bolts. Or, rather, winds down. That's a problem with pastiche: it must be constantly jump-started with ingenuity, and even that ultimately pales. By the end, nothing matters.
6. Last Action Hero is sporadically entertaining, but it could have been a whole lot more. Trimmed down and better edited, this film might have been a top-notch satire. As it is, however, it gets caught someplace in between action and comedy, and never really comes across as a solid example of either.
7. Last Action Hero is something of a mess, but a frequently enjoyable one. It tries to be too many things to too many different kinds of audiences, the result being that it will probably confuse, and perhaps even alienate, the hard-core action fans.
\n\nTweet sentiment ratings:"
"""
response = openai.Completion.create(
model="text-davinci-003",
prompt=reviews_list_prompt,
temperature=0,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
print(f'{response["choices"][0]["text"]}') | [
"\n\"Classify the sentiment in these reviews for the movie Last Action Hero:\n\n\n1. If there's one thing going for \"Last Action Hero\" (not counting Arnold Schwarzenegger, of course), it's the construction of it all. Even if this intermixing of kid fantasy (meet PG-13 hero Austin O'Brien) and adult shoot'em-up, Hollywood insider jokes and cheap Arnold puns, doesn't completely bowl you over, it's clever and intriguing.\nAt least, mostly. This Other Big Summer Movie, this rival to \"Jurassic Park,\" is not seamless. Structurally, it can be a little haphazard and messy. The humor sometimes hits, sometimes misses. It'll depend on what mood the audience is in. A 7:30 crowd could roll in the aisles over this; but the 9:45 folks might sit through this stunned. It's that kind of movie. But if \"Action Hero\" has its dead spots, it can also be funny -- very funny. Take Arnold playing Hamlet . . . .\nBut first, the story. Eleven-year-old Danny (O'Brien) has only one respite from dangerous New York City life and school: To watch his favorite movie hero, Jack Slater. Thanks to friendly, old projectionist Nick (Robert Prosky), he can slip into the theater day or night to watch Slater in celluloid action.\nSlater (played by Maria Shriver's muscular Austrian sidekick) is an L. A. cop who chews a mean cigar, walks unharmed through gunfire and always gets his man. Naturally he does this with enormous infrastructural damage, vehicular destruction, stunt heroics and gratuitous loss of life. His screen life is a swaggering, fictional blast. And watching alone in the theater, Danny drinks it all in.\nThings get interesting when projectionist Nick gives Danny a magic movie ticket -- given to him years ago by Harry Houdini. The old man, who has never dared use it, finally tears it in half and hands the stub to Danny. Clutching the ticket, Danny finds himself transported into Slater's world. In the midst of a frenetic car chase, Slater is shocked to find an 11-year-old sitting in the back of his car.\nFirst, it's Danny's turn to be in Slater's world. A veteran of Slater's movies, he knows every cliche, every heroic one-liner and all of Slater's history. A hero-and-the-kid story develops, as Danny helps Slater contend with his latest villains (played by Anthony Quinn and Charles Dance). But all hell breaks loose when Benedict (Dance) gets hold of the magic ticket and enters Danny's world. Now, Danny brings Slater into the real world, and the movie hero discovers it really hurts when you punch through glass.\nAt this point, director John \"Die Hard\" McTiernan and his team of scriptwriters (a conspiracy of four, collectively responsible for \"Lethal Weapon,\" \"The Last Boy Scout\" and \"The Adventures of Ford Fairlane\") have their multidimensional work cut out for them. Whether these convolutions are too involved for summer audiences remains to be seen. But story momentum is often lost. The drama doesn't keep you gripped the way \"Terminator II: Judgment Day\" did -- or that far superior dimension-fusion fantasy, \"Back to the Future.\"\n\"Action Hero,\" with multiple cameos from Sharon Stone, Hammer, Jean-Claude Van Damme and a myriad others, has a quorum of good moments -- and more than enough of Arnold -- to satisfy the crowd. As for those smart-alecky lines, they come at you like frenzied bullets, hoping desperately to connect. Some are better than others. \"Could I speak to the drug dealer of the house?\" asks Slater sweetly, when villain Dance opens his mansion door. 
Danny also has an amusing daydream at the beginning, when he pictures Slater as Hamlet in an action-movie version of the play: \"Something's rotten in the State of Denmark,\" says the announcer in the imaginary movie trailer. \"And Hamlet is taking out the trash!\" Perhaps that was the movie Arnold should have done.\n2. Trying to be everything to everybody rarely, if ever, works, and Last Action Hero is a textbook case in point. Borrowing heavily from Woody Allen's Purple Rose of Cairo, professional action director McTiernan has crafted a massive juggernaut of a film, overflowing with breathtaking stunts, witty one-liners, and well-chosen cameos. It's a film with so much promise, you just want to hunt down everybody involved when you realize it doesn't deliver the goods. Twelve-year-old New Yorker Danny Madigan (O'Brien) is a movie buff engrossed in the “Jack Slater” series of action films. Slater, a fictional cross between Bruce Willis, Van Damme, Sly Stallone, and others, is the sort of film character Schwarzenegger was born to play -- it's hard to imagine anyone else in the role. When Danny receives a magic ticket at a private screening of Jack Slater IV, he finds himself catapulted through the screen and into Slater's “reality,” a place where all the women are California knockouts, the good guys always win, and you can't say four-letter words because “this movie's PG-13.” Director McTiernan uses this alternate world as a means to poke fun at Schwarzenegger's rugged persona and that of action films in general. In-jokes flow like Tinseltown dollars here -- in one brief shot, Danny spies both Sharon Stone and an evil-looking Robert Patrick (Schwarzenegger's robotic foe in last summer's Terminator 2) exiting the opulent LAPD headquarters just as he's walking in with Slater. And upon first seeing Slater's friend Practice, played by F. Murray Abraham, Danny exclaims, “You'd better keep your eye on that guy. He killed Mozart.” When villainous killer Benedict (Charles Dance, in perhaps the film's best role) manages to get his hands on Danny's magic ticket, the film jumps back to the “real” world climax, and to the NYC premiere of Jack Slater IV, complete with cameos from the “real” Schwarzenegger and wife Maria Shriver, Chevy Chase, Jean-Claude Van Damme, et al. Really, it's not as confusing as it sounds, and, unfortunately, it's also not nearly as good as it sounds, either. Slater/Schwarzenegger's quips wear thin in the film's first half-hour, and even McTiernan's Player-esque stabs at skewering his own stock-in-trade fall flat way too often. At the nearly-packed screening I attended, there seemed to be far more babies crying than people laughing in the audience -- a sure sign that something's amiss. The film tries so very hard to be The Movie of Summer '93 that it almost makes you sick for what could have been, what should have been, and, in the end, what it is: soulless sound and fury -- action in a vacuum.\n3. It's not a bomb at all. A dud is more like it - Last Action Hero isn't interesting enough to be explosively bad. For all the inflated pyrotechnics on the screen, the picture seems consistently grey and almost pitiably small.\n4. The script is weak and unrelenting. The stunts are unspectacular. The special effects are nothing you haven't seen before. But worst of all, there's the spectacle of Schwarzenegger glorying in the wonder of Schwarzenegger. \n5. Last Action Hero starts out mostly nuts, and winds up mostly bolts. Or, rather, winds down. 
That's a problem with pastiche: it must be constantly jump-started with ingenuity, and even that ultimately pales. By the end, nothing matters.\n6. Last Action Hero is sporadically entertaining, but it could have been a whole lot more. Trimmed down and better edited, this film might have been a top-notch satire. As it is, however, it gets caught someplace in between action and comedy, and never really comes across as a solid example of either.\n7. Last Action Hero is something of a mess, but a frequently enjoyable one. It tries to be too many things to too many different kinds of audiences, the result being that it will probably confuse, and perhaps even alienate, the hard-core action fans.\n\n\nTweet sentiment ratings:\"\n"
] |
2024-01-10 | dair-iitd/jeebench | inference.py | import os
from tqdm import tqdm
import json
import os
import openai
from tqdm import tqdm
import argparse
import multiprocessing
from copy import deepcopy
from functools import partial
prompt_library = {
"MCQ": "In this problem, only one option will be correct. Give a detailed solution and end the solution with the final answer.",
"MCQ(multiple)": "In this problem, multiple options can be correct. Give a detailed solution and end the solution with the final answer.",
"Integer": "In this problem, the final answer will be a non-negative integer. Give a detailed solution and end the solution with the final answer.",
"Numeric": "In this problem, the final will be a numeric value. Give the numerical answer correct upto the 2nd decimal digit. Give a detailed solution and end the solution with the final answer.",
}
few_shot_examples = json.load(open('data/few_shot_examples.json'))
def write_in_file(response_file, response_dict, question, mode, model_nickname):
if os.path.exists(response_file):
with open(response_file, 'r') as infile:
responses = json.load(infile)
else:
responses = []
found = False
for i, old_resp in enumerate(responses):
if old_resp['description'] == question['description'] and old_resp['index'] == question['index']:
responses[i][f"{model_nickname}_{mode}_response" ] = response_dict[f"{model_nickname}_{mode}_response"]
found = True
break
if not found:
responses.append(response_dict)
json.dump(sorted(responses, key=lambda elem: (elem['description'], elem['index'])), open(response_file, 'w'), indent=4)
print(f"####UPDATED {response_file}, Current size : {len(responses)}####")
def get_response(question,model, model_nickname, mode, response_file, lock):
response_dict = deepcopy(question)
prefix_prompt = prompt_library[question['type']]
suffix_prompt = ""
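# CoT, CoT+SC and CoT+Exam modes append the "Let's think step by step." cue; CoT+OneShot instead relies on the worked example added below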
if mode in ['CoT', 'CoT+SC', 'CoT+Exam'] :
suffix_prompt = "Let's think step by step.\n"
ques = question["question"]
stripped_ques = ques.replace("\n\n", "\n").strip()
if mode in ['CoT+OneShot', 'CoT', 'CoT+SC', 'CoT+Exam']:
if mode == 'CoT+Exam':
if response_dict['type'] in ['MCQ', 'MCQ(multiple)']:
if response_dict['type'] == 'MCQ':
exam_prompt = "If the answer is wrong, you'll be given -1 marks. If the answer is correct, you'll be given +3 marks. If you're unsure of the answer, you can skip the question, and you'll be given 0 marks."
else:
exam_prompt = "If any of the options in the final answer is wrong, you'll be given -2 marks. If all the options are correct, you'll be given +4 marks. If some of the options are correct, you'll be given +1 for each correct option. If you're unsure of the answer, you can skip the question, and you'll be given 0 marks."
prompt = prefix_prompt + " " + exam_prompt + "\n\n" + "Problem: " + stripped_ques + "\nSolution: " + suffix_prompt
else:
print("No point doing this for Numeric/Integer questions since there is no negative marking...")
breakpoint()
else:
if mode == 'CoT+OneShot':
ex = few_shot_examples[question['subject']][question['type']]
prompt = prefix_prompt + "\n\n" + "Problem: " + ex['problem'] + "\nSolution: " + ex['solution'] + "\n\n" + "Problem: " + stripped_ques + "\nSolution: "
else:
prompt = prefix_prompt + "\n\n" + "Problem: " + stripped_ques + "\nSolution: " + suffix_prompt
else:
prompt = prefix_prompt + "\n\n" + "Problem: " + stripped_ques + suffix_prompt
prompt = prompt.strip()
response_dict[f"prompt"] = prompt
num_retries = 0
print(f'Question: {question["description"]}, Index: {question["index"]}, Model: {model_nickname}, Mode: {mode}, query begins')
while True:
try:
if model in ["text-davinci-003", "text-davinci-002", 'davinci-002']:
response = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=2048,
temperature=0 if mode in ['CoT', 'normal', 'CoT+Exam'] else 0.5,
n=1 if mode in ['CoT', 'normal', 'CoT+Exam'] else 3
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": ""},
{"role": "user", "content": prompt}
],
max_tokens=2048,
temperature=0 if mode in ['CoT+OneShot', 'CoT', 'normal', 'CoT+Exam'] else 0.5,
n=1 if mode in ['CoT+OneShot', 'CoT', 'normal', 'CoT+Exam'] else 8
)
lock.acquire()
response_dict[f"{model_nickname}_{mode}_response"] = response
write_in_file(response_file, response_dict, question, mode, model_nickname)
lock.release()
break
except Exception as e:
num_retries += 1
print("Failure!", e)
return
def main():
'''
The script can resume from already-completed questions if a previous run failed partway through.
'''
args = argparse.ArgumentParser()
args.add_argument('--model', default='gpt-3.5-turbo')
args.add_argument('--data', default='data/dataset.json')
args.add_argument('--mode', default='normal')
args.add_argument('--num_procs', default=1, type=int)
args.add_argument('--max_questions', default=1, type=int)
args = args.parse_args()
openai.organization = os.getenv("OPENAI_ORG")
openai.api_key = os.getenv("OPENAI_API_KEY")
model_nickname = {
"davinci-002": "davinci-002",
"text-davinci-003": "GPT3",
"gpt-3.5-turbo": "GPT3.5",
"gpt-4-0613": "GPT4_0613",
"gpt-4-0314": "GPT4"
}
assert args.model in model_nickname.keys()
assert args.mode in ['normal', 'CoT', 'CoT+OneShot', 'CoT+Exam', 'CoT+SC']
out_file_dir = f'responses/{model_nickname[args.model]}_{args.mode}_responses'
out_file = os.path.join(out_file_dir, 'responses.json')
questions = json.load(open(args.data))
rem_ques = []
if os.path.exists(out_file):
for question in tqdm(questions[:args.max_questions]):
if os.path.exists(out_file):
with open(out_file, 'r') as infile:
responses = json.load(infile)
found = False
for i, old_resp in enumerate(responses):
if question['type'] in ['Numeric', 'Integer'] and args.mode == 'CoT+Exam':
found = True
if old_resp['description'] == question['description'] and old_resp['index'] == question['index']:
found = all([old_resp.get(
f"{model_nickname[args.model]}_{args.mode}_response", False) for model in [args.model]])
if found:
print("This question has already been done")
else:
rem_ques.append(question)
else:
os.makedirs(out_file_dir, exist_ok=True)
if args.mode == 'CoT+Exam':
rem_ques = []
for q in questions:
if q['type'] in ['MCQ', 'MCQ(multiple)']:
rem_ques.append(q)
else:
rem_ques = questions[:args.max_questions]
print(f"There are {len(rem_ques)} problems remaining")
manager = multiprocessing.Manager()
lock = manager.Lock()
pool = multiprocessing.Pool(args.num_procs)
f = partial(get_response, model=args.model, model_nickname=model_nickname[args.model], mode=args.mode, response_file=out_file, lock=lock)
pool.map(f, rem_ques)
if __name__ == '__main__':
main()
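# Example invocation (flags from the argparse setup above; values are hypothetical):
#   OPENAI_API_KEY=... OPENAI_ORG=... python inference.py --model gpt-4-0613 --mode CoT --num_procs 4 --max_questions 100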
| [
"\n\n",
"Problem: ",
"PLACEHOLDER\n\nProblem: PLACEHOLDERPLACEHOLDER",
"solution",
"PLACEHOLDER PLACEHOLDER\n\nProblem: PLACEHOLDER\nSolution: PLACEHOLDER",
"{'MCQ': 'In this problem, only one option will be correct. Give a detailed solution and end the solution with the final answer.', 'MCQ(multiple)': 'In this problem, multiple options can be correct. Give a detailed solution and end the solution with the final answer.', 'Integer': 'In this problem, the final answer will be a non-negative integer. Give a detailed solution and end the solution with the final answer.', 'Numeric': 'In this problem, the final will be a numeric value. Give the numerical answer correct upto the 2nd decimal digit. Give a detailed solution and end the solution with the final answer.'}",
"\nSolution: ",
"PLACEHOLDER\n\nProblem: PLACEHOLDER\nSolution: PLACEHOLDER",
"If any of the options in the final answer is wrong, you'll be given -2 marks. If all the options are correct, you'll be given +4 marks. If some of the options are correct, you'll be given +1 for each correct option. If you're unsure of the answer, you can skip the question, and you'll be given 0 marks.",
"Let's think step by step.\n",
"If the answer is wrong, you'll be given -1 marks. If the answer is correct, you'll be given +3 marks. If you're unsure of the answer, you can skip the question, and you'll be given 0 marks."
] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | chatbot~Lib~site-packages~llama_index~indices~tree~leaf_query.py | """Leaf query mechanism."""
import logging
from typing import Any, Dict, List, Optional, cast
from langchain.input import print_text
from llama_index.data_structs.data_structs_v2 import IndexGraph
from llama_index.data_structs.node_v2 import Node
from llama_index.indices.query.base import BaseGPTIndexQuery
from llama_index.indices.query.embedding_utils import SimilarityTracker
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.response.response_builder import get_response_builder
from llama_index.indices.utils import extract_numbers_given_response, get_sorted_node_list
from llama_index.prompts.default_prompt_selectors import DEFAULT_REFINE_PROMPT_SEL
from llama_index.prompts.default_prompts import (
DEFAULT_QUERY_PROMPT,
DEFAULT_QUERY_PROMPT_MULTIPLE,
DEFAULT_TEXT_QA_PROMPT,
)
from llama_index.prompts.prompts import (
QuestionAnswerPrompt,
RefinePrompt,
TreeSelectMultiplePrompt,
TreeSelectPrompt,
)
from llama_index.response.schema import Response
from llama_index.utils import truncate_text
logger = logging.getLogger(__name__)
def get_text_from_node(
node: Node,
level: Optional[int] = None,
verbose: bool = False,
) -> str:
"""Get text from node."""
level_str = "" if level is None else f"[Level {level}]"
fmt_text_chunk = truncate_text(node.get_text(), 50)
logger.debug(f">{level_str} Searching in chunk: {fmt_text_chunk}")
response_txt = node.get_text()
fmt_response = truncate_text(response_txt, 200)
if verbose:
print_text(f">{level_str} Got node text: {fmt_response}\n", color="blue")
return response_txt
class GPTTreeIndexLeafQuery(BaseGPTIndexQuery[IndexGraph]):
"""GPT Tree Index leaf query.
This class traverses the index graph and searches for a leaf node that can best
answer the query.
.. code-block:: python
response = index.query("<query_str>", mode="default")
Args:
query_template (Optional[TreeSelectPrompt]): Tree Select Query Prompt
(see :ref:`Prompt-Templates`).
query_template_multiple (Optional[TreeSelectMultiplePrompt]): Tree Select
Query Prompt (Multiple)
(see :ref:`Prompt-Templates`).
child_branch_factor (int): Number of child nodes to consider at each level.
If child_branch_factor is 1, then the query will only choose one child node
to traverse for any given parent node.
If child_branch_factor is 2, then the query will choose two child nodes.
"""
def __init__(
self,
index_struct: IndexGraph,
query_template: Optional[TreeSelectPrompt] = None,
text_qa_template: Optional[QuestionAnswerPrompt] = None,
refine_template: Optional[RefinePrompt] = None,
query_template_multiple: Optional[TreeSelectMultiplePrompt] = None,
child_branch_factor: int = 1,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(index_struct, **kwargs)
self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
self._refine_template = refine_template or DEFAULT_REFINE_PROMPT_SEL
self.query_template = query_template or DEFAULT_QUERY_PROMPT
self.query_template_multiple = (
query_template_multiple or DEFAULT_QUERY_PROMPT_MULTIPLE
)
self.child_branch_factor = child_branch_factor
def _query_with_selected_node(
self,
selected_node: Node,
query_bundle: QueryBundle,
prev_response: Optional[str] = None,
level: int = 0,
) -> str:
"""Get response for selected node.
If not leaf node, it will recursively call _query on the child nodes.
If prev_response is provided, we will update prev_response with the answer.
"""
query_str = query_bundle.query_str
if len(self.index_struct.get_children(selected_node)) == 0:
response_builder = get_response_builder(
self._service_context,
self._text_qa_template,
self._refine_template,
)
# use response builder to get answer from node
node_text = get_text_from_node(selected_node, level=level)
cur_response = response_builder.get_response(
query_str, [node_text], prev_response=prev_response
)
cur_response = cast(str, cur_response)
logger.debug(f">[Level {level}] Current answer response: {cur_response} ")
else:
cur_response = self._query_level(
self.index_struct.get_children(selected_node),
query_bundle,
level=level + 1,
)
if prev_response is None:
return cur_response
else:
context_msg = selected_node.get_text()
(
cur_response,
formatted_refine_prompt,
) = self._service_context.llm_predictor.predict(
self._refine_template,
query_str=query_str,
existing_answer=prev_response,
context_msg=context_msg,
)
logger.debug(f">[Level {level}] Refine prompt: {formatted_refine_prompt}")
logger.debug(f">[Level {level}] Current refined response: {cur_response} ")
return cur_response
def _query_level(
self,
cur_node_ids: Dict[int, str],
query_bundle: QueryBundle,
level: int = 0,
) -> str:
"""Answer a query recursively."""
query_str = query_bundle.query_str
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
if len(cur_node_list) == 1:
logger.debug(f">[Level {level}] Only one node left. Querying node.")
return self._query_with_selected_node(
cur_node_list[0], query_bundle, level=level
)
elif self.child_branch_factor == 1:
query_template = self.query_template.partial_format(
num_chunks=len(cur_node_list), query_str=query_str
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template,
context_list=numbered_node_text,
)
else:
query_template_multiple = self.query_template_multiple.partial_format(
num_chunks=len(cur_node_list),
query_str=query_str,
branching_factor=self.child_branch_factor,
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template_multiple
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template_multiple,
context_list=numbered_node_text,
)
logger.debug(
f">[Level {level}] current prompt template: {formatted_query_prompt}"
)
self._service_context.llama_logger.add_log(
{"formatted_prompt_template": formatted_query_prompt, "level": level}
)
debug_str = f">[Level {level}] Current response: {response}"
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
debug_str = (
f">[Level {level}] Could not retrieve response - no numbers present"
)
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
# just join text from current nodes as response
return response
result_response = None
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
logger.debug(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
return response
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
info_str = (
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
debug_str = " ".join(selected_node.get_text().splitlines())
full_debug_str = (
f">[Level {level}] Node "
f"[{number}] Summary text: "
f"{ selected_node.get_text() }"
)
logger.debug(full_debug_str)
if self._verbose:
print_text(full_debug_str, end="\n")
result_response = self._query_with_selected_node(
selected_node,
query_bundle,
prev_response=result_response,
level=level,
)
# result_response should not be None
return cast(str, result_response)
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
# NOTE: this overrides the _query method in the base class
info_str = f"> Starting query: {query_bundle.query_str}"
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
response_str = self._query_level(
self.index_struct.root_nodes,
query_bundle,
level=0,
).strip()
# TODO: fix source nodes
return Response(response_str, source_nodes=[])
def _select_nodes(
self,
cur_node_list: List[Node],
query_bundle: QueryBundle,
level: int = 0,
) -> List[Node]:
query_str = query_bundle.query_str
if self.child_branch_factor == 1:
query_template = self.query_template.partial_format(
num_chunks=len(cur_node_list), query_str=query_str
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template,
context_list=numbered_node_text,
)
else:
query_template_multiple = self.query_template_multiple.partial_format(
num_chunks=len(cur_node_list),
query_str=query_str,
branching_factor=self.child_branch_factor,
)
numbered_node_text = (
self._service_context.prompt_helper.get_numbered_text_from_nodes(
cur_node_list, prompt=query_template_multiple
)
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template_multiple,
context_list=numbered_node_text,
)
logger.debug(
f">[Level {level}] current prompt template: {formatted_query_prompt}"
)
self._service_context.llama_logger.add_log(
{"formatted_prompt_template": formatted_query_prompt, "level": level}
)
debug_str = f">[Level {level}] Current response: {response}"
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
debug_str = (
f">[Level {level}] Could not retrieve response - no numbers present"
)
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
            # no valid numbers in the response; return no selected nodes
return []
selected_nodes = []
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
logger.debug(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
continue
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
info_str = (
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
debug_str = " ".join(selected_node.get_text().splitlines())
full_debug_str = (
f">[Level {level}] Node "
f"[{number}] Summary text: "
f"{ selected_node.get_text() }"
)
logger.debug(full_debug_str)
if self._verbose:
print_text(full_debug_str, end="\n")
selected_nodes.append(selected_node)
return selected_nodes
def _retrieve_level(
self,
cur_node_ids: Dict[int, str],
query_bundle: QueryBundle,
level: int = 0,
) -> List[Node]:
"""Answer a query recursively."""
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
if len(cur_node_list) > self.child_branch_factor:
selected_nodes = self._select_nodes(
cur_node_list,
query_bundle,
level=level,
)
else:
selected_nodes = cur_node_list
children_nodes = {}
for node in selected_nodes:
node_dict = self.index_struct.get_children(node)
children_nodes.update(node_dict)
if len(children_nodes) == 0:
# NOTE: leaf level
return selected_nodes
else:
return self._retrieve_level(children_nodes, query_bundle, level + 1)
def _retrieve(
self,
query_bundle: QueryBundle,
similarity_tracker: Optional[SimilarityTracker] = None,
) -> List[Node]:
"""Get nodes for response."""
return self._retrieve_level(
self.index_struct.root_nodes,
query_bundle,
level=0,
)
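# --- Hedged illustration (added for clarity, not part of the original module) ---
# The traversal above expects the LLM to reply with 1-indexed node numbers
# (e.g. a response such as "ANSWER: 2"), which is why the selected node is
# looked up as cur_node_list[number - 1]. A minimal sketch of that lookup:
if __name__ == "__main__":
    cur_node_list = ["summary A", "summary B", "summary C"]
    numbers = ["2"]  # as returned by extract_numbers_given_response()
    selected = [cur_node_list[int(n) - 1] for n in numbers if int(n) <= len(cur_node_list)]
    assert selected == ["summary B"]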
| [] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | backend.py | import constants
import os
os.environ["OPENAI_API_KEY"] = constants.openapi_key
from llama_index import GPTVectorStoreIndex, QuestionAnswerPrompt
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from llama_index.langchain_helpers.agents import LlamaToolkit, create_llama_chat_agent, IndexToolConfig
import time, pickle
import mysql.connector as ms
import re, ast
# Load indices from disk
index_set = {}
datas = ['About', 'Career', 'Clubs', 'FAQ', 'First Degree Programmes', 'Higher Degree Programmes', 'Phd Programmes', 'Visa Information', 'Wiki', 'Events', 'Courses','Links']
for d in datas:
cur_index = GPTVectorStoreIndex.load_from_disk(f'{d}.json')
index_set[d] = cur_index
index_summaries = [
"Simple description About Bits Pilani Dubai extracted from the BPDC Website including Mission, Vision, Policy and a general overview great to answer questions like what is bits pilani",
"All about careers and placements at BPDC, in depth information about the Practice School (PS) program at BPDC, also talks about the thesis alternative to practice school for FD, HD and PhD",
"Details on all the clubs, associations and chapters at BPDC, with names of the Presidents, chairpersons, and faculty in charge of each club or association including clubs such as as Sports, Scientific Associations, Cultural Activites, MAD (social and environmental club making a difference), public speaking and literary, dance club (groove), drama club (paribasha), art club (shades), music club (trebel), chimera, design club, fashion club (allure), photography club (reflexions), quiz club (flummoxed), supernova (astronomy), wall street club, ieee, acm and sae chapters, Association of Electronics Engineers (AOEE), American Institute of Chemical Engineers (AIChE), American Society of Mechanical Engineers (ASME), Center for Innovation, Incubation and Entrepreneurship (CIIE), Intelligent Flying Object for Reconnaissance Club (Team IFOR), Microsoft Tech Club, Skyline, WIE, SWE, Editorial Board",
"A great index in case none of the other indexes are appropriate. Frequently asked questions about BPDC related to Admissions, Fees, Hostel, Visa, and other including transfers, visa, costs, dress code, insurance, prospects, BPDC Library, WebOPAC portal, textbooks, parents, clinic, Question Papers. And Events such as BSF, IceBreakers, College timings etc.",
"Details on all the First Degree (FD), Bachelor of Engineering (B.E.) programmes at BPDC, their fees, eligibility, scholarships, concessions, special info, detailed writeups on each program. Also talks about minor programs and the structure of the program itself",
"Details on all the Higher Degree (FD), Master of Engineering (M.E.) and M.B.A. programmes at BPDC, their fees, eligibility, scholarships, concessions, special info, detailed writeups on each program",
"Details on the PHD program at BPDC its eligibility and general information",
"Details about UAE Residence Visa which is required to study at BPDC, how to apply and get this visa",
"Overview of Bits Pilani Dubai Campus and extract from the Wikipedia. Has information on the director, chancellor, vice chancellor, campus size and location, campus affiliations, overview, history, campus and DIAC (Dubai International Academic City), Hostels, Labs, Departments, Practice School (PS 1 AND 2), Events, DIAC Residential Halls, and notable alumni",
"Details about most annual BPDC events such as Jashn, Sparks, BITS Sports Festival (BSF), Icebreakers, Technofest & Enginuity, STEM Day, Spectrum, Artex, Dandiya Night, Recharge, Inter Year Sports Tournament, Synergy, Ethnic Day, Diro’s Tea Party, Convocation, BITS Innovation Challenge",
"Details about all the lectures, lectorial, practical, course requirements, attendance requirements for the courses offered for the FD programmes at BPDC with course codes such as MATH F111, BITS F225 etc. does NOT contain all the courses available",
"Links to important documents such as the academic calendar, FD General timetable, guide for giving feedback, applying for projects, widthdrawing from courses, enrolling/registering for courses/classes, semester abroad program, facilities services and help, projects list.",
]
# define toolkit
index_configs = []
for i, y in enumerate(datas):
tool_config = IndexToolConfig(
index=index_set[y],
name=f"Vector Index {y}",
description=index_summaries[i],
tool_kwargs={"return_direct": True, "return_sources": True},
)
index_configs.append(tool_config)
toolkit = LlamaToolkit(
index_configs=index_configs
)
QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information and common sense, and chat history but not prior knowledge"
"answer the question. If you don't know the answer, reply 'I don't know': {query_str}\n"
)
QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL)
'''
create table chat(chat_id int auto_increment, fname varchar(30), feedback_count int default 0, primary key(chat_id));
create table conversation(conversation_id int auto_increment, chat_id int, user_message text, bot_message text, primary key(conversation_id), foreign key (chat_id) references chat(chat_id) on delete cascade);
'''
def create_cursor():
mydb = ms.connect(host='localhost', user='root', password=constants.mysqlpassword, database="chatbot", autocommit=True)
cursor = mydb.cursor(dictionary=True, buffered=True)
return mydb, cursor
def return_chain(memory):
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
agent_chain = create_llama_chat_agent(
toolkit,
llm,
memory=memory,
verbose=True,
text_qa_template=QA_PROMPT
)
return agent_chain
def create_chain():
memory = ConversationBufferMemory(memory_key="chat_history")
agc = return_chain(memory)
fname = f"{time.time()}.pkl"
with open("memorychains/"+fname,"wb") as f:
pickle.dump(agc.memory,f)
mydb, cursor = create_cursor()
cursor.execute("insert into chat(fname) values(%s)",(fname,))
cursor.execute("select LAST_INSERT_ID() as chat_id")
data = cursor.fetchone()
chat_id = data["chat_id"]
cursor.close()
mydb.close()
return agc, chat_id
def save_chain(chain, chat_id):
mydb, cursor = create_cursor()
cursor.execute("select fname from chat where chat_id=%s", (chat_id,))
data = cursor.fetchone()
fname = data["fname"]
cursor.close()
mydb.close()
with open("memorychains/"+fname,"wb") as f:
pickle.dump(chain.memory,f)
def load_chain(chat_id):
mydb, cursor = create_cursor()
cursor.execute("select fname from chat where chat_id=%s", (chat_id,))
data = cursor.fetchone()
fname = data["fname"]
cursor.close()
mydb.close()
with open("memorychains/"+fname,"rb") as f:
memory = pickle.load(f)
return return_chain(memory)
def none_parser(dataDict):
for d in dataDict:
if dataDict[d] == 'None':
dataDict[d] = None
return dataDict
def log_feedback(chat_id):
mydb, cursor = create_cursor()
cursor.execute("update chat set feedback_count=feedback_count+1 where chat_id=%s",(chat_id,))
cursor.close()
mydb.close()
return True
def return_output(message, chain, chat_id):
simplification = re.compile(re.escape('bpdc'), re.IGNORECASE)
message = simplification.sub('Bits Pilani Dubai Campus', message)
try:
message_response = chain.run(message)
except Exception as e:
print(e)
return "Sorry, something went wrong!"
save_chain(chain, chat_id)
if message_response[0] == "{":
message_response = ast.literal_eval(message_response)
if type(message_response) == dict:
message_response = message_response["answer"]
mydb, cursor = create_cursor()
cursor.execute("insert into conversation(chat_id, user_message, bot_message) values(%s,%s,%s)",(chat_id,message,message_response))
cursor.close()
mydb.close()
return message_response
def get_chats():
mydb, cursor = create_cursor()
cursor.execute("select distinct chat_id, feedback_count from chat natural join conversation order by feedback_count desc")
data = cursor.fetchall()
cursor.close()
mydb.close()
return data
def get_conversation(chat_id):
mydb, cursor = create_cursor()
cursor.execute("select * from conversation where chat_id=%s",(chat_id,))
data = cursor.fetchall()
cursor.close()
mydb.close()
return data | [
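# --- Hedged usage sketch (added for illustration, not part of the original backend) ---
# Rough outline of the intended call flow, assuming constants.py supplies valid
# OpenAI and MySQL credentials and that the `chat`/`conversation` tables exist.
# The example questions are illustrative.
if __name__ == "__main__":
    chain, chat_id = create_chain()
    print(return_output("What clubs does BPDC have?", chain, chat_id))
    # A follow-up turn reloads the persisted memory for the same chat_id:
    chain = load_chain(chat_id)
    print(return_output("Who is the faculty in charge of ACM?", chain, chat_id))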
"Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and common sense, and chat history but not prior knowledgeanswer the question. If you don't know the answer, reply 'I don't know': {query_str}\n"
] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | chatbot~Lib~site-packages~llama_index~evaluation~dataset_generation.py | """Dataset generation from documents"""
from __future__ import annotations
from typing import List, Optional
import re
from llama_index import (
Document,
GPTListIndex,
QuestionAnswerPrompt,
ServiceContext,
LLMPredictor,
)
from llama_index.data_structs.node_v2 import Node
from langchain.chat_models import ChatOpenAI
DEFAULT_QUESTION_GENERATION_PROMPT = """Context information is below.\n"
"\n---------------------\n{context_str}\n---------------------\n"
"Given the context information and not prior knowledge.\n"
"generate only questions based on the below query.\n"
"{query_str}\n"
"""
def _get_default_service_context() -> ServiceContext:
"""Get default service context."""
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size_limit=3000
)
return service_context
class DatasetGenerator:
"""Generate dataset (question/ question-answer pairs) \
based on the given documents.
NOTE: this is a beta feature, subject to change!
Args:
nodes (List[Node]): List of nodes. (Optional)
service_context (ServiceContext): Service Context.
        num_questions_per_chunk: number of questions to be \
            generated per chunk. Each document is split into chunks of 512 words.
text_question_template: Question generation template.
"""
def __init__(
self,
nodes: List[Node],
service_context: Optional[ServiceContext] = None,
num_questions_per_chunk: int = 10,
text_question_template: Optional[QuestionAnswerPrompt] = None,
question_gen_query: Optional[str] = None,
) -> None:
"""Init params."""
if service_context is None:
service_context = _get_default_service_context()
self.service_context = service_context
self.text_question_template = text_question_template or QuestionAnswerPrompt(
DEFAULT_QUESTION_GENERATION_PROMPT
)
self.question_gen_query = (
question_gen_query
or f"You are a Teacher/ Professor. Your task is to setup \
{num_questions_per_chunk} questions for an upcoming \
quiz/examination. The questions should be diverse in nature \
across the document. Restrict the questions to the \
context information provided."
)
self.nodes = nodes
@classmethod
def from_documents(
cls,
documents: List[Document],
service_context: Optional[ServiceContext] = None,
num_questions_per_chunk: int = 10,
text_question_template: Optional[QuestionAnswerPrompt] = None,
question_gen_query: Optional[str] = None,
) -> "DatasetGenerator":
"""Generate dataset from documents."""
if service_context is None:
service_context = _get_default_service_context()
nodes = service_context.node_parser.get_nodes_from_documents(documents)
return cls(
nodes=nodes,
service_context=service_context,
num_questions_per_chunk=num_questions_per_chunk,
text_question_template=text_question_template,
question_gen_query=question_gen_query,
)
def _node_question_generator(self, nodes: List[Node]) -> List[str]:
"""Node question generator."""
questions = []
for node in nodes:
index = GPTListIndex.from_documents([Document(node.get_text())])
response = index.query(
self.question_gen_query,
service_context=self.service_context,
text_qa_template=self.text_question_template,
use_async=True,
)
result = str(response).strip().split("\n")
cleaned_questions = [
re.sub(r"^\d+[\).\s]", "", question).strip() for question in result
]
questions.extend(cleaned_questions)
questions = [question for question in questions if question != ""]
return questions
def generate_questions_from_nodes(self) -> List[str]:
"""Generates questions for each document."""
return self._node_question_generator(self.nodes)
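# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Generates evaluation questions from an in-memory document, assuming a valid
# OPENAI_API_KEY is configured. The sample text is illustrative only.
if __name__ == "__main__":
    docs = [Document("LlamaIndex connects large language models to external data via indices.")]
    generator = DatasetGenerator.from_documents(docs, num_questions_per_chunk=2)
    for question in generator.generate_questions_from_nodes():
        print(question)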
| [
"Context information is below.\n\"\n\"\n---------------------\n{context_str}\n---------------------\n\"\n\"Given the context information and not prior knowledge.\n\"\n\"generate only questions based on the below query.\n\"\n\"{query_str}\n\"\n"
] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | chatbot~Lib~site-packages~langchain~agents~conversational~output_parser.py | import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish
class ConvoOutputParser(AgentOutputParser):
ai_prefix: str = "AI"
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
regex = r"Action: (.*?)[\n]*Action Input: (.*)"
match = re.search(regex, text)
if not match:
raise ValueError(f"Could not parse LLM output: `{text}`")
action = match.group(1)
action_input = match.group(2)
if action_input == "None":
action_input = "Summarise the text"
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
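# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Exercises both branches of parse(): a tool invocation and a final answer.
# The example strings are illustrative.
if __name__ == "__main__":
    parser = ConvoOutputParser()
    action = parser.parse("Thought: I need data\nAction: Search\nAction Input: BPDC clubs")
    assert action.tool == "Search" and action.tool_input == "BPDC clubs"
    finish = parser.parse("AI: There are over twenty clubs at BPDC.")
    assert finish.return_values["output"] == "There are over twenty clubs at BPDC."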
| [] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | temp.py | import constants
import os
os.environ["OPENAI_API_KEY"] = constants.openapi_key
from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from llama_index.langchain_helpers.agents import LlamaToolkit, create_llama_chat_agent, IndexToolConfig, LlamaIndexTool, GraphToolConfig
from llama_index.indices.composability import ComposableGraph
from llama_index.indices.query.query_transform.base import DecomposeQueryTransform
index = GPTVectorStoreIndex.load_from_disk(f'Clubs.json')
r = index.query("What is ACM?")
print(r)
| [] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | chatbot~Lib~site-packages~llama_index~readers~weaviate~client.py | """Weaviate-specific serializers for LlamaIndex data structures.
Contains conversion to and from dataclasses that LlamaIndex uses.
"""
import json
from dataclasses import field
from typing import Any, Dict, List, Optional, cast
from llama_index.data_structs.data_structs_v2 import Node
from llama_index.data_structs.node_v2 import DocumentRelationship
from llama_index.readers.weaviate.utils import (
get_by_id,
parse_get_response,
validate_client,
)
from llama_index.utils import get_new_id
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryMode
import logging
_logger = logging.getLogger(__name__)
NODE_SCHEMA: List[Dict] = [
{
"dataType": ["string"],
"description": "Text property",
"name": "text",
},
{
"dataType": ["string"],
"description": "Document id",
"name": "doc_id",
},
{
"dataType": ["string"],
"description": "extra_info (in JSON)",
"name": "extra_info",
},
{
"dataType": ["string"],
"description": "The ref_doc_id of the Node",
"name": "ref_doc_id",
},
{
"dataType": ["string"],
"description": "node_info (in JSON)",
"name": "node_info",
},
{
"dataType": ["string"],
"description": "The hash of the Document",
"name": "doc_hash",
},
{
"dataType": ["string"],
"description": "The relationships of the node (in JSON)",
"name": "relationships",
},
]
def _get_by_id(client: Any, object_id: str, class_prefix: str) -> Dict:
"""Get entry by id."""
validate_client(client)
class_name = _class_name(class_prefix)
properties = NODE_SCHEMA
prop_names = [p["name"] for p in properties]
entry = get_by_id(client, object_id, class_name, prop_names)
return entry
def create_schema(client: Any, class_prefix: str) -> None:
"""Create schema."""
validate_client(client)
# first check if schema exists
schema = client.schema.get()
classes = schema["classes"]
existing_class_names = {c["class"] for c in classes}
# if schema already exists, don't create
class_name = _class_name(class_prefix)
if class_name in existing_class_names:
return
properties = NODE_SCHEMA
class_obj = {
"class": _class_name(class_prefix), # <= note the capital "A".
"description": f"Class for {class_name}",
"properties": properties,
}
client.schema.create_class(class_obj)
def weaviate_query(
client: Any,
class_prefix: str,
query_spec: VectorStoreQuery,
) -> List[Node]:
"""Convert to LlamaIndex list."""
validate_client(client)
class_name = _class_name(class_prefix)
prop_names = [p["name"] for p in NODE_SCHEMA]
vector = query_spec.query_embedding
# build query
query = client.query.get(class_name, prop_names).with_additional(["id", "vector"])
if query_spec.mode == VectorStoreQueryMode.DEFAULT:
_logger.debug("Using vector search")
if vector is not None:
query = query.with_near_vector(
{
"vector": vector,
}
)
elif query_spec.mode == VectorStoreQueryMode.HYBRID:
_logger.debug(f"Using hybrid search with alpha {query_spec.alpha}")
query = query.with_hybrid(
query=query_spec.query_str,
alpha=query_spec.alpha,
vector=vector,
)
query = query.with_limit(query_spec.similarity_top_k)
_logger.debug(f"Using limit of {query_spec.similarity_top_k}")
# execute query
query_result = query.do()
# parse results
parsed_result = parse_get_response(query_result)
entries = parsed_result[class_name]
results = [_to_node(entry) for entry in entries]
return results
def _class_name(class_prefix: str) -> str:
"""Return class name."""
return f"{class_prefix}_Node"
def _to_node(entry: Dict) -> Node:
"""Convert to Node."""
extra_info_str = entry["extra_info"]
if extra_info_str == "":
extra_info = None
else:
extra_info = json.loads(extra_info_str)
node_info_str = entry["node_info"]
if node_info_str == "":
node_info = None
else:
node_info = json.loads(node_info_str)
relationships_str = entry["relationships"]
relationships: Dict[DocumentRelationship, str]
if relationships_str == "":
        relationships = {}
else:
relationships = {
DocumentRelationship(k): v for k, v in json.loads(relationships_str).items()
}
return Node(
text=entry["text"],
doc_id=entry["doc_id"],
embedding=entry["_additional"]["vector"],
extra_info=extra_info,
node_info=node_info,
relationships=relationships,
)
def _node_to_dict(node: Node) -> dict:
node_dict = node.to_dict()
node_dict.pop("embedding") # NOTE: stored outside of dict
# json-serialize the extra_info
extra_info = node_dict.pop("extra_info")
extra_info_str = ""
if extra_info is not None:
extra_info_str = json.dumps(extra_info)
node_dict["extra_info"] = extra_info_str
# json-serialize the node_info
node_info = node_dict.pop("node_info")
node_info_str = ""
if node_info is not None:
node_info_str = json.dumps(node_info)
node_dict["node_info"] = node_info_str
# json-serialize the relationships
relationships = node_dict.pop("relationships")
relationships_str = ""
if relationships is not None:
relationships_str = json.dumps(relationships)
node_dict["relationships"] = relationships_str
ref_doc_id = node.ref_doc_id
if ref_doc_id is not None:
node_dict["ref_doc_id"] = ref_doc_id
return node_dict
def _add_node(
client: Any, node: Node, class_prefix: str, batch: Optional[Any] = None
) -> str:
"""Add node."""
node_dict = _node_to_dict(node)
vector = node.embedding
# TODO: account for existing nodes that are stored
node_id = get_new_id(set())
class_name = _class_name(class_prefix)
# if batch object is provided (via a context manager), use that instead
if batch is not None:
batch.add_data_object(node_dict, class_name, node_id, vector)
else:
client.batch.add_data_object(node_dict, class_name, node_id, vector)
return node_id
def delete_document(client: Any, ref_doc_id: str, class_prefix: str) -> None:
"""Delete entry."""
validate_client(client)
# make sure that each entry
class_name = _class_name(class_prefix)
where_filter = {
"path": ["ref_doc_id"],
"operator": "Equal",
"valueString": ref_doc_id,
}
query = (
client.query.get(class_name).with_additional(["id"]).with_where(where_filter)
)
query_result = query.do()
parsed_result = parse_get_response(query_result)
entries = parsed_result[class_name]
for entry in entries:
client.data_object.delete(entry["_additional"]["id"], class_name)
def add_node(client: Any, node: Node, class_prefix: str) -> str:
"""Convert from LlamaIndex."""
validate_client(client)
index_id = _add_node(client, node, class_prefix)
client.batch.flush()
return index_id
def add_nodes(client: Any, nodes: List[Node], class_prefix: str) -> List[str]:
"""Add nodes."""
from weaviate import Client # noqa: F401
client = cast(Client, client)
validate_client(client)
index_ids = []
with client.batch as batch:
for node in nodes:
index_id = _add_node(client, node, class_prefix, batch=batch)
index_ids.append(index_id)
return index_ids
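# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shows how these helpers might be wired together against a local Weaviate
# instance. The endpoint URL, class_prefix and Node contents are assumptions.
if __name__ == "__main__":
    import weaviate  # weaviate-client (v3-style API)

    client = weaviate.Client("http://localhost:8080")
    class_prefix = "Gpt_Index"  # becomes the "Gpt_Index_Node" class, see _class_name()

    create_schema(client, class_prefix)
    nodes = [Node(text="hello world", doc_id="doc-0", embedding=[0.0] * 4)]
    print(add_nodes(client, nodes, class_prefix))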
| [] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | chatbot~Lib~site-packages~llama_index~langchain_helpers~text_splitter.py | """Text splitter implementations."""
from dataclasses import dataclass
from typing import Callable, List, Optional
from langchain.text_splitter import TextSplitter
from llama_index.utils import globals_helper
@dataclass
class TextSplit:
"""Text split with overlap.
Attributes:
text_chunk: The text string.
num_char_overlap: The number of overlapping characters with the previous chunk.
"""
text_chunk: str
num_char_overlap: Optional[int] = None
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at word tokens."""
def __init__(
self,
separator: str = " ",
chunk_size: int = 3900,
chunk_overlap: int = 200,
tokenizer: Optional[Callable] = None,
backup_separators: Optional[List[str]] = ["\n"],
):
"""Initialize with parameters."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._separator = separator
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self.tokenizer = tokenizer or globals_helper.tokenizer
self._backup_separators = backup_separators
def _reduce_chunk_size(
self, start_idx: int, cur_idx: int, splits: List[str]
) -> int:
"""Reduce the chunk size by reducing cur_idx.
Return the new cur_idx.
"""
current_doc_total = len(
self.tokenizer(self._separator.join(splits[start_idx:cur_idx]))
)
while current_doc_total > self._chunk_size:
percent_to_reduce = (
current_doc_total - self._chunk_size
) / current_doc_total
num_to_reduce = int(percent_to_reduce * (cur_idx - start_idx)) + 1
cur_idx -= num_to_reduce
current_doc_total = len(
self.tokenizer(self._separator.join(splits[start_idx:cur_idx]))
)
return cur_idx
def _preprocess_splits(self, splits: List[str], chunk_size: int) -> List[str]:
"""Process splits.
Specifically search for tokens that are too large for chunk size,
and see if we can separate those tokens more
(via backup separators if specified, or force chunking).
"""
new_splits = []
for split in splits:
num_cur_tokens = len(self.tokenizer(split))
if num_cur_tokens <= chunk_size:
new_splits.append(split)
else:
cur_splits = [split]
if self._backup_separators:
for sep in self._backup_separators:
if sep in split:
cur_splits = split.split(sep)
break
else:
cur_splits = [split]
cur_splits2 = []
for cur_split in cur_splits:
num_cur_tokens = len(self.tokenizer(cur_split))
if num_cur_tokens <= chunk_size:
cur_splits2.extend([cur_split])
else:
cur_split_chunks = [
cur_split[i : i + chunk_size]
for i in range(0, len(cur_split), chunk_size)
]
cur_splits2.extend(cur_split_chunks)
new_splits.extend(cur_splits2)
return new_splits
def _postprocess_splits(self, docs: List[TextSplit]) -> List[TextSplit]:
"""Post-process splits."""
# TODO: prune text splits, remove empty spaces
new_docs = []
for doc in docs:
if doc.text_chunk.replace(" ", "") == "":
continue
new_docs.append(doc)
return new_docs
def split_text(self, text: str, extra_info_str: Optional[str] = None) -> List[str]:
"""Split incoming text and return chunks."""
text_splits = self.split_text_with_overlaps(text, extra_info_str=extra_info_str)
return [text_split.text_chunk for text_split in text_splits]
def split_text_with_overlaps(
self, text: str, extra_info_str: Optional[str] = None
) -> List[TextSplit]:
"""Split incoming text and return chunks with overlap size."""
if text == "":
return []
# NOTE: Consider extra info str that will be added to the chunk at query time
# This reduces the effective chunk size that we can have
if extra_info_str is not None:
# NOTE: extra 2 newline chars for formatting when prepending in query
num_extra_tokens = len(self.tokenizer(f"{extra_info_str}\n\n")) + 1
effective_chunk_size = self._chunk_size - num_extra_tokens
if effective_chunk_size <= 0:
raise ValueError(
"Effective chunk size is non positive after considering extra_info"
)
else:
effective_chunk_size = self._chunk_size
# First we naively split the large input into a bunch of smaller ones.
splits = text.split(self._separator)
splits = self._preprocess_splits(splits, effective_chunk_size)
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
docs: List[TextSplit] = []
start_idx = 0
cur_idx = 0
cur_total = 0
prev_idx = 0 # store the previous end index
while cur_idx < len(splits):
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
if num_cur_tokens > effective_chunk_size:
raise ValueError(
"A single term is larger than the allowed chunk size.\n"
f"Term size: {num_cur_tokens}\n"
f"Chunk size: {self._chunk_size}"
f"Effective chunk size: {effective_chunk_size}"
)
# If adding token to current_doc would exceed the chunk size:
# 1. First verify with tokenizer that current_doc
# 1. Update the docs list
if cur_total + num_cur_tokens > effective_chunk_size:
# NOTE: since we use a proxy for counting tokens, we want to
# run tokenizer across all of current_doc first. If
# the chunk is too big, then we will reduce text in pieces
cur_idx = self._reduce_chunk_size(start_idx, cur_idx, splits)
overlap = 0
# after first round, check if last chunk ended after this chunk begins
if prev_idx > 0 and prev_idx > start_idx:
overlap = sum([len(splits[i]) for i in range(start_idx, prev_idx)])
docs.append(
TextSplit(self._separator.join(splits[start_idx:cur_idx]), overlap)
)
prev_idx = cur_idx
# 2. Shrink the current_doc (from the front) until it is gets smaller
# than the overlap size
# NOTE: because counting tokens individually is an imperfect
# proxy (but much faster proxy) for the total number of tokens consumed,
# we need to enforce that start_idx <= cur_idx, otherwise
# start_idx has a chance of going out of bounds.
while cur_total > self._chunk_overlap and start_idx < cur_idx:
# # call tokenizer on entire overlap
# cur_total = self.tokenizer()
cur_num_tokens = max(len(self.tokenizer(splits[start_idx])), 1)
cur_total -= cur_num_tokens
start_idx += 1
# NOTE: This is a hack, make more general
if start_idx == cur_idx:
cur_total = 0
# Build up the current_doc with term d, and update the total counter with
            # the number of tokens in d, wrt self.tokenizer
# we reassign cur_token and num_cur_tokens, because cur_idx
# may have changed
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
cur_total += num_cur_tokens
cur_idx += 1
overlap = 0
# after first round, check if last chunk ended after this chunk begins
if prev_idx > start_idx:
overlap = sum([len(splits[i]) for i in range(start_idx, prev_idx)]) + len(
range(start_idx, prev_idx)
)
docs.append(TextSplit(self._separator.join(splits[start_idx:cur_idx]), overlap))
# run postprocessing to remove blank spaces
docs = self._postprocess_splits(docs)
return docs
def truncate_text(self, text: str) -> str:
"""Truncate text in order to fit the underlying chunk size."""
if text == "":
return ""
# First we naively split the large input into a bunch of smaller ones.
splits = text.split(self._separator)
splits = self._preprocess_splits(splits, self._chunk_size)
start_idx = 0
cur_idx = 0
cur_total = 0
while cur_idx < len(splits):
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
if cur_total + num_cur_tokens > self._chunk_size:
cur_idx = self._reduce_chunk_size(start_idx, cur_idx, splits)
break
cur_total += num_cur_tokens
cur_idx += 1
return self._separator.join(splits[start_idx:cur_idx])
class SentenceSplitter(TextSplitter):
"""Split text with a preference for complete sentences.
    In general, this class tries to keep sentences and paragraphs together. As a
    result, compared to the original TokenTextSplitter, it is less likely to leave
    hanging sentences or parts of sentences at the end of the node chunk.
"""
def __init__(
self,
separator: str = " ",
chunk_size: int = 4000,
chunk_overlap: int = 200,
tokenizer: Optional[Callable] = None,
backup_separators: Optional[List[str]] = ["\n"],
paragraph_separator: Optional[str] = "\n\n\n",
chunking_tokenizer_fn: Optional[Callable[[str], List[str]]] = None,
secondary_chunking_regex: Optional[str] = "[^,.;。]+[,.;。]?",
):
"""Initialize with parameters."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._separator = separator
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self.tokenizer = tokenizer or globals_helper.tokenizer
self._backup_separators = backup_separators
if chunking_tokenizer_fn is None:
import nltk.tokenize.punkt as pkt
class CustomLanguageVars(pkt.PunktLanguageVars):
_period_context_fmt = r"""
%(SentEndChars)s # a potential sentence ending
(\)\"\s)\s* # other end chars and
# any amount of white space
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
(?P<next_tok>\S+) # or whitespace and some other token
))"""
custom_tknzr = pkt.PunktSentenceTokenizer(lang_vars=CustomLanguageVars())
chunking_tokenizer_fn = custom_tknzr.tokenize
self.paragraph_separator = paragraph_separator
self.chunking_tokenizer_fn = chunking_tokenizer_fn
self.second_chunking_regex = secondary_chunking_regex
"""
By default we use the second chunking regex "[^,.;]+[,.;]?".
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
period, or semicolon. The regular expression will also capture the
delimiters themselves as separate items in the list of phrases.
"""
def _postprocess_splits(self, docs: List[TextSplit]) -> List[TextSplit]:
"""Post-process splits."""
# TODO: prune text splits, remove empty spaces
new_docs = []
for doc in docs:
if doc.text_chunk.replace(" ", "") == "":
continue
new_docs.append(doc)
return new_docs
def split_text_with_overlaps(
self, text: str, extra_info_str: Optional[str] = None
) -> List[TextSplit]:
"""
Split incoming text and return chunks with overlap size.
Has a preference for complete sentences, phrases, and minimal overlap.
"""
if text == "":
return []
# NOTE: Consider extra info str that will be added to the chunk at query time
# This reduces the effective chunk size that we can have
if extra_info_str is not None:
# NOTE: extra 2 newline chars for formatting when prepending in query
num_extra_tokens = len(self.tokenizer(f"{extra_info_str}\n\n")) + 1
effective_chunk_size = self._chunk_size - num_extra_tokens
if effective_chunk_size <= 0:
raise ValueError(
"Effective chunk size is non positive after considering extra_info"
)
else:
effective_chunk_size = self._chunk_size
# First we split paragraphs using separator
splits = text.split(self.paragraph_separator)
# Merge paragraphs that are too small.
idx = 0
while idx < len(splits):
if idx < len(splits) - 1 and len(splits[idx]) < effective_chunk_size:
splits[idx] = "\n\n".join([splits[idx], splits[idx + 1]])
splits.pop(idx + 1)
else:
idx += 1
# Next we split the text using the chunk tokenizer fn,
# which defaults to the sentence tokenizer from nltk.
chunked_splits = [self.chunking_tokenizer_fn(text) for text in splits]
splits = [chunk for split in chunked_splits for chunk in split]
# Check if any sentences exceed the chunk size. If they do, split again
# using the second chunk separator. If it any still exceed,
# use the default separator (" ").
@dataclass
class Split:
text: str # the split text
is_sentence: bool # save whether this is a full sentence
new_splits: List[Split] = []
for split in splits:
split_len = len(self.tokenizer(split))
if split_len <= effective_chunk_size:
new_splits.append(Split(split, True))
else:
if self.second_chunking_regex is not None:
import re
# Default regex is "[^,\.;]+[,\.;]?"
splits2 = re.findall(self.second_chunking_regex, split)
else:
splits2 = [split]
for split2 in splits2:
if len(self.tokenizer(split2)) <= effective_chunk_size:
new_splits.append(Split(split2, False))
else:
splits3 = split2.split(self._separator)
new_splits.extend([Split(split3, False) for split3 in splits3])
# Create the list of text splits by combining smaller chunks.
docs: List[TextSplit] = []
cur_doc_list: List[str] = []
cur_tokens = 0
while len(new_splits) > 0:
cur_token = new_splits[0]
cur_len = len(self.tokenizer(cur_token.text))
if cur_len > effective_chunk_size:
raise ValueError("Single token exceed chunk size")
if cur_tokens + cur_len > effective_chunk_size:
docs.append(TextSplit("".join(cur_doc_list).strip()))
cur_doc_list = []
cur_tokens = 0
else:
if (
cur_token.is_sentence
or cur_tokens + cur_len < effective_chunk_size - self._chunk_overlap
):
cur_tokens += cur_len
cur_doc_list.append(cur_token.text)
new_splits.pop(0)
else:
docs.append(TextSplit("".join(cur_doc_list).strip()))
cur_doc_list = []
cur_tokens = 0
docs.append(TextSplit("".join(cur_doc_list).strip()))
# run postprocessing to remove blank spaces
docs = self._postprocess_splits(docs)
return docs
def split_text(self, text: str, extra_info_str: Optional[str] = None) -> List[str]:
"""Split incoming text and return chunks."""
text_splits = self.split_text_with_overlaps(text, extra_info_str=extra_info_str)
return [text_split.text_chunk for text_split in text_splits]
__all__ = ["TextSplitter", "TokenTextSplitter", "SentenceSplitter"]
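# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# TokenTextSplitter and SentenceSplitter expose the same split_text() interface;
# the chunk_size/chunk_overlap values below are illustrative, not library defaults.
if __name__ == "__main__":
    splitter = TokenTextSplitter(chunk_size=20, chunk_overlap=5)
    sample = (
        "LlamaIndex splits long documents into chunks. "
        "Each chunk must fit within the LLM context window. "
        "Overlapping tokens preserve continuity between neighbouring chunks."
    )
    for chunk in splitter.split_text(sample):
        print(repr(chunk))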
| [] |
2024-01-10 | Fir121/bpdc_gpt_chatbot | chatbot~Lib~site-packages~llama_index~indices~struct_store~pandas_query.py | """Default query for GPTPandasIndex."""
import logging
from typing import Any, Callable, Optional
import pandas as pd
from langchain.input import print_text
from llama_index.data_structs.table_v2 import PandasStructTable
from llama_index.indices.query.base import BaseGPTIndexQuery
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.default_prompts import DEFAULT_PANDAS_PROMPT
from llama_index.prompts.prompts import PandasPrompt
from llama_index.response.schema import Response
logger = logging.getLogger(__name__)
DEFAULT_INSTRUCTION_STR = (
"We wish to convert this query to executable Python code using Pandas.\n"
"The final line of code should be a Python expression that can be called "
"with the `eval()` function. This expression should represent a solution "
"to the query."
)
def default_output_processor(
output: str, df: pd.DataFrame, **output_kwargs: Any
) -> str:
"""Process outputs in a default manner."""
import ast
import sys
import traceback
if sys.version_info < (3, 9):
logger.warn(
"Python version must be >= 3.9 in order to use "
"the default output processor, which executes "
"the Python query. Instead, we will return the "
"raw Python instructions as a string."
)
return output
local_vars = {"df": df}
# NOTE: inspired from langchain's tool
# see langchain.tools.python.tool (PythonAstREPLTool)
try:
tree = ast.parse(output)
module = ast.Module(tree.body[:-1], type_ignores=[])
exec(ast.unparse(module), {}, local_vars) # type: ignore
module_end = ast.Module(tree.body[-1:], type_ignores=[])
module_end_str = ast.unparse(module_end) # type: ignore
try:
return str(eval(module_end_str, {}, local_vars))
except Exception as e:
raise e
except Exception as e:
err_string = (
"There was an error running the output as Python code. "
f"Error message: {e}"
)
traceback.print_exc()
return err_string
class GPTNLPandasIndexQuery(BaseGPTIndexQuery[PandasStructTable]):
"""GPT Pandas query.
Convert natural language to Pandas python code.
.. code-block:: python
response = index.query("<query_str>", mode="default")
Args:
df (pd.DataFrame): Pandas dataframe to use.
instruction_str (Optional[str]): Instruction string to use.
output_processor (Optional[Callable[[str], str]]): Output processor.
A callable that takes in the output string, pandas DataFrame,
and any output kwargs and returns a string.
pandas_prompt (Optional[PandasPrompt]): Pandas prompt to use.
head (int): Number of rows to show in the table context.
"""
def __init__(
self,
index_struct: PandasStructTable,
df: Optional[pd.DataFrame] = None,
instruction_str: Optional[str] = None,
output_processor: Optional[Callable] = None,
pandas_prompt: Optional[PandasPrompt] = None,
output_kwargs: Optional[dict] = None,
head: int = 5,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(index_struct=index_struct, **kwargs)
if df is None:
raise ValueError("df must be provided.")
self.df = df
self._head = head
self._pandas_prompt = pandas_prompt or DEFAULT_PANDAS_PROMPT
self._instruction_str = instruction_str or DEFAULT_INSTRUCTION_STR
self._output_processor = output_processor or default_output_processor
self._output_kwargs = output_kwargs or {}
def _get_table_context(self) -> str:
"""Get table context."""
return str(self.df.head(self._head))
def query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
context = self._get_table_context()
pandas_response_str, _ = self._service_context.llm_predictor.predict(
self._pandas_prompt,
df_str=context,
query_str=query_bundle.query_str,
instruction_str=self._instruction_str,
)
if self._verbose:
print_text(f"> Pandas Instructions:\n" f"```\n{pandas_response_str}\n```\n")
pandas_output = self._output_processor(
pandas_response_str,
self.df,
**self._output_kwargs,
)
if self._verbose:
print_text(f"> Pandas Output: {pandas_output}\n")
response_extra_info = {
"pandas_instruction_str": pandas_response_str,
}
return Response(response=pandas_output, extra_info=response_extra_info)
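# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# default_output_processor() is a pure helper: it runs the generated Pandas
# program against a DataFrame and returns the final expression's value as a
# string (Python >= 3.9). The DataFrame and query string below are illustrative.
if __name__ == "__main__":
    df = pd.DataFrame({"city": ["Dubai", "Pilani"], "population": [3_500_000, 30_000]})
    print(default_output_processor("df['population'].max()", df))  # prints "3500000"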
| [] |
2024-01-10 | songweige/sd-webui-rich-text | scripts~models~region_diffusion_xl.py | # Adapted from diffusers_official.pipelines.stable_diffusion.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.py
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers_official.image_processor import VaeImageProcessor
from diffusers_official.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
# from diffusers_official.models import AutoencoderKL, UNet2DConditionModel
from diffusers_official.models import AutoencoderKL
from diffusers_official.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers_official.schedulers import EulerDiscreteScheduler
from diffusers_official.utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from diffusers_official.pipelines.pipeline_utils import DiffusionPipeline
from diffusers_official.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
### cutomized modules
import collections
from functools import partial
from diffusers_official.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from scripts.models.unet_2d_condition import UNet2DConditionModel
from scripts.models.utils.attention_utils import CrossAttentionLayers_XL
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
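# --- Hedged numeric sketch (added for illustration, not part of the original pipeline) ---
# guidance_rescale interpolates between the raw CFG output (0.0) and a version
# rescaled to the per-sample std of the text-conditioned prediction (1.0).
# Shapes and the factor 7.5 below are illustrative.
if __name__ == "__main__":
    _noise_pred_text = torch.randn(2, 4, 8, 8)
    _noise_cfg = 7.5 * _noise_pred_text  # stand-in for a CFG-combined prediction
    _rescaled = rescale_noise_cfg(_noise_cfg, _noise_pred_text, guidance_rescale=0.7)
    print(_noise_cfg.std().item(), _rescaled.std().item())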
class RegionDiffusionXL(DiffusionPipeline, FromSingleFileMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
- *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
def __init__(
self,
load_path: str = "stabilityai/stable-diffusion-xl-base-1.0",
device: str = "cuda",
force_zeros_for_empty_prompt: bool = True,
):
super().__init__()
# self.register_modules(
# vae=vae,
# text_encoder=text_encoder,
# text_encoder_2=text_encoder_2,
# tokenizer=tokenizer,
# tokenizer_2=tokenizer_2,
# unet=unet,
# scheduler=scheduler,
# )
self.model_id = load_path
variant = "fp16" if "stable-diffusion-xl" in load_path else None
        for load_attempt in range(10):
            print(f'[INFO] loading stable diffusion {self.model_id} ... attempt {load_attempt}')
# try:
# 1. Load the autoencoder model which will be used to decode the latents into image space.
self.vae = AutoencoderKL.from_pretrained(load_path, subfolder="vae", use_safetensors=True, variant=variant).to(device)
# 2. Load the tokenizer and text encoder to tokenize and encode the text.
self.tokenizer = CLIPTokenizer.from_pretrained(load_path, subfolder='tokenizer')
self.tokenizer_2 = CLIPTokenizer.from_pretrained(load_path, subfolder='tokenizer_2')
self.text_encoder = CLIPTextModel.from_pretrained(load_path, subfolder='text_encoder', torch_dtype=torch.float16, use_safetensors=True, variant=variant).to(device)
self.text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(load_path, subfolder='text_encoder_2', torch_dtype=torch.float16, use_safetensors=True, variant=variant).to(device)
# 3. The UNet model for generating the latents.
self.unet = UNet2DConditionModel.from_pretrained(load_path, subfolder="unet", torch_dtype=torch.float16, use_safetensors=True, variant=variant).to(device)
# 4. Scheduler.
self.scheduler = EulerDiscreteScheduler.from_pretrained(load_path, subfolder="scheduler")
# except Exception as e:
# print(f'[INFO] failed to load stable diffusion {self.model_id} ... error {e}')
# continue
if self.unet is not None and self.vae is not None and self.text_encoder is not None:
break
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.default_sample_size = self.unet.config.sample_size
self.watermark = StableDiffusionXLWatermarker()
self.device_type = device
self.masks = []
self.attention_maps = None
self.selfattn_maps = None
self.crossattn_maps = None
self.color_loss = torch.nn.functional.mse_loss
self.forward_hooks = []
self.forward_replacement_hooks = []
# Overwriting the method from diffusers_official.pipelines.diffusion_pipeline.DiffusionPipeline
@property
def device(self) -> torch.device:
r"""
Returns:
`torch.device`: The torch device on which the pipeline is located.
"""
return torch.device(self.device_type)
# Copied from diffusers_official.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding.
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers_official.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers_official.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding.
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers_official.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
def enable_sequential_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
`torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.
Note that offloading happens on a submodule basis. Memory savings are higher than with
`enable_model_cpu_offload`, but performance is lower.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
from accelerate import cpu_offload
else:
raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
for cpu_offloaded_model in [self.unet, self.text_encoder, self.text_encoder_2, self.vae]:
cpu_offload(cpu_offloaded_model, device)
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
model_sequence = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
model_sequence.extend([self.unet, self.vae])
hook = None
for cpu_offloaded_model in model_sequence:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers_official.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device(self):
r"""
Returns the device on which the pipeline's models will be executed. After calling
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
hooks.
"""
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(module, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def encode_prompt(
self,
prompt,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
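
        Example (an illustrative sketch, not part of the original API docs; assumes `pipe` is an instance of
        this pipeline already placed on a CUDA device):

        ```py
        >>> embeds, neg_embeds, pooled, neg_pooled = pipe.encode_prompt(
        ...     prompt="a garden of glowing mushrooms",
        ...     device=torch.device("cuda"),
        ...     num_images_per_prompt=1,
        ...     do_classifier_free_guidance=True,
        ...     negative_prompt="blurry, low quality",
        ... )
        ```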
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
            batch_size_neg = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
            # `negative_prompt` may be None or a plain string; fall back to the prompt batch size in that case
            batch_size_neg = len(negative_prompt) if isinstance(negative_prompt, list) else batch_size
        else:
            batch_size = prompt_embeds.shape[0]
            batch_size_neg = batch_size
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(
text_input_ids.to(device),
output_hidden_states=True,
)
                # We are always interested only in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
# elif batch_size != len(negative_prompt):
# raise ValueError(
# f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
# f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
# " the batch size of `prompt`."
# )
else:
uncond_tokens = negative_prompt
negative_prompt_embeds_list = []
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
                # We are always interested only in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(
batch_size_neg * num_images_per_prompt, seq_len, -1
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
bs_embed = pooled_prompt_embeds.shape[0]
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
bs_embed = negative_pooled_prompt_embeds.shape[0]
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers_official.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
pooled_prompt_embeds=None,
negative_pooled_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if prompt_embeds is not None and pooled_prompt_embeds is None:
raise ValueError(
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
)
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
raise ValueError(
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
)
# Copied from diffusers_official.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
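        # SDXL micro-conditioning: the original size, top-left crop coordinates, and target size are
        # flattened into six integers. Each integer is embedded with `addition_time_embed_dim` channels
        # inside the UNet, which is why the expected width checked below is
        # `addition_time_embed_dim * len(add_time_ids)` plus the pooled text-embedding `projection_dim`.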
add_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
return add_time_ids
@torch.no_grad()
def sample(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Optional[Tuple[int, int]] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Optional[Tuple[int, int]] = None,
# Rich-Text args
use_guidance: bool = False,
inject_selfattn: float = 0.0,
inject_background: float = 0.0,
text_format_dict: Optional[dict] = None,
run_rich_text: bool = False,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
TODO
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
TODO
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
TODO
Examples:
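            (An illustrative sketch, not taken from this file; assumes `pipe` is an already-constructed
            instance of this pipeline loaded on a CUDA device.)

            ```py
            >>> image = pipe.sample(
            ...     "a photo of an astronaut riding a horse on mars",
            ...     num_inference_steps=30,
            ...     guidance_scale=5.0,
            ... ).images[0]
            ```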
Returns:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
"""
# 0. Default height and width to unet
height = height or self.default_sample_size * self.vae_scale_factor
width = width or self.default_sample_size * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
# TODO: support batched prompts
batch_size = 1
# batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
add_time_ids = self._get_add_time_ids(
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
# make sure the VAE is in float32 mode, as it overflows in float16
self.vae.to(dtype=torch.float32)
# 8. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
if run_rich_text:
if inject_selfattn > 0 or inject_background > 0:
latents_reference = latents.clone().detach()
n_styles = prompt_embeds.shape[0]-1
self.masks = [mask.to(dtype=prompt_embeds.dtype) for mask in self.masks]
print(n_styles, len(self.masks))
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(self.scheduler.timesteps):
# predict the noise residual
with torch.no_grad():
feat_inject_step = t > (1-inject_selfattn) * 1000
background_inject_step = i < inject_background * len(self.scheduler.timesteps)
latent_model_input = self.scheduler.scale_model_input(latents, t)
# import ipdb;ipdb.set_trace()
# unconditional prediction
noise_pred_uncond_cur = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds[:1],
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs={"text_embeds": add_text_embeds[:1], "time_ids": add_time_ids[:1]}
)['sample']
# tokens without any style or footnote
self.register_fontsize_hooks(text_format_dict)
noise_pred_text_cur = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds[-1:],
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs={"text_embeds": add_text_embeds[-1:], "time_ids": add_time_ids[:1]}
)['sample']
self.remove_fontsize_hooks()
if inject_selfattn > 0 or inject_background > 0:
latent_reference_model_input = self.scheduler.scale_model_input(latents_reference, t)
noise_pred_uncond_refer = self.unet(latent_reference_model_input, t, encoder_hidden_states=prompt_embeds[:1],
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs={"text_embeds": add_text_embeds[:1], "time_ids": add_time_ids[:1]}
)['sample']
self.register_selfattn_hooks(feat_inject_step)
noise_pred_text_refer = self.unet(latent_reference_model_input, t, encoder_hidden_states=prompt_embeds[-1:],
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs={"text_embeds": add_text_embeds[-1:], "time_ids": add_time_ids[:1]}
)['sample']
self.remove_selfattn_hooks()
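                        # Compose region-wise predictions: each entry of self.masks selects the spatial region
                        # governed by one rich-text style span, and self.masks[-1] covers the plain (unstyled)
                        # tokens; the per-region noise estimates are blended into a single prediction below.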
noise_pred_uncond = noise_pred_uncond_cur * self.masks[-1]
noise_pred_text = noise_pred_text_cur * self.masks[-1]
# tokens with style or footnote
for style_i, mask in enumerate(self.masks[:-1]):
self.register_replacement_hooks(feat_inject_step)
noise_pred_text_cur = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds[style_i+1:style_i+2],
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs={"text_embeds": add_text_embeds[style_i+1:style_i+2], "time_ids": add_time_ids[:1]}
)['sample']
self.remove_replacement_hooks()
noise_pred_uncond = noise_pred_uncond + noise_pred_uncond_cur*mask
noise_pred_text = noise_pred_text + noise_pred_text_cur*mask
# perform guidance
noise_pred = noise_pred_uncond + guidance_scale * \
(noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# TODO: Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
# noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
raise NotImplementedError
if inject_selfattn > 0 or background_inject_step > 0:
noise_pred_refer = noise_pred_uncond_refer + guidance_scale * \
(noise_pred_text_refer - noise_pred_uncond_refer)
# compute the previous noisy sample x_t -> x_t-1
latents_reference = self.scheduler.step(torch.cat([noise_pred, noise_pred_refer]), t,
torch.cat([latents, latents_reference]))[
'prev_sample']
latents, latents_reference = torch.chunk(
latents_reference, 2, dim=0)
else:
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents)[
'prev_sample']
# apply guidance
if use_guidance and t < text_format_dict['guidance_start_step']:
with torch.enable_grad():
if not latents.requires_grad:
latents.requires_grad = True
# import ipdb;ipdb.set_trace()
# latents_0 = self.predict_x0(latents, noise_pred, t).to(dtype=latents.dtype)
latents_0 = self.predict_x0(latents, noise_pred, t).to(dtype=torch.bfloat16)
latents_inp = latents_0 / self.vae.config.scaling_factor
# imgs = self.vae.decode(latents_inp.to(dtype=torch.float32)).sample
imgs = self.vae.to(dtype=latents_inp.dtype).decode(latents_inp).sample
imgs = (imgs / 2 + 0.5).clamp(0, 1)
loss_total = 0.
for attn_map, rgb_val in zip(text_format_dict['color_obj_atten'], text_format_dict['target_RGB']):
avg_rgb = (
imgs*attn_map[:, 0]).sum(2).sum(2)/attn_map[:, 0].sum()
loss = self.color_loss(
avg_rgb, rgb_val[:, :, 0, 0])*100
loss_total += loss
loss_total.backward()
latents = (
latents - latents.grad * text_format_dict['color_guidance_weight'] * text_format_dict['color_obj_atten_all']).detach().clone().to(dtype=prompt_embeds.dtype)
self.unet.to(device=latents.device)
# apply background injection
if i == int(inject_background * len(self.scheduler.timesteps)) and inject_background > 0:
latents = latents_reference * self.masks[-1] + latents * \
(1-self.masks[-1])
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
else:
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(latents.dtype)
self.vae.decoder.conv_in.to(latents.dtype)
self.vae.decoder.mid_block.to(latents.dtype)
else:
latents = latents.float()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
return StableDiffusionXLPipelineOutput(images=image)
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
def predict_x0(self, x_t, eps_t, t):
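        # Estimate the clean latent x_0 from the noisy latent x_t and the predicted noise eps_t via the
        # standard DDPM/DDIM relation x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps_t) / sqrt(alpha_bar_t),
        # where alpha_bar_t = self.scheduler.alphas_cumprod[t].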
alpha_t = self.scheduler.alphas_cumprod[t.cpu().long().item()]
return (x_t - eps_t * torch.sqrt(1-alpha_t)) / torch.sqrt(alpha_t)
def register_tokenmap_hooks(self):
r"""Function for registering hooks during evaluation.
We mainly store activation maps averaged over queries.
"""
self.forward_hooks = []
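        # Cross-attention probabilities attend over the 77 CLIP text tokens (hence the `== 77` asserts
        # below), while self-attention probabilities attend over spatial positions. Maps are accumulated
        # per layer only after that layer has been called more than 10 times, skipping the earliest steps.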
def save_activations(selfattn_maps, crossattn_maps, n_maps, name, module, inp, out):
r"""
PyTorch Forward hook to save outputs at each forward pass.
"""
# out[0] - final output of attention layer
# out[1] - attention probability matrices
if name in n_maps:
n_maps[name] += 1
else:
n_maps[name] = 1
if 'attn2' in name:
assert out[1][0].shape[-1] == 77
if name in CrossAttentionLayers_XL and n_maps[name] > 10:
# if n_maps[name] > 10:
if name in crossattn_maps:
crossattn_maps[name] += out[1][0].detach().cpu()[1:2]
else:
crossattn_maps[name] = out[1][0].detach().cpu()[1:2]
# For visualization
# crossattn_maps[name].append(out[1][0].detach().cpu()[1:2])
else:
assert out[1][0].shape[-1] != 77
# if name in SelfAttentionLayers and n_maps[name] > 10:
if n_maps[name] > 10:
if name in selfattn_maps:
selfattn_maps[name] += out[1][0].detach().cpu()[1:2]
else:
selfattn_maps[name] = out[1][0].detach().cpu()[1:2]
selfattn_maps = collections.defaultdict(list)
crossattn_maps = collections.defaultdict(list)
n_maps = collections.defaultdict(list)
for name, module in self.unet.named_modules():
leaf_name = name.split('.')[-1]
if 'attn' in leaf_name:
# Register hook to obtain outputs at every attention layer.
self.forward_hooks.append(module.register_forward_hook(
partial(save_activations, selfattn_maps,
crossattn_maps, n_maps, name)
))
# attention_dict is a dictionary containing attention maps for every attention layer
self.selfattn_maps = selfattn_maps
self.crossattn_maps = crossattn_maps
self.n_maps = n_maps
def remove_tokenmap_hooks(self):
for hook in self.forward_hooks:
hook.remove()
self.selfattn_maps = None
self.crossattn_maps = None
self.n_maps = None
def register_replacement_hooks(self, feat_inject_step=False):
r"""Function for registering hooks to replace self attention.
"""
self.forward_replacement_hooks = []
        def replace_activations(name, module, args):
            r"""
            PyTorch forward pre-hook that injects the stored self-attention maps into the layer's inputs.
            """
if 'attn1' in name:
modified_args = (args[0], self.self_attention_maps_cur[name].to(args[0].device))
return modified_args
# cross attention injection
# elif 'attn2' in name:
# modified_map = {
# 'reference': self.self_attention_maps_cur[name],
# 'inject_pos': self.inject_pos,
# }
# modified_args = (args[0], modified_map)
# return modified_args
        def replace_resnet_activations(name, module, args):
            r"""
            PyTorch forward pre-hook that injects the stored residual features into the resnet block's inputs.
            """
modified_args = (args[0], args[1],
self.self_attention_maps_cur[name].to(args[0].device))
return modified_args
for name, module in self.unet.named_modules():
leaf_name = name.split('.')[-1]
if 'attn' in leaf_name and feat_inject_step:
# Register hook to obtain outputs at every attention layer.
self.forward_replacement_hooks.append(module.register_forward_pre_hook(
partial(replace_activations, name)
))
if name == 'up_blocks.1.resnets.1' and feat_inject_step:
# Register hook to obtain outputs at every attention layer.
self.forward_replacement_hooks.append(module.register_forward_pre_hook(
partial(replace_resnet_activations, name)
))
def remove_replacement_hooks(self):
for hook in self.forward_replacement_hooks:
hook.remove()
def register_selfattn_hooks(self, feat_inject_step=False):
r"""Function for registering hooks during evaluation.
We mainly store activation maps averaged over queries.
"""
self.selfattn_forward_hooks = []
def save_activations(activations, name, module, inp, out):
r"""
PyTorch Forward hook to save outputs at each forward pass.
"""
# out[0] - final output of attention layer
# out[1] - attention probability matrix
if 'attn2' in name:
assert out[1][1].shape[-1] == 77
# cross attention injection
# activations[name] = out[1][1].detach()
else:
assert out[1][1].shape[-1] != 77
activations[name] = out[1][1].detach().cpu()
def save_resnet_activations(activations, name, module, inp, out):
r"""
PyTorch Forward hook to save outputs at each forward pass.
"""
# out[0] - final output of residual layer
# out[1] - residual hidden feature
# import ipdb;ipdb.set_trace()
# assert out[1].shape[-1] == 64
activations[name] = out[1].detach().cpu()
attention_dict = collections.defaultdict(list)
for name, module in self.unet.named_modules():
leaf_name = name.split('.')[-1]
if 'attn' in leaf_name and feat_inject_step:
# Register hook to obtain outputs at every attention layer.
self.selfattn_forward_hooks.append(module.register_forward_hook(
partial(save_activations, attention_dict, name)
))
if name == 'up_blocks.1.resnets.1' and feat_inject_step:
self.selfattn_forward_hooks.append(module.register_forward_hook(
partial(save_resnet_activations, attention_dict, name)
))
# attention_dict is a dictionary containing attention maps for every attention layer
self.self_attention_maps_cur = attention_dict
def remove_selfattn_hooks(self):
for hook in self.selfattn_forward_hooks:
hook.remove()
    def register_fontsize_hooks(self, text_format_dict={}):
        r"""Function for registering hooks that reweight cross-attention according to font size.
        """
self.forward_fontsize_hooks = []
        def adjust_attn_weights(name, module, args):
            r"""
            PyTorch forward pre-hook that passes font-size attention weights into cross-attention layers.
            """
if 'attn2' in name:
modified_args = (args[0], None, attn_weights)
return modified_args
        if text_format_dict.get('word_pos') is not None and text_format_dict.get('font_size') is not None:
attn_weights = {'word_pos': text_format_dict['word_pos'], 'font_size': text_format_dict['font_size']}
else:
attn_weights = None
for name, module in self.unet.named_modules():
leaf_name = name.split('.')[-1]
if 'attn' in leaf_name and attn_weights is not None:
# Register hook to obtain outputs at every attention layer.
self.forward_fontsize_hooks.append(module.register_forward_pre_hook(
partial(adjust_attn_weights, name)
))
def remove_fontsize_hooks(self):
for hook in self.forward_fontsize_hooks:
            hook.remove()


# File: diffusers_official/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from ...image_processor import VaeImageProcessor
from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from ...models import AutoencoderKL, UNet2DConditionModel
from ...models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline
from . import StableDiffusionXLPipelineOutput
from .watermark import StableDiffusionXLWatermarker
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import StableDiffusionXLImg2ImgPipeline
>>> from diffusers.utils import load_image
>>> pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
... "stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
>>> init_image = load_image(url).convert("RGB")
>>> prompt = "a photo of an astronaut riding a horse on mars"
>>> image = pipe(prompt, image=init_image).images[0]
```
"""
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
class StableDiffusionXLImg2ImgPipeline(DiffusionPipeline, FromSingleFileMixin):
r"""
    Pipeline for image-to-image generation using Stable Diffusion XL.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
- *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`]
- *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
as well as the following saving methods:
- *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
_optional_components = ["tokenizer", "text_encoder"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
text_encoder_2: CLIPTextModelWithProjection,
tokenizer: CLIPTokenizer,
tokenizer_2: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
requires_aesthetics_score: bool = False,
force_zeros_for_empty_prompt: bool = True,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
text_encoder_2=text_encoder_2,
tokenizer=tokenizer,
tokenizer_2=tokenizer_2,
unet=unet,
scheduler=scheduler,
)
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.watermark = StableDiffusionXLWatermarker()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
def enable_vae_slicing(self):
r"""
Enable sliced VAE decoding.
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
steps. This is useful to save some memory and allow larger batch sizes.
"""
self.vae.enable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
def disable_vae_slicing(self):
r"""
Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_slicing()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
def enable_vae_tiling(self):
r"""
Enable tiled VAE decoding.
When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
"""
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
def disable_vae_tiling(self):
r"""
Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
computing decoding in one step.
"""
self.vae.disable_tiling()
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.enable_sequential_cpu_offload
def enable_sequential_cpu_offload(self, gpu_id=0):
r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the
        unet, text encoders, and vae have their state dicts saved to CPU and then are moved to
        `torch.device('meta')` and loaded to the GPU only when their specific submodule has its `forward`
        method called. Note that offloading happens on a submodule basis. Memory savings are higher than with
        `enable_model_cpu_offload`, but performance is lower.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
from accelerate import cpu_offload
else:
raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
for cpu_offloaded_model in [self.unet, self.text_encoder, self.text_encoder_2, self.vae]:
cpu_offload(cpu_offloaded_model, device)
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.enable_model_cpu_offload
def enable_model_cpu_offload(self, gpu_id=0):
r"""
Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
`enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
"""
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
device = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=True)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
model_sequence = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
model_sequence.extend([self.unet, self.vae])
hook = None
for cpu_offloaded_model in model_sequence:
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device(self):
r"""
Returns the device on which the pipeline's models will be executed. After calling
`pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
hooks.
"""
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(module, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
def encode_prompt(
self,
prompt,
device: Optional[torch.device] = None,
num_images_per_prompt: int = 1,
do_classifier_free_guidance: bool = True,
negative_prompt=None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
lora_scale: Optional[float] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
lora_scale (`float`, *optional*):
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
"""
device = device or self._execution_device
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
self._lora_scale = lora_scale
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
# Define tokenizers and text encoders
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
text_encoders = (
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
)
if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
prompt_embeds_list = []
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, tokenizer)
text_inputs = tokenizer(
prompt,
padding="max_length",
max_length=tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = text_encoder(
text_input_ids.to(device),
output_hidden_states=True,
)
                # We are always interested only in the pooled output of the final text encoder
pooled_prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.hidden_states[-2]
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
prompt_embeds_list.append(prompt_embeds)
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
# get unconditional embeddings for classifier free guidance
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
elif do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt or ""
uncond_tokens: List[str]
if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
negative_prompt_embeds_list = []
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                # textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
negative_prompt_embeds = text_encoder(
uncond_input.input_ids.to(device),
output_hidden_states=True,
)
                # We are always interested only in the pooled output of the final text encoder
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(
batch_size * num_images_per_prompt, seq_len, -1
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
negative_prompt_embeds_list.append(negative_prompt_embeds)
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
bs_embed = pooled_prompt_embeds.shape[0]
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
bs_embed * num_images_per_prompt, -1
)
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None
):
if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
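        # Worked example: num_inference_steps=50, strength=0.3 -> init_timestep=15, t_start=35, so only
        # the final 15 denoising steps are run and the input image is only partially noised.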
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
return timesteps, num_inference_steps - t_start
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
# Offload text encoder if `enable_model_cpu_offload` was enabled
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.text_encoder_2.to("cpu")
torch.cuda.empty_cache()
image = image.to(device=device, dtype=dtype)
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
# make sure the VAE is in float32 mode, as it overflows in float16
image = image.float()
self.vae.to(dtype=torch.float32)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
elif isinstance(generator, list):
init_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.vae.encode(image).latent_dist.sample(generator)
self.vae.to(dtype)
init_latents = init_latents.to(dtype)
init_latents = self.vae.config.scaling_factor * init_latents
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
# expand init_latents for batch_size
additional_image_per_prompt = batch_size // init_latents.shape[0]
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
raise ValueError(
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
)
else:
init_latents = torch.cat([init_latents], dim=0)
shape = init_latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
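# Illustrative note (editor addition, not part of the upstream file): for a
# 1024x1024 RGB input and a typical SDXL VAE (8x downsampling, scaling_factor
# around 0.13 in the VAE config), an `image` of shape (1, 3, 1024, 1024) is
# encoded to latents of shape (1, 4, 128, 128), scaled by
# `vae.config.scaling_factor`, tiled if needed to match
# `batch_size * num_images_per_prompt`, and then noised to `timestep` via
# `scheduler.add_noise` -- that noised encoding is the img2img starting point.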
def _get_add_time_ids(
self, original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, dtype
):
if self.config.requires_aesthetics_score:
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
else:
add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
passed_add_embed_dim = (
self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
)
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
if (
expected_add_embed_dim > passed_add_embed_dim
and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
)
elif (
expected_add_embed_dim < passed_add_embed_dim
and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
):
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
)
elif expected_add_embed_dim != passed_add_embed_dim:
raise ValueError(
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
)
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
return add_time_ids, add_neg_time_ids
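# Illustrative note (editor addition, not part of the upstream file): with
# `requires_aesthetics_score=True` (the refiner-style configuration),
# original_size=(1024, 1024), crops_coords_top_left=(0, 0) and
# aesthetic_score=6.0 yield add_time_ids = [[1024, 1024, 0, 0, 6.0]] (5 values);
# with it disabled, target_size is appended instead, giving 6 values. The
# dimension checks in the method above catch a mismatch between these two
# conventions and the UNet's `add_embedding` input size.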
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: Union[
torch.FloatTensor,
PIL.Image.Image,
np.ndarray,
List[torch.FloatTensor],
List[PIL.Image.Image],
List[np.ndarray],
] = None,
strength: float = 0.3,
num_inference_steps: int = 50,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
image (`torch.FloatTensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`):
The image(s) to modify with the pipeline.
strength (`float`, *optional*, defaults to 0.3):
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
will be used as a starting point, adding more noise to it the larger the `strength`. The number of
denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
be maximum and the denoising process will run for the full number of iterations specified in
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 5.0):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text
`prompt`, usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
If not provided, pooled text embeddings will be generated from `prompt` input argument.
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
Guidance rescale factor should fix overexposure when using zero terminal SNR.
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
TODO
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
TODO
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
TODO
aesthetic_score (`float`, *optional*, defaults to 6.0):
TODO
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
TODO
Examples:
Returns:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
`tuple`. When returning a tuple, the first element is a list with the generated images.
"""
# 1. Check inputs. Raise error if not correct
self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Preprocess image
image = self.image_processor.preprocess(image)
# 5. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# 6. Prepare latent variables
latents = self.prepare_latents(
image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
)
# 7. Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
height, width = latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 8. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
dtype=prompt_embeds.dtype,
)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
# 9. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
# make sure the VAE is in float32 mode, as it overflows in float16
self.vae.to(dtype=torch.float32)
use_torch_2_0_or_xformers = isinstance(
self.vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
XFormersAttnProcessor,
LoRAXFormersAttnProcessor,
LoRAAttnProcessor2_0,
),
)
# if xformers or torch_2_0 is used attention block does not need
# to be in float32 which can save lots of memory
if use_torch_2_0_or_xformers:
self.vae.post_quant_conv.to(latents.dtype)
self.vae.decoder.conv_in.to(latents.dtype)
self.vae.decoder.mid_block.to(latents.dtype)
else:
latents = latents.float()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
return StableDiffusionXLPipelineOutput(images=image)
image = self.watermark.apply_watermark(image)
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image,)
return StableDiffusionXLPipelineOutput(images=image)
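# --- Hedged usage sketch (editor addition, not part of the upstream file) ---
# A minimal img2img call against the pipeline defined above, written as a helper
# that receives an already-loaded pipeline object so that no checkpoint or class
# name has to be assumed here. The argument values mirror the defaults in
# `__call__`; the prompt string is purely illustrative.
def _example_img2img(pipe, init_image: PIL.Image.Image) -> PIL.Image.Image:
    """Illustrative helper, not part of the original module."""
    output = pipe(
        prompt="a photo of an astronaut riding a horse",
        image=init_image,
        strength=0.3,          # signature default: how far to move away from `init_image`
        guidance_scale=5.0,    # signature default: classifier-free guidance weight
        num_inference_steps=50,
    )
    # With the default output_type="pil", `output.images` is a list of PIL images.
    return output.images[0]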
| [
"[]",
"False"
] |
2024-01-10 | ykhli/llama_index | llama_index~query_engine~sql_vector_query_engine.py | """SQL Vector query engine."""
from langchain.input import print_text
from typing import Optional, cast, Dict, Any, Callable
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.struct_store.sql_query import GPTNLStructStoreQueryEngine
from llama_index.indices.vector_store.retrievers.auto_retriever import (
VectorIndexAutoRetriever,
)
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.prompts.base import Prompt
from llama_index.indices.query.query_transform.base import BaseQueryTransform
import logging
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.callbacks.base import CallbackManager
logger = logging.getLogger(__name__)
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL = """
The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
Given the SQL response, the question has also been translated into a vector store query.
The vector store query and response is given below.
Given SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
Transformed vector store query: {vector_store_query_str}
Vector store response: {vector_store_response_str}
Response:
""" # noqa
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT = Prompt(DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL)
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL = """
"The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
The SQL response either answers the question, or should provide additional context that can be used to make the question more specific.
Your job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.
Examples:
Original question: Please give more details about the demographics of the city with the highest population.
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: Can you tell me more about the demographics of New York City?
Original question: Please compare the sports environment of cities in North America.
SQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3
SQL response: The cities in North America are New York, San Francisco, and Toronto.
New question: What sports are played in New York, San Francisco, and Toronto?
Original question: What is the city with the highest population?
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: None
Original question: What countries are the top 3 ATP players from?
SQL query: SELECT country FROM players WHERE rank <= 3
SQL response: The top 3 ATP players are from Serbia, Russia, and Spain.
New question: None
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
New question: "
""" # noqa
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT = Prompt(DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL)
def _default_check_stop(query_bundle: QueryBundle) -> bool:
"""Default check stop function."""
return query_bundle.query_str.lower() == "none"
def _format_sql_query(sql_query: str) -> str:
"""Format SQL query."""
return sql_query.replace("\n", " ").replace("\t", " ")
class SQLAugmentQueryTransform(BaseQueryTransform):
"""SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm_predictor (LLMPredictor): LLM predictor to use for query transformation.
sql_augment_transform_prompt (Prompt): Prompt to use for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm_predictor: Optional[BaseLLMPredictor] = None,
sql_augment_transform_prompt: Optional[Prompt] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm_predictor = llm_predictor or LLMPredictor()
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _run(self, query_bundle: QueryBundle, extra_info: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = extra_info["sql_query"]
sql_query_response = extra_info["sql_query_response"]
new_query_str, formatted_prompt = self._llm_predictor.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
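# --- Hedged usage sketch (editor addition, not part of the upstream file) ---
# Mirrors how the transform is invoked later in
# `SQLAutoVectorQueryEngine._query_sql_vector`: the executed SQL query and its
# response are passed through `extra_info`, and `check_stop` decides whether a
# follow-up vector-store query is still needed. The strings below are
# illustrative only.
def _example_augment_query(transform: SQLAugmentQueryTransform) -> bool:
    """Illustrative helper, not part of the original module."""
    new_query = transform(
        "Please give more details about the demographics of the city "
        "with the highest population.",
        extra_info={
            "sql_query": "SELECT city, population FROM cities ORDER BY population DESC LIMIT 1",
            "sql_query_response": "The city with the highest population is New York City.",
        },
    )
    # True when the LLM answered 'None', i.e. the SQL response already suffices.
    return transform.check_stop(new_query)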
class SQLAutoVectorQueryEngine(BaseQueryEngine):
"""SQL + Vector Index Auto Retriever Query Engine.
This query engine can query both a SQL database
as well as a vector database. It will first decide
whether it needs to query the SQL database or vector store.
If it decides to query the SQL database, it will also decide
whether to augment information with retrieved results from the vector store.
We use the VectorIndexAutoRetriever to retrieve results.
Args:
sql_query_tool (QueryEngineTool): Query engine tool for SQL database.
vector_query_tool (QueryEngineTool): Query engine tool for vector database.
selector (Optional[LLMSingleSelector]): Selector to use.
service_context (Optional[ServiceContext]): Service context to use.
sql_vector_synthesis_prompt (Optional[Prompt]): Prompt to use for SQL vector
synthesis.
sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query
transform to use for SQL augmentation.
use_sql_vector_synthesis (bool): Whether to use SQL vector synthesis.
callback_manager (Optional[CallbackManager]): Callback manager to use.
verbose (bool): Whether to print intermediate results.
"""
def __init__(
self,
sql_query_tool: QueryEngineTool,
vector_query_tool: QueryEngineTool,
selector: Optional[LLMSingleSelector] = None,
service_context: Optional[ServiceContext] = None,
sql_vector_synthesis_prompt: Optional[Prompt] = None,
sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None,
use_sql_vector_synthesis: bool = True,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
) -> None:
"""Initialize params."""
super().__init__(callback_manager=callback_manager)
# validate that the query engines are of the right type
if not isinstance(sql_query_tool.query_engine, GPTNLStructStoreQueryEngine):
raise ValueError(
"sql_query_tool.query_engine must be an instance of "
"GPTNLStructStoreQueryEngine"
)
if not isinstance(vector_query_tool.query_engine, RetrieverQueryEngine):
raise ValueError(
"vector_query_tool.query_engine must be an instance of "
"RetrieverQueryEngine"
)
if not isinstance(
vector_query_tool.query_engine.retriever, VectorIndexAutoRetriever
):
raise ValueError(
"vector_query_tool.query_engine.retriever must be an instance "
"of VectorIndexAutoRetriever"
)
self._sql_query_tool = sql_query_tool
self._vector_query_tool = vector_query_tool
sql_query_engine = cast(
GPTNLStructStoreQueryEngine, sql_query_tool.query_engine
)
self._service_context = service_context or sql_query_engine.service_context
self._selector = selector or LLMSingleSelector.from_defaults()
self._sql_vector_synthesis_prompt = (
sql_vector_synthesis_prompt or DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT
)
self._sql_augment_query_transform = (
sql_augment_query_transform
or SQLAugmentQueryTransform(
llm_predictor=self._service_context.llm_predictor
)
)
self._use_sql_vector_synthesis = use_sql_vector_synthesis
self._verbose = verbose
@classmethod
def from_sql_and_vector_query_engines(
cls,
sql_query_engine: GPTNLStructStoreQueryEngine,
sql_tool_name: str,
sql_tool_description: str,
vector_auto_retriever: RetrieverQueryEngine,
vector_tool_name: str,
vector_tool_description: str,
selector: Optional[LLMSingleSelector] = None,
**kwargs: Any,
) -> "SQLAutoVectorQueryEngine":
"""From SQL and vector query engines.
Args:
sql_query_engine (GPTNLStructStoreQueryEngine): SQL query engine.
vector_auto_retriever (RetrieverQueryEngine): Vector store query engine (wrapping a VectorIndexAutoRetriever).
selector (Optional[LLMSingleSelector]): Selector to use.
"""
sql_query_tool = QueryEngineTool.from_defaults(
sql_query_engine, name=sql_tool_name, description=sql_tool_description
)
vector_query_tool = QueryEngineTool.from_defaults(
vector_auto_retriever,
name=vector_tool_name,
description=vector_tool_description,
)
return cls(sql_query_tool, vector_query_tool, selector, **kwargs)
def _query_sql_vector(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query SQL database + vector db in sequence."""
# first query SQL database
sql_response = self._sql_query_tool.query_engine.query(query_bundle)
if not self._use_sql_vector_synthesis:
return sql_response
sql_query = (
sql_response.extra_info["sql_query"] if sql_response.extra_info else None
)
if self._verbose:
print_text(f"SQL query: {sql_query}\n", color="yellow")
print_text(f"SQL response: {sql_response}\n", color="yellow")
# given SQL db, transform query into new query
new_query = self._sql_augment_query_transform(
query_bundle.query_str,
extra_info={
"sql_query": _format_sql_query(sql_query),
"sql_query_response": str(sql_response),
},
)
if self._verbose:
print_text(
f"Transformed query given SQL response: {new_query.query_str}\n",
color="blue",
)
logger.info(f"> Transformed query given SQL response: {new_query.query_str}")
if self._sql_augment_query_transform.check_stop(new_query):
return sql_response
vector_response = self._vector_query_tool.query_engine.query(new_query)
if self._verbose:
print_text(f"Vector DB response: {vector_response}\n", color="pink")
logger.info(f"> Vector DB response: {vector_response}")
response_str, _ = self._service_context.llm_predictor.predict(
self._sql_vector_synthesis_prompt,
query_str=query_bundle.query_str,
sql_query_str=sql_query,
sql_response_str=str(sql_response),
vector_store_query_str=new_query.query_str,
vector_store_response_str=str(vector_response),
)
if self._verbose:
print_text(f"Final response: {response_str}\n", color="green")
response_extra_info = {
**(sql_response.extra_info or {}),
**(vector_response.extra_info or {}),
}
source_nodes = vector_response.source_nodes
return Response(
response_str,
extra_info=response_extra_info,
source_nodes=source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query and get response."""
# TODO: see if this can be consolidated with logic in RouterQueryEngine
metadatas = [self._sql_query_tool.metadata, self._vector_query_tool.metadata]
result = self._selector.select(metadatas, query_bundle)
# pick sql query
if result.ind == 0:
if self._verbose:
print_text(f"Querying SQL database: {result.reason}\n", color="blue")
logger.info(f"> Querying SQL database: {result.reason}")
return self._query_sql_vector(query_bundle)
elif result.ind == 1:
if self._verbose:
print_text(f"Querying vector database: {result.reason}\n", color="blue")
logger.info(f"> Querying vector database: {result.reason}")
response = self._vector_query_tool.query_engine.query(query_bundle)
if self._verbose:
print_text(f"Vector DB response: {response}\n", color="pink")
return response
else:
raise ValueError(f"Invalid result.ind: {result.ind}")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# TODO: make async
return self._query(query_bundle)
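# --- Hedged usage sketch (editor addition, not part of the upstream file) ---
# Wires a SQL query engine and an auto-retrieving vector query engine into the
# combined engine defined above. Index and database construction are elided;
# `sql_query_engine` and `vector_query_engine` are assumed to be pre-built
# instances of the types validated in `SQLAutoVectorQueryEngine.__init__`, and
# the tool names/descriptions are illustrative only.
def _example_build_sql_auto_vector_engine(
    sql_query_engine: GPTNLStructStoreQueryEngine,
    vector_query_engine: RetrieverQueryEngine,
) -> SQLAutoVectorQueryEngine:
    """Illustrative helper, not part of the original module."""
    return SQLAutoVectorQueryEngine.from_sql_and_vector_query_engines(
        sql_query_engine,
        sql_tool_name="city_stats_sql",
        sql_tool_description="Translates questions into SQL over the city_stats table.",
        vector_auto_retriever=vector_query_engine,
        vector_tool_name="city_docs_vector",
        vector_tool_description="Semantic search over unstructured documents about each city.",
    )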
| [
"\nThe original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nGiven the SQL response, the question has also been translated into a vector store query.\nThe vector store query and response is given below.\nGiven SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nTransformed vector store query: {vector_store_query_str}\nVector store response: {vector_store_response_str}\nResponse: \n",
"\n\"The original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nThe SQL response either answers the question, or should provide additional context that can be used to make the question more specific.\nYour job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.\n\nExamples:\n\nOriginal question: Please give more details about the demographics of the city with the highest population.\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: Can you tell me more about the demographics of New York City?\n\nOriginal question: Please compare the sports environment of cities in North America.\nSQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3\nSQL response: The cities in North America are New York, San Francisco, and Toronto.\nNew question: What sports are played in New York, San Francisco, and Toronto?\n\nOriginal question: What is the city with the highest population?\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: None\n\nOriginal question: What countries are the top 3 ATP players from?\nSQL query: SELECT country FROM players WHERE rank <= 3\nSQL response: The top 3 ATP players are from Serbia, Russia, and Spain.\nNew question: None\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nNew question: \"\n",
"North America",
"None"
] |