| prompt (string, lengths 1.74k–34.3k) | ref (string, lengths 4–432) |
|---|---|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AsuradaYuci/TF-CLIP
# Path: loss/softmax_loss.py
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
targets: ground truth labels with shape (batch_size)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
if self.use_gpu: targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).mean(0).sum()
return loss
# Path: loss/softmax_loss.py
class LabelSmoothingCrossEntropy(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.1):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothingCrossEntropy, self).__init__()
assert smoothing < 1.0
self.smoothing = smoothing
self.confidence = 1. - smoothing
def forward(self, x, target):
logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
# Path: loss/triplet_loss.py
class TripletLoss(object):
"""
Triplet loss using HARDER example mining,
modified based on original triplet loss using hard example mining
"""
def __init__(self, margin=None, hard_factor=0.0):
self.margin = margin
self.hard_factor = hard_factor
if margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
self.ranking_loss = nn.SoftMarginLoss()
def __call__(self, global_feat, labels, normalize_feature=False):
if normalize_feature:
global_feat = normalize(global_feat, axis=-1)
dist_mat = euclidean_dist(global_feat, global_feat) #B,B
dist_ap, dist_an = hard_example_mining(dist_mat, labels)
dist_ap *= (1.0 + self.hard_factor)
dist_an *= (1.0 - self.hard_factor)
y = dist_an.new().resize_as_(dist_an).fill_(1)
if self.margin is not None:
loss = self.ranking_loss(dist_an, dist_ap, y)
else:
loss = self.ranking_loss(dist_an - dist_ap, y)
return loss, dist_ap, dist_an
# Path: loss/center_loss.py
class CenterLoss(nn.Module):
"""Center loss.
Reference:
Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Args:
num_classes (int): number of classes.
feat_dim (int): feature dimension.
"""
def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True):
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
def forward(self, x, labels):
"""
Args:
x: feature matrix with shape (batch_size, feat_dim).
labels: ground truth labels with shape (batch_size).
"""
assert x.size(0) == labels.size(0), "features.size(0) is not equal to labels.size(0)"
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
distmat.addmm_(1, -2, x, self.centers.t())
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
dist = []
for i in range(batch_size):
value = distmat[i][mask[i]]
value = value.clamp(min=1e-12, max=1e+12) # for numerical stability
dist.append(value)
dist = torch.cat(dist)
loss = dist.mean()
return loss
# Path: loss/make_loss.py
import torch.nn.functional as F
from .softmax_loss import CrossEntropyLabelSmooth, LabelSmoothingCrossEntropy
from .triplet_loss import TripletLoss
from .center_loss import CenterLoss
# encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
def make_loss(cfg, num_classes): # modified by gu
sampler = cfg.DATALOADER.SAMPLER
feat_dim = 2048
| center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=True) # center loss |
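The first example above quotes the label-smoothing rule y = (1 - epsilon) * y + epsilon / K. As a quick sanity check that is independent of the quoted repository, the short PyTorch sketch below applies that rule to made-up logits and compares the result with the built-in `label_smoothing` argument of `torch.nn.functional.cross_entropy` (available since PyTorch 1.10); the two values should agree.

```python
import torch
import torch.nn.functional as F

# Toy batch: 2 samples, 3 classes (all numbers are invented for the demo).
logits = torch.tensor([[2.0, 0.5, -1.0], [0.1, 1.2, 0.3]])
targets = torch.tensor([0, 1])
epsilon, num_classes = 0.1, 3

# Smooth the one-hot targets: y = (1 - epsilon) * y + epsilon / K.
one_hot = F.one_hot(targets, num_classes).float()
smoothed = (1 - epsilon) * one_hot + epsilon / num_classes

# Smoothed cross-entropy, written out the same way as in the quoted loss.
log_probs = F.log_softmax(logits, dim=1)
manual_loss = (-smoothed * log_probs).sum(dim=1).mean()

# The built-in equivalent for comparison.
builtin_loss = F.cross_entropy(logits, targets, label_smoothing=epsilon)
print(manual_loss.item(), builtin_loss.item())
```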
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: MarilynKeller/aitviewer-skel
# Path: aitviewer/configuration.py
CONFIG = Configuration()
# Path: aitviewer/utils/so3.py
def aa2rot_torch(rotation_vectors):
"""
Convert rotation vectors (angle-axis representation) to rotation matrices.
:param rotation_vectors: A torch tensor of shape (..., 3).
:return: A torch tensor of shape (..., 3, 3).
"""
assert isinstance(rotation_vectors, torch.Tensor)
return roma.rotvec_to_rotmat(rotation_vectors)
# Path: aitviewer/utils/so3.py
def rot2aa_torch(rotation_matrices):
"""
Convert rotation matrices to rotation vectors (angle-axis representation).
:param rotation_matrices: A torch tensor of shape (..., 3, 3).
:return: A torch tensor of shape (..., 3).
"""
assert isinstance(rotation_matrices, torch.Tensor)
return roma.rotmat_to_rotvec(rotation_matrices)
# Path: aitviewer/utils/utils.py
def compute_vertex_and_face_normals_torch(vertices, faces, vertex_faces, normalize=False):
"""
Compute (unnormalized) vertex normals for the given vertices.
:param vertices: A tensor of shape (N, V, 3).
:param faces: A tensor of shape (F, 3) indexing into `vertices`.
:param vertex_faces: A tensor of shape (V, MAX_VERTEX_DEGREE) that lists the face IDs each vertex is a part of.
:param normalize: Whether to make the normals unit length or not.
:return: The vertex and face normals as tensors of shape (N, V, 3) and (N, F, 3) respectively.
"""
vs = vertices[:, faces.to(dtype=torch.long)]
face_normals = torch.cross(vs[:, :, 1] - vs[:, :, 0], vs[:, :, 2] - vs[:, :, 0], dim=-1) # (N, F, 3)
ns_all_faces = face_normals[:, vertex_faces] # (N, V, MAX_VERTEX_DEGREE, 3)
ns_all_faces[:, vertex_faces == -1] = 0.0
vertex_degrees = (vertex_faces > -1).sum(dim=-1).to(dtype=ns_all_faces.dtype)
vertex_normals = ns_all_faces.sum(dim=-2) / vertex_degrees[None, :, None] # (N, V, 3)
if normalize:
face_normals = face_normals / torch.norm(face_normals, dim=-1).unsqueeze(-1)
vertex_normals = vertex_normals / torch.norm(vertex_normals, dim=-1).unsqueeze(-1)
return vertex_normals, face_normals
# Path: aitviewer/models/smpl.py
import collections
import numpy as np
import smplx
import torch
import torch.nn as nn
import trimesh
from abc import ABC
from aitviewer.configuration import CONFIG as C
from aitviewer.utils.so3 import aa2rot_torch as aa2rot
from aitviewer.utils.so3 import rot2aa_torch as rot2aa
from aitviewer.utils.utils import compute_vertex_and_face_normals_torch
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class SMPLLayer(nn.Module, ABC):
"""A wrapper for the various SMPL body models."""
def __init__(
self,
model_type="smpl",
gender="neutral",
num_betas=10,
device=None,
dtype=None,
**smpl_model_params,
):
"""
Initializer.
:param model_type: Which type of SMPL model to load, currently SMPL, SMPL-H and SMPL-X are supported.
:param gender: Which gender to load.
:param num_betas: Number of shape components.
:param device: CPU or GPU.
:param dtype: The pytorch floating point data type.
:param smpl_model_params: Other keyword arguments that can be passed to smplx.create.
"""
assert model_type in ["smpl", "smplh", "smplx", "mano", "flame"]
assert gender in ["male", "female", "neutral"]
if model_type == "smplh" and gender == "neutral":
gender = "female" # SMPL-H has no neutral gender.
super(SMPLLayer, self).__init__()
self.num_betas = num_betas
smpl_model_params["use_pca"] = smpl_model_params.get("use_pca", False)
smpl_model_params["flat_hand_mean"] = smpl_model_params.get("flat_hand_mean", True)
self.bm = smplx.create(
| C.smplx_models, |
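The `compute_vertex_and_face_normals_torch` helper quoted in this example builds face normals from the cross product of two triangle edges before averaging them onto vertices. A minimal standalone illustration of just the face-normal step, on a single hand-made triangle (the coordinates are arbitrary demo values, not SMPL data):

```python
import torch

# One frame (N=1) with three vertices forming a single triangle in the XY plane.
vertices = torch.tensor([[[0.0, 0.0, 0.0],
                          [1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0]]])      # (N, V, 3)
faces = torch.tensor([[0, 1, 2]])                 # (F, 3)

vs = vertices[:, faces.long()]                    # (N, F, 3, 3): corner positions per face
# Unnormalized face normal: cross product of the two edge vectors.
face_normals = torch.cross(vs[:, :, 1] - vs[:, :, 0],
                           vs[:, :, 2] - vs[:, :, 0], dim=-1)
unit_normals = face_normals / torch.norm(face_normals, dim=-1, keepdim=True)
print(unit_normals)   # [[[0., 0., 1.]]] for this counter-clockwise triangle
```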
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: wukan1986/polars_ta
# Path: polars_ta/wq/time_series.py
def ts_co_kurtosis(x: Expr, y: Expr, d: int = 5, ddof: int = 0) -> Expr:
return map_batches([x, y], lambda xx: batches_i2_o1([x1.to_numpy() for x1 in xx], roll_co_kurtosis, d))
# Path: polars_ta/utils/numba_.py
@jit(nopython=True, nogil=True, cache=True)
def nb_roll_sum(x1, window):
"""演示代码,请直接用 pl.col('A').rolling_sum(10).alias('a1')"""
out = np.full(x1.shape, np.nan, dtype=float)
a1 = sliding_window_view(x1, window)
for i, v1 in enumerate(a1):
out[i + window - 1] = np.sum(v1)
return out
# Path: polars_ta/utils/numba_.py
def batches_i1_o1(x1: np.ndarray, func, *args, dtype=None) -> Series:
return Series(func(x1, *args), nan_to_null=True, dtype=dtype)
# Path: polars_ta/utils/numba_.py
def roll_sum(x: Expr, n: int) -> Expr:
return x.map_batches(lambda x1: batches_i1_o1(x1.to_numpy(), nb_roll_sum, n))
# Path: polars_ta/utils/numba_.py
def roll_cov(a: Expr, b: Expr, n: int) -> Expr:
return map_batches([a, b], lambda xx: batches_i2_o1([x1.to_numpy() for x1 in xx], nb_roll_cov, n))
# Path: tests/numba_test.py
import time
import numpy as np
import polars as pl
from numba import jit
from polars_ta.wq.time_series import ts_co_kurtosis
from polars_ta.utils.numba_ import nb_roll_sum, batches_i1_o1, roll_sum, roll_cov
@jit(nopython=True, nogil=True, fastmath=True, cache=True)
def nb_sum(x):
return np.sum(x)
df = pl.DataFrame({'A': range(100000), 'B': range(100000)})
a = df.with_columns([
pl.col('A').rolling_sum(10).alias('a1'),
pl.col('A').rolling_map(lambda x: x.sum(), 10).alias('a2'),
pl.col('A').rolling_map(lambda x: nb_sum(x.to_numpy()), 10).alias('a3'),
roll_sum(pl.col('A'), 10).alias('a4'),
pl.col('A').map_batches(lambda x: batches_i1_o1(x.to_numpy(), nb_roll_sum, 10)).alias('a5'),
pl.rolling_cov(pl.col('A'), pl.col('B'), window_size=10).alias('a6'),
roll_cov(pl.col('A'), pl.col('B'), 10).alias('a7'),
| ts_co_kurtosis(pl.col('A'), pl.col('B'), 10).alias('a8'), |
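The `nb_roll_sum` helper in the example above builds rolling windows with NumPy's `sliding_window_view` and NaN-pads the warm-up region; the numba `@jit` decorator only accelerates it. The same idea in plain NumPy, on made-up data:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def roll_sum(x: np.ndarray, window: int) -> np.ndarray:
    """Rolling sum with NaN padding for the first window - 1 positions."""
    out = np.full(x.shape, np.nan, dtype=float)
    out[window - 1:] = sliding_window_view(x, window).sum(axis=-1)
    return out

x = np.arange(10, dtype=float)   # demo series
print(roll_sum(x, 4))            # [nan nan nan  6. 10. 14. 18. 22. 26. 30.]
```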
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: facebookresearch/taskmet
# Path: utils.py
def dense_nn(
num_features,
num_targets,
num_layers,
intermediate_size=10,
activation="relu",
output_activation="sigmoid",
):
if num_layers > 1:
if intermediate_size is None:
intermediate_size = max(num_features, num_targets)
if activation == "relu":
activation_fn = torch.nn.ReLU
elif activation == "sigmoid":
activation_fn = torch.nn.Sigmoid
else:
raise Exception("Invalid activation function: " + str(activation))
net_layers = [torch.nn.Linear(num_features, intermediate_size), activation_fn()]
for _ in range(num_layers - 2):
net_layers.append(torch.nn.Linear(intermediate_size, intermediate_size))
net_layers.append(activation_fn())
if not isinstance(num_targets, tuple):
net_layers.append(torch.nn.Linear(intermediate_size, num_targets))
else:
net_layers.append(
torch.nn.Linear(intermediate_size, reduce(operator.mul, num_targets, 1))
)
net_layers.append(View(num_targets))
else:
if not isinstance(num_targets, tuple):
net_layers = [torch.nn.Linear(num_features, num_targets)]
else:
net_layers = [
torch.nn.Linear(num_features, reduce(operator.mul, num_targets, 1)),
View(num_targets),
]
if output_activation == "relu":
net_layers.append(torch.nn.ReLU())
elif output_activation == "sigmoid":
net_layers.append(torch.nn.Sigmoid())
elif output_activation == "tanh":
net_layers.append(torch.nn.Tanh())
elif output_activation == "softmax":
net_layers.append(torch.nn.Softmax(dim=-1))
elif output_activation == "elu":
net_layers.append(torch.nn.ELU())
return torch.nn.Sequential(*net_layers)
# Path: utils.py
class View(torch.nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def __repr__(self):
return f"View{self.shape}"
def forward(self, input):
"""
Reshapes the input according to the shape saved in the view data structure.
"""
batch_size = input.shape[:-1]
shape = (*batch_size, *self.shape)
out = input.view(shape)
return out
# Path: metric.py
class Metric(nn.Module):
def __init__(
self,
num_features,
num_output,
num_hidden,
identity_init,
identity_init_scale,
):
super().__init__()
self.base = nn.Sequential(
nn.Linear(num_features, num_hidden),
nn.ReLU(),
nn.Linear(num_hidden, num_output * num_output),
)
self.identity_fac_log = torch.nn.parameter.Parameter(torch.zeros([]))
if identity_init:
last_layer = self.base[-1]
last_layer.weight.data.div_(identity_init_scale)
last_layer.bias.data = torch.eye(num_output).view(-1)
self.num_output = num_output
def forward(self, x):
# A = torch.nn.functional.softplus(self.base(x))
identity_fac = torch.exp(self.identity_fac_log)
L = self.base(x)
L = L.view(L.shape[0], self.num_output, self.num_output)
A = (
torch.bmm(L, L.transpose(1, 2))
+ identity_fac * torch.eye(self.num_output).repeat(x.shape[0], 1, 1).cuda()
)
# TODO: extend for PSD matrices with bounds from the
# identity metric
return A
# Path: taskmet.py
import torch
import torch.nn as nn
import numpy as np
import functorch
import torchopt
import random
from typing import List, Tuple, Dict, Union, Optional, Callable
from utils import dense_nn, View
from metric import Metric
# Copyright (c) Meta Platforms, Inc. and affiliates
class Predictor(nn.Module):
def __init__(self, args):
super().__init__()
| self.model = dense_nn() |
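The `dense_nn` builder quoted in this example assembles a `torch.nn.Sequential` stack of Linear/activation layers with a configurable output activation. A stripped-down sketch of the same pattern (it ignores tuple-shaped targets and the `View` module, and the sizes are arbitrary):

```python
import torch

def small_mlp(num_features: int, num_targets: int, num_layers: int = 3,
              hidden: int = 10) -> torch.nn.Sequential:
    """Simplified dense_nn: Linear/ReLU hidden layers with a sigmoid head."""
    layers = [torch.nn.Linear(num_features, hidden), torch.nn.ReLU()]
    for _ in range(num_layers - 2):
        layers += [torch.nn.Linear(hidden, hidden), torch.nn.ReLU()]
    layers += [torch.nn.Linear(hidden, num_targets), torch.nn.Sigmoid()]
    return torch.nn.Sequential(*layers)

net = small_mlp(num_features=5, num_targets=1)
print(net(torch.randn(4, 5)).shape)   # torch.Size([4, 1])
```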
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kylemcdonald/i2i-realtime
# Path: utils/itertools.py
def chunks(x, n):
# return slices of lists
if hasattr(x, '__len__'):
for i in range(0, len(x), n):
yield x[i:i+n]
else:
# return sub-generators of generators
i = iter(x)
for e in i:
yield chain([e], islice(i, n-1))
# Path: diffusion_processor.py
class DiffusionProcessor:
def __init__(self, warmup=None, local_files_only=True):
base_model = "stabilityai/sdxl-turbo"
vae_model = "madebyollin/taesdxl"
warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
disable_progress_bar()
self.pipe = AutoPipelineForImage2Image.from_pretrained(
base_model,
torch_dtype=torch.float16,
variant="fp16",
local_files_only=local_files_only,
)
self.pipe.vae = AutoencoderTiny.from_pretrained(
vae_model, torch_dtype=torch.float16, local_files_only=local_files_only
)
fix_seed(self.pipe)
print("Model loaded")
config = CompilationConfig.Default()
config.enable_xformers = True
config.enable_triton = True
config.enable_cuda_graph = True
self.pipe = compile(self.pipe, config=config)
print("Model compiled")
self.pipe.to(device="cuda", dtype=torch.float16)
self.pipe.set_progress_bar_config(disable=True)
print("Model moved to GPU", flush=True)
self.compel = Compel(
tokenizer=[self.pipe.tokenizer, self.pipe.tokenizer_2],
text_encoder=[self.pipe.text_encoder, self.pipe.text_encoder_2],
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
requires_pooled=[False, True],
)
self.prompt_cache = FixedSizeDict(32)
print("Prepared compel")
self.generator = torch.manual_seed(0)
if warmup:
warmup_shape = [int(e) for e in warmup.split("x")]
images = np.zeros(warmup_shape, dtype=np.float32)
for i in range(2):
print(f"Warmup {warmup} {i+1}/2")
start_time = time.time()
self.run(
images,
prompt="warmup",
num_inference_steps=2,
strength=1.0
)
print("Warmup finished", flush=True)
def embed_prompt(self, prompt):
if prompt not in self.prompt_cache:
with torch.no_grad():
print("embedding prompt", prompt)
self.prompt_cache[prompt] = self.compel(prompt)
return self.prompt_cache[prompt]
def meta_embed_prompt(self, prompt):
pattern = r'\("(.*?)"\s*,\s*"(.*?)"\)\.blend\((.*?),(.*?)\)'
match = re.search(pattern, prompt)
if not match:
return self.embed_prompt(prompt)
str1, str2, t1, t2 = match.groups()
t1 = float(t1)
t2 = float(t2)
cond1, pool1 = self.embed_prompt(str1)
cond2, pool2 = self.embed_prompt(str2)
cond = cond1 * t1 + cond2 * t2
pool = pool1 * t1 + pool2 * t2
return cond, pool
def run(self, images, prompt, num_inference_steps, strength, use_compel=False, seed=None):
strength = min(max(1 / num_inference_steps, strength), 1)
if seed is not None:
self.generator = torch.manual_seed(seed)
kwargs = {}
if use_compel:
conditioning, pooled = self.meta_embed_prompt(prompt)
batch_size = len(images)
conditioning_batch = conditioning.expand(batch_size, -1, -1)
pooled_batch = pooled.expand(batch_size, -1)
kwargs["prompt_embeds"] = conditioning_batch
kwargs["pooled_prompt_embeds"] = pooled_batch
else:
kwargs["prompt"] = [prompt] * len(images)
return self.pipe(
image=images,
generator=self.generator,
num_inference_steps=num_inference_steps,
guidance_scale=0,
strength=strength,
output_type="np",
**kwargs
).images
# Path: offline_renderer.py
import os
import numpy as np
from tqdm import tqdm
from natsort import natsorted
from turbojpeg import TurboJPEG, TJPF_RGB
from utils.itertools import chunks
from diffusion_processor import DiffusionProcessor
input_directory = "data/frames-1080"
output_directory = input_directory + "-i2i"
batch_size = 4
prompt = "Three ballety dancers in a psychedelic landscape."
steps = 2
strength = 0.7
seed = 0
jpeg = TurboJPEG()
def imread(fn):
with open(fn, 'rb') as f:
return jpeg.decode(f.read(), pixel_format=TJPF_RGB)
def imwrite(fn, img):
with open(fn, 'wb') as f:
f.write(jpeg.encode(img, pixel_format=TJPF_RGB))
def main():
diffusion = DiffusionProcessor()
fns = natsorted(os.listdir(input_directory))
| batches = list(chunks(fns, batch_size)) |
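The `chunks` helper in this example slices sequences directly but returns sub-iterators for generators, which only works if each chunk is consumed before the next one is requested. Here it is again in runnable form, with a tiny usage demo on both kinds of input (the data is arbitrary):

```python
from itertools import chain, islice

def chunks(x, n):
    """Yield length-n slices of a sequence, or length-n sub-iterators of an iterator."""
    if hasattr(x, '__len__'):
        for i in range(0, len(x), n):
            yield x[i:i + n]
    else:
        i = iter(x)
        for e in i:
            yield chain([e], islice(i, n - 1))

print(list(chunks([1, 2, 3, 4, 5], 2)))               # [[1, 2], [3, 4], [5]]
print([list(c) for c in chunks(iter(range(5)), 2)])   # [[0, 1], [2, 3], [4]]
```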
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: wusize/CLIM
# Path: src/training/distributed.py
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args)
# Path: src/training/zero_shot.py
def zero_shot_eval(model, data, epoch, args):
if 'val' not in data:
return {}
if args.zeroshot_frequency == 0:
return {}
if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
return {}
logging.info('Region classifier')
results = {}
if args.test_type == "coco_panoptic":
correct_rois, correct_crops, correct_maskpool, \
similarity_rois, similarity_crops, similarity_maskpool, \
all_box_sizes, all_is_thing, all_cls_labels = run_panoptic(model, data['val'].dataloader, args)
results.update(macc_with_is_thing(correct_rois, all_is_thing, all_cls_labels, 'rois'))
results.update(macc_with_is_thing(correct_crops, all_is_thing, all_cls_labels, 'crops'))
results.update(macc_with_is_thing(correct_maskpool, all_is_thing, all_cls_labels, 'maskpool'))
else:
assert args.test_type == "coco_detection"
correct_rois, correct_crops, all_box_sizes, all_cls_labels = run_det(model, data['val'].dataloader, args)
results.update(macc_with_det(correct_rois, all_cls_labels, 'rois'))
results.update(macc_with_det(correct_crops, all_cls_labels, 'crops'))
return results
# Path: src/training/precision.py
def get_autocast(precision):
if precision == 'amp':
return torch.cuda.amp.autocast
elif precision == 'amp_bfloat16' or precision == 'amp_bf16':
# amp_bfloat16 is more stable than amp float16 for clip training
return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
else:
return suppress
# Path: src/training/train.py
import json
import logging
import math
import time
import torch
import os
from open_clip import get_cast_dtype
from .distributed import is_master
from .zero_shot import zero_shot_eval
from .precision import get_autocast
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def postprocess_clip_output(model_out):
return {
"image_features": model_out[0],
"text_features": model_out[1],
"logit_scale": model_out[2]
}
def unwrap_model(model):
if hasattr(model, 'module'):
return model.module
else:
return model
def backward(total_loss, scaler):
if scaler is not None:
scaler.scale(total_loss).backward()
else:
total_loss.backward()
@torch.no_grad()
def student_teacher_ensemble(student, teacher, alpha=0.5):
target_state_dict = {}
for k, v in student.items():
target_state_dict[k] = v * alpha + teacher[k] * (1.0 - alpha)
return target_state_dict
def train_one_epoch(model, method, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args):
device = torch.device(args.device)
| autocast = get_autocast(args.precision) |
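The `AverageMeter` in this example keeps a running weighted average so per-batch statistics can be aggregated over an epoch. A restated minimal version with an invented usage demo:

```python
class AverageMeter:
    """Tracks the latest value and the running (weighted) average."""
    def __init__(self):
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

meter = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.4, 16)]:   # demo numbers
    meter.update(batch_loss, n=batch_size)
print(meter.avg)   # (0.9*32 + 0.7*32 + 0.4*16) / 80 = 0.72
```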
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: firstof9/ha-gasbuddy
# Path: custom_components/gasbuddy/const.py
CONF_INTERVAL = "interval"
# Path: custom_components/gasbuddy/const.py
CONF_NAME = "name"
# Path: custom_components/gasbuddy/const.py
CONF_POSTAL = "zipcode"
# Path: custom_components/gasbuddy/const.py
CONF_STATION_ID = "station_id"
# Path: custom_components/gasbuddy/const.py
CONF_UOM = "uom"
# Path: custom_components/gasbuddy/const.py
DEFAULT_NAME = "Gas Station"
# Path: custom_components/gasbuddy/const.py
DOMAIN = "gasbuddy"
# Path: tests/const.py
CONFIG_DATA = {
CONF_NAME: "Gas Station",
CONF_INTERVAL: 3600,
CONF_STATION_ID: 208656,
CONF_UOM: True,
}
# Path: tests/const.py
STATION_LIST = {
"187725": "Shell @ 1520 N Verrado Way",
"208656": "Costco @ 1101 N Verrado Way",
"87490": "Chevron @ 1419 N 195th Ave",
"110402": "Circle K @ 721 N 195th Ave",
"203982": "Fry's @ 19600 W Indian School Rd",
"126744": "Circle K @ 537 S Watson Rd",
"201250": "QuikTrip @ 900 S Watson Rd",
"38363": "Fry's @ 1300 S Watson Rd",
"27487": "Love's Travel Stop @ 1610 N Miller Rd",
"160044": "QuikTrip @ 1850 S Miller Rd",
"135437": "Chevron @ 2075 S Miller Rd",
"130812": "Fry's @ 16380 W Yuma Rd",
"200905": "Circle K @ 15535 W McDowell Rd",
"85320": "Safeway @ 440 N Estrella Pkwy",
"155795": "QuikTrip @ 575 N Estrella Pkwy",
"118417": "Circle K @ 307 E US-85",
"154238": "Chevron @ 825 E Monroe Ave",
"150938": "Shell @ 501 E Monroe Ave",
"209199": "QuikTrip @ 1540 N Bullard Ave",
"27442": "Safeway @ 14175 W Indian School Rd",
}
# Path: tests/test_config_flow.py
from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.const import CONF_NAME
from homeassistant.data_entry_flow import FlowResult, FlowResultType
from pytest_homeassistant_custom_component.common import MockConfigEntry
from custom_components.gasbuddy.const import (
CONF_INTERVAL,
CONF_NAME,
CONF_POSTAL,
CONF_STATION_ID,
CONF_UOM,
DEFAULT_NAME,
DOMAIN,
)
from tests.const import CONFIG_DATA, STATION_LIST
import pytest
"""Test config flow."""
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize(
"input,step_id,title,data",
[
(
{
| CONF_NAME: DEFAULT_NAME, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ku-dmlab/PORelDICE
# Path: actor.py
def update_actor(
key: PRNGKey,
actor: Model,
critic: Model,
value: Model,
batch: Batch,
alpha: float,
epsilon: float,
alg: str,
) -> Tuple[Model, InfoDict]:
v = value(batch.observations)
if alg == "PORelDICE":
q1, q2 = critic(batch.observations, batch.actions)
q = jnp.minimum(q1, q2)
weight = 1 + (q - v) / alpha
weight = jnp.maximum(weight, 0.0)
else:
NotImplementedError
weight = jnp.clip(weight, 0.0, 100.0)
def actor_loss_fn(actor_params: Params) -> Tuple[jnp.ndarray, InfoDict]:
dist = actor.apply(
{"params": actor_params},
batch.observations,
training=True,
rngs={"dropout": key},
)
log_probs = dist.log_prob(batch.actions)
actor_loss = -(weight * log_probs).mean()
return actor_loss, {"actor_loss": actor_loss}
new_actor, info = actor.apply_gradient(actor_loss_fn)
return new_actor, info
# Path: common.py
def default_init(scale: Optional[float] = jnp.sqrt(2)):
def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:
def create(
cls,
model_def: nn.Module,
inputs: Sequence[jnp.ndarray],
tx: Optional[optax.GradientTransformation] = None,
) -> "Model":
def __call__(self, *args, **kwargs):
def apply(self, *args, **kwargs):
def apply_gradient(self, loss_fn) -> Tuple[Any, "Model"]:
def save(self, save_path: str):
def load(self, load_path: str) -> "Model":
class MLP(nn.Module):
class Model:
# Path: critic.py
def update_q(
critic: Model, value: Model, batch: Batch, discount: float, alg: str
) -> Tuple[Model, InfoDict]:
next_v = value(batch.next_observations)
target_q = batch.rewards + discount * batch.masks * next_v
def critic_loss_fn(critic_params: Params) -> Tuple[jnp.ndarray, InfoDict]:
q1, q2 = critic.apply(
{"params": critic_params}, batch.observations, batch.actions
)
critic_loss = ((q1 - target_q) ** 2 + (q2 - target_q) ** 2).mean()
return critic_loss, {
"critic_loss": critic_loss,
"q1": q1.mean(),
}
new_critic, info = critic.apply_gradient(critic_loss_fn)
return new_critic, info
# Path: critic.py
def update_v(
critic: Model, value: Model, batch: Batch, alpha: float, epsilon:float, discount: float, alg: str
) -> Tuple[Model, InfoDict]:
def value_loss_fn(value_params: Params) -> Tuple[jnp.ndarray, InfoDict]:
v = value.apply({"params": value_params}, batch.observations)
v_0 = value.apply({"params": value_params}, batch.initial_observations)
q1, q2 = critic(batch.observations, batch.actions)
q = jnp.minimum(q1, q2)
if alg == "PORelDICE":
sp_term = (q-v) / alpha
value_loss = ((1-discount) * v_0).mean() + (alpha *
jnp.where(1 + sp_term > epsilon,
(0.5 * sp_term **2 + sp_term),
(epsilon) * (sp_term - epsilon + 1) + 0.5 * (epsilon - 1) ** 2 + epsilon - 1
)).mean()
else:
raise NotImplementedError("please choose PORelDICE")
return value_loss, {
"value_loss": value_loss,
"v": v.mean(),
"q-v": (q - v).mean(),
}
new_value, info = value.apply_gradient(value_loss_fn)
return new_value, info
# Path: learner.py
from typing import Optional, Sequence, Tuple
from actor import update_actor
from common import Batch, InfoDict, Model, PRNGKey
from critic import update_q, update_v
import jax
import jax.numpy as jnp
import numpy as np
import optax
import policy
import value_net
"""Implementations of algorithms for continuous control."""
def target_update(critic: Model, target_critic: Model, tau: float) -> Model:
new_target_params = jax.tree_util.tree_map(
lambda p, tp: p * tau + tp * (1 - tau), critic.params, target_critic.params
)
return target_critic.replace(params=new_target_params)
@jax.jit
def _update_jit_PORelDICE(
rng: PRNGKey,
actor: Model,
critic: Model,
value: Model,
target_critic: Model,
batch: Batch,
discount: float,
tau: float,
alpha: float,
epsilon:float,
) -> Tuple[PRNGKey, Model, Model, Model, Model, InfoDict]:
| new_value, value_info = update_v(target_critic, value, batch, alpha, epsilon, discount, alg="PORelDICE") |
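The `target_update` function in this example is the standard Polyak/EMA update applied leaf-wise over parameter pytrees. A self-contained sketch with plain dicts standing in for `Model.params` (the numbers are arbitrary):

```python
import jax
import jax.numpy as jnp

def target_update(params, target_params, tau: float):
    """Polyak averaging: new_target = tau * online + (1 - tau) * target, leaf by leaf."""
    return jax.tree_util.tree_map(lambda p, tp: tau * p + (1 - tau) * tp,
                                  params, target_params)

online = {"w": jnp.ones((2, 2)), "b": jnp.full((2,), 2.0)}   # demo "online" params
target = {"w": jnp.zeros((2, 2)), "b": jnp.zeros((2,))}      # demo target params
print(target_update(online, target, tau=0.005))              # w -> 0.005, b -> 0.01
```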
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Megant88/Valorant-GUI-Cheat-Arduino
# Path: mouse_instruct.py
class MouseInstruct:
def __init__(self, dev):
self._buttons_mask = 0
self._dev = dev
self.move(0, 0)
@classmethod
def getMouse(cls, vid=0, pid=0, ping_code=0xf9):
dev = find_mouse_device(vid, pid, ping_code)
if not dev:
vid_str = hex(vid) if vid else "Unspecified"
pid_str = hex(pid) if pid else "Unspecified"
ping_code_str = hex(ping_code) if ping_code else "Unspecified"
error_msg = ("[-] Device "
f"Vendor ID: {vid_str}, Product ID: {pid_str} "
f"Pingcode: {ping_code_str} not found!")
raise DeviceNotFoundError(error_msg)
return cls(dev)
def _buttons(self, buttons):
if buttons != self._buttons_mask:
self._buttons_mask = buttons
self.move(0, 0)
def click(self, button = MOUSE_LEFT):
self._buttons_mask = button
self.move(0, 0)
self._buttons_mask = 0
self.move(0, 0)
def press(self, button = MOUSE_LEFT):
self._buttons(self._buttons_mask | button)
def release(self, button = MOUSE_LEFT):
self._buttons(self._buttons_mask & ~button)
def is_pressed(self, button = MOUSE_LEFT):
return bool(button & self._buttons_mask)
def move(self, x, y):
limited_x = limit_xy(x)
limited_y = limit_xy(y)
self._sendRawReport(self._makeReport(limited_x, limited_y))
def _makeReport(self, x, y):
report_data = [
0x01, # Report ID: 0
self._buttons_mask,
low_byte(x), high_byte(x),
low_byte(y), high_byte(y)
]
return report_data
def _sendRawReport(self, report_data):
self._dev.write(report_data)
# Path: mouse_instruct.py
class DeviceNotFoundError(Exception):
pass
# Path: cheese.py
import cv2
import numpy as np
import win32api, sys
import serial
import keyboard, threading
import time, json
from mss import mss
from mouse_instruct import MouseInstruct, DeviceNotFoundError
from ctypes import WinDLL
from valclient.client import Client
user32, kernel32, shcore = (
WinDLL("user32", use_last_error=True),
WinDLL("kernel32", use_last_error=True),
WinDLL("shcore", use_last_error=True),
)
shcore.SetProcessDpiAwareness(2)
WIDTH, HEIGHT = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
ZONE = 5
GRAB_ZONE = (
int(WIDTH / 2 - ZONE),
int(HEIGHT / 2 - ZONE),
int(WIDTH / 2 + ZONE),
int(HEIGHT / 2 + ZONE),
)
GRAB_ZONE_CENTER_X = (GRAB_ZONE[2] - GRAB_ZONE[0]) / 2
GRAB_ZONE_CENTER_Y = (GRAB_ZONE[3] - GRAB_ZONE[1]) / 2
def exiting():
try:
exec(type((lambda: 0).__code__)(0, 0, 0, 0, 0, 0, b'\x053', (), (), (), '', '', 0, b''))
except:
try:
sys.exit()
except:
raise SystemExit
cfg_path = "config.json"
def set_config(config):
global cfg_path
cfg_path = config
return cfg_path
with open(cfg_path) as json_file:
data = json.load(json_file)
try:
enable_aim = data['aimbot']["enable_aimbot"]
enable_trigger = data['triggerbot']["enable_triggerbot"]
enable_instalock = data['instantlocker']["enable_instantlocker"]
except:
exiting()
def getMouse():
try:
| mouse = MouseInstruct.getMouse() |
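In the example above, `_makeReport` packs the x/y deltas as low/high byte pairs; the `low_byte`, `high_byte`, and `limit_xy` helpers live elsewhere in that repository and are not shown. The sketch below uses hypothetical stand-ins that assume a signed 16-bit, little-endian report layout:

```python
def low_byte(v: int) -> int:
    """Least-significant byte of a 16-bit two's-complement value (hypothetical helper)."""
    return v & 0xFF

def high_byte(v: int) -> int:
    """Most-significant byte of a 16-bit two's-complement value (hypothetical helper)."""
    return (v >> 8) & 0xFF

def make_report(buttons: int, x: int, y: int) -> list:
    """Pack a 6-byte relative-mouse report: report ID, buttons, x lo/hi, y lo/hi."""
    x &= 0xFFFF   # wrap negative deltas into two's complement
    y &= 0xFFFF
    return [0x01, buttons, low_byte(x), high_byte(x), low_byte(y), high_byte(y)]

print(make_report(buttons=0, x=-5, y=300))   # [1, 0, 251, 255, 44, 1]
```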
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Anashel-RPG/echoai
# Path: image_downloader.py
def download_image(image_url, local_path, job_id, prompt, additional_metadata):
logging.info(f"Initiating download: URL {image_url}, Local Path {local_path}, Job ID {job_id}, Prompt {prompt[:30]}...")
try:
response = requests.get(image_url, stream=True)
response.raise_for_status()
# Read image from response
image_data = response.content
image = Image.open(BytesIO(image_data))
# Draw text on the image
draw = ImageDraw.Draw(image)
font = ImageFont.load_default(size=28) # Specifying font size
text = prompt.split(',')[0] # Extract first part of the prompt
# Positioning the text at top left (10, 10)
# draw.text((20, 10), text, font=font)
# Prepare metadata (EXIF) with additional fields
exif_dict = {
"0th": {},
"Exif": {},
"1st": {},
"thumbnail": None,
"GPS": {} # Optional, if you want to include GPS-related tags
}
exif_dict["0th"][piexif.ImageIFD.Artist] = job_id
exif_dict["0th"][piexif.ImageIFD.ImageDescription] = prompt
# Concatenate additional metadata into a single string
user_comment = "; ".join([f"{key}: {value}" for key, value in additional_metadata.items()])
# Encode user comment with ASCII prefix
encoded_comment = b"ASCII\x00\x00" + user_comment.encode("utf-8")
# Assign encoded user comment to EXIF
exif_dict["Exif"][piexif.ExifIFD.UserComment] = encoded_comment
# Generate EXIF bytes
exif_bytes = piexif.dump(exif_dict)
# Save image with metadata and added text
image.save(local_path, "jpeg", exif=exif_bytes)
logging.info(f"Image downloaded successfully and saved to {local_path}, with embedded text and metadata")
except requests.exceptions.HTTPError as e:
logging.error(f"HTTP error occurred while downloading the image: {e.response.status_code} - {e.response.text}")
except requests.exceptions.ConnectionError as e:
logging.error("Connection error occurred while downloading the image.")
except requests.exceptions.Timeout as e:
logging.error("Timeout error occurred while downloading the image.")
except requests.exceptions.RequestException as e:
logging.error(f"An error occurred while downloading the image: {e}")
except IOError as e:
logging.error(f"I/O error occurred while saving the image to {local_path}: {e}")
except Exception as e:
logging.error(f"An unexpected error occurred while downloading the image: {e}")
# Path: config.py
MAX_CONCURRENT_JOBS = 1
# Path: config.py
RATE_LIMIT_DELAY = timedelta(seconds=2)
# Path: config.py
API_BASE_URL = 'https://cloud.leonardo.ai/api/rest/v1/'
# Path: config.py
HEADERS = {
"accept": "application/json",
"authorization": AUTHORIZATION_TOKEN
}
# Path: config.py
API_CALL_DELAY = 3
# Path: job_data_store.py
def get_job_data(job_id):
global job_data_store
data = job_data_store.get(job_id)
if data:
logging.info(f"Retrieved job data for ID {job_id}: {data}")
else:
logging.warning(f"No job data found for ID {job_id}")
return data
# Path: job_data_store.py
def store_job_data(job_id, prompt):
global job_data_store
job_data_store[job_id] = {
"prompt": prompt
}
# logging.info(f"Job data stored: ID {job_id}, Prompt {prompt[:30]}...")
# Log the current state of the job_data_store
# logging.info(f"Current state of job_data_store: {job_data_store}")
# Path: job_manager.py
import threading
import time
import os
import json
import requests
import logging
from queue import Queue, Empty
from datetime import datetime
from image_downloader import download_image
from config import MAX_CONCURRENT_JOBS, RATE_LIMIT_DELAY, API_BASE_URL, HEADERS, API_CALL_DELAY
from job_data_store import get_job_data, store_job_data
# job_manager.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class API:
total_api_credit_cost = 0 # Class-level variable to track the total cost
total_images = 0 # Class-level variable to track the total images
@staticmethod
def start_job(data):
url = API_BASE_URL + 'generations'
| headers = HEADERS |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: llegomark/gemini-pro-chat
# Path: chat.py
class ChatHistoryManager:
def __init__(self, filename="chat_history.txt", max_file_size_mb=5):
self.history = []
self.filename = filename
self.max_file_size_mb = max_file_size_mb
def add_message(self, role, text):
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.history.append(
{'role': role, 'text': text, 'timestamp': timestamp})
def save_to_file(self):
self._rotate_file_if_needed()
with open(self.filename, "a", encoding="utf-8") as file:
for message in self.history:
file.write(
f"{message['timestamp']} {message['role']}: {message['text']}\n")
self.history.clear()
def display(self):
for message in self.history:
print(
f"{message['timestamp']} {message['role']}: {message['text']}")
def _rotate_file_if_needed(self):
if not os.path.exists(self.filename):
with open(self.filename, "a", encoding="utf-8") as file:
pass
if os.path.getsize(self.filename) > self.max_file_size_mb * 1024 * 1024:
os.rename(self.filename, self.filename + ".backup")
# Path: chat.py
def main():
load_dotenv()
api_key = os.getenv("GEMINI_API_KEY")
if not api_key:
raise ValueError(
"API key not found. Please set your GEMINI_API_KEY in the environment.")
genai.configure(api_key=api_key)
generation_config = {
"temperature": 0.7,
"top_p": 1,
"top_k": 1,
"max_output_tokens": 2048,
}
safety_settings = {
"HARM_CATEGORY_HARASSMENT": "BLOCK_NONE",
"HARM_CATEGORY_HATE_SPEECH": "BLOCK_NONE",
"HARM_CATEGORY_SEXUALLY_EXPLICIT": "BLOCK_NONE",
"HARM_CATEGORY_DANGEROUS_CONTENT": "BLOCK_NONE",
}
history_manager = ChatHistoryManager()
history_manager.add_message("system", "--- New Session ---")
model = genai.GenerativeModel(
'gemini-pro', generation_config=generation_config, safety_settings=safety_settings)
chat = model.start_chat(history=[])
while True:
user_input = input("User: ").strip()
if not user_input:
print("Please enter some text.")
continue
if user_input.lower() == "history":
history_manager.display()
continue
if user_input.lower() == "restart":
history_manager.save_to_file()
os.system('cls' if os.name == 'nt' else 'clear')
history_manager.add_message("system", "--- New Session ---")
chat = model.start_chat(history=[])
continue
if user_input.lower() == "exit":
history_manager.save_to_file()
break
try:
response = chat.send_message(user_input, stream=True)
response_text = ""
for chunk in response:
if chunk.text.endswith("."):
response_text += chunk.text
else:
response_text += re.sub(r'\s*$', '.', chunk.text)
print(chunk.text)
history_manager.add_message("user", user_input)
history_manager.add_message("gemini", response_text)
except Exception as e:
print(f"An error occurred: {e}")
# Path: test_chat.py
import unittest
import os
from unittest.mock import patch, mock_open, MagicMock
from chat import ChatHistoryManager, main
class TestChatHistoryManager(unittest.TestCase):
def test_initialization(self):
manager = ChatHistoryManager()
self.assertEqual(manager.history, [])
self.assertEqual(manager.filename, 'chat_history.txt')
self.assertEqual(manager.max_file_size_mb, 5)
@patch('os.path.exists')
@patch('os.path.getsize')
@patch('os.rename')
def test_add_and_save_message(self, mock_rename, mock_getsize, mock_exists):
manager = ChatHistoryManager()
manager.add_message('user', 'test message')
self.assertEqual(len(manager.history), 1)
mock_exists.return_value = True
mock_getsize.return_value = 4 * 1024 * 1024
m = mock_open()
with patch('builtins.open', m):
manager.save_to_file()
m.assert_called_once_with('chat_history.txt', 'a', encoding='utf-8')
self.assertEqual(manager.history, [])
mock_getsize.return_value = 6 * 1024 * 1024
manager.add_message('user', 'another message')
with patch('builtins.open', m):
manager.save_to_file()
mock_rename.assert_called_once_with(
'chat_history.txt', 'chat_history.txt.backup')
@patch('builtins.print')
def test_display(self, mock_print):
manager = ChatHistoryManager()
manager.add_message('user', 'display test')
manager.display()
mock_print.assert_called()
class TestMainFunction(unittest.TestCase):
@patch('builtins.input', side_effect=['exit'])
@patch('os.getenv', return_value='dummy_key')
@patch('google.generativeai.GenerativeModel')
@patch('chat.ChatHistoryManager')
def test_main(self, mock_manager, mock_gen_model, mock_getenv, mock_input):
| main() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CXH-Research/DeVigNet
# Path: config/config.py
class Config(object):
r"""
A collection of all the required configuration parameters. This class is a nested dict-like
structure, with nested keys accessible as attributes. It contains sensible default values for
all the parameters, which may be overriden by (first) through a YAML file and (second) through
a list of attributes and values.
Extended Summary
----------------
This class definition contains default values corresponding to ``joint_training`` phase, as it
is the final training phase and uses almost all the configuration parameters. Modification of
any parameter after instantiating this class is not possible, so you must override required
parameter values either through the ``config_yaml`` file or the ``config_override`` list.
Parameters
----------
config_yaml: str
Path to a YAML file containing configuration parameters to override.
config_override: List[Any], optional (default= [])
A list of sequential attributes and values of parameters to override. This happens after
overriding from YAML file.
Examples
--------
Let a YAML file named "config.yaml" specify these parameters to override::
ALPHA: 1000.0
BETA: 0.5
>>> _C = Config("config.yaml", ["OPTIM.BATCH_SIZE", 2048, "BETA", 0.7])
>>> _C.ALPHA # default: 100.0
1000.0
>>> _C.BATCH_SIZE # default: 256
2048
>>> _C.BETA # default: 0.1
0.7
Attributes
----------
"""
def __init__(self, config_yaml: str, config_override: List[Any] = []):
self._C = CN()
self._C.GPU = [0]
self._C.VERBOSE = False
self._C.MODEL = CN()
self._C.MODEL.SESSION = 'MRI-CT'
self._C.MODEL.INPUT = 'MRI'
self._C.MODEL.TARGET = 'CT'
self._C.OPTIM = CN()
self._C.OPTIM.BATCH_SIZE = 1
self._C.OPTIM.SEED = 3407
self._C.OPTIM.NUM_EPOCHS = 200
self._C.OPTIM.NEPOCH_DECAY = [100]
self._C.OPTIM.LR_INITIAL = 0.0002
self._C.OPTIM.LR_MIN = 0.0002
self._C.OPTIM.BETA1 = 0.5
self._C.OPTIM.WANDB = False
self._C.TRAINING = CN()
self._C.TRAINING.VAL_AFTER_EVERY = 1
self._C.TRAINING.RESUME = False
self._C.TRAINING.TRAIN_DIR = '../dataset/MRI-CT/train'
self._C.TRAINING.VAL_DIR = '../dataset/MRI-CT/test'
self._C.TRAINING.SAVE_DIR = 'checkpoints'
self._C.TRAINING.PS_W = 256
self._C.TRAINING.PS_H = 256
self._C.TRAINING.ORI = False
self._C.TESTING = CN()
self._C.TESTING.WEIGHT = './checkpoints/MRI-PET_epoch_68.pth'
self._C.TESTING.SAVE_IMAGES = False
# Override parameter values from YAML file first, then from override list.
self._C.merge_from_file(config_yaml)
self._C.merge_from_list(config_override)
# Make an instantiated object of this class immutable.
self._C.freeze()
def dump(self, file_path: str):
r"""Save config at the specified file path.
Parameters
----------
file_path: str
(YAML) path to save config at.
"""
self._C.dump(stream=open(file_path, "w"))
def __getattr__(self, attr: str):
return self._C.__getattr__(attr)
def __repr__(self):
return self._C.__repr__()
# Path: data/data_RGB.py
def get_training_data(rgb_dir, inp, target, img_options):
assert os.path.exists(rgb_dir)
return DataLoaderTrain(rgb_dir, inp, target, img_options)
# Path: data/data_RGB.py
def get_validation_data(rgb_dir, inp, target, img_options):
assert os.path.exists(rgb_dir)
return DataLoaderVal(rgb_dir, inp, target, img_options)
# Path: train.py
import warnings
import torch.optim as optim
from accelerate import Accelerator
from pytorch_msssim import SSIM
from torch.utils.data import DataLoader
from torchmetrics.functional import peak_signal_noise_ratio, structural_similarity_index_measure
from torchmetrics.functional.regression import mean_absolute_error
from tqdm import tqdm
from config import Config
from data import get_training_data, get_validation_data
from models import *
from utils import *
warnings.filterwarnings('ignore')
opt = Config('config.yml')
seed_everything(opt.OPTIM.SEED)
def train():
# Accelerate
accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator()
device = accelerator.device
config = {
"dataset": opt.TRAINING.TRAIN_DIR
}
accelerator.init_trackers("Vig", config=config)
if accelerator.is_local_main_process:
os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True)
# Data Loader
train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
| train_dataset = get_training_data(train_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, |
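The `Config` wrapper in this example layers overrides on top of defaults with `merge_from_file`, then `merge_from_list`, then `freeze`. The `CN` it builds on is presumably `yacs.config.CfgNode` (the import is not shown in the excerpt); assuming that, a minimal override-and-freeze round trip looks like this, skipping the YAML file so the snippet runs on its own:

```python
from yacs.config import CfgNode as CN

_C = CN()
_C.OPTIM = CN()
_C.OPTIM.BATCH_SIZE = 1        # default, mirroring the Config class above
_C.OPTIM.LR_INITIAL = 0.0002

# Override from a flat key/value list, then freeze to make the config immutable.
_C.merge_from_list(["OPTIM.BATCH_SIZE", 8])
_C.freeze()
print(_C.OPTIM.BATCH_SIZE)     # 8
```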
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: moonshot-admin/moonshot
# Path: third-party/tqdm-4.66.1/tqdm/auto.py
class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro
pass
# Path: third-party/tqdm-4.66.1/tqdm/std.py
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
# Path: third-party/tqdm-4.66.1/tqdm/contrib/utils_worker.py
class MonoWorker(object):
"""
Supports one running task and one waiting task.
The waiting task is the most recent submitted (others are discarded).
"""
def __init__(self):
self.pool = ThreadPoolExecutor(max_workers=1)
self.futures = deque([], 2)
def submit(self, func, *args, **kwargs):
"""`func(*args, **kwargs)` may replace currently waiting task."""
futures = self.futures
if len(futures) == futures.maxlen:
running = futures.popleft()
if not running.done():
if len(futures): # clear waiting
waiting = futures.pop()
waiting.cancel()
futures.appendleft(running) # re-insert running
try:
waiting = self.pool.submit(func, *args, **kwargs)
except Exception as e:
tqdm_auto.write(str(e))
else:
futures.append(waiting)
return waiting
# Path: third-party/tqdm-4.66.1/tqdm/contrib/telegram.py
from os import getenv
from warnings import warn
from requests import Session
from ..auto import tqdm as tqdm_auto
from ..std import TqdmWarning
from .utils_worker import MonoWorker
"""
Sends updates to a Telegram bot.
Usage:
>>> from tqdm.contrib.telegram import tqdm, trange
>>> for i in trange(10, token='{token}', chat_id='{chat_id}'):
... ...
"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['TelegramIO', 'tqdm_telegram', 'ttgrange', 'tqdm', 'trange']
class TelegramIO(MonoWorker):
"""Non-blocking file-like IO using a Telegram Bot."""
API = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""Creates a new message in the given `chat_id`."""
super(TelegramIO, self).__init__()
self.token = token
self.chat_id = chat_id
self.session = Session()
self.text = self.__class__.__name__
self.message_id
@property
def message_id(self):
if hasattr(self, '_message_id'):
return self._message_id
try:
res = self.session.post(
self.API + '%s/sendMessage' % self.token,
data={'text': '`' + self.text + '`', 'chat_id': self.chat_id,
'parse_mode': 'MarkdownV2'}).json()
except Exception as e:
tqdm_auto.write(str(e))
else:
if res.get('error_code') == 429:
warn("Creation rate limit: try increasing `mininterval`.",
| TqdmWarning, stacklevel=2) |
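The `MonoWorker` quoted in this example keeps one running task plus at most one waiting task on a single-threaded executor, cancelling anything older that is still waiting. A condensed, function-style restatement with a toy workload (the sleep time and messages are invented) shows the effect: intermediate submissions are dropped.

```python
import time
from collections import deque
from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=1)   # one running task at a time
futures = deque([], 2)                     # slots for "running" and "waiting"

def submit(func, *args):
    if len(futures) == futures.maxlen:
        running = futures.popleft()
        if not running.done():
            if futures:                    # cancel the task that was still waiting
                futures.pop().cancel()
            futures.appendleft(running)    # keep the running task in the deque
    waiting = pool.submit(func, *args)
    futures.append(waiting)
    return waiting

def slow_print(msg):
    time.sleep(0.2)
    print(msg)

for i in range(5):                         # rapid-fire updates
    submit(slow_print, f"update {i}")
pool.shutdown(wait=True)                   # typically prints only "update 0" and "update 4"
```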
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: LkPrtctrd/BSL-V53
# Path: Heart/Logic/LogicCommandManager.py
class LogicCommandManager:
commandsList = {
201: ChangeAvatarNameCommand,
202: 'DiamondsAddedCommand',
203: 'GiveDeliveryItemsCommand',
204: 'DayChangedCommand',
205: 'DecreaseHeroScoreCommand',
206: 'AddNotificationCommand',
207: 'ChangeResourcesCommand',
208: 'TransactionsRevokedCommand',
209: 'KeyPoolChangedCommand',
210: 'IAPChangedCommand',
211: 'OffersChangedCommand',
212: 'PlayerDataChangedCommand',
213: 'InviteBlockingChangedCommand',
214: 'GemNameChangeStateChangedCommand',
215: 'SetSupportedCreatorCommand',
216: 'CooldownExpiredCommand',
217: 'ProLeagueSeasonChangedCommand',
218: 'BrawlPassSeasonChangedCommand',
219: 'BrawlPassUnlockedCommand',
220: 'HerowinQuestsChangedCommand',
221: 'TeamChatMuteStateChangedCommand',
222: 'RankedSeasonChangedCommand',
223: 'CooldownAddedCommand',
224: 'SetESportsHubNotificationCommand',
228: 'RefreshRandomRewardsCommand',
500: 'GatchaCommand',
503: 'ClaimDailyRewardCommand',
504: 'SendAllianceMailCommand',
505: SetPlayerThumbnailCommand,
506: 'SelectSkinCommand',
507: 'UnlockSkinCommand',
508: 'ChangeControlModeCommand',
509: 'PurchaseDoubleCoinsCommand',
511: 'HelpOpenedCommand',
512: 'ToggleInGameHintsCommand',
514: 'DeleteNotificationCommand',
515: 'ClearShopTickersCommand',
517: 'ClaimRankUpRewardCommand',
518: 'PurchaseTicketsCommand',
519: PurchaseOfferCommand,
520: 'LevelUpCommand',
521: 'PurchaseHeroLvlUpMaterialCommand',
522: 'HeroSeenCommand',
523: 'ClaimAdRewardCommand',
524: 'VideoStartedCommand',
525: 'SelectCharacterCommand',
526: 'UnlockFreeSkinsCommand',
527: SetPlayerNameColorCommand,
528: 'ViewInboxNotificationCommand',
529: 'SelectStarPowerCommand',
530: 'SetPlayerAgeCommand',
531: 'CancelPurchaseOfferCommand',
532: 'ItemSeenCommand',
533: 'QuestSeenCommand',
534: 'PurchaseBrawlPassCommand',
535: 'ClaimTailRewardCommand',
536: 'PurchaseBrawlpassProgressCommand',
537: 'VanityItemSeenCommand',
538: 'SelectEmoteCommand',
539: 'BrawlPassAutoCollectWarningSeenCommand',
540: 'PurchaseChallengeLivesCommand',
541: 'ClearESportsHubNotificationCommand',
542: 'SelectGroupSkinCommand',
571: 'OpenRandomCommand'
}
def getCommandsName(commandType):
try:
command = LogicCommandManager.commandsList[commandType]
except KeyError:
command = str(commandType)
if type(command) == str:
return command
else:
return command.__name__
def commandExist(commandType):
return (commandType in LogicCommandManager.commandsList.keys())
def createCommand(commandType, commandPayload=b''):
commandList = LogicCommandManager.commandsList
if LogicCommandManager.commandExist(commandType):
print(LogicCommandManager.getCommandsName(commandType), "created")
if type(commandList[commandType]) == str:
pass
else:
return commandList[commandType](commandPayload)
else:
print(commandType, "skipped")
return None
def isServerToClient(commandType):
if 200 <= commandType < 500:
return True
elif 500 <= commandType:
return False
# Path: Heart/Packets/PiranhaMessage.py
class PiranhaMessage(ByteStream):
def __init__(self, messageData):
super().__init__(messageData)
self.messageBuffer = messageData
self.fields = {}
def decode(self, fields):
if True:
print()
for typeName,value in fields.items():
print(f"{typeName}: {value}")
print()
def getLength(self):
return len(self.messageBuffer)
def isServerToClient(self):
messageType = self.getMessageType()
if 20000 <= messageType < 30000 or messageType == 40000:
return True
elif 10000 <= messageType < 20000 or messageType == 30000:
return False
# Path: Heart/Packets/Server/Home/AvailableServerCommandMessage.py
from Heart.Logic.LogicCommandManager import LogicCommandManager
from Heart.Packets.PiranhaMessage import PiranhaMessage
class AvailableServerCommandMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields, player):
self.writeVInt(fields["Command"]["ID"])
| command = LogicCommandManager.createCommand(fields["Command"]["ID"], self.messagePayload) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sockheadrps/AIODesa
# Path: aiodesa/utils/table.py
def make_schema(name: str, data_cls: Any) -> TableSchema:
"""
Generate a TableSchema based on the provided data class.
Args:
name: The name of the table.
data_cls: A data class defining the schema for the table.
Returns:
TableSchema: An instance of TableSchema containing the table_name and
SQL data definition.
Example:
.. code-block:: python
user_table_schema = generate_table_schema(name='users', data_cls=User)
Note:
The function returns a TableSchema instance containing the table_name
and SQL data definition.
"""
columns = []
name = name.replace(" ", "_")
for field_name, field_type in data_cls.__annotations__.items():
if field_name == "table_name":
pass
else:
columns.append(f"{field_name} {py_to_sql_type(field_type)}")
if hasattr(data_cls, "primary_key"):
columns.append(f"PRIMARY KEY ({data_cls.primary_key})")
if hasattr(data_cls, "unique_key"):
columns.append(f"UNIQUE ({data_cls.unique_key})")
schema = TableSchema(
name, f"CREATE TABLE IF NOT EXISTS {name} (\n{', '.join(columns)}\n);"
)
return schema
# Path: aiodesa/utils/table.py
class TableSchema:
"""
Represents the schema for a database table.
Args:
table_name: The name of the table.
data: The SQL data definition language (DDL) statement.
Example:
.. code-block:: python
# Create a TableSchema for a 'users' table
user_table_schema = TableSchema(
table_name='users',
data='CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT);')
Note:
The `data` attribute contains the SQL data definition language (DDL).
"""
table_name: str
data: str
# Path: aiodesa/database.py
from dataclasses import is_dataclass, fields
from typing import Tuple, Callable, Any, Coroutine
from pathlib import Path
from aiodesa.utils.table import make_schema, TableSchema
import aiosqlite
"""
aiodesa.Database: Simple SQLite Database Interface
This module provides the `Db` class, a simple SQLite database interface that
supports asynchronous operations.
Classes:
- :class:`Db`: Represents a simple SQLite database interface.
Example:
.. code-block:: python
from aiodesa import Db
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
"""
class Db:
"""
Represents a simple SQLite database interface.
Args:
db_path : str
The path to the SQLite database file.
Example:
.. code-block:: python
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
...
"""
_tables: dict
db_path: Path
_conn: Any
def __init__(self, db_path: str) -> None:
self.db_path = Path(db_path)
self._conn = None
self._create_db()
self._tables = {}
def _create_db(self) -> None:
"""
Internal method to create the database file if it does not exist.
Notes:
- This method is automatically called during the initialization of the
Db class.
- It ensures that the SQLite database file is created at the specified
path if
it does not exist.
"""
if not self.db_path.exists():
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self.db_path.touch()
async def _process_single_data_class(self, schema: Any) -> None:
"""
Process a single data class schema.
Args:
schema: The data class schema representing a table.
Returns:
This method does not return any value.
"""
if not is_dataclass(schema):
raise ValueError("Provided schema is not a data class")
self._tables[schema.table_name] = schema
class_fields = fields(schema)
for field in class_fields:
if field.name == "table_name":
| schema_ = make_schema(str(field.default), schema) |
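The `make_schema` helper in this example turns a data class's annotations into a `CREATE TABLE` statement via a `py_to_sql_type` mapper that is not shown in the excerpt. The sketch below substitutes a deliberately tiny mapper (only a few basic Python types) to show the shape of the generated DDL:

```python
from dataclasses import dataclass

def py_to_sql_type(tp) -> str:
    """Minimal stand-in for the real type mapper used by make_schema."""
    return {str: "TEXT", int: "INTEGER", float: "REAL", bool: "INTEGER"}.get(tp, "TEXT")

def make_schema(name: str, data_cls) -> str:
    """Build a CREATE TABLE statement from a data class, mirroring the quoted helper."""
    columns = [
        f"{field} {py_to_sql_type(tp)}"
        for field, tp in data_cls.__annotations__.items()
        if field != "table_name"
    ]
    return f"CREATE TABLE IF NOT EXISTS {name} (\n{', '.join(columns)}\n);"

@dataclass
class Users:
    username: str
    id: int = 0
    table_name: str = "users"

print(make_schema("users", Users))   # CREATE TABLE IF NOT EXISTS users ... username TEXT, id INTEGER
```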
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DavidBellamy/labrador
# Path: lab_transformers/utils.py
def json_lines_loader(filepath: Union[str, Path]) -> List[Dict[str, Any]]:
"""Loads the JSON lines located at filepath and returns them as a list of flat dictionaries."""
jsonl = []
with open(filepath) as f:
for line in tqdm(f):
jsonl.append(json.loads(line))
return jsonl
# Path: lab_transformers/utils.py
class NpEncoder(json.JSONEncoder):
"""A JSONEncoder subclass to handle Numpy integers, floats and arrays when writing JSON lines to disk.
Usage: json.dumps(data, cls=NpEncoder)
This function overwrites the default() method of JSONEncoder to handle additional types; specifically Numpy
integers, floats and arrays. For all other types, the standard default() method is used for encoding.
"""
def default(
self, obj: Union[np.integer, np.floating, np.ndarray, Any]
) -> Union[int, float, List[Any], Any]:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
# Path: scripts/preprocessing/pretraining_jsonl_to_bert_bags.py
import json
import os.path as op
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from lab_transformers.utils import json_lines_loader, NpEncoder
def make_lab_bags_for_bert(
jsonl_batch: list, filepath: str, max_time_delta: float, min_bag_length: int = 3
) -> None:
"""Creates all unique bags of labs spanning max_time_delta (and with size min_bag_length) for the patients
in jsonl_batch.
Inputs:
> jsonl_batch: a list of JSON lines, where each line contains the 5 keys: subject_id, tokens,
time_deltas, hadm_id, and charttime.
> filepath: a string specifying the path to the desired output jsonl file.
> max_time_delta: a float specifying the maximum time period that a bag may span.
> min_bag_length: a positive integer specifying the minimum length requirement for each bag.
Returns:
> No return value, has the side effect of writing JSON lines containing all precomputed bags for each patient to
the file at filepath. Each JSON line has the following structure:
{'subject_id': 123456, token_bags: [[1, 2, 3], [4, 5, 6]], 'hadm_id': [101, 102],
'charttime': ["2175-12-30T17:03", "2143-08-14T05:01"]}
The hadm_id is the hospital admission ID for each corresponding token in token_bags; these values may be
missing. Similarly, 'charttime' is the moment when the labs were added to the patient's chart. When
max_time_delta = 0, each bag only has 1 'charttime' value, whereas bags with larger values of max_time_delta could
have multiple, in which case we take the minimum of all those times (i.e. the start time of the bag).
"""
# For each patient loop over time deltas and construct bags of labs with max_time_delta width
# Redundant subsets are filtered out
# Only bags with min_bag_length will be included
output_jsonl = []
for patient in tqdm(jsonl_batch, desc="Making bags of labs"):
# Separate out the patient's data components (reduces the time spent indexing below)
time_deltas = patient["time_deltas"]
tokens = patient["token"]
hadm_ids = patient["hadm_id"]
charttimes = patient["charttime"]
bags_of_lab_indexes = (
[]
) # will hold the bags of indexes, which correspond to bags of codes/values
token_bags = [] # will hold the bags of codes for the current patient
hadm_id_list = [] # will hold the hadm_id for each bag of codes/values
charttime_list = []  # will hold the start time for each bag of codes/values
end_of_list = len(patient["time_deltas"])
for index in range(end_of_list):
# Start a set of indexes to be returned, beginning with the current index
index_list = [index]
# collect indexes going rightwards until max_time_delta is surpassed or end of list is reached
cumsum = 0
while True:
index += 1
if index >= end_of_list:
break
cumsum += time_deltas[index]
if cumsum > max_time_delta:
break
index_list.append(index)
# pass if the proposed bag of lab indexes is not at least min_bag_length
if len(index_list) < min_bag_length:
continue
# collect this proposed bag of lab indexes, only if it isn't a subset of any that came before it
sets = {frozenset(e) for e in bags_of_lab_indexes}
proposed_indexes = set(index_list)
if not any(proposed_indexes <= s for s in sets):
bags_of_lab_indexes.append(index_list)
# Convert the bag of lab indexes into the corresponding lab codes, values, hadm_id's and charttimes
codes = [tokens[i] for i in index_list]
temp_hadm_ids = [hadm_ids[i] for i in index_list]
temp_charttimes = np.array(
[pd.to_datetime(charttimes[i]) for i in index_list],
dtype=np.datetime64,
)
bag_start_time = min(temp_charttimes)
# If there were multiple hospital admission IDs for the same bag, assign 'NA' to this bag's hadm_id
if len(set(temp_hadm_ids)) > 1:
hadm_id = float("nan")
else:
hadm_id = temp_hadm_ids[
0
] # take the first hadm_id from the list, since all are the same
token_bags.append(codes)
hadm_id_list.append(hadm_id)
charttime_list.append(bag_start_time)
if len(bags_of_lab_indexes) > 0:
patient_jsonl = {
"subject_id": patient["subject_id"],
"token_bags": token_bags,
"hadm_id": hadm_id_list,
"charttime": np.datetime_as_string(charttime_list, unit="m").tolist(),
}
output_jsonl.append(patient_jsonl)
# Write JSON lines
first_line = True
mode = "w"
for patient in tqdm(output_jsonl, desc=f"Writing JSON lines..."):
# Write patient to file
with open(filepath, mode=mode, encoding="utf-8") as f:
| json_record = json.dumps(patient, cls=NpEncoder) |
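As a quick illustration of why the custom encoder matters here: plain json.dumps raises TypeError on NumPy scalars and arrays. The snippet below re-declares a minimal equivalent of the NpEncoder quoted above purely so it runs standalone.

import json
import numpy as np

class NpEncoderSketch(json.JSONEncoder):
    # Same idea as the NpEncoder shown above: coerce NumPy types to plain Python.
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

record = {"subject_id": np.int64(123456), "token_bags": [np.array([1, 2, 3])]}
print(json.dumps(record, cls=NpEncoderSketch))
# -> {"subject_id": 123456, "token_bags": [[1, 2, 3]]}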
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: NLP-Core-Team/RealCode_eval
# Path: lm_eval/datatypes.py
class Task:
repo: str
repo_n: int
path_from_root: str
left_context: str
right_context: str
gt: str
total_tests: int
# Path: lm_eval/context_parser.py
class BaseParser:
def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:
"""
main method, that returns tuple (left_context, right_context) for the task
"""
raise NotImplementedError()
# Path: lm_eval/context_parser.py
class TrivialContextParser(BaseParser):
def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:
"""
returns left and right context without processing
"""
return task.left_context, task.right_context
# Path: lm_eval/generators.py
import os
import typing as tp
import json
import re
import torch
import logging
from pathlib import Path
from dataclasses import asdict, fields
from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList
from tqdm import tqdm
from .datatypes import Task
from .context_parser import BaseParser, TrivialContextParser
logger = logging.getLogger("RealCode")
class InfillGenerator:
def __init__(self,
model_path: str,
num_samples: int,
prefix_tokens: tp.Union[str, tp.List[int]] = [],
middle_tokens: tp.Union[str, tp.List[int]] = [],
suffix_tokens: tp.Union[str, tp.List[int]] = [],
max_context_length: int = None,
left_context_ratio: int = 1,
dtype = torch.bfloat16,
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"],
model_kwargs: tp.Dict = {},
generation_params: tp.Dict[str, tp.Any] = {},
context_parser: BaseParser = TrivialContextParser(),
add_extra_spaces_to_generation=0,
):
"""
Class to generate code in fill-in-the-middle mode
params:
model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained
num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params
prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens
middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length)
left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context
dtype=torch.bfloat16 - torch dtype to use for inference
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"] - regular expressions that determine end of generation
model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained
generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate
context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts
add_extra_spaces_to_generation=0 - number of extra spaces to add at the beginning of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. Codellama)
"""
self.device = torch.device("cuda")
# self.device = torch.device("cpu")
logger.info(f"Loading model from {model_path} with kwargs f{model_kwargs}")
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model = AutoModelForCausalLM.from_pretrained(model_path,
torch_dtype=dtype, device_map="auto", trust_remote_code=True, **model_kwargs
).eval()
logger.info(f"Loaded model from {model_path} with kwargs f{model_kwargs}")
logger.info(f"Device map: \n{self.model.hf_device_map}")
self.num_samples = num_samples
self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)
self.middle_tokens = self.tokenize_special_tokens(middle_tokens)
self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)
logger.debug(f"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}")
self.eos_sequences = eos_sequences[:]
#context truncation parameters
self.max_context_length = max_context_length
self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)
self.right_context_truncate_at = 1 / (left_context_ratio + 1)
self.generation_params = generation_params
self.generation_params['num_return_sequences'] = self.num_samples
self.context_parser = context_parser
# Number of tokens before and after truncating to max_context_length
self.count_inferenced_tokens = []
self.count_possible_tokens = []
self.add_extra_spaces_to_generation = add_extra_spaces_to_generation
def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor:
if type(str_or_list) == str:
return self.tokenizer.encode(str_or_list, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
else:
return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)
| def _prepare_tokens(self, task: Task) -> torch.Tensor: |
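The left_context_ratio arithmetic in the constructor above reduces to a simple budget split; the standalone sketch below shows the resulting left/right token budgets for a couple of illustrative values (the helper name and the numbers are assumptions, not part of the repository).

def split_context_budget(max_context_length: int, left_context_ratio: int) -> tuple[int, int]:
    # ratio=1 -> 1:1 split, ratio=3 -> 3:1 in favour of the left context,
    # mirroring left_context_truncate_at / right_context_truncate_at above.
    left_budget = int(max_context_length * left_context_ratio / (left_context_ratio + 1))
    return left_budget, max_context_length - left_budget

print(split_context_budget(4096, 1))  # (2048, 2048)
print(split_context_budget(4096, 3))  # (3072, 1024)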
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: centrifugal/grand-chat-tutorial
# Path: backend/chat/models.py
class Message(models.Model):
room = models.ForeignKey(Room, related_name='messages', on_delete=models.CASCADE)
# Note, message may have null user – we consider such messages "system". These messages are
# initiated by the backend and have no user author. We are not using such messages in
# the example currently, but leave the opportunity to extend.
user = models.ForeignKey(User, related_name='messages', on_delete=models.CASCADE, null=True)
content = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
# Path: backend/chat/models.py
class Room(models.Model):
name = models.CharField(max_length=100, unique=True)
version = models.PositiveBigIntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
bumped_at = models.DateTimeField(auto_now_add=True)
last_message = models.ForeignKey(
'Message', related_name='last_message_rooms',
on_delete=models.SET_NULL, null=True, blank=True,
)
def increment_version(self):
self.version += 1
self.save()
return self.version
def __str__(self):
return self.name
# Path: backend/chat/models.py
class RoomMember(models.Model):
room = models.ForeignKey(Room, related_name='memberships', on_delete=models.CASCADE)
user = models.ForeignKey(User, related_name='rooms', on_delete=models.CASCADE)
joined_at = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ('room', 'user')
def __str__(self):
return f"{self.user.username} in {self.room.name}"
# Path: backend/chat/models.py
class Outbox(models.Model):
method = models.TextField(default="publish")
payload = models.JSONField()
partition = models.BigIntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
# Path: backend/chat/models.py
class CDC(models.Model):
method = models.TextField(default="publish")
payload = models.JSONField()
partition = models.BigIntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
# Path: backend/chat/serializers.py
class MessageSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
room = MessageRoomSerializer(read_only=True)
class Meta:
model = Message
fields = ['id', 'content', 'user', 'room', 'created_at']
# Path: backend/chat/serializers.py
class RoomSearchSerializer(serializers.ModelSerializer):
is_member = serializers.BooleanField(read_only=True)
class Meta:
model = Room
fields = ['id', 'name', 'created_at', 'is_member']
# Path: backend/chat/serializers.py
class RoomSerializer(serializers.ModelSerializer):
member_count = serializers.SerializerMethodField()
last_message = LastMessageSerializer(read_only=True)
def get_member_count(self, obj):
return obj.member_count
class Meta:
model = Room
fields = ['id', 'name', 'version', 'bumped_at', 'member_count', 'last_message']
# Path: backend/chat/serializers.py
class RoomMemberSerializer(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
room = RoomSerializer(read_only=True)
class Meta:
model = RoomMember
fields = ['room', 'user']
# Path: backend/chat/views.py
import json
import logging
import requests
from requests.adapters import HTTPAdapter, Retry
from django.conf import settings
from django.db import transaction
from django.db.models import Exists, OuterRef, Count
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status, viewsets
from rest_framework.generics import ListCreateAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .models import Message, Room, RoomMember, Outbox, CDC
from .serializers import MessageSerializer, RoomSearchSerializer, RoomSerializer, RoomMemberSerializer
class RoomListViewSet(ListModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
| return Room.objects.annotate( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: shinkungoo/SymbolicCDM
# Path: SCDM/eval.py
def accuracy(y_pred, y_true, threshold=0.5, weights=None):
pred = np.array(y_pred)
true = np.array(y_true)
result = np.where(pred > threshold, 1, 0)
if weights is not None:
correct = np.sum((true == result) * weights)
total = np.sum(weights)
return correct / total
else:
return metrics.accuracy_score(true, result)
# Path: SCDM/eval.py
def area_under_curve(y_pred, y_true):
pred = np.array(y_pred)
true = np.array(y_true)
fpr, tpr, thresholds = metrics.roc_curve(true, pred)
return metrics.auc(fpr, tpr)
# Path: SCDM/eval.py
def f1_score(y_pred, y_true, threshold=0.5):
pred = np.array(y_pred)
true = np.array(y_true)
result = np.where(pred >= threshold, 1, 0)
return metrics.f1_score(true, result)
# Path: SCDM/utility.py
def init_interaction_function(discrimination, proficiency, q_matrix_line):
if type(proficiency) is np.ndarray:
return discrimination * np.sum(proficiency * q_matrix_line)
else:
return discrimination * (proficiency * q_matrix_line).sum(dim=1).unsqueeze(1)
# Path: SCDM/parameter.py
import torch
import torch.nn as nn
from tqdm import tqdm
from .eval import accuracy, area_under_curve, f1_score
from .utility import init_interaction_function
class ComputeIF(nn.Module):
def __init__(self,
student_number,
question_number,
knowledge_number):
super(ComputeIF, self).__init__()
self.student_emb = nn.Embedding(student_number, knowledge_number)
self.difficulty = nn.Embedding(question_number, knowledge_number)
self.discrimination = nn.Embedding(question_number, 1)
# initialize
for name, param in self.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
def forward(self, student_id, question, q_matrix_line, interaction_func):
proficiency_level = torch.sigmoid(self.student_emb(student_id))
difficulty = torch.sigmoid(self.difficulty(question))
discrimination = torch.sigmoid(self.discrimination(question))
input_x = interaction_func(discrimination, proficiency_level - difficulty, q_matrix_line)
output = torch.sigmoid(input_x)
return output.view(-1)
class Parameter:
def __init__(self,
student_number: int,
question_number: int,
knowledge_number: int,):
self.net = ComputeIF(student_number, question_number, knowledge_number)
self.student_number = student_number
self.question_number = question_number
self.knowledge_number = knowledge_number
self.interaction_function = init_interaction_function
self.interaction_function_string = "initial interaction function"
def train(self, train_set, epochs, device="cpu", lr=0.002, init=True):
# initialize
if init:
for name, param in self.net.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
self.net = self.net.to(device)
self.net.train()
loss_function = nn.BCELoss()
optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)
with tqdm(total=epochs, desc="Training Process", unit="epoch") as pbar:
for epoch in range(epochs):
epoch_losses = []
for batch_data in train_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
y: torch.Tensor = y.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
self.interaction_function)
loss = loss_function(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_losses.append(loss.mean().item())
pbar.update()
def evaluate(self, test_set, interaction_func, device="cpu"):
self.net = self.net.to(device)
self.net.eval()
y_true, y_pred = [], []
for batch_data in test_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
interaction_func)
y_pred.extend(pred.detach().cpu().tolist())
y_true.extend(y.tolist())
acc = accuracy(y_pred, y_true)
auc = area_under_curve(y_pred, y_true)
| f1 = f1_score(y_pred, y_true) |
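For reference, the three evaluation helpers quoted at the top of this example are thin wrappers over scikit-learn; the self-contained snippet below reproduces that computation on toy predictions, keeping the same 0.5 threshold conventions as the quoted code.

import numpy as np
from sklearn import metrics

y_true = np.array([1, 0, 1, 1, 0, 0])
y_pred = np.array([0.9, 0.2, 0.6, 0.4, 0.3, 0.7])

acc = metrics.accuracy_score(y_true, np.where(y_pred > 0.5, 1, 0))    # as in accuracy() above
fpr, tpr, _ = metrics.roc_curve(y_true, y_pred)
auc = metrics.auc(fpr, tpr)                                           # as in area_under_curve() above
f1 = metrics.f1_score(y_true, np.where(y_pred >= 0.5, 1, 0))          # as in f1_score() above
print(f"acc={acc:.3f} auc={auc:.3f} f1={f1:.3f}")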
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pan-x-c/EE-LLM
# Path: megatron/core/parallel_state.py
def get_tensor_and_expert_parallel_group():
assert (
_TENSOR_AND_EXPERT_PARALLEL_GROUP is not None
), 'tensor and expert parallel group is not initialized'
return _TENSOR_AND_EXPERT_PARALLEL_GROUP
# Path: megatron/core/parallel_state.py
def get_tensor_model_parallel_group(check_initialized=True):
"""Get the tensor model parallel group the caller rank belongs to."""
if check_initialized:
assert (
_TENSOR_MODEL_PARALLEL_GROUP is not None
), 'tensor model parallel group is not initialized'
return _TENSOR_MODEL_PARALLEL_GROUP
# Path: megatron/core/parallel_state.py
def get_tensor_model_parallel_rank():
"""Return my rank for the tensor model parallel group."""
global _MPU_TENSOR_MODEL_PARALLEL_RANK
if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None:
return _MPU_TENSOR_MODEL_PARALLEL_RANK
return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
# Path: megatron/core/parallel_state.py
def get_tensor_model_parallel_world_size():
"""Return world size for the tensor model parallel group."""
global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None:
return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE
return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
# Path: megatron/core/tensor_parallel/utils.py
def split_tensor_along_last_dim(
tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False,
) -> List[torch.Tensor]:
""" Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
Returns:
A list of Tensors
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
# Path: megatron/core/tensor_parallel/mappings.py
import torch
from megatron.core.parallel_state import (
get_tensor_and_expert_parallel_group,
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from .utils import split_tensor_along_last_dim
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
def _reduce(input_):
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_tensor_model_parallel_world_size() == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
return input_
def _split_along_last_dim(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Split along last dimension.
| input_list = split_tensor_along_last_dim(input_, world_size) |
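Outside of the distributed setup, the split helper quoted above is just torch.split over the last dimension; the snippet below shows the same slicing on a toy tensor, with world_size and rank as stand-in values for the tensor-parallel group size and rank.

import torch

def split_last_dim(tensor: torch.Tensor, num_partitions: int):
    # Equal chunks along the last axis, as in split_tensor_along_last_dim above.
    chunk_size = tensor.size(-1) // num_partitions
    return torch.split(tensor, chunk_size, dim=-1)

x = torch.arange(8).reshape(2, 4)
world_size, rank = 2, 1
print(split_last_dim(x, world_size)[rank])  # tensor([[2, 3], [6, 7]]) -- this rank's slice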
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kanadeblisst00/WeChat-PyRobot
# Path: src/wechat_pyrobot/ctypes_json.py
class CDataJSONEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, (Array, list)):
return [self.default(e) for e in obj]
if isinstance(obj, _Pointer):
return self.default(obj.contents) if obj else None
if isinstance(obj, _SimpleCData):
return self.default(obj.value)
if isinstance(obj, (bool, int, float, str)):
return obj
if obj is None:
return obj
if isinstance(obj, (Structure, Union)):
result = {}
anonymous = getattr(obj, '_anonymous_', [])
for key, *_ in getattr(obj, '_fields_', []):
value = getattr(obj, key)
# private fields don't encode
if key.startswith('_'):
continue
if key in anonymous:
result.update(self.default(value))
else:
result[key] = self.default(value)
return result
return JSONEncoder.default(self, obj)
# Path: src/wechat_pyrobot/offset.py
CALL_OFFSET = {
"3.9.8.12": {
"SendMsgFreeCallOffset": 0x823370,
"SendTextCallOffset": 0xDE22D0,
"SendImageCall0Offset": 0x821E40,
"SendImageCall1Offset": 0xDE1880,
"SendImageCall2Offset": 0xF755C0,
"LogEnterCallOffset": 0x102C250,
"LogLeaveCallOffset": 0x102C584,
"HookMsgCallOffset": 0xE0F743,
"RevokeMsgCallOffset": 0xE14880,
},
"3.9.8.15": {
"SendMsgFreeCallOffset": 0x94E590,
"SendTextCallOffset": 0x1091CE0,
"SendImageCall0Offset": 0x94CD10,
"SendImageCall1Offset": 0x10911F0,
"LogEnterCallOffset": 0x13D6380,
"LogLeaveCallOffset": 0x13D6380,
"HookMsgCallOffset": 0x10E8E30,
"RevokeMsgCallOffset": 0x10E2A50,
}
}
# Path: src/wechat_pyrobot/hookmsg32.py
import json
from py_process_hooker import Hook
from py_process_hooker.winapi import *
from .ctypes_json import CDataJSONEncoder
from .offset import CALL_OFFSET
struct_size = 0x2E0
class GeneralStructW32(Structure):
_fields_ = [
('value', c_wchar_p),
('len1', c_uint32),
('len2', c_uint32),
('_unkown_value0', c_uint32),
('_unkown_value1', c_uint32)
]
class WeChatMsgStruct32(Structure):
_fields_ = [
('_unkown_value0', c_uint32 * 8),
('localid', c_uint32),
('_unkown_value2', c_uint32 * 3),
('msgid', c_ulonglong),
('msg_type', c_uint32),
('is_self_msg', c_uint32),
('_unkown_value3', c_uint32),
('timestamp', c_uint32),
('sender', GeneralStructW32),
('_unkown_value4', c_uint32 * 5),
('content', GeneralStructW32),
('_unkown_value5', c_uint32 * 66),
('room_sender', GeneralStructW32),
('sign', GeneralStructW32),
('thumb_path', GeneralStructW32),
('file_path', GeneralStructW32),
]
| class MyCDataJSONEncoder(CDataJSONEncoder):
|
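To see what the ctypes-to-JSON encoder above produces, here is a minimal standalone example with a toy Structure; the Point fields are made up for illustration and have nothing to do with the WeChat message layout.

import json
from ctypes import Structure, c_uint32

class Point(Structure):
    _fields_ = [("x", c_uint32), ("y", c_uint32)]

class TinyCDataEncoder(json.JSONEncoder):
    # Simplified version of the CDataJSONEncoder idea: walk _fields_ and build a dict.
    def default(self, obj):
        if isinstance(obj, Structure):
            return {name: getattr(obj, name) for name, *_ in obj._fields_}
        return super().default(obj)

print(json.dumps(Point(3, 4), cls=TinyCDataEncoder))  # {"x": 3, "y": 4}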
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mitrefireline/simharness
# Path: simharness2/environments/reactive.py
class ReactiveHarness(RLHarness): # noqa: D205,D212,D415
def __init__(self, config: EnvContext) -> None:
def set_trial_results_path(self, path: str) -> None:
def step(
self, action: np.ndarray
) -> Tuple[np.ndarray, float, bool, bool, Dict[str, Any]]: # noqa
def _do_one_agent_step(self, action: np.ndarray) -> None:
def _parse_action(self, action: np.ndarray) -> Tuple[int, int]:
def _update_agent_position(self) -> None:
def _agent_pos_is_unburned(self) -> bool:
def _update_mitigation(self) -> None:
def _do_one_simulation_step(self) -> bool:
def _run_simulation(self):
def _run_benchmark(self):
def _update_state(self):
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[Dict[Any, Any]] = None,
) -> Tuple[np.ndarray, Dict[Any, Any]]: # noqa
def get_nonsim_attribute_bounds(self) -> OrderedDict[str, Dict[str, int]]: # noqa
def get_nonsim_attribute_data(self) -> OrderedDict[str, np.ndarray]: # noqa
def render(self): # noqa
def _configure_env_rendering(self, should_render: bool) -> None:
def _increment_evaluation_iterations(self) -> None:
def _set_agent_pos_for_episode_start(self):
def _log_env_init(self):
def _log_env_reset(self):
def _setup_harness_analytics(self, harness_analytics_partial: partial) -> None:
def _setup_reward_cls(self, reward_cls_partial: partial) -> None:
# Path: simharness2/sim_registry.py
def get_simulation_from_name(name: str) -> Tuple[Type[Simulation], Config, Config]:
"""
Return the simulation class and config files associated with the given name.
Arguments:
name (str): Name of the requested simulation
Raises:
KeyError: Assert that the simulation has been registered with the
simulation registry
Returns:
Tuple(Type[Simulation], Config, Config): Tuple of the simulation class and
train/eval configs associated with the given name
"""
if name not in _simulation_registry:
raise KeyError(
f"Error: unknown simulation type {name}, "
"the only registed simulation types are: "
f"{list(_simulation_registry.keys())}!"
)
return _simulation_registry[name]
# Path: simharness2/environments/tests/check_reactive_environments.py
import argparse
import logging
import os
import yaml
import traceback
from typing import Any, Dict
from ray.rllib.utils.pre_checks.env import check_gym_environments
from simharness2.environments.reactive import (
ReactiveDiscreteHarness,
ReactiveHarness,
)
from simharness2.sim_registry import get_simulation_from_name
# noqa : D212,D415
"""
To avoid an ImportError and/or ModuleNotFoundError, run this script as a module:
python -m simharness2.environments.tests.check_reactive_environments \
--config <path_to_config_file> --env-type <train|eval>
(above command should be executed from the root of the repository)
"""
def setup_args():
"""Parse command line options (mode and config)."""
parser = argparse.ArgumentParser(description="Test custom environment with RLlib.")
help_s = "Path to (harness) config file."
parser.add_argument("--config", required=True, type=str, help=help_s)
help_s, choices = "Environment type.", ["train", "eval"]
parser.add_argument(
"--env-type", required=True, type=str, help=help_s, choices=choices
)
return parser.parse_args()
def get_config(cfg_path: str) -> Dict[str, Any]:
"""Load the YAML config file from the given path.
Arguments:
cfg_path: A string indicating the file path to load the YAML file from.
Returns:
A dictionary containing the contents of the YAML configuration file.
"""
with open(cfg_path, "r") as f:
return yaml.safe_load(f)
def reactive_multidiscrete_env_creator(env_config: Dict[str, Any]) -> ReactiveHarness:
"""Environment creator for RLlib.
Arguments:
env_config: A dictionary containing the environment configuration.
Returns:
An instance of the ReactiveHarness (environment) class.
"""
return ReactiveHarness(**env_config)
| def reactive_discrete_env_creator(env_config: str) -> ReactiveDiscreteHarness: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JeffJerseyCow/eviloauth
# Path: eviloauth/idp.py
class IDP():
idps = get_idps()
authz_endpoint = 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize'
token_endpoint = 'https://login.microsoftonline.com/common/oauth2/v2.0/token'
def __init__(self, idp, redirect_server, **kwargs):
if idp not in self.idps:
raise EviloauthCommandException(
f'IDP {idp} is not supported. Supported IDPs: {self.idps}')
self.redirect_server = redirect_server
self.idp = idp
self.client_id = kwargs.get('client_id')
self.scope = kwargs.get('scope')
self.final_destination = kwargs.get('final_destination')
app.config['TOKEN_ENDPOINT'] = self.token_endpoint
self.__idp_setup__()
def __idp_setup__(self):
if not self.client_id or not self.scope or not self.final_destination:
self.client_id = prompt('Client ID: ')
self.scope = prompt('Scope: ')
self.final_destination = prompt('Final Destination: ')
app.config['CLIENT_ID'] = self.client_id
app.config['SCOPE'] = self.scope
app.config['FINAL_DESTINATION'] = self.final_destination
if self.idp == 'entra_implicit_flow':
self.response_type = 'token'
self.redirect_uri = f'https://{self.redirect_server}/redirect'
app.config['REDIRECT_URI'] = self.redirect_uri
app.config['RESPONSE_TYPE'] = self.response_type
params = {
'client_id': self.client_id,
'scope': self.scope,
'response_type': self.response_type,
'redirect_uri': f'https://{self.redirect_server}/redirect'
}
self.uri = requests.Request(
'GET', self.authz_endpoint, params=params).prepare().url
elif self.idp == 'entra_code_flow':
self.response_type = 'code'
self.redirect_uri = f'https://{self.redirect_server}/hook'
self.state = self.__generate_state__()
self.code_verifier = self.__generate_code_verifier__()
self.code_challenge = self.__generate_code_challenge__(
self.code_verifier)
self.code_challenge_method = 'S256'
app.config['TOKEN_ENDPOINT'] = self.token_endpoint
app.config['REDIRECT_URI'] = self.redirect_uri
app.config['RESPONSE_TYPE'] = self.response_type
app.config['STATE'] = self.state
app.config['CODE_VERIFIER'] = self.code_verifier
app.config['CODE_CHALLENGE'] = self.code_challenge
app.config['CODE_CHALLENGE_METHOD'] = self.code_challenge_method
params = {
'client_id': self.client_id,
'scope': self.scope,
'response_type': self.response_type,
'redirect_uri': self.redirect_uri,
'state': self.state,
'code_challenge': self.code_challenge,
'code_challenge_method': self.code_challenge_method
}
self.uri = requests.Request(
'GET', self.authz_endpoint, params=params).prepare().url
logging.info(self.uri)
def __generate_state__(self):
return ''.join([str(random.randint(0, 9)) for _ in range(5)])
def __generate_code_verifier__(self):
allowed_chars = string.ascii_letters + string.digits + "-._~"
return ''.join([random.choice(allowed_chars) for _ in range(48)])
def __generate_code_challenge__(self, code_verifier):
code_verifier_encoded = code_verifier.encode()
code_verifier_digest = hashlib.sha256(code_verifier_encoded).digest()
code_challenge = base64.urlsafe_b64encode(
code_verifier_digest).decode().replace('=', '')
return code_challenge
def __str__(self):
idp_str = f'{self.idp}'
idp_str += f'\n\tClient ID: {self.client_id}'
idp_str += f'\n\tScope: {self.scope}'
idp_str += f'\n\tFinal Destination: {self.final_destination}'
return idp_str
def __repr__(self):
return self.__str__()
# Path: eviloauth/exceptions.py
class EviloauthCommandException(Exception):
def __init__(self, message="An error occurred in Eviloauth command execution"):
super().__init__(message)
# Path: eviloauth/dispatcher.py
import sys
import logging
from eviloauth.idp import IDP
from eviloauth.exceptions import EviloauthCommandException
class Dispatcher:
def __init__(self, flask_server, module_dict, cache, redirect_server):
logging.debug('Initializing dispatcher')
logging.debug(f'\tFlask server: {flask_server}')
logging.debug(f'\tModule dict: {module_dict}')
logging.debug(f'\tCache: {cache}')
logging.debug(f'\tRedirect server: {redirect_server}')
self.flask_server = flask_server
self.module_dict = module_dict
self.cache = cache
self.redirect_server = redirect_server
def dispatch(self, commands):
cmd, sub, arg, *args = commands.split(' ') + [None, None, None]
if cmd == 'exit':
self.dispatch_exit()
elif cmd == 'module':
self.dispatch_module(cmd, sub, arg)
elif cmd == 'tokens':
self.dispatch_tokens(cmd, sub)
elif cmd == 'idp':
self.dispatch_idp(cmd, sub, arg)
elif cmd == 'target':
self.dispatch_target(cmd, sub, arg)
else:
raise EviloauthCommandException(
'Unknown command %s' % cmd)
def dispatch_exit(self):
print('Exiting...')
self.flask_server.shutdown()
sys.exit()
def dispatch_module(self, cmd, sub, arg):
mod = self.module_dict[f'eviloauth.{cmd}.{sub}.{arg}']
mod.__run__(self.cache.get('target'), 0)
def dispatch_tokens(self, cmd, sub):
general_tokens = self.cache.get('tokens')
if sub == 'list':
print([v for v in general_tokens.keys()])
elif sub == 'add':
logging.error('Not implemented yet')
else:
raise EviloauthCommandException(
'Unknown "%s" command %s' % (cmd, sub))
def dispatch_idp(self, cmd, sub, arg):
if sub == 'list':
print('Current IDP: %s' % self.cache.get('idp'))
elif sub == 'configure':
| idp = IDP(arg, self.redirect_server) |
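The verifier/challenge helpers quoted in the IDP class are a standard PKCE S256 transform; the snippet below reproduces that logic standalone (it mirrors the quoted methods rather than calling eviloauth) so the relationship between the two values is easy to inspect.

import base64
import hashlib
import random
import string

def make_code_verifier(length: int = 48) -> str:
    allowed = string.ascii_letters + string.digits + "-._~"
    return "".join(random.choice(allowed) for _ in range(length))

def make_code_challenge(code_verifier: str) -> str:
    digest = hashlib.sha256(code_verifier.encode()).digest()
    # URL-safe base64 with the '=' padding stripped, as in the quoted method.
    return base64.urlsafe_b64encode(digest).decode().replace("=", "")

verifier = make_code_verifier()
print(verifier, "->", make_code_challenge(verifier))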
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: racinette/querky
# Path: querky/backends/postgresql/name_type_mapper.py
class PostgresqlNameTypeMapper(PostgresqlTypeMapper):
def __init__(self, typemap: dict[str, dict[str, TypeMetaData]]):
self.type_cache = dict()
# copy the typemap
self.typemap = {
schema_name: {
type_name: type_metadata
for type_name, type_metadata in schema_map.items()
}
for schema_name, schema_map in typemap.items()
}
def set_mapping(self, schema: str, type_name: str, metadata: TypeMetaData) -> None:
if schema not in self.typemap:
self.typemap[schema] = dict()
s = self.typemap[schema]
s[type_name] = metadata
async def get_pg_type(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = await contract.raw_fetchone(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
self.type_cache[oid] = pg_type  # cache by oid so later lookups by oid hit
return pg_type
def get_pg_type_sync(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = contract.raw_fetchone_sync(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
self.type_cache[oid] = pg_type  # cache by oid so later lookups by oid hit
return pg_type
def get_type_knowledge_impl(self, pg_type) -> TypeKnowledge:
basename: str = pg_type['type_string']
schema: str = pg_type['namespace_string']
is_array = basename.endswith("[]")
if is_array:
basename = basename[:-2]
try:
transforms = self.typemap[schema]
except KeyError:
raise KeyError(f"No transforms for schema: {schema} ({basename})")
try:
metadata = transforms[basename]
except KeyError:
raise KeyError(f"No metadata for type: {schema}.{basename} (array={is_array})")
return TypeKnowledge(
metadata,
is_array=is_array,
is_optional=None
)
async def get_type_knowledge(self, contract: Contract, conn, oid: int) -> TypeKnowledge:
return self.get_type_knowledge_impl(await self.get_pg_type(contract, conn, oid))
def get_type_knowledge_sync(self, contract: Contract, conn, oid: int) -> TypeKnowledge:
return self.get_type_knowledge_impl(self.get_pg_type_sync(contract, conn, oid))
# Path: querky/base_types.py
class TypeMetaData(GetImportsMixin):
counterpart: str
required_imports: set[str] | None = None
def get_imports(self) -> set[str]:
if self.required_imports is None:
return set()
return set(self.required_imports)
@classmethod
def from_type(cls, t: typing.Type) -> TypeMetaData:
type_name = t.__name__
module_path = t.__module__
return TypeMetaData(
counterpart=type_name,
required_imports={f"from {module_path} import {type_name}"}
)
# Path: querky/common_imports.py
DATETIME_MODULE = "import datetime"
# Path: querky/common_imports.py
DECIMAL = "from decimal import Decimal"
# Path: querky/common_imports.py
UUID = "from uuid import UUID"
# Path: querky/common_imports.py
UNION = "from typing import Union"
# Path: querky/backends/postgresql/asyncpg/name_type_mapper.py
from querky.backends.postgresql.name_type_mapper import PostgresqlNameTypeMapper
from querky.base_types import TypeMetaData
from querky.common_imports import DATETIME_MODULE
from querky.common_imports import DECIMAL as DECIMAL_IMPORT
from querky.common_imports import UUID as UUID_IMPORT
from querky.common_imports import UNION as UNION_IMPORT
ASYNCPG_RANGE_IMPORT = "from asyncpg import Range as _Range"
ASYNCPG_RECORD_IMPORT = "from asyncpg import Record as _Record"
ASYNCPG_BITSTRING_IMPORT = "from asyncpg import BitString as _BitString"
ASYNCPG_BOX_IMPORT = "from asyncpg import Box as _Box"
ASYNCPG_CIRCLE_IMPORT = "from asyncpg import Circle as _Circle"
ASYNCPG_LINE_IMPORT = "from asyncpg import Line as _Line"
ASYNCPG_LINE_SEGMENT_IMPORT = "from asyncpg import LineSegment as _LineSegment"
ASYNCPG_PATH_IMPORT = "from asyncpg import Path as _Path"
ASYNCPG_POINT_IMPORT = "from asyncpg import Point as _Point"
ASYNCPG_POLYGON_IMPORT = "from asyncpg import Polygon as _Polygon"
| INT = TypeMetaData("int") |
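For a sense of how entries like the INT constant above get consumed, here is a tiny standalone stand-in for the schema -> type-name -> metadata map that PostgresqlNameTypeMapper works with; the dataclass and entries are simplified illustrations, not the querky originals.

from dataclasses import dataclass, field

@dataclass
class TypeMetaDataSketch:
    counterpart: str
    required_imports: set[str] = field(default_factory=set)

# schema -> type name -> metadata, mirroring the typemap structure shown above
typemap = {
    "pg_catalog": {
        "int4": TypeMetaDataSketch("int"),
        "uuid": TypeMetaDataSketch("UUID", {"from uuid import UUID"}),
    }
}

meta = typemap["pg_catalog"]["uuid"]
print(meta.counterpart, meta.required_imports)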
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Shahzadnit/EZ-CLIP
# Path: utils/lr_scheduler.py
class WarmupMultiStepLR(WarmupLR):
def __init__(self,
optimizer,
milestones,
gamma=0.1,
warmup_epochs=0,
warmup_powers=1,
warmup_lrs=0,
last_epoch=-1):
if not list(milestones) == sorted(milestones):
raise ValueError('Milestones should be a list of'
' increasing integers. Got %s' % repr(milestones))
self.milestones = milestones
self.gamma = gamma
super(WarmupMultiStepLR, self).__init__(optimizer,
warmup_epochs,
warmup_powers,
warmup_lrs,
last_epoch)
if self.milestones[0] <= max(self.warmup_epochs):
raise ValueError('milestones[0] ({}) <= max(warmup_epochs) ({})'.format(
milestones[0], max(self.warmup_epochs)))
def get_single_lr_after_warmup(self, group_index):
factor = self.gamma ** bisect_right(self.milestones, self.last_epoch)
return self.base_lrs[group_index] * factor
# Path: utils/lr_scheduler.py
class WarmupCosineAnnealingLR(WarmupLR):
def __init__(self,
optimizer,
total_epoch,
final_factor=0,
warmup_epochs=0,
warmup_powers=1,
warmup_lrs=0,
last_epoch=-1):
self.total_epoch = total_epoch
self.final_factor = final_factor
super(WarmupCosineAnnealingLR, self).__init__(optimizer,
warmup_epochs,
warmup_powers,
warmup_lrs,
last_epoch)
def get_single_lr_after_warmup(self, group_index):
warmup_epoch = self.warmup_epochs[group_index]
progress = (self.last_epoch - warmup_epoch) / (self.total_epoch - warmup_epoch)
progress = min(progress, 1.0)
cosine_progress = (math.cos(math.pi * progress) + 1) / 2
factor = cosine_progress * (1 - self.final_factor) + self.final_factor
return self.base_lrs[group_index] * factor
# Path: utils/solver.py
import torch.optim as optim
from utils.lr_scheduler import WarmupMultiStepLR, WarmupCosineAnnealingLR
def _optimizer(config, model):
if config.solver.optim == 'adam':
optimizer = optim.Adam([{'params': model.parameters()}],
lr=config.solver.lr, betas=(0.9, 0.98), eps=1e-8,
weight_decay=0.2) # Params used from paper; the lr is smaller, which is safer for fine-tuning on a new dataset
print('Adam')
elif config.solver.optim == 'sgd':
optimizer = optim.SGD([{'params': model.parameters()}],
config.solver.lr,
momentum=config.solver.momentum,
weight_decay=config.solver.weight_decay)
print('SGD')
elif config.solver.optim == 'adamw':
vision_params = list(map(id, model.visual.parameters()))
text_params = filter(lambda p: id(p) not in vision_params,
model.parameters())
optimizer = optim.AdamW([{'params': text_params},
{'params': model.visual.parameters(), 'lr': config.solver.lr * config.solver.ratio},],
betas=(0.9, 0.98), lr=config.solver.lr, eps=1e-8,
weight_decay=config.solver.weight_decay) # Params used from paper; the lr is smaller, which is safer for fine-tuning on a new dataset
for param_group in optimizer.param_groups:
print(param_group['lr'])
print('AdamW')
else:
raise ValueError('Unknown optimizer: {}'.format(config.solver.optim))
return optimizer
def _lr_scheduler(config,optimizer):
if config.solver.type == 'cosine':
lr_scheduler = WarmupCosineAnnealingLR(
optimizer,
config.solver.epochs,
warmup_epochs=config.solver.lr_warmup_step
)
elif config.solver.type == 'multistep':
if isinstance(config.solver.lr_decay_step, list):
milestones = config.solver.lr_decay_step
elif isinstance(config.solver.lr_decay_step, int):
milestones = [
config.solver.lr_decay_step * (i + 1)
for i in range(config.solver.epochs //
config.solver.lr_decay_step)]
else:
raise ValueError("error learning rate decay step: {}".format(type(config.solver.lr_decay_step)))
| lr_scheduler = WarmupMultiStepLR(
|
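The cosine schedule quoted at the top of this example boils down to a single scaling factor per epoch; the standalone sketch below evaluates that factor with illustrative values for total_epoch, warmup_epoch and final_factor, without touching the optimizer machinery.

import math

def cosine_factor(epoch: int, total_epoch: int, warmup_epoch: int = 0, final_factor: float = 0.0) -> float:
    # Same arithmetic as WarmupCosineAnnealingLR.get_single_lr_after_warmup above.
    progress = min((epoch - warmup_epoch) / (total_epoch - warmup_epoch), 1.0)
    cosine_progress = (math.cos(math.pi * progress) + 1) / 2
    return cosine_progress * (1 - final_factor) + final_factor

for epoch in (0, 10, 25, 50):
    print(epoch, round(cosine_factor(epoch, total_epoch=50), 3))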
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Gwolfgit/Authoritah
# Path: functions.py
def get_tailscale_ip4() -> str:
try:
output = subprocess.check_output(
["tailscale", "ip", "-4"],
stderr=subprocess.STDOUT,
universal_newlines=True,
)
ip = output.strip()
if is_valid_ip(ip):
return ip
except subprocess.CalledProcessError as e:
logger.error(e)
return ""
# Path: functions.py
def get_tailscale_ip6() -> str:
try:
output = subprocess.check_output(
["tailscale", "ip", "-6"],
stderr=subprocess.STDOUT,
universal_newlines=True,
)
ip = output.strip()
if is_valid_ip(ip):
return ip
except subprocess.CalledProcessError as e:
logger.error(e)
return ""
# Path: models.py
import orjson
from typing import Any, Dict, Tuple
from functions import get_tailscale_ip4, get_tailscale_ip6
from pathlib import Path
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def load_config():
with open(Path(Path(__file__).parent.resolve(), "config.json"), "r") as fd:
return dotdict(orjson.loads(fd.read()))
class DefaultDict(dict):
"""
A dictionary subclass that maintains default keys and values.
"""
def __init__(self, default_values: Dict[Any, Any], *args, **kwargs):
"""
Initialize the dictionary with default values and any additional provided values.
:param default_values: A dictionary of default key-value pairs.
"""
super().__init__()
self.default_values = default_values
self.update(self.default_values)
def __setitem__(self, key, value):
"""
Set a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__setitem__(key, value)
def __delitem__(self, key):
"""
Delete a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__delitem__(key)
def pop(self, key, *args, **kwargs):
"""
Pop a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
return self.default_values[key]
return super().pop(key, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Update the dictionary. Default keys are reset to default values.
"""
updates = dict(*args, **kwargs)
super().update(
{
k: self.default_values[k] if k in self.default_values else updates[k]
for k in updates
}
)
class MyAuthoritah:
def __init__(self, cfg: dotdict):
self.cfg = cfg
self.data = {}
self._relay = self.cfg.default_relay
self._ip6 = get_tailscale_ip6()
| self._ip = get_tailscale_ip4() |
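As a small aside on the dotdict helper used above: it simply routes attribute access to the underlying dict. The usage below copies the class from the excerpt so it runs on its own; the config keys are placeholders, not the project's real config.json contents.

class dotdict(dict):
    """dot.notation access to dictionary attributes (as in the excerpt above)."""
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

cfg = dotdict({"default_relay": "relay.example.net", "port": 53})
print(cfg.default_relay)  # relay.example.net
print(cfg.missing_key)    # None, because dict.get returns None instead of raising AttributeError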
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bluuewhale/nexon-openapi-python
# Path: src/nexon_openapi/utils/_utils.py
def is_list(obj: object) -> TypeGuard[list[object]]:
return isinstance(obj, list)
# Path: src/nexon_openapi/utils/_utils.py
def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]:
return isinstance(obj, Mapping)
# Path: src/nexon_openapi/utils/_utils.py
def is_list_type(typ: type) -> bool:
return (get_origin(typ) or typ) == list
# Path: src/nexon_openapi/utils/_utils.py
def is_union_type(typ: type) -> bool:
return _is_union(get_origin(typ))
# Path: src/nexon_openapi/utils/_utils.py
def extract_type_arg(typ: type, index: int) -> type:
args = get_args(typ)
try:
return cast(type, args[index])
except IndexError as err:
raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err
# Path: src/nexon_openapi/utils/_utils.py
def is_required_type(typ: type) -> bool:
return get_origin(typ) == Required
# Path: src/nexon_openapi/utils/_utils.py
def is_annotated_type(typ: type) -> bool:
return get_origin(typ) == Annotated
# Path: src/nexon_openapi/utils/_utils.py
def strip_annotated_type(typ: type) -> type:
if is_required_type(typ) or is_annotated_type(typ):
return strip_annotated_type(cast(type, get_args(typ)[0]))
return typ
# Path: src/nexon_openapi/_compat.py
def model_dump(
model: pydantic.BaseModel,
*,
exclude_unset: bool = False,
exclude_defaults: bool = False,
) -> Dict[str, Any]:
if PYDANTIC_V2:
return model.model_dump(
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
)
return cast(
"dict[str, Any]",
model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
),
)
# Path: src/nexon_openapi/_compat.py
def is_typeddict(type_: Type[Any]) -> bool: # noqa: ARG001
...
# Path: src/nexon_openapi/utils/_transform.py
from typing import Any, Mapping, Optional, TypeVar, Union, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, override, get_type_hints
from ._utils import (
is_list,
is_mapping,
is_list_type,
is_union_type,
extract_type_arg,
is_required_type,
is_annotated_type,
strip_annotated_type,
)
from .._compat import model_dump, is_typeddict
import pydantic
from __future__ import annotations
_T = TypeVar("_T")
PropertyFormat = Literal["iso8601", "custom"]
class PropertyInfo:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
| if is_annotated_type(type_): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Jack24658735/FedLGT
# Path: dataloaders/data_utils.py
def get_unk_mask_indices(image,testing,num_labels,known_labels,epoch=1):
if testing:
# for consistency across epochs and experiments, seed using hashed image array
random.seed(hashlib.sha1(np.array(image)).hexdigest())
unk_mask_indices = random.sample(range(num_labels), (num_labels-int(known_labels)))
else:
# sample random number of known labels during training
if known_labels>0:
random.seed()
num_known = random.randint(0,int(num_labels*0.75))
else:
num_known = 0
unk_mask_indices = random.sample(range(num_labels), (num_labels-num_known))
return unk_mask_indices
# Path: dataloaders/data_utils.py
def image_loader(path,transform):
try:
image = Image.open(path)
except FileNotFoundError: # weird issues with loading images on our servers
# print('FILE NOT FOUND')
time.sleep(10)
image = Image.open(path)
image = image.convert('RGB')
if transform is not None:
image = transform(image)
return image
# Path: dataloaders/flair_dataset_fed.py
import os
import torch
import numpy as np
import pickle
import h5py
from torch.utils.data import Dataset, DataLoader
from pdb import set_trace as stop
from dataloaders.data_utils import get_unk_mask_indices,image_loader
class FlairFedDataset(Dataset):
def __init__(self, inp_data, split, num_labels, data_file, img_root, curr_user=None, max_samples=-1,transform=None,known_labels=0,testing=False, label_mapping=None, fine_grained_label_mapping=None):
super(FlairFedDataset, self).__init__()
# print(data_file)
#self.split_data = h5py.File('/home/liujack/multi_label/C-Tran/data/flair/cent_data.hdf5', 'r')
self.split_data = inp_data
self.split = split
self.fine_grained_label_mapping = fine_grained_label_mapping
self.label_mapping = label_mapping
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing = testing
self.curr_user = curr_user
self.image_id_list = list(self.split_data[self.split][self.curr_user]['image_ids'])
self.image_list = list(self.split_data[self.split][self.curr_user]['images'])
self.label_list = list(self.split_data[self.split][self.curr_user]['labels'])
self.fg_label_list = list(self.split_data[self.split][self.curr_user]['fine_grained_labels'])
def __len__(self):
return len(self.image_id_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# take a sample
image_ID = self.image_id_list[idx]
# img = np.array(self.split_data[self.split][self.curr_user][image_ID]['image'])
img = self.image_list[idx]
image = self.transform(img)
if self.fine_grained_label_mapping != None:
# fine grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
labels_str = self.fg_label_list[idx]
else:
# coarse grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['labels'])
labels_str = self.label_list[idx]
assert self.label_mapping != None
# fg_labels = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
# image_ID = self.split_data[idx]['file_name']
# img_name = os.path.join(self.img_root,image_ID + '.jpg')
# image = image_loader(img_name,self.transform)
labels_str = labels_str.tolist()
labels_str = str(labels_str)[2:-1].split('|')
tran_labels = [0] * self.num_labels
if self.fine_grained_label_mapping != None:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.fine_grained_label_mapping[label]))
else:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.label_mapping[label]))
assert tran_labels.count(1) == len(labels_str)
labels = torch.Tensor(tran_labels)
| unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels) |
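The masking helper called on the last line above just samples which label positions stay unknown; the standalone snippet below reproduces the test-time branch with toy numbers (10 labels, 3 known) to show what the returned indices look like. The fixed seed stands in for the image-hash seeding used in the quoted function.

import random

num_labels, known_labels = 10, 3
random.seed(0)  # stand-in for seeding with the hashed image array at test time
unk_mask_indices = random.sample(range(num_labels), num_labels - int(known_labels))
print(sorted(unk_mask_indices))  # the 7 label positions that will be masked as unknown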
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AgriCodeHub/dairy-django-backend
# Path: core/choices.py
class CowCategoryChoices(models.TextChoices):
"""
Choices for the category of a cow.
Choices:
- `CALF`: Represents a calf.
- `WEANER`: Represents a weaner.
- `HEIFER`: Represents a heifer.
- `BULL`: Represents a bull.
- `MILKING_COW`: Represents a milking cow.
Usage:
These choices represent the category of a cow in the Cow model.
Use these choices when defining or querying Cow instances to represent the category of a cow.
Example:
```
class Cow(models.Model):
category = models.CharField(max_length=15, choices=CowCategoryChoices.choices)
```
"""
CALF = "Calf"
WEANER = "Weaner"
HEIFER = "Heifer"
BULL = "Bull"
MILKING_COW = "Milking Cow"
# Path: core/choices.py
class CowAvailabilityChoices(models.TextChoices):
"""
Choices for the availability status of a cow.
Choices:
- `ALIVE`: Cow is alive and active.
- `SOLD`: Cow has been sold.
- `DEAD`: Cow has died.
Usage:
These choices represent the availability status of a cow in the Cow model.
Use these choices when defining or querying Cow instances to represent the current status of a cow.
Example:
```
class Cow(models.Model):
availability_status = models.CharField(max_length=50, choices=CowAvailabilityChoices.choices)
```
"""
ALIVE = "Alive"
SOLD = "Sold"
DEAD = "Dead"
# Path: core/utils.py
# Path: production/choices.py
class LactationStageChoices(models.TextChoices):
"""
Choices for the stage of lactation.
Choices:
- `EARLY`: Early stage of lactation.
- `MID`: Mid stage of lactation.
- `LATE`: Late stage of lactation.
- `DRY`: Dry stage (post-lactation).
- `ENDED`: Lactation has ended.
Usage:
These choices represent the stage of lactation in the Lactation model and are utilized in the LactationManager
to determine lactation stages based on the number of days.
Example:
```
class Lactation(models.Model):
lactation_stage = models.CharField(
max_length=5,
choices=LactationStageChoices.choices,
default=LactationStageChoices.DRY,
)
```
Manager Usage:
The `LactationManager` uses these choices in the `lactation_stage` method to determine the stage of lactation
based on the number of days.
Example:
```
class LactationManager(models.Manager):
...
def lactation_stage(self, lactation):
days_in_lactation = self.days_in_lactation(lactation)
if lactation.end_date:
return LactationStageChoices.ENDED
elif days_in_lactation <= 100:
return LactationStageChoices.EARLY
elif days_in_lactation <= 200:
return LactationStageChoices.MID
elif days_in_lactation <= 275:
return LactationStageChoices.LATE
else:
return LactationStageChoices.DRY
```
"""
EARLY = "Early"
MID = "Mid"
LATE = "Late"
DRY = "Dry"
ENDED = "Ended"
# Path: users/choices.py
class SexChoices(models.TextChoices):
MALE = "Male"
FEMALE = "Female"
# Path: production/validators.py
from datetime import timedelta
from django.core.exceptions import ValidationError
from core.choices import CowCategoryChoices, CowAvailabilityChoices
from core.utils import todays_date
from production.choices import LactationStageChoices
from users.choices import SexChoices
from production.models import Lactation
class LactationValidator:
"""
Provides validation methods for lactation records associated with cows.
Methods:
- `validate_age(start_date, cow)`: Validates the start date of lactation based on the cow's age.
- `validate_cow_origin(cow)`: Validates that manual entry is allowed only for bought cows.
- `validate_cow_category(category)`: Validates the cow category for lactation records, allowing only bought cows with calves.
- `validate_fields(start_date, pregnancy, lactation_number, cow, lactation)`: Validates various fields of the lactation record, including start date, end date, pregnancy status, lactation number, and cow's age.
"""
@staticmethod
def validate_age(start_date, cow):
"""
Validates the start date of lactation based on the cow's age.
Args:
- `start_date` (date): The start date of the lactation.
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If the start date is before the cow reaches 635 days of age.
"""
if start_date < cow.date_of_birth + timedelta(days=635):
raise ValidationError(
code="invalid_start_date",
message=f"Invalid start date. Lactation must have started or be around {cow.date_of_birth + timedelta(days=635)}, not {start_date}.",
)
@staticmethod
def validate_cow_origin(cow):
"""
Validates that manual entry is allowed only for bought cows.
Args:
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If manual entry is attempted on a cow that is not bought.
"""
if not cow.is_bought:
raise ValidationError(
code="manual_entry_only_on_bought_cows",
message="Manual entry is allowed only for bought cows.",
)
@staticmethod
def validate_cow_category(category):
"""
Validates the cow category for lactation records, allowing only bought cows with calves.
Args:
- `category` (str): The cow category associated with the lactation record.
Raises:
- `ValidationError`: If the cow category is invalid or not a milking cow with calves.
"""
| if category not in CowCategoryChoices.values: |
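The predicted line only begins the membership check; the rest of the method is not shown. Purely as an illustration of how the docstring's rule (manual entries only for milking cows) could be enforced, and not as the repository's actual code, a complete version of the check might read:

from django.core.exceptions import ValidationError
from core.choices import CowCategoryChoices

def validate_cow_category(category):
    # Hypothetical sketch; the error codes and messages are assumptions.
    if category not in CowCategoryChoices.values:
        raise ValidationError(
            code="invalid_cow_category",
            message=f"Invalid cow category: ({category}).",
        )
    if category != CowCategoryChoices.MILKING_COW:
        raise ValidationError(
            code="only_milking_cows_allowed",
            message="Manual lactation records are only allowed for milking cows.",
        )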
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: PeriniM/Rotary-Pendulum-RL
# Path: control/reinforcement_learning/DQN/DeepQNetwork.py
class DeepQNetwork:
"""
Deep Q Network to approximate the Q function
"""
def __init__(self, lr, num_actions, input_dims, fc_dims = [32, 32], opt='adam', loss='mse'):
self.model = Sequential()
for i in range(len(fc_dims)):
if i == 0:
self.model.add(Dense(fc_dims[i], input_shape=(input_dims,), activation='relu'))
else:
self.model.add(Dense(fc_dims[i], activation='relu'))
self.model.add(Dense(num_actions, activation='linear'))
self.model.compile(optimizer=opt, loss=loss)
self.model.optimizer.learning_rate = lr
def predict(self, state):
"""
Predict the Q values for a given state
"""
return self.model(state).numpy()
def train_on_batch(self, states, q_targets):
"""
Train the network on a batch of states and q_targets
"""
return self.model.train_on_batch(states, q_targets)
def train_batch_gradientTape(self, states, q_targets):
"""
Train the network on a batch of states and q_targets using GradientTape
"""
with tf.GradientTape() as tape:
predictions = self.model(states)
loss = tf.keras.losses.MSE(q_targets, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.model.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
# take the mean of the loss
loss = tf.reduce_mean(loss).numpy()
return loss
def evaluate(self, states, q_targets, verbose=0):
"""
Evaluate the network on a batch of states and q_targets
"""
return self.model.evaluate(states, q_targets, verbose=verbose)
# Path: control/reinforcement_learning/DQN/ReplayBuffer.py
class ReplayBuffer:
"""
Stores and retrieves gameplay experiences
"""
def __init__(self, size):
self.gameplay_experiences = deque(maxlen=size)
def store_tuple(self, state, action, reward, new_state, done):
"""
Store the experience in the replay buffer
"""
self.gameplay_experiences.append((state, action, reward, new_state, done))
def sample_batch(self, batch_size):
"""
Sample a random batch of experiences from the replay buffer
"""
random_sample = random.sample(self.gameplay_experiences, batch_size)
states, actions, rewards, new_states, dones = map(np.asarray, zip(*random_sample))
return states, actions, rewards, new_states, dones
# Path: control/reinforcement_learning/DQN/Agent.py
import os
import configparser
import ast
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import copy
import time
import tensorflow as tf
from matplotlib import cm
from datetime import datetime
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import TensorBoard
from control.reinforcement_learning.DQN.DeepQNetwork import DeepQNetwork
from control.reinforcement_learning.DQN.ReplayBuffer import ReplayBuffer
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Agent:
"""
DQN Agent
- Take an environment
- Set up the deep neural network
- Store the experience
- Choose action
- Train the network
- Evaluate the network
"""
def __init__(self, env):
# check if gpu is available
if tf.config.list_physical_devices('GPU'):
# print the device name
print("GPU is available")
print("Device name: {}".format(tf.test.gpu_device_name()))
else:
print("GPU is not available")
self.env = env
self.nJoint = self.env.nbJoint
# read INI file
# get the path of the root directory
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ini_file_path = os.path.join(root_dir, 'config.ini')
self.params = self.parse_ini(ini_file_path)
# set up the parameters from the INI file
self.action_steps = int(self.params['action_steps'])
self.torque_range = ast.literal_eval(self.params['control_range'])
self.max_episode_steps = int(self.params['max_episode_steps'])
self.train_episodes = int(self.params['train_episodes'])
self.lr = float(self.params['lr'])
self.discount_factor = float(self.params['discount_factor'])
self.epsilon = float(self.params['epsilon'])
self.epsilon_decay_episodes = int(self.params['epsilon_decay_episodes'])
self.epsilon_final = float(self.params['epsilon_final'])
self.buffer_size = int(self.params['buffer_size'])
self.batch_size = int(self.params['batch_size'])
self.hidden_dims = ast.literal_eval(self.params['hidden_dims'])
self.update_rate_episodes = int(self.params['target_update_episodes'])
self.train_rate_steps = int(self.params['train_rate_steps'])
self.discounted_reward = 0.0
self.epsilon_decay = (self.epsilon - self.epsilon_final) / self.epsilon_decay_episodes
# set up the environment parameters
self.env.num_actions = self.action_steps
self.env.range_actions = self.torque_range
self.env.maxIter = self.max_episode_steps
self.env.umax = self.torque_range[1]
self.env.actions = np.linspace(self.env.range_actions[0], self.env.range_actions[1], self.action_steps)
self.env.action_space = [i for i in range(self.action_steps)]
self.action_space = self.env.action_space
self.total_step_counter = 0
| self.replay_buffer = ReplayBuffer(self.buffer_size) |
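The constructor wires up the replay buffer, the discretized action space and the exploration schedule. As a minimal sketch of how those pieces are usually combined at decision time (an assumption about the policy, not the repository's choose_action), an epsilon-greedy selection looks like this:

import numpy as np

def choose_action_epsilon_greedy(q_net, state, action_space, epsilon):
    # Explore with probability epsilon, otherwise act greedily w.r.t. the Q-network.
    if np.random.random() < epsilon:
        return int(np.random.choice(action_space))
    q_values = q_net.predict(np.asarray(state, dtype=np.float32)[np.newaxis, :])
    return int(np.argmax(q_values))

Here q_net.predict matches the DeepQNetwork.predict signature shown earlier in this task.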
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Kokonico/ObjLog
# Path: objlog/LogMessages.py
class Debug(LogMessage):
"""the default debug message, with blue color"""
level = "DEBUG"
color = "\033[94m"
# Path: objlog/Base/LogMessage.py
class LogMessage:
"""a base message to be logged
Attributes:
color
level (name)
WARNING: this class should not be used directly, use a subclass instead
it is designed to be used as a base class for other classes, and will not work properly if used directly.
"""
def __init__(self, message):
self.message = str(message)
self.timestamp = datetime.now()
self.unix_timestamp = time_ns() // 1_000_000 # deprecated, use self.unix instead
self.unix = time_ns() // 1_000_000
# create uuid
self.uuid = f"{time_ns()}-{random.randint(0, 1000)}"
try:
t1 = self.color
t2 = self.level
except AttributeError:
raise TypeError("this class should not be used directly, use a subclass instead")
def __str__(self):
return f"[{self.timestamp}] {self.level}: {self.message}"
def __repr__(self):
return f"{self.level}: {self.message}"
def __eq__(self, other):
return self.uuid == other.uuid
def __ne__(self, other):
return self.uuid != other.uuid
def colored(self) -> str:
"""return a colored version of the message"""
return f"{self.color}[{self.timestamp}] {self.level}: {self.message}\033[0m"
# Path: objlog/Base/LogNode.py
from objlog.LogMessages import Debug
from objlog.Base.LogMessage import LogMessage # "no parent package" error happens when I don't specify the package,
from collections import deque
"""The LogNode class, the main class of the ObjLogger"""
# IDK why
class LogNode:
"""A LogNode, the main class of the ObjLogger. It can log messages to a file, to the console, or both."""
open = open # this code is probably the reason why my dad left me
# this is clearly not a good way to do this, but I don't know how to do it better
# if anyone can prevent doing this, and fix the exception caused when deleting a LogNode, please do it
# else please increment this number by 1
# thank you
# total_failed_attempts_to_fix_this = 1
def __init__(self, name: str, log_file: str | None = None, print_to_console: bool = False,
print_filter: list | None = None, max_messages_in_memory: int = 500, max_log_messages: int = 1000,
log_when_closed: bool = True, wipe_log_file_on_init: bool = False):
self.log_file = log_file
self.name = name
self.print = print_to_console
self.messages = deque(maxlen=max_messages_in_memory)
self.max = max_messages_in_memory
self.maxinf = max_log_messages
self.print_filter = print_filter
self.log_closure_message = log_when_closed
self.log_len = 0
# check if log exists (in file system), and if so, clear it
if isinstance(log_file, str) and wipe_log_file_on_init:
with open(log_file, "w+") as f:
f.write("")
def log(self, message, override_log_file: str | None = None, force_print: tuple[bool, bool] = (False, False),
preserve_message_in_memory: bool = True) -> None:
"""log a message"""
# make sure it's a LogMessage or its subclass
| if not isinstance(message, LogMessage): |
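The check started on the line above guards log() against plain strings. A short usage example, assuming the import paths shown at the top of this file work as written (illustrative only):

from objlog.Base.LogNode import LogNode
from objlog.LogMessages import Debug

node = LogNode("core", log_file="core.log", print_to_console=True)
node.log(Debug("starting up"))   # accepted: Debug is a LogMessage subclass
# node.log("starting up")        # a bare string would be rejected by the isinstance check above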
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: anyquest/pyaq
# Path: aq/providers/provider.py
class BaseProvider:
async def create_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse:
pass
# Path: aq/providers/provider.py
class ProviderError(Exception):
def __init__(self, code, message):
self.code = code
super().__init__(message)
# Path: aq/providers/types/chat.py
class ChatCompletionMessage(BaseModel):
role: str
content: Optional[str | List[Content]] = ""
name: Optional[str] = None
tool_call_id: Optional[str] = None
tool_calls: Optional[List[ToolCall]] = None
# Path: aq/providers/types/chat.py
class ChatCompletionRequest(BaseModel):
model: str
messages: List[ChatCompletionMessage]
tools: Optional[List[Tool]] = None
response_format: Optional[ResponseFormat] = None
tool_choice: Optional[str] = None
temperature: float = 0.5
presence_penalty: float = 0.0
frequency_penalty: float = 0.0
max_tokens: int = 1000
# Path: aq/providers/types/chat.py
class ChatCompletionResponse(BaseModel):
id: str
object: str
created: int
choices: List[Choice]
# Path: aq/providers/types/chat.py
class Choice(BaseModel):
index: int
message: ChatCompletionMessage
finish_reason: Optional[str] = None
# Path: aq/providers/types/chat.py
class Error(BaseModel):
code: str | int
message: str
# Path: aq/http_client/async_http_client.py
class AsyncHttpClient:
TIMEOUT = 120
def __init__(self):
logging.getLogger('httpcore').setLevel(logging.ERROR)
logging.getLogger('httpx').setLevel(logging.ERROR)
self._logger = logging.getLogger(self.__class__.__name__)
async def post(self, url: str, headers: Dict[str, Any], data: Any, json=True) -> Any:
retry_count = 0
while retry_count < 5:
try:
async with httpx.AsyncClient() as ac:
response = await ac.post(url, headers=headers, data=data, timeout=self.TIMEOUT)
if json:
json_response = response.json()
if "error" in json_response and "code" in json_response["error"]:
code = int(json_response["error"]["code"])
if code == 429:
self._logger.error("Received a 429 error. Retrying ...")
retry_count += 1
await asyncio.sleep(5*(2**retry_count))
else:
return json_response
else:
return json_response
else:
return response.text
except httpx.HTTPStatusError as e:
if e.response.status_code == 429:
self._logger.error("Received a 429 error. Retrying ...")
retry_count += 1
await asyncio.sleep(5*(2**retry_count))
else:
self._logger.error(f"HTTP error: {e.response.status_code}, {e.response.text}")
raise e
async def get(self, url: str, query: Dict[str, Any] = None, headers: [str, Any] = None, json=True) -> Any:
get_url = f"{url}?{urlencode(query)}" if query else url
async with httpx.AsyncClient() as ac:
response = await ac.get(get_url, headers=headers or {}, timeout=self.TIMEOUT)
return response.json() if json else response.text
@staticmethod
def urljoin(*args):
stripped = map(lambda x: str(x).strip('/'), args)
return "/".join(stripped)
# Path: aq/providers/gemini/provider.py
import logging
import re
from typing import Dict, Any, Optional, List, Literal
from pydantic import BaseModel
from ..provider import BaseProvider, ProviderError
from ..types import ChatCompletionRequest, ChatCompletionResponse, ChatCompletionMessage, Choice, Error
from ...http_client import AsyncHttpClient
class InlineData(BaseModel):
mimeType: str
data: str
class Part(BaseModel):
text: Optional[str] = None
inlineData: Optional[InlineData] = None
class Content(BaseModel):
role: Literal["user", "model"]
parts: List[Part]
class GenerationConfig(BaseModel):
temperature: float = 0.5
maxOutputTokens: int = 1000
class GeminiCompletionRequest(BaseModel):
contents: List[Content]
generationConfig: GenerationConfig
class ResponseCandidate(BaseModel):
content: Content
finishReason: Literal["STOP"]
class GeminiCompletionResponse(BaseModel):
candidates: List[ResponseCandidate]
class GeminiProvider(BaseProvider):
def __init__(self, config: Dict[str, Any], http_client: AsyncHttpClient):
self._config = config
self._http_client = http_client
self._logger = logging.getLogger(self.__class__.__name__)
@staticmethod
def _check_config(config: Dict[str, Any]) -> None:
required_keys = ['endpoint', 'key']
if not all(key in config for key in required_keys):
raise ProviderError(400, "The Gemini provider is not configured. Add settings to config.yml.")
| async def create_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse: |
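create_completion has to translate the OpenAI-style ChatCompletionRequest into the Gemini request schema defined just above. A hedged sketch of the message-conversion step only (the mapping rules and the helper name are assumptions, not the repository's code):

from typing import List

def to_gemini_contents(messages: List[ChatCompletionMessage]) -> List[Content]:
    # Gemini only understands the roles "user" and "model"; in this sketch every
    # non-assistant turn is folded into "user".
    contents = []
    for m in messages:
        role = "model" if m.role == "assistant" else "user"
        text = m.content if isinstance(m.content, str) else ""
        contents.append(Content(role=role, parts=[Part(text=text)]))
    return contents

The resulting list would then be wrapped in a GeminiCompletionRequest together with a GenerationConfig before being posted to the configured endpoint.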
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: multimodallearning/DG-TTA
# Path: dg_tta/tta/config_log_utils.py
def get_data_filepaths(tta_dataset_name, tta_dataset_bucket):
raw_tta_dataset_dir = Path(nnUNet_raw, tta_dataset_name)
if tta_dataset_bucket == "imagesTr":
source_folders = [raw_tta_dataset_dir / "imagesTr"]
elif tta_dataset_bucket == "imagesTs":
source_folders = [raw_tta_dataset_dir / "imagesTs"]
elif tta_dataset_bucket == "imagesTrAndTs":
source_folders = [
raw_tta_dataset_dir / "imagesTr",
raw_tta_dataset_dir / "imagesTs",
]
file_list = []
for src_fld in source_folders:
if src_fld.is_dir():
file_list.extend(filter(lambda x: x.is_file(), src_fld.iterdir()))
return file_list
# Path: dg_tta/tta/config_log_utils.py
def get_dgtta_colormap():
hi_1 = "#248888"
hi_2 = "#79DCF0"
hi_3 = "#e7475e"
hi_4 = "#f0d879"
return matplotlib.colors.LinearSegmentedColormap.from_list(
"", [hi_3, hi_4, hi_2, hi_1]
)
# Path: dg_tta/tta/config_log_utils.py
def get_resources_dir():
return Path(dg_tta.__file__).parent / "__resources__"
# Path: dg_tta/utils.py
def check_dga_root_is_set(soft_check=False):
prompt = "Please define an existing root directory for DG-TTA by setting DG_TTA_ROOT."
check = Path(
os.environ.get("DG_TTA_ROOT", "_")
).is_dir()
if soft_check and not check:
print(prompt)
return
assert check, prompt
# Path: dg_tta/tta/ipynb_utils.py
import json
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from mpl_toolkits.axes_grid1.axes_grid import ImageGrid
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
from dg_tta.tta.config_log_utils import (
get_data_filepaths,
get_dgtta_colormap,
get_resources_dir,
)
from dg_tta.utils import check_dga_root_is_set
def read_image(source_data_paths, path_idx):
if source_data_paths is None:
return None, None
source_img, source_sitk_stuff = SimpleITKIO().read_images(
source_data_paths[path_idx : path_idx + 1]
)
source_img = source_img[0]
return torch.tensor(source_img)[None, None, :], source_sitk_stuff
def get_target_imgs_datapaths():
check_dga_root_is_set()
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
return tta_plan["tta_data_filepaths"]
def get_source_imgs_datapaths():
check_dga_root_is_set()
buckets = ["imagesTr", "imagesTs"]
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
source_dataset_name = tta_plan["__pretrained_dataset_name__"]
if source_dataset_name.startswith("TS104"):
return "TS104"
source_data_paths = []
for buc in buckets:
source_data_paths.extend(get_data_filepaths(source_dataset_name, buc))
return source_data_paths
def get_orient_imgs(img):
def get_axes_idxs(axis_size):
NUM_IDXS = 16
return np.linspace(0, axis_size - 1, NUM_IDXS).round().astype(int)
img = img.squeeze(0, 1)
D, H, W = img.shape
slices = dict(HW=[], DW=[], DH=[])
for d in get_axes_idxs(D):
slices["HW"].append(img[d, :, :])
for h in get_axes_idxs(H):
slices["DW"].append(img[:, h, :])
for w in get_axes_idxs(W):
slices["DH"].append(img[:, :, w])
return slices
def clear_axis(ax):
ax.get_yaxis().set_ticks([])
ax.get_xaxis().set_ticks([])
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
def get_spacing_ratio(sitk_stuff, axis_idx):
rolled_spacing = np.roll(np.array(sitk_stuff["spacing"]), axis_idx)
return rolled_spacing[1] / rolled_spacing[0]
def show_image_overview(img, sitk_stuff, fig_inch_size=5.0):
orient_imgs = get_orient_imgs(img)
vmin, vmax = img.min(), img.max()
dpi = 150.0
large_text_size = fig_inch_size * 10
small_text_size = fig_inch_size * 2
| cmap = get_dgtta_colormap() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tommy-xq/SA2VP
# Path: vpt_main/src/models/mlp.py
class MLP(nn.Module):
def __init__(
self,
input_dim: int,
mlp_dims: List[int],
dropout: float = 0.1,
nonlinearity: Type[nn.Module] = nn.ReLU,
normalization: Type[nn.Module] = nn.BatchNorm1d, # nn.LayerNorm,
special_bias: bool = False,
add_bn_first: bool = False,
):
super(MLP, self).__init__()
projection_prev_dim = input_dim
projection_modulelist = []
last_dim = mlp_dims[-1]
mlp_dims = mlp_dims[:-1]
if add_bn_first:
if normalization is not None:
projection_modulelist.append(normalization(projection_prev_dim))
if dropout != 0:
projection_modulelist.append(nn.Dropout(dropout))
for idx, mlp_dim in enumerate(mlp_dims):
fc_layer = nn.Linear(projection_prev_dim, mlp_dim)
nn.init.kaiming_normal_(fc_layer.weight, a=0, mode='fan_out')
projection_modulelist.append(fc_layer)
projection_modulelist.append(nonlinearity())
if normalization is not None:
projection_modulelist.append(normalization(mlp_dim))
if dropout != 0:
projection_modulelist.append(nn.Dropout(dropout))
projection_prev_dim = mlp_dim
self.projection = nn.Sequential(*projection_modulelist)
self.last_layer = nn.Linear(projection_prev_dim, last_dim)
nn.init.kaiming_normal_(self.last_layer.weight, a=0, mode='fan_out')
if special_bias:
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
torch.nn.init.constant_(self.last_layer.bias, bias_value)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
input_arguments:
@x: torch.FloatTensor
"""
x = self.projection(x)
x = self.last_layer(x)
return x
# Path: vpt_main/src/utils/logging.py
_FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s"
def _suppress_print():
def print_pass(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
def _cached_log_stream(filename):
def setup_logging(
num_gpu, num_shards, output="", name="visual_prompt", color=True):
def setup_single_logging(name, output=""):
def get_logger(name):
def log_json_stats(stats, sort_keys=True):
def __init__(self, *args, **kwargs):
def formatMessage(self, record: logging.LogRecord) -> str:
class _ColorfulFormatter(logging.Formatter):
# Path: vpt_main/src/models/resnet.py
import torch
import torch.nn as nn
import torchvision as tv
from collections import OrderedDict
from torchvision import models
from .mlp import MLP
from ..utils import logging
#!/usr/bin/env python3
"""
ResNet-related models:
"imagenet_sup_rn18",
"imagenet_sup_rn34",
"imagenet_sup_rn50",
"imagenet_sup_rn101",
"imagenet_sup_rn152",
"mocov3_rn50"
"""
| logger = logging.get_logger("visual_prompt") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SooLab/DDCOT
# Path: evaluations.py
def caculate_bleu(results, data, gram):
bleus = []
for qid, output in results.items():
prediction = output
target = data[qid]
# target = data[qid]['lecture'] + data[qid]['solution']
target = target.strip()
if target == "":
continue
bleu = bleu_score(target, prediction, gram)
bleus.append(bleu)
avg_bleu = sum(bleus) / len(bleus)
return avg_bleu
# Path: evaluations.py
def caculate_rouge(results, data):
rouges = []
for qid, output in results.items():
prediction = output
target = data[qid]
# target = data[qid]['lecture'] + data[qid]['solution']
target = target.strip()
if prediction == "":
continue
if target == "":
continue
rouge = score_rouge(target, prediction)
rouges.append(rouge)
avg_rouge = sum(rouges) / len(rouges)
return avg_rouge
# Path: evaluations.py
def caculate_similariry(results, data, model):
scores = []
for qid, output in results.items():
prediction = output
target = data[qid]
# target = data[qid]['lecture'] + data[qid]['solution']
target = target.strip()
score = similariry_score(target, prediction, model)
scores.append(score)
avg_score = sum(scores) / len(scores)
return avg_score
# Path: utils_evaluate.py
import os
import json
import argparse
import warnings
import pandas as pd
from sentence_transformers import SentenceTransformer
from evaluations import caculate_bleu, caculate_rouge, caculate_similariry
'''
Adapted from https://github.com/lupantech/ScienceQA
'''
warnings.filterwarnings('ignore')
def get_acc_with_contion(res_pd, key, values):
if isinstance(values, list):
total_pd = res_pd[res_pd[key].isin(values)]
else:
total_pd = res_pd[res_pd[key] == values]
correct_pd = total_pd[total_pd['true_false'] == True]
acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
return acc
def get_scores(result_data, rationale_data, results_reference, data_file, img):
# read result file
results = result_data
num = len(results)
# read data file
sqa_data = json.load(open(data_file))
# construct pandas data
sqa_pd = pd.DataFrame(sqa_data).T
res_pd = sqa_pd[sqa_pd['split'] == 'test'] # test set
if img:
res_pd = res_pd[res_pd["image"] == 'image.png']
# update data
for index, row in res_pd.iterrows():
res_pd.loc[index, 'no_context'] = True if (not row['hint'] and not row['image']) else False
res_pd.loc[index, 'has_text'] = True if row['hint'] else False
res_pd.loc[index, 'has_image'] = True if row['image'] else False
res_pd.loc[index, 'has_text_image'] = True if (row['hint'] and row['image']) else False
res_pd.loc[index, 'has_no_image'] = False if row['image'] else True
label = row['answer']
pred = int(results[index])
res_pd.loc[index, 'pred'] = pred
res_pd.loc[index, 'true_false'] = (label == pred)
# accuracy scores
acc_average = len(res_pd[res_pd['true_false'] == True]) / num * 100
# rationale quality
## BLEU
bleu1 = caculate_bleu(rationale_data, results_reference, gram=1)
bleu4 = caculate_bleu(rationale_data, results_reference, gram=4)
## Rouge-L
rouge = caculate_rouge(rationale_data, results_reference)
## Similarity
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2').cuda()
| similariry = caculate_similariry(rationale_data, results_reference, model) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Qazalbash/jaxampler
# Path: jaxampler/_src/typing.py
# Path: jaxampler/_src/rvs/binomial.py
class Binomial(DiscreteRV):
r"""Binomial random variable
.. math::
X\sim Bin(p,n) \iff P(X=x|p,n)=\binom{n}{x}p^{x}(1-p)^{n-x}
"""
def __init__(self, p: Numeric | Any, n: Numeric | Any, name: Optional[str] = None) -> None:
"""
:param p: Probability of success
:param n: Number of trials
:param name: Name of the random variable
"""
shape, self._p, self._n = jx_cast(p, n)
self.check_params()
self._q = 1.0 - self._p
super().__init__(name=name, shape=shape)
def check_params(self) -> None:
"""Check the parameters of the random variable."""
assert jnp.all(self._p >= 0.0) and jnp.all(self._p <= 1.0), "p must be in [0, 1]"
assert jnp.all(self._n.dtype == jnp.int32), "n must be an integer"
assert jnp.all(self._n > 0), "n must be positive"
@partial(jit, static_argnums=(0,))
def logpmf_x(self, x: Numeric) -> Numeric:
return jax_binom.logpmf(x, self._n, self._p)
@partial(jit, static_argnums=(0,))
def pmf_x(self, x: Numeric) -> Numeric:
return jax_binom.pmf(x, self._n, self._p)
@partial(jit, static_argnums=(0,))
def logcdf_x(self, x: Numeric) -> Numeric:
return jnp.log(self.cdf_x(x))
@partial(jit, static_argnums=(0,))
def cdf_x(self, x: Numeric) -> Numeric:
floor_x = jnp.floor(x)
cond = [x < 0, x >= self._n, jnp.logical_and(x >= 0, x < self._n)]
return jnp.select(cond, [0.0, 1.0, betainc(self._n - floor_x, floor_x + 1, self._q)])
def rvs(self, shape: tuple[int, ...], key: Optional[Array] = None) -> Array:
if key is None:
key = self.get_key()
new_shape = shape + self._shape
return jax.random.binomial(key=key, n=self._n, p=self._p, shape=new_shape)
def __repr__(self) -> str:
string = f"Binomial(p={self._p}, n={self._n}"
if self._name is not None:
string += f", name={self._name}"
string += ")"
return string
# Path: jaxampler/_src/rvs/bernoulli.py
from typing import Any, Optional
from ..typing import Numeric
from .binomial import Binomial
# Copyright 2023 The Jaxampler Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
class Bernoulli(Binomial):
r"""Bernoulli random variable with probability of success p
.. math::
X\sim \mathbf{B}(p)\iff P\left(X=x|p\right)=p^{x}(1-p)^{1-x}
"""
| def __init__(self, p: Numeric | Any, name: Optional[str] = None) -> None: |
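Since a Bernoulli variable is exactly a Binomial with a single trial, the natural constructor body simply forwards n=1 to the parent class. A minimal sketch of what could follow this signature (assumed, not verified against the repository):

class BernoulliSketch(Binomial):
    r"""Illustrative only: Bernoulli(p) realized as Binomial(p, n=1)."""

    def __init__(self, p, name=None) -> None:
        super().__init__(p, 1, name=name)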
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: GXNU-ZhongLab/ODTrack
# Path: lib/models/layers/patch_embed.py
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
# allow different input size
# B, C, H, W = x.shape
# _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).")
# _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).")
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
# Path: lib/models/odtrack/utils.py
def combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):
# [B, HW, C]
len_t = template_tokens.shape[1]
len_s = search_tokens.shape[1]
if mode == 'direct':
merged_feature = torch.cat((template_tokens, search_tokens), dim=1)
elif mode == 'template_central':
central_pivot = len_s // 2
first_half = search_tokens[:, :central_pivot, :]
second_half = search_tokens[:, central_pivot:, :]
merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1)
elif mode == 'partition':
feat_size_s = int(math.sqrt(len_s))
feat_size_t = int(math.sqrt(len_t))
window_size = math.ceil(feat_size_t / 2.)
# pad feature maps to multiples of window size
B, _, C = template_tokens.shape
H = W = feat_size_t
template_tokens = template_tokens.view(B, H, W, C)
pad_l = pad_b = pad_r = 0
# pad_r = (window_size - W % window_size) % window_size
pad_t = (window_size - H % window_size) % window_size
template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = template_tokens.shape
template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)
template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)
_, Hc, Wc, _ = template_tokens.shape
template_tokens = template_tokens.view(B, -1, C)
merged_feature = torch.cat([template_tokens, search_tokens], dim=1)
# calculate new h and w, which may be useful for SwinT or others
merged_h, merged_w = feat_size_s + Hc, feat_size_s
if return_res:
return merged_feature, merged_h, merged_w
else:
raise NotImplementedError
return merged_feature
# Path: lib/models/odtrack/utils.py
def recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):
if mode == 'direct':
recovered_tokens = merged_tokens
elif mode == 'template_central':
central_pivot = len_search_token // 2
len_remain = len_search_token - central_pivot
len_half_and_t = central_pivot + len_template_token
first_half = merged_tokens[:, :central_pivot, :]
second_half = merged_tokens[:, -len_remain:, :]
template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]
recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)
elif mode == 'partition':
recovered_tokens = merged_tokens
else:
raise NotImplementedError
return recovered_tokens
# Path: lib/models/odtrack/base_backbone.py
from functools import partial
from timm.models.vision_transformer import resize_pos_embed
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from lib.models.layers.patch_embed import PatchEmbed
from lib.models.odtrack.utils import combine_tokens, recover_tokens
import torch
import torch.nn as nn
import torch.nn.functional as F
class BaseBackbone(nn.Module):
def __init__(self):
super().__init__()
# for original ViT
self.pos_embed = None
self.img_size = [224, 224]
self.patch_size = 16
self.embed_dim = 384
self.cat_mode = 'direct'
self.pos_embed_z = None
self.pos_embed_x = None
self.template_segment_pos_embed = None
self.search_segment_pos_embed = None
self.return_inter = False
self.return_stage = [2, 5, 8, 11]
self.add_sep_seg = False
def finetune_track(self, cfg, patch_start_index=1):
search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
new_patch_size = cfg.MODEL.BACKBONE.STRIDE
self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
self.return_inter = cfg.MODEL.RETURN_INTER
self.return_stage = cfg.MODEL.RETURN_STAGES
self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG
# resize patch embedding
if new_patch_size != self.patch_size:
print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
old_patch_embed = {}
for name, param in self.patch_embed.named_parameters():
if 'weight' in name:
param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
mode='bicubic', align_corners=False)
param = nn.Parameter(param)
old_patch_embed[name] = param
self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,
embed_dim=self.embed_dim)

You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yilin-bao/nnanim
# Path: TestingCode/modules.py
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0
):
super(Attention, self).__init__()
assert (
dim % num_heads == 0
), "Embedding dimension should be divisible by number of heads"
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
# make torchscript happy (cannot use tensor as tuple)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
# Path: TestingCode/modules.py
class FeedForward(nn.Module):
"""
Implementation of MLP for transformer
"""
def __init__(self, dim, hidden_dim, dropout_rate=0.0, revised=False):
super(FeedForward, self).__init__()
if not revised:
"""
Original: https://arxiv.org/pdf/2010.11929.pdf
"""
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(p=dropout_rate),
nn.Linear(hidden_dim, dim),
)
else:
"""
Scaled ReLU: https://arxiv.org/pdf/2109.03810.pdf
"""
self.net = nn.Sequential(
nn.Conv1d(dim, hidden_dim, kernel_size=1, stride=1),
nn.BatchNorm1d(hidden_dim),
nn.GELU(),
nn.Dropout(p=dropout_rate),
nn.Conv1d(hidden_dim, dim, kernel_size=1, stride=1),
nn.BatchNorm1d(dim),
nn.GELU(),
)
self.revised = revised
self._init_weights()
def _init_weights(self):
for name, module in self.net.named_children():
if isinstance(module, nn.Linear):
nn.init.normal_(module.bias, std=1e-6)
def forward(self, x):
if self.revised:
x = x.permute(0, 2, 1)
x = self.net(x)
x = x.permute(0, 2, 1)
else:
x = self.net(x)
return x
# Path: TestingCode/modules.py
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super(PreNorm, self).__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
# Path: TestingCode/transformer.py
from torch import nn
from TestingCode.modules import Attention, FeedForward, PreNorm
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
heads,
mlp_ratio=4.0,
attn_dropout=0.0,
dropout=0.0,
qkv_bias=True,
revised=False,
):
super().__init__()
self.layers = nn.ModuleList([])
assert isinstance(
mlp_ratio, float
), "MLP ratio should be an integer for valid "
mlp_dim = int(mlp_ratio * dim)
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PreNorm(
dim,
Attention(
dim,
num_heads=heads,
qkv_bias=qkv_bias,
attn_drop=attn_dropout,
proj_drop=dropout,
),
),
PreNorm(
dim,
| FeedForward(dim, mlp_dim, dropout_rate=dropout,), |
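Each entry appended to self.layers pairs a pre-norm attention block with a pre-norm feed-forward block. The forward pass is not shown in this excerpt; the standard pre-norm residual loop over such pairs (a sketch under that assumption, mirroring what a forward() would do, not necessarily the repository's code) is:

def run_transformer_layers(layers, x):
    # Standard pre-norm residual loop over (attention, feed-forward) pairs.
    for attn, ff in layers:
        x = attn(x) + x   # pre-norm self-attention with a residual connection
        x = ff(x) + x     # pre-norm feed-forward with a residual connection
    return x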
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Tlntin/booking_simulator
# Path: modelscope_agent/agent_types.py
class AgentType(str, Enum):
DEFAULT = 'default'
""""""
MS_AGENT = 'ms-agent'
"""An agent that uses the ModelScope-agent specific format does a reasoning step before acting .
"""
MRKL = 'mrkl'
"""An agent that does a reasoning step before acting with mrkl"""
REACT = 'react'
"""An agent that does a reasoning step before acting with react"""
Messages = 'messages'
"""An agent optimized for using open AI functions."""
# Path: modelscope_agent/llm/base.py
class LLM:
name = ''
def __init__(self, cfg):
self.cfg = cfg
self.agent_type = None
self.model = None
self.model_id = self.model
def set_agent_type(self, agent_type):
self.agent_type = agent_type
@abstractmethod
def generate(self, prompt: str, functions: list = [], **kwargs) -> str:
"""each llm should implement this function to generate response
Args:
prompt (str): prompt
functions (list): list of functions object including: name, description, parameters
Returns:
str: response
"""
raise NotImplementedError
@abstractmethod
def stream_generate(self,
prompt: str,
functions: list = [],
**kwargs) -> str:
"""stream generate response, which yields a generator of response in each step
Args:
prompt (str): prompt
functions (list): list of functions object including: name, description, parameters
Yields:
Iterator[str]: iterator of step response
"""
raise NotImplementedError
def tokenize(self, input_text: str) -> List[int]:
"""tokenize is used to calculate the length of the text to meet the model's input length requirements
Args:
input_text (str): input text
Returns:
list[int]: token_ids
"""
raise NotImplementedError
def detokenize(self, input_ids: List[int]) -> str:
"""detokenize
Args:
input_ids (list[int]): input token_ids
Returns:
str: text
"""
raise NotImplementedError
# Path: modelscope_agent/llm/utils.py
DEFAULT_MESSAGE = {
'role': 'user',
'content': 'No entry from user - please suggest something to enter'
}
# Path: modelscope_agent/llm/custom_llm.py
import os
import json
import requests
import traceback
from modelscope_agent.agent_types import AgentType
from .base import LLM
from .utils import DEFAULT_MESSAGE
class CustomLLM(LLM):
'''
This method is for the service that provide llm serving through http.
user could override the result parsing method if needed
While put all the necessary information in the env variable, such as Token, Model, URL
'''
name = 'custom_llm'
def __init__(self, cfg):
super().__init__(cfg)
self.token = os.getenv('HTTP_LLM_TOKEN', None)
self.model = os.getenv('HTTP_LLM_MODEL', None)
self.model_id = self.model
self.url = os.getenv('HTTP_LLM_URL', None)
if self.token is None:
raise ValueError('HTTP_LLM_TOKEN is not set')
| self.agent_type = self.cfg.get('agent_type', AgentType.DEFAULT) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dx-dtran/gpt2-mlx
# Path: transformer.py
class GPT(nn.Module):
def __init__(self, config: GPTConfig):
super().__init__()
assert config.vocab_size is not None
assert config.block_size is not None
self.config = config
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.block_size, config.n_embd)
self.drop = nn.Dropout(config.dropout)
self.h = [Block(config) for _ in range(config.n_layer)]
self.ln_f = nn.LayerNorm(config.n_embd, affine=config.bias)
def _forward_transformer_blocks(
self, x: mx.array, pos: mx.array, mask=None, cache=None, build_cache=False
):
tok_emb = self.wte(x)
pos_emb = self.wpe(pos)
x = self.drop(tok_emb + pos_emb)
kv_cache = []
if cache is not None:
for i in range(len(cache)):
x, cache[i] = self.h[i](x, mask=None, cache=cache[i])
else:
for block in self.h:
x, curr_cache = block(x, mask=mask)
if build_cache:
kv_cache.append(curr_cache)
x = self.ln_f(x)
return x, kv_cache if build_cache else cache
def _create_causal_mask(self, length: int):
mask = nn.MultiHeadAttention.create_additive_causal_mask(length)
return mask.astype(self.wte.weight.dtype)
def _sample_next_token(self, x, temperature):
logits = mx.expand_dims(x[:, -1], axis=0) @ self.wte.weight.T
y = logits[:, -1, :]
y = mx.random.categorical(y * (1 / temperature))
return y
def generate(self, x: mx.array, max_new_tokens=256, temperature=0.8):
_, t = x.shape
pos = mx.arange(0, t, 1, dtype=x.dtype)
mask = self._create_causal_mask(t)
x, cache = self._forward_transformer_blocks(x, pos, mask=mask, build_cache=True)
y = self._sample_next_token(x, temperature)
position = t
yield y
for _ in range(max_new_tokens):
position += 1
x = y[:, None]
x, cache = self._forward_transformer_blocks(x, position, cache=cache)
y = self._sample_next_token(x, temperature)
yield y
def __call__(self, x: mx.array, targets: mx.array = None):
b, t = x.shape
assert (
t <= self.config.block_size
), f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
pos = mx.arange(0, t, 1, dtype=x.dtype)
mask = self._create_causal_mask(t)
x, _ = self._forward_transformer_blocks(x, pos, mask=mask)
return x @ self.wte.weight.T
def loss(self, x, y):
logits = self(x)
loss = nn.losses.cross_entropy(
logits.reshape(-1, logits.shape[-1]), y.reshape(-1)
)
mx.simplify(loss)
return mx.mean(loss)
# Path: transformer.py
class GPTConfig:
block_size: int = 1024
vocab_size: int = 50304
n_layer: int = 12
n_head: int = 12
n_embd: int = 768
dropout: float = 0.0
bias: bool = True
# Path: generate.py
import argparse
import tiktoken
import time
import mlx.core as mx
from mlx.utils import tree_unflatten, tree_flatten
from transformer import GPT, GPTConfig
def load_model(model_name):
config_args = {
"gpt2": dict(n_layer=12, n_head=12, n_embd=768),
"gpt2-medium": dict(n_layer=24, n_head=16, n_embd=1024),
"gpt2-large": dict(n_layer=36, n_head=20, n_embd=1280),
"gpt2-xl": dict(n_layer=48, n_head=25, n_embd=1600),
}[model_name]
config_args["vocab_size"] = 50257
config_args["block_size"] = 1024
config_args["bias"] = True
config = GPTConfig(**config_args)
| model = GPT(config) |
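With the config assembled and the model instantiated, the script still has to load pretrained weights and then drive the generator defined on GPT above. A hedged usage sketch of the sampling side only (the prompt text and token count are placeholders, and weight loading is assumed to happen before this point):

enc = tiktoken.get_encoding("gpt2")
prompt_ids = mx.array([enc.encode("Hello, my name is")])

generated = []
for tok in model.generate(prompt_ids, max_new_tokens=20, temperature=0.8):
    generated.append(tok.item())   # each yield is a single sampled token id

print(enc.decode(generated))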
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chenchenygu/watermark-learnability
# Path: kgw_watermarking/watermark_reliability_release/utils/data/lfqa.py
def load_lfqa(args=None, path="./utils/data/lfqa.jsonl"):
cols_to_load = ["prefix", "gold_completion", "title", "selftext", "q_id"]
args.dataset_config_name = None
args.dataset_split = None
args.columns_to_remove = list(set(args.columns_to_remove + cols_to_load))
def lfqa_generator():
for ex in read_jsonlines(path):
row = {k: ex[k] for k in cols_to_load}
row["prefix"] = f"{prompts[args.prompt_id]}{row['prefix']}"
yield row
dataset = IterableDataset.from_generator(lfqa_generator)
return dataset
# Path: kgw_watermarking/watermark_reliability_release/utils/data/essays.py
def load_essays(args=None):
cols_to_load = ["instructions", "essays"]
cols_to_remove = ["titles", "urls", "__index_level_0__"]
dataset = load_dataset(
"ChristophSchuhmann/essays-with-instructions",
streaming=True,
split=args.dataset_split,
)
dataset = dataset.remove_columns(cols_to_remove)
args.dataset_config_name = None
args.dataset_split = None
args.columns_to_remove = list(set(args.columns_to_remove + cols_to_load))
return dataset
# Path: kgw_watermarking/watermark_reliability_release/utils/data/wikitext.py
def load_wikitext(args=None):
assert args is not None, "args must be provided to load_wikitext"
assert (
args.dataset_config_name is not None
), "args.dataset_config_name must be None to load_wikitext"
assert args.dataset_split is not None, "args.dataset_split must be None to load_wikitext"
args.columns_to_remove = list(set(args.columns_to_remove + ["text"]))
# load the regular dataset
raw_dataset = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=args.dataset_split,
streaming=False, # we're doing this conversion ourselves
)
def wikitext_generator():
# the generator loop
for ex in raw_dataset:
yield ex
dataset = IterableDataset.from_generator(wikitext_generator)
return dataset
# Path: kgw_watermarking/watermark_reliability_release/utils/generation.py
import torch
from datasets import load_dataset, IterableDataset
from torch import Tensor
from tokenizers import Tokenizer
from transformers import (
AutoTokenizer,
LlamaTokenizer,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
DataCollatorWithPadding,
)
from .data.lfqa import load_lfqa
from .data.essays import load_essays
from .data.wikitext import load_wikitext
# coding=utf-8
# Copyright 2023 Authors of "A Watermark for Large Language Models"
# available at https://arxiv.org/abs/2301.10226
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# HF classes
MAX_GENERATIONS = int(10000) # Hardcoded max length to avoid infinite loop
def load_model(args):
"""Load and return the model and tokenizer"""
args.is_seq2seq_model = any(
[(model_type in args.model_name_or_path) for model_type in ["t5", "T0"]]
)
args.is_decoder_only_model = any(
[(model_type in args.model_name_or_path) for model_type in ["gpt", "opt", "bloom", "llama"]]
)
if args.is_seq2seq_model:
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path)
elif args.is_decoder_only_model:
if args.load_fp16:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path, torch_dtype=torch.float16, device_map="auto"
)
else:
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path)
else:
raise ValueError(f"Unknown model type: {args.model_name_or_path}")
if args.use_gpu:
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.load_fp16:
pass
else:
model = model.to(device)
else:
device = "cpu"
model.eval()
if args.is_decoder_only_model:
padding_side = "left"
else:
raise NotImplementedError(
"Need to check how to handle padding for seq2seq models when calling generate"
)
if "llama" in args.model_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
model.config.bos_token_id = 1
model.config.eos_token_id = 2
else:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
args.model_max_length = model.config.max_position_embeddings
return model, tokenizer, device
def add_idx(example, idx):
example.update({"idx": idx})
return example
def load_hf_dataset(args):
dataset_name, dataset_config_name = args.dataset_name, args.dataset_config_name
if dataset_name == "lfqa":
| dataset = load_lfqa(args) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: skyoux/SemAIM
# Path: util/pos_embed.py
def interpolate_pos_embed(model, checkpoint_model):
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
# Path: models/models_vit.py
class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
def __init__(self, global_pool=False, **kwargs):
def forward_features(self, x):
def forward_head(self, x):
def vit_small_patch16(**kwargs):
def vit_base_patch16(**kwargs):
def vit_large_patch16(**kwargs):
def vit_huge_patch14(**kwargs):
B = x.shape[0]
# Path: main_knn.py
import os
import sys
import argparse
import numpy as np
import torch
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import timm.models as timm_models
import util.misc as misc
from torch import nn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
from timm.models.layers import trunc_normal_
from util.pos_embed import interpolate_pos_embed
from models import models_vit
#!/usr/bin/env python
def extract_feature_pipeline(args):
######################## preparing data ... ########################
resize_size = 256 if args.input_size == 224 else 512
transform = pth_transforms.Compose([
pth_transforms.Resize(resize_size, interpolation=3),
pth_transforms.CenterCrop(args.input_size),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = ReturnIndexDataset(os.path.join(args.data_path, 'train'), transform)
dataset_val = ReturnIndexDataset(os.path.join(args.data_path, 'val'), transform)
train_labels = torch.tensor(dataset_train.target).long()
test_labels = torch.tensor(dataset_val.target).long()
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
######################## building network ... ########################
| model = models_vit.__dict__[args.model]( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: boweniac/autogan
# Path: autogan/oai/openai_utils.py
def chat_completions(messages: list, api_key: Dict, request_timeout: int, max_retries: int,
stream_mode: Optional[bool] = None):
"""OpenAI interface and OpenAI like interface call
:param messages:
:param api_key: LLM configuration.
:param request_timeout:
:param max_retries:
:param stream_mode:
"""
if api_key["api_type"] == "openai" or api_key["api_type"] == "azure":
return openai_chat_completions(messages, api_key, request_timeout, max_retries, stream_mode)
else:
return openai_like_chat_completions(messages, api_key, request_timeout, max_retries, stream_mode)
# Path: autogan/oai/config_utils.py
class LLMConfig:
"""LLM config object
"""
def __init__(
self,
api_key_list: ConfigList,
max_messages_tokens: str,
request_interval_time: int,
request_timeout: int,
max_retries: int
):
self._api_key_list = api_key_list
self._max_messages_tokens = max_messages_tokens
self._request_interval_time = request_interval_time
self._request_timeout = request_timeout
self._max_retries = max_retries
def api_key(self, index):
"""Get the one configuration in the api_key_list.
"""
return self._api_key_list.get_config(index)
@property
def next_api_key(self):
"""Get the next configuration in the api_key_list.
"""
return self._api_key_list.get_next_config
@property
def len_of_api_key_list(self) -> int:
"""Get the first configuration in the api_key_list list.
"""
return self._api_key_list.len
@property
def model(self):
"""Get the model of the first configuration in the api_key_list list.
"""
return self._api_key_list.get_first_config["model"]
@property
def max_messages_tokens(self):
"""Limit the maximum tokens of the context in each dialogue.
"""
return self._max_messages_tokens
@property
def request_interval_time(self):
return self._request_interval_time
@property
def request_timeout(self):
return self._request_timeout
@property
def max_retries(self):
return self._max_retries
# Path: autogan/oai/count_tokens_utils.py
def count_text_tokens(text: str, model: Optional[str] = "gpt-3.5-turbo") -> int:
"""Calculate the tokens of the text.
:param text: The text to be tokenized
:param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.
:return: tokens
"""
if not text:
return 0
model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']
if model not in model_list:
model = "gpt-3.5-turbo"
try:
encoding = tiktoken.encoding_for_model(model)
num_tokens = len(encoding.encode(text))
except Exception as e:
print(e)
num_tokens = 0
return num_tokens
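A minimal usage sketch (hypothetical strings; exact counts depend on the installed tiktoken version):

count_text_tokens("hello world", model="gpt-4")  # a small positive integer, or 0 if tiktoken raises
count_text_tokens("")                            # -> 0, empty text returns early without calling tiktoken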
# Path: autogan/utils/response.py
def colored(x, *args, **kwargs):
def default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,
content: Optional[str], tokens: Optional[int], response: any):
def obj_to_dict(obj):
# Path: autogan/oai/generate_utils.py
import time
from typing import Optional, List
from autogan.oai.openai_utils import chat_completions
from autogan.oai.config_utils import LLMConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.utils.response import ResponseFuncType
def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\
-> tuple[Optional[str], Optional[int]]:
"""Call the LLM interface
Currently, only the chatgpt model of openai (including azure) is adapted.
:param llm_config: LLM configuration.
:param messages:
:param agent_name:
:param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries
- main: agent replies
- idea: deep thoughts
- messages_summary: context compression
- text_summary: general summaries
- clue_summary: clue summaries
:param response_func: Used to return results to the interface or terminal.
:param stream_mode:
"""
# When a certain configuration in the configuration list fails to request,
# continue to try the next configuration until all configurations in the list are attempted.
loop = llm_config.len_of_api_key_list
for i in range(loop):
time.sleep(llm_config.request_interval_time)
api_key = llm_config.next_api_key
try:
completion_content = ""
completion_tokens = 0
index = 1
| for message in chat_completions(messages, api_key, llm_config.request_timeout, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation
# Path: utils/data_util.py
def crop_HWC_img(image, base=64):
"""
Crop the image to a size that is a multiple of base.
:param image: H,W,C
:param base: (int)
:return:
"""
h = image.shape[0]
w = image.shape[1]
crop_h = h % base
crop_w = w % base
return image[crop_h // 2:h - crop_h + crop_h // 2, crop_w // 2:w - crop_w + crop_w // 2, :]
# Path: utils/data_util.py
def random_augmentation(*args):
out = []
flag_aug = random.randint(0,7)
for data in args:
out.append(data_augmentation(data, flag_aug).copy())
return out
# Path: utils/data_util.py
def padding(img, gt_size):
"""
Pad the image to the specified size.
img (np.float32) 0-1 :
gt_size (int) :
cv2.BORDER_REPLICATE/cv2.BORDER_CONSTANT,value=(255,255,255)/cv2.BORDER_REFLECT/cv2.BORDER_REFLECT_101/cv2.BORDER_WRAP"""
h, w, _ = img.shape
h_pad = max(0, gt_size - h)
w_pad = max(0, gt_size - w)
if h_pad == 0 and w_pad == 0:
return img
img = cv2.copyMakeBorder(img, 0, h_pad, 0, w_pad, cv2.BORDER_REFLECT)
return img
# Path: utils/data_util.py
def onehot(label: int, classes: int):
"""
return torch.tensor
"""
onehot_label = np.zeros([1,classes])
onehot_label[:,label] = 1
onehot_label = torch.from_numpy(onehot_label)
return onehot_label
# Path: utils/data_util.py
def smooth_one_hot(true_labels: torch.Tensor, classes: int, smoothing=0.0):
"""
if smoothing == 0, it's one-hot method
if 0 < smoothing < 1, it's smooth method
"""
assert 0 <= smoothing < 1
confidence = 1.0 - smoothing
label_shape = torch.Size((true_labels.size(0), classes))
true_dist = torch.empty(size=label_shape)
true_dist.fill_(smoothing / (classes - 1))
_, index = torch.max(true_labels, 1)
true_dist.scatter_(1, torch.LongTensor(index.unsqueeze(1)), confidence)
return true_dist
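A quick sanity check with assumed toy values (2 samples, 3 classes, smoothing=0.1): the target class keeps 1 - smoothing = 0.9 and every other class receives smoothing / (classes - 1) = 0.05.

import torch
true_labels = torch.tensor([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])  # one-hot targets
smooth_one_hot(true_labels, classes=3, smoothing=0.1)
# tensor([[0.9000, 0.0500, 0.0500],
#         [0.0500, 0.0500, 0.9000]])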
# Path: data/degradation_util.py
class Degradation(object):
def __init__(self, dataset_opt):
super(Degradation, self).__init__()
self.dataset_opt = dataset_opt
self.toTensor = ToTensor()
self.crop_transform = Compose([
ToPILImage(),
RandomCrop(dataset_opt['patch_size']),
])
def _add_gaussian_noise(self, clean_patch, sigma):
noise = np.random.randn(*clean_patch.shape)
noisy_patch = np.clip(clean_patch + noise * sigma, 0, 255).astype(np.uint8)
return noisy_patch, clean_patch
def _degrade_by_type(self, clean_patch, degrade_type):
if degrade_type == 0:
# denoise sigma=15
degraded_patch, clean_patch = self._add_gaussian_noise(clean_patch, sigma=15)
elif degrade_type == 1:
# denoise sigma=25
degraded_patch, clean_patch = self._add_gaussian_noise(clean_patch, sigma=25)
elif degrade_type == 2:
# denoise sigma=50
degraded_patch, clean_patch = self._add_gaussian_noise(clean_patch, sigma=50)
return degraded_patch, clean_patch
def degrade(self, clean_patch_1, clean_patch_2, degrade_type=None):
if degrade_type == None:
degrade_type = random.randint(0, 3)
else:
degrade_type = degrade_type
degrad_patch_1, _ = self._degrade_by_type(clean_patch_1, degrade_type)
degrad_patch_2, _ = self._degrade_by_type(clean_patch_2, degrade_type)
return degrad_patch_1, degrad_patch_2
def degrade_single(self, clean_patch, degrade_type=None):
if degrade_type == None:
degrade_type = random.randint(0, 3)
else:
degrade_type = degrade_type
degrad_patch, _ = self._degrade_by_type(clean_patch, degrade_type)
return degrad_patch
# Path: data/IDR_dataset.py
import os
import random
import copy
import numpy as np
from PIL import Image, ImageFile
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.data_util import crop_HWC_img, random_augmentation, padding, onehot, smooth_one_hot
from sklearn.preprocessing import OneHotEncoder
from data.degradation_util import Degradation
ImageFile.LOAD_TRUNCATED_IMAGES = True
class IDR_dataset(Dataset):
def __init__(self, dataset_opt):
super(IDR_dataset, self).__init__()
self.dataset_opt = dataset_opt
self.rs_ids = []
self.hazy_ids = []
| self.D = Degradation(dataset_opt) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TACJu/Compositor
# Path: Compositor_Mask2Former/mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py
def build_transformer_decoder(cfg, in_channels, mask_classification=True):
"""
Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.
"""
name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME
return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)
# Path: Compositor_Mask2Former/mask2former/modeling/pixel_decoder/fpn.py
def build_pixel_decoder(cfg, input_shape):
"""
Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.
"""
name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
forward_features = getattr(model, "forward_features", None)
if not callable(forward_features):
raise ValueError(
"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
f"Please implement forward_features for {name} to only return mask features."
)
return model
# Path: Compositor_Mask2Former/mask2former/modeling/meta_arch/mask_former_head.py
import logging
import fvcore.nn.weight_init as weight_init
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.maskformer_transformer_decoder import build_transformer_decoder
from ..pixel_decoder.fpn import build_pixel_decoder
# Copyright (c) Facebook, Inc. and its affiliates.
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
# logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
| "transformer_predictor": build_transformer_decoder( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Mirascope/mirascope
# Path: mirascope/chat/models.py
class OpenAIChat:
"""A convenience wrapper for the OpenAI Chat client."""
def __init__(self, model: str = "gpt-3.5-turbo", api_key: Optional[str] = None):
"""Initializes an instance of `OpenAIChat."""
self.client = OpenAI(api_key=api_key)
self.model = model
def create(self, prompt: Prompt, **kwargs) -> OpenAIChatCompletion:
"""Makes a call to the model using `prompt`.
Args:
prompt: The `Prompt` to use for the call.
**kwargs: Additional keyword arguments to pass to the API call. You can
find available keyword arguments here:
https://platform.openai.com/docs/api-reference/chat/create
Returns:
A `OpenAIChatCompletion` instance.
Raises:
Re-raises any exceptions thrown by the openai chat completions create call.
"""
try:
return OpenAIChatCompletion(
completion=self.client.chat.completions.create(
model=self.model,
messages=get_openai_chat_messages(prompt),
stream=False,
**kwargs,
)
)
except:
raise
def stream(
self, prompt: Prompt, **kwargs
) -> Generator[OpenAIChatCompletionChunk, None, None]:
"""Streams the response for a call to the model using `prompt`.
Args:
prompt: The `Prompt` to use for the call.
**kwargs: Additional keyword arguments to pass to the API call. You can
find available keyword arguments here:
https://platform.openai.com/docs/api-reference/chat/create
Yields:
A `OpenAIChatCompletionChunk` for each chunk of the response.
Raises:
Re-raises any exceptions thrown by the openai chat completions create call.
"""
completion_stream = self.client.chat.completions.create(
model=self.model,
messages=get_openai_chat_messages(prompt),
stream=True,
**kwargs,
)
for chunk in completion_stream:
yield OpenAIChatCompletionChunk(chunk=chunk)
# Path: mirascope/prompts.py
class Prompt(BaseModel):
"""A Pydantic model for prompts."""
@classmethod
def template(cls) -> str:
"""Custom parsing functionality for docstring prompt.
This function is the first step in formatting the prompt template docstring.
For the default `Prompt`, this function dedents the docstring and replaces all
repeated sequences of newlines with one fewer newline character. This enables
writing blocks of text instead of really long single lines. To include any
number of newline characters, simply include one extra.
Raises:
ValueError: If the class docstring is empty.
"""
if cls.__doc__ is None:
raise ValueError("`Prompt` must have a prompt template docstring.")
return re.sub(
"(\n+)",
lambda x: x.group(0)[:-1] if len(x.group(0)) > 1 else " ",
dedent(cls.__doc__).strip("\n"),
)
def __str__(self) -> str:
"""Returns the docstring prompt template formatted with template variables."""
template = self.template()
template_vars = [
var for _, var, _, _ in Formatter().parse(template) if var is not None
]
return template.format(**{var: getattr(self, var) for var in template_vars})
@property
def messages(self) -> list[tuple[str, str]]:
"""Returns the docstring as a list of messages."""
return [("user", str(self))]
def save(self, filepath: str):
"""Saves the prompt to the given filepath."""
with open(filepath, "wb") as f:
pickle.dump(self, f)
@classmethod
def load(cls, filepath: str) -> Prompt:
"""Loads the prompt from the given filepath."""
with open(filepath, "rb") as f:
return pickle.load(f)
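A minimal sketch of how the docstring template renders; TopicPrompt is a hypothetical subclass, not part of mirascope. A single newline inside the docstring collapses to a space, so the multi-line template reads back as one sentence.

class TopicPrompt(Prompt):
    """
    Recommend one resource about {topic}
    for a complete beginner.
    """

    topic: str

str(TopicPrompt(topic="Rust"))      # -> "Recommend one resource about Rust for a complete beginner."
TopicPrompt(topic="Rust").messages  # -> [("user", "Recommend one resource about Rust for a complete beginner.")]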
# Path: cookbook/api_example/api_example.py
import os
from fastapi import FastAPI
from mirascope import OpenAIChat, Prompt
"""A FastAPI app integrated with a multi-chain prompt for recommending books on a topic
and then asking which one is the best for beginners.
How to Run:
uvicorn api_example:app --reload
"""
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
app = FastAPI()
class BookRecommendationPrompt(Prompt):
"""
Can you recommend some books on {topic} in a list format?
"""
topic: str
class BestForBeginnersPrompt(Prompt):
"""
Given this list {book_list}, which one is the best for beginners?
"""
book_list: str
@app.post("/")
def root(book_recommendation: BookRecommendationPrompt):
"""Generates the best book for beginners on the given topic."""
| model = OpenAIChat() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: allisson/pysqsx
# Path: sqsx/exceptions.py
class NoRetry(Exception):
"""
This exception must be used when we need the message to be removed from the queue
"""
pass
# Path: sqsx/exceptions.py
class Retry(Exception):
"""
This exception must be used when we need a custom backoff config
"""
def __init__(self, min_backoff_seconds: int, max_backoff_seconds: int):
self.min_backoff_seconds = min_backoff_seconds
self.max_backoff_seconds = max_backoff_seconds
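A minimal sketch of how task handlers would use these exceptions (handler names and values are hypothetical): raising Retry re-queues the message with a custom backoff window, while NoRetry removes it without further attempts.

def flaky_handler(context, **kwargs):
    # Re-queue this message, backing off between 5 and 60 seconds.
    raise Retry(min_backoff_seconds=5, max_backoff_seconds=60)

def poison_handler(context, **kwargs):
    # Drop the message from the queue without retrying.
    raise NoRetry()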
# Path: sqsx/helper.py
def backoff_calculator_seconds(retries: int, minimum: int, maximum: int) -> int:
maximum = min(maximum, 43200)
return min(minimum * 2**retries, maximum)
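For intuition, a small worked example with assumed values: minimum=30 and maximum=900 gives a delay that doubles per retry until it hits the cap.

[backoff_calculator_seconds(r, 30, 900) for r in range(6)]
# -> [30, 60, 120, 240, 480, 900]   (30 * 2**5 = 960 is clamped to the 900-second maximum)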
# Path: sqsx/helper.py
def base64_to_dict(data: str) -> dict:
return json.loads(base64.urlsafe_b64decode(data).decode())
# Path: sqsx/helper.py
def dict_to_base64(data: dict) -> str:
return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()
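A round-trip sketch with a hypothetical payload, showing how task arguments travel inside an SQS message body:

payload = {"kwargs": {"user_id": 42}}
encoded = dict_to_base64(payload)          # URL-safe base64 string, safe to use as MessageBody
assert base64_to_dict(encoded) == payload  # decoding restores the original dict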
# Path: sqsx/queue.py
import logging
import signal
import time
from concurrent.futures import ThreadPoolExecutor, wait
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, Field, PrivateAttr
from sqsx.exceptions import NoRetry, Retry
from sqsx.helper import backoff_calculator_seconds, base64_to_dict, dict_to_base64
logger = logging.getLogger(__name__)
queue_url_regex = r"(http|https)[:][\/]{2}[a-zA-Z0-9-_:.]+[\/][0-9]{12}[\/]{1}[a-zA-Z0-9-_]{0,80}"
class BaseQueueMixin:
def consume_messages(
self, max_messages: int = 1, max_threads: int = 1, wait_seconds: int = 10, run_forever: bool = True
) -> None:
logger.info(f"Starting consuming tasks, queue_url={self.url}")
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
while True:
if self._should_consume_tasks_stop:
logger.info(f"Stopping consuming tasks, queue_url={self.url}")
break
response = self.sqs_client.receive_message(
QueueUrl=self.url,
AttributeNames=["All"],
MaxNumberOfMessages=min(max_messages, 10),
MessageAttributeNames=["All"],
)
sqs_messages = response.get("Messages", [])
if not sqs_messages:
logger.debug(
f"Waiting some seconds because no message was received, seconds={wait_seconds}, queue_url={self.url}"
)
time.sleep(wait_seconds)
continue
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = []
for sqs_message in sqs_messages:
futures.append(executor.submit(self._consume_message, sqs_message))
wait(futures)
if not run_forever:
break
def _exit_gracefully(self, signal_num, current_stack_frame) -> None:
logger.info("Starting graceful shutdown process")
self._should_consume_tasks_stop = True
def _message_ack(self, sqs_message: dict) -> None:
receipt_handle = sqs_message["ReceiptHandle"]
self.sqs_client.delete_message(QueueUrl=self.url, ReceiptHandle=receipt_handle)
def _message_nack(
self,
sqs_message: dict,
min_backoff_seconds: Optional[int] = None,
max_backoff_seconds: Optional[int] = None,
) -> None:
min_backoff_seconds = min_backoff_seconds if min_backoff_seconds else self.min_backoff_seconds
max_backoff_seconds = max_backoff_seconds if max_backoff_seconds else self.max_backoff_seconds
receipt_handle = sqs_message["ReceiptHandle"]
receive_count = int(sqs_message["Attributes"]["ApproximateReceiveCount"]) - 1
timeout = backoff_calculator_seconds(receive_count, min_backoff_seconds, max_backoff_seconds)
self.sqs_client.change_message_visibility(
QueueUrl=self.url, ReceiptHandle=receipt_handle, VisibilityTimeout=timeout
)
class Queue(BaseModel, BaseQueueMixin):
url: str = Field(pattern=queue_url_regex)
sqs_client: Any
min_backoff_seconds: int = Field(default=30)
max_backoff_seconds: int = Field(default=900)
_handlers: Dict[str, Callable] = PrivateAttr(default={})
_should_consume_tasks_stop: bool = PrivateAttr(default=False)
def add_task(self, task_name: str, **task_kwargs) -> dict:
return self.sqs_client.send_message(
QueueUrl=self.url,
MessageAttributes={"TaskName": {"DataType": "String", "StringValue": task_name}},
MessageBody=dict_to_base64({"kwargs": task_kwargs}),
)
def add_task_handler(self, task_name: str, task_handler_function: Callable) -> None:
self._handlers.update({task_name: task_handler_function})
def _consume_message(self, sqs_message: dict) -> None:
message_id = sqs_message["MessageId"]
task_name_attribute = sqs_message["MessageAttributes"].get("TaskName")
if task_name_attribute is None:
logger.warning(f"Message without TaskName attribute, message_id={message_id}")
return self._message_nack(sqs_message)
task_name = task_name_attribute["StringValue"]
task_handler_function = self._handlers.get(task_name)
if task_handler_function is None:
logger.warning(f"Task handler not found, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
try:
message_data = base64_to_dict(sqs_message["Body"])
except Exception:
logger.exception(f"Invalid message body, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
kwargs = message_data["kwargs"]
context = {
"queue_url": self.url,
"task_name": task_name,
"sqs_message": sqs_message,
}
try:
task_handler_function(context, **kwargs)
except Retry as exc:
logger.info(
f"Received an sqsx.Retry, setting a custom backoff policy, message_id={message_id}, task_name={task_name}"
)
return self._message_nack(
sqs_message,
min_backoff_seconds=exc.min_backoff_seconds,
max_backoff_seconds=exc.max_backoff_seconds,
)
| except NoRetry: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: turbopuffer/turbopuffer-python
# Path: turbopuffer/error.py
class TurbopufferError(Exception):
pass
# Path: turbopuffer/error.py
class AuthenticationError(TurbopufferError):
pass
# Path: turbopuffer/error.py
class APIError(TurbopufferError):
def __init__(self, status_code: int, status_name: str, message: str):
self.status_code = status_code
self.status_name = status_name
super().__init__(f'{status_name}: {message}')
# Path: turbopuffer/backend.py
import json
import time
import traceback
import requests
import turbopuffer as tpuf
import gzip
from turbopuffer.error import TurbopufferError, AuthenticationError, APIError
from typing import Optional, List
def find_api_key(api_key: Optional[str] = None) -> str:
if api_key is not None:
return api_key
elif tpuf.api_key is not None:
return tpuf.api_key
else:
raise AuthenticationError("No turbopuffer API key was provided.\n"
"Set the TURBOPUFFER_API_KEY environment variable, "
"or pass `api_key=` when creating a Namespace.")
class Backend:
api_key: str
api_base_url: str
session: requests.Session
def __init__(self, api_key: Optional[str] = None):
self.api_key = find_api_key(api_key)
self.api_base_url = tpuf.api_base_url
self.session = requests.Session()
self.session.headers.update({
'Authorization': f'Bearer {self.api_key}',
'User-Agent': f'tpuf-python/{tpuf.VERSION} {requests.utils.default_headers()["User-Agent"]}',
})
def make_api_request(self,
*args: List[str],
method: Optional[str] = None,
query: Optional[dict] = None,
payload: Optional[dict] = None) -> dict:
start = time.monotonic()
if method is None and payload is not None:
method = 'POST'
request = requests.Request(method or 'GET', self.api_base_url + '/' + '/'.join(args))
if query is not None:
request.params = query
if payload is not None:
# before = time.monotonic()
if isinstance(payload, dict):
# before = time.monotonic()
json_payload = tpuf.dump_json_bytes(payload)
# print('Json time:', time.monotonic() - before)
else:
raise ValueError(f'Unsupported POST payload type: {type(payload)}')
gzip_payload = gzip.compress(json_payload, compresslevel=1)
# json_mebibytes = len(json_payload) / 1024 / 1024
# gzip_mebibytes = len(gzip_payload) / 1024 / 1024
# print(f'Gzip time ({json_mebibytes} MiB json / {gzip_mebibytes} MiB gzip):', time.monotonic() - before)
request.headers.update({
'Content-Type': 'application/json',
'Content-Encoding': 'gzip',
})
request.data = gzip_payload
prepared = self.session.prepare_request(request)
retry_attempts = 0
while retry_attempts < 3:
# before = time.monotonic()
try:
# print(f'Sending request:', prepared.path_url, prepared.headers)
response = self.session.send(prepared, allow_redirects=False)
# print(f'Request time (HTTP {response.status_code}):', time.monotonic() - before)
if response.status_code > 500:
response.raise_for_status()
content_type = response.headers.get('Content-Type', 'text/plain')
if content_type == 'application/json':
try:
content = response.json()
except json.JSONDecodeError as err:
| raise APIError(response.status_code, traceback.format_exception_only(err), response.text) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: neu-spiral/multi-label-emg
# Path: multi_label_emg/slurm_utils.py
def run_one(job: str, running_job_count: int, dry_run: bool):
if ON_SLURM_CLUSTER:
_run_one_slurm(job, running_job_count, slurm_logs_dir, dry_run)
else:
_run_one_local(job, running_job_count, dry_run)
# Path: multi_label_emg/utils.py
PROJECT_ROOT = Path(__file__).resolve().parent
# Path: scripts/run_experiment_2.py
import itertools
import numpy as np
from run_experiment_1 import Setting
from multi_label_emg.slurm_utils import run_one
from multi_label_emg.utils import PROJECT_ROOT
"""
Experiment 2:
Using the previous best parallel model type and classifier,
vary the method of subsetting synthetic doubles and how many to use.
"""
DRY_RUN = True
script = PROJECT_ROOT / "train.py"
python = PROJECT_ROOT.parent / "venv" / "bin" / "python"
assert script.exists()
assert python.exists()
subjects = [f"Subj{i}" for i in range(11)]
parallel_model_type = "ParallelA"
clf = "mlp"
doubles_methods = [
"subset_uniform",
"subset_near_mean",
"subset_spaced_quantiles",
"subsetInput_uniform",
"subsetInput_near_mean",
"subsetInput_spaced_quantiles",
]
settings = []
for subj, seed, doubles_method, doubles_frac in itertools.product(
subjects,
np.arange(3),
doubles_methods,
[0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5],
):
if doubles_method.startswith("subsetInput"):
frac = np.round(np.sqrt(doubles_frac), 4)
else:
frac = doubles_frac
settings.append(
Setting(
subject=subj,
seed=seed,
parallel_model_type=parallel_model_type,
clf_name=clf,
doubles_method=doubles_method,
fraction_doubles_per_class=frac,
singles_method="none",
rel_fraction_singles_per_class=1.0,
include_doubles_in_train=False,
feature_combine_type="avg",
)
)
if __name__ == "__main__":
if DRY_RUN:
print("#" * 80)
print("DRY RUN")
running_job_count = 0
for setting in settings:
job = f"{python} {script} "
job += f"--subject {setting.subject} "
job += f"--seed {setting.seed} "
job += f"--parallel_model_type {setting.parallel_model_type} "
job += f"--clf_name {setting.clf_name} "
job += f"--doubles_method {setting.doubles_method} "
job += f"--fraction_doubles_per_class {setting.fraction_doubles_per_class} "
job += f"--singles_method {setting.singles_method} "
job += f"--rel_fraction_singles_per_class {setting.rel_fraction_singles_per_class} "
job += f"--include_doubles_in_train {setting.include_doubles_in_train} "
| run_one(job, running_job_count, dry_run=DRY_RUN) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lbcb-sci/GNNome
# Path: config.py
def get_config():
return {
'checkpoints_path': 'checkpoints',
'models_path': 'models',
'tool_dir': 'vendor',
'raven_dir': 'vendor/raven-1.8.1',
'hifiasm_dir': 'vendor/hifiasm-0.18.8',
'pbsim3_dir': 'vendor/pbsim3',
'sample_profile_id': '',
'sample_file': '',
'sequencing_depth': 60,
}
# Path: utils.py
def preprocess_graph(g, data_path, idx):
g = g.int()
g.ndata['x'] = torch.ones(g.num_nodes(), 1)
ol_len = g.edata['overlap_length'].float()
ol_sim = g.edata['overlap_similarity']
ol_len = (ol_len - ol_len.mean()) / ol_len.std()
if get_hyperparameters()['use_similarities']:
g.edata['e'] = torch.cat((ol_len.unsqueeze(-1), ol_sim.unsqueeze(-1)), dim=1)
else:
g.edata['e'] = ol_len.unsqueeze(-1)
return g
# Path: utils.py
def add_positional_encoding(g):
"""
Initializing positional encoding with k-RW-PE
"""
g.ndata['in_deg'] = g.in_degrees().float()
g.ndata['out_deg'] = g.out_degrees().float()
pe_dim = get_hyperparameters()['nb_pos_enc']
pe_type = get_hyperparameters()['type_pos_enc']
if pe_dim == 0:
return g
if pe_type == 'RW':
# Geometric diffusion features with Random Walk
A = g.adjacency_matrix(scipy_fmt="csr")
Dinv = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -1.0, dtype=float) # D^-1
RW = A @ Dinv
M = RW
# Iterate
PE = [torch.from_numpy(M.diagonal()).float()]
M_power = M
for _ in range(pe_dim-1):
M_power = M_power @ M
PE.append(torch.from_numpy(M_power.diagonal()).float())
PE = torch.stack(PE,dim=-1)
g.ndata['pe'] = PE
if pe_type == 'PR':
# k-step PageRank features
A = g.adjacency_matrix(scipy_fmt="csr")
D = A.sum(axis=1) # out degree
Dinv = 1./ (D+1e-9); Dinv[D<1e-9] = 0 # take care of nodes without outgoing edges
Dinv = sp.diags(np.squeeze(np.asarray(Dinv)), dtype=float) # D^-1
P = (Dinv @ A).T
n = A.shape[0]
One = np.ones([n])
x = One/ n
PE = []
alpha = 0.95
for _ in range(pe_dim):
x = alpha* P.dot(x) + (1.0-alpha)/n* One
PE.append(torch.from_numpy(x).float())
PE = torch.stack(PE,dim=-1)
g.ndata['pe'] = PE
return g
# Path: utils.py
def extract_contigs(path, idx):
gfa_path = os.path.join(path, f'{idx}_asm.bp.p_ctg.gfa')
asm_path = os.path.join(path, f'{idx}_assembly.fasta')
contigs = []
with open(gfa_path) as f:
n = 0
for line in f.readlines():
line = line.strip()
if line[0] != 'S':
continue
seq=Seq.Seq(line.split()[2])
ctg = SeqIO.SeqRecord(seq, description=f'contig_{n}', id=f'contig_{n}')
contigs.append(ctg)
n += 1
SeqIO.write(contigs, asm_path, 'fasta')
subprocess.run(f'rm {path}/{idx}_asm*', shell=True)
# subprocess.run(f'rm {path}/output.csv', shell=True)
# Path: graph_dataset.py
import re
import os
import pickle
import subprocess
import dgl
import graph_parser
from dgl.data import DGLDataset
from config import get_config
from utils import preprocess_graph, add_positional_encoding, extract_contigs
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx)
| graph = add_positional_encoding(graph) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: altfoxie/ha-sberdevices
# Path: custom_components/sberdevices/api.py
class DeviceAPI:
def __init__(self, home: HomeAPI, device_id: str) -> None:
self._home = home
self._id = device_id
@property
def device(self) -> dict[str, any]:
return self._home.get_cached_device(self._id)
async def update(self) -> None:
await self._home.update_devices_cache()
def get_state(self, key: str) -> dict[str, any]:
return find_from_list(self.device["desired_state"], key)
def get_attribute(self, key: str) -> dict[str, any]:
return find_from_list(self.device["attributes"], key)
async def set_states(self, states: [dict[str, any]]) -> None:
await self._home.set_device_state(self._id, states)
async def set_state(self, state: dict[str, any]) -> None:
await self.set_states([state])
async def set_state_bool(self, key: str, value: bool) -> None:
await self.set_state({"key": key, "bool_value": value})
async def set_state_integer(self, key: str, value: int) -> None:
await self.set_state({"key": key, "integer_value": value})
async def set_on_off(self, state: bool) -> None:
await self.set_state_bool("on_off", state)
# Path: custom_components/sberdevices/api.py
class HomeAPI:
def __init__(self, sber: SberAPI) -> None:
self._sber = sber
self._client = AsyncClient(
base_url="https://gateway.iot.sberdevices.ru/gateway/v1",
)
self._token_alive = False
self._devices = {}
async def update_token(self) -> None:
if self._token_alive:
return
token = await self._sber.fetch_home_token()
if token is not None:
self._client.headers.update({"X-AUTH-jwt": token})
async def request(
self, method: str, url: str, retry: bool = True, **kwargs
) -> dict[str, any]:
await self.update_token()
res = await self._client.request(method, url, **kwargs)
obj = res.json()
if res.status_code != 200:
code = obj["code"]
# dead token xd
if code == 16:
self._token_alive = False
if retry:
return await self.request(method, url, retry=False, **kwargs)
raise Exception(f"{code} ({res.status_code}): {obj['message']}")
return obj
async def get_device_tree(self) -> dict[str, any]:
return (await self.request("GET", "/device_groups/tree"))["result"]
# Cache
async def update_devices_cache(self) -> list[dict[str, any]]:
self._devices = extract_devices(await self.get_device_tree())
def get_cached_devices(self) -> list[dict[str, any]]:
return self._devices
def get_cached_device(self, device_id: str) -> dict[str, any]:
return self._devices[device_id]
async def set_device_state(self, device_id: str, state: [dict[str, any]]) -> None:
await self._client.request(
"PUT",
f"/devices/{device_id}/state",
json={
"device_id": device_id,
"desired_state": state,
"timestamp": datetime.now().isoformat()
+ "Z", # 2023-12-01T17:00:35.537Z
},
)
# Merge
for state_val in state:
for attribute in self._devices[device_id]["desired_state"]:
if attribute["key"] == state_val["key"]:
attribute.update(state_val)
break
# Path: custom_components/sberdevices/const.py
DOMAIN = "sberdevices"
# Path: custom_components/sberdevices/light.py
import math
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP_KELVIN,
ATTR_HS_COLOR,
ATTR_WHITE,
ColorMode,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.color import brightness_to_value, value_to_brightness
from homeassistant.util.scaling import scale_ranged_value_to_int_range
from .api import DeviceAPI, HomeAPI
from .const import DOMAIN
"""Support for SberDevices lights."""
from __future__ import annotations
# hardcode xd
COLOR_TEMP_MIN = 2700
COLOR_TEMP_MAX = 6500
COLOR_TEMP_RANGE = (COLOR_TEMP_MIN, COLOR_TEMP_MAX)
H_RANGE = (0, 360)
S_RANGE = (0, 100)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
| home: HomeAPI = hass.data[DOMAIN][entry.entry_id]["home"] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amadad/agentcy3
# Path: agency_swarm/tools/base_tool.py
class BaseTool(OpenAISchema, ABC):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@abstractmethod
def run(self, **kwargs):
pass
# Path: agency_swarm/util/schema.py
def reference_schema(schema):
# Enhanced function to only extract nested properties into $defs
def find_and_extract_defs(node, defs, parent_key=None, path_prefix="#/$defs/"):
if isinstance(node, dict):
# Extract nested properties into $defs
if parent_key == 'properties' and 'properties' in node and isinstance(node['properties'], dict):
def_name = node.get('title', None)
if def_name:
defs[def_name] = node
return {"$ref": path_prefix + def_name}
# Recursively process the dictionary
return {k: find_and_extract_defs(v, defs, parent_key=k) for k, v in node.items()}
elif isinstance(node, list):
# Recursively process the list
return [find_and_extract_defs(element, defs, parent_key) for element in node]
else:
return node
defs = {}
# Extract definitions and update the schema
new_schema = {k: find_and_extract_defs(v, defs) for k, v in schema.items()}
if defs:
new_schema['parameters'] = new_schema.get('parameters', {})
new_schema['parameters']['$defs'] = defs
return new_schema
# Path: agency_swarm/tools/tool_factory.py
import inspect
from typing import Any, Dict, List, Type
from pydantic import create_model, Field
from .base_tool import BaseTool
from ..util.schema import reference_schema
from langchain.tools import format_tool_to_openai_function
except ImportError:
raise ImportError("You must install langchain to use this method.")
if inspect.isclass(tool):
tool = tool()
def callback(self):
tool_input = self.model_dump()
try:
return tool.run(tool_input)
except TypeError:
if len(tool_input) == 1:
return tool.run(list(tool_input.values())[0])
else:
raise TypeError(f"Error parsing input for tool '{tool.__class__.__name__}' Please open an issue "
f"on github.")
return ToolFactory.from_openai_schema(
format_tool_to_openai_function(tool),
callback
)
@staticmethod
def from_openai_schema(schema: Dict[str, Any], callback: Any):
"""
Converts an OpenAI schema into a BaseTool. Nested properties without refs are not supported yet.
:param schema:
:param callback:
:return:
"""
def resolve_ref(ref: str, defs: Dict[str, Any]) -> Any:
# Extract the key from the reference
key = ref.split('/')[-1]
if key in defs:
return defs[key]
else:
raise ValueError(f"Reference '{ref}' not found in definitions")
def create_fields(schema: Dict[str, Any], type_mapping: Dict[str, Type[Any]], required_fields: List[str],
defs: Dict[str, Any]) -> Dict[str, Any]:
fields = {}
for prop, details in schema.items():
alias = None
if prop.startswith('_'):
alias = prop
prop = prop.lstrip('_')
json_type = details['type']
if json_type in type_mapping:
field_type = type_mapping[json_type]
field_description = details.get('description', '')
is_required = prop in required_fields
field_default = ... if is_required else None
if json_type == 'array':
items_schema = details.get('items', {})
if 'type' in items_schema:
item_type = type_mapping[items_schema['type']]
field_type = List[item_type]
elif 'properties' in items_schema: # Handling direct nested object in array
nested_properties = items_schema['properties']
nested_required = items_schema.get('required', [])
nested_model_name = items_schema.get('title', f"{prop}Item")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
nested_model = create_model(nested_model_name, **nested_fields)
field_type = List[nested_model]
elif '$ref' in items_schema:
ref_model = resolve_ref(items_schema['$ref'], defs)
field_type = List[ref_model]
else:
raise ValueError("Array items must have a 'type', 'properties', or '$ref'")
elif json_type == 'object':
if 'properties' in details:
nested_properties = details['properties']
nested_required = details.get('required', [])
nested_model_name = details.get('title', f"{prop}Model")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
field_type = create_model(nested_model_name, **nested_fields)
elif '$ref' in details:
ref_model = resolve_ref(details['$ref'], defs)
field_type = ref_model
else:
raise ValueError("Object must have 'properties' or '$ref'")
fields[prop] = (
field_type, Field(default=field_default, description=field_description, alias=alias))
else:
raise ValueError(f"Unsupported type '{json_type}' for property '{prop}'")
return fields
type_mapping = {
'string': str,
'integer': int,
'number': float,
'boolean': bool,
'array': List,
'object': dict,
'null': type(None),
}
schema = reference_schema(schema)
name = schema['name']
description = schema['description']
properties = schema['parameters']['properties']
required_fields = schema['parameters'].get('required', [])
# Add definitions ($defs) to type_mapping
defs = {k: create_model(k, **create_fields(v['properties'], type_mapping, v.get('required', []), {})) for k, v
in schema['parameters'].get('$defs', {}).items()}
type_mapping.update(defs)
fields = create_fields(properties, type_mapping, required_fields, defs)
# Dynamically creating the Pydantic model
model = create_model(name, **fields)
| tool = type(name, (BaseTool, model), { |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Deltares/imod-python
# Path: imod/flow/dis.py
class TimeDiscretization(Package):
"""
Time discretisation package class.
Parameters
----------
timestep_duration: xr.DataArray
is the length of the current stress period (PERLEN). If the flow
solution is transient, timestep_duration specified here must be equal
to that specified for the flow model. If the flow solution is
steady-state, timestep_duration can be set to any desired length.
n_timesteps: int, optional
is the number of time steps for the transient flow solution in the
current stress period (NSTP). If the flow solution is steady-state,
n_timestep=1. Default value is 1.
transient: bool, optional
Flag indicating whether the flow simulation is transient (True) or
steady-state (False). Default is True.
timestep_multiplier: float, optional
is the multiplier for the length of successive time steps used in the
transient flow solution (TSMULT); it is used only if n_timesteps>1.
timestep_multiplier>0, the length of each flow time step within the
current stress period is calculated using the geometric progression as
in MODFLOW. Note that both n_timesteps and timestep_multiplier
specified here must be identical to those specified in the flow model
if the flow model is transient. If timestep_multiplier ≤ 0, the length
of each flow time step within the current stress period is read from
the record TSLNGH. This option is needed in case the length of time
steps for the flow solution is not based on a geometric progression in
a flow model, unlike MODFLOW. Default is 1.0.
"""
_pkg_id = "dis"
_variable_order = [
"timestep_duration",
"n_timesteps",
"transient",
"timestep_multiplier",
]
def __init__(
self,
timestep_duration,
endtime,
n_timesteps=1,
transient=True,
timestep_multiplier=1.0,
):
super().__init__()
self.dataset["timestep_duration"] = timestep_duration
self.dataset["n_timesteps"] = n_timesteps
self.dataset["transient"] = transient
self.dataset["timestep_multiplier"] = timestep_multiplier
self.endtime = endtime
def _render(self):
"""Render iMOD TIM file, which is the time discretization of the iMODFLOW model"""
_template = jinja2.Template(
"{% for time in timestrings%}"
"{{time}},1,{{n_timesteps}},{{timestep_multiplier}}\n"
"{% endfor %}\n"
)
times = self.dataset["time"].values
timestrings = [imod.util._compose_timestring(time) for time in times]
timestrings.append(imod.util._compose_timestring(self.endtime))
d = dict(
timestrings=timestrings,
n_timesteps=self.dataset["n_timesteps"].item(),
timestep_multiplier=self.dataset["timestep_multiplier"].item(),
)
return _template.render(**d)
def save(self, path):
tim_content = self._render()
with open(path, "w") as f:
f.write(tim_content)
def _pkgcheck(self, **kwargs):
to_check = [
"timestep_duration",
"n_timesteps",
]
self._check_positive(to_check)
# Path: imod/wq/timeutil.py
def _check_year(year):
def to_datetime(time, use_cftime):
def timestep_duration(times, use_cftime):
def forcing_starts_ends(package_times, globaltimes):
# Path: imod/tests/test_flow/test_flow_dis.py
import cftime
import numpy as np
import pytest
import xarray as xr
from imod.flow import TimeDiscretization
from imod.wq import timeutil
@pytest.fixture(scope="module")
def time_discretization(three_days):
times = three_days
| duration = timeutil.timestep_duration(times, False) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Dong142857/Live3DPortrait
# Path: models/eg3d/volumetric_rendering/ray_marcher.py
class MipRayMarcher2(nn.Module):
def __init__(self):
super().__init__()
def run_forward(self, colors, densities, depths, rendering_options):
deltas = depths[:, :, 1:] - depths[:, :, :-1]
colors_mid = (colors[:, :, :-1] + colors[:, :, 1:]) / 2
densities_mid = (densities[:, :, :-1] + densities[:, :, 1:]) / 2
depths_mid = (depths[:, :, :-1] + depths[:, :, 1:]) / 2
if rendering_options['clamp_mode'] == 'softplus':
densities_mid = F.softplus(densities_mid - 1) # activation bias of -1 makes things initialize better
else:
assert False, "MipRayMarcher only supports `clamp_mode`=`softplus`!"
density_delta = densities_mid * deltas
alpha = 1 - torch.exp(-density_delta)
alpha_shifted = torch.cat([torch.ones_like(alpha[:, :, :1]), 1-alpha + 1e-10], -2)
weights = alpha * torch.cumprod(alpha_shifted, -2)[:, :, :-1]
composite_rgb = torch.sum(weights * colors_mid, -2)
weight_total = weights.sum(2)
composite_depth = torch.sum(weights * depths_mid, -2) / weight_total
# clip the composite to min/max range of depths
composite_depth = torch.nan_to_num(composite_depth, float('inf'))
composite_depth = torch.clamp(composite_depth, torch.min(depths), torch.max(depths))
if rendering_options.get('white_back', False):
composite_rgb = composite_rgb + 1 - weight_total
composite_rgb = composite_rgb * 2 - 1 # Scale to (-1, 1)
return composite_rgb, composite_depth, weights
def forward(self, colors, densities, depths, rendering_options):
composite_rgb, composite_depth, weights = self.run_forward(colors, densities, depths, rendering_options)
return composite_rgb, composite_depth, weights
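A minimal sketch of the alpha-compositing math in run_forward, using assumed toy values rather than real network outputs:

import torch
sigma = torch.tensor([0.5, 1.0, 2.0])    # densities at three mid-point samples along one ray
delta = torch.tensor([0.1, 0.1, 0.1])    # spacing between consecutive depth samples
alpha = 1 - torch.exp(-sigma * delta)    # per-segment opacity
trans = torch.cumprod(torch.cat([torch.ones(1), 1 - alpha + 1e-10]), dim=0)[:-1]  # transmittance
weights = alpha * trans                  # per-sample contribution to the pixel
# weights.sum() <= 1; the remainder is light passing through the whole ray, which the
# optional white_back term compensates for.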
# Path: models/eg3d/volumetric_rendering/math_utils.py
def transform_vectors(matrix: torch.Tensor, vectors4: torch.Tensor) -> torch.Tensor:
def normalize_vecs(vectors: torch.Tensor) -> torch.Tensor:
def torch_dot(x: torch.Tensor, y: torch.Tensor):
def get_ray_limits_box(rays_o: torch.Tensor, rays_d: torch.Tensor, box_side_length):
def linspace(start: torch.Tensor, stop: torch.Tensor, num: int):
# Path: models/eg3d/volumetric_rendering/renderer.py
import math
import torch
import torch.nn as nn
from models.eg3d.volumetric_rendering.ray_marcher import MipRayMarcher2
from models.eg3d.volumetric_rendering import math_utils
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The renderer is a module that takes in rays, decides where to sample along each
ray, and computes pixel colors using the volume rendering equation.
"""
def generate_planes():
"""
Defines planes by the three vectors that form the "axes" of the
plane. Should work with arbitrary number of planes and planes of
arbitrary orientation.
"""
return torch.tensor([[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
[[1, 0, 0],
[0, 0, 1],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0],
[0, 1, 0]]], dtype=torch.float32)
def project_onto_planes(planes, coordinates):
"""
Does a projection of a 3D point onto a batch of 2D planes,
returning 2D plane coordinates.
Takes plane axes of shape n_planes, 3, 3
# Takes coordinates of shape N, M, 3
# returns projections of shape N*n_planes, M, 2
"""
N, M, C = coordinates.shape
n_planes, _, _ = planes.shape
coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3)
inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3)
projections = torch.bmm(coordinates, inv_planes)
return projections[..., :2]
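A quick shape check with assumed toy tensors, matching the shapes stated in the docstring:

import torch
planes = generate_planes()                 # (3, 3, 3): one axis basis per canonical plane
coords = torch.rand(2, 5, 3)               # N=2 batches of M=5 points in 3D
project_onto_planes(planes, coords).shape  # -> torch.Size([6, 5, 2]) == (N * n_planes, M, 2)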
def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
assert padding_mode == 'zeros'
N, n_planes, C, H, W = plane_features.shape
_, M, _ = coordinates.shape
plane_features = plane_features.view(N*n_planes, C, H, W)
coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds
projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
return output_features
def sample_from_3dgrid(grid, coordinates):
"""
Expects coordinates in shape (batch_size, num_points_per_batch, 3)
Expects grid in shape (1, channels, H, W, D)
(Also works if grid has batch size)
Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)
"""
batch_size, n_coords, n_dims = coordinates.shape
sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1),
coordinates.reshape(batch_size, 1, 1, -1, n_dims),
mode='bilinear', padding_mode='zeros', align_corners=False)
N, C, H, W, D = sampled_features.shape
sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C)
return sampled_features
class ImportanceRenderer(torch.nn.Module):
def __init__(self):
super().__init__()
| self.ray_marcher = MipRayMarcher2() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lumi-ua/goit-project2-django-assistant
# Path: personal_assistant/app_contacts/forms.py
class ContactForm(ModelForm):
fullname = CharField(max_length=255,
widget=forms.TextInput(attrs={'placeholder': 'Name Lastname', "class": "form-control"}))
address = CharField(max_length=255, required=False,
widget=forms.TextInput(attrs={'placeholder': 'City, Street, House number', "class": "form-control"}))
birthday = DateField(required=False, input_formats=["%d.%m.%Y"],
widget=forms.DateInput(attrs={'placeholder': 'DD.MM.YYYY', 'class': 'form-control'}))
class Meta:
model = Contact
fields = ["fullname", "address", "birthday"]
exclude = ["user"]
# Path: personal_assistant/app_contacts/forms.py
class PhoneNumberForm(forms.ModelForm):
phone_number = PhoneNumberField(
widget=PhoneNumberPrefixWidget(attrs={'placeholder': '+380', 'class': 'form-control'})
)
class Meta:
model = PhoneNumber
fields = ["phone_number"]
exclude = ["contact"]
# Path: personal_assistant/app_contacts/forms.py
class EmailAddressForm(forms.ModelForm):
email = EmailField(max_length=100, required=False, widget=forms.EmailInput(attrs={'placeholder': '[email protected]', 'class': 'form-control'}))
class Meta:
model = EmailAddress
fields = ["email"]
exclude = ["contact"]
# Path: personal_assistant/app_contacts/models.py
class Contact(models.Model):
fullname = models.CharField(max_length=255)
address = models.CharField(max_length=255, blank=True, null=True)
birthday = models.DateField(blank=True, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
# Path: personal_assistant/app_contacts/models.py
class PhoneNumber(models.Model):
phone_number = PhoneNumberField(null=True,)
contact = models.ForeignKey(
Contact, on_delete=models.CASCADE, default=None, null=True, related_name='phone_numbers'
)
# Path: personal_assistant/app_contacts/models.py
class EmailAddress(models.Model):
email = models.EmailField(max_length=100, null=True)
contact = models.ForeignKey(
Contact, on_delete=models.CASCADE, default=None, null=True, related_name='email_addresses'
)
# Path: personal_assistant/app_contacts/views.py
from datetime import date
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from django.urls import reverse_lazy
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from datetime import date, timedelta
from .forms import ContactForm, PhoneNumberForm, EmailAddressForm
from .models import Contact, PhoneNumber, EmailAddress
# from django.db.models import Q
# Create your views here.
@login_required
def dashboard(request):
return render(request, 'app_contacts/dashboard.html', {"title": "Dashboard contact operations"})
@login_required
def contact(request):
contact_form = ContactForm()
| phone_number_form = PhoneNumberForm() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SubConv/SubConv
# Path: modules/convert/util.py
def RandUserAgent() -> str:
return userAgents[random.randint(0, len(userAgents) - 1)]
# Path: modules/convert/util.py
def get(content):
if content is None:
return ""
else:
return content
# Path: modules/convert/util.py
def uniqueName(names: dict, name):
index = names.get(name)
if index is None:
index = 0
names[name] = index
else:
index += 1
names[name] = index
name = "%s-%02d" % (name, index)
return name
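A small usage sketch with hypothetical names: duplicates get a zero-padded numeric suffix while the shared dict tracks the counts.

names = {}
uniqueName(names, "proxy")  # -> "proxy"
uniqueName(names, "proxy")  # -> "proxy-01"
uniqueName(names, "proxy")  # -> "proxy-02"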
# Path: modules/convert/util.py
def urlSafe(string):
return string.replace("+", "-").replace("/", "_")
# Path: modules/convert/util.py
def base64RawStdDecode(encoded):
return base64.b64decode(
encoded + "="*(-len(encoded)%4)
).decode("utf-8")
# Path: modules/convert/util.py
def base64RawURLDecode(encoded):
return base64.urlsafe_b64decode(
encoded + "="*(-len(encoded)%4)
).decode("utf-8")
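A minimal sketch of the padding restoration with an assumed input: share links often strip the "=" padding, and "=" * (-len(encoded) % 4) pads the string back to a multiple of four before decoding.

base64RawStdDecode("aGVsbG8")  # -> "hello"  (one "=" is re-added, then decoded as standard base64)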
# Path: modules/convert/v.py
def handleVShareLink(names: dict, url: urlparse.ParseResult, scheme: str, proxy: dict):
query = dict(urlparse.parse_qsl(url.query))
proxy["name"] = uniqueName(names, urlparse.unquote(url.fragment))
if url.hostname == "":
raise
if url.port == "":
raise
proxy["type"] = scheme
proxy["server"] = url.hostname
proxy["port"] = url.port
proxy["uuid"] = url.username
proxy["udp"] = True
tls = get(query.get("security")).lower()
if tls.endswith("tls") or tls == "reality":
proxy["tls"] = True
fingerprint = get(query.get("fp"))
if fingerprint == "":
proxy["client-fingerprint"] = "chrome"
else:
proxy["client-fingerprint"] = fingerprint
alpn = get(query.get("alpn"))
if alpn != "":
proxy["alpn"] = alpn.split(",")
sni = get(query.get("sni"))
if sni != "":
proxy["servername"] = sni
realityPublicKey = get(query.get("pbk"))
if realityPublicKey != "":
proxy["reality-opts"] = {
"public-key": realityPublicKey,
"short-id": get(query.get("sid"))
}
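# packetEncoding: "none"/empty keeps the default behaviour, "packet" enables packet-addr, anything else enables xudp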
switch = get(query.get("packetEncoding"))
if switch == "none" or switch == "":
pass
elif switch == "packet":
proxy["packet-addr"] = True
else:
proxy["xudp"] = True
network = get(query.get("type")).lower()
if network == "":
network = "tcp"
fakeType = get(query.get("headerType")).lower()
if fakeType == "http":
network = "http"
elif network == "http":
network = "h2"
proxy["network"] = network
if network == "tcp":
if fakeType != "none" and fakeType != "":
headers = {}
httpOpts = {}
httpOpts["path"] = "/"
host = get(query.get("host"))
if host != "":
headers["Host"] = str(host)
method = get(query.get("method"))
if method != "":
httpOpts["method"] = method
path = get(query.get("path"))
if path != "":
httpOpts["path"] = str(path)
httpOpts["headers"] = headers
proxy["http-opts"] = httpOpts
elif network == "http":
headers = {}
h2Opts = {}
h2Opts["path"] = "/"
path = get(query.get("path"))
if path != "":
h2Opts["path"] = str(path)
host = get(query.get("host"))
if host != "":
h2Opts["host"] = str(host)
h2Opts["headers"] = headers
proxy["h2-opts"] = h2Opts
elif network == "ws":
headers = {}
wsOpts = {}
headers["User-Agent"] = RandUserAgent()
headers["Host"] = get(query.get("host"))
wsOpts["path"] = get(query.get("path"))
wsOpts["headers"] = headers
earlyData = get(query.get("ed"))
if earlyData != "":
try:
med = int(earlyData)
except:
raise
wsOpts["max-early-data"] = med
earlyDataHeader = get(query.get("edh"))
if earlyDataHeader != "":
wsOpts["early-data-header-name"] = earlyDataHeader
proxy["ws-opts"] = wsOpts
elif network == "grpc":
grpcOpts = {}
grpcOpts["grpc-service-name"] = get(query.get("serviceName"))
proxy["grpc-opts"] = grpcOpts
# Path: modules/convert/converter.py
from modules.convert.util import RandUserAgent
from modules.convert.util import get
from modules.convert.util import uniqueName
from modules.convert.util import urlSafe
from modules.convert.util import base64RawStdDecode
from modules.convert.util import base64RawURLDecode
from modules.convert.v import handleVShareLink
import json
import base64
import urllib.parse as urlparse
import distutils.util
async def ConvertsV2Ray(buf):
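# buf is the raw subscription payload; it may be base64-encoded, plain UTF-8 bytes, or already a str,
# so each decoding is tried in turn before splitting the text into one share link per line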
try:
data = base64.b64decode(buf).decode("utf-8")
except:
try:
data = buf.decode("utf-8")
except:
data = buf
arr = data.splitlines()
proxies = []
names = {}
for line in arr:
if line == "":
continue
if -1 == line.find("://"):
continue
else:
scheme, body = line.split("://", 1)
scheme = scheme.lower()
if scheme == "hysteria":
try:
urlHysteria = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlHysteria.query))
name = uniqueName(names, urlparse.unquote(urlHysteria.fragment))
hysteria = {}
hysteria["name"] = name
hysteria["type"] = scheme
hysteria["server"] = urlHysteria.hostname
hysteria["port"] = urlHysteria.port
| hysteria["sni"] = query.get("peer") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Opt-Mucca/PySCIPOpt-ML
# Path: src/pyscipopt_ml/exceptions.py
class NotRegistered(Exception):
"""Predictor is not supported by pyscipopt-ml."""
def __init__(self, predictor):
super().__init__(
f"Object of type {predictor} is not registered/supported with pyscipopt-ml"
)
# Path: src/pyscipopt_ml/modelling/get_convertor.py
def get_convertor(predictor, convertors):
"""Return the convertor for a given predictor."""
convertor = None
try:
convertor = convertors[type(predictor)]
except KeyError:
pass
if convertor is None:
for parent in type(predictor).mro():
try:
convertor = convertors[parent]
break
except KeyError:
pass
if convertor is None:
name = type(predictor).__name__
try:
convertor = convertors[name]
except KeyError:
pass
return convertor
# Path: src/pyscipopt_ml/registered_predictors.py
def registered_predictors():
"""Return the list of registered predictors."""
convertors = {
**sklearn_convertors(),
**pytorch_convertors(),
**xgboost_convertors(),
**lightgbm_convertors(),
}
return convertors
# Path: src/pyscipopt_ml/add_predictor.py
from warnings import warn
from .exceptions import NotRegistered
from .modelling.get_convertor import get_convertor
from .registered_predictors import registered_predictors
def add_predictor_constr(
scip_model, predictor, input_vars, output_vars=None, unique_naming_prefix="p_", **kwargs
):
"""Formulate predictor in PySCIPOpt model.
The formulation predicts the values of output_vars using input_vars according to
predictor.
Parameters
----------
scip_model : PySCIPOpt Model
The pyscipopt model where the predictor should be inserted.
predictor:
The predictor to insert.
input_vars : list or np.ndarray
Decision variables used as input for predictor in scip_model.
output_vars : list or np.ndarray, optional
Decision variables used as output for predictor in scip_model.
unique_naming_prefix : str, optional
A unique naming prefix that is used before all variable and constraint names. This parameter is important if
the SCIP model is later printed to file and many predictors are added to the same SCIP model.
Returns
-------
AbstractPredictorConstr
Object containing information about what was added to scip_model to insert the
predictor in it
Note
----
The parameters `input_vars` and `output_vars` can be either
* Lists of variables (List of lists etc. for higher dimensional input)
* np.ndarray of variables
For internal use in the package they are cast into a np.ndarray of variables
They should have dimensions that conform with the input/output of the predictor.
We denote by `n_samples` the number of samples (or objects) that we want to predict with our predictor.
We denote by `n_features` the dimension of the input of the predictor.
We denote by `n_output` the dimension of the output.
The `input_vars` are therefore of shape `(n_samples, n_features)` and the `output_vars` of
shape `(n_samples, n_outputs)`. In the case of `output_vars` not being passed, appropriate variables will
be automatically created.
In the case of `n_samples == 1` the first dimension can simply be removed from the input.
"""
convertors = registered_predictors()
| convertor = get_convertor(predictor, convertors) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DongqiShen/qwen-fast
# Path: model.py
class Transformer(nn.Module):
def __init__(self, config: ModelArgs) -> None:
super().__init__()
self.config = config
self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)
self.layers = nn.ModuleList(TransformerBlock(config) for _ in range(config.n_layer))
self.norm = RMSNorm(config.dim, eps=config.norm_eps)
self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
self.freqs_cis: Optional[Tensor] = None
self.mask_cache: Optional[Tensor] = None
self.max_batch_size = -1
self.max_seq_length = -1
def setup_caches(self, max_batch_size, max_seq_length):
if self.max_seq_length >= max_seq_length and self.max_batch_size >= max_batch_size:
return
head_dim = self.config.dim // self.config.n_head
max_seq_length = find_multiple(max_seq_length, 8)
self.max_seq_length = max_seq_length
self.max_batch_size = max_batch_size
for b in self.layers:
b.attention.kv_cache = KVCache(max_batch_size, max_seq_length, self.config.n_local_heads, head_dim)
self.freqs_cis = precompute_freqs_cis(self.config.block_size, self.config.dim // self.config.n_head, self.config.rope_base)
self.causal_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool))
def forward(self, idx: Tensor, input_pos: Optional[Tensor] = None) -> Tensor:
assert self.freqs_cis is not None, "Caches must be initialized first"
mask = self.causal_mask[None, None, input_pos]
freqs_cis = self.freqs_cis[input_pos]
x = self.tok_embeddings(idx)
for i, layer in enumerate(self.layers):
x = layer(x, input_pos, freqs_cis, mask)
x = self.norm(x)
logits = self.output(x)
return logits
@classmethod
def from_name(cls, name: str):
return cls(ModelArgs.from_name(name))
# Path: tp.py
def maybe_init_dist() -> Optional[int]:
try:
# provided by torchrun
rank = _get_rank()
world_size = _get_world_size()
if world_size < 2:
# too few gpus to parallelize, tp is no-op
return None
except KeyError:
# not run via torchrun, no-op
return None
dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
return rank
# Path: generate.py
import sys
import time
import itertools
import torch
import torch._inductor.config
import torch._dynamo.config
import contextlib
import argparse
from pathlib import Path
from typing import Optional, Tuple
from model import Transformer
from tp import maybe_init_dist
from sentencepiece import SentencePieceProcessor
from quantize import WeightOnlyInt8QuantHandler
from quantize import WeightOnlyInt4QuantHandler
from tp import apply_tp
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization
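# Dividing the probabilities by i.i.d. Exponential(1) noise and taking the argmax draws a sample from the
# categorical distribution they define (exponential-race / Gumbel trick), avoiding the device
# synchronization that torch.multinomial would incur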
q = torch.empty_like(probs_sort).exponential_(1)
return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
logits = logits / max(temperature, 1e-5)
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
pivot = v.select(-1, -1).unsqueeze(-1)
logits = torch.where(logits < pivot, -float("Inf"), logits)
probs = torch.nn.functional.softmax(logits, dim=-1)
return probs
def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
probs = logits_to_probs(logits[0, -1], temperature, top_k)
idx_next = multinomial_sample_one_no_sync(probs)
return idx_next, probs
| def prefill(model: Transformer, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> torch.Tensor: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Yanyutin753/CowAndPandoraNext
# Path: channel/channel.py
class Channel(object):
NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE, ReplyType.IMAGE]
def startup(self):
"""
init channel
"""
raise NotImplementedError
def handle_text(self, msg):
"""
process received msg
:param msg: message object
"""
raise NotImplementedError
# Unified send function; each Channel implements it itself and sends different message types according to the reply's type field
def send(self, reply: Reply, context: Context):
"""
send message to user
:param reply: reply content to send
:param context: context of the message being replied to
:return:
"""
raise NotImplementedError
def build_reply_content(self, query, context: Context = None) -> Reply:
return Bridge().fetch_reply_content(query, context)
def build_voice_to_text(self, voice_file) -> Reply:
return Bridge().fetch_voice_to_text(voice_file)
def build_text_to_voice(self, text) -> Reply:
return Bridge().fetch_text_to_voice(text)
# Path: common/dequeue.py
class Dequeue(Queue):
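# Queue subclass that can additionally insert items at the front of the underlying deque (mirrors Queue.put / put_nowait)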
def putleft(self, item, block=True, timeout=None):
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._putleft(item)
self.unfinished_tasks += 1
self.not_empty.notify()
def putleft_nowait(self, item):
return self.putleft(item, block=False)
def _putleft(self, item):
self.queue.appendleft(item)
# Path: common/log.py
def _reset_logger(log):
def _get_logger():
# Path: config.py
def conf():
return config
# Path: channel/chat_channel.py
import os
import re
import threading
import time
from asyncio import CancelledError
from concurrent.futures import Future, ThreadPoolExecutor
from bridge.context import *
from bridge.reply import *
from channel.channel import Channel
from common.dequeue import Dequeue
from common.log import logger
from config import conf
from plugins import *
try:
    from voice.audio_convert import any_to_wav
except Exception as e:
    pass
# Abstract class containing the generic handling logic that is independent of the message channel
class ChatChannel(Channel):
name = None # username of the logged-in account
user_id = None # user id of the logged-in account
futures = {} # future objects submitted to the thread pool per session_id; used to cancel futures that have not started when a session is reset (running ones are not cancelled)
sessions = {} # concurrency control: each session_id may only have one context being processed at a time
lock = threading.Lock() # guards access to sessions
handler_pool = ThreadPoolExecutor(max_workers=8) # thread pool for processing messages
def __init__(self):
_thread = threading.Thread(target=self.consume)
_thread.setDaemon(True)
_thread.start()
# Build the context from the message; triggers that depend on the message content go here
def _compose_context(self, ctype: ContextType, content, **kwargs):
context = Context(ctype, content)
context.kwargs = kwargs
# When the context is first passed in, origin_ctype is None.
# Reason it was introduced: for voice input, two nested contexts are generated, the first for speech-to-text and the second for generating the text reply from that text.
# origin_ctype is used in the second (text reply) step to decide whether prefix matching is required; a voice message in a private chat does not need the prefix match.
if "origin_ctype" not in context:
context["origin_ctype"] = ctype
# When the context is first passed in, receiver is None; set it according to the type
first_in = "receiver" not in context
# Group-name matching: set session_id and receiver
if first_in: # when the context is first passed in, receiver is None; set it according to the type
| config = conf() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nerdslab/bams
# Path: bams/models/mlp.py
class MLP(nn.Module):
r"""Flexible Multi-layer perceptron model, with optional batchnorm layers.
Args:
hidden_layers (list): List of layer dimensions, from input layer to output
layer. If first input size is -1, will use a lazy layer.
bias (boolean, optional): If set to :obj:`True`, bias will be used in linear
layers. (default: :obj:`True`).
activation (torch.nn.Module, optional): Activation function. (default:
:obj:`nn.ReLU`).
batchnorm (boolean, optional): If set to :obj:`True`, batchnorm layers are
added after each linear layer, before the activation (default:
:obj:`False`).
drop_last_nonlin (boolean, optional): If set to :obj:`True`, the last layer
won't have activations or batchnorm layers. (default: :obj:`True`)
Examples:
>>> m = MLP([-1, 16, 64])
MLP(
(layers): Sequential(
(0): LazyLinear(in_features=0, out_features=16, bias=True)
(1): ReLU(inplace=True)
(2): Linear(in_features=16, out_features=64, bias=True)
)
)
"""
def __init__(
self,
hidden_layers,
*,
bias=True,
activation=nn.ReLU(True),
batchnorm=False,
drop_last_nonlin=True
):
super().__init__()
# build the layers
layers = []
for in_dim, out_dim in zip(hidden_layers[:-1], hidden_layers[1:]):
if in_dim == -1:
layers.append(nn.LazyLinear(out_dim, bias=bias and not batchnorm))
else:
layers.append(nn.Linear(in_dim, out_dim, bias=bias and not batchnorm))
if batchnorm:
layers.append(nn.BatchNorm1d(num_features=out_dim, momentum=0.99))
# layers.append(nn.LayerNorm(out_dim))
if activation is not None:
activation = nn.PReLU(1)
layers.append(activation)
# remove activation and/or batchnorm layers from the last block
if drop_last_nonlin:
remove_layers = -(int(activation is not None) + int(batchnorm))
if remove_layers:
layers = layers[:remove_layers]
self.layers = nn.Sequential(*layers)
self.out_dim = hidden_layers[-1]
def forward(self, x):
x = self.layers(x)
return x
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Linear):
m.reset_parameters()
# Path: bams/models/tcn.py
class TemporalConvNet(nn.Module):
def __init__(
self,
num_inputs,
num_channels,
kernel_size=2,
num_layers_per_block=2,
dropout=0.2,
shift=0,
dilation=2,
):
super(TemporalConvNet, self).__init__()
self.num_levels = len(num_channels)
self.kernel_size = prepare_argument(kernel_size, self.num_levels)
self.num_layers_per_block = prepare_argument(
num_layers_per_block, self.num_levels
)
self.dilation = dilation
self.feat_dim = num_channels[-1]
layers = []
for i in range(self.num_levels):
dilation_size = dilation**i
shift_ = shift if i == (self.num_levels - 1) else 0
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
kernel_size = self.kernel_size[i]
num_layers_per_block = self.num_layers_per_block[i]
layers += [
TemporalBlock(
in_channels,
out_channels,
kernel_size,
stride=1,
dilation=dilation_size,
padding=(kernel_size - 1) * dilation_size,
n_layers=num_layers_per_block,
dropout=dropout,
shift=shift_ * dilation_size,
)
]
self.network = nn.Sequential(*layers)
@property
def receptive_field(self):
return compute_receiptive_field(
kernel_size=self.kernel_size,
num_blocks=self.num_levels,
num_layers_per_block=self.num_layers_per_block,
dilation=self.dilation,
)
def forward(self, x):
x = rearrange(x, "b l k -> b k l")
ret = self.network(x)
ret = rearrange(ret, "b k l -> b l k")
return ret
# Path: bams/models/bams.py
from collections import OrderedDict
from bams.models import TemporalConvNet, MLP
import torch
import torch.nn as nn
class BAMS(nn.Module):
r"""BAMS model.
Args:
input_size (int): Number of input features.
predictor (dict): Parameters for the predictor MLP.
encoders (dict[dict]): A dictionnary of encoders, where each key is the name of
the encoder, and each value is a dictionnary of parameters for the encoder.
Each encoder is a TemporalConvNet.
"""
def __init__(
self,
input_size,
*,
predictor=None,
**encoder_kwargs,
):
super().__init__()
self.input_size = input_size
self.representation_size = 0
encoders = dict()
for name, tcn_kwargs in encoder_kwargs.items():
assert "num_inputs" not in tcn_kwargs
encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)
self.representation_size += tcn_kwargs["num_channels"][-1]
self.encoders = torch.nn.ModuleDict(encoders)
# hoa predictor (first layer is a lazy linear layer)
| self.predictor = MLP(**predictor) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: FF14CN/Sarean-arsenal
# Path: Utility/sdoLogin/Daoyu.py
def dykey_encrypt(self):
def config_handler():
def initialize():
def get_guid(device_id, manuid):
def get_flowid(manuid, deviceid, sessionid, show_username):
def get_account_id_list(flowid, deviceid, manuid, sessionid, show_username):
def make_confirm(account_id, flowid, deviceid, manuid, sessionid, show_username):
def get_sub_account_key(flowid, manuid, deviceid, sessionid, show_username):
def get_temp_sessionid(main_key):
def get_sub_account_session(sub_account_key, temp_account_sessionid):
# Path: Utility/sqMall/daoyuBuildinMallSign.py
def daoyumall_sign(sub_session_id, account_id):
"""
Only for the Shengqu mall check-in inside the Daoyu app; not applicable on the PC side.
:param sub_session_id: the Daoyukey value of the sub-account
:param account_id: the AccountID of the sub-account
:return: 0: check-in succeeded 1: already checked in today 2: check-in failed
"""
sign_url = 'https://sqmallservice.u.sdo.com/api/us/integration/checkIn'
sign_data = {'merchantId': 1}
sign_header = {
'authority': 'sqmallservice.u.sdo.com',
'method': 'PUT',
'scheme': 'https',
'qu-web-host': 'https://m.qu.sdo.com',
'qu-hardware-platform': '1',
'qu-software-platform': '2',
'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DaoYu/9.3.3',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'accept': 'application/json, text/javascript, */*; q=0.01',
'qu-deploy-platform': '4',
'qu-merchant-id': '1',
'origin': 'https://m.qu.sdo.com',
'x-requested-with': 'com.sdo.sdaccountkey',
'sec-fetch-site': 'same-site',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://m.qu.sdo.com/',
}
sign_cookies = {
'sessionId': sub_session_id,
'direbmemllam': account_id,
}
sign_response = requests.put(sign_url, headers=sign_header, cookies=sign_cookies, data=sign_data, verify=False)
sign_json = sign_response.json()
if sign_json['resultMsg'] == 'SUCCESS':
return 0
elif sign_json['resultMsg'] == '今日已签到,请勿重复签到':
return 1
else:
return 2
# Path: Utility/sqMall/daoyuBuildinMallBalance.py
def daoyu_mall_balance(session_id):
"""
Only for querying check-in points in the mall inside the Daoyu app; not applicable on the PC side.
:param session_id: the Daoyukey value of the sub-account
:return: the check-in points balance
"""
get_balance_url = 'https://sqmallservice.u.sdo.com/api/rs/member/integral/balance?merchantId=1'
get_balance_header = {
'authority': 'sqmallservice.u.sdo.com',
'method': 'GET',
'scheme': 'https',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'qu-deploy-platform': '4',
'accept': 'application/json, text/javascript, */*; q=0.01',
'qu-merchant-id': '1',
'qu-hardware-platform': '1',
'qu-software-platform': '2',
'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DaoYu/9.3.3',
'qu-web-host': 'https://m.qu.sdo.com',
'origin': 'https://m.qu.sdo.com',
'x-requested-with': 'com.sdo.sdaccountkey',
'sec-fetch-site': 'same-site',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://m.qu.sdo.com/',
'accept-encoding': 'gzip, deflate',
'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
}
get_balance_cookies = {
'sessionId': session_id
}
get_balance_response = requests.get(get_balance_url, headers=get_balance_header, cookies=get_balance_cookies,
verify=False)
get_balance_json = get_balance_response.json()
balance = get_balance_json['data']['balance']
return balance
# Path: Utility/sqMall/sqMallDoSign.py
from Utility.sdoLogin import Daoyu
from Utility.sqMall.daoyuBuildinMallSign import daoyumall_sign
from Utility.sqMall.daoyuBuildinMallBalance import daoyu_mall_balance
import Utility.Notifications.push as pusher
"""
Author: KuliPoi
Contact: [email protected]
Created: 2023-12-21
File: sqMallDoSign.py
Version: 2.5.0
Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY
"""
def main():
| if Daoyu.initialize(): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: janmartchouk/vidgen
# Path: config/dicts.py
SUBREDDITS = {
'tifu': 'rss',
'confession': 'rss',
'relationship_advice': 'web',
'amitheasshole': 'rss'
}
# Path: utils/logger.py
def setup_logger(name, level=logging.INFO, emoji='⚙️'):
"""To setup as many loggers as you want"""
# Create handlers
c_handler = logging.StreamHandler()
c_handler.setLevel(level)
# Create formatters and add it to handlers
c_format = ColoredFormatter(emoji + ' | %(name)s | %(message)s')
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(c_handler)
return logger
# Path: models/post.py
class Post:
"""
A class representing a Reddit post.
"""
def __init__(self, title, author, subreddit, content, crawl_date):
"""
Initialize a Post object.
:param title: The title of the post.
:type title: str
:param author: The author of the post.
:type author: str
:param subreddit: The subreddit of the post.
:type subreddit: str
:param content: The content of the post.
:type content: str
:param crawl_date: The date the post was crawled.
:type crawl_date: datetime.datetime
"""
# Simple data stores
self.author = author
self.subreddit = subreddit
self.crawl_date = crawl_date
# Replace Reddit slang in title and content
self.title = replace_words(title, REDDIT_SLANG)
self.content = replace_words(content, REDDIT_SLANG)
# Remove Age/Gender Reddit-typical tuples
self.title = re.sub(r"\(?\d{1,3}[mfMF]\)?", '', self.title).strip()
self.content = re.sub(r"\(?\d{1,3}[mfMF]\)?", '', self.content).strip()
# Clean up potentially spammy fields
self.author = self.author.replace('\n', ' ').replace('\t', ' ')
self.author = re.sub(' +', ' ', self.author).strip()
self.title = self.title.replace('\n', ' ').replace('\t', ' ')
self.title = re.sub(' +', ' ', self.title).strip()
self.content = self.content.replace('\n', ' ').replace('\t', ' ')
self.content = re.sub(' +', ' ', self.content).strip()
# Calculate hash from title + author + subreddit
self.hash = hashlib.sha256(
str.encode(self.title) + str.encode(self.author) +
str.encode(self.subreddit)
).hexdigest()
# Shorten title and hash
self.short_title = shorten_string(self.title)
self.short_hash = shorten_hash(self.hash)
# By default, we don't have a generated audio, subtitles or video yet
self.audio = False
self.subtitles = False
self.video = False
self.uploaded_youtube = False
# Used for storing which platforms the post has been uploaded to
self.posted_to = []
def __str__(self, short=True) -> str:
return f"""{self.hash}
├── title: {self.title},
├── author: {self.author},
├── subreddit: {self.subreddit},
├── content: {shorten_string(self.content, max_length=50) if short else self.content},
└── crawl_date: {self.crawl_date})"""
# Path: src/content_getter.py
import feedparser
import logging
import time
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from config.dicts import SUBREDDITS
from utils.logger import setup_logger
from models.post import Post
class ContentGetter:
def __init__(self, loglevel = logging.INFO):
self.logger = setup_logger(__name__, loglevel, emoji='🌍')
# Get a list of Reddit Posts from an RSS feed
def from_subreddit(self, subreddit):
if not subreddit in SUBREDDITS:
self.logger.error(f"{subreddit} is not configured")
exit(1)
if SUBREDDITS[subreddit] == 'rss':
return self.from_rss_subreddit(subreddit)
elif SUBREDDITS[subreddit] == 'web':
return self.from_web(subreddit)
else:
self.logger.error(f"{subreddit} is not configured properly")
exit(1)
def from_rss_subreddit(self, subreddit):
data = feedparser.parse(f'https://reddit.com/r/{subreddit}/top.rss')
posts = []
failed_number = 0
if data.entries:
try:
for entry in data.entries:
paragraphs = BeautifulSoup(entry.content[0].value, 'html.parser').find_all('p')
content = ''.join([p.get_text() for p in paragraphs])
| post_obj = Post( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: asdfghjil/XMUCourseCheckin
# Path: checkinList.py
def getCheckinList(session, http_header, userInfo, today=True):
try:
url = serverUrl + "/getQdKbList"
data = {
'sign': userInfo['sign'],
'userType': userInfo['userType'],
'userCode': userInfo['userCode'],
'unitCode': userInfo['unitCode'],
'userName': userInfo['userName'],
'roleCode': userInfo['roleCode'],
'bm': None,
'xyMc': userInfo['xy'],
'zy': userInfo['zy'],
'bj': userInfo['bj'],
'xsCc': userInfo['xsCc'],
'scene': 1,
'key': 1 if today else 2
}
res = session.post(url, data=data, headers=http_header).text
res = json.loads(res)
if res['status'] != 1:
print('get Checkin list failed')
raise Exception('get Checkin list failed')
# print(res)
return res['Rows']
except:
print(json.dumps({
"status": "failed",
"reason": "Get checkin list failed"
}, indent=4))
raise
# Path: checkinList.py
def printCheckinList(session, http_header, userInfo, today=True, type="签到"):
rows = getCheckinList(session, http_header, userInfo, today)
for id, lesson in enumerate(rows):
print(id)
print('课程名称:', lesson['kcMc'])
print('上课时间:', lesson['skSj'])
print('签到发起情况:', lesson['qdQkMc'])
print("签到情况:", lesson['xsQdQkMc'] + ('' if lesson['xsQdQk'] == '0' else f"({lesson['skXsStr']})"))
# print('\n')
try:
ckid = int(input("请输入" + type + "课程的序号:"))
except:
print('输入错误')
raise Exception('输入错误')
if ckid < 0 or ckid >= len(rows):
print('输入错误')
raise Exception('输入错误')
return rows[ckid]
# Path: checkin.py
import json
import requests
import sys
import time
import random
from checkinList import getCheckinList, printCheckinList
serverUrl = "https://tingke.xmu.edu.cn/app"
def getCheckinInfo(session, http_header, userInfo, lesson):
try:
url = serverUrl + "/getXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'xkKh': lesson['xkKh'],
'qdRq': lesson['qdRq'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'qdId': lesson['qdId'],
'isFz': lesson['isFz'],
'fzMc': lesson['fzMc']
}
res = session.post(url, data=data, headers=http_header)
if res.status_code != 200:
raise Exception('get Checkin info failed')
res = json.loads(res.text)
return res['Rows']
except:
print(json.dumps({
"status": "failed",
"reason": "Get checkin info failed"
}, indent=4))
raise
def checkin(session, http_header, userInfo, lesson, tips=True):
checkinInfo = getCheckinInfo(session, http_header, userInfo, lesson)
print('签到口令:', checkinInfo['klHm'])
# print(lesson['xsQdQk'], lesson['skXs'], lesson['bqMode'], lesson['qdNum'])
if tips:
if lesson['xsQdQk'] != '0' and lesson['skXs'] == '2' and (lesson['bqMode'] != '2' or lesson['qdNum'] != 1):
choice = input('您似乎已经线下签到过了,是否继续签到?(y/n)')
if choice != 'y':
return
if input('是否进行自动签到?(y/n)') != 'y':
return
try:
url = serverUrl + "/saveXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'bjMc': userInfo['bj'],
'zyMc': userInfo['zy'],
'xyMc': userInfo['xy'],
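# report the check-in coordinates with a random jitter of up to ±0.0001 degrees around the class location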
'wzJd': str(float(checkinInfo['wzJd']) + (random.random() - 0.5) * 2 * 0.0001),
'wzWd': str(float(checkinInfo['wzWd']) + (random.random() - 0.5) * 2 * 0.0001),
'qdId': checkinInfo['uniqueCode'],
'xkKh': checkinInfo['xkKh'],
'skDd': lesson['skDd'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'isFace': None,
# 'isFace': checkinInfo['xsIsFace'],
'wzAcc': 0,
'bqMode': lesson['bqMode'],
'isFz': checkinInfo['isFz'],
'fzMc': lesson['fzMc'],
'djc': lesson['djc'],
'qdJc': lesson['qdJc']
}
# print("**********")
res = session.post(url, data=data, headers=http_header).text
res = json.loads(res)
if res['status'] == 1:
print('签到成功!')
return True
elif res['status'] == 6:
print('签到异常提醒:', res['msg'])
return False
else:
print('签到失败!', res['msg'])
raise Exception('签到失败:' + res['msg'])
except:
print(json.dumps({
"status": "failed",
"reason": "Checkin failed"
}, indent=4))
return False
def courseCheckin(session, http_header, userInfo):
| lesson = printCheckinList(session, http_header, userInfo, today=True) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Kanaries/kanaries-track
# Path: kanaries_track/config.py
class Config:
# Path: kanaries_track/request.py
class RequestClient:
"""Client for sending events to kanaries-track server"""
def __init__(
self,
*,
host: str,
auth_token: str,
max_retries: int,
timeout: int,
verify: bool,
proxy: Any
) -> None:
self.host = host
self.auth_token = auth_token
self.max_retries = max_retries
self.timeout = timeout
self.verify = verify
self.proxy = proxy
self.session = Session()
def _post(self, path: str, data: Dict[str, Any]) -> Response:
"""Post data to url"""
url = f"{self.host}{path}"
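# retry the request with exponential backoff, up to max_retries attempts, on any exception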
@backoff.on_exception(
backoff.expo,
Exception,
max_tries=self.max_retries,
)
def __post():
return self.session.post(
url,
headers={"Track-Key": self.auth_token},
json=data,
timeout=self.timeout,
verify=self.verify,
proxies=self.proxy
)
return __post()
def track(self, events: List[Dict[str, Any]]):
"""Send events to kanaries-track server"""
logger.debug("send requests to server, event count: %s", len(events))
try:
resp = self._post("/ingest/track", events)
logger.debug("track resp: %s", resp.text)
except Exception as e:
logger.error("Failed to send events to server: %s", str(e))
# Path: kanaries_track/client.py
from typing import Dict, Any
from datetime import datetime
from threading import Thread
from functools import lru_cache
from dateutil.tz import tzlocal
from .config import config
from .request import RequestClient
import queue
import uuid
import logging
import time
import atexit
self.ruuning = False
def _upload(self):
"""Upload events"""
start_time = time.monotonic()
events = []
while len(events) < self.upload_size:
elapsed_seconds = time.monotonic() - start_time
if elapsed_seconds >= self.upload_interval_seconds:
break
try:
event = self.event_queue.get(block=True, timeout=self.upload_interval_seconds - elapsed_seconds)
events.append(event)
except queue.Empty:
break
except Exception as e:
logger.error("Failed to get event from queue: %s", str(e))
logger.debug("invoke uploading events, event count: %s", len(events))
if events:
self.request_client.track(events)
class Client:
"""Client for sending events to kanaries-track server"""
def __init__(
self,
*,
host: str,
auth_token: str,
debug: bool,
send: bool,
sync_send: bool,
max_queue_size: int,
timeout_seconds: int,
max_retries: int,
proxies: Dict[str, Any],
thread_count: int,
verify: bool,
upload_interval_seconds: int,
upload_size: int
):
self.host = host
self.auth_token = auth_token
self.debug = debug
self.send = send
self.sync_send = sync_send
self.max_queue_size = max_queue_size
self.timeout_seconds = timeout_seconds
self.max_retries = max_retries
self.proxies = proxies
self.thread_count = thread_count
self.verify = verify
self.upload_interval_seconds = upload_interval_seconds
self.upload_size = upload_size
self._consumers = []
self._request_client = RequestClient(
host=self.host,
auth_token=self.auth_token,
max_retries=self.max_retries,
timeout=self.timeout_seconds,
verify=self.verify,
proxy=self.proxies
)
self._event_queue = queue.Queue(self.max_queue_size)
if not self.sync_send and self.send:
for _ in range(self.thread_count):
consumer = _Consumer(
event_queue=self._event_queue,
request_client=self._request_client,
upload_size=self.upload_size,
upload_interval_seconds=self.upload_interval_seconds
)
consumer.start()
self._consumers.append(consumer)
atexit.register(self._end)
if self.debug:
logger.setLevel(logging.DEBUG)
def track(self, event: Dict[str, Any]):
"""Track an event"""
event = self._fill_data(event)
if not self.send:
return
if self.sync_send:
self._request_client.track([event])
else:
self._enqueue(event)
def _fill_data(self, event: Dict[str, Any]) -> Dict[str, Any]:
"""Fill data for an event"""
event["timestamp"] = datetime.now().replace(tzinfo=tzlocal()).isoformat()
event["message_id"] = str(uuid.uuid4())
return event
def _enqueue(self, event: Dict[str, Any]):
"""Enqueue an event"""
logger.debug("enqueue event: %s", event)
try:
self._event_queue.put(event, block=False)
except queue.Full:
logger.warning("Event queue is full, dropping event")
def _end(self):
"""End the client when the main thread exits"""
for consumer in self._consumers:
consumer.pause()
consumer.join()
@lru_cache(maxsize=1)
def get_client():
"""Get a client"""
return Client(
| host=config.host, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Yingyue-L/Mamba-LLaVA
# Path: llava/model/multimodal_encoder/builder.py
def build_vision_tower(vision_tower_cfg, **kwargs):
vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
raise ValueError(f'Unknown vision tower: {vision_tower}')
# Path: llava/model/multimodal_projector/builder.py
def build_vision_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'mm_projector_type', 'linear')
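# supported values: "linear", "mlpNx_gelu" (an N-layer MLP with GELU activations, e.g. "mlp2x_gelu"), and "identity"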
if projector_type == 'linear':
return nn.Linear(config.mm_hidden_size, config.hidden_size)
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
return nn.Sequential(*modules)
if projector_type == 'identity':
return IdentityMap()
raise ValueError(f'Unknown projector type: {projector_type}')
# Path: llava/constants.py
IGNORE_INDEX = -100
# Path: llava/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: llava/constants.py
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
# Path: llava/constants.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: llava/constants.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: llava/model/llava_arch.py
from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
| self.vision_tower = build_vision_tower(config, delay_load=True) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Theia-4869/MoSA
# Path: src/engine/eval/multilabel.py
def get_continuous_ids(probe_labels: List[int]) -> Dict[int, int]:
def multihot(x: List[List[int]], nb_classes: int) -> np.ndarray:
def compute_map(
scores: np.ndarray, multihot_targets: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float, float]:
def compute_f1(
multihot_targets: np.ndarray, scores: np.ndarray, threshold: float = 0.5
) -> Tuple[float, float, float]:
def get_best_f1_scores(
multihot_targets: np.ndarray, scores: np.ndarray, threshold_end: int
) -> Dict[str, float]:
# Path: src/engine/eval/singlelabel.py
def accuracy(y_probs, y_true):
def top_n_accuracy(y_probs, truths, n=1):
def compute_acc_auc(y_probs, y_true_ids):
def topks_correct(preds, labels, ks):
def topk_errors(preds, labels, ks):
def topk_accuracies(preds, labels, ks):
# Path: src/utils/logging.py
_FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s"
def _suppress_print():
def print_pass(*objects, sep=" ", end="\n", file=sys.stdout, flush=False):
def _cached_log_stream(filename):
def setup_logging(
num_gpu, num_shards, output="", name="MOSA", color=True):
def setup_single_logging(name, output=""):
def get_logger(name):
def log_json_stats(stats, sort_keys=True):
def __init__(self, *args, **kwargs):
def formatMessage(self, record: logging.LogRecord) -> str:
class _ColorfulFormatter(logging.Formatter):
# Path: src/engine/evaluator.py
import numpy as np
from collections import defaultdict
from typing import List, Union
from .eval import multilabel
from .eval import singlelabel
from ..utils import logging
#!/usr/bin/env python3
logger = logging.get_logger("MOSA")
class Evaluator():
"""
An evaluator with below logics:
1. find which eval module to use.
2. store the eval results, pretty print it in log file as well.
"""
def __init__(
self,
) -> None:
self.results = defaultdict(dict)
self.iteration = -1
self.threshold_end = 0.5
def update_iteration(self, iteration: int) -> None:
"""update iteration info"""
self.iteration = iteration
def update_result(self, metric: str, value: Union[float, dict]) -> None:
if self.iteration > -1:
key_name = "epoch_" + str(self.iteration)
else:
key_name = "final"
if isinstance(value, float):
self.results[key_name].update({metric: value})
else:
if metric in self.results[key_name]:
self.results[key_name][metric].update(value)
else:
self.results[key_name].update({metric: value})
def classify(self, probs, targets, test_data, multilabel=False):
"""
Evaluate classification result.
Args:
probs: np.ndarray for num_data x num_class, predicted probabilities
targets: np.ndarray for multilabel, list of integers for single label
test_labels: map test image ids to a list of class labels
"""
if not targets:
raise ValueError(
"When evaluating classification, need at least give targets")
if multilabel:
self._eval_multilabel(probs, targets, test_data)
else:
self._eval_singlelabel(probs, targets, test_data)
def _eval_singlelabel(
self,
scores: np.ndarray,
targets: List[int],
eval_type: str
) -> None:
"""
if number of labels > 2:
top1 and topk (5 by default) accuracy
if number of labels == 2:
top1 and rocauc
"""
| acc_dict = singlelabel.compute_acc_auc(scores, targets) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: IBM/AI-assisted-chemical-sensing
# Path: src/chemsense/vision/logging_configuration.py
def setup_basic_logging_for_scripts() -> None:
"""Setup basic stdout logging for scripts."""
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# Path: src/chemsense/vision/modeling/encoders.py
ENCODERS_REGISTRY = {
"mobilenetv2_35_96": {
"processor": AutoImageProcessor.from_pretrained("google/mobilenet_v2_0.35_96"),
"model": MobileNetV2Model.from_pretrained("google/mobilenet_v2_0.35_96"),
"size": 96,
},
"mobilenetv2_100_224": {
"processor": AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224"),
"model": MobileNetV2Model.from_pretrained("google/mobilenet_v2_1.0_224"),
"size": 224,
},
"mobilenetv2_140_224": {
"processor": AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.4_224"),
"model": MobileNetV2Model.from_pretrained("google/mobilenet_v2_1.4_224"),
"size": 224,
},
"resnet_18": {
"processor": AutoImageProcessor.from_pretrained("microsoft/resnet-18"),
"model": ResNetModel.from_pretrained("microsoft/resnet-18"),
"size": 224,
},
"resnet_50": {
"processor": AutoImageProcessor.from_pretrained("microsoft/resnet-50"),
"model": ResNetModel.from_pretrained("microsoft/resnet-50"),
"size": 224,
},
"resnet_101": {
"processor": AutoImageProcessor.from_pretrained("microsoft/resnet-101"),
"model": ResNetModel.from_pretrained("microsoft/resnet-101"),
"size": 224,
},
"vit_base_224": {
"processor": ViTImageProcessor.from_pretrained("google/vit-base-patch16-224"),
"model": ViTModel.from_pretrained("google/vit-base-patch16-224"),
"size": 224,
},
"vit_base_384": {
"processor": ViTImageProcessor.from_pretrained("google/vit-base-patch16-384"),
"model": ViTModel.from_pretrained("google/vit-base-patch16-384"),
"size": 384,
},
"vit_large_224": {
"processor": ViTImageProcessor.from_pretrained("google/vit-large-patch16-224"),
"model": ViTModel.from_pretrained("google/vit-large-patch16-224"),
"size": 224,
},
"beit_base_224": {
"processor": BeitImageProcessor.from_pretrained(
"microsoft/beit-base-patch16-224-pt22k-ft22k"
),
"model": BeitModel.from_pretrained(
"microsoft/beit-base-patch16-224-pt22k-ft22k"
),
"size": 224,
},
"beit_base_384": {
"processor": BeitImageProcessor.from_pretrained(
"microsoft/beit-base-patch16-384"
),
"model": BeitModel.from_pretrained("microsoft/beit-base-patch16-384"),
"size": 384,
},
"beit_large_224": {
"processor": BeitImageProcessor.from_pretrained(
"microsoft/beit-large-patch16-224-pt22k-ft22k"
),
"model": BeitModel.from_pretrained(
"microsoft/beit-large-patch16-224-pt22k-ft22k"
),
"size": 224,
},
}
# Path: src/chemsense/vision/cli/classification_analysis.py
from pathlib import Path
from chemsense.vision.modeling.classification import (
attach_classification_head_fewshots,
attach_classification_head_kfold,
attach_classification_head_loco,
attach_classification_head_loco_sugars,
)
from ..logging_configuration import setup_basic_logging_for_scripts
from ..modeling.encoders import ENCODERS_REGISTRY
import click
import numpy as np
import pandas as pd
"""Training and testing models with extracted features."""
__copyright__ = """
LICENSED INTERNAL CODE. PROPERTY OF IBM.
IBM Research Licensed Internal Code
(C) Copyright IBM Corp. 2023
ALL RIGHTS RESERVED
"""
@click.command()
@click.option("--task", type=str, default="red_wines", help="Dataset name identifier.")
@click.option(
"--validation",
type=str,
default="kfold",
help="Validation strategy. Supported types are kfold, LOCO, few_shots and Sugar_LOCO.",
)
@click.option(
"--number_of_folds",
type=int,
default=5,
help="number of folds to be used in case of kfold validation.",
)
@click.option(
"--number_of_components",
type=int,
default=30,
help="Max number of principal components to be used.",
)
@click.option(
"--features_path",
required=True,
type=click.Path(path_type=Path, exists=True),
help="Path to directory containing extracted features.",
)
@click.option(
"--output_path",
required=True,
type=click.Path(path_type=Path),
help="Path to save classification model validation results.",
)
def main(
task: str,
validation: str,
number_of_folds: int,
number_of_components: int,
features_path: Path,
output_path: Path,
) -> None:
| setup_basic_logging_for_scripts() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pymike00/tinychat
# Path: tinychat/llms/google.py
class GoogleAIHandler:
"""
Handler class to interact with the OpenAI models.
Returns chat responses and stores the chat history.
TODO: add chat message dataclass so that we can enforce validation of
message format that is needed for working client requests to the API?
"""
def __init__(self):
self._messages = []
self._client = GoogleAIClient()
def export_conversation(self) -> str:
string_conversation = ""
for message in self._messages:
print(message)
if message["role"] == "user":
if string_conversation != "":
string_conversation += "\n\n"
string_conversation += f"You: {message['parts'][0]['text']}"
else:
string_conversation += f"LLM: {message['parts'][0]['text']}"
return string_conversation
def stream_response(self, user_input: str) -> Generator[str, None, None]:
"""
Yield stream responses from the client as they are received.
This method sends the user input to the client and then yields each piece
of the language model's response as it is received in real-time. After the
streaming is complete, it updates the message list with the user input and
the full language model response.
:param user_input: The input string from the user to be sent to the model.
:return: A generator yielding the model's response in streamed parts.
"""
self._messages.append({"parts": [{"text": user_input}], "role": "user"})
stream = self._client.perform_stream_request(self._messages)
lm_response = ""
for event in stream.events(): # type: ignore
if event.data != "[DONE]":
json_load = json.loads(event.data)["candidates"][0]["content"]["parts"][0]
response_piece = json_load["text"]
lm_response += response_piece
yield response_piece
self._messages.append({"parts": [{"text": lm_response}], "role": "model"})
# Path: tinychat/llms/google.py
class GoogleAIClient(BaseLLMClient):
"""
Simple client for interacting with the Google API.
Currently only supports the chat completions endpoint.
:param model_name: The name of the model to be used for chat requests.
"""
BASE_GEMINI_ENDPOINT = "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent"
SAFETY_SETTINGS = [
{
"category": "HARM_CATEGORY_HARASSMENT",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"threshold": "BLOCK_NONE",
},
]
def __init__(self) -> None:
super().__init__(api_key_name=GOOGLE_API_KEY_NAME)
@property
def gemini_endpoint(self):
return f"{self.BASE_GEMINI_ENDPOINT}?alt=sse&key={self.api_key}"
@property
def gemini_headers(self):
return {"Content-Type": "application/json"}
def perform_stream_request(self, messages: list[dict]) -> SSEClient:
# info: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events
data = {"contents": messages, "safetySettings": self.SAFETY_SETTINGS}
response = requests.post(
self.gemini_endpoint,
headers=self.gemini_headers, # type: ignore
json=data,
stream=True
)
if response.status_code != 200:
raise ValueError(
f"Server responded with an error. Status Code: {response.status_code}"
)
return SSEClient(event_source=response) # type: ignore
# Path: tests/llms/test_google_handler.py
import json
import unittest
from unittest.mock import MagicMock, Mock, patch
from tinychat.llms.google import GoogleAIHandler, GoogleAIClient
class TestGoogleGeminiHandlerStreaming(unittest.TestCase):
@patch.object(GoogleAIClient, "perform_stream_request")
def test_stream_response(self, mock_perform_stream_request):
# Create a mock SSEClient with a mock events method
mock_sse_client = MagicMock()
mock_stream = iter(
[
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 1"}]}}
]
}
)
),
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 2"}]}}
]
}
)
),
Mock(data="[DONE]"),
]
)
mock_sse_client.events.return_value = mock_stream
mock_perform_stream_request.return_value = mock_sse_client
| handler = GoogleAIHandler() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nickruggeri/hypergraph-message-passing
# Path: src/model/sampling.py
def _community_count_combinations(
n_nodes: int, comm_counts: list[int]
) -> Iterable[list[int]]:
r"""Generate all possible community count vectors :math::`\#`.
Parameters
----------
n_nodes: number of nodes in the hyperedges.
comm_counts: list of community counts.
The entry i of the list specifies the total number of nodes in community i in
the full hypergraph.
Yields
-------
All the possible vectors of community counts :math::`\#`.
"""
K = len(comm_counts)
yield from (
counts
for counts in _community_count_combinations_recursive(n_nodes, comm_counts)
if len(counts) == K
)
# Path: src/model/sampling.py
def _log_n_sharp(comm_counts: list[int], hye_comm_counts: list[int]) -> float:
r"""compute the logarithm of the :math::`N_{\#}` factor.
Parameters
----------
comm_counts: the number of nodes in every community of the hypergraph, as a list of
length K, where K is the number of communities.
hye_comm_counts: the number of nodes in the hyperedge contained in every community,
as a list of length K.
Returns
-------
The value of :math::`N_{\#}`.
"""
if len(comm_counts) != len(hye_comm_counts):
raise ValueError("The inputs have different lengths.")
return sum(
log_binomial_coefficient(a, b) for a, b in zip(comm_counts, hye_comm_counts)
)
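# Worked example (illustrative, not part of the original source): with comm_counts=[3, 2] and
# hye_comm_counts=[2, 1], N_# = C(3, 2) * C(2, 1) = 3 * 2 = 6, so the function returns log(6).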
# Path: src/model/sampling.py
def _sample_hye_from_count(
comm_nodes: dict[int, np.ndarray],
hye_comm_counts: list[int],
rng: np.random.Generator | None,
) -> tuple[int]:
"""Sample a hyperedge given, for every community, the number of nodes in the
hyperedge belonging to the community.
Parameters
----------
comm_nodes: dictionary specifying the nodes belonging to each community in the
hypergraph.
hye_comm_counts: list specifying at every entry i the number of nodes belonging to
community i in the hyperedge to be sampled.
rng: optional numpy random generator, to be utilized for sampling.
Returns
-------
A hyperedge sampled satisfying hye_comm_counts.
"""
if rng is None:
rng = np.random.default_rng()
hye = []
for comm, node_count in zip(comm_nodes, hye_comm_counts):
new_nodes = list(rng.choice(comm_nodes[comm], size=node_count, replace=False))
hye.extend(new_nodes)
return tuple(sorted(map(int, hye)))
# Path: test/model/test_sampling/test_helper_functions.py
import itertools
import numpy as np
import pytest
from collections import Counter
from typing import Dict, List
from scipy import special
from src.model.sampling import (
_community_count_combinations,
_log_n_sharp,
_sample_hye_from_count,
)
n_nodes_all = [2, 5, 10, 25, 50, 100]
rng = np.random.default_rng(seed=123)
hye_comm_counts_all = [
rng.integers(low=0, high=max_val, size=q)
for _ in range(10)
for max_val in [5, 10]
for q in [2, 3, 4, 5]
]
comm_counts_all = sum(
(
[
hye_comm_count + rng.integers(low=0, high=high, size=len(hye_comm_count))
for hye_comm_count in hye_comm_counts_all
]
for high in [1, 5, 10]
),
start=[],
)
hye_comm_counts_all = [list(x) for x in hye_comm_counts_all]
comm_counts_all = [list(x) for x in comm_counts_all]
def generate_communities(comm_counts: List[int]) -> Dict[int, np.ndarray]:
N = sum(comm_counts)
K = len(comm_counts)
rng_tmp = np.random.default_rng(seed=21)
all_nodes = np.arange(N)
rng_tmp.shuffle(all_nodes)
cumcount = [0] + list(np.cumsum(comm_counts))
comm_nodes = dict()
for comm in range(K):
comm_nodes[comm] = all_nodes[cumcount[comm] : cumcount[comm + 1]]
return comm_nodes
commm_nodes_all = [generate_communities(comm_counts) for comm_counts in comm_counts_all]
########################################################################################
# Test _community_count_combinations, _log_n_sharp
@pytest.mark.parametrize(
"n_nodes, hye_comm_counts", itertools.product(n_nodes_all, hye_comm_counts_all)
)
def test_community_count_combinations_brute_force(n_nodes, hye_comm_counts):
all_combinations = itertools.product(*(range(a + 1) for a in hye_comm_counts))
all_combinations = [list(comb) for comb in all_combinations if n_nodes == sum(comb)]
assert sorted(all_combinations) == sorted(
_community_count_combinations(n_nodes, hye_comm_counts)
)
@pytest.mark.parametrize(
"comm_counts, hye_comm_counts",
zip(comm_counts_all, hye_comm_counts_all * 3),
)
def test_log_n_sharp_brute_force(comm_counts, hye_comm_counts):
brute_force = [special.binom(a, b) for a, b in zip(comm_counts, hye_comm_counts)]
brute_force = np.sum(np.log(brute_force))
assert np.allclose(brute_force, _log_n_sharp(comm_counts, hye_comm_counts))
########################################################################################
# Test _sample_hye_from_count
@pytest.fixture(
params=(
(comm_nodes, hye_comm_counts, rng)
for comm_nodes, hye_comm_counts in zip(comm_nodes_all, hye_comm_counts_all * 3)
for rng in [None, np.random.default_rng(seed=34)]
)
)
def sampled_hye_with_info(request):
comm_nodes, hye_comm_counts, rng = request.param
node_to_comm = {node: comm for comm in comm_nodes for node in comm_nodes[comm]}
return (
| _sample_hye_from_count(comm_nodes, hye_comm_counts, rng), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sailfishos-chum/sailfishos-chum.github.io
# Path: chumweb/config.py
CONFIG = init_config()
# Path: chumweb/remote_image.py
class RemoteImage:
"""
An image located on a remote computer that can be downloaded locally
Attributes:
remote_url URL to the icon on a remote server
local_url Path to locally cached (and scaled) version of the icon
"""
remote_url: str
local_path: str | None = None
def __init__(self, remote_url):
self.remote_url = remote_url
# Path: chumweb/package.py
import logging
import enum
import re
from dataclasses import dataclass, field
from datetime import datetime, UTC
from enum import StrEnum
from types import NoneType
from typing import List, Dict, Self, Set, Optional
from markupsafe import Markup
from . import CONFIG
from .remote_image import RemoteImage
from yaml import safe_load as yaml_load
from yaml.parser import ParserError
from yaml.scanner import ScannerError
"""
Data classes for package metadata. This module is also responsible for parsing the metadata of a single package
"""
logger = logging.getLogger(__name__)
class PackageApplicationCategory(StrEnum):
"""
Desktop application categories, from https://specifications.freedesktop.org/menu-spec/latest/apa.html
"""
accessibility = "Accessibility" # Added by Chum?
audio_video = "AudioVideo"
audio = "Audio"
video = "Video"
development = "Development"
education = "Education"
game = "Game"
graphics = "Graphics"
library = "Library" # Added by Chum?
maps = "Maps" # Added by Chum?
network = "Network"
office = "Office"
science = "Science"
settings = "Settings"
system = "System"
utility = "Utility"
other = "Other"
class PackageApplicationType(StrEnum):
"""
Type of the application that the package provides
Enums are based on https://www.freedesktop.org/software/appstream/docs/sect-AppStream-YAML.html#field-dep11-type
"""
generic = enum.auto()
console_application = "console-application"
desktop_application = "desktop-application"
addon = enum.auto()
codec = enum.auto()
inputmethod = enum.auto()
firmware = enum.auto()
@dataclass
class PackageVersion:
epoch: str
ver: str
rel: str
def __init__(self, epoch, ver, rel):
self.epoch = epoch
self.ver = ver
self.rel = rel
def to_short_str(self) -> str:
return self.ver.split('+', 2)[0]
def to_full_str(self) -> str:
return f"{self.ver}-{self.rel}"
@dataclass
class Package:
"""
Metadata of a RPM package with associated Chum metadata
"""
name: str
summary: str | None = None
description: str | Markup | None = None
title: str | None = None
| icon: RemoteImage | None = None |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: oVo-HxBots/URLUploadBot
# Path: Uploader/functions/help_ytdl.py
def get_file_extension_from_url(url):
url_path = urlparse(url).path
basename = os.path.basename(url_path)
return basename.split(".")[-1]
# Path: Uploader/functions/help_ytdl.py
def get_resolution(info_dict):
if {"width", "height"} <= info_dict.keys():
width = int(info_dict['width'])
height = int(info_dict['height'])
# https://support.google.com/youtube/answer/6375112
elif info_dict['height'] == 1080:
width = 1920
height = 1080
elif info_dict['height'] == 720:
width = 1280
height = 720
elif info_dict['height'] == 480:
width = 854
height = 480
elif info_dict['height'] == 360:
width = 640
height = 360
elif info_dict['height'] == 240:
width = 426
height = 240
return (width, height)
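# Hypothetical usage sketch (not in the original helper module): a 720p entry
# without an explicit width falls back to the standard 1280x720 frame size,
# while explicit dimensions are returned unchanged.
if __name__ == "__main__":
    assert get_resolution({"height": 720}) == (1280, 720)
    assert get_resolution({"width": 640, "height": 360}) == (640, 360)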
# Path: Uploader/youtube.py
import os
import wget
import asyncio
from urllib.parse import urlparse
from opencc import OpenCC
from youtube_dl import YoutubeDL
from pyrogram import Client, filters, enums
from pyrogram.types import Message
from pyrogram import Client, filters
from Uploader.config import Config
from sample_config import Config
from Uploader.functions.help_ytdl import get_file_extension_from_url, get_resolution
# MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
else:
YTDL_REGEX = (r"^((?:https?:)?\/\/)")
s2tw = OpenCC('s2tw.json').convert
@Client.on_callback_query(filters.regex("^ytdl_audio$"))
async def callback_query_ytdl_audio(_, callback_query):
try:
url = callback_query.message.reply_to_message.text
ydl_opts = {
'format': 'bestaudio',
'outtmpl': '%(title)s - %(extractor)s-%(id)s.%(ext)s',
'writethumbnail': True
}
with YoutubeDL(ydl_opts) as ydl:
message = callback_query.message
await message.reply_chat_action(enums.ChatAction.TYPING)
info_dict = ydl.extract_info(url, download=False)
# download
await callback_query.edit_message_text("**Downloading audio...**")
ydl.process_info(info_dict)
# upload
audio_file = ydl.prepare_filename(info_dict)
task = asyncio.create_task(send_audio(message, info_dict,
audio_file))
while not task.done():
await asyncio.sleep(3)
await message.reply_chat_action(enums.ChatAction.UPLOAD_DOCUMENT)
await message.reply_chat_action(enums.ChatAction.CANCEL)
await message.delete()
except Exception as e:
await message.reply_text(e)
await callback_query.message.reply_to_message.delete()
await callback_query.message.delete()
async def send_audio(message: Message, info_dict, audio_file):
basename = audio_file.rsplit(".", 1)[-2]
if info_dict['ext'] == 'webm':
audio_file_weba = f"{basename}.weba"
os.rename(audio_file, audio_file_weba)
audio_file = audio_file_weba
thumbnail_url = info_dict['thumbnail']
| thumbnail_file = f"{basename}.{get_file_extension_from_url(thumbnail_url)}" |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Jiawei-Yao0812/PixelFormer_DGR
# Path: pixelformer/networks/utils.py
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
if output_h > input_h or output_w > input_w:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
'the output would more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
if isinstance(size, torch.Size):
size = tuple(int(x) for x in size)
return F.interpolate(input, size, scale_factor, mode, align_corners)
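# Minimal usage sketch (not part of the original utils module): upsampling a
# 4x4 feature map to 8x8 with bilinear interpolation preserves the batch and
# channel dimensions.
if __name__ == "__main__":
    import torch

    feat = torch.randn(1, 3, 4, 4)
    out = resize(feat, size=(8, 8), mode='bilinear', align_corners=False)
    assert out.shape == (1, 3, 8, 8)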
# Path: pixelformer/networks/utils.py
def normal_init(module, mean=0, std=1, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.normal_(module.weight, mean, std)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
# Path: pixelformer/networks/PQI.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from .utils import resize, normal_init
class PPM(nn.ModuleList):
"""Pooling Pyramid Module used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
align_corners (bool): align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
act_cfg, align_corners):
super(PPM, self).__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for pool_scale in pool_scales:
# == if batch size = 1, BN is not supported, change to GN
if pool_scale == 1: norm_cfg = dict(type='GN', requires_grad=True, num_groups=256)
self.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=norm_cfg,
act_cfg=self.act_cfg)))
def forward(self, x):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(x)
| upsampled_ppm_out = resize( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kramerlab/PeerLearning
# Path: suggestionbuffer.py
class SuggestionBuffer:
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def add(self, *args):
self.buffer.append(args)
def sample(self, batch_size):
if len(self.buffer) > batch_size:
return random.sample(self.buffer, batch_size)
# else return None
def latest(self):
return [self.buffer[-1]]
# Path: utils.py
def make_env(env_str, n_envs=1, **env_args):
envs = []
for _ in range(n_envs):
def env_func():
env = Monitor(gym.make(env_str, **env_args))
env.seed(new_random_seed())
return env
envs.append(env_func)
return DummyVecEnv(envs)
# Path: peer.py
from abc import ABC
from typing import Type
from suggestionbuffer import SuggestionBuffer
from utils import make_env
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
import itertools as it
import numpy as np
import torch
lr=0.95, switch_ratio=0, use_advantage=False,
max_peer_epochs=1_000_000_000):
"""
:param peers: An iterable of peer agents
:param lr: The learning rate for trust and agent values
:param switch_ratio: switch_ratio == 0 means no switching
:param use_advantage: use advantage instead of value for AV updates
"""
self.peers = peers
self.lr = lr
self.switch_ratio = switch_ratio
self.active_peer = None # index of currently learning peer
self.solo_epoch = False
self.use_advantage = use_advantage
self.max_peer_epochs = max_peer_epochs
if use_agent_values:
self.agent_values = np.full(len(peers), init_agent_values,
dtype=np.float32)
key = "agent_values"
for peer in peers:
peer.n_peers = len(peers)
peer.group = self
# setup agent values
if use_agent_values:
peer.peer_values[key] = self.agent_values # noqa (Eq. 6)
peer.peer_value_functions[key] = self._update_agent_values
def _update_agent_values(self, batch_size=10):
""" Updates the agent values with samples from the peers' buffers"""
targets = np.zeros_like(self.peers, dtype=np.float32)
counts = np.zeros_like(self.peers, dtype=np.float32)
for peer in self.peers:
bs = batch_size // len(self.peers)
# reward, action, peer, new_obs, old_obs
if peer.buffer is not None:
batch = peer.buffer.sample(bs)
if batch is None: # buffer not sufficiently full
return
obs = np.array([b[3] for b in batch]).reshape(bs, -1)
v = peer.value(obs)
if self.use_advantage:
# previous observations
prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)
prev_v = peer.value(prev_obs)
else:
prev_v = np.zeros_like(v) # no advantage (see Eq. 5)
for i in range(len(batch)): # Eq. 8
target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]
counts[batch[i][2]] += 1
targets[batch[i][2]] += target
# ensure counts are >= 1, don't change these values
targets[counts == 0] = self.agent_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7
def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):
""" The outer peer learning routine. """
assert len(callbacks) == len(self.peers)
# more solo epochs
boost_single = 0 < self.switch_ratio < 1
if boost_single:
self.switch_ratio = 1 / self.switch_ratio
self.solo_epoch = False
peer_epochs = 0
for i in range(n_epochs):
# don't do peer learning forever
if peer_epochs < self.max_peer_epochs:
# ratio of 0 never performs a solo episode
if (i % (1 + self.switch_ratio) == 1) ^ boost_single:
self.solo_epoch = True
else:
peer_epochs += 1
else: # budget spent
self.solo_epoch = True
for p, peer, callback in zip(it.count(), self.peers, callbacks):
self.active_peer = p
peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,
callback=callback, tb_log_name=f"Peer{p}",
reset_num_timesteps=False,
log_interval=None, **kwargs)
# update epoch for temperature decay
peer.epoch += 1
self.active_peer = None
def __len__(self):
return len(self.peers)
def make_peer_class(cls: Type[OffPolicyAlgorithm]):
""" Creates a mixin with the corresponding algorithm class.
:param cls: The learning algorithm (needs to have a callable critic).
:return: The mixed in peer agent class.
"""
class Peer(cls, ABC):
""" Abstract Peer class
needs to be mixed with a suitable algorithm. """
def __init__(self, temperature, temp_decay, algo_args, env,
use_trust=False, use_critic=False, init_trust_values=200,
buffer_size=1000, follow_steps=10, seed=None,
use_trust_buffer=True, solo_training=False,
peers_sample_with_noise=False,
sample_random_actions=False, sample_from_suggestions=True,
epsilon=0.0, env_args=None, only_follow_peers=False):
if env_args is None:
env_args = {}
super(Peer, self).__init__(**algo_args,
| env=make_env(env, **env_args), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: balewgize/skimmit
# Path: users/models.py
class Preference(models.Model):
class AIModels(models.TextChoices):
GPT_3_5 = "gpt-3.5-turbo", "GPT-3.5"
GEMINI_PRO = "gemini-pro", "Gemini Pro"
SENTENCE_COUNT_CHOICES = tuple(zip(range(3, 11), range(3, 11)))
user = models.OneToOneField(User, on_delete=models.CASCADE)
ai_model = models.CharField(
max_length=20, choices=AIModels, default=AIModels.GPT_3_5
)
sentence_count = models.IntegerField(default=5, choices=SENTENCE_COUNT_CHOICES)
# Path: url_summary/forms.py
class ArticleURLForm(forms.Form):
url = forms.URLField(
widget=forms.URLInput(
attrs={
"type": "url",
"class": "form-control",
"placeholder": "Enter URL",
"required": True,
"autofocus": True,
"maxlength": 500,
}
),
)
def clean_url(self):
url = self.cleaned_data["url"]
if "www.youtube.com" in url:
raise forms.ValidationError("Invalid Article URL.")
return url
# Path: url_summary/forms.py
class VideoURLForm(forms.Form):
url = forms.URLField(
widget=forms.URLInput(
attrs={
"type": "url",
"class": "form-control",
"placeholder": "Enter YouTube Video URL",
"required": True,
"autofocus": True,
"maxlength": 100,
}
),
)
def clean_url(self):
try:
url = self.cleaned_data["url"]
if not url.startswith("https://www.youtube.com/watch?v="):
raise
# there may be other parameters in the URL
video_id = url.split("v=")[1].split("&")[0]
if len(video_id) != 11:
raise
except:
video_id = None
if not video_id:
raise forms.ValidationError("Invalid YouTube URL.")
return url
# Path: url_summary/models.py
class URLSummary(models.Model):
"""A class representing URL summary results."""
url = models.URLField(max_length=500)
title = models.CharField(max_length=250, blank=True)
summary = models.TextField(blank=True)
text = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
bookmarks = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name="bookmarked_summaries",
blank=True,
)
ai_model = models.CharField(
max_length=20, choices=Preference.AIModels.choices, blank=True, null=True
)
class Meta:
ordering = ("-created_at",)
verbose_name = "URL Summary"
def __str__(self) -> str:
return f"<URLSummary - {self.url}"
# Path: url_summary/utils/downloader.py
def download_page(url: str) -> tuple[requests.Response, bool]:
"""Download HTML page from URL"""
filename = "user_agents.txt"
user_agents = get_user_agents(filename)
error = False
try:
if user_agents:
headers["user-agent"] = user_agents[0]
response = requests.get(url, headers=headers)
response.raise_for_status()
except Exception as e:
print(e, response.status_code)
error = True
return response, error
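# Hypothetical usage sketch (not in the original module): callers receive the
# raw requests.Response plus an error flag and should check the flag before
# parsing. The URL below is only an example.
if __name__ == "__main__":
    resp, failed = download_page("https://example.com")
    if not failed:
        print(resp.status_code, len(resp.text))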
# Path: url_summary/views.py
import os
import json
import readtime
import google.generativeai as genai
from django.http import JsonResponse
from bs4 import BeautifulSoup
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from openai import OpenAI
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from pytube import YouTube
from users.models import Preference
from .forms import ArticleURLForm, VideoURLForm
from .models import URLSummary
from .utils.downloader import download_page
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
def home(request):
context = {"article_form": ArticleURLForm(), "video_form": VideoURLForm()}
return render(request, "index.html", context=context)
def article_summary(request):
if request.method == "POST":
form = ArticleURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_article_summary(url, user_preference)
context = {"result": summary, "article_form": ArticleURLForm()}
else:
context = {"article_form": form}
context["video_form"] = VideoURLForm()
return render(request, "url_summary/article.html", context=context)
else:
return redirect("url_summary:home")
def video_summary(request):
if request.method == "POST":
form = VideoURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_video_summary(url, user_preference)
context = {"result": summary, "video_form": VideoURLForm()}
else:
context = {"video_form": form}
context["article_form"] = ArticleURLForm()
return render(request, "url_summary/video.html", context=context)
else:
return redirect("url_summary:home")
def get_article_summary(url: str, user_preference: Preference):
"""
Summarize articles by extracting HTML body text.
"""
summary_obj = URLSummary.objects.filter(url=url).first()
if summary_obj:
summary_dict = get_summary_details(summary_obj)
return summary_dict
summary_dict = {}
| response, error = download_page(url) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ZS-YANG/FemtoDet-v3
# Path: mmdet/apis/det_inferencer.py
VOID = None
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
class DetInferencer(BaseInferencer):
def __init__(self,
model: Optional[Union[ModelType, str]] = None,
weights: Optional[str] = None,
device: Optional[str] = None,
scope: Optional[str] = 'mmdet',
palette: str = 'none',
show_progress: bool = True) -> None:
def _load_weights_to_model(self, model: nn.Module,
checkpoint: Optional[dict],
cfg: Optional[ConfigType]) -> None:
def _init_pipeline(self, cfg: ConfigType) -> Compose:
def _get_transform_idx(self, pipeline_cfg: ConfigType,
name: Union[str, Tuple[str, type]]) -> int:
def _init_visualizer(self, cfg: ConfigType) -> Optional[Visualizer]:
def _inputs_to_list(self, inputs: InputsType) -> list:
def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):
def _get_chunk_data(self, inputs: Iterable, chunk_size: int):
def __call__(
self,
inputs: InputsType,
batch_size: int = 1,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
no_save_vis: bool = False,
draw_pred: bool = True,
pred_score_thr: float = 0.3,
return_datasamples: bool = False,
print_result: bool = False,
no_save_pred: bool = True,
out_dir: str = '',
# by open image task
texts: Optional[Union[str, list]] = None,
# by open panoptic task
stuff_texts: Optional[Union[str, list]] = None,
# by GLIP
custom_entities: bool = False,
**kwargs) -> dict:
def visualize(self,
inputs: InputsType,
preds: PredType,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
draw_pred: bool = True,
pred_score_thr: float = 0.3,
no_save_vis: bool = False,
img_out_dir: str = '',
**kwargs) -> Union[List[np.ndarray], None]:
def postprocess(
self,
preds: PredType,
visualization: Optional[List[np.ndarray]] = None,
return_datasamples: bool = False,
print_result: bool = False,
no_save_pred: bool = False,
pred_out_dir: str = '',
**kwargs,
) -> Dict:
def pred2dict(self,
data_sample: DetDataSample,
pred_out_dir: str = '') -> Dict:
# Path: mmdet/utils/typing_utils.py
# Path: projects/XDecoder/xdecoder/inference/texttoimage_regionretrieval_inferencer.py
import copy
import torch
from typing import Iterable, Optional, Union
from mmengine.dataset import Compose
from rich.progress import track
from mmdet.apis.det_inferencer import DetInferencer, InputsType
from mmdet.utils import ConfigType
class TextToImageRegionRetrievalInferencer(DetInferencer):
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
pipeline_cfg = cfg.test_dataloader.dataset.pipeline
# For inference, the key of ``img_id`` is not used.
if 'meta_keys' in pipeline_cfg[-1]:
pipeline_cfg[-1]['meta_keys'] = tuple(
meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
if meta_key != 'img_id')
load_img_idx = self._get_transform_idx(pipeline_cfg,
'LoadImageFromFile')
if load_img_idx == -1:
raise ValueError(
'LoadImageFromFile is not found in the test pipeline')
pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
retrieval_pipeline = Compose(pipeline_cfg)
grounding_pipeline_cp = copy.deepcopy(pipeline_cfg)
grounding_pipeline_cp[1].scale = cfg.grounding_scale
grounding_pipeline = Compose(grounding_pipeline_cp)
return {
'grounding_pipeline': grounding_pipeline,
'retrieval_pipeline': retrieval_pipeline
}
def _get_chunk_data(self, inputs: Iterable, pipeline, chunk_size: int):
"""Get batch data from inputs.
Args:
inputs (Iterable): An iterable dataset.
chunk_size (int): Equivalent to batch size.
Yields:
list: batch data.
"""
inputs_iter = iter(inputs)
while True:
try:
chunk_data = []
for _ in range(chunk_size):
inputs_ = next(inputs_iter)
chunk_data.append(
(inputs_, pipeline(copy.deepcopy(inputs_))))
yield chunk_data
except StopIteration:
if chunk_data:
yield chunk_data
break
def preprocess(self,
| inputs: InputsType, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mit-ll-ai-technology/maite
# Path: src/maite/_internals/interop/huggingface/base.py
class BaseHFModel(nn.Module, BaseModel):
def __init__(
self,
model_name: str,
model: Union[HuggingFaceWithLogits, HuggingFaceWithDetection],
processor: Optional[HuggingFaceProcessor] = None,
post_processor: Optional[HuggingFaceObjectDetectionPostProcessor] = None,
) -> None:
def get_labels(self) -> Sequence[str]:
# Path: src/maite/_internals/interop/huggingface/typing.py
class HuggingFacePredictions:
scores: Tensor
labels: Optional[Union[Tensor, Sequence[Sequence[str]]]] = None
# Path: src/maite/_internals/interop/huggingface/typing.py
class HuggingFaceProbs:
probs: Tensor
# Path: src/maite/_internals/interop/huggingface/typing.py
class HuggingFaceProcessor(Protocol):
def __call__(
self,
images: Sequence[ArrayLike],
return_tensors: Union[bool, str] = False,
**kwargs: Any,
) -> BatchFeature:
...
# Path: src/maite/_internals/interop/huggingface/typing.py
class HuggingFaceWithLogits(HuggingFaceModule, Protocol):
def __call__(
self, pixel_values: Union[ArrayLike, Sequence[ArrayLike]], **kwargs: Any
) -> HasLogits:
...
# Path: src/maite/_internals/interop/huggingface/image_classifier.py
from typing import TYPE_CHECKING, Any, List, Optional, Union, cast
from typing_extensions import Self
from maite.errors import InvalidArgument
from maite.protocols import HasDataImage, HasLogits, SupportsArray
from .base import BaseHFModel, InteropModelMetadata
from .typing import (
HuggingFacePredictions,
HuggingFaceProbs,
HuggingFaceProcessor,
HuggingFaceWithLogits,
)
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
import torch as tr
# Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
__all__ = ["HuggingFaceImageClassifier"]
class HuggingFaceImageClassifier(BaseHFModel):
"""
Wrapper for HuggingFace image classification models.
This interface uses `AutoFeatureExtractor` and `AutoModelForImageClassification`
to load the HuggingFace models.
"""
metadata: InteropModelMetadata
def __init__(
self,
model_name: str,
model: HuggingFaceWithLogits,
processor: Optional[HuggingFaceProcessor] = None,
top_k: Optional[int] = None,
) -> None:
"""
Initialize HuggingFaceImageClassifier.
Parameters
----------
model_name: str
A HuggingFace model name (model id on the hub), e.g. "microsoft/resnet-50"
processor : HuggingFaceProcessor
A HuggingFace feature extractor for a given model.
model : HuggingFaceModel
A HuggingFace image classification model.
Examples
--------
>>> from transformers import AutoFeatureExtractor, AutoModelForImageClassification
>>> processor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50")
>>> model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50")
>>> hf_model = HuggingFaceImageClassifier("microsoft/resnet-50", model, processor)
"""
super().__init__(model_name=model_name, model=model, processor=processor)
self._top_k = top_k
self.metadata = InteropModelMetadata(
model_name=model_name, provider="HuggingFace", task="Image Classification"
)
def preprocessor(
self,
data: SupportsArray,
) -> HasDataImage:
"""
Preprocess images for a HuggingFace image classifier.
Parameters
----------
data : SupportsArray
The images to preprocess.
Returns
-------
HasDataImage
A dict with an "image" key holding the preprocessed image tensor.
Examples
--------
"""
assert self._processor is not None, "No processor was provided."
assert isinstance(data, (list, tuple))
image_features = self._processor(images=data, return_tensors="pt")[
"pixel_values"
]
assert isinstance(image_features, tr.Tensor)
return {"image": image_features}
def post_processor(
self, outputs: HasLogits
| ) -> Union[HuggingFacePredictions, HuggingFaceProbs]: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: djcopley/ShellOracle
# Path: src/shelloracle/provider.py
class Provider(Protocol):
"""
LLM Provider Protocol
All LLM backends must implement this interface.
"""
name: str
@abstractmethod
def generate(self, prompt: str) -> AsyncIterator[str]:
"""
This is an asynchronous generator method which defines the protocol that a provider implementation
should adhere to. The method takes a prompt as an argument and produces an asynchronous stream
of string results.
:param prompt: A string value which serves as input to the provider's process of generating results.
:return: An asynchronous generator yielding string results.
"""
# If you are wondering why the 'generate' signature doesn't include 'async', see
# https://mypy.readthedocs.io/en/stable/more_types.html#asynchronous-iterators
# Path: src/shelloracle/provider.py
class ProviderError(Exception):
"""LLM providers raise this error to gracefully indicate something has gone wrong."""
# Path: src/shelloracle/config/setting.py
class Setting(Generic[T]):
def __init__(self, *, name: str | None = None, default: T | None = None) -> None:
self.name = name
self.default = default
def __set_name__(self, owner: type[Provider], name: str) -> None:
if not self.name:
self.name = name
# Set the default value in the config dictionary if it doesn't exist
provider_table = config.global_config.get("provider", {})
provider_table.setdefault(owner.name, {}).setdefault(name, self.default)
config.global_config["provider"] = provider_table
def __get__(self, instance: Provider, owner: type[Provider]) -> T:
return config.global_config.get("provider", {}).get(instance.name, {})[self.name]
def __set__(self, instance: Provider, value: T) -> None:
config.global_config.setdefault("provider", {}).setdefault(instance.name, {})[self.name] = value
# Path: src/shelloracle/providers/ollama.py
from __future__ import annotations

import json
import httpx
from dataclasses import dataclass, asdict
from typing import Any, AsyncIterator
from ..provider import Provider, ProviderError
from ..config import Setting
def dataclass_to_json(obj: Any) -> dict[str, Any]:
"""Convert dataclass to a json dict
This function filters out 'None' values.
:param obj: the dataclass to serialize
:return: serialized dataclass
:raises TypeError: if obj is not a dataclass
"""
return {k: v for k, v in asdict(obj).items() if v is not None}
@dataclass
class GenerateRequest:
model: str
"""(required) the model name"""
prompt: str | None = None
"""the prompt to generate a response for"""
images: list[str] | None = None
"""a list of base64-encoded images (for multimodal models such as llava)"""
format: str | None = None
"""the format to return a response in. Currently the only accepted value is json"""
options: dict | None = None
"""additional model parameters listed in the documentation for the Modelfile such as temperature"""
system: str | None = None
"""system prompt to (overrides what is defined in the Modelfile)"""
template: str | None = None
"""the full prompt or prompt template (overrides what is defined in the Modelfile)"""
context: str | None = None
"""the context parameter returned from a previous request to /generate, this can be used to keep a short
conversational memory"""
stream: bool | None = None
"""if false the response will be returned as a single response object, rather than a stream of objects"""
raw: bool | None = None
"""if true no formatting will be applied to the prompt and no context will be returned. You may choose to use
the raw parameter if you are specifying a full templated prompt in your request to the API, and are managing
history yourself. JSON mode"""
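# Illustrative sketch (not part of the original module): None-valued fields
# are dropped, so a request that only sets model, prompt and stream serializes
# to exactly those three keys. The prompt text is made up for the example.
if __name__ == "__main__":
    _demo_request = GenerateRequest(model="codellama:13b", prompt="list files", stream=True)
    assert dataclass_to_json(_demo_request) == {
        "model": "codellama:13b",
        "prompt": "list files",
        "stream": True,
    }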
class Ollama(Provider):
name = "Ollama"
host = Setting(default="localhost")
port = Setting(default=11434)
model = Setting(default="codellama:13b")
system_prompt = Setting(
default=(
"Based on the following user description, generate a corresponding Bash command. Focus solely "
"on interpreting the requirements and translating them into a single, executable Bash command. "
"Ensure accuracy and relevance to the user's description. The output should be a valid Bash "
"command that directly aligns with the user's intent, ready for execution in a command-line "
"environment. Output nothing except for the command. No code block, no English explanation, "
"no start/end tags."
)
)
@property
def endpoint(self) -> str:
# computed property because python descriptors need to be bound to an instance before access
return f"http://{self.host}:{self.port}/api/generate"
async def generate(self, prompt: str) -> AsyncIterator[str]:
request = GenerateRequest(self.model, prompt, system=self.system_prompt, stream=True)
data = dataclass_to_json(request)
try:
async with httpx.AsyncClient() as client:
async with client.stream("POST", self.endpoint, json=data, timeout=20.0) as stream:
async for line in stream.aiter_lines():
response = json.loads(line)
if "error" in response:
| raise ProviderError(response["error"]) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: juniberry/PacketIRC
# Path: settings.py
LOG_FILE = "packetirc.log"
# Path: settings.py
LOG_LEVEL = logging.INFO
# Path: settings.py
SERVER = ""
# Path: settings.py
PORT = 6667
# Path: settings.py
PASS = ""
# Path: settings.py
CHANNEL = "#Testing"
# Path: settings.py
HIDE_SERVER = True
# Path: settings.py
MAX_RETRIES = 3
# Path: settings.py
RETRY_DELAY = 5 # seconds
# Path: settings.py
HELP_INFO = """
PacketIRC commands:
/quit [message] - Disconnect from the server with optional message.
/msg <nickname> <message> - Send a private message to the specified user.
/join <channel> - Join the specified channel.
/names - Shows a list of users in the channel.
/topic [new topic] - Set a new topic for the current channel or request the topic.
/away [message] - Set an away message or clear the away status.
/whois <nickname> - Retrieves information about the specified user.
/help - Display this help message.
"""
# Path: settings.py
WELCOME_MESSAGE = """
Welcome to PacketIRC!
Type /help for a list of commands.
"""
# Path: settings.py
BAD_WORDS_FILE = "bad_words.txt"
# Path: settings.py
BAD_WORDS_FILTER = False
# Path: packetirc.py
import socket
import threading
import random
import time
import logging
import re
import irc.client
import os
import sys
from settings import LOG_FILE, LOG_LEVEL, SERVER, PORT, PASS, CHANNEL, HIDE_SERVER, MAX_RETRIES, RETRY_DELAY, HELP_INFO, WELCOME_MESSAGE, BAD_WORDS_FILE, BAD_WORDS_FILTER
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
______ _ _____ ______ ______
(_____ \ | | _ (_____|_____ \ / _____)
_____) )___ ____| | _ ____| |_ _ _____) ) /
| ____/ _ |/ ___) | / ) _ ) _) | | (_____ (| |
| | ( ( | ( (___| |< ( (/ /| |__ _| |_ | | \_____
|_| \_||_|\____)_| \_)____)\___|_____) |_|\______)
PacketIRC is a bandwidth-conscious IRC client specifically designed for packet radio communication.
It includes a client-side implementation with simplified IRC functionalities.
File: client.py
Author: Daria Juniper @juniberry
Date: 10-Dec-2023
Changes:
12-Dec-2023 - Initial version 1.0 beta.
"""
# Import settings from an external configuration file.
# Globals
VERSION = 'v1.1b'
BAD_WORDS = []
HOME_PATH = os.path.dirname(os.path.abspath(__file__)) # Grab home path for use with logging et al.
# State
is_running = True
# Initialize logging.
| logging.basicConfig(filename=os.path.join(HOME_PATH, LOG_FILE), filemode='w', level=LOG_LEVEL, format='%(asctime)s - %(levelname)s - %(message)s') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Tps-F/rvc-onnx-test
# Path: onnxlib/commons.py
def init_weights(m, mean=0.0, std=0.01):
def get_padding(kernel_size, dilation=1):
def kl_divergence(m_p, logs_p, m_q, logs_q):
def rand_gumbel(shape):
def rand_gumbel_like(x):
def slice_segments(x, ids_str, segment_size=4):
def slice_segments2(x, ids_str, segment_size=4):
def rand_slice_segments(x, x_lengths=None, segment_size=4):
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
def subsequent_mask(length):
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:
def shift_1d(x):
def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):
def generate_path(duration, mask):
def clip_grad_value_(parameters, clip_value, norm_type=2):
# Path: onnxlib/modules.py
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
class ConvReluNorm(nn.Module):
class DDSConv(nn.Module):
class WN(torch.nn.Module):
class ResBlock1(torch.nn.Module):
class ResBlock2(torch.nn.Module):
class Log(nn.Module):
class Flip(nn.Module):
class ElementwiseAffine(nn.Module):
class ResidualCouplingLayer(nn.Module):
class ConvFlow(nn.Module):
def __init__(self, channels, eps=1e-5):
def forward(self, x):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
def forward(self, x, x_mask):
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
def forward(self, x, x_mask, g: Optional[torch.Tensor] = None):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
def forward(
self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None
):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
def forward(self, x: torch.Tensor, x_mask: Optional[torch.Tensor] = None):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
def forward(self, x, x_mask: Optional[torch.Tensor] = None):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
def __init__(self, channels):
def forward(self, x, x_mask, reverse=False, **kwargs):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse: bool = False,
):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def __init__(
self,
in_channels,
filter_channels,
kernel_size,
n_layers,
num_bins=10,
tail_bound=5.0,
):
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse=False,
):
# Path: onnxlib/modules.py
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super(LayerNorm, self).__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
# Path: onnxlib/attentions.py
import math
import torch
from typing import Optional
from torch import nn
from torch.nn import functional as F
from onnxlib import commons, modules
from onnxlib.modules import LayerNorm
class Encoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=10,
**kwargs
):
super(Encoder, self).__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = int(n_layers)
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
)
| self.norm_layers_1.append(LayerNorm(hidden_channels)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zhenqincn/FedKSeed
# Path: utils_data/default_tokens.py
class DefaultToken(Enum):
PAD_TOKEN = "[PAD]"
EOS_TOKEN = "</s>"
BOS_TOKEN = "<s>"
UNK_TOKEN = "<unk>"
IGNORE_INDEX = -100
# Path: utils_data/partition_data.py
def partition_idx_labeldir(y, n_parties, alpha, num_classes):
min_size = 0
min_require_size = 10
K = num_classes
N = y.shape[0]
net_dataidx_map = {}
while min_size < min_require_size:
idx_batch = [[] for _ in range(n_parties)]
for k in range(K):
idx_k = np.where(y == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(alpha, n_parties))
# Balance
proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)])
proportions = proportions / proportions.sum()
proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
for j in range(n_parties):
np.random.shuffle(idx_batch[j])
net_dataidx_map[j] = idx_batch[j]
return net_dataidx_map
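# Hypothetical usage sketch (not in the original module): a Dirichlet
# concentration of 0.5 yields a skewed, non-IID split, but every sample is
# still assigned to exactly one client.
if __name__ == "__main__":
    import numpy as np

    labels = np.random.default_rng(0).integers(0, 10, size=1000)
    mapping = partition_idx_labeldir(labels, n_parties=4, alpha=0.5, num_classes=10)
    assert sum(len(v) for v in mapping.values()) == len(labels)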
# Path: utils_data/load_data.py
import numpy as np
import torch
from torch.utils.data import DataLoader, Subset
from transformers import AutoTokenizer
from utils_data.default_tokens import DefaultToken
from utils_data.partition_data import partition_idx_labeldir
from collections import Counter
from utils_data.llm_dataset import LLMDataset, LLMDataCollator
from utils_data.natural_instruction_loader import get_instruction_dataset
def get_loaders(args, only_eval=False):
"""
Return: list of train_loaders, eval_loader
"""
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
tokenizer.model_max_length = args.max_length
special_tokens = dict()
if tokenizer.pad_token is None:
special_tokens["pad_token"] = DefaultToken.PAD_TOKEN.value
if tokenizer.eos_token is None:
special_tokens["eos_token"] = DefaultToken.EOS_TOKEN.value
if tokenizer.bos_token is None:
special_tokens["bos_token"] = DefaultToken.BOS_TOKEN.value
if tokenizer.unk_token is None:
special_tokens["unk_token"] = DefaultToken.UNK_TOKEN.value
tokenizer.add_special_tokens(special_tokens)
# Generation task
if args.dataset == 'dolly':
if args.eval_metric == 'loss':
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=False)
else:
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=True)
data_collator = LLMDataCollator(tokenizer=tokenizer)
# only use a subset of raw dataset
raw_datasets, _ = torch.utils.data.dataset.random_split(raw_datasets, [int(len(raw_datasets) * args.dataset_subsample), len(raw_datasets) - int(len(raw_datasets) * args.dataset_subsample)])
y_all = np.array([item['categories'] for item in raw_datasets])
index_eval = np.where(y_all == args.zerotask)[0]
# delete the indices of eval samples from the all set
index_train = np.delete(np.arange(len(y_all)), index_eval)
raw_datasets = np.array(raw_datasets)
train_set = raw_datasets[index_train]
eval_set = raw_datasets[index_eval]
y_train = np.array([item['categories'] for item in train_set])
counter = Counter(y_train)
noniid = args.iid
if 'dir' in noniid:
| split_dic = partition_idx_labeldir(y_train, n_parties=args.num_clients, alpha=float(noniid[3:]), num_classes=len(counter)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: merlresearch/PixPNet
# Path: pixpnet/utils.py
def get_logger(name):
logging.basicConfig(
format="%(asctime)s[%(process)d][%(levelname)s] %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
logger = logging.getLogger(name)
logger.setLevel(os.environ.get("PIXPNET_LOG_LEVEL", "INFO"))
return logger
# Path: pixpnet/utils.py
def intersect_func_and_kwargs(func, kwargs, exclude_func_args=None, exclude_kwargs=None, return_invalid=True):
func_args = {*get_all_func_args(func)} - (set() if exclude_func_args is None else {*exclude_func_args})
if isinstance(kwargs, argparse.Namespace):
kwargs = vars(kwargs)
kwargs_keys = {*kwargs.keys()} - (set() if exclude_kwargs is None else {*exclude_kwargs})
intersecting_keys = kwargs_keys & func_args
intersected_dict = {k: kwargs[k] for k in intersecting_keys}
if return_invalid:
return intersected_dict, kwargs_keys - func_args
return intersected_dict
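# Hypothetical usage sketch (not in the original module), assuming
# get_all_func_args (defined elsewhere in pixpnet.utils) returns the target's
# parameter names: matching keys are kept, the rest are reported as invalid.
if __name__ == "__main__":
    def _demo_optim(lr, momentum=0.9):
        return lr, momentum

    hparams, invalid = intersect_func_and_kwargs(_demo_optim, {"lr": 0.1, "foo": 1})
    assert hparams == {"lr": 0.1} and invalid == {"foo"}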
# Path: pixpnet/optim.py
import argparse
import inspect
import re
import torch
from typing import Any, Dict, Optional, Set, Tuple, Type
from pytorch_warmup import ExponentialWarmup
from pytorch_warmup.base import BaseWarmup
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR, StepLR
from pixpnet.utils import get_logger, intersect_func_and_kwargs
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later
logger = get_logger(__name__)
_OPTIMIZER_MAP = {attr: getattr(torch.optim, attr) for attr in dir(torch.optim) if attr != "Optimizer"}
_OPTIMIZER_MAP = {attr: cls for attr, cls in _OPTIMIZER_MAP.items() if inspect.isclass(cls)}
_LOOSE_OPTIMIZER_MAP = {}
for _attr, _cls in _OPTIMIZER_MAP.items():
_attr_split = re.split(r"(?=(?<!^)[A-Z][a-z]|(?<![A-Z])[A-Z]$)", _attr)
_attr_lower = "".join(map(str.lower, _attr_split))
_attr_lower_ = "_".join(map(str.lower, _attr_split))
if _attr_lower in _LOOSE_OPTIMIZER_MAP or _attr_lower_ in _LOOSE_OPTIMIZER_MAP:
_cls_existing = _LOOSE_OPTIMIZER_MAP.get(_attr_lower, _LOOSE_OPTIMIZER_MAP.get(_attr_lower_))
raise RuntimeError(
f"{_attr_lower} already in optimizers! Overlapping class names in "
f"lowercase was unexpected and cannot be resolved: "
f"{_cls_existing} and {_cls}"
)
_LOOSE_OPTIMIZER_MAP[_attr_lower] = _cls
if _attr_lower != _attr_lower_:
_LOOSE_OPTIMIZER_MAP[_attr_lower_] = _cls
def get_optimizer_cls(
config: argparse.Namespace,
ignore: Optional[Set[str]] = None,
) -> Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]:
if ignore is None:
ignore = set()
try:
optimizer_cls = _LOOSE_OPTIMIZER_MAP[config.optimizer.name.lower()]
except KeyError:
raise ValueError(f'No such optimizer "{config.optimizer.name}"')
| hparams, invalid_keys = intersect_func_and_kwargs( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dhh1995/MeGraph
# Path: megraph/io_utils.py
def get_default_config(args):
dataset_name = args.dataset_name
dataset_subname = args.dataset_subname
model_name = args.model
conv_name = args.layer
# Config
cfg_file = args.config_file
if cfg_file is not None:
config = read_config_file(cfg_file)
if config is None:
print(
f"[Warning] Could not find {cfg_file}, "
"falling back to default config files."
)
cfg_file = None
else:
config["config_file"] = cfg_file
if cfg_file is None:
cfg_files = get_default_config_filenames(
model_name, conv_name, dataset_name, dataset_subname
)
config = {}
found_files = []
for f in cfg_files:
new_config = read_config_file(f, folder=args.configs_dir)
if new_config is not None:
print(f"Overwrite default config using {f}:")
print(new_config)
config.update(new_config)
found_files.append(f)
config["config_file"] = found_files
return config
# Path: megraph/io_utils.py
def get_raw_cmdline():
with open("/proc/self/cmdline") as f:
x = f.readlines()
if x is None or len(x) == 0:
return None
return x[0].replace("\x00", " ")
# Path: megraph/args_utils.py
import git
from .io_utils import get_default_config, get_raw_cmdline
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : args.py
# Author : Honghua Dong
# Email : [email protected]
#
# Distributed under terms of the MIT license.
__all__ = ["ArgsBuilder", "add_git_and_cmd_line_info", "get_args_and_model"]
class ArgsBuilder(object):
"""A meta-class to be inherit that support args register and setup from args"""
__hyperparams__ = []
__parser__ = None
__prefix__ = "--"
@classmethod
def _set_parser_and_prefix(cls, parser, prefix):
cls.__parser__ = parser
if prefix is None:
prefix = "--"
else:
prefix = f"--{prefix}-"
cls.__prefix__ = prefix
@classmethod
def _add_argument(cls, name, *args, **kwargs):
cls.__hyperparams__.append(name)
name = name.replace("_", "-")
cls.__parser__.add_argument(cls.__prefix__ + name, *args, **kwargs)
@classmethod
def from_args(cls, args, prefix=None, **kwargs):
if prefix is None:
prefix = ""
else:
prefix = str(prefix) + "_"
print(f"From Args: {cls.__name__} with {kwargs}")
init_params = {k: getattr(args, prefix + k) for k in cls.__hyperparams__}
init_params.update(kwargs)
return cls(**init_params)
def add_git_and_cmd_line_info(args):
| args.raw_cmdline = get_raw_cmdline() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SJTU-Quant/SUNNY-GNN
# Path: train/train_baseline.py
def train(cfg):
def train_explain(cfg):
# Path: train/train_gnn.py
def train(cfg):
# Path: train/train_hgn.py
def train(cfg):
# Path: main.py
import argparse
import yaml
import os
import torch
import random
import copy
import numpy as np
from train import train_baseline, train_gnn, train_hgn
from tools.get_data import get_dataset
def parse_args():
parser = argparse.ArgumentParser(description="Self-explainable GNN/HGN")
parser.add_argument('--method', type=str, default='snexgnn',
help='self-explainable GNN/HGN type',
choices=['snexgnn', 'snexhgn', 'gat', 'gcn', 'simplehgn'])
parser.add_argument('--encoder', type=str, default='gat',
help='GNN/HGN encoder type',
choices=['gat', 'gcn', 'simplehgn'])
parser.add_argument('--dataset', type=str, default='citeseer',
help='dataset name',
choices=['citeseer', 'cora', 'pubmed',
'amazon-photo', 'coauthor-physics', 'coauthor-cs',
'imdb', 'dblp', 'acm'])
parser.add_argument('--gpu', type=int, default=0, help='gpu id')
parser.add_argument('--num_seeds', type=int, default=1, help='number of random seeds')
parser.add_argument('--eval_explanation', type=bool, default=False,
help='whether to evaluate explanation fidelity')
return parser.parse_args()
class Config(object):
def __init__(self, args):
abs_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(abs_dir, 'log')
os.makedirs(log_dir, exist_ok=True)
data_dir = os.path.join(abs_dir, 'dataset', args.dataset)
self.method = args.method
self.encoder_type = args.encoder
self.dataset = args.dataset
self.abs_dir = abs_dir
self.data_dir = data_dir
self.gpu = args.gpu
self.index = None
self.graph_path = f'{data_dir}/{args.dataset}_graph.bin'
self.index_path = f'{data_dir}/{args.dataset}_index.bin'
self.check_dataset()
self.ckpt_dir = os.path.join(abs_dir, 'ckpt')
self.hyparams = self.load_hyperparams(args)
self.eval_explanation = args.eval_explanation
def check_dataset(self):
if not os.path.exists(self.graph_path):
get_dataset(self.dataset, self.data_dir)
def load_hyperparams(self, args):
yml_path = os.path.join(self.abs_dir, 'configs', f'{args.dataset}.yml')
with open(yml_path, 'r') as f:
hyperparams = yaml.load(f, Loader=yaml.FullLoader)
return hyperparams
def set_seed(self, seed):
self.seed = seed
self.encoder_path = f'{self.ckpt_dir}/{self.dataset}/{self.encoder_type}-seed-{seed}-pretrain.pt'
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main():
results = {}
for seed in range(args.num_seeds):
setup_seed(seed)
cfg.set_seed(seed)
print(f'===========seed: {seed}===========')
if cfg.method in ['snexgnn', 'snexhgn']:
print(f"Dataset: {cfg.dataset}, Method: {cfg.method}-{cfg.encoder_type}")
if not os.path.exists(cfg.encoder_path):
print(f"Pretrain {cfg.encoder_type}...")
cfg_cp = copy.deepcopy(cfg)
cfg_cp.method = cfg_cp.encoder_type
| train_gnn.train(cfg_cp) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dvmazur/mixtral-offloading
# Path: src/utils.py
def nested_flatten(t):
"""
Turn nested list/tuple/dict into a flat iterator.
"""
if isinstance(t, (list, tuple)):
for x in t:
yield from nested_flatten(x)
elif isinstance(t, dict):
for k, v in sorted(t.items()):
yield from nested_flatten(v)
else:
yield t
# Path: src/utils.py
def nested_pack(flat, structure):
"""
Restore nested structure from flattened state
:param flat: result of nested_flatten
:param structure: used as example when recovering structure
:returns: nested structure like :structure: filled with elements of :flat:
"""
return _nested_pack(iter(flat), structure)
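# Illustrative sketch (not part of the original utils module), assuming
# _nested_pack mirrors nested_flatten: flattening and re-packing against the
# same template is a lossless round trip.
if __name__ == "__main__":
    structure = {"a": [1, 2], "b": (3, {"c": 4})}
    flat = list(nested_flatten(structure))
    assert flat == [1, 2, 3, 4]
    assert nested_pack(flat, structure) == structure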
# Path: src/expert_wrapper.py
import typing as tp
import torch
from torch import nn
from .utils import nested_flatten, nested_pack
class MixtralExpertWrapper(nn.Module):
def __init__(
self,
expert_module: tp.Any,
device: torch.device,
):
super().__init__()
expert_module, self.storage = self.replace_layer_storage(expert_module, device)
self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)
self._register_state_dict_hook(self._add_storage_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)
@staticmethod
def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):
state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)
return state_dict
def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())
del state_dict[prefix + 'storage']
def forward(self, *args, **kwargs):
return self.expert_module(*args, **kwargs)
@staticmethod
def replace_layer_storage(
layer: tp.Any,
device: torch.device,
):
state_dict = {
f"w{i}": {
"W_q": getattr(layer, f"w{i}").W_q,
"meta": getattr(layer, f"w{i}").meta,
"bias": getattr(layer, f"w{i}").bias,
}
for i in range(1, 4)
}
storage_size = 0
offsets = [0]
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
continue
storage_size += x.nbytes
offsets.append(storage_size)
storage = torch.UntypedStorage(storage_size, device=device)
i = 0
new_flattened_states = list()
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
new_flattened_states.append(x)
continue
start = offsets[i]
end = offsets[i + 1]
a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)
a_view[...] = x
assert a_view.data_ptr() == storage.data_ptr() + start
i += 1
new_flattened_states.append(a_view)
| state_dict = nested_pack(new_flattened_states, state_dict) |
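The nested_flatten / nested_pack pair summarized at the top of this snippet is easiest to see on a toy structure. The sketch below only assumes the behavior stated in their docstrings (flatten walks dicts in sorted-key order, pack refills the same structure); it is illustrative, not part of the repository.
from src.utils import nested_flatten, nested_pack  # import path as shown above

state = {"w2": {"meta": 4, "W_q": 3}, "w1": {"meta": 2, "W_q": 1}}
flat = list(nested_flatten(state))            # [1, 2, 3, 4] -- "W_q" sorts before "meta"
rebuilt = nested_pack(flat, structure=state)  # same nesting, refilled from the flat iterator
assert rebuilt == state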
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CircleRadon/Osprey
# Path: osprey/train/train.py
def preprocess(
sources: Sequence[str],
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False
) -> Dict:
"""
Given a list of sources, each is a conversation list. This transform:
1. Add signal '### ' at the beginning each sentence, with end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
"""
if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:
return preprocess_plain(sources, tokenizer)
if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.LLAMA_2:
return preprocess_llama_2(sources, tokenizer, has_image=has_image)
if conversation_lib.default_conversation.version.startswith("v1"):
return preprocess_v1(sources, tokenizer, has_image=has_image)
# add end signal and concatenate together
conversations = []
for source in sources:
header = f"{conversation_lib.default_conversation.system}\n\n"
conversation = _add_speaker_and_signal(header, source)
conversations.append(conversation)
# tokenize conversations
def get_tokenize_len(prompts):
return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]
if has_image:
input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
else:
conversations_tokenized = _tokenize_fn(conversations, tokenizer)
input_ids = conversations_tokenized["input_ids"]
targets = copy.deepcopy(input_ids)
for target, source in zip(targets, sources):
if has_image:
tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
else:
tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"]
speakers = [sentence["from"] for sentence in source]
_mask_targets(target, tokenized_lens, speakers)
return dict(input_ids=input_ids, labels=targets)
# Path: osprey/train/train.py
def preprocess_multimodal(
sources: Sequence[str],
data_args: DataArguments,
cur_token_len: int = 0
) -> Dict:
for source in sources:
for sentence in source:
if DEFAULT_IMAGE_TOKEN in sentence['value']:
sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value']
sentence['value'] = sentence['value'].strip()
if "mmtag" in conversation_lib.default_conversation.version:
sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '<Image>' + DEFAULT_IMAGE_TOKEN + '</Image>')
replace_token = DEFAULT_IMAGE_TOKEN
if data_args.mm_use_im_start_end:
replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)
return sources
# Path: osprey/datasets/stage2_data.py
import copy
import os
import random
import numpy as np
import torch
from osprey.train.train import preprocess, preprocess_multimodal
from torch.utils.data import Dataset
from pycocotools.coco import COCO
from pycocotools import mask as maskUtils
from PIL import Image
class CustomDataset(Dataset):
def __init__(self,
tokenizer=None,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=20,
):
self.data_args = data_args
self.tokenizer = tokenizer
self.max_gt_per_img = max_gt_per_img
self.img_prefix = img_prefix
self.data_infos = self.load_annotations(ann_file)
super().__init__()
def __len__(self):
return len(self.data_infos)
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.img_ids = self.coco.getImgIds()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
info['height'] = int(info['height'])
info['width'] = int(info['width'])
ann_ids = self.coco.getAnnIds(imgIds=[i])
ann_info = self.coco.loadAnns(ann_ids)
if len(ann_info)==0:
continue
data_infos.append(info)
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return ann_info
def annToMask(self, mask_ann, h, w):
if isinstance(mask_ann, list):
rles = maskUtils.frPyObjects(mask_ann, h, w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, h, w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def process_text(self, data_item):
image = data_item['img']
ori_labels = data_item['gt_labels']
ori_masks = np.array(data_item['gt_masks'])
ori_masks = torch.from_numpy(ori_masks)
shuffle_ids = torch.randperm(len(ori_labels))
if len(shuffle_ids) > self.max_gt_per_img:
shuffle_ids = shuffle_ids[:self.max_gt_per_img]
ori_masks = ori_masks[shuffle_ids]
ori_labels = [ori_labels[i] for i in shuffle_ids]
sources = dict()
sources['conversations'] = []
# print("num:",len(ori_labels))
for i in range(len(ori_labels)):
question = '<region>'
question = question.replace('<region>', '<mask><pos>')
if i == 0:
question = self.begin_str + question
answer = ori_labels[i]
sources['conversations'].append(
{'from': 'human', 'value': question})
sources['conversations'].append({'from': 'gpt', 'value': answer})
cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16)
assert image.shape[1] == image.shape[2]
# a hard code [] for sources
sources = preprocess_multimodal(
copy.deepcopy([sources['conversations']]),
self.data_args,
cur_token_len)
# print(sources)
| data_dict = preprocess( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: open-mmlab/PIA
# Path: animatediff/utils/util.py
def zero_rank_print(s):
if (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0): print("### " + s)
# Path: animatediff/utils/util.py
def detect_edges(lum: np.ndarray) -> np.ndarray:
"""Detect edges using the luma channel of a frame.
Arguments:
lum: 2D 8-bit image representing the luma channel of a frame.
Returns:
2D 8-bit image of the same size as the input, where pixels with values of 255
represent edges, and all other pixels are 0.
"""
# Initialize kernel.
kernel_size = estimated_kernel_size(lum.shape[1], lum.shape[0])
kernel = np.ones((kernel_size, kernel_size), np.uint8)
# Estimate levels for thresholding.
# TODO(0.6.3): Add config file entries for sigma, aperture/kernel size, etc.
sigma: float = 1.0 / 3.0
median = np.median(lum)
low = int(max(0, (1.0 - sigma) * median))
high = int(min(255, (1.0 + sigma) * median))
# Calculate edges using Canny algorithm, and reduce noise by dilating the edges.
# This increases edge overlap leading to improved robustness against noise and slow
# camera movement. Note that very large kernel sizes can negatively affect accuracy.
edges = cv2.Canny(lum, low, high)
return cv2.dilate(edges, kernel)
# Path: animatediff/data/dataset.py
import os, io, csv, math, random
import numpy as np
import torch
import torchvision.transforms as transforms
import cv2
from einops import rearrange
from decord import VideoReader
from torch.utils.data.dataset import Dataset
from animatediff.utils.util import zero_rank_print, detect_edges
def get_score(video_data,
cond_frame_idx,
weight=[1.0, 1.0, 1.0, 1.0],
              use_edge=True):
    """
    Similar to the get_score helper in utils/util.py (defined next to detect_edges).

    video_data is an np.ndarray of RGB frames with shape (f, h, w, c).
    """
h, w = video_data.shape[1], video_data.shape[2]
cond_frame = video_data[cond_frame_idx]
cond_hsv_list = list(
cv2.split(
cv2.cvtColor(cond_frame.astype(np.float32), cv2.COLOR_RGB2HSV)))
if use_edge:
cond_frame_lum = cond_hsv_list[-1]
cond_frame_edge = detect_edges(cond_frame_lum.astype(np.uint8))
cond_hsv_list.append(cond_frame_edge)
score_sum = []
for frame_idx in range(video_data.shape[0]):
frame = video_data[frame_idx]
hsv_list = list(
cv2.split(cv2.cvtColor(frame.astype(np.float32),
cv2.COLOR_RGB2HSV)))
if use_edge:
frame_img_lum = hsv_list[-1]
frame_img_edge = detect_edges(lum=frame_img_lum.astype(np.uint8))
hsv_list.append(frame_img_edge)
hsv_diff = [
np.abs(hsv_list[c] - cond_hsv_list[c]) for c in range(len(weight))
]
hsv_mse = [np.sum(hsv_diff[c]) * weight[c] for c in range(len(weight))]
score_sum.append(sum(hsv_mse) / (h * w) / (sum(weight)))
return score_sum
class WebVid10M(Dataset):
def __init__(
self,
csv_path, video_folder,
sample_size=256, sample_stride=4, sample_n_frames=16,
is_image=False,
):
| zero_rank_print(f"loading annotations from {csv_path} ...") |
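As a quick illustration of the scoring logic above, this hedged sketch runs get_score on a small random clip. The frame layout follows how the function indexes video_data, i.e. (f, h, w, c) RGB frames; use_edge is disabled and a matching 3-channel weight is passed so only the HSV terms are compared.
import numpy as np

video = (np.random.rand(8, 64, 64, 3) * 255.0).astype(np.float32)  # 8 frames, 64x64, RGB
scores = get_score(video, cond_frame_idx=0, weight=[1.0, 1.0, 1.0], use_edge=False)
# scores[0] compares the conditioning frame with itself, so it is ~0; other entries are larger.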
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: VikParuchuri/texify
# Path: texify/inference.py
def batch_inference(images, model, processor, temperature=settings.TEMPERATURE, max_tokens=settings.MAX_TOKENS):
images = [image.convert("RGB") for image in images]
encodings = processor(images=images, return_tensors="pt", add_special_tokens=False)
pixel_values = encodings["pixel_values"].to(model.dtype)
pixel_values = pixel_values.to(model.device)
additional_kwargs = {}
if temperature > 0:
additional_kwargs["temperature"] = temperature
additional_kwargs["do_sample"] = True
additional_kwargs["top_p"] = 0.95
generated_ids = model.generate(
pixel_values=pixel_values,
max_new_tokens=max_tokens,
decoder_start_token_id=processor.tokenizer.bos_token_id,
**additional_kwargs,
)
generated_text = processor.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
generated_text = [postprocess(text) for text in generated_text]
return generated_text
# Path: texify/model/model.py
def load_model(checkpoint=settings.MODEL_CHECKPOINT, device=settings.TORCH_DEVICE_MODEL, dtype=settings.MODEL_DTYPE):
config = get_config(checkpoint)
AutoModel.register(VariableDonutSwinConfig, VariableDonutSwinModel)
model = VisionEncoderDecoderModel.from_pretrained(checkpoint, config=config, torch_dtype=dtype)
model = model.to(device)
model = model.eval()
print(f"Loaded texify model to {device} with {dtype} dtype")
return model
# Path: texify/model/processor.py
def load_processor():
AutoImageProcessor.register(VariableDonutSwinConfig, VariableDonutImageProcessor)
processor = VariableDonutProcessor.from_pretrained(settings.MODEL_CHECKPOINT)
processor.image_processor.max_size = settings.MAX_IMAGE_SIZE
processor.image_processor.size = [settings.MAX_IMAGE_SIZE["height"], settings.MAX_IMAGE_SIZE["width"]]
processor.image_processor.image_mean = IMAGE_MEAN
processor.image_processor.image_std = IMAGE_STD
processor.image_processor.train = False
processor.tokenizer.model_max_length = settings.MAX_TOKENS
processor.train = False
return processor
# Path: texify/output.py
def replace_katex_invalid(string):
# KaTeX cannot render all LaTeX, so we need to replace some things
string = re.sub(r'\\tag\{.*?\}', '', string)
string = re.sub(r'\\(?:Bigg?|bigg?)\{(.*?)\}', r'\1', string)
string = re.sub(r'\\quad\\mbox\{(.*?)\}', r'\1', string)
string = re.sub(r'\\mbox\{(.*?)\}', r'\1', string)
string = remove_inner_dollars(string)
return string
# Path: texify/settings.py
class Settings(BaseSettings):
class Config:
TORCH_DEVICE: Optional[str] = None
MAX_TOKENS: int = 384 # Will not work well above 768, since it was not trained with more
MAX_IMAGE_SIZE: Dict = {"height": 420, "width": 420}
MODEL_CHECKPOINT: str = "vikp/texify"
BATCH_SIZE: int = 16 # Should use ~5GB of RAM
DATA_DIR: str = "data"
TEMPERATURE: float = 0.0 # Temperature for generation, 0.0 means greedy
def TORCH_DEVICE_MODEL(self) -> str:
def CUDA(self) -> bool:
def MODEL_DTYPE(self) -> torch.dtype:
# Path: texify/util.py
def is_valid_image(file_path):
if not os.path.isfile(file_path):
return False
filename = os.path.basename(file_path)
if filename.startswith("."):
return False
try:
with Image.open(file_path) as img:
img.verify()
return True
except Exception:
return False
# Path: ocr_image.py
import argparse
import os.path
import json
from texify.inference import batch_inference
from texify.model.model import load_model
from texify.model.processor import load_processor
from PIL import Image
from texify.output import replace_katex_invalid
from texify.settings import settings
from texify.util import is_valid_image
def inference_single_image(image_path, json_path, model, processor, katex_compatible=False):
image = Image.open(image_path)
text = batch_inference([image], model, processor)
if katex_compatible:
text = [replace_katex_invalid(t) for t in text]
write_data = [{"image_path": image_path, "text": text[0]}]
with open(json_path, "w+") as f:
json_repr = json.dumps(write_data, indent=4)
f.write(json_repr)
def inference_image_dir(image_dir, json_path, model, processor, max=None, katex_compatible=False):
image_paths = [os.path.join(image_dir, image_name) for image_name in os.listdir(image_dir)]
| image_paths = [ip for ip in image_paths if is_valid_image(ip)] |
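For context, the texify helpers imported at the top of ocr_image.py compose as in the short sketch below; the input file name is a hypothetical placeholder.
from PIL import Image

from texify.inference import batch_inference
from texify.model.model import load_model
from texify.model.processor import load_processor

model = load_model()
processor = load_processor()
image = Image.open("equation.png")  # hypothetical input image
latex = batch_inference([image], model, processor)[0]
print(latex)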
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dcharatan/pixelsplat
# Path: src/visualization/drawing/coordinate_conversion.py
def generate_conversions(
shape: tuple[int, int],
device: torch.device,
x_range: Optional[Pair] = None,
y_range: Optional[Pair] = None,
) -> tuple[
ConversionFunction, # conversion from world coordinates to pixel coordinates
ConversionFunction, # conversion from pixel coordinates to world coordinates
]:
h, w = shape
x_range = sanitize_pair((0, w) if x_range is None else x_range, device)
y_range = sanitize_pair((0, h) if y_range is None else y_range, device)
minima, maxima = torch.stack((x_range, y_range), dim=-1)
wh = torch.tensor((w, h), dtype=torch.float32, device=device)
def convert_world_to_pixel(
xy: Float[Tensor, "*batch 2"],
) -> Float[Tensor, "*batch 2"]:
return (xy - minima) / (maxima - minima) * wh
def convert_pixel_to_world(
xy: Float[Tensor, "*batch 2"],
) -> Float[Tensor, "*batch 2"]:
return xy / wh * (maxima - minima) + minima
return convert_world_to_pixel, convert_pixel_to_world
# Path: src/visualization/drawing/rendering.py
def render_over_image(
image: Float[Tensor, "3 height width"],
color_function: ColorFunction,
device: torch.device,
subdivision: int = 8,
num_passes: int = 1,
) -> Float[Tensor, "3 height width"]:
_, h, w = image.shape
overlay = render(
(h, w),
color_function,
device,
subdivision=subdivision,
num_passes=num_passes,
)
color, alpha = overlay.split((3, 1), dim=0)
return image * (1 - alpha) + color * alpha
# Path: src/visualization/drawing/types.py
def sanitize_vector(
vector: Vector,
dim: int,
device: torch.device,
) -> Float[Tensor, "*#batch dim"]:
def sanitize_scalar(scalar: Scalar, device: torch.device) -> Float[Tensor, "*#batch"]:
def sanitize_pair(pair: Pair, device: torch.device) -> Float[Tensor, "2"]:
# Path: src/visualization/drawing/points.py
from typing import Optional
from einops import repeat
from jaxtyping import Float
from torch import Tensor
from .coordinate_conversion import generate_conversions
from .rendering import render_over_image
from .types import Pair, Scalar, Vector, sanitize_scalar, sanitize_vector
import torch
def draw_points(
image: Float[Tensor, "3 height width"],
points: Vector,
color: Vector = [1, 1, 1],
radius: Scalar = 1,
inner_radius: Scalar = 0,
num_msaa_passes: int = 1,
x_range: Optional[Pair] = None,
y_range: Optional[Pair] = None,
) -> Float[Tensor, "3 height width"]:
device = image.device
points = sanitize_vector(points, 2, device)
color = sanitize_vector(color, 3, device)
radius = sanitize_scalar(radius, device)
inner_radius = sanitize_scalar(inner_radius, device)
(num_points,) = torch.broadcast_shapes(
points.shape[0],
color.shape[0],
radius.shape,
inner_radius.shape,
)
# Convert world-space points to pixel space.
_, h, w = image.shape
| world_to_pixel, _ = generate_conversions((h, w), device, x_range, y_range) |
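A small numeric check of generate_conversions as defined above: map a normalized world-space point onto a 100x200 canvas and back. Passing x_range and y_range as plain tuples mirrors the function's own defaults and is assumed to be accepted by sanitize_pair.
import torch

world_to_pixel, pixel_to_world = generate_conversions(
    (100, 200), torch.device("cpu"), x_range=(0.0, 1.0), y_range=(0.0, 1.0)
)
xy_world = torch.tensor([0.5, 0.25])
xy_pixel = world_to_pixel(xy_world)  # tensor([100., 25.]) since w=200, h=100
assert torch.allclose(pixel_to_world(xy_pixel), xy_world)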
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nianhua99/PandoraNext-Helper
# Path: model.py
class User(db.Model):
def keys(self):
def __getitem__(self, item):
def __repr__(self):
# Path: util/share_tools.py
def get_host():
def get_share_token(access_token, unique_name, expires_in=0, show_conversations=False, show_userinfo=True):
def get_share_token_info(share_token, access_token=None):
# Path: util/api_response.py
class ApiResponse:
@staticmethod
def success(data):
return jsonify({
'status': 0,
'message': '请求成功',
'data': data
})
@staticmethod
def error(message, status=-1):
return jsonify({
'status': status,
'message': message
}), 500
@staticmethod
def unauthorized(message):
return jsonify({
'status': 444,
'message': message
}), 444
# Path: util/pandora_tools.py
def sync_pandora():
make_json()
fresh_setup()
# Path: share/share.py
import json
from datetime import datetime
from flask import Blueprint, request
from flask_jwt_extended import jwt_required
from loguru import logger
from sqlalchemy import and_, text
from model import db, User
from util import share_tools
from util.api_response import ApiResponse
from util.pandora_tools import sync_pandora
share_bp = Blueprint('share_bp', __name__)
def account2share(accounts):
shares = []
for account in accounts:
_share_list = json.loads(account.share_list)
for share in _share_list:
share['email'] = account.email
share['account_id'] = account.id
shares.append(share)
return shares
@share_bp.route('/list')
@jwt_required()
def share_list():
accounts = db.session.query(User).all()
return ApiResponse.success(account2share(accounts))
@share_bp.route('/search', methods=['POST'])
@jwt_required()
def search():
    # Fuzzy search by email and unique_name
email = request.json.get('email')
unique_name = request.json.get('unique_name')
accounts = db.session.query(User).filter(and_(User.email.like(f'%{email}%') if email else text(''), User.share_list.like(f'%{unique_name}%') if unique_name else text(''))).all()
shares = account2share(accounts)
if unique_name:
shares = list(filter(lambda x: unique_name in x['unique_name'], shares))
return ApiResponse.success(shares)
@share_bp.route('/add', methods=['POST'])
@jwt_required()
def share_add():
account_id = request.json.get('account_id')
unique_name = request.json.get('unique_name')
password = request.json.get('password')
comment = request.form.get('comment')
account = db.session.query(User).filter_by(id=account_id).first()
if account:
if not account.access_token:
return ApiResponse.error('请先登录账号')
else:
try:
| res = share_tools.get_share_token(account.access_token, unique_name) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: shroominic/fastui-chat
# Path: src/fastui_chat/components.py
class ChatInputForm(c.Form):
"""
Component for displaying a chat input form.
"""
fire_page_event: str
display_mode: str = "inline"
class_name: str = "row row-cols-lg-3 justify-content-center"
form_fields: list[c.FormFieldInput] = [
c.FormFieldInput(
title="",
name="user_msg",
placeholder="Message ChatBot...",
class_name="py-4",
),
]
def __init__(
self,
*,
submit_url: str,
fire_page_event: str,
**data: Any,
) -> None:
data["submit_url"] = submit_url
data["fire_page_event"] = fire_page_event
super().__init__(**data, footer=[])
self.footer = [
c.FireEvent(event=e.PageEvent(name=self.fire_page_event)),
]
# Path: src/fastui_chat/components.py
class ChatMessage(c.Div):
"""
Component for displaying a chat message.
"""
content: Union[str, list[Union[str, dict]]]
msg_type: Literal["human", "ai"]
class_name: str = "container col-sm-4 my-4"
display_alias: DisplayAlias = {"human": "You", "ai": "ChatBot"}
@property
def images(self) -> list[str]:
"""Return a list of image URLs in the message content."""
if isinstance(self.content, str):
return []
return [
(
item["image_url"]["url"]
if isinstance(item["image_url"], dict)
else item["image_url"]
)
for item in self.content
if isinstance(item, dict) and item["type"] == "image_url"
]
@property
def message(self) -> str:
"""Return the message content."""
return (
self.content
if isinstance(self.content, str)
else self.content[0]
if isinstance(self.content[0], str)
else self.content[0]["text"]
)
def __init__(
self,
msg_type: Literal["human", "ai"],
content: Union[str, list[Union[str, dict]]],
**data: Any,
) -> None:
if msg_type == "AIMessageChunk":
msg_type = "ai"
data["msg_type"] = msg_type
data["content"] = content
super().__init__(**data, components=[])
self.components = [
c.Heading(text=self.display_alias[self.msg_type], level=6),
c.Markdown(text=self.message),
*(
c.Image(
src=image_url,
class_name="img-fluid",
)
for image_url in self.images
),
]
# Path: src/fastui_chat/db.py
async def get_history() -> AsyncGenerator[BaseChatMessageHistory, None]:
if "chat_history" not in database:
raise RuntimeError("Database not initialized")
yield database["chat_history"]
# Path: src/fastui_chat/db.py
async def get_session() -> AsyncGenerator[ChatSession, None]:
if "chat_history" not in database or "chat_handler" not in database:
raise RuntimeError("Database not initialized")
yield ChatSession(
history=database["chat_history"],
chat_handler=database["chat_handler"],
)
# Path: src/fastui_chat/session.py
class ChatSession:
def __init__(
self,
*,
chat_handler: Runnable[HumanMessage, AIMessage],
history: BaseChatMessageHistory,
) -> None:
self.history = history
self.chat_handler = chat_handler
async def astream(self, user_msg: str):
async for message in self.chat_handler.astream(
HumanMessage(content=user_msg),
config={
"run_name": "ChatMessage",
"configurable": {"session_id": ""},
},
):
yield message
# Path: src/fastui_chat/chat.py
from typing import Annotated, AsyncIterable
from fastapi import APIRouter, Depends, Form
from fastapi.responses import StreamingResponse
from fastui import AnyComponent, FastUI
from fastui import components as c
from fastui.events import PageEvent
from langchain_core.chat_history import BaseChatMessageHistory
from .components import ChatInputForm, ChatMessage
from .db import get_history, get_session
from .session import ChatSession
import asyncio
router = APIRouter()
@router.get("/", response_model=FastUI, response_model_exclude_none=True)
async def chat_ui() -> list[AnyComponent]:
"""
Main endpoint for showing the Chat UI and handling user input.
"""
return [
c.Page(
components=[
c.ServerLoad(
path="/chat/history",
load_trigger=PageEvent(name="chat-load"),
components=[],
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="chat-load",
),
],
)
]
@router.get("/chat/history", response_model=FastUI, response_model_exclude_none=True)
async def chat_history(
history: Annotated[BaseChatMessageHistory, Depends(get_history)],
) -> list[AnyComponent]:
"""
Endpoint for showing the Chat History UI.
"""
return [ChatMessage(msg.type, msg.content) for msg in history.messages]
@router.post("/chat/generate", response_model=FastUI, response_model_exclude_none=True)
async def chat_generate(user_msg: Annotated[str, Form(...)]) -> list[AnyComponent]:
"""
Endpoint for showing the Chat Generate UI.
"""
return [
ChatMessage("human", user_msg),
c.ServerLoad(
path="/chat/generate/response?user_msg=" + user_msg,
load_trigger=PageEvent(name="generate-response"),
components=[c.Text(text="...")],
sse=True,
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="generate-response",
),
]
@router.get("/chat/generate/response")
async def sse_ai_response(
user_msg: str,
| session: Annotated[ChatSession, Depends(get_session)], |
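The route above streams tokens from ChatSession.astream. Outside FastAPI the same generator can be consumed directly, as in this minimal sketch; the .content attribute is assumed from LangChain's AIMessage/AIMessageChunk types referenced in the session code.
import asyncio

async def demo(session: ChatSession) -> None:
    async for chunk in session.astream("Hello!"):
        print(chunk.content, end="", flush=True)

# asyncio.run(demo(my_session))  # my_session: a ChatSession built as in db.get_session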
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SHI-Labs/VCoder
# Path: vcoder_llava/model/multimodal_encoder/builder.py
def build_vision_tower(vision_tower_cfg, **kwargs):
vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
raise ValueError(f'Unknown vision tower: {vision_tower}')
# Path: vcoder_llava/model/multimodal_projector/builder.py
def build_vision_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'mm_projector_type', 'linear')
if projector_type == 'linear':
return nn.Linear(config.mm_hidden_size, config.hidden_size)
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
return nn.Sequential(*modules)
if projector_type == 'identity':
return IdentityMap()
raise ValueError(f'Unknown projector type: {projector_type}')
# Path: vcoder_llava/model/multimodal_adapter/builder.py
def build_seg_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'seg_mm_projector_type', 'linear')
if projector_type == 'linear':
return nn.Linear(config.seg_mm_hidden_size, config.hidden_size)
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.seg_mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
return nn.Sequential(*modules)
if projector_type == 'identity':
return IdentityMap()
raise ValueError(f'Unknown seg projector type: {projector_type}')
# Path: vcoder_llava/model/multimodal_depth_adapter/builder.py
def build_depth_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'depth_mm_projector_type', 'linear')
if projector_type == 'linear':
return nn.Linear(config.depth_mm_hidden_size, config.hidden_size)
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.depth_mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
return nn.Sequential(*modules)
if projector_type == 'identity':
return IdentityMap()
raise ValueError(f'Unknown depth projector type: {projector_type}')
# Path: vcoder_llava/constants.py
IGNORE_INDEX = -100
# Path: vcoder_llava/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: vcoder_llava/constants.py
SEG_TOKEN_INDEX = -300
# Path: vcoder_llava/constants.py
DEPTH_TOKEN_INDEX = -400
# Path: vcoder_llava/model/vcoder_ds_llava_arch.py
from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from .multimodal_adapter.builder import build_seg_projector
from .multimodal_depth_adapter.builder import build_depth_projector
from vcoder_llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, DEPTH_TOKEN_INDEX
import torch
import torch.nn as nn
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VCoderDSLlavaMetaModel:
def __init__(self, config):
super(VCoderDSLlavaMetaModel, self).__init__(config)
self.config = config
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
| self.mm_projector = build_vision_projector(config) |
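For reference, with projector_type set to 'mlp2x_gelu' the build_vision_projector factory above reduces to a two-layer MLP like the one below; the 1024 and 4096 sizes are illustrative stand-ins for config.mm_hidden_size and config.hidden_size.
import torch.nn as nn

projector = nn.Sequential(
    nn.Linear(1024, 4096),  # mm_hidden_size -> hidden_size
    nn.GELU(),
    nn.Linear(4096, 4096),  # hidden_size -> hidden_size
)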
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: galatolofederico/microchain
# Path: microchain/engine/function.py
class Function:
def __init__(self):
self.call_signature = inspect.signature(self.__call__)
self.call_parameters = []
for name, parameter in self.call_signature.parameters.items():
if parameter.annotation == inspect._empty:
raise ValueError(f"Parameter {name} must have an annotation")
self.call_parameters.append(dict(
name=name,
annotation=parameter.annotation
))
self.state = None
self.engine = None
def bind(self, *, state, engine):
self.state = state
self.engine = engine
@property
def name(self):
return type(self).__name__
@property
def example(self):
if not isinstance(self.example_args, list):
raise ValueError("example_args must be a list")
if len(self.example_args) != len(self.call_parameters):
raise ValueError(f"example_args must have the same length as call_parameters ({len(self.call_parameters)})")
bound = self.call_signature.bind(*self.example_args)
return f"{self.name}({', '.join([f'{name}={value}' for name, value in bound.arguments.items()])})"
@property
def signature(self):
arguments = [f"{parameter['name']}: {parameter['annotation'].__name__}" for parameter in self.call_parameters]
return f"{self.name}({', '.join(arguments)})"
@property
def help(self):
return f"{self.signature}\n{self.description}.\nExample: {self.example}\n"
@property
def error(self):
return f"Error: wrong format. Use {self.signature}. Example: {self.example}. Please try again."
def check_bind(self):
if self.state is None:
raise ValueError("You must register the function to an Engine")
def safe_call(self, args, kwargs):
self.check_bind()
try:
return FunctionResult.SUCCESS, str(self.__call__(*args, **kwargs))
except Exception as e:
print(colored(f"Exception in Function call {e}", "red"))
print(colored(''.join(traceback.TracebackException.from_exception(e).format()), "red"))
return FunctionResult.ERROR, self.error
def __call__(self, command):
raise NotImplementedError
# Path: microchain/engine/function.py
class FunctionResult(enum.Enum):
SUCCESS = 0
ERROR = 1
# Path: microchain/engine/engine.py
import ast
from microchain.engine.function import Function, FunctionResult
class Engine:
def __init__(self, state=dict()):
self.state = state
self.functions = dict()
self.help_called = False
self.agent = None
def register(self, function):
self.functions[function.name] = function
function.bind(state=self.state, engine=self)
def bind(self, agent):
self.agent = agent
def stop(self):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before stopping")
self.agent.stop()
def execute(self, command):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before executing commands")
if not self.help_called:
raise ValueError("You never accessed the help property. Building a prompt without including the help string is a very bad idea.")
try:
tree = ast.parse(command)
except SyntaxError:
| return FunctionResult.ERROR, f"Error: syntax error in command {command}. Please try again." |
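To make the Function contract above concrete, here is a minimal subclass sketch; the class and its behavior are illustrative and not taken from the repository.
class Sum(Function):
    description = "Add two numbers and return the result"
    example_args = [2, 3]

    def __call__(self, a: float, b: float) -> float:
        return a + b

# engine = Engine()
# engine.register(Sum())  # exposes "Sum(a: float, b: float)" via the help/signature properties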
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OSU-NLP-Group/SeeAct
# Path: src/data_utils/dom_utils.py
def get_tree_repr(
tree, max_value_length=5, max_length=20, id_mapping={}, keep_html_brackets=False
):
if isinstance(tree, str):
tree = etree.fromstring(tree)
else:
tree = copy.deepcopy(tree)
for node in tree.xpath("//*"):
if node.tag != "text":
if "backend_node_id" in node.attrib:
if node.attrib["backend_node_id"] not in id_mapping:
id_mapping[node.attrib["backend_node_id"]] = len(id_mapping)
node.attrib["backend_node_id"] = str(
id_mapping[node.attrib["backend_node_id"]]
)
get_attribute_repr(node, max_value_length, max_length)
else:
node.text = " ".join(node.text.split()[:max_length])
tree_repr = etree.tostring(tree, encoding="unicode")
tree_repr = tree_repr.replace('"', " ")
tree_repr = (
tree_repr.replace("meta= ", "").replace("id= ", "id=").replace(" >", ">")
)
tree_repr = re.sub(r"<text>(.*?)</text>", r"\1", tree_repr)
if not keep_html_brackets:
tree_repr = tree_repr.replace("/>", "$/$>")
tree_repr = re.sub(r"</(.+?)>", r")", tree_repr)
tree_repr = re.sub(r"<(.+?)>", r"(\1", tree_repr)
tree_repr = tree_repr.replace("$/$", ")")
    html_escape_table = [
        ("&quot;", '"'),
        ("&amp;", "&"),
        ("&lt;", "<"),
        ("&gt;", ">"),
        ("&nbsp;", " "),
        ("&ndash;", "-"),
        ("&rsquo;", "'"),
        ("&lsquo;", "'"),
        ("&ldquo;", '"'),
        ("&rdquo;", '"'),
        ("&#39;", "'"),
        ("&#40;", "("),
        ("&#41;", ")"),
    ]
for k, v in html_escape_table:
tree_repr = tree_repr.replace(k, v)
tree_repr = re.sub(r"\s+", " ", tree_repr).strip()
return tree_repr, id_mapping
# Path: src/data_utils/dom_utils.py
def data_prune_tree(
dom_tree,
candidate_set,
max_depth=5,
max_children=50,
max_sibling=3,
):
nodes_to_keep = set()
for candidate_id in candidate_set:
candidate_node = dom_tree.xpath(f'//*[@backend_node_id="{candidate_id}"]')[0]
nodes_to_keep.add(candidate_node.attrib["backend_node_id"])
# get all ancestors
nodes_to_keep.update(
[
x.attrib.get("backend_node_id", "")
for x in candidate_node.xpath("ancestor::*")
]
)
# get descendants with max depth
nodes_to_keep.update(
[
x.attrib.get("backend_node_id", "")
for x in get_descendants(candidate_node, max_depth)
][:max_children]
)
# get siblings within range
parent = candidate_node.getparent()
if parent is not None:
siblings = [x for x in parent.getchildren() if x.tag != "text"]
idx_in_sibling = siblings.index(candidate_node)
nodes_to_keep.update(
[
x.attrib.get("backend_node_id", "")
for x in siblings[
max(0, idx_in_sibling - max_sibling): idx_in_sibling
+ max_sibling
+ 1
]
]
)
# clone the tree
new_tree = copy.deepcopy(dom_tree)
# remove nodes not in nodes_to_keep
for node in new_tree.xpath("//*")[::-1]:
if node.tag != "text":
is_keep = node.attrib.get("backend_node_id", "") in nodes_to_keep
is_candidate = node.attrib.get("backend_node_id", "") in candidate_set
else:
is_keep = (
node.getparent().attrib.get("backend_node_id", "") in nodes_to_keep
)
is_candidate = (
node.getparent().attrib.get("backend_node_id", "") in candidate_set
)
if not is_keep and node.getparent() is not None:
node.getparent().remove(node)
else:
if not is_candidate or node.tag == "text":
node.attrib.pop("backend_node_id", None)
if (
len(node.attrib) == 0
and not any([x.tag == "text" for x in node.getchildren()])
and node.getparent() is not None
and node.tag != "text"
and len(node.getchildren()) <= 1
):
# insert all children into parent
for child in node.getchildren():
node.addprevious(child)
node.getparent().remove(node)
return new_tree, nodes_to_keep
# Path: src/data_utils/format_prompt_utils.py
import string
import lxml
from .dom_utils import get_tree_repr, data_prune_tree
# -*- coding: utf-8 -*-
# Copyright (c) 2024 OSU Natural Language Processing Group
#
# Licensed under the OpenRAIL-S License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.licenses.ai/ai-pubs-open-rails-vz1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def data_format_input_multichoice(
sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False
):
# Parse html into a dom tree
dom_tree = lxml.etree.fromstring(sample["cleaned_html"])
dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids)
| tree_repr, id_mapping = get_tree_repr( |
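As a sense check for the representation helper above, the sketch below runs get_tree_repr on a tiny DOM fragment. The exact text of tree_repr depends on get_attribute_repr, which is not shown in this snippet, so only the id remapping is asserted.
html = '<div backend_node_id="5"><button backend_node_id="7"><text>Submit</text></button></div>'
tree_repr, id_mapping = get_tree_repr(html, id_mapping={}, keep_html_brackets=False)
assert id_mapping == {"5": 0, "7": 1}  # backend ids remapped to compact integers in document order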
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DeepWok/mase
# Path: machop/chop/passes/graph/utils.py
def get_mase_op(node):
return node.meta["mase"].parameters["common"]["mase_op"]
# Path: machop/chop/passes/graph/utils.py
def get_mase_type(node):
return node.meta["mase"].parameters["common"]["mase_type"]
# Path: machop/chop/passes/graph/analysis/add_metadata/software_metadata_layers.py
SOFTWARE_PARAM_ANALYSIS_LAYERS = {
"module": {
"batch_norm1d": analyze_software_meta_param_nn_module_batch_norm,
"batch_norm2d": analyze_software_meta_param_nn_module_batch_norm,
# default:
"default": analyze_software_meta_param_nn_module_default,
},
"module_related_func": {
"adaptive_avg_pool1d": analyze_software_meta_param_module_related_func_default,
"adaptive_avg_pool2d": analyze_software_meta_param_module_related_func_default,
"adaptive_max_pool1d": analyze_software_meta_param_module_related_func_default,
"adaptive_max_pool2d": analyze_software_meta_param_module_related_func_default,
"avg_pool1d": analyze_software_meta_param_module_related_func_default,
"avg_pool2d": analyze_software_meta_param_module_related_func_default,
"batch_norm": analyze_software_meta_param_module_related_func_default,
"conv1d": analyze_software_meta_param_module_related_func_default,
"conv2d": analyze_software_meta_param_module_related_func_default,
"layer_norm": analyze_software_meta_param_module_related_func_default,
"linear": analyze_software_meta_param_module_related_func_default,
"max_pool1d": analyze_software_meta_param_module_related_func_default,
"max_pool2d": analyze_software_meta_param_module_related_func_default,
"relu": analyze_software_meta_param_module_related_func_default,
# NOTE: These ops were added to support MobileNetV2 and MobileNetV3
"relu6": analyze_software_meta_param_module_related_func_default,
"hardswish": analyze_software_meta_param_module_related_func_default,
"hardsigmoid": analyze_software_meta_param_module_related_func_default,
"dropout": analyze_software_meta_param_module_related_func_default,
# default:
"default": analyze_software_meta_param_module_related_func_default,
},
# builtin func
"builtin_func": {
"mul": analyze_software_meta_param_builtin_func_default,
"sub": analyze_software_meta_param_builtin_func_default,
"add": analyze_software_meta_param_builtin_func_default,
"matmul": analyze_software_meta_param_builtin_func_default,
"bmm": analyze_software_meta_param_builtin_func_default,
# default:
"default": analyze_software_meta_param_builtin_func_default,
},
"implicit_func": {
"size": analyze_software_meta_param_implicit_func_default,
"view": analyze_software_meta_param_implicit_func_default,
"flatten": analyze_software_meta_param_implicit_func_default,
"t": analyze_software_meta_param_implicit_func_default,
"constant": analyze_software_meta_param_implicit_func_default,
"default": analyze_software_meta_param_implicit_func_default,
"ge": analyze_software_meta_param_implicit_func_default,
"where": analyze_software_meta_param_implicit_func_default,
"clamp_": analyze_software_meta_param_implicit_func_default,
"abs": analyze_software_meta_param_implicit_func_default,
"stack": analyze_software_meta_param_implicit_func_default,
"getitem": analyze_software_meta_param_implicit_func_default,
"getattr": analyze_software_meta_param_implicit_func_default,
},
"placeholder": {
"placeholder": analyze_software_meta_param_placeholder,
},
"get_attr": {
"get_attr": analyze_software_meta_param_get_attr,
},
"output": {
"output": analyze_software_meta_param_output,
},
"patched_func": {
"default": analyze_software_meta_param_patched_func_default,
},
}
# Path: machop/chop/passes/graph/analysis/add_metadata/add_software_metadata.py
import logging
from ...utils import get_mase_op, get_mase_type
from .software_metadata_layers import SOFTWARE_PARAM_ANALYSIS_LAYERS
logger = logging.getLogger(__name__)
def add_software_metadata_analysis_pass(graph, pass_args=None):
"""add software metadata
:param graph: a MaseGraph
:type graph: MaseGraph
:param pass_args: this pass does not need any arguments, defaults to None
:type pass_args: _type_, optional
:return: return a tuple of a MaseGraph and an empty dict (no additional info to return)
:rtype: tuple(MaseGraph, Dict)
"""
for node in graph.fx_graph.nodes:
| mase_op = get_mase_op(node) |
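The pass body continues beyond this snippet. The table lookup it implies over SOFTWARE_PARAM_ANALYSIS_LAYERS is sketched below; since the actual call made with the selected function is not shown above, none is made here.
mase_type = "module_related_func"  # e.g. the value returned by get_mase_type(node)
mase_op = "linear"                 # e.g. the value returned by get_mase_op(node)
table = SOFTWARE_PARAM_ANALYSIS_LAYERS[mase_type]
analysis_fn = table.get(mase_op, table["default"])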
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: PratikSingh121/ResearchPlot
# Path: app/prompt_templates.py
class GetPromptTemplates:
def __init__(self, topic):
self.topic = topic
self.question_parser = CommaSeparatedListOutputParser()
def ResearchPromptTemplate(self, questions = ''):
if questions != '':
research_bot_final_prompt = research_bot_prompt + "\n\nQuestions Answered by the user : " + questions + "\n\n" + "Output :\n\n";
else:
research_bot_final_prompt = research_bot_prompt + "\n\n" + "Output :\n\n";
ResearchPromptTemplate = PromptTemplate(template= research_bot_final_prompt, input_variables=["Topic"])
# partial_variables={"format_instructions": self.research_parser.get_format_instructions()}
return ResearchPromptTemplate.format_prompt(Topic = self.topic).to_string()
def QuestionPromptTemplate(self):
QuestionPromptTemplate = PromptTemplate(template= question_forming_prompt, input_variables=["Topic"], partial_variables={"format_instructions": self.question_parser.get_format_instructions()})
return QuestionPromptTemplate.format_prompt(Topic = self.topic).to_string()
def MermaidPromptTemplate(self, information):
MermaidPromptTemplate = PromptTemplate(template= mermaid_maker_prompt, input_variables=["information"])
return MermaidPromptTemplate.format_prompt(information = information).to_string()
# Path: app/question_framing.py
class QuestionFraming:
def __init__(self, question: list):
self.question = question
self.answer = []
def ask_questions(self):
print('\033[91m' + "Answer the following questions: (Leave blank if no answer)" + '\033[0m')
for i in self.question:
# print in blue color
print('\033[94m' + i + "\n > " + '\033[0m', end="")
answer = input()
if answer == "":
answer = "No answer"
self.answer.append(answer)
# Add more questions if needed
def format_information(self):
information = dict(zip(self.question, self.answer))
return information
# Path: packages/chains.py
class Chains:
def __init__(self, *args):
self._chains = args
def chain(self, PromptTemplate, parser = None):
message = [
SystemMessage(content = PromptTemplate)
]
output = llm(message)
if parser is None:
return output.content
else:
return parser.parse(output.content)
# Path: main.py
from langchain.output_parsers import CommaSeparatedListOutputParser
from app.prompt_templates import GetPromptTemplates
from app.question_framing import QuestionFraming
from packages.chains import Chains
import subprocess
import os
#app
#package
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Getting Topic
print('\033[93m' + "Enter the topic. You can add just a keyword or a description.\nTopic : > " + '\033[0m', end="")
topic = input()
print()
#Objects
Chain = Chains()
PromptTemplate = GetPromptTemplates(topic)
QuestionParser = CommaSeparatedListOutputParser()
# Getting Questions
print('\033[92m' + "Do you want to answer some questions? (y/n) \nAnswer : > " + '\033[0m', end="")
questions_allowed = input()
print()
if questions_allowed == 'y':
questions_allowed = True
else:
questions_allowed = False
if questions_allowed:
QuestionsList = Chain.chain(PromptTemplate = PromptTemplate.QuestionPromptTemplate(), parser = QuestionParser)
| questionframing = QuestionFraming(QuestionsList) |
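The CommaSeparatedListOutputParser used for QuestionsList simply splits the model's comma-separated reply into a Python list; a quick sketch of the expected parse, based on LangChain's documented behavior:
parser = CommaSeparatedListOutputParser()
parser.parse("What is the scope?, Who is the audience?, What is the timeline?")
# -> ['What is the scope?', 'Who is the audience?', 'What is the timeline?']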
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yeyt97/AirDropPlus
# Path: config.py
class Config:
def __init__(self, config_path):
self.config = configparser.ConfigParser()
self.config.read(config_path, encoding='utf-8')
self.config_path = config_path
self.key = self.config.get('config', 'key')
self.save_path = self.config.get('config', 'save_path')
if self.save_path == '' or self.save_path is None:
self.save_path = os.path.join(os.path.expanduser('~'), 'Downloads')
self.port = int(self.config.get('config', 'port'))
self.basic_notifier = False if self.config.get('config', 'basic_notifier')=='0' else True
self.version = self.config.get('info', 'version')
# Path: notifier.py
def create_notifier(basic: bool = True):
return BasicNotifier() if basic else Notifier()
# Path: server.py
class Server:
def __init__(self, config: Config, notifier: INotifier):
self.config = config
self.notifier = notifier
self.blueprint = Blueprint('server', __name__)
self.register_routes()
self.app = Flask(__name__)
self.app.register_blueprint(self.blueprint)
def run(self, host: str, port: int):
self.app.run(host=host, port=port)
    def register_routes(self):
        """ ----------- Unified handling ----------- """
        # Unified authentication
@self.blueprint.before_request
def check_api_key():
if request.path == '/':
return
auth_header = request.headers.get("Authorization")
if auth_header != self.config.key:
return Result.error(msg='密钥错误', code=401)
version = request.headers.get("ShortcutVersion")
if version != self.config.version:
msg = f'版本不匹配\n\nWindows版本为:{self.config.version}\n快捷指令版本为:{version}'
return Result.error(msg=msg, code=400)
        # Unified exception handling
@self.blueprint.errorhandler(Exception)
def handle_all_exceptions(error):
msg = str(error)
self.notifier.notify('错误', '遇到一个错误' + msg)
            return Result.error(msg, 500)
        """ ----------- Test ----------- """
@self.blueprint.route('/')
def test():
            return 'Hello world!'
        """ ----------- Files ----------- """
        # The phone sends the list of files it is about to send next
@self.blueprint.route('/file/send/list', methods=['POST'])
def send_file_list():
filename_list = request.form['file_list'].splitlines()
self.notifier.show_future_files(self.config.save_path, filename_list, to_mobile=False)
return Result.success(msg="发送成功")
        # The phone sends a file
@self.blueprint.route('/file/send', methods=['POST'])
def send_file():
if 'file' not in request.files:
return Result.error(msg="文件不存在")
file = request.files['file']
ori_filename = request.form['filename']
notify_content = request.form['notify_content']
filename = utils.avoid_duplicate_filename(self.config.save_path, ori_filename)
file.save(os.path.join(self.config.save_path, filename))
if notify_content != '':
ori_filename_list = notify_content.splitlines()
if len(ori_filename_list) == 1:
self.notifier.show_received_file(self.config.save_path, filename, ori_filename)
else:
self.notifier.show_received_files(self.config.save_path, ori_filename_list)
return Result.success(msg="发送成功")
        # Get the list of file paths copied on the PC
@self.blueprint.route('/file/receive/list')
def receive_file_list():
success, res = utils.get_clipboard_files()
if not success:
msg = f'未复制文件: {res}'
self.notifier.notify('错误', msg)
return Result.error(msg=msg)
if len(res) > 0:
file_names = [os.path.basename(path) for path in res]
self.notifier.show_future_files(None, file_names, to_mobile=True)
return Result.success(data=res)
return Result.error(msg='Windows未复制文件')
        # Get a file from the PC
@self.blueprint.route('/file/receive', methods=['POST'])
def receive_file():
path = request.form.get('path')
file_name = os.path.basename(path)
# self.notifier.notify('文件', f'发送: {file_name}')
with open(path, 'rb') as f:
file_content = f.read()
            return flask.send_file(io.BytesIO(file_content), as_attachment=True, download_name=file_name)
        """ ----------- Clipboard ----------- """
        # Get the PC clipboard content
@self.blueprint.route('/clipboard/receive')
def receive_clipboard():
success, res = utils.get_clipboard_content()
if not success:
msg = f'获取剪贴板出错: {res}'
self.notifier.notify('错误', msg)
return Result.error(msg=msg)
if res != '':
self.notifier.notify('剪贴板', f'发送: {res}')
return Result.success(data=res)
else:
self.notifier.notify('剪贴板', '发送失败: Windows剪贴板为空')
return Result.error(msg='Windows剪贴板为空')
        # Receive the phone's clipboard content
@self.blueprint.route('/clipboard/send', methods=['POST'])
def send_clipboard():
clipboard = request.form['clipboard']
if clipboard is None or clipboard == '':
self.notifier.notify('剪贴板', '接收失败: iPhone剪贴板为空')
return Result.error(msg='iPhone剪贴板为空')
success, msg = utils.set_clipboard_content(clipboard)
if success:
self.notifier.notify('剪贴板', f'收到剪贴板内容: {clipboard}')
else:
self.notifier.notify('错误', f'设置剪贴板出错: {msg}')
return Result.success(msg='发送成功') if success else Result.error(msg=msg)
# Path: AirDropPlus.py
import os
import sys
import utils
from config import Config
from notifier import create_notifier
from server import Server
if __name__ == '__main__':
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini')
| config = Config(config_file_path) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: byeongjun-park/HarmonyView
# Path: ldm/thirdp/psp/helpers.py
def get_blocks(num_layers):
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
return blocks
# Path: ldm/thirdp/psp/helpers.py
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
# Path: ldm/thirdp/psp/helpers.py
class bottleneck_IR(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth)
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
# Path: ldm/thirdp/psp/helpers.py
class bottleneck_IR_SE(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth)
)
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
SEModule(depth, 16)
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
# Path: ldm/thirdp/psp/helpers.py
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
# Path: ldm/thirdp/psp/model_irse.py
from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
from ldm.thirdp.psp.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
# https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
| blocks = get_blocks(num_layers) |
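For orientation, get_blocks(50) above describes the classic IR-50 layout. Assuming get_block returns one entry per bottleneck unit, as in the upstream InsightFace code this is adapted from, the stage sizes come out as below.
blocks = get_blocks(50)
print([len(stage) for stage in blocks])  # expected: [3, 4, 14, 3]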
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: srlabs/black-basta-buster
# Path: decryptblocks.py
def make_int(i):
def make_int_or_percent(i):
def xor_blocks(var, key, byteorder=sys.byteorder):
def write_block(fd, offset, block):
def main():
def decrypt_file(f, keyblock, fsize=None, is_dry=True, lower_limit=None, upper_limit=None):
def advise(t, start, end):
def advise(*args, **kwargs):
class Percent(int):
GB = 1024*1024*1024
BLOCK_SIZE = 64
BLOCK_SIZE = len(null)
# Path: ranges.py
def ranges_for_file(path, fsize=None):
#return ranges_for_file_real(path)
if fsize is None:
return ranges_for_file_generated(path)
else:
return ranges_for_file_size(fsize)
# Path: extractblock.py
import argparse
import logging
import sys
import logging
import math
from collections import deque
from itertools import islice
from pathlib import Path
from hexdump import hexdump
from decryptblocks import detect_magic_size, make_int, make_int_or_percent, Percent
from ranges import ranges_for_file
from collections import Counter
log = logging.getLogger(__name__)
def extract_block(fd, offset, size=64):
#log.debug("Reading %r at %r for %r ", fd, offset, size)
fd.seek(offset)
block = fd.read(size)
log.debug("Read %i bytes at %r for %r:\n%s", len(block), offset, size, hexdump(block, result="return"))
return block
def make_int_or_auto(s):
if s.strip() == "auto":
return "auto"
else:
return make_int(s)
### Entropy taken from https://stackoverflow.com/a/37890790/2015768
def eta(data, unit='natural'):
base = {
'shannon' : 2.,
'natural' : math.exp(1),
'hartley' : 10.
}
if len(data) <= 1:
return 0
counts = Counter()
for d in data:
counts[d] += 1
ent = 0
probs = [float(c) / len(data) for c in counts.values()]
for p in probs:
if p > 0.:
ent -= p * math.log(p, base[unit])
return ent
BLOCKSIZE = 64
NULLBLOCK = b'\x00' * BLOCKSIZE
def auto_detect_key_block(f, fsize=None, lower_limit=None, upper_limit=None):
if fsize is None:
fsize = detect_magic_size(f)
block = None
if lower_limit is None:
# we skip the first few block, unless explicitly requested
lower_limit = next(islice(ranges_for_file(f, fsize), 5, 6))[0]
if upper_limit is None:
upper_limit = fsize
CONFIDENCE = 5
with open(f, "rb") as fd:
confidence_blocks = deque(maxlen=CONFIDENCE)
for n, (offset, length) in enumerate(filter(lambda offset_len: lower_limit <= offset_len[0] < upper_limit, ranges_for_file(f, fsize))):
t = True
for i in (-2, -1, 1, 2):
b = extract_block(fd, offset-i*BLOCKSIZE)
t &= b == NULLBLOCK
log.debug("T is now: %s", t)
#if not t:
# raise
if t:
log.debug("Confidence: %s", confidence_blocks)
b = extract_block(fd, offset)
if b == NULLBLOCK:
log.debug("B is null")
else:
log.debug("Adding confidence at %d %r", offset, b)
confidence_blocks.append((offset, b))
if len(confidence_blocks) == CONFIDENCE:
if all((b == x[1] for x in confidence_blocks)):
log.info ("Found blocks: %r", confidence_blocks)
block = b # Urhgs. This is spaghetti control flow. Sorry.
break
else:
log.info("Not all blocks are equal to %r: %s", b, confidence_blocks)
raise
else:
log.info("only %d blocks: %s", len(confidence_blocks), confidence_blocks)
else:
print ("non found")
raise
return block
def main():
argparser = argparse.ArgumentParser(description="Extracts a 64 byte long chunk out of a file. This can be useful for taking that block as an encryption key.")
argparser.add_argument("--hexdump", action="store_true")
argparser.add_argument("--dry", action="store_true",
help="Do not write anything")
argparser.add_argument("--size", type=int, default=0x40, help="Chunk size")
argparser.add_argument("--start-at", type=make_int_or_percent, default=None, help="Start the automatic determination from here, only")
argparser.add_argument("--output", type=Path, help="Write the chunk to a file rather than stdout")
argparser.add_argument("file", type=Path, help="The file to cut a chunk out of")
argparser.add_argument("offset", type=make_int_or_auto, help="Position to cut the chunk out of the file, or 'auto' to detect encrypted zero bytes")
args = argparser.parse_args()
offset = args.offset
f = args.file
size = args.size
start_at = args.start_at
logging.basicConfig(level=logging.INFO)
fsize = detect_magic_size(f)
| if isinstance(start_at, Percent): |
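The eta function in this row is a plain Shannon-entropy estimate over the bytes of a block, and that measurement is what lets an encrypted 64-byte block be told apart from an untouched run of zeros. A small stand-alone sketch of the same idea (the printed numbers are indicative only):

import math
import os
from collections import Counter

def eta(data, unit='shannon'):
    # Byte-level entropy, mirroring the helper in extractblock.py above.
    base = {'shannon': 2., 'natural': math.exp(1), 'hartley': 10.}
    if len(data) <= 1:
        return 0
    probs = [count / len(data) for count in Counter(data).values()]
    return -sum(p * math.log(p, base[unit]) for p in probs if p > 0.)

print(eta(b'\x00' * 64))       # ~0.0: a null block carries no information
print(eta(os.urandom(64)))     # close to 6 bits per byte: what a ciphertext block looks like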
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: EntySec/SeaShell
# Path: seashell/utils/ui/banner.py
class Banner(object):
""" Subclass of seashell.core module.
This subclass of seashell.core module is intended for
providing tools for printing banners in UI.
"""
def __init__(self) -> None:
super().__init__()
self.config = Config()
self.badges = Badges()
self.color_script = ColorScript()
def print_random_banner(self) -> None:
""" Print random banner.
:return None: None
"""
if os.path.exists(self.config.banners_path):
banners = []
all_banners = os.listdir(self.config.banners_path)
for banner in all_banners:
banners.append(banner)
if banners:
banner = ""
while not banner:
random_banner = random.randint(0, len(banners) - 1)
banner = self.color_script.parse_file(
self.config.banners_path + banners[random_banner]
)
self.badges.print_empty(f"%newline%end{banner}%end%newline")
else:
self.badges.print_warning("No banners detected.")
else:
self.badges.print_warning("No banners detected.")
# Path: seashell/utils/ui/tip.py
class Tip(object):
""" Subclass of seashell.core module.
This subclass of seashell.core module is intended for
providing tools for printing tips in UI.
"""
def __init__(self) -> None:
super().__init__()
self.config = Config()
self.badges = Badges()
self.color_script = ColorScript()
def print_random_tip(self) -> None:
""" Print random tip.
:return None: None
"""
if os.path.exists(self.config.tips_path):
tips = []
all_tips = os.listdir(self.config.tips_path)
for tip in all_tips:
tips.append(tip)
if tips:
tip = ""
while not tip:
random_tip = random.randint(0, len(tips) - 1)
tip = self.color_script.parse_file(
self.config.tips_path + tips[random_tip]
)
self.badges.print_empty(f"%newline%endSeaShell Tip: {tip}%end%newline")
else:
self.badges.print_warning("No tips detected.")
else:
self.badges.print_warning("No tips detected.")
# Path: seashell/lib/config.py
class Config(object):
""" Subclass of seashell.core module.
This subclass of seashell.core module is intended for providing
basic configuration for SeaShell.
"""
def __init__(self) -> None:
super().__init__()
self.user_path = f'{pathlib.Path.home()}/.seashell/'
self.base_path = f'{os.path.dirname(os.path.dirname(__file__))}/'
self.data_path = self.base_path + 'data/'
self.banners_path = self.data_path + 'banners/'
self.tips_path = self.data_path + 'tips/'
self.modules_path = self.base_path + 'modules/'
self.plugins_path = self.base_path + 'plugins/'
self.commands_path = self.base_path + 'commands/'
self.loot_path = self.user_path + 'loot/'
def setup(self) -> None:
""" Setup config and create paths.
:return None: None
"""
if not os.path.exists(self.user_path):
os.mkdir(self.user_path)
if not os.path.exists(self.loot_path):
os.mkdir(self.loot_path)
# Path: seashell/core/console.py
import os
import cmd
import sys
from badges import Badges, Tables
from colorscript import ColorScript
from hatsploit.lib.commands import Commands
from hatsploit.lib.runtime import Runtime
from seashell.utils.ui.banner import Banner
from seashell.utils.ui.tip import Tip
from seashell.lib.config import Config
"""
MIT License
Copyright (c) 2020-2024 EntySec
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class Console(cmd.Cmd):
""" Subclass of seashell.core module.
This subclass of seashell.core modules is intended for providing
main SeaShell Framework console interface.
"""
def __init__(self) -> None:
super().__init__()
cmd.Cmd.__init__(self)
self.badges = Badges()
self.tables = Tables()
self.banner = Banner()
| self.tip = Tip() |
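The Console in this row builds on the standard-library cmd.Cmd loop, with the banner and tip helpers only decorating start-up. A minimal sketch of that cmd.Cmd pattern, using hypothetical command names rather than SeaShell's real ones:

import cmd

class MiniConsole(cmd.Cmd):
    prompt = '(mini)> '   # illustrative prompt string

    def do_tip(self, _arg):
        """Print a placeholder tip."""
        print('Tip: type help to list available commands.')

    def do_exit(self, _arg):
        """Leave the console."""
        return True       # a truthy return value ends cmdloop()

if __name__ == '__main__':
    MiniConsole().cmdloop('Welcome to the mini console.')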
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: FlagOpen/TACO
# Path: train_utils.py
class Trainer(transformers.Trainer):
"""Use CosineAnnealingLR from pytorch
"""
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
"""
Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
passed as an argument.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
num_warmup_steps=self.args.get_warmup_steps(num_training_steps)
if getattr(self.args, 'use_cosine_anneal_with_warmup', False):
lr_max=1
lr_min=1e-1
cosine_anneal_with_warmup = lambda cur_iter: max(cur_iter / num_warmup_steps, 1e-9) if cur_iter < num_warmup_steps else \
(lr_min + 0.5*(lr_max-lr_min)*(1.0+math.cos((cur_iter-num_warmup_steps)/(num_training_steps-num_warmup_steps)*math.pi)))
self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer=self.optimizer if optimizer is None else optimizer,
lr_lambda=cosine_anneal_with_warmup,
)
else:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps,
)
self._created_lr_scheduler = True
return self.lr_scheduler
# Path: datamodule/constants.py
DEFAULT_PAD_TOKEN = "[PAD]"
# Path: datamodule/constants.py
DEFAULT_EOS_TOKEN = "<|endoftext|>"
# Path: datamodule/constants.py
DEFAULT_BOS_TOKEN = "<|endoftext|>"
# Path: datamodule/taco_dataset.py
class TacoDataset(Dataset):
"""Dataset for fine-tune."""
def __init__(self, data_path: str, debug: bool=False, learning_skill: int=None):
super(TacoDataset, self).__init__()
logging.warning("Loading tokenized data...")
if os.path.exists(data_path):
dataset = load_from_disk(data_path).shuffle()
else:
raise ValueError(" The specified data_path does not exist. Please provide a tokenized dataset")
if not all(key in dataset.column_names for key in ['input_ids', 'source_ids_lens']):
raise ValueError("Data has not been tokenized. Please tokenize the data first.")
if debug:
dataset = dataset.select(range(1000))
if learning_skill:
dataset = dataset.filter(lambda entry: entry['labels'][learning_skill])
logging.warning("Collect columns of hf dataset... This may take some time...")
input_ids = dataset['input_ids']
source_ids_lens = dataset['source_ids_lens']
self.learning_skill = None
if learning_skill:
scores = dataset['scores']
scores = preprocess_scores(scores, source_ids_lens, learning_skill)
self.scores = scores
self.learning_skill = learning_skill
logging.warning("Processing inputs...")
data_dict = preprocess(input_ids, source_ids_lens)
self.input_ids = data_dict["input_ids"]
self.labels = data_dict["labels"]
def __len__(self):
return len(self.input_ids)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
if self.learning_skill:
return dict(input_ids=self.input_ids[i], labels=self.labels[i], scores=self.scores[i])
else:
return dict(input_ids=self.input_ids[i], labels=self.labels[i])
# Path: datamodule/taco_dataset.py
class DataCollatorForTacoDataset(object):
"""Collate examples for fine-tune."""
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
)
labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
return dict(
input_ids=input_ids,
labels=labels,
)
# Path: train.py
from typing import Optional, Dict
from dataclasses import dataclass, field
from train_utils import Trainer
from datamodule import DEFAULT_PAD_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_BOS_TOKEN, TacoDataset, DataCollatorForTacoDataset
import transformers
"""
Finetune models on TACO-Dataset train split
"""
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py")
@dataclass
class DataArguments:
data_path: str = field(default=None, metadata={"help": "Path to the training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
adam_beta1: float = field(default=0.9)
adam_beta2: float = field(default=0.95)
use_cosine_anneal_with_warmup: bool = field(default=True)
model_max_length: int = field(
default=2048,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
resume_from_checkpoint: bool = field(
default=False,
metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."}
)
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
"""Make dataset and collator for fine-tune"""
| train_dataset = TacoDataset(data_path=data_args.data_path) |
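The create_scheduler override in this last row reduces warmup-plus-cosine-annealing to a single LambdaLR multiplier. Pulled out of the Trainer, the same schedule can be exercised on its own; the step counts, stand-in model, and base learning rate below are illustrative assumptions:

import math
import torch

# Illustrative hyperparameters; in train.py these come from TrainingArguments.
num_training_steps, num_warmup_steps = 1000, 100
lr_max, lr_min = 1.0, 0.1   # the same multipliers hard-coded in Trainer.create_scheduler

cosine_anneal_with_warmup = lambda cur_iter: (
    max(cur_iter / num_warmup_steps, 1e-9) if cur_iter < num_warmup_steps
    else lr_min + 0.5 * (lr_max - lr_min) * (
        1.0 + math.cos((cur_iter - num_warmup_steps) / (num_training_steps - num_warmup_steps) * math.pi)))

model = torch.nn.Linear(8, 8)                                   # stand-in model
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=cosine_anneal_with_warmup)

for step in (0, 50, 100, 550, 999):
    print(step, cosine_anneal_with_warmup(step))  # linear ramp to 1.0, then cosine decay towards 0.1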