id (string, 1–8 chars) | text (string, 6–1.05M chars) | dataset_id (string, 1 class)
---|---|---
5170993
|
<reponame>zhanghanchong/text2sql-hansql<gh_stars>0
#coding=utf8
import os
EXP_PATH = 'exp'
def hyperparam_path(args):
if args.read_model_path and args.testing:
return args.read_model_path
exp_path = hyperparam_path_text2sql(args)
if not os.path.exists(exp_path):
os.makedirs(exp_path)
return exp_path
def hyperparam_path_text2sql(args):
task = 'task_%s__model_%s' % (args.task, args.model)
task += '' if args.local_and_nonlocal is None else '_view_%s' % (args.local_and_nonlocal)
task += '' if 'without' in args.output_model else '_gp_%s' % (args.smoothing)
# encoder params
exp_path = 'emb_%s' % (args.embed_size) if args.plm is None else 'plm_%s' % (args.plm)
exp_path += '__gnn_%s_x_%s' % (args.gnn_hidden_size, args.gnn_num_layers)
exp_path += '__share' if args.relation_share_layers else ''
exp_path += '__head_%s' % (args.num_heads)
exp_path += '__share' if args.relation_share_heads else ''
exp_path += '__share' if args.node_type_share_weights else ''
exp_path += '__no_mp_attn' if args.no_metapath_attention else ''
exp_path += '__dp_%s' % (args.dropout)
exp_path += '__dpa_%s' % (args.attn_drop)
exp_path += '__dpc_%s' % (args.drop_connect)
# decoder params
# exp_path += '__cell_%s_%s_x_%s' % (args.lstm, args.lstm_hidden_size, args.lstm_num_layers)
# exp_path += '_chunk_%s' % (args.chunk_size) if args.lstm == 'onlstm' else ''
# exp_path += '_no' if args.no_parent_state else ''
# exp_path += '__attvec_%s' % (args.att_vec_size)
# exp_path += '__sepcxt' if args.sep_cxt else '__jointcxt'
# exp_path += '_no' if args.no_context_feeding else ''
# exp_path += '__ae_%s' % (args.action_embed_size)
# exp_path += '_no' if args.no_parent_production_embed else ''
# exp_path += '__fe_%s' % ('no' if args.no_parent_field_embed else args.field_embed_size)
# exp_path += '__te_%s' % ('no' if args.no_parent_field_type_embed else args.type_embed_size)
# training params
exp_path += '__bs_%s' % (args.batch_size)
exp_path += '__lr_%s' % (args.lr) if args.plm is None else '__lr_%s_ld_%s' % (args.lr, args.layerwise_decay)
exp_path += '__l2_%s' % (args.l2)
exp_path += '__wp_%s' % (args.warmup_ratio)
exp_path += '__sd_%s' % (args.lr_schedule)
exp_path += '__me_%s' % (args.max_epoch)
exp_path += '__mn_%s' % (args.max_norm)
exp_path += '__bm_%s' % (args.beam_size)
exp_path += '__seed_%s' % (args.seed)
exp_path = os.path.join(EXP_PATH, task, exp_path)
return exp_path
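# Usage sketch (not part of the original file): every value below is a made-up
# placeholder for the argparse options this module expects; only the attribute
# names are taken from the code above.
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(
        read_model_path=None, testing=False,
        task='text2sql', model='hansql', local_and_nonlocal=None,
        output_model='without_gp', smoothing=0.15,
        plm='bert-base-uncased', embed_size=300,
        gnn_hidden_size=256, gnn_num_layers=8,
        relation_share_layers=False, num_heads=8, relation_share_heads=False,
        node_type_share_weights=False, no_metapath_attention=False,
        dropout=0.2, attn_drop=0.0, drop_connect=0.2,
        batch_size=20, lr=5e-4, layerwise_decay=0.8, l2=1e-4,
        warmup_ratio=0.1, lr_schedule='linear', max_epoch=100,
        max_norm=5.0, beam_size=5, seed=999)
    # hyperparam_path_text2sql only builds the string; hyperparam_path would also create the directory.
    print(hyperparam_path_text2sql(args))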
|
StarcoderdataPython
|
194678
|
import smbus
from enum import Enum
class MCP23017(object):
OUTPUT = 0
INPUT = 1
PORT_A = 0
PORT_B = 1
IODIR_A = 0x00 # Controls the direction of the data I/O for port A.
IODIR_B = 0x01 # Controls the direction of the data I/O for port B.
IPOL_A = 0x02 # Configures the polarity on the corresponding GPIO_ port bits for port A.
IPOL_B = 0x03 # Configures the polarity on the corresponding GPIO_ port bits for port B.
GPINTEN_A = 0x04 # Controls the interrupt-on-change for each pin of port A.
GPINTEN_B = 0x05 # Controls the interrupt-on-change for each pin of port B.
    DEFVAL_A = 0x06 # Controls the default comparison value for interrupt-on-change for port A.
    DEFVAL_B = 0x07 # Controls the default comparison value for interrupt-on-change for port B.
INTCON_A = 0x08 # Controls how the associated pin value is compared for the interrupt-on-change for port A.
INTCON_B = 0x09 # Controls how the associated pin value is compared for the interrupt-on-change for port B.
IOCON = 0x0A # Controls the device.
GPPU_A = 0x0C # Controls the pull-up resistors for the port A pins.
GPPU_B = 0x0D # Controls the pull-up resistors for the port B pins.
INTF_A = 0x0E # Reflects the interrupt condition on the port A pins.
INTF_B = 0x0F # Reflects the interrupt condition on the port B pins.
    INTCAP_A = 0x10 # Captures the port A value at the time the interrupt occurred.
    INTCAP_B = 0x11 # Captures the port B value at the time the interrupt occurred.
GPIO_A = 0x12 # Reflects the value on the port A.
GPIO_B = 0x13 # Reflects the value on the port B.
OLAT_A = 0x14 # Provides access to the port A output latches.
OLAT_B = 0x15 # Provides access to the port B output latches.
def __init__(self, address: int, bus: int):
self.address = address
self.bus = smbus.SMBus(bus)
def init(self):
self.write_register(self.IOCON, 0b00100000)
self.write_register(self.GPPU_A, 0xff)
self.write_register(self.GPPU_B, 0xff)
    def port_mode(self, port: int, directions: int, pullups: int = 0xFF, inverted: int = 0x00):
        self.write_register(self.IODIR_A + port, directions)
        self.write_register(self.GPPU_A + port, pullups)
        self.write_register(self.IPOL_A + port, inverted)
def port_dir(self, port: int, direction: int):
self.write_register(self.IODIR_A + port, direction * 0xff)
def write_port(self, port: int, value: int):
self.write_register(self.GPIO_A + port, value)
def read_port(self, port: int) -> int:
return self.read_register(self.GPIO_A + port)
def write_register(self, register: int, value: int):
self.bus.write_byte_data(self.address, register, value)
def read_register(self, register: int) -> int:
return self.bus.read_byte_data(self.address, register)
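# Usage sketch (not from the original source): assumes an MCP23017 wired at I2C
# address 0x20 on bus 1; adjust for your hardware. Requires a real I2C bus and smbus.
if __name__ == '__main__':
    mcp = MCP23017(0x20, 1)
    mcp.init()
    mcp.port_mode(MCP23017.PORT_A, 0x00)         # all port A pins as outputs
    mcp.port_mode(MCP23017.PORT_B, 0xFF)         # all port B pins as inputs with pull-ups
    mcp.write_port(MCP23017.PORT_A, 0b10101010)  # drive alternating outputs high
    print(bin(mcp.read_port(MCP23017.PORT_B)))   # read the current state of port B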
|
StarcoderdataPython
|
6406203
|
<filename>example/polls/viewsets.py
from django.shortcuts import get_object_or_404
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from drf_react_template.mixins import FormSchemaViewSetMixin
from example.polls import models, serializers
class PollViewSet(
ListModelMixin,
RetrieveModelMixin,
FormSchemaViewSetMixin,
):
queryset = models.Question.objects.all().prefetch_related('choice_set')
serializer_class = serializers.QuestionSerializer
serializer_list_class = serializers.QuestionListSerializer
def get_object(self):
return get_object_or_404(
self.get_queryset(),
id=self.kwargs['pk'],
)
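# A minimal router hookup for the viewset above (a sketch; the 'polls' prefix and a
# separate urls.py module are assumptions, not taken from the example project):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'polls', PollViewSet, basename='poll')
# urlpatterns = router.urls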
|
StarcoderdataPython
|
3292050
|
import os
class Filename(object):
"""
Filename class used to manipulate image filenames.
Used by the exposure tool.
"""
def __init__(self, parent=None):
"""
Creates a filename instance.
param object parent: reference to parent image object
"""
# self.parent = parent # calling object reference
self.parent = self # calling object reference
self.filetype = 0 # from image object
# True to overwrite image files of the same name
self.overwrite = 0
# current image file folder
self.folder = ""
# current image file root name
self.root = "a."
# current image file sequence number
self.sequence_number = 1
# True to increment file sequence number after each exposure
self.auto_increment_sequence_number = 0
# True to include file sequence number in each image name
self.include_sequence_number = 0
# True to automatically name image based on imagetype
self.autoname = 0
# True when current file is a test image
self.test_image = 1
if parent is not None:
self.filetype = parent.filetype
def get_filename(self):
"""
Return current filename as a single string.
This is the filename for the next exposure to be taken.
Always uses forward slashes for folder delimiter.
This name is usually the next image to be produced.
"""
self.filetype = self.parent.filetype
folder = self.folder.replace("\\", "/")
if not folder.endswith("/"):
folder = folder + "/"
extension = self.get_extname(self.filetype)
if self.test_image:
filename = folder.replace("\\", "/") + "test" + "." + extension
elif self.include_sequence_number:
if self.autoname:
filename = (
folder
+ self.root
+ "."
+ self.parent.image_type.upper()
+ "."
+ "%04d" % self.sequence_number
+ "."
+ extension
)
else:
filename = folder + self.root + "%04d" % self.sequence_number + "." + extension
else:
if self.autoname:
filename = (
folder + self.root + "." + self.parent.image_type.upper() + "." + extension
)
else:
filename = folder + self.root + "." + extension
filename = filename.replace("..", ".") # clean up as could have two periods together
return filename
def set_filename(self, filename: str):
"""
Set the filename components based on a simple filename.
Args:
filename: filename to be set
"""
self.filetype = self.parent.filetype
filename = os.path.normpath(filename)
self.folder = os.path.dirname(filename)
if self.folder == "":
self.folder = "./"
f = os.path.basename(filename)
# if f.endswith(".fits"):
# f = f.replace(".fits", "")
# self.filetype = 0
f = f.split(".") # root is just first part
self.root = f[0]
return
def increment_filenumber(self):
"""
        Increment the filename sequence number when auto_increment_sequence_number is set and this is not a test image.
"""
if self.auto_increment_sequence_number and not self.test_image:
self.sequence_number += 1
return
def get_extname(self, filetype):
"""
Return the file extension string for a file type.
"""
if filetype in [0, 1, 6]:
return "fits"
elif filetype == 2:
return "bin"
elif filetype == 3:
return "tif"
elif filetype == 4:
return "jpg"
elif filetype == 5:
return "gif"
else:
return ""
|
StarcoderdataPython
|
288349
|
<reponame>piyueh/zoteroutils
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Distributed under terms of the BSD 3-Clause license.
"""A class that represents the SQLite database."""
class Database:
"""A class that represents a Zotero's SQLite database."""
# pylint: disable=import-outside-toplevel, relative-beyond-top-level
def __init__(self, zotero_dir: str):
from pathlib import Path
from .read import get_item_types_mapping, get_field_names_mapping, get_creator_types_mapping
from .dummy_dict import DummyDict
from sqlalchemy import create_engine
# initialize information regarding paths
self._paths: DummyDict = DummyDict()
self._paths.dir: Path = Path(zotero_dir).expanduser().resolve()
self._paths.db: Path = self._paths.dir.joinpath("zotero.sqlite")
self._paths.storage: Path = self._paths.dir.joinpath("storage")
# engine
self._engine = create_engine("sqlite:///"+str(self._paths.db))
# frequently used mappings
self._maps = DummyDict()
with self._engine.connect() as conn:
self._maps.doctype2id, self._maps.id2doctype = get_item_types_mapping(conn)
self._maps.field2id, self._maps.id2field = get_field_names_mapping(conn)
self._maps.creatortype2id, self._maps.id2creatortype = get_creator_types_mapping(conn)
@property
def db(self): # pylint: disable=invalid-name
"""The path to underlying SQLite database."""
return self._paths.db
@property
def dir(self):
"""The path to the folder of Zotero data."""
return self._paths.dir
@property
def storage(self):
"""The path to the folder of attachments."""
return self._paths.storage
@property
def engine(self):
"""The underlying sqlalchemy.engine.Engine."""
return self._engine
@property
def doctype2id(self):
"""A dict of (str, int) of the mapping between document type names -> type id."""
return self._maps.doctype2id
@property
def id2doctype(self):
"""A dict of (int, str) of the mapping between document type id -> type name."""
return self._maps.id2doctype
@property
def field2id(self):
"""A dict of (str, int) of the mapping between document field names -> field id."""
return self._maps.field2id
@property
def id2field(self):
"""A dict of (int, str) of the mapping between document field id -> field name."""
return self._maps.id2field
@property
def creatortype2id(self):
"""A dict of (str, int) of the mapping between creator type names -> integer id."""
return self._maps.creatortype2id
@property
def id2creatortype(self):
"""A dict of (int, str) of the mapping between creator type id -> string name."""
return self._maps.id2creatortype
def get_docs(self, itemIDs=None, abs_attach_path=True, simplify_author=True):
"""A pandas.Dataframe of all documents with brief information.
Parameters
----------
abs_attach_path : bool
Whether to use absolute paths for attachment paths. If false, the paths are relative
to the Zotero data directory.
Returns
-------
pandas.DataFrame
A dataframe containing all items (except items with itemTypes of note and attachment).
"""
from . import read
from . import process
with self._engine.connect() as conn:
types = read.get_doc_types(conn, itemIDs=itemIDs, **self.doctype2id)
titles = read.get_doc_titles(conn, itemIDs=itemIDs, **self.doctype2id, **self.field2id)
pubs = read.get_doc_publications(
conn, itemIDs=itemIDs, **self.doctype2id, **self.field2id)
years = read.get_doc_years(
conn, itemIDs=itemIDs, **self.doctype2id, **self.field2id)
added = read.get_doc_added_dates(
conn, itemIDs=itemIDs, **self.doctype2id, **self.field2id)
authors = read.get_doc_authors(
conn, itemIDs=itemIDs, **self.doctype2id, **self.creatortype2id)
if abs_attach_path:
atts = read.get_doc_attachments(
conn, itemIDs=itemIDs, prefix=self.storage, **self.doctype2id)
else:
atts = read.get_doc_attachments(conn, itemIDs=itemIDs, **self.doctype2id)
results = authors.join([types, titles, pubs, years, added, atts], None, "outer").fillna("")
if simplify_author:
results["author"] = results["author"].map(process.authors_agg)
return results
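# Usage sketch (not from the original package): "~/Zotero" is a placeholder for a real
# Zotero data directory that contains zotero.sqlite.
if __name__ == "__main__":
    db = Database("~/Zotero")
    docs = db.get_docs(abs_attach_path=False)
    print(docs.head())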
|
StarcoderdataPython
|
8168637
|
n1 = int(input('Digite um número: '))
d = n1 * 2
t = n1 * 3
r = n1 ** 0.5
print(f'\033[7;30mSeu dobro é {d}, triplo {t} e raiz quadrada {r:.3f}.\033[m')
|
StarcoderdataPython
|
6635005
|
<reponame>williamsdoug/timeseries_fastai
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_data.ipynb (unless otherwise specified).
__all__ = ['TSBlock', 'stack_train_valid', 'TSDataLoaders']
# Cell
from .imports import *
from .core import *
from fastai.basics import *
from fastai.torch_core import *
from fastai.vision.data import get_grid
# Cell
def TSBlock(cls=TSeries):
"A TimeSeries Block to process one timeseries"
return TransformBlock(type_tfms=cls.create)
# Cell
def stack_train_valid(df_train, df_valid):
"Stack df_train and df_valid, adds `valid_col`=True/False for df_valid/df_train"
return pd.concat([df_train.assign(valid_col=False), df_valid.assign(valid_col=True)]).reset_index(drop=True)
# Cell
class TSDataLoaders(DataLoaders):
"A TimeSeries DataLoader"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, x_cols=None, label_col=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create a DataLoader from a pandas DataFrame"
y_block = ifnone(y_block, CategoryBlock)
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(TSBlock, y_block),
get_x=lambda o: o[x_cols].values.astype(np.float32),
get_y=ColReader(label_col),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_dfs(cls, df_train, df_valid, path='.', x_cols=None, label_col=None,
y_block=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create a DataLoader from a df_train and df_valid"
df = stack_train_valid(df_train, df_valid)
return cls.from_df(df, path, x_cols=x_cols, valid_col='valid_col', label_col=label_col,
y_block=y_block, item_tfms=item_tfms, batch_tfms=batch_tfms,**kwargs)
# Cell
@typedispatch
def show_batch(x: TSeries, y, samples, ctxs=None, max_n=10,rows=None, cols=None, figsize=None, **kwargs):
"Show batch for TSeries objects"
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=rows, ncols=cols, add_vert=1, figsize=figsize)
ctxs = show_batch[object](x, y, samples=samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
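# Usage sketch (hypothetical DataFrame layout; the column names and batch size are
# assumptions, not taken from the original notebook):
# x_cols = [f'feat_{i}' for i in range(140)]        # one column per time step
# dls = TSDataLoaders.from_dfs(df_train, df_valid,  # df_* hold one time series per row
#                              x_cols=x_cols, label_col='target', bs=64)
# dls.show_batch()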
|
StarcoderdataPython
|
8046647
|
from flair.data import Corpus
from flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings, CharacterEmbeddings, FlairEmbeddings, \
CamembertEmbeddings, BertEmbeddings, PooledFlairEmbeddings
from typing import List
import torch
from src.utils.monitoring import Monitor
torch.manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import numpy as np
np.random.seed(0)
from flair.data import Corpus
from flair.datasets import ColumnCorpus
import flair
import sys
if len(sys.argv) < 3:
print("Usage: Please give the name of the folder containing the train, dev, test sets and the output folder")
exit(0)
data_folder = sys.argv[1]
output_folder = sys.argv[2]
flair.cache_root = "cache/"
def create_flair_corpus(data_folder):
# define columns
columns = {0: 'text', 1: 'ner'}
# init a corpus using column format, data folder and the names of the train, dev and test files
corpus: Corpus = ColumnCorpus(data_folder, columns,
train_file='train.txt',
test_file='test.txt',
dev_file='dev.txt')
return corpus
# 1. get the corpus
corpus: Corpus = create_flair_corpus(data_folder)
print(corpus)
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
print(tag_dictionary.idx2item)
# 4. initialize embeddings
embedding_types: List[TokenEmbeddings] = [
# WordEmbeddings('fr'),
WordEmbeddings("/data/embeddings/doctrine/doctine_gensim_embeddings.gensim"),
# comment in this line to use character embeddings
CharacterEmbeddings(),
# comment in these lines to use flair embeddings
# FlairEmbeddings('fr-forward'),
# FlairEmbeddings('fr-backward'),
# bert embeddings
# BertEmbeddings('bert-base-multilingual-cased')
# CamembertEmbeddings()
# CCASS Flair Embeddings FWD
# FlairEmbeddings('/data/embeddings_CCASS/flair_language_model/jurinet/best-lm.pt'),
# CCASS Flair Embeddings BWD
# FlairEmbeddings('/data/embeddings_CCASS/flair_language_model/jurinet/best-lm-backward.pt')
]
monitor = Monitor(50)
embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger: SequenceTagger = SequenceTagger(hidden_size=100,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type,
rnn_layers=2,
dropout=0.5,
use_crf=True,
)
# 6. initialize trainer
from flair.trainers import ModelTrainer
from torch.optim.adam import Adam
trainer: ModelTrainer = ModelTrainer(tagger, corpus, optimizer=Adam)
trainer.num_workers = 8
# 7. start training
trainer.train(output_folder,
learning_rate=0.1,
mini_batch_size=8,
max_epochs=5,
use_amp=False,
embeddings_storage_mode="gpu")
monitor.stop()
# 8. plot weight traces (optional)
# from flair.visual.training_curves import Plotter
# plotter = Plotter()
# plotter.plot_weights('models/baseline_ner/weights.txt')
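# 9. tag new text with the trained model (optional sketch; "final-model.pt" is the
# file name flair writes by default, and the sentence below is only an example)
# from flair.data import Sentence
# trained_tagger = SequenceTagger.load(output_folder + "/final-model.pt")
# sentence = Sentence("Le tribunal de Paris a rendu sa décision.")
# trained_tagger.predict(sentence)
# print(sentence.to_tagged_string())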
|
StarcoderdataPython
|
6555945
|
<gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: 05_performance.ipynb (unless otherwise specified).
__all__ = ['load_pred_model', 'inference']
# Cell
from .imports import *
from .data_processing import *
from .anomaly import *
# Cell
import warnings
warnings.filterwarnings(action='once')
# Cell
def load_pred_model(learner_path,train_log_path,log_name,cols=['activity']):
log = import_log(train_log_path)
o,dls,categorify = training_dl(log,cat_names=cols)
loss=partial(multi_loss_sum,o)
emb_szs = get_emb_sz(o)
m=MultivariateModel(emb_szs)
learn=Learner(dls, m, path=learner_path, model_dir='.', loss_func=loss, metrics=get_metrics(o))
learn.load(log_name,with_opt=False)
m=learn.model.cuda()
return m, categorify
def inference(test_log_path,m,categorify,log_name,cols=['activity'],fixed_threshold=None,override_threshold_func=None):
if type(test_log_path)==str:
log = import_log(test_log_path)
else:
log = test_log_path
o = process_test(log,categorify,cols)
nsp,idx=predict_next_step(o,m)
score_df=multivariate_anomaly_score(nsp,o,idx,cols)
    if override_threshold_func is not None:
        y_true, y_pred = multivariate_anomalies(score_df, cols, idx, o, get_thresholds=override_threshold_func)
    else:
        y_true, y_pred = multivariate_anomalies(score_df, cols, idx, o, fixed_threshold=fixed_threshold)
nsp_acc= float(nsp_accuracy(o,idx,nsp[0]))
f1 = f1_score(y_true, y_pred)
acc = accuracy_score(y_true, y_pred)
precision = precision_score(y_true,y_pred)
recall = recall_score(y_true,y_pred)
return [log_name, nsp_acc, f1, acc, precision, recall]
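# Usage sketch (paths and log name below are placeholders, not from the original notebook):
# m, categorify = load_pred_model('models', 'data/train.csv', 'bpic_2012')
# row = inference('data/test.csv', m, categorify, 'bpic_2012')
# print(dict(zip(['log', 'nsp_acc', 'f1', 'acc', 'precision', 'recall'], row)))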
|
StarcoderdataPython
|
6538807
|
"""
17362 : 수학은 체육과목 입니다 2
URL : https://www.acmicpc.net/problem/17362
Input #1 :
3
Output #1 :
3
Input #2 :
1000
Output #2 :
2
"""
n = int(input())
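# The count pattern 1,2,3,4,5,4,3,2 repeats every 8 numbers, so only n % 8 matters.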
n = n % 8
if n == 1:
print(1)
elif (n == 0) or (n == 2):
print(2)
elif (n == 3) or (n == 7):
print(3)
elif (n == 4) or (n == 6):
print(4)
else:
print(5)
|
StarcoderdataPython
|
9616618
|
<filename>test_tools/OperateInterface.py<gh_stars>10-100
#!/usr/bin/python
#-*-coding:utf-8-*-
class OperateInterface(object):
def initiate(self,planes,bases,region_size,max_plane_battery,intruder_exposed_time, plane_sight, max_semo_intruder, target_move):
        assert 0, 'Must choose an algorithm to test!'
def decide(self,plane, planes, bases, found_intruders_position):
'''This function must return the moveable command for the plane.'''
        assert 0, 'Must choose an algorithm to test!'
return cmd
def get_info(self, step_info):
        '''The algorithm can get the info of this time step if needed.'''
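# A sketch of a concrete strategy built on the interface above (not from the original
# test tools; the (dx, dy) command format is an assumption).
class StayPutStrategy(OperateInterface):
    def initiate(self, planes, bases, region_size, max_plane_battery,
                 intruder_exposed_time, plane_sight, max_semo_intruder, target_move):
        pass  # no setup needed for this trivial strategy
    def decide(self, plane, planes, bases, found_intruders_position):
        return (0, 0)  # hypothetical "stay in place" move command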
|
StarcoderdataPython
|
1610478
|
<gh_stars>0
# RT - AutoMod
from typing import TYPE_CHECKING, Optional, Union, Dict, List
from discord.ext import commands, tasks
import discord
from collections import defaultdict
from time import time
from .modutils import check, assertion_error_handler
from .constants import CACHE_TIMEOUT
from .dataclass import DataManager
if TYPE_CHECKING:
from .types import SpamCacheData as CacheData
from .dataclass import Guild
from rtlib import Backend
class OldAutoMod(commands.Cog, DataManager):
COLORS = {
"normal": 0x66b223,
"warn": 0xDDBB04,
"error": 0xF288AA
}
def __init__(self, bot):
self.bot: "Backend" = bot
self.cache: Dict[int, Dict[int, "CacheData"]] = defaultdict(
lambda : defaultdict(dict))
self.guild_cache: List[int] = []
self.withdrawal_cache: Dict[int, int] = {}
self.remove_cache.start()
self.reset_warn.start()
for name in ("message", "invite_create", "member_join"):
self.bot.add_listener(self.trial, f"on_{name}")
super(commands.Cog, self).__init__(self)
@commands.group(
"old_automod", extras={
"headding": {
"ja": "古いスパム対策などのモデレーション機能",
"en": "old moderation features such as anti-spam"
}, "parent": "Other"
}
)
@commands.cooldown(1, 3, commands.BucketType.guild)
@commands.guild_only()
async def automod(self, ctx):
"""!lang ja
--------
古いAutoMod機能です。
新しいAutoMod機能を使ってください。
!lang en
--------
This is the old automod feature.
You should use new automod feature."""
if not ctx.invoked_subcommand:
await ctx.reply(
{"ja": "使用方法が違います。",
"en": "The usage is different."}
)
        # This block used to be the command's help text.
"""!lang ja
--------
スパム対策機能や絵文字制限そして招待可能チャンネル規制などの機能がある自動モデレーション機能です。
これから警告数というワードがでますがこれは誰かがスパムをした際などに加算される警告の数です。
この数がいくつになったらBANやミュートをするといった動作を作ることができます。
この機能は`rt!automod setup`を実行すれば使えるようになります。
この機能で実行した処罰のログは`rt>modlog`がチャンネルトピックにあるチャンネルに送信されます。
このログチャンネルは作っておくことを推奨します。
Notes
-----
スパム対策機能はデフォルトでは警告数が3になったらミュートで5でBANとなります。
そしてスパム検知レベルはデフォルトでは2で二回スパムしたと検知されると警告数が一つ上がります。
これらは設定で変更が可能です。
なお警告数はしばらくしたら30分以内にリセットされます。
それと管理者は処罰されません。
ミュートはデフォルトではオフですので設定をしましょう。
Warnings
--------
NekoBotなどの画像を表示するだけのような娯楽コマンドを何回か送信して、スパムとして検知されるということが起きかねません。
そのためそのようなBotがある場合はそれらのようなBotのためのチャンネルを作り例外設定にそのチャンネルを追加しましょう。
これのやり方の詳細についてはこれからででてくる`rt!automod ignore`コマンドのヘルプを見てください。
**説明をしっかり読まないと痛いめ見ます。**
特に先ほど言った通りミュートはデフォルトでオフです。BANの警告数をミュートのデフォルトに設定するかミュートを設定するかしましょう。
そして先ほど言った通り`rt!automod setup`をしないと機能しません。
Aliases
-------
am, 安全, モデレーション
!lang en
--------
Automatic moderation with anti-spam, emoji restrictions, and invite-only channel restrictions.
This is the number of warnings that will be added when someone spams.
You can create a behavior such as banning or muting when the number reaches this number.
This feature is available only when `rt! automod setup`.
Notes
-----
By default, the anti-spam function mutes the user when the number of warnings reaches 3, and bans the user when the number reaches 5.
The spam detection level is 2 by default, and if you are detected as spamming twice, the warning number goes up by one.
These can be changed in the settings.
Warnings
--------
You can send a couple of entertainment commands, such as a NekoBot, that just display images, and get spammed.
So if you have such Bots, create channels for them and add them to the exception list.
More details on how to do this will come later in `rt! automod ignore` command.
**If you don't read the explanation carefully, it will hurt.**
Aliases
-------
am"""
# @automod.command("setup", aliases=["設定"])
@check
@assertion_error_handler(
{"ja": "既に設定されています。",
"en": "It has already set."}
)
async def setup_(self, ctx):
"""!lang ja
--------
モデレーション機能を有効化します。
これを実行するとスパム対策機能が作動します。
(招待リンク規制や絵文字規制は設定をしないとデフォルトでは有効になりません。)
Aliases
-------
設定
!lang en
--------
Activate the moderation function.
Doing so will activate the anti-spam feature.
(Invitation link restrictions and emoji restrictions will not be enabled by default unless you set them up.) """
await self.setup(ctx.guild.id)
if ctx.guild.id not in self.guild_cache:
self.guild_cache.append(ctx.guild.id)
await ctx.reply(
embed=self.make_embed(
{"ja": "AutoModを有効にしました。\n",
"en": "I enabled AutoMod."}
)
)
@automod.command("setdown", aliases=["終了"])
@check
@assertion_error_handler(
{"ja": "設定が見つかりませんでした。",
"en": "Could not find the setting."}
)
async def setdown_(self, ctx):
"""!lang ja
--------
古いAutoModを無効にします。
Aliases
-------
終了
!lang en
--------
Disables the old automod feature."""
await self.setdown(ctx.guild.id)
if ctx.guild.id in self.guild_cache:
self.guild_cache.remove(ctx.guild.id)
await ctx.reply(
embed=self.make_embed(
{"ja": "AutoModを無効にしました。",
"en": "I disabled AutoMod."}
)
)
def make_embed(self, description: Union[str, Dict[str, str]], **kwargs) -> discord.Embed:
        # Builds an embed used for AutoMod replies.
if "color" not in kwargs:
kwargs["color"] = self.COLORS["normal"]
return discord.Embed(
title=self.__cog_name__,
description=description, **kwargs
)
async def update_setting(self, ctx, description, attr, *args, **kwargs):
        # Helper used by the setting commands.
try:
guild = await self.get_guild(ctx.guild.id)
except AssertionError:
await ctx.reply(self.PLZ)
else:
await getattr(guild, attr)(*args, **kwargs)
await ctx.reply(embed=self.make_embed(description))
return guild
if False:
@automod.group(aliases=["w", "警告"])
async def warn(self, ctx):
"""!lang ja
--------
警告数を管理するためのコマンドです。
Aliases
-------
w, 警告
!lang en
--------
This command is used to manage the number of warnings.
Aliases
-------
w"""
if not ctx.invoked_subcommand:
await self.automod(ctx)
PLZ = {
"ja": "このサーバーはオートモデレーションが有効になっていません。\n" \
"`rt!automod setup`を実行してください。",
"en": "Auto-moderation is not enabled on this server.\n" \
"Please, run `rt!automod setup`."
}
WARN_ERROR = {
"ja": "警告数は0以上100以下である必要があります。",
"en": "The number of warnings to ban must be between 0 and 100."
}
@warn.command("set", aliases=["設定", "s"])
@check
@assertion_error_handler(WARN_ERROR)
async def set_(self, ctx, warn: float, *, target: discord.Member):
"""!lang ja
--------
指定したメンバーの警告数を設定します。
設定した場合はその人の警告数がチェックされるため、BANまたはミュートする警告数に設定した警告数が達している場合は処罰が実行されます。
警告数は0以上100以下である必要があります。
Parameters
----------
warn : float
設定する警告数です。
小数点を含むじょのにできます。
target : メンバーのメンションまたは名前
警告数を設定する対象のメンバーです。
Aliases
-------
s, 設定
!lang en
--------
Set the number of warnings for the specified member.
If set, the number of warnings for that person will be checked, and if the number of warnings set reaches the number of warnings to be banned or muted, the punishment will be executed.
The number of warnings must be between 0 and 100.
Parameters
----------
warn : float
The number of warnings to set.
The number of warnings to set, including decimal points.
target : member mention or name
The target member for which to set the number of warnings.
Aliases
-------
s"""
if warn >= 0:
guild = await self.update_setting(
ctx, {
"ja": f"{target.mention}の警告を`{warn}`に設定しました。",
"en": f"Set the warning for {target.mention} to `{warn}`."
}, "set_warn", target.id, warn
)
await guild.trial_user(target)
else:
await ctx.reply("警告数はゼロ以上である必要があります。")
@warn.command(aliases=["ミュート", "m"])
@check
@assertion_error_handler(WARN_ERROR)
async def mute(self, ctx, warn: float, *, role: discord.Role):
"""!lang ja
--------
いくつの警告数になったら何のロールを付与してミュートにするかを設定します。
Parameters
----------
warn : float
いくつの警告数になったらミュートにするかです。
role : ロールのメンションまたは名前
ミュートする際に付与するロールです。
Warnings
--------
警告数を低く設定しすぎるとたまたまスパムとして誤検出されただけでミュートなどになりかねません。
これには注意してください。
そしてこれはデフォルトでは無効となっています。
理由は仕組みはロール付与によるミュートだからです。
なので設定をしないとスパムする人がいてもミュートはしません。
(ですがご安心を、その人がスパムし続ける場合はBANされます。)
Notes
-----
デフォルトでは3となっています。
また、ロールを付与ではなく剥奪もしたいという場合は`linker`という機能を使ってみましょう。
`rt!help linker`からヘルプを表示できます。
Aliases
-------
m, ミュート
!lang en
--------
Set the number of warnings to be granted and muted.
Parameters
----------
warn : float
How many warnings are there before you mute them?
role : The name or mention of the role
The role to grant when muting.
Warnings
--------
If you set the number of warnings too low, you may end up muting the spam just by chance.
Be careful with this.
This is disabled by default.
        The reason is that muting works by granting a role.
        So if you don't set it up, spammers will not be muted.
        (But don't worry: if they keep spamming, they will still be banned.)
Notes
-----
The default is 3.
If you also want to revoke a role rather than grant it, you can use `linker`.
`rt!help linker`."""
await self.update_setting(
ctx, {
"ja": f"ミュートにする警告数を`{warn}`にしました。",
"en": f"The number of warnings to mute has been set to `{warn}`."
}, "mute", warn, role.id
)
@warn.command(aliases=["バン", "禁止"])
@check
@assertion_error_handler(WARN_ERROR)
async def ban(self, ctx, warn: float):
"""!lang ja
--------
いくつの警告数になったらBANをするかを設定します。
Parameters
----------
warn : float
いくつの警告数にするかです。
Notes
-----
デフォルトは5です。
Warnings
--------
低く設定した場合誤検出のスパムでBANされかねないので低く設定するのは非推奨です。
Aliases
-------
バン, 禁止
!lang en
--------
Set the number of warnings to BAN.
Parameters
----------
warn : float
How many warnings?
Notes
-----
The default is 5.
Warnings
--------
Setting it low is not recommended, as it can result in BAN for false positive spam."""
await self.update_setting(
ctx, {
"ja": f"BANをする警告数を`{warn}`にしました。",
"en": f"The number of warnings to ban has been set to `{warn}`."
}, "ban", warn
)
@automod.command(aliases=["l", "レベル"])
@check
@assertion_error_handler(
{"ja": "レベルは1以上100以下である必要があります。",
"en": "The level must be between 1 and 100, inclusive."}
)
async def level(self, ctx, level: int):
"""!lang ja
--------
スパム検知レベルを設定するコマンドです。
設定したレベルの数だけスパムとして認識したら警告数を一つ上げます。
デフォルトは2で二回スパムとして認識されたら警告数を一つあげるということになります。
Parameters
----------
level : int
設定するスパム検知レベルです。
Notes
-----
1以上100以下である必要があります。
Warnings
--------
そレベルを100などの高い数にするとスパムが検知されても処罰がされるまでとっても時間がかかります。
なので普通は変えるとしても1~4までのどこかにするのを推奨します。
!lang en
--------
This command sets the spam detection level.
Raise the number of warnings by one if the number of levels you set is recognized as spam.
The default is 2, which means that if it is seen twice as spam, it will raise one warning.
Parameters
----------
level: int
The spam detection level to set.
Notes
-----
Must be between 1 and 100, inclusive.
Warnings
--------
A low number such as 1 will be a big problem.
And if you set the level to a high number such as 100, it will take a very long time for spam to be detected and punished.
So I usually recommend to change it to somewhere up to 2 ~ 6."""
await self.update_setting(
ctx, {
"ja": f"スパム検知レベルを`{level}`に設定しました。",
"en": f"Spam detection level set to `{level}`."
}, "level", level
)
@automod.command(aliases=["e", "絵文字"])
@check
@assertion_error_handler(
{"ja": "絵文字数規制の絵文字数は0以上4000以下である必要があります。",
"en": "The number of pictograms in the pictogram count restriction must be between 0 and 4000."}
)
async def emoji(self, ctx, count: int):
"""!lang ja
--------
送信しても良い絵文字の数を設定します。
この送信可能絵文字数を超えた数の絵文字を送信した場合は警告数が`0.5`上がります。
Parameters
----------
count : int
送信しても良い絵文字の数です。
0以上4000以下である必要があります。
Notes
-----
これはデフォルトでオフです。
もしこれを設定する場合ルールの記載するなどした方が親切です。
Aliases
-------
e, 絵文字
!lang en
--------
You can set the number of pictographs that can be sent.
If you send more emoji than this number, the number of warnings increases by `0.5`.
Parameters
----------
count : int
The number of pictographs that can be sent.
Must be greater than or equal to 0 and less than or equal to 4000.
Notes
-----
This is off by default.
If you set this up, it would be helpful to write down the rules.
Aliases
-------
e"""
await self.update_setting(
ctx, {
"ja": f"メッセージで有効な絵文字の数を`{count}`で設定しました。",
"en": f"The number of valid emoji in a message is now set by `{count}`."
}, "emoji", count
)
@automod.group(aliases=["例外", "無視", "igs"])
async def ignore(self, ctx):
"""!lang ja
--------
スパムとしてチェックを行わない例外のチャンネルまたはロールを設定できます。
Aliases
-------
igs, 例外, 無視
!lang en
--------
You can configure channels or roles for exceptions that are not checked as spam.
Aliases
-------
igs"""
if not ctx.invoked_subcommand:
await self.ignore_list(ctx)
@ignore.command("add", aliases=["追加"])
@check
@assertion_error_handler(
{"ja": "その例外は既に追加されています。",
"en": "The exception is already added."}
)
async def add_ignore(self, ctx, *, obj: Union[discord.TextChannel, discord.Role]):
"""!lang ja
-------
スパムを検知しないチャンネルまたは持っていたらスパム検知をしないロールを設定します。。
Parameters
----------
channelOrRole : テキストチャンネルかロールのメンションまたはロール
例外に設定するチャンネルかロールです。
Notes
-----
Nekobotなどの画像表示などのコマンドを実行するチャンネルなどに設定すると良いです。
Warnings
--------
例外チャンネルということはそのチャンネルではスパムをしても何も処罰はされないということです。
ですのでクールダウンを設定するなどをすることを推奨します。
Tips:RTの`cooldown`コマンドで`3`秒などにクールダウンを設定できます。
Aliases
-------
追加
!lang en
--------
Set a channel that will not detect spam or a role that will not detect spam if you have it.
Parameters
----------
channelOrRole : a mention or role of a text channel or role
The channel or role to set as an exception.
Notes
-----
It is good to set it to a channel to execute commands such as displaying images such as Nekobot.
Warnings
--------
An exception channel means that spamming on that channel won't get you punished.
So I recommend you to set cooldown.
Tips: You can set the cooldown to `3` seconds using the RT `cooldown` command."""
await self.update_setting(
ctx, {
"ja": f"例外リストに`{obj.name}`を追加しました。",
"en": f"I added `{obj.name}` to ignore list."
}, "add_ignore", obj.id
)
@ignore.command("remove", aliases=["削除", "rm", "del", "delete"])
@check
@assertion_error_handler(
{"ja": "その例外が見つかりませんでした。",
"en": "The exception is not found."}
)
async def remove_ignore(self, ctx, *, obj: Union[discord.TextChannel, discord.Role]):
"""!lang ja
--------
例外設定を削除します。
Parameters
----------
channelOrRole : テキストチャンネルかロールのメンションまたはロール
例外に設定したチャンネルかロールです。
Aliases
-------
rm, del, delete, 削除
!lang en
--------
Remove the exception configuration.
Parameters
----------
channelOrRole : a mention or role of a text channel or role
The channel or role to set as an exception.
Aliases
-------
rm, del, delete"""
await self.update_setting(
ctx, {
"ja": f"例外リストから{obj.mention}を削除しました。",
"en": f"I removed {obj.mention} from exception list."
}, "remove_ignore", obj.id
)
@ignore.command("list", aliases=["一覧", "l"])
async def ignore_list(self, ctx):
"""!lang ja
--------
設定されている例外のリストです。
Aliases
-------
l, 一覧
!lang en
--------
Display the exception configuration.
Aliases
-------
l"""
data = (await self.get_guild(ctx.guild.id)).data
if "ignores" in data:
await ctx.reply(
embed=self.make_embed(
", ".join(
getattr(
ctx.guild.get_channel(sid) or ctx.guild.get_role(sid),
"mention", "*見つかりませんでした。*"
) for sid in data["ignores"]
)
)
)
else:
await ctx.reply(
{"ja": "例外リストは空です。",
"en": "Exception list is nothing."}
)
@automod.group(aliases=["ie", "招待"])
async def invites(self, ctx):
"""!lang ja
--------
招待を規制します。
この機能を有効にすると指定されたチャンネル以外で作成した招待リンクは自動で削除されます。
※管理者権限を持っている人は作っても削除されません。
Aliases
-------
ie, 招待
!lang en
--------
Restrict invitations.
When this function is enabled, invitation links created outside the specified channel are automatically deleted.
        *Invitation links created by members with administrator permission are not deleted.
Aliases
-------
ie"""
if not ctx.invoked_subcommand:
await self.invites_list(ctx)
@invites.command()
@check
@assertion_error_handler(PLZ)
async def onoff(self, ctx):
"""!lang ja
--------
招待リンク規制の有効または無効を切り替えします。
!lang en
--------
Enable or disable invitation link restrictions."""
onoff = "ON" if await (
await self.get_guild(ctx.guild.id)
).trigger_invite() else "OFF"
await ctx.reply(
embed=self.make_embed(
{
"ja": f"招待リンク規制を{onoff}にしました。",
"en": f"I set Invitation link restriction {onoff}."
}
)
)
@invites.command("list", aliases=["一覧", "l"])
@assertion_error_handler(PLZ)
async def invites_list(self, ctx):
"""!lang ja
--------
招待リンクの作成が可能なチャンネルのリストを表示します。
Aliases
-------
l, 一覧
!lang en
--------
Displays a list of channels for which you can create invitation links.
Aliases
-------
l"""
await ctx.reply(
"**招待リンク規制例外チャンネル一覧**\n" \
", ".join(
f"<#{cid}>" for cid in (
await self.get_guild(ctx.guild.id)
).invites
)
)
@invites.command(aliases=["追加", "a"])
@check
@assertion_error_handler(
{"ja": "これ以上追加できません。",
"en": "No more can be added."}
)
async def add(self, ctx):
"""!lang ja
--------
招待リンク作成可能チャンネルリストにこのコマンドを実行したチャンネルを追加します。
`rt!automod invites onoff on`で招待リンク規制を有効にしていないと追加しても無効です。
Aliases
-------
a, 追加
!lang en
--------
Adds the channel on which you run this command to the Invite Links Available Channels list.
        This has no effect unless invite link restriction has been enabled with `rt!automod invites onoff on`.
Aliases
-------
a"""
await self.update_setting(
ctx, {
"ja": "このチャンネルを招待有効チャンネルとして設定しました。\n" \
"注意:`rt!automod invites onoff`で招待リンク規制を有効にしていない場合何も起きません。",
"en": "I set here as everyone can make invite."
}, "add_invite_channel", ctx.channel.id
)
@invites.command(aliases=["削除", "rm", "del", "delete"])
@check
async def remove(self, ctx):
"""!lang ja
--------
招待リンク作成可能チャンネルリストからこのコマンドを実行したチャンネルを削除します。
Aliases
-------
rm, del, delete, 削除
!lang en
--------
Removes the channel on which you run this command from the Invite Link Creatable Channels list.
Aliases
-------
rm, del, delete"""
await self.update_setting(
ctx, {
"ja": "このチャンネルを招待有効チャンネルではなくしました。",
"en": "This channel is no longer an invitation enabled channel."
}, "remove_invite_channel", ctx.channel.id
)
@automod.command(aliases=["即抜けBAN", "wd"])
@check
@assertion_error_handler(
{"ja": "秒数は10以上300以下である必要があります。",
"en": "Seconds must be 10 to 300 inclusive."}
)
async def withdrawal(self, ctx, seconds: int):
"""!lang ja
--------
即抜け後にすぐ参加した人をBANする設定です。
サーバー参加後に指定した秒数以内に退出そして参加をした場合にそのユーザーをBANするという設定です。
Parameters
----------
seconds : int
何秒以内に退出して参加をしたらBANをするかです。
Aliases
-------
wd, 即抜けBAN
!lang en
--------
This is the setting to BAN the person who joined immediately after the instant exit.
BAN the user if the user exits and joins within the specified number of seconds after joining the server.
Parameters
----------
seconds: int
        How many seconds after joining a user must leave (and rejoin) to be banned.
Aliases
-------
wd"""
await self.update_setting(
ctx, {
"ja": f"即抜けBANを`{seconds}`秒で設定しました。",
"en": f"We set it to BAN when a member joins the server and leaves within `{seconds}` seconds."
}, "set_withdrawal", seconds
)
@automod.group(aliases=["ir", "招待リンク削除"])
@check
async def inviteremover(self, ctx):
"""!lang ja
--------
招待リンクが送信された際にそのメッセージを消すようにします。
このコマンドを実行することで有効/無効を切り替えることができます。
Aliases
-------
ir, 招待リンク削除
!lang en
--------
Make that message disappear when the invitation link is sent.
You can enable/disable it by executing this command.
Aliases
-------
ir"""
await self.update_setting(
ctx, "Ok", "invite_remover_toggle"
)
@inviteremover.command()
async def add(self, ctx, channel: Optional[discord.TextChannel]):
"""!lang ja
--------
実行したチャンネルを招待リンク削除の例外として設定します。
!lang en
--------
Set the executed channel as an exception to delete the invitation link."""
channel = channel or ctx.channel
await self.update_setting(
ctx, "Ok", "add_invite_remover_ignore", channel.id
)
@inviteremover.command()
@assertion_error_handler(
{"ja": "そのチャンネルは登録されていません。",
"en": "The channel is not registered."}
)
async def remove(self, ctx, channel: Optional[Union[discord.TextChannel, discord.Object]]):
"""!lang ja
--------
実行したチャンネルの招待リンク削除設定の例外を削除します。
!lang en
--------
Removes exceptions to the delete invitation link setting for the executed channel."""
channel = channel or ctx.channel
await self.update_setting(
ctx, "Ok", "remove_invite_remover_ignore", channel.id
)
def cog_unload(self):
self.remove_cache.cancel()
self.reset_warn.cancel()
@tasks.loop(seconds=30)
async def remove_cache(self):
        # Loop that clears the cached "last sent message" data used for spam detection.
now, removed = time(), []
for cid in list(self.cache.keys()):
for uid in list(self.cache[cid].keys()):
if now - self.cache[cid][uid]["time"] >= CACHE_TIMEOUT:
del self.cache[cid][uid]
removed.append(cid)
if not self.cache[cid]:
del self.cache[cid]
        # Clear the cache used for the quick-leave BAN.
for mid, next_ in list(self.withdrawal_cache.items()):
if now >= next_:
del self.withdrawal_cache[mid]
async def _get_guild(
self, guild_id: int, if_not_exists_remove: bool = True
) -> Optional["Guild"]:
        # Fetches an instance of the Guild (automod) class.
        # If the guild is listed in guild_cache but cannot be found, remove it from guild_cache.
try:
guild = await self.get_guild(guild_id)
except AssertionError:
if if_not_exists_remove and guild_id in self.guild_cache:
self.guild_cache.remove(guild_id)
else:
return guild
@tasks.loop(minutes=15)
async def reset_warn(self):
        # Loop that resets the warn counts.
for guild_id in self.guild_cache:
if (guild := await self._get_guild(guild_id)):
for user_id in list(guild.data.get("warn", {}).keys()):
if guild.data["warn"][user_id]:
await guild.set_warn(user_id, 0.0)
async def trial(self, obj: Union[discord.Message, discord.Invite, discord.Member]):
        # Called from events that need a punishment check; runs the moderation trial.
if obj.guild and obj.guild.id in self.guild_cache:
if (guild := await self._get_guild(obj.guild.id)):
await getattr(guild, f"trial_{obj.__class__.__name__.lower()}")(obj)
def setup(bot):
bot.add_cog(OldAutoMod(bot))
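# Usage sketch: the cog is presumably loaded as an extension by the bot, e.g.
# bot.load_extension("cogs.automod.oldautomod")  # the module path is an assumption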
|
StarcoderdataPython
|
12811838
|
<filename>config.py
import os
from dotenv import load_dotenv
from setup import basedir
try:
load_dotenv()
except FileNotFoundError:
pass
class BaseConfig(object):
"""
Base Configuration for placeholder
"""
SECRET_KEY = "SO_SECURE"
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URI_base')
SQLALCHEMY_TRACK_MODIFICATIONS = True
class TestingConfig(object):
"""Development configuration."""
TESTING = True
DEBUG = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URI_test')
DEBUG_TB_ENABLED = True
PRESERVE_CONTEXT_ON_EXCEPTION = False
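# Usage sketch (a hypothetical Flask app factory elsewhere in the project, not part of this file):
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object("config.TestingConfig")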
|
StarcoderdataPython
|
4877398
|
from django.conf.urls import patterns
from django.conf.urls import include
from django.conf.urls import url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
handler400 = '{{ cookiecutter.repo_name }}.errors.bad_request'
handler403 = 'django.views.defaults.permission_denied'
handler404 = 'django.views.defaults.page_not_found'
handler500 = '{{ cookiecutter.repo_name }}.errors.server_error'
urlpatterns = patterns(
'',
url(r'^', include('core.urls', namespace='core')),
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
from django.views.generic import TemplateView
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns += patterns(
'',
(r'^403/$', TemplateView.as_view(template_name="403.html")),
(r'^404/$', TemplateView.as_view(template_name="404.html")),
(r'^500/$', TemplateView.as_view(template_name="500.html")),
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
if settings.DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
|
StarcoderdataPython
|
8063038
|
<reponame>msrb/nvdlib
import datetime
import cpe
import typing
from collections import namedtuple
from enum import Enum
from itertools import chain
from nvdlib.utils import AttrDict
VERSION = '4.0'
class CVE(object):
"""Representation of a CVE entry from the NVD database."""
def __init__(self, cve_id: str, affects: "AffectsNode", references: list, description: str,
configurations: list, impact: dict, published_date: str, last_modified_date: str):
self.cve_id = cve_id
self.affects = affects
self.references = references or []
self.description = description or ""
self.configurations = configurations or []
self.impact = impact
self.published_date = published_date
self.last_modified_date = last_modified_date
# TODO: check for missing attributes
def get_cpe(self, cpe_type=None, nodes=None) -> list:
def _is_type(uri: str, t: str):
return uri.startswith("cpe:/%s" % t)
if nodes is None:
nodes = self.configurations
cpe_list = list()
for node in nodes:
if node.children:
cpe_list.extend(self.get_cpe(cpe_type=cpe_type, nodes=node.children))
cpe_list.extend([x for x in node.cpe if _is_type(x.cpe22Uri, cpe_type)])
return cpe_list
def get_affected_vendors(self) -> typing.List[str]:
"""Get affected vendors.
:returns: List[str], list of affected vendors
"""
return list(self.affects.keys())
def get_affected_products(self, vendor: str = None) -> typing.List["ProductNode"]:
"""Get affected products.
:returns: List[ProductNode], list of affected products
"""
affected_products = list()
if not vendor:
affected_products = list(chain(*self.affects.values()))
        else:
            affected_products.extend([
                p for p in chain(*self.affects.values())
                if p.vendor == vendor
            ])
return affected_products
def get_affected_versions(self, filter_by: typing.Union[tuple, str]) -> typing.List[str]:
"""Get affected versions.
:param filter_by: typing.Union[tuple, str]
Either tuple of (vendor, product) or cpe string to uniquely identify which
affected products should be returned.
:returns: List[str], list of affected versions of a given product
"""
if isinstance(filter_by, tuple):
v_name, p_name = filter_by
elif isinstance(filter_by, str):
parsed_cpe = cpe.CPE(filter_by)
v_name, = parsed_cpe.get_vendor()
p_name, = parsed_cpe.get_product()
else:
raise TypeError(
"Argument `by` expected to be {}, got {}".format(
typing.Union[tuple, str], type(filter_by)
))
affected_versions = list()
for product in self.affects[v_name]:
if product.name.startswith(p_name):
affected_versions.extend(
[version for version in product.version_data]
)
return affected_versions
@classmethod
def from_dict(cls, data):
"""Initialize class from cve json dictionary."""
date_format = '%Y-%m-%dT%H:%MZ'
published_date = datetime.datetime.strptime(data.get('publishedDate'), date_format)
last_modified_date = datetime.datetime.strptime(data.get('lastModifiedDate'), date_format)
cve_dict = data.get('cve', {})
# CVE ID
cve_id = cve_dict.get('CVE_data_meta', {}).get('ID')
# Affects
affects = AffectsNode.from_dict(cve_dict.get('affects', {}))
# References
references_data = cve_dict.get('references', {}).get('reference_data', [])
references = [x.get('url') for x in references_data]
# English description
description_data = cve_dict.get('description', {}).get('description_data', [])
description = ""
for lang_description in description_data:
if lang_description.get('lang') == 'en':
description = lang_description.get('value', '')
break
# Impact
impact = Impact.from_dict(data.get('impact', {}))
# Configurations
configurations = [ConfigurationNode.from_dict(x) for x in data.get('configurations', {}).get('nodes', [])]
return cls(cve_id=cve_id,
affects=affects,
references=references,
description=description,
configurations=configurations,
impact=impact,
published_date=published_date,
last_modified_date=last_modified_date)
class AffectsNode(AttrDict):
"""AffectsNode is a dict structure of signatures {version: product}."""
def __init__(self, **kwargs):
"""Initialize AffectsNode."""
super(AffectsNode, self).__init__(**kwargs)
@classmethod
def from_dict(cls, node_dict):
"""Initialize AffectsNode from dictionary.
:param node_dict: dict, expected NVD `affects` json schema
"""
vendor_data = node_dict.get('vendor', {}).get('vendor_data', []) # type: list
vendor_dct = dict()
for v_entry in vendor_data:
vendor = v_entry.get('vendor_name', None)
if vendor:
vendor_dct[vendor] = list()
for p_entry in v_entry.get('product', {}).get('product_data', []):
node = ProductNode(vendor, p_entry)
vendor_dct[vendor].append(node)
return cls(**vendor_dct)
class ProductNode(namedtuple('ProductNode', ['name', 'vendor', 'version_data'])):
"""ProductNode is a class representing product.
The product is represented by its name, vendor and list of versions.
"""
def __new__(cls, vendor, product_dict):
"""Create ProductNode.
:param vendor: str, product vendor
:param product_dict: dict, expected NVD `product_data` json schema
"""
name = product_dict.get('product_name', None)
version_data = product_dict.get('version', {}).get('version_data', [])
version_data = [v.get('version_value', None) for v in version_data]
return super(ProductNode, cls).__new__(
cls, name, vendor, version_data
)
class ConfigurationOperators(Enum):
OR = 1
AND = 2
@classmethod
def from_string(cls, operator_str):
if operator_str.upper() not in [x.name for x in cls]:
raise ValueError('Unknown operator {op}'.format(op=operator_str))
return cls.OR if operator_str.upper() == 'OR' else cls.AND
class ConfigurationNode(object):
def __init__(self, cpe: list = None, operator=ConfigurationOperators.OR, negate=False, children: list = None):
self._cpe = cpe or []
self._operator = operator
self._negate = negate or False
self._children = children or []
@property
def cpe(self):
return self._cpe
@property
def operator(self):
return self._operator
@property
def negate(self):
return self._negate
@property
def children(self):
return self._children
@classmethod
def from_dict(cls, node_dict):
kwargs = {}
if 'cpe' in node_dict:
kwargs['cpe'] = [CPE.from_dict(x) for x in node_dict['cpe']]
if 'operator' in node_dict:
kwargs['operator'] = ConfigurationOperators.from_string(node_dict['operator'])
if 'negate' in node_dict:
kwargs['negate'] = node_dict['negate']
if 'children' in node_dict:
kwargs['children'] = [ConfigurationNode.from_dict(x) for x in node_dict['children']]
return cls(**kwargs)
class CPE(object):
def __init__(self, vulnerable: bool, cpe22Uri: str, cpe23Uri: str, versionStartIncluding: str = None,
versionStartExcluding: str = None, versionEndIncluding: str = None, versionEndExcluding: str = None):
self._vulnerable = vulnerable
self._cpe22Uri = cpe22Uri
self._cpe23Uri = cpe23Uri
self._cpe_parser = cpe.CPE(cpe22Uri)
self._versionExact = cpe.CPE(cpe22Uri).get_version()[0] or None
self._versionStartIncluding = versionStartIncluding
self._versionStartExcluding = versionStartExcluding
self._versionEndIncluding = versionEndIncluding
self._versionEndExcluding = versionEndExcluding
def is_application(self):
return self._cpe_parser.is_application()
def is_hardware(self):
return self._cpe_parser.is_hardware()
def is_operating_system(self):
return self._cpe_parser.is_operating_system()
@property
def vendor(self):
return self._cpe_parser.get_vendor()[0]
@property
def product(self):
return self._cpe_parser.get_product()[0]
def get_version_tuple(self):
return (
self._versionExact,
self._versionEndExcluding, self._versionEndIncluding,
self._versionStartIncluding, self._versionStartExcluding
)
@property
def vulnerable(self):
return self._vulnerable
@property
def cpe22Uri(self):
return self._cpe22Uri
@property
def cpe23Uri(self):
return self._cpe23Uri
@property
def versionExact(self):
return self._versionExact
@property
def versionStartIncluding(self):
return self._versionStartIncluding
@property
def versionStartExcluding(self):
return self._versionStartExcluding
@property
def versionEndIncluding(self):
return self._versionEndIncluding
@property
def versionEndExcluding(self):
return self._versionEndExcluding
@classmethod
def from_dict(cls, cpe_dict):
return cls(**cpe_dict)
def __eq__(self, other):
if not isinstance(other, CPE):
return False
if not self.__dict__ == other.__dict__:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.cpe23Uri
class Impact(object):
def __init__(self, baseMetricV2: "BaseMetric", baseMetricV3: "BaseMetric"):
self._baseMetricV2 = baseMetricV2 or None
self._baseMetricV3 = baseMetricV3 or None
@property
def baseMetricV2(self):
return self._baseMetricV2
@property
def baseMetricV3(self):
return self._baseMetricV3
@classmethod
def from_dict(cls, impact_dict):
baseMetricV2 = None
baseMetricV3 = None
if impact_dict.get('baseMetricV2'):
baseMetricV2 = BaseMetric.from_dict(impact_dict.get('baseMetricV2'))
if impact_dict.get('baseMetricV3'):
baseMetricV3 = BaseMetric.from_dict(impact_dict.get('baseMetricV3'))
return cls(baseMetricV2=baseMetricV2, baseMetricV3=baseMetricV3)
class BaseMetric(object):
def __init__(self, cvss: "CVSS", severity: str, exploitabilityScore: int, impactScore: int, obtainAllPrivilege=False,
obtainUserPrivilege=False, obtainOtherPrivilege=False, userInteractionRequired=False):
self._cvss = cvss
self._severity = severity
self._exploitabilityScore = exploitabilityScore
self._impactScore = impactScore
self._obtainAllPrivilege = obtainAllPrivilege
self._obtainUserPrivilege = obtainUserPrivilege
self._obtainOtherPrivilege = obtainOtherPrivilege
self._userInteractionRequired = userInteractionRequired
@property
def severity(self):
return self._severity
@property
def exploitabilityScore(self):
return self._exploitabilityScore
@property
def impactScore(self):
return self._impactScore
@property
def obtainAllPrivilege(self):
return self._obtainAllPrivilege
@property
def obtainUserPrivilege(self):
return self._obtainUserPrivilege
@property
def obtainOtherPrivilege(self):
return self._obtainOtherPrivilege
@property
def userInteractionRequired(self):
return self._userInteractionRequired
@property
def cvssV2(self):
return self._cvss
@property
def cvssV3(self):
return self._cvss
@classmethod
def from_dict(cls, metrics_dict):
cvss_dict = metrics_dict.get('cvssV2') or metrics_dict.get('cvssV3')
cvss = CVSS.from_dict(cvss_dict)
return cls(cvss=cvss,
severity=metrics_dict.get('severity'),
exploitabilityScore=metrics_dict.get('exploitabilityScore'),
impactScore=metrics_dict.get('impactScore'),
obtainAllPrivilege=(str(metrics_dict.get('obtainAllPrivilege', '')).lower() == 'true'),
obtainUserPrivilege=(str(metrics_dict.get('obtainUserPrivilege', '')).lower() == 'true'),
obtainOtherPrivilege=(str(metrics_dict.get('obtainOtherPrivilege', '')).lower() == 'true'),
userInteractionRequired=(str(metrics_dict.get('userInteractionRequired', '')).lower() == 'true'))
class CVSS(object):
def __init__(self, version: str, vectorString: str, accessVector: str,
accessComplexity: str, authentication: str, confidentialityImpact: str,
integrityImpact: str, availabilityImpact: str, baseScore: int):
self._version = version
self._vectorString = vectorString
self._accessVector = accessVector
self._accessComplexity = accessComplexity
self._authentication = authentication
self._confidentialityImpact = confidentialityImpact
self._integrityImpact = integrityImpact
self._availabilityImpact = availabilityImpact
self._baseScore = baseScore
@property
def version(self):
return self._version
@property
def vectorString(self):
return self._vectorString
@property
def accessVector(self):
return self._accessVector
@property
def accessComplexity(self):
return self._accessComplexity
@property
def authentication(self):
return self._authentication
@property
def confidentialityImpact(self):
return self._confidentialityImpact
@property
def integrityImpact(self):
return self._integrityImpact
@property
def availabilityImpact(self):
return self._availabilityImpact
@property
def baseScore(self):
return self._baseScore
@classmethod
def from_dict(cls, cvss_dict):
return cls(version=cvss_dict.get('version'),
vectorString=cvss_dict.get('vectorString'),
accessVector=cvss_dict.get('accessVector'),
accessComplexity=cvss_dict.get('accessComplexity'),
authentication=cvss_dict.get('authentication'),
confidentialityImpact=cvss_dict.get('confidentialityImpact'),
integrityImpact=cvss_dict.get('integrityImpact'),
availabilityImpact=cvss_dict.get('availabilityImpact'),
baseScore=cvss_dict.get('baseScore'))
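# Usage sketch (a hand-made stub record, not real NVD data; it only includes the
# fields that CVE.from_dict above actually consumes).
if __name__ == '__main__':
    sample = {
        'publishedDate': '2020-01-01T00:00Z',
        'lastModifiedDate': '2020-01-02T00:00Z',
        'cve': {
            'CVE_data_meta': {'ID': 'CVE-2020-0001'},
            'affects': {'vendor': {'vendor_data': [{
                'vendor_name': 'example',
                'product': {'product_data': [{
                    'product_name': 'widget',
                    'version': {'version_data': [{'version_value': '1.0'}]}}]}}]}},
            'references': {'reference_data': [{'url': 'https://example.com/advisory'}]},
            'description': {'description_data': [{'lang': 'en', 'value': 'Example flaw.'}]},
        },
        'impact': {},
        'configurations': {'nodes': []},
    }
    entry = CVE.from_dict(sample)
    print(entry.cve_id, entry.get_affected_vendors(), entry.get_affected_versions(('example', 'widget')))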
|
StarcoderdataPython
|
9643406
|
# Generated by Django 3.1.1 on 2020-11-21 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='HealthEmergency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now=True)),
('problem', models.TextField(help_text='Problem : ')),
('longitude', models.CharField(blank=True, max_length=250, null=True)),
('latitude', models.CharField(blank=True, max_length=250, null=True)),
('date', models.CharField(blank=True, max_length=250, null=True)),
],
),
migrations.CreateModel(
name='HealthTest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.CharField(blank=True, choices=[('covid_symptoms', 'Covid-19 Symptoms')], max_length=250, null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('remarksDoc', models.CharField(blank=True, max_length=255, null=True)),
('remarksPat', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Jobs',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=250, null=True)),
('description', models.TextField(blank=True, null=True)),
('pay', models.IntegerField(blank=True, null=True)),
('skillsrequired', models.TextField(blank=True, null=True)),
('mobile', models.CharField(blank=True, max_length=16, null=True)),
('location', models.CharField(blank=True, max_length=250, null=True)),
],
),
migrations.CreateModel(
name='PoliceEmergency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now=True)),
('longitude', models.CharField(blank=True, max_length=250, null=True)),
('latitude', models.CharField(blank=True, max_length=250, null=True)),
('date', models.CharField(blank=True, max_length=250, null=True)),
],
),
]
|
StarcoderdataPython
|
52815
|
<filename>embutils/utils/version.py<gh_stars>0
#!/usr/bin/python
# -*- coding: ascii -*-
"""
Version number implementation.
:date: 2021
:author: <NAME>
:contact: <EMAIL>
:license: The MIT License (MIT)
"""
import re
import attr
from ..utils.common import TPAny, TPText
# -->> Tunables <<---------------------
# -->> Definitions <<------------------
# -->> API <<--------------------------
@attr.s
class Version:
"""
Simple version definition.
"""
#: Regex version pattern.
REGEX_VER = re.compile(pattern=r"(([0-9]*\.){2,}([a-z0-9]{1,}))", flags=re.I)
#: Regex HEX pattern.
REGEX_HEX = re.compile(pattern=r"^((0x){0,1}([a-f0-9]{1,}))$", flags=re.I)
#: Regex INT pattern.
REGEX_INT = re.compile(pattern=r"^([0-9]{1,})$", flags=re.I)
#: Version major
major: int = attr.ib(default=99, converter=int)
#: Version minor
minor: int = attr.ib(default=0, converter=int)
#: Version build
build: int = attr.ib(default=0, converter=int)
#: Flag. If enabled the build is parsed/printed as HEX.
hex_build: bool = attr.ib(default=False, converter=bool)
def __str__(self) -> str:
"""
Version as string: major.minor.build
"""
return f"{self.major}.{self.minor}.{hex(self.build)[2:] if self.hex_build else self.build}"
def parse(self, text: TPAny) -> None:
"""
Parses a version string.
:param TPAny text: Version string.
:raises ValueError: Input is not a string or contents don't match a version pattern.
"""
# Avoid not compatible types
constraints = getattr(TPText, "__constraints__")
if not isinstance(text, constraints):
raise ValueError(f"Parameter with value '{text}' can't be converted to text.")
# Ensure format and search
text = text if isinstance(text, str) else text.decode(errors="ignore")
match = Version.REGEX_VER.search(string=text.strip())
if match is None:
raise ValueError(f"Unable to parse a valid version number from '{text}'.")
# Parse: major and minor
items = match.group().lower().split('.')
self.major, self.minor = map(int, items[:-1])
# Parse: build
base = 16 if self.hex_build else 10
regex = self.REGEX_HEX if self.hex_build else self.REGEX_INT
match = regex.search(string=items[-1])
self.build = 0 if (match is None) else int(match.group(), base)
@staticmethod
def from_str(text: TPAny, hex_build: bool = False) -> 'Version':
"""
Parses a version number from a string.
:param TPAny text: Version string.
:param bool hex_build: If true, assumes that the build section is in HEX format.
:return: Version number.
:rtype: Version
:raises ValueError: Input is not a string or contents don't match a version pattern.
"""
ver = Version(hex_build=hex_build)
ver.parse(text=text)
return ver
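# A minimal usage sketch (not part of the original module), showing how the
# parser behaves on a plain version string and on one with a HEX build field.
if __name__ == "__main__":
    plain = Version.from_str("firmware v2.10.7")
    print(plain)                                   # -> 2.10.7
    hexed = Version.from_str("boot 1.0.1f", hex_build=True)
    print(hexed.major, hexed.minor, hexed.build)   # -> 1 0 31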
|
StarcoderdataPython
|
3318976
|
<reponame>gkluber/Sub-Riemannian-Quantum-Circuits
from subriemannian_qc.discrete import DiscreteApproximator
import numpy as np
# Use the discrete approximator to find an approximation to the identity matrix
for n in range(1, 4):
gate_approx = DiscreteApproximator(n, 10, 10, 1, 100)
approximation, _ = gate_approx.approx_matrix(np.identity(2**n))
print(approximation)
|
StarcoderdataPython
|
11344053
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^user/$", views.UserManagementView.as_view()),
url(r"^user/logout/$", views.UserLogOutView.as_view()),
url(r"^super/$", views.SupermanagementView.as_view()),
url(r"^super/(?P<userkind>\d+)/$", views.SupermanagementView.as_view()),
url(r'^super/(?P<userkind>\d+)/(?P<id>\d+)/$', views.SupermanagementView.as_view()),
url(r"^student/$", views.StudentView.as_view()),
url(r"^studentfind/$", views.StudentFindView.as_view()),
url(r"^teacher/$", views.TeacherView.as_view()),
]
|
StarcoderdataPython
|
11379434
|
<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
from tempfile import mkdtemp
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
SECRET_KEY = "NOBODY expects the Spanish Inquisition!"
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"pushit",
# "tests.core_test"
)
SITE_ID = 1
ROOT_URLCONF = "tests.core.urls"
MIDDLEWARE_CLASSES = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
)
PUSHIT_CONNECTIONS = {
"default": {
"ENGINE": "pushit.backends.sns.SNSPushBackend",
"OPTIONS": {
"REGION": "eu-west-1",
"AWS_ACCESS_KEY_ID": "Read from os.env",
"AWS_SECRET_ACCESS_KEY": "Read from os.env",
"APPLICATION_ARN": "dwdw"
}
}
}
|
StarcoderdataPython
|
12801984
|
import os
import logging.config
from typing import Dict
from omegaconf import DictConfig
import pandas as pd
import hydra
from src.data import split_train_val_data
from src.entities.train_pipeline_params import TrainingPipelineParams, TrainingPipelineParamsSchema
from src.features.build_features import get_target, build_transformer, prepare_dataset
from src.models import train_model, make_prediction, evaluate_model
from src.utils import read_data, save_pkl_file, save_metrics_to_json
logger = logging.getLogger("train_pipeline")
def train_pipeline(
training_pipeline_params: TrainingPipelineParams,
) -> Dict[str, float]:
logger.info(f"Start train pipeline with params {training_pipeline_params}")
logger.info(f"Model is {training_pipeline_params.train_params.model_type}")
data = read_data(training_pipeline_params.path_config.input_data_path)
train_df, test_df = split_train_val_data(data, training_pipeline_params.splitting_params)
#logger.info("Start transformer building...")
#transformer = build_transformer(training_pipeline_params.feature_params)
#transformer.fit(train_df)
#save_pkl_file(transformer, training_pipeline_params.path_config.output_transformer_path)
#train_features = pd.DataFrame(transformer.transform(train_df))
train_target = get_target(train_df, training_pipeline_params.feature_params)
    #TODO pass the actual parameters here
train_df = prepare_dataset(train_df, training_pipeline_params.feature_params)
logger.info("Start model training..")
print(train_df.shape, train_target.shape)
model = train_model(
train_df, train_target, training_pipeline_params.train_params
)
logger.info("Model training is done")
#test_features = pd.DataFrame(transformer.transform(test_df))
test_target = get_target(test_df, training_pipeline_params.feature_params)
    #TODO pass the actual parameters here
test_df = prepare_dataset(test_df, training_pipeline_params.feature_params)
predicts = make_prediction(model, test_df)
metrics = evaluate_model(predicts, test_target)
save_metrics_to_json(training_pipeline_params.path_config.metric_path,
metrics)
logger.info("Model is saved")
logger.info(f"Metrics for test dataset is {metrics}")
save_pkl_file(model, training_pipeline_params.path_config.output_model_path)
return metrics
@hydra.main(config_path="../configs", config_name="train_config")
def train_pipeline_start(cfg: DictConfig):
os.chdir(hydra.utils.to_absolute_path(".."))
schema = TrainingPipelineParamsSchema()
params = schema.load(cfg)
train_pipeline(params)
if __name__ == "__main__":
train_pipeline_start()
|
StarcoderdataPython
|
8197521
|
from django.apps import AppConfig
class MytodoConfig(AppConfig):
name = 'MyTodo'
|
StarcoderdataPython
|
5000187
|
import numpy as np
import json
def smooth_raman_json(
min_freq: float,
max_freq: float,
points: int,
width: float,
num_acoustic: int,
filename: str,
) -> np.ndarray:
frequency = []
intensity = []
file = json.loads(open(filename).read())
# Load the json file data into lists
for i in range(len(file['frequency'])):
# Check for the max and min frequency
if max_freq >= file['frequency'][i] >= min_freq:
frequency.append(file['frequency'][i])
intensity.append(file['average-3d'][i])
return smooth_raman(
min_freq,
max_freq,
points,
width,
num_acoustic,
frequency=np.array(frequency),
intensity=np.array(intensity),
)
def smooth_raman(
min_freq: float,
max_freq: float,
points: int,
width: float,
num_acoustic: int,
frequency: np.ndarray,
intensity: np.ndarray,
) -> np.ndarray:
assert 0 <= num_acoustic <= 6
assert points > 0
# Skip the acoustic modes, which are not Raman active (but can have
# non-zero Raman intensity due to numerical issues as their frequencies
# are close to zero)
intensity = intensity[num_acoustic:]
frequency = frequency[num_acoustic:]
# Only select peaks within the given frequency range
peaks_in_range = (min_freq <= frequency) & (frequency <= max_freq)
intensity = intensity[peaks_in_range]
frequency = frequency[peaks_in_range]
# Lorentzian distribution smoothing for the data set
frequency_smooth = np.linspace(min_freq, max_freq, points)
intensity_smooth = np.zeros(points)
gamma_sq = (0.5 * width)**2
for i, f_eval in enumerate(frequency_smooth):
intensity_smooth[i] = np.sum(
intensity * gamma_sq / ((f_eval - frequency)**2 + gamma_sq)
)
return intensity_smooth
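# A minimal usage sketch (not part of the original module): it smooths two
# synthetic Raman peaks with a Lorentzian width of 10 cm^-1. The peak
# positions and intensities below are invented purely for illustration.
if __name__ == "__main__":
    freqs = np.array([0.0, 0.1, 0.2, 520.0, 950.0])   # first three ~ acoustic modes
    intens = np.array([0.5, 0.4, 0.3, 100.0, 40.0])
    spectrum = smooth_raman(
        min_freq=100.0,
        max_freq=1200.0,
        points=500,
        width=10.0,
        num_acoustic=3,
        frequency=freqs,
        intensity=intens,
    )
    print(spectrum.shape, spectrum.max())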
|
StarcoderdataPython
|
3245663
|
import unittest
from django.test import Client, TestCase
from django.contrib.auth.models import User
import string
import random
from django.test.utils import override_settings
from django.urls import reverse
from .models import Reviews, UserInformation, Request
from django.contrib.auth import get_user_model
class TestOAuth(TestCase):
def test_create_new_user(self):
num = 10
# Randomly generating username
username = ''.join(random.choices(string.ascii_uppercase + string.digits, k=num))
# Receive key constraint if the same user is created with each test run
# Randomly generate username to ensure repeatability
user = User.objects.create(username=username)
user.set_password('<PASSWORD>')
user.save()
c = Client()
logged_in = c.login(username=username, password='<PASSWORD>')
self.assertEqual(logged_in, True)
def test_user_exists(self):
c = Client()
logged_in = c.login(username='testuser', password='<PASSWORD>')
self.assertEqual(logged_in, False)
def test_login_fail(self):
c = Client()
logged_in = c.login(username='user-does-not-exist', password='<PASSWORD>')
self.assertEqual(logged_in, False)
def test_admin_login(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
username='admin',
email='<EMAIL>',
password='<PASSWORD>'
)
self.client.force_login(self.admin_user)
cookies = self.client.cookies
self.assertTrue(cookies)
@override_settings(AUTHENTICATION_BACKENDS=
('django.contrib.auth.backends.ModelBackend',))
class TestMapBox(TestCase):
def test_load_page(self):
num = 20
# Randomly generating username
username = ''.join(random.choices(string.ascii_uppercase + string.digits, k=num))
# Receive key constraint if the same user is created with each test run
# Randomly generate username to ensure repeatability
user = User.objects.create(username=username)
user.set_password('<PASSWORD>')
user.save()
c = Client()
logged_in = c.login(username=username, password='<PASSWORD>')
self.assertEqual(logged_in, True)
response = c.get('/guide/map')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'UVA Guide')
def test_load_map(self):
num = 20
# Randomly generating username
username = ''.join(random.choices(string.ascii_uppercase + string.digits, k=num))
# Receive key constraint if the same user is created with each test run
# Randomly generate username to ensure repeatability
user = User.objects.create(username=username)
user.set_password('<PASSWORD>')
user.save()
c = Client()
logged_in = c.login(username=username, password='<PASSWORD>')
self.assertEqual(logged_in, True)
response = c.get('/guide/map')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'guide-map')
def test_load_geolocate(self):
num = 20
# Randomly generating username
username = ''.join(random.choices(string.ascii_uppercase + string.digits, k=num))
# Receive key constraint if the same user is created with each test run
# Randomly generate username to ensure repeatability
user = User.objects.create(username=username)
user.set_password('<PASSWORD>')
user.save()
c = Client()
logged_in = c.login(username=username, password='<PASSWORD>')
self.assertEqual(logged_in, True)
response = c.get('/guide/map')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'geo-button')
def test_load_searchbar(self):
num = 20
# Randomly generating username
username = ''.join(random.choices(string.ascii_uppercase + string.digits, k=num))
# Receive key constraint if the same user is created with each test run
# Randomly generate username to ensure repeatability
user = User.objects.create(username=username)
user.set_password('<PASSWORD>')
user.save()
c = Client()
logged_in = c.login(username=username, password='<PASSWORD>')
self.assertEqual(logged_in, True)
response = c.get('/guide/map')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'search-bar')
class TestReviews(TestCase):
def test_invalid_review(self):
reviews = Reviews.objects.all()
invalid = "Invalid Review"
self.assertTrue(invalid not in reviews)
def test_create_review(self):
r = Reviews(location="Wilson Hall", review="My test review of <NAME>")
r.save()
r_location = r.location
r_review = r.review
self.assertTrue(r in Reviews.objects.all())
self.assertTrue(r_location == "Wilson Hall")
self.assertTrue(r_review == "My test review of Wilson Hall")
def test_delete_review(self):
r = Reviews(location="Ruffner Hall", review="My test review of Ruffner Hall")
r.save()
r_id = r.id
r_location = r.location
r_review = r.review
self.assertTrue(r in Reviews.objects.all())
self.assertTrue(r_location == "Ruffner Hall")
self.assertTrue(r_review == "My test review of Ruffner Hall")
Reviews.objects.filter(id=r_id).delete()
reviews = Reviews.objects.all()
self.assertTrue(r not in reviews)
def create_request(name, address):
return Request.objects.create(name=name, address=address)
class TestRequest(TestCase):
#source: https://stackoverflow.com/questions/60322847/how-to-test-admin-change-views
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
username='testAdmin',
email='<EMAIL>',
password='<PASSWORD>'
)
self.client.force_login(self.admin_user)
def test_valid_request_1(self):
request = create_request("The Pav", "180 McCormick Rd Charlottesville, VA 22903")
response = self.client.get(reverse("guide:csvView"))
self.assertContains(response, "180 McCormick Rd Charlottesville, VA 22903")
def test_valid_request_2(self):
request = create_request("Rotunda", "1826 University Ave, Charlottesvile, VA 29904")
response = self.client.get(reverse("guide:csvView"))
self.assertContains(response, "1826 University Ave, Charlottesvile, VA 29904")
def test_multiple_request(self):
request = create_request("Rotunda", "1826 University Ave, Charlottesvile, VA 29904")
response = self.client.get(reverse("guide:csvView"))
self.assertContains(response, "1826 University Ave, Charlottesvile, VA 29904")
request = create_request("The Pav", "180 McCormick Rd Charlottesville, VA 22903")
response = self.client.get(reverse("guide:csvView"))
self.assertContains(response, "180 McCormick Rd Charlottesville, VA 22903")
class TestUserInformation(TestCase):
def test_enter_info(self):
insert = UserInformation(address="Test", city="Test", state="TS", zipcode="11111", phone_number="3432342343")
insert.save()
obj = UserInformation.objects.get(address="Test")
self.assertTrue(obj.address == "Test")
def test_no_user(self):
insert = UserInformation(address="Address", city="City", state="ST", zipcode="11111", phone_number="1234567890")
insert.save()
obj = UserInformation.objects.get(address="Address")
self.assertTrue(obj.address == "Address")
def test_valid_user(self):
insert = UserInformation(address="853 West Main Street", city="Charlottesville", state="VA", zipcode="22903", phone_number="4349063068")
insert.save()
obj = UserInformation.objects.get(address="853 West Main Street")
self.assertTrue(obj.address == "853 West Main Street")
def test_invalid_user(self):
insert = UserInformation(address="1400 Wertland Street", phone_number="9065551234")
insert.save()
obj = UserInformation.objects.get(address="1400 Wertland Street")
self.assertEqual(obj.address == "1400 Wertland Street", True)
class TestAdminView(TestCase):
def test_admin_access(self):
admin_user = User.objects.create_superuser('admin', '<EMAIL>', '<PASSWORD>')
c = Client()
logged_in = c.login(username='admin', password='<PASSWORD>')
response = c.get('/request/download')
self.assertEqual(response.status_code, 200)
self.assertEqual(logged_in, True)
def test_non_admin_access(self):
num = 10
# Randomly generating username
username = ''.join(random.choices(string.ascii_uppercase + string.digits, k=num))
# Receive key constraint if the same user is created with each test run
# Randomly generate username to ensure repeatability
user = User.objects.create(username=username)
user.set_password('<PASSWORD>')
user.save()
c = Client()
logged_in = c.login(username=username, password='<PASSWORD>')
self.assertEqual(logged_in, True)
response = c.get('/request/download')
# Status code should not be 200 (admin only access)
self.assertNotEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4946043
|
<reponame>EEDDRV/Alert-Grade<filename>Main.py
import API, Config, time, sys, json
from email import message
import smtplib
from email.message import EmailMessage
def send_email(subject, body, to):
msg = EmailMessage()
msg.set_content(body)
msg['subject'] = subject
msg['to'] = to
user = Config.Send_Email
msg['from'] = user
password = Config.Send_Email_Password
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(user, password)
server.send_message(msg)
server.quit()
if __name__ == "__main__":
Original_Information = API.Get_Grades(Config.Email, Config.Password)
while True:
New_Information = API.Get_Grades(Config.Email, Config.Password)
Courses_Information_Only = New_Information["initial_contexts"]["PortalController"]["data"]["enrollments"][0]["grades"]["rows"]
Courses_Information = {}
for i in Courses_Information_Only:
Courses_Information.update({i["course_name"]: i["calculated_grade"]})
if Courses_Information != Original_Information:
Original_Information = Courses_Information
sms_message = ""
for i in Courses_Information:
sms_message += f"{i}: {Courses_Information[i]}\n"
sms_message += f"Time sent from server: {time.strftime('%H:%M:%S')}"
send_email("FOCUS", sms_message, Config.Email_Send)
print("[*] Message sent!")
# with open('q.json', 'w') as f:
# json.dump(Courses_Information, f, indent=4, sort_keys=True)
|
StarcoderdataPython
|
9642987
|
#!/usr/bin/python3
import asyncio
import argparse
import krpc
import time
import datetime
import os
import sys
from ksppynet.flight_plan_node import ManeuverNode
__all__ = (
"FlightPlan",
)
class KStream(object):
def __init__(self, conn, item, attr):
self.stream = conn.add_stream(getattr, item, attr)
def __call__(self):
return self.stream()
class FlightPlan(object):
def msg(self, s, duration=5):
print("{} <==> {}".format(datetime.datetime.now().isoformat(' '), s))
self.conn.ui.message(s, duration=duration)
if self.debug_handler:
self.debug_handler(s, duration=duration)
def __init__(self, conn, vessel, debug_handler=None):
self.debug_handler = debug_handler
self.conn = conn
self.vessel = vessel
self.ap = vessel.auto_pilot
self.autostaging_disabled = False
# Defaults
self.loop = asyncio.get_event_loop()
self.attr = {}
self.attr["ut"] = KStream(conn, conn.space_center, 'ut')
self.attr["altitude"] = KStream(conn, vessel.flight(), 'mean_altitude')
self.attr["vertical_speed"] = KStream(conn, vessel.flight(vessel.orbit.body.reference_frame), 'vertical_speed')
self.attr["apoapsis"] = KStream(conn, vessel.orbit, 'apoapsis_altitude')
self.attr["periapsis"] = KStream(conn, vessel.orbit, 'periapsis_altitude')
self.attr["real_apoapsis"] = KStream(conn, vessel.orbit, 'apoapsis')
self.attr["real_periapsis"] = KStream(conn, vessel.orbit, 'periapsis')
self.attr["eccentricity"] = KStream(conn, vessel.orbit, 'eccentricity')
self.sequence = asyncio.Queue()
self.seq_defs = {
"pre_launch" : self._pre_launch,
"launch" : self._launcher,
"deorbit" : self._deorbiter,
"orbit" : self._orbiter,
"land" : self._lander,
"quit" : self._quit
}
def add_sequence(self, name, *args, **kwargs):
self.msg("Appending sequence: {}".format(name))
asyncio.async(self.sequence.put((name,
self.seq_defs[name],
args,
kwargs)))
@asyncio.coroutine
def _quit(self):
self.loop.stop()
@asyncio.coroutine
def _start_sequence(self):
while True:
seq = yield from self.sequence.get()
# Wait for the coroutine with its args/kwargs before dequeuing
# the next in the sequence
yield from asyncio.wait_for(seq[1](*seq[2], **seq[3]), None)
@asyncio.coroutine
def _autostager(self):
while True:
yield
if self.autostaging_disabled:
self.msg("Autostaging Disabled")
return
stage = self.vessel.control.current_stage
parts = self.vessel.parts.in_stage(stage)
for part in parts:
if part.parachute:
self.msg("Chutes in stage. Disabling autostaging")
return
parts = self.vessel.parts.in_decouple_stage(stage-1)
fuel_in_stage = False
for part in parts:
engine = part.engine
if engine and engine.active and engine.has_fuel:
fuel_in_stage = True
if not fuel_in_stage:
self.msg("No fuel in stage. Staging...")
self.vessel.control.activate_next_stage()
else:
yield from asyncio.sleep(0.2)
@asyncio.coroutine
def _launcher(self, altitude):
self.msg("Executing Launch")
self.desired_altitude = altitude
self.turn_start_altitude = 250.0
self.turn_mid_altitude = self.vessel.orbit.body.atmosphere_depth * 0.60
self.turn_end_altitude = self.vessel.orbit.body.atmosphere_depth * 0.80
def proportion(val, start, end):
return (val - start) / (end - start)
while True:
yield
altitude = self.attr["altitude"]()
apoapsis = self.attr["apoapsis"]()
if altitude < self.turn_start_altitude:
self.ap.target_pitch_and_heading(90,self.desired_heading)
elif self.turn_start_altitude <= altitude < self.turn_mid_altitude:
#Only shallow out once we've got through the thicker part of the atmosphere.
frac = proportion(altitude,
self.turn_start_altitude,
self.turn_mid_altitude)
self.ap.target_pitch_and_heading(45 + 45*(1-frac),self.desired_heading)
elif self.turn_mid_altitude <= altitude < self.turn_end_altitude:
frac = proportion(altitude,
self.turn_mid_altitude,
self.turn_end_altitude)
self.ap.target_pitch_and_heading(35*(1-frac)+5 ,self.desired_heading)
else:
self.ap.target_pitch_and_heading(5, self.desired_heading)
if altitude > self.vessel.orbit.body.atmosphere_depth:
fudge_factor = 1.0
else:
                #Try and overshoot the desired altitude a little to account for resistance in the atmosphere
fudge_factor = 1 + (self.vessel.orbit.body.atmosphere_depth - altitude) / (25 * self.vessel.orbit.body.atmosphere_depth)
if apoapsis > self.desired_altitude * fudge_factor:
self.vessel.control.throttle = 0
if altitude > self.vessel.orbit.body.atmosphere_depth * 0.90:
# Wait until we're mostly out of the atmosphere before setting maneuver nodes
self.ap.disengage()
return
#else: control the throttle?
@asyncio.coroutine
def warp_to(self, target_ut, orig_warp_factor=4, lead_time=5):
while True:
yield
warp_factor = orig_warp_factor
if self.conn.space_center.rails_warp_factor != warp_factor:
if not self.conn.space_center.can_rails_warp_at(warp_factor):
warp_factor = self.conn.space_center.maximum_rails_warp_factor
self.conn.space_center.rails_warp_factor = warp_factor
ut = self.attr["ut"]()
if ut > target_ut - lead_time:
self.msg("Warp finished")
self.drop_warp()
return
def drop_warp(self):
self.conn.space_center.rails_warp_factor = 0
self.conn.space_center.physics_warp_factor = 0
@asyncio.coroutine
def _pre_launch(self, heading):
self.msg("Executing Prelaunch")
self.desired_heading = heading
self.vessel.control.sas = True # Is this ok?
self.vessel.control.rcs = False
self.vessel.control.throttle = 1
self.ap.reference_frame = self.vessel.surface_reference_frame
self.ap.target_pitch_and_heading(90, self.desired_heading)
self.ap.target_roll = float('nan')
self.ap.engage()
@asyncio.coroutine
def _deorbiter(self, periapsis, end_altitude):
self.msg("Executing Deorbit")
self.ap.reference_frame = self.vessel.orbital_reference_frame
destage_altitude = self.vessel.orbit.body.atmosphere_depth * 0.90
self.ap.target_direction = (0,-1,0)
yield from asyncio.sleep(10) ## wait to turn.
self.ap.engage()
while True:
yield
cur_periapsis = self.attr["periapsis"]()
self.ap.target_direction = (0,-1,0)
if cur_periapsis > periapsis:
self.vessel.control.throttle = 0.5
else:
self.vessel.control.throttle = 0
break
ut = self.attr["ut"]()
        self.loop.create_task(self.warp_to(ut + self.vessel.orbit.time_to_periapsis))
# The warp should stop in the atmosphere.
while True:
yield
altitude = self.attr["altitude"]()
if altitude < destage_altitude:
break
#disable warping
self.drop_warp()
self.msg("Turning")
self.ap.target_direction = (0,-1,0)
yield from asyncio.sleep(10) ## wait to turn.
self.msg("Deceleration burn")
self.vessel.control.throttle = 1
yield from asyncio.sleep(20) ## Crude
self.vessel.control.throttle = 0
yield from asyncio.sleep(1) # Wait to check throttle is off before destaging
self.autostaging_disabled = True
chutes = False
while not chutes:
stage = self.vessel.control.current_stage
parts = self.vessel.parts.in_stage(stage-1)
for part in parts:
if part.parachute:
chutes = True
if chutes:
self.msg("Chutes in next stage.")
else:
self.msg("Destaging for landing")
self.vessel.control.activate_next_stage()
self.msg("Deorbit Complete, brace for landing!!")
self.ap.disengage()
def get_node(self):
return ManeuverNode(self.conn, self.vessel, self.attr["ut"])
@asyncio.coroutine
def _orbiter(self, apoapsis, periapsis):
node = self.get_node()
self.msg("Changing Periapsis to new Apoapsis {}".format(apoapsis))
node.change_periapsis(apoapsis)
yield from asyncio.wait_for(node.execute(), None)
self.msg("Changing new Periapsis to {}".format(periapsis))
node.change_apoapsis(periapsis)
yield from asyncio.wait_for(node.execute(), None)
@asyncio.coroutine
def _lander(self, chute_altitude):
self.msg("Executing Landing")
self.ap.reference_frame = self.vessel.orbital_reference_frame
self.ap.target_direction = (0,-1,0)
self.ap.engage()
while True:
yield
altitude = self.attr["altitude"]()
self.ap.target_direction = (0,-1,0)
if altitude < chute_altitude:
while True:
stage = self.vessel.control.current_stage
parts = self.vessel.parts.in_stage(stage-1)
self.vessel.control.activate_next_stage()
for part in parts:
if part.parachute:
self.msg("Chute stage activated")
return
def run_sequence(self):
self.loop.create_task(self._start_sequence())
def launch(self):
# Start the sequence
self.run_sequence()
for i in range (5, 0, -1):
self.msg("{} ...".format(i))
time.sleep(1)
self.vessel.control.activate_next_stage()
def set_autostaging(self):
self.loop.create_task(self._autostager())
def close(self):
self.conn.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--apoapsis', type=int)
parser.add_argument('--periapsis', type=int)
parser.add_argument('--down', action='store_true')
parser.add_argument('--diags', action='store_true')
parser.add_argument('--test', nargs='+')
args = parser.parse_args()
conn = krpc.connect(name="{}.{}".format(os.path.basename(__file__), os.getpid()))
vessel = conn.space_center.active_vessel
fp = FlightPlan(conn, vessel)
loop = asyncio.get_event_loop()
def exit_on_exception(loop, context):
print(context["message"])
if (not "exception" in context
or not isinstance(context["exception"], ConnectionResetError)):
loop.default_exception_handler(context)
loop.stop()
loop.set_exception_handler(exit_on_exception)
if args.diags:
#print("Vessel")
#print(dir(vessel))
#print("Orbit")
#print(dir(vessel.orbit))
#print("Periapsis:\nperiapsis: {}, altitude: {}".format(vessel.orbit.periapsis, vessel.orbit.periapsis_altitude))
#print("Apoapsis:\napoapsis: {}, altitude: {}".format(vessel.orbit.apoapsis, vessel.orbit.apoapsis_altitude))
#print(dir(vessel.orbit.body))
print("Altitude: {}, Destage at: {}, Vertical speed: {}".format(vessel.flight().mean_altitude,
vessel.orbit.body.atmosphere_depth * 0.90,
vessel.flight().vertical_speed))
print("Altitude: {}, Destage at: {}, Vertical speed: {}".format(vessel.flight().mean_altitude,
vessel.orbit.body.atmosphere_depth * 0.90,
vessel.flight(vessel.orbit.body.reference_frame).vertical_speed))
elif args.test:
if args.test[0] == "warp":
assert(args.test[1])
fp.msg("Warping ahead {} seconds".format(args.test[1]))
loop.create_task(fp.warp_to(fp.attr["ut"]() + float(args.test[1])))
elif args.down:
#fp.msg("Changing Apoapsis to {}".format(70000))
#node = fp.get_node()
#node.change_apoapsis(70000)
#loop.run_until_complete(node.execute())
fp.add_sequence("deorbit", periapsis=45000, end_altitude=4000)
fp.add_sequence("land", chute_altitude=3000)
fp.run_sequence()
elif args.apoapsis:
apoapsis = args.apoapsis
node = fp.get_node()
fp.msg("Changing Apoapsis to {}".format(apoapsis))
node.change_apoapsis(apoapsis)
loop.create_task(node.execute())
elif args.periapsis:
periapsis = args.periapsis
node = fp.get_node()
fp.msg("Changing Periapsis to {}".format(periapsis))
node.change_periapsis(periapsis)
loop.create_task(node.execute())
else:
fp.set_autostaging()
fp.add_sequence("pre_launch", heading=90)
fp.add_sequence("launch", altitude=80000)
#fp.add_sequence("orbit", apoapsis=100000, periapsis=100000)
fp.add_sequence("orbit", apoapsis=75000, periapsis=75000)
fp.add_sequence("quit")
#fp.add_sequence("deorbit", periapsis=45000, end_altitude=4000)
#fp.add_sequence("land", chute_altitude=3000)
fp.launch()
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
|
StarcoderdataPython
|
8050513
|
<reponame>viuipan/ironsmslib
from .base import APIException
class ServiceException(APIException):
message = "SERVICE_ERROR"
description = "service problems"
def __init__(self, response):
super(ServiceException, self).__init__(description=self.description)
|
StarcoderdataPython
|
3400223
|
"""
" License:
" -----------------------------------------------------------------------------
" Copyright (c) 2018, <NAME>.
" All rights reserved.
"
" Redistribution and use in source and binary forms, with or without
" modification, are permitted provided that the following conditions are met:
"
" 1. Redistributions of source code must retain the above copyright notice,
" this list of conditions and the following disclaimer.
"
" 2. Redistributions in binary form must reproduce the above copyright notice,
" this list of conditions and the following disclaimer in the documentation
" and/or other materials provided with the distribution.
"
" 3. Neither the name of the copyright holder nor the names of its contributors
" may be used to endorse or promote products derived from this software
" without specific prior written permission.
"
" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
" POSSIBILITY OF SUCH DAMAGE.
" -----------------------------------------------------------------------------
" Description: VGG like Sequential Neural Net for image classification
" Author: <NAME>, <EMAIL>
" Date: October 2018
"""
from keras.initializers import VarianceScaling
from keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D, Flatten, Dense, Activation, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Model
from keras.regularizers import l2
class VGGNet:
def __init__(self):
print("\n Loading Network Model...")
def conv2d_bn(self, x, filter_size, kernel_size, padding_type, activation_type, strides=(1, 1)):
        # Define the Conv2D -> Activation -> BatchNormalization block
weight = 5e-4
x = Conv2D(filters=filter_size, kernel_size=kernel_size, strides=strides, kernel_regularizer=l2(weight),
kernel_initializer=VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None),
padding=padding_type, activation='linear')(x)
if activation_type == 'LeakyRelu':
x = LeakyReLU(alpha=0.3)(x)
else:
x = Activation(activation_type)(x)
x = BatchNormalization(axis=-1)(x)
return x
def maxpool_2d(self, x, pool_size, stride_size, padding_type):
# Define the Maxpool block
if stride_size is None:
stride_size = pool_size
x = MaxPooling2D(pool_size=(pool_size, pool_size), strides=(stride_size, stride_size), padding=padding_type)(x)
return x
"""
Build a VGG like sequential network
"""
def buildSequentialModel(self, inputsize, num_classes):
input_layer = Input(inputsize)
# First block of conv2d -> Maxpool layers
net = self.conv2d_bn(input_layer, filter_size=64, kernel_size=5, padding_type='same',
activation_type='LeakyRelu')
net = self.conv2d_bn(net, filter_size=64, kernel_size=5, padding_type='same', activation_type='LeakyRelu')
net = self.maxpool_2d(net, pool_size=2, stride_size=2, padding_type='same')
# second block of conv2d -> MaxPool layers
net = self.conv2d_bn(net, filter_size=128, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.conv2d_bn(net, filter_size=128, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.conv2d_bn(net, filter_size=128, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.maxpool_2d(net, pool_size=2, stride_size=2, padding_type='same')
net = Dropout(0.1)(net)
# Third block of conv2d -> MaxPool layers
net = self.conv2d_bn(net, filter_size=256, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.conv2d_bn(net, filter_size=256, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.conv2d_bn(net, filter_size=256, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.maxpool_2d(net, pool_size=2, stride_size=2, padding_type='same')
net = Dropout(0.15)(net)
# Fourth block of conv2d -> MaxPool layers
net = self.conv2d_bn(net, filter_size=512, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.conv2d_bn(net, filter_size=512, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.conv2d_bn(net, filter_size=512, kernel_size=3, padding_type='same', activation_type='LeakyRelu')
net = self.maxpool_2d(net, pool_size=2, stride_size=2, padding_type='same')
net = Dropout(0.2)(net)
# Flatten layer
net = Flatten()(net)
net = Dense(2048, activation='linear')(net)
net = LeakyReLU(alpha=0.3)(net)
net = Dense(2048, activation='linear')(net)
net = LeakyReLU(alpha=0.3)(net)
net = Dropout(0.4)(net)
net = Dense(num_classes, activation='softmax')(net)
# Create the complete model
model = Model(inputs=input_layer, outputs=net)
return model
if __name__ == '__main__':
print('[INFO] Building a VGG like sequential neural network..')
# input and output layer parameters
input_size = (64, 64, 3)
num_classes = 200
# Calling the network building class
vggnet = VGGNet()
seq_model = vggnet.buildSequentialModel(input_size, num_classes)
seq_model.summary()
|
StarcoderdataPython
|
9701516
|
<reponame>texx00/sandypi
from queue import Queue
import json
from threading import Thread
import time
import random
from server.utils import settings_utils
from server.database.playlist_elements import ShuffleElement, TimeElement
TIME_CONVERSION_FACTOR = 60*60 # hours to seconds
class QueueManager():
def __init__(self, app, socketio):
self._isdrawing = False
self._element = None
self.app = app
self.socketio = socketio
self.q = Queue()
self.repeat = False # true if should not delete the current element from the queue
self.shuffle = False # true if should shuffle the queue
self.interval = 0 # pause between drawing in repeat mode
self._last_time = 0 # timestamp of the end of the last drawing
self._is_force_stop = False
self._is_random = False # used when queueing random drawings
# setup status timer
self._th = Thread(target=self._thf, daemon=True)
self._th.name = "queue_status_interval"
self._th.start()
def is_drawing(self):
return self._isdrawing
def is_paused(self):
return self.app.feeder.get_status()["is_paused"]
# pauses the feeder
def pause(self):
self.app.feeder.pause()
self.send_queue_status()
self.app.logger.info("Drawing paused")
# resumes the feeder
def resume(self):
self.app.feeder.resume()
self.send_queue_status()
self.app.logger.info("Drawing resumed")
    # returns a boolean: true if the queue is empty and nothing is drawing, false otherwise
def is_queue_empty(self):
return not self._isdrawing and len(self.q.queue)==0
def set_is_drawing(self, dr):
self._isdrawing = dr
# returns the current element
def get_element(self):
return self._element
# set the current element
def set_element(self, element):
self.app.logger.info("Now running: {}".format(element))
self._element = element
# stop the current drawing and start the next
def stop(self, is_random=False):
self._is_random = is_random
self._is_force_stop = True
self.app.feeder.stop()
# set the repeat flag
def set_repeat(self, val):
if type(val) == type(True):
self.repeat = val
if self._is_random:
if val:
self.start_random_drawing()
else:
self.clear_queue()
else: raise ValueError("The argument must be boolean")
# set the shuffle flag
def set_shuffle(self, val):
if self._is_random: # can change the shuffle option only if is not playing a random drawing
return
if type(val) == type(True):
self.shuffle = val
else: raise ValueError("The argument must be boolean")
# set the queue interval [h]
def set_interval(self, val):
self.interval = val
# starts a random drawing from the uploaded files
def start_random_drawing(self, repeat=False):
self.set_shuffle(True)
if self.q.empty():
self.queue_element(ShuffleElement(shuffle_type="0"), is_random=True) # queue a new random element drawing
if repeat: # call again the same method only once to put an element in the queue
self.start_random_drawing(False)
else:
if not self.is_drawing():
self.queue_element(ShuffleElement(shuffle_type="0"), is_random=True)
# add an element to the queue
def queue_element(self, element, show_toast=True, is_random=False):
self._is_random = is_random
if self.q.empty() and not self.is_drawing():
self.start_element(element)
return
self.app.logger.info("Adding {} to the queue".format(element))
self.q.put(element)
if show_toast:
self.app.semits.show_toast_on_UI("Element added to the queue")
self.send_queue_status()
# return the content of the queue as a string
def queue_str(self):
return str(self.q.queue)
def get_queue(self):
return self.q.queue
def set_element_ended(self):
self.set_is_drawing(False)
if self._is_random:
self.start_random_drawing()
# if the ended element was forced to stop should not set the "last_time" otherwise when a new element is started there will be a delay element first
if self._is_force_stop:
self._is_force_stop = False
else:
self._last_time = time.time()
self.start_next()
# clear the queue
def clear_queue(self):
self.q.queue.clear()
def set_new_order(self, elements):
self.clear_queue()
for el in elements:
            if el != 0:
self.q.put(el)
self.send_queue_status()
# remove the first element with the given code
def remove(self, code):
tmp = Queue()
is_first = True
for c in self.q.queue:
if c == code and is_first:
is_first = False
else:
tmp.put(c)
self.q = tmp
# queue length
def queue_length(self):
return self.q.qsize()
# start the next drawing of the queue
# by default will start it only if not already printing something
# with "force_stop = True" will stop the actual drawing and start the next
def start_next(self, force_stop=False):
if(self.is_drawing()):
if not force_stop:
return False
else:
# will reset the last_time to 0 in order to get the next element running without a delay and stop the current drawing.
# Once the current drawing the next drawing should start from the feeder event manager
self._last_time = 0
self.stop(self._is_random)
return True
try:
# should not remove the element from the queue if repeat is active. Should just add it at the end of the queue
if (not self._element is None) and (self.repeat) and (not hasattr(self._element, "_repeat_off") and (not self._is_random)):
self.q.put(self._element)
# if the time has not expired should start a new drawing otherwise should start a delay element
if (self.interval != 0) and (not hasattr(self._element, "_repeat_off") and (self.queue_length()>0)):
if (self._last_time + self.interval*TIME_CONVERSION_FACTOR > time.time()):
                    element = TimeElement(delay=self._last_time + self.interval*TIME_CONVERSION_FACTOR - time.time(), type="delay")
element._repeat_off = True # when the "repeat" flag is selected, should not add this element to the queue
self.start_element(element)
return True
self._element = None
if self.queue_length() > 0:
element = None
# if shuffle is enabled select a random drawing from the queue otherwise uses the first element of the queue
if self.shuffle:
tmp = None
elements = list(self.q.queue)
if len(elements)>1: # if the list is longer than 2 will pop the last element to avoid using it again
tmp = elements.pop(-1)
element = elements.pop(random.randrange(len(elements)))
elements.append(tmp)
self.set_new_order(elements)
else:
element = self.q.queue.popleft()
# starts the choosen element
self.start_element(element)
self.app.logger.info("Starting next element: {}".format(element))
return True
return False
except Exception as e:
self.app.logger.exception(e)
self.app.logger.error("An error occured while starting a new drawing from the queue:\n{}".format(str(e)))
self.start_next()
# This method send a "start" command to the bot with the element
def start_element(self, element):
element = element.before_start(self.app)
if not element is None:
self.app.logger.info("Sending gcode start command")
self.set_is_drawing(True)
self.app.feeder.start_element(element, force_stop = True)
else: self.start_next()
# sends the queue status to the frontend
def send_queue_status(self):
elements = list(map(lambda x: str(x), self.q.queue)) if len(self.q.queue) > 0 else [] # converts elements to json
res = {
"current_element": str(self._element),
"elements": elements,
"status": self.app.feeder.get_status(),
"repeat": self.repeat,
"shuffle": self.shuffle,
"interval": self.interval
}
self.app.semits.emit("queue_status", json.dumps(res))
# checks if should start drawing after the server is started and ready (can be set in the settings page)
def check_autostart(self):
autostart = settings_utils.get_only_values(settings_utils.load_settings()["autostart"])
if autostart["on_ready"]:
self.start_random_drawing(repeat=True)
self.set_repeat(True)
try:
if autostart["interval"]:
self.set_interval(float(autostart["interval"]))
except Exception as e:
self.app.logger.exception(e)
# periodically updates the queue status, used by the thread
def _thf(self):
while(True):
try:
# updates the queue status every 30 seconds but only while is drawing
time.sleep(30)
if self.is_drawing():
self.send_queue_status()
except Exception as e:
self.app.logger.exception(e)
|
StarcoderdataPython
|
1877355
|
<reponame>zhique-design/zhique-service<filename>blog/urls.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from .views import CategoryBreadcrumbView
from .viewsets import ArticleViewSet, CategoryViewSet, HotArticleViewSet, TagViewSet, RecommendArticleViewSet
router = DefaultRouter(trailing_slash=False)
router.register(r'articles', ArticleViewSet)
router.register(r'categories', CategoryViewSet)
router.register(r'tags', TagViewSet)
router.register(r'hot-articles', HotArticleViewSet)
router.register(r'recommend-articles', RecommendArticleViewSet)
app_name = 'blog'
urlpatterns = [
url(r'^category-breadcrumb$', CategoryBreadcrumbView.as_view())
] + router.urls
|
StarcoderdataPython
|
12830462
|
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Trying to gin up a human-readable, simple-minded (bilinear interpolation) algorithm for de-mosaicing a
# sensor readout that has an RGGB color filter array (CFA).
# Red filters lie over cells whose x coordinate is even and whose y coordinate is even: even, even
# Blue filters: odd, odd
# Green filters: even, odd *and* odd, even.
# In[2]:
import numpy as np
from PIL import Image
# In[13]:
# Image dimensions
width = 255
height = 255
# Dummy image data is grayscale - single component, 0..255.
# Build it up as a gradient.
# Give it a demosaiced red tinge by boosting pixels that should be
# under a red filter in the Bayer image pattern.
dummy_image_data = []
for y in range(height):
row = []
for x in range(width):
red_boost = 100 if (x % 2, y % 2) == (0, 0) else 0
row.append(min(255, x + red_boost))
dummy_image_data.append(row)
gray_image_data = np.array(dummy_image_data, dtype=np.uint8)
print("Dummy image data:", gray_image_data)
# PIL seems to be ignoring my mode, dangit.
gray_img = Image.fromarray(gray_image_data, mode="L")
gray_img.show()
print("Converted back to numpy array:")
print(np.asarray(gray_img))
# In[14]:
# Offset of each color component within a pixel:
R = 0
G = 1
B = 2
# filter pattern, addressable as [y][x]
pattern = [
[R, G],
[G, B]
]
# Demosaiced image data is RGB - three components.
demosaiced = []
for y in range(height):
row = [[0, 0, 0] for x in range(width)]
demosaiced.append(row)
def indices(v, limit):
result = []
for offset in [-1, 0, 1]:
index = v + offset
if 0 <= index < limit:
result.append(index)
return result
def channel(x, y):
x_pattern = x % 2
y_pattern = y % 2
return pattern[y_pattern][x_pattern]
def demosaic(sensor_image, demosaiced, width, height):
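    # For each output pixel, average the raw sensor values of each colour
    # channel over the (up to) 3x3 neighbourhood, using only the neighbours
    # that carry that channel in the RGGB pattern (bilinear-style interpolation).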
for x_image in range(width):
x_indices = indices(x_image, width)
for y_image in range(height):
y_indices = indices(y_image, height)
sums = {R: 0, G: 0, B: 0}
counts = {R: 0, G: 0, B: 0}
for x in x_indices:
for y in y_indices:
c = channel(x, y)
sums[c] += sensor_image[y][x]
counts[c] += 1
for c in [R, G, B]:
intensity = sums[c] / counts[c] if counts[c] > 0 else 0
# May as well convert to 8-bit integer.
pixel_value = min(255, max(0, int(intensity)))
demosaiced[y_image][x_image][c] = pixel_value
demosaic(dummy_image_data, demosaiced, width, height)
# In[15]:
color_img = Image.fromarray(np.array(demosaiced, dtype=np.uint8), mode="RGB")
color_img.show()
|
StarcoderdataPython
|
1659644
|
from zc.buildout.easy_install import script_header, _safe_arg
script_templates = {
"wsgi": script_header + """
%(relative_paths_setup)s
import os
import sys
sys.path[0:0] = [
%(path)s,
]
%(initialization)s
os.chdir(path)
if not os.path.isdir('applications'):
raise RuntimeError('Running from the wrong folder')
import gluon.main
application = gluon.main.wsgibase
""",
"web2py": script_header + """
%(relative_paths_setup)s
import os
import sys
sys.path[0:0] = [
%(path)s,
]
%(initialization)s
os.chdir(path)
if __name__ == '__main__':
import gluon.widget
gluon.widget.start()
"""
}
|
StarcoderdataPython
|
5029638
|
<reponame>ItamarHavenstein/Python<filename>Exercicio51a60/ex055.py
maiorPeso = 0.0
menorPeso = 0.0
for c in range(1, 6, 1):
    peso = float(input('Weight of person {}: '.format(c)))
if c == 1:
maiorPeso = peso
menorPeso = peso
else:
if peso > maiorPeso:
maiorPeso = peso
elif peso < menorPeso:
menorPeso = peso
print('The heaviest weight read was {}Kg'
      '\nThe lightest weight read was {}Kg'.format(maiorPeso, menorPeso))
|
StarcoderdataPython
|
6538870
|
<gh_stars>1-10
# Generated by Django 3.2.6 on 2021-08-21 21:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reactbackend', '0002_votes_applied_change'),
]
operations = [
migrations.AlterField(
model_name='votes',
name='applied_change',
field=models.CharField(blank=True, choices=[('IP', 'Invalid position'), ('NT', 'No trees near this entry')], max_length=2, null=True),
),
]
|
StarcoderdataPython
|
6543480
|
<filename>pkgs/robots/bdsim/robots/test_robots.py
import bdsim.np as np
import math
import matplotlib.pyplot as plt
import time
from bdsim.blocks.robots import *
import unittest
import numpy.testing as nt
class RobotBlockTest(unittest.TestCase):
def test_quadrotor(self):
from quad_model import quadrotor as qm
block = MultiRotor(qm)
print(block.D)
z = np.r_[0, 0, 0, 0]
block.inputs = [z]
block.setstate(block.getstate())
nt.assert_equal(block.getstate(), np.zeros((12,)))
block.setstate(block.getstate())
block._x[2] = -100 # set altitude
block.inputs[0] = 100 * np.r_[1, -1, 1, -1]
# check outputs
out = block.output()
self.assertIsInstance(out, list)
self.assertEqual(len(out), 1)
out = out[0]
self.assertIsInstance(out, dict)
self.assertIn('x', out)
self.assertIn('vb', out)
self.assertIn('w', out)
self.assertEqual(out['x'][2], -100)
# check deriv, checked against MATLAB version 20200621
# too little thrust, falling
block.inputs[0] = 800 * np.r_[1, -1, 1, -1]
d = block.deriv()
self.assertIsInstance(d, np.ndarray)
self.assertEqual(d.shape, (12,))
self.assertGreater(d[8], 0)
nt.assert_array_almost_equal(
np.delete(d, 8), np.zeros((11,))) # other derivs are zero
block.inputs[0] = 900 * np.r_[1, -1, 1, -1] # too much thrust, rising
self.assertLess(block.deriv()[8], 0)
block.inputs[0] = 800 * np.r_[0.8, -1, 1.2, -1] # pitching
self.assertGreater(block.deriv()[10], 20)
block.inputs[0] = 800 * np.r_[1, -1.2, 1, -0.8] # rolling
self.assertGreater(block.deriv()[9], 20)
# ---------------------------------------------------------------------------------------#
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
352397
|
<reponame>IBM/gcat-assets
import sys, os, ibm_boto3
from ibm_botocore.client import Config
from pprint import pprint
cos = ibm_boto3.client(
service_name="s3",
ibm_api_key_id=os.environ["COS_APIKEY"],
ibm_service_instance_id=os.environ["COS_RESOURCE_INSTANCE_ID"],
ibm_auth_endpoint="https://iam.cloud.ibm.com/identity/token",
config=Config(signature_version="oauth"),
endpoint_url=os.environ["COS_ENDPOINT"],
)
response = cos.put_object(
Bucket=os.environ["COS_BUCKETNAME"],
Body=sys.argv[1].encode("utf-8"),
Key="test-object-from-code-engine",
)
pprint(response)
# print(sys.argv[1])
|
StarcoderdataPython
|
11352129
|
<gh_stars>1-10
"""empty message
Revision ID: 84102fbdb0f8
Revises:
Create Date: 2021-11-12 15:28:34.421620
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '84102fbdb0f8'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=40), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('hashed_password', sa.String(length=255), nullable=False),
sa.Column('profile_pic', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('spots',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('userId', sa.Integer(), nullable=False),
sa.Column('address', sa.String(length=255), nullable=False),
sa.Column('city', sa.String(length=50), nullable=False),
sa.Column('state', sa.String(length=50), nullable=False),
sa.Column('country', sa.String(length=50), nullable=False),
sa.Column('lat', sa.Float(), nullable=True),
sa.Column('lng', sa.Float(), nullable=True),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('price', sa.Float(), nullable=False),
sa.ForeignKeyConstraint(['userId'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('bookings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('spotId', sa.Integer(), nullable=False),
sa.Column('userId', sa.Integer(), nullable=False),
sa.Column('startDate', sa.DateTime(), nullable=False),
sa.Column('endDate', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['spotId'], ['spots.id'], ),
sa.ForeignKeyConstraint(['userId'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('spotId', sa.Integer(), nullable=False),
sa.Column('url', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['spotId'], ['spots.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
op.create_table('reviews',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('spot_id', sa.Integer(), nullable=False),
sa.Column('clean_rating', sa.Integer(), nullable=False),
sa.Column('accur_rating', sa.Integer(), nullable=False),
sa.Column('comm_rating', sa.Integer(), nullable=False),
sa.Column('location_rating', sa.Integer(), nullable=False),
sa.Column('check_in_rating', sa.Integer(), nullable=False),
sa.Column('value_rating', sa.Integer(), nullable=False),
sa.Column('review_text', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['spot_id'], ['spots.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('reviews')
op.drop_table('images')
op.drop_table('bookings')
op.drop_table('spots')
op.drop_table('users')
# ### end Alembic commands ###
|
StarcoderdataPython
|
5039679
|
# Character width information for PostScript font `Times Bold Italic'
# generated from the Adobe Font Metric file `../../../../adobe/tibi____.afm'. Adobe
# copyright notice follows:
#
# Copyright (c) 1985, 1987, 1989, 1990 Adobe Systems Incorporated. All Rights Reserved.Times is a trademark of Linotype AG and/or its subsidiaries.
#
from . import PSFont
font = PSFont.PSFont('Times-BoldItalic', 'Times Bold Italic',
[ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
250, 389, 555, 500, 500, 833, 778, 333,
333, 333, 500, 570, 250, 333, 250, 278,
500, 500, 500, 500, 500, 500, 500, 500,
500, 500, 333, 333, 570, 570, 570, 500,
832, 667, 667, 667, 722, 667, 667, 722,
778, 389, 500, 667, 611, 889, 722, 722,
611, 722, 667, 556, 611, 722, 667, 889,
667, 611, 611, 333, 278, 333, 570, 500,
333, 500, 500, 444, 500, 444, 333, 500,
556, 278, 278, 500, 278, 778, 556, 500,
500, 500, 389, 389, 278, 556, 444, 667,
500, 444, 389, 348, 220, 348, 570, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 389, 500, 500, 167, 500, 500, 500,
500, 747, 500, 500, 333, 333, 556, 556,
0, 500, 500, 500, 250, 0, 500, 350,
333, 500, 500, 500, 1000, 1000, 0, 500,
0, 333, 333, 333, 333, 333, 333, 333,
333, 0, 333, 333, 0, 333, 333, 333,
1000, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 944, 0, 266, 0, 0, 0, 0,
611, 722, 944, 300, 0, 0, 0, 0,
0, 722, 0, 0, 0, 278, 0, 0,
278, 500, 722, 500, 0, 0, 0, 0,
])
|
StarcoderdataPython
|
9653111
|
"""
Django settings for e_shop project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEFAULT_JINJA2_TEMPLATE_EXTENSION = '.jinja'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [os.path.join(BASE_DIR, 'e_shop/templates')],
'OPTIONS': {
'extensions': [
                # The default extensions; you should include them
                # if you are overwriting the settings.
"jinja2.ext.do",
"jinja2.ext.loopcontrols",
"jinja2.ext.with_",
"jinja2.ext.i18n",
"jinja2.ext.autoescape",
"django_jinja.builtins.extensions.CsrfExtension",
"django_jinja.builtins.extensions.CacheExtension",
"django_jinja.builtins.extensions.TimezoneExtension",
"django_jinja.builtins.extensions.UrlsExtension",
"django_jinja.builtins.extensions.StaticFilesExtension",
"django_jinja.builtins.extensions.DjangoFiltersExtension",
'e_shop.extensions.StaticFileVersioningExtension'
]
}
}
]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'sorl.thumbnail',
'corsheaders',
'oauth2_provider',
'e_shop'
)
AUTHENTICATION_BACKENDS = (
'oauth2_provider.backends.OAuth2Backend',
# Uncomment following if you want to access the admin
'django.contrib.auth.backends.ModelBackend'
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'e_shop.middlewares.SessionManagerMiddleware'
)
ROOT_URLCONF = 'e_shop.urls'
WSGI_APPLICATION = 'e_shop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'NAME':'e_shop',
'ENGINE':'django.db.backends.mysql',
'USER':'root',
'PASSWORD':'<PASSWORD>'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = '/home/tristan/workplace/workspace_python/e_shop/src/e_shop/upload/'
MEDIA_URL = '/api/file/'
LOGIN_URL = '/account/login/'
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = ('localhost:8000',)  # trailing comma makes this a tuple rather than a bare string
|
StarcoderdataPython
|
317457
|
from blockchain import *
from time import time
import pprint
pp = pprint.PrettyPrinter(indent=4)
blockchain = Blockchain()
transactions = []
block = Block(transactions, time(), 0)
blockchain.addBlock(block)
block = Block(transactions, time(), 1)
blockchain.addBlock(block)
block = Block(transactions, time(), 2)
blockchain.addBlock(block)
pp.pprint(blockchain.chainJSONencode())
print("Length: ", len(blockchain.chain))
|
StarcoderdataPython
|
220479
|
import numpy as np
from surfinpy import plotting
from surfinpy import utils as ut
def calculate_excess(adsorbant, slab_cations, area, bulk,
nspecies=1, check=False):
r"""Calculates the excess of a given species at the surface.
Depending on the nature of the species, there are two ways to do this.
If the species is a constituent part of the surface, e.g.
Oxygen in :math:`TiO_2` then the calculation must account for
the stoichiometry of that material. Using the :math:`TiO_2` example
.. math::
\Gamma_O = \frac{1}{2A} \Bigg( nO_{Slab} - \frac{nO_{Bulk}}
{nTi_{Bulk}}nTi_{Slab} \Bigg)
where :math:`nO_{Slab}` is the number of oxygen in the slab,
:math:`nO_{Bulk}` is the number of oxygen in the bulk,
A is the surface area, :math:`nTi_{Bulk}` is the number of Ti in
the bulk and :math:`nTi_{Slab}` is the number of Ti in the slab.
If the species is just an external adsorbant, e.g. water or carbon dioxide
then one does not need to consider the state of the surface,
as there was none there to begin with.
.. math::
\Gamma_{H_2O} = \frac{nH_2O}{2A}
where :math:`nH_2O` is the number of water molecules and A is the
surface area.
Parameters
----------
adsorbant : :py:attr:`int`
Number of species
slab_cations : :py:attr:`int`
Number of cations
area : :py:attr:`float`
Area of surface
bulk : :py:attr:`dict`
        Dictionary of bulk properties
nspecies : :py:attr:`int`
number of external species
check : :py:attr:`bool`
Check if this is an external or constituent species.
Returns
-------
:py:attr:`float`
Surface excess of given species.
"""
if check is True and nspecies == 1:
return ((adsorbant - ((bulk.anion / bulk.cation) *
slab_cations)) / (2 * area))
else:
return (adsorbant / (area * 2))
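# Illustrative worked example (hedged; numbers are made up, not from any dataset):
# for a TiO2-style slab with 20 O and 8 Ti, a bulk anion:cation ratio of 2:1 and a
# surface area of 50, calculate_excess(20, 8, 50, bulk, nspecies=1, check=True)
# evaluates (20 - (2 / 1) * 8) / (2 * 50) = 0.04, while a purely external adsorbant
# such as 4 water molecules on the same area gives calculate_excess(4, 8, 50, bulk)
# = 4 / (2 * 50) = 0.04.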
def calculate_normalisation(slab_energy, slab_cations, bulk, area):
r"""Normalises the slab energy relative to the bulk material.
Thus allowing the different slab calculations to be compared.
.. math::
Energy = \frac{1}{2A} \Bigg( E_{MO}^{slab} -
\frac{nCat_{slab}}{nCat_{Bulk}} E_{MO}^{Bulk} \Bigg)
where Energy is the slab energy normalised to the
bulk, :math:`E_{MO}^{slab}` is the DFT slab energy, :math:`nCat_{slab}`
is the number of slab cations, :math:`nCat_{Bulk}` is the number of bulk
cations, :math:`E_{MO}^{Bulk}` is the DFT bulk energy A is the surface
area.
Parameters
----------
slab_energy : :py:attr:`float`
Energy of the slab from DFT
slab_cations : :py:attr:`int`
Total number of cations in the slab
bulk : :py:class:`surfinpy.data.DataSet`
Bulk properties
area : :py:attr:`float`
Surface area
Returns
-------
:py:attr:`float`
Constant normalising the slab energy to the bulk energy.
"""
return ((slab_energy - (slab_cations / bulk.cation) * (bulk.energy /
bulk.funits)) / (2 * area))
def calculate_surface_energy(deltamux, deltamuy, x_energy, y_energy,
xexcess, yexcess, normalised_bulk):
r"""Calculates the surface for a given chemical potential of
species x and species y for a single phase.
.. math::
\gamma_{Surf} = \frac{1}{2S} \Bigg( E_{MO}^{slab} -
\frac{nCat_{Slab}}{nCat_{Bulk}} E_{MO}^{Bulk} \Bigg) -
\Gamma_O \mu_O - \Gamma_{H_2O} \mu_{H_2O} -
\Gamma_O \mu_O (T) - \Gamma_{H_2O} \mu_{H_2O} (T)
where S is the surface area, :math:`E_{MO}^{slab}` is the DFT energy of
the stoichiometric slab, :math:`nCat_{Slab}` is the number of cations
    in the slab, :math:`nCat_{Bulk}` is the number of cations in the bulk
unit cell, :math:`E_{MO}^{Bulk}` is the DFT energy of the bulk unit cell,
:math:`\Gamma_O` :math:`\Gamma_{H_2O}` is the excess oxygen / water at
the surface and :math:`\mu_O` :math:`\mu_{H_2O}` is the oxygen /
    water chemical potential.
Parameters
----------
deltamux : :py:attr:`array_like`
Chemical potential of species x
deltamuy : :py:attr:`array_like`
Chemical potential of species y
x_energy : :py:attr:`float`
DFT energy or temperature corrected DFT energy
y_energy : :py:attr:`float`
DFT energy or temperature corrected DFT energy
xexcess : :py:attr:`float`
Surface excess of species x
yexcess : :py:attr:`float`
Surface excess of species y
normalised_bulk : :py:attr:`float`
Slab energy normalised to the bulk value.
Returns
-------
:py:attr:`array_like`
2D array of surface energies as a function of
chemical potential of x and y
"""
    # the factor 16.021 converts eV per square Angstrom to J per square metre
    return ((normalised_bulk - (deltamux * xexcess) - (deltamuy * yexcess) - (
        x_energy * xexcess) - (y_energy * yexcess)) * 16.021)
def evaluate_phases(data, bulk, x, y, nsurfaces, x_energy, y_energy):
"""Calculates the surface energies of each phase as a function of chemical
potential of x and y. Then uses this data to evaluate which phase is most
stable at that x/y chemical potential cross section.
Parameters
----------
data : :py:attr:`list`
List containing the :py:class:`surfinpy.data.DataSet` for each phase
bulk : :py:class:`surfinpy.data.DataSet`
Data for bulk
x : :py:attr:`dict`
X axis chemical potential values
y : :py:attr:`dict`
Y axis chemical potential values
nsurfaces : :py:attr:`int`
Number of phases
x_energy : :py:attr:`float`
DFT 0K energy for species x
y_energy : :py:attr:`float`
DFT 0K energy for species y
Returns
-------
phase_data : :py:attr:`array_like`
array of ints, with each int corresponding to a phase.
"""
xnew = ut.build_xgrid(x, y)
ynew = ut.build_ygrid(x, y)
S = np.array([])
for k in range(0, nsurfaces):
xexcess = calculate_excess(data[k].x, data[k].cation,
data[k].area, bulk,
data[k].nspecies, check=True)
yexcess = calculate_excess(data[k].y, data[k].cation,
data[k].area, bulk)
normalised_bulk = calculate_normalisation(data[k].energy,
data[k].cation, bulk,
data[k].area)
SE = calculate_surface_energy(xnew, ynew,
x_energy,
y_energy,
xexcess,
yexcess,
normalised_bulk)
S = np.append(S, SE)
phase_data, surface_energy = ut.get_phase_data(S, nsurfaces)
return phase_data, surface_energy
def calculate(data, bulk, deltaX, deltaY, x_energy=0, y_energy=0, increments=0.025):
"""Initialise the surface energy calculation.
Parameters
----------
data : :py:attr:`list`
List of :py:class:`surfinpy.data.DataSet` for each phase
bulk : :py:class:`surfinpy.data.ReferenceDataSet`
Data for bulk
deltaX : :py:attr:`dict`
Range of chemical potential/label for species X
    deltaY : :py:attr:`dict`
Range of chemical potential/label for species Y
x_energy : :py:attr:`float`
DFT energy of adsorbing species
y_energy : :py:attr:`float`
        DFT energy of adsorbing species
    increments : :py:attr:`float`
        Step size used to build the chemical potential grids
Returns
-------
system : :py:class:`surfinpy.plotting.ChemicalPotentialPlot`
Plotting object
"""
nsurfaces = len(data)
X = np.arange(deltaX['Range'][0], deltaX['Range'][1],
increments, dtype="float")
Y = np.arange(deltaY['Range'][0], deltaY['Range'][1],
increments, dtype="float")
X = X - x_energy
Y = Y - y_energy
phases, SE = evaluate_phases(data, bulk, X, Y,
nsurfaces, x_energy, y_energy)
ticks = np.unique([phases])
colors = ut.list_colors(data, ticks)
phases = ut.transform_numbers(phases, ticks)
Z = np.reshape(phases, (Y.size, X.size))
SE = np.reshape(SE, (Y.size, X.size))
labels = ut.get_labels(ticks, data)
system = plotting.ChemicalPotentialPlot(X,
Y,
Z,
labels,
ticks,
colors,
deltaX['Label'],
deltaY['Label'])
return system, SE
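# Minimal usage sketch (hedged): the DataSet / ReferenceDataSet field names below are
# assumptions made for illustration and may not match the real surfinpy constructors.
#
#   from surfinpy import data as sp_data
#   bulk = sp_data.ReferenceDataSet(cation=1, anion=2, energy=-780.0, funits=4)
#   stoich = sp_data.DataSet(cation=24, x=48, y=0, area=60.0, energy=-575.0,
#                            label="Stoichiometric", nspecies=1)
#   deltaX = {'Range': [-12, -6], 'Label': 'O'}
#   deltaY = {'Range': [-19, -12], 'Label': 'H_2O'}
#   system, SE = calculate([stoich], bulk, deltaX, deltaY,
#                          x_energy=-9.08, y_energy=-14.84)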
|
StarcoderdataPython
|
3226530
|
<reponame>machinalis/alfajor
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.localflavor.ar.forms import ARPostalCodeField
from django.contrib.localflavor.ar.forms import ARProvinceSelect
from django.forms.util import ErrorList
from ventas.models import DatosDeEnvio, GastosDeEnvio, ARP
class DumbSelect(forms.Select):
EMPTY_CHOICES = [('Otra', 'Other'),]
def __init__(self, attrs=None, choices=None):
if choices:
choices += DumbSelect.EMPTY_CHOICES
else:
choices = DumbSelect.EMPTY_CHOICES
super(DumbSelect, self).__init__(attrs=attrs, choices=choices)
class GastosDeEnvioSelect(forms.Select):
def __init__(self, gastos_de_envio, attrs=None, choices=None):
"""
Shipping costs is a queryset from models.GastosDeEnvio.
Assuming that provinces are being saved with province select
"""
choices_of_prov = [(p.provincia, ARP.get(p.provincia))
for p in gastos_de_envio]
        if choices:
            choices += choices_of_prov
        else:
            choices = choices_of_prov
super(GastosDeEnvioSelect, self).__init__(attrs=attrs, choices=choices)
def add_css_classes(f, **kwargs):
"""
From: http://djangosnippets.org/snippets/2097/
"""
field = f.formfield(**kwargs)
if field and field.required:
attrs = field.widget.attrs
attrs['class'] = attrs.get('class', '') + 'required'
return field
class DatosDeEnvioForm(forms.ModelForm):
formfield_callback = add_css_classes
direccion = forms.CharField(label=u'Dirección', required=True,
widget=forms.TextInput(attrs={'class':
'required'
}))
localidad = forms.CharField(widget=DumbSelect(), required=False)
codigo_de_area = forms.CharField(label=u'Código de Área',
widget=forms.TextInput(attrs={'class':
'required'
' telefono'}
))
telefono = forms.CharField(label=u'Teléfono',
widget=forms.TextInput(attrs={'class':
'required'
' telefono'
}))
codigo_postal = ARPostalCodeField(label=u'Código Postal',
widget=forms.TextInput(attrs={'class':
'required'
}))
def _add_msg_to_error_fields(self, fieldlist, msg):
for fieldname in fieldlist:
errorlist = self._errors.get(fieldname)
if errorlist:
errorlist.append(msg)
else:
self._errors[fieldname] = ErrorList([msg])
def clean(self, *args, **kwargs):
super(DatosDeEnvioForm, self).clean()
cleaned_data = self.cleaned_data
codigo_de_area = cleaned_data.get('codigo_de_area')
telefono = cleaned_data.get('telefono')
if not (codigo_de_area and telefono):
msg = u"Este campo sólo acepta números"
self._add_msg_to_error_fields(('telefono',), msg)
raise forms.ValidationError(msg)
if not (codigo_de_area.isdigit() and telefono.isdigit()):
msg = u"Este campo sólo acepta números"
self._add_msg_to_error_fields(('telefono',), msg)
raise forms.ValidationError(msg)
return cleaned_data
class Meta:
model = DatosDeEnvio
widgets = {
'provincia': GastosDeEnvioSelect(
GastosDeEnvio.objects.filter(localidad="")
),
}
class GastosDeEnvioForm(forms.ModelForm):
class Meta:
model = GastosDeEnvio
widgets = {
'provincia': ARProvinceSelect(),
}
|
StarcoderdataPython
|
3429557
|
import numpy as np
import torch
import copy
from ..utils.utils_data import nasbench2graph, nasbench2graph2
from .acquisition_functions import acq_fn
from ..eigen.trainer_predictor import NasBenchGinPredictorTrainer
from ..eigen.trainer_uncertainty_predictor import NasBenchGinGaussianTrainer
from nas_lib.utils.corr import get_kendalltau_coorlection
def gin_predictor_new_nasbench_101(search_space,
num_init=10,
k=10,
total_queries=150,
acq_opt_type='mutation',
allow_isomorphisms=False,
verbose=1,
agent=None,
logger=None,
gpu='0',
lr=0.01,
candidate_nums=100,
epochs=1000,
record_kt='F',
record_mutation='F'
):
"""
Bayesian optimization with a neural network model
"""
device = torch.device('cuda:%d' % gpu)
data = search_space.generate_random_dataset(num=num_init,
allow_isomorphisms=allow_isomorphisms,
deterministic_loss=True)
query = num_init + k
search_agent = agent
kt_list = []
kt_top_list = []
mutate_list = []
if len(data) <= 10:
batch_size = 10
else:
batch_size = 16
while query <= total_queries:
arch_data = [d[0] for d in data]
agent = NasBenchGinPredictorTrainer(search_agent, lr=lr, device=device, epochs=epochs,
train_images=len(data), batch_size=batch_size)
val_accuracy = np.array([d[4] for d in data])
arch_data_edge_idx_list = []
arch_data_node_f_list = []
for arch in arch_data:
edge_index, node_f = nasbench2graph2(arch)
arch_data_edge_idx_list.append(edge_index)
arch_data_node_f_list.append(node_f)
if record_mutation == 'T':
candidates, dist_list, replicate_num, mutated_nums_list, mutated_arch_list \
= search_space.get_candidates(data,
num=candidate_nums,
acq_opt_type=acq_opt_type,
allow_isomorphisms=allow_isomorphisms,
return_dist=True)
cand_val_list = [cand[4] for cand in candidates]
mutate_list.append((dist_list, replicate_num, mutated_nums_list, mutated_arch_list, cand_val_list))
else:
candidates = search_space.get_candidates(data,
num=candidate_nums,
acq_opt_type=acq_opt_type,
allow_isomorphisms=allow_isomorphisms)
candiate_edge_list = []
candiate_node_list = []
for cand in candidates:
edge_index, node_f = nasbench2graph2(cand[0])
candiate_edge_list.append(edge_index)
candiate_node_list.append(node_f)
agent.fit(arch_data_edge_idx_list, arch_data_node_f_list, val_accuracy, logger=None)
acc_train = agent.pred(arch_data_edge_idx_list, arch_data_node_f_list)
acc_pred = agent.pred(candiate_edge_list, candiate_node_list)
candidate_np = acc_pred.cpu().numpy()
candidates_gt = [can[4] for can in candidates]
if query == 20:
candidate_np = np.array(candidates_gt)
if record_kt == 'T':
kt = get_kendalltau_coorlection(candidate_np.tolist(), candidates_gt)[0]
kt_list.append(kt)
sorted_indices = np.argsort(candidate_np)
kt_top_pred_list = []
kt_top_gt_list = []
for i in sorted_indices[:k]:
archtuple = search_space.query_arch(matrix=candidates[i][1],
ops=candidates[i][2])
data.append(archtuple)
kt_top_pred_list.append(candidate_np[i])
kt_top_gt_list.append(archtuple[4])
kt_top_list.append(get_kendalltau_coorlection(kt_top_pred_list, kt_top_gt_list)[0])
if verbose:
top_5_loss = sorted([d[4] for d in data])[:min(5, len(data))]
logger.info('Query {}, training mean loss is {}'.format(query,
np.mean(np.abs(acc_train.cpu().numpy()-val_accuracy))))
logger.info('Query {}, top 5 val losses {}'.format(query, top_5_loss))
query += k
return data, {'type': 'gin_predictor_new', 'final_data': data,
'kt_list': kt_list, 'kt_top_list': kt_top_list,
'mutate_list': mutate_list}
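# Note on the tuple layout used in gin_predictor_new_nasbench_101 above: each entry d
# in `data` stores the architecture encoding at d[0] (fed to nasbench2graph2) and the
# validation metric at d[4], which is why val_accuracy, candidates_gt and the top-5
# summary all index position 4.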
def gin_predictor_new_nasbench_201(search_space,
dataname='cifar100',
num_init=10,
k=10,
total_queries=150,
acq_opt_type='mutation',
allow_isomorphisms=False,
verbose=1,
agent=None,
logger=None,
gpu='0',
lr=0.01,
candidate_nums=100,
epochs=1000,
record_kt='F',
record_mutation='F'
):
"""
Bayesian optimization with a neural network model
"""
if dataname == 'cifar10-valid':
rate = 100.
elif dataname == 'cifar100':
rate = 100.
elif dataname == 'ImageNet16-120':
rate = 100.
else:
raise NotImplementedError()
kt_list = []
kt_top_list = []
mutate_list = []
device = torch.device('cuda:%d' % gpu)
data = search_space.generate_random_dataset(num=num_init,
allow_isomorphisms=allow_isomorphisms,
deterministic_loss=True)
query = num_init + k
search_agent = agent
if len(data) <= 10:
batch_size = 10
else:
batch_size = 16
while query <= total_queries:
arch_data = [d[0] for d in data]
agent = NasBenchGinPredictorTrainer(search_agent, lr=lr, device=device, epochs=epochs,
train_images=len(data), batch_size=batch_size, input_dim=8, rate=rate)
val_accuracy = np.array([d[4] for d in data])
arch_data_edge_idx_list = []
arch_data_node_f_list = []
for arch in arch_data:
edge_index, node_f = search_space.nasbench2graph2(arch)
arch_data_edge_idx_list.append(edge_index)
arch_data_node_f_list.append(node_f)
if record_mutation == 'T':
candidates, dist_list, replicate_num, mutated_nums_list, mutated_arch_list \
= search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms,
return_dist=True
)
cand_val_list = [cand[4] for cand in candidates]
mutate_list.append((dist_list, replicate_num, mutated_nums_list, mutated_arch_list, cand_val_list))
else:
candidates = search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms
)
candiate_edge_list = []
candiate_node_list = []
for cand in candidates:
edge_index, node_f = search_space.nasbench2graph2(cand[0])
candiate_edge_list.append(edge_index)
candiate_node_list.append(node_f)
agent.fit(arch_data_edge_idx_list, arch_data_node_f_list, val_accuracy, logger=None)
acc_train = agent.pred(arch_data_edge_idx_list, arch_data_node_f_list)
acc_pred = agent.pred(candiate_edge_list, candiate_node_list)
candidate_np = acc_pred.cpu().numpy()
candidates_gt = [can[4] for can in candidates]
if query == 20:
candidate_np = np.array(candidates_gt)
if record_kt == 'T':
kt = get_kendalltau_coorlection(candidate_np.tolist(), candidates_gt)[0]
kt_list.append(kt)
sorted_indices = np.argsort(candidate_np)
kt_top_pred_list = []
kt_top_gt_list = []
for i in sorted_indices[:k]:
archtuple = candidates[i]
data.append(archtuple)
kt_top_pred_list.append(candidate_np[i])
kt_top_gt_list.append(archtuple[4])
kt_top_list.append(get_kendalltau_coorlection(kt_top_pred_list, kt_top_gt_list)[0])
if verbose:
top_5_loss = sorted([d[4] for d in data])[:min(5, len(data))]
logger.info('Query {}, training mean loss is {}'.format(query,
np.mean(np.abs(acc_train.cpu().numpy()-val_accuracy))))
logger.info('Query {}, top 5 val losses {}'.format(query, top_5_loss))
query += k
return data, {'type': 'gin_predictor_new', 'final_data': data,
'kt_list': kt_list, 'kt_top_list': kt_top_list, 'mutate_list': mutate_list}
def gin_predictor_new_nasbench_nlp(search_space,
num_init=10,
k=10,
total_queries=150,
acq_opt_type='mutation',
allow_isomorphisms=False,
verbose=1,
agent=None,
logger=None,
gpu='0',
lr=0.01,
candidate_nums=100,
epochs=1000,
mutation_rate=0.1,
record_kt='F',
record_mutation='F',
rate=None
):
"""
Bayesian optimization with a neural network model
"""
device = torch.device('cuda:%d' % gpu)
data = search_space.generate_random_dataset(num=num_init,
allow_isomorphisms=allow_isomorphisms,
deterministic_loss=True)
query = num_init + k
search_agent = agent
if len(data) <= 10:
batch_size = 10
else:
batch_size = 16
kt_list = []
kt_top_list = []
mutate_list = []
if rate is not None:
rate = rate
else:
rate = 5.
while query <= total_queries:
arch_data = [(d[1], d[2]) for d in data]
agent = NasBenchGinPredictorTrainer(search_agent, lr=lr, device=device, epochs=epochs,
train_images=len(data), batch_size=batch_size, input_dim=10, rate=rate)
val_accuracy = np.array([d[4] for d in data])
arch_data_edge_idx_list = []
arch_data_node_f_list = []
for arch in arch_data:
edge_index, node_f = search_space.nasbench2graph2(arch)
arch_data_edge_idx_list.append(edge_index)
arch_data_node_f_list.append(node_f)
if record_mutation == 'T':
candidates, dist_list, replicate_num, mutated_nums_list, mutated_arch_list \
= search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms,
mutation_rate=mutation_rate,
return_dist=True
)
cand_val_list = [cand[4] for cand in candidates]
mutate_list.append((dist_list, replicate_num, mutated_nums_list, mutated_arch_list, cand_val_list))
else:
candidates = search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms,
mutation_rate=mutation_rate
)
candiate_edge_list = []
candiate_node_list = []
for cand in candidates:
edge_index, node_f = search_space.nasbench2graph2((cand[1], cand[2]))
candiate_edge_list.append(edge_index)
candiate_node_list.append(node_f)
agent.fit(arch_data_edge_idx_list, arch_data_node_f_list, val_accuracy, logger=None)
acc_train = agent.pred(arch_data_edge_idx_list, arch_data_node_f_list)
acc_pred = agent.pred(candiate_edge_list, candiate_node_list)
candidate_np = acc_pred.cpu().numpy()
candidates_gt = [can[4] for can in candidates]
if query == 20:
candidate_np = np.array(candidates_gt)
if record_kt == 'T':
kt = get_kendalltau_coorlection(candidate_np.tolist(), candidates_gt)[0]
kt_list.append(kt)
sorted_indices = np.argsort(candidate_np)
kt_top_pred_list = []
kt_top_gt_list = []
for i in sorted_indices[:k]:
archtuple = candidates[i]
data.append(archtuple)
kt_top_pred_list.append(candidate_np[i])
kt_top_gt_list.append(archtuple[4])
kt_top_list.append(get_kendalltau_coorlection(kt_top_pred_list, kt_top_gt_list)[0])
if verbose:
top_5_loss = sorted([d[4] for d in data])[:min(5, len(data))]
logger.info('Query {}, training mean loss is {}'.format(query,
np.mean(np.abs(acc_train.cpu().numpy()-val_accuracy))))
logger.info('Query {}, top 5 val losses {}'.format(query, top_5_loss))
query += k
return data, {'type': 'gin_predictor_new', 'final_data': data, 'kt_top_list': kt_top_list,
'kt_list': kt_list, 'mutate_list': mutate_list}
def gin_predictor_new_nasbench_asr(search_space,
num_init=10,
k=10,
total_queries=150,
acq_opt_type='mutation',
allow_isomorphisms=False,
verbose=1,
agent=None,
logger=None,
gpu='0',
lr=0.01,
candidate_nums=100,
epochs=1000,
mutation_rate=1,
record_kt='F',
record_mutation='F',
rate=None
):
"""
Bayesian optimization with a neural network model
"""
device = torch.device('cuda:%d' % gpu)
data = search_space.generate_random_dataset(num=num_init,
allow_isomorphisms=allow_isomorphisms,
deterministic_loss=True)
query = num_init + k
search_agent = agent
if len(data) <= 10:
batch_size = 10
else:
batch_size = 16
kt_list = []
kt_top_list = []
mutate_list = []
if rate is None:
rate = 100.
else:
rate = rate
while query <= total_queries:
arch_data = [(d[1], d[2]) for d in data]
agent = NasBenchGinPredictorTrainer(search_agent, lr=lr, device=device, epochs=epochs,
train_images=len(data), batch_size=batch_size, input_dim=9, rate=rate)
val_accuracy = np.array([d[4] for d in data])
arch_data_edge_idx_list = []
arch_data_node_f_list = []
for arch in arch_data:
edge_index, node_f = search_space.nasbench2graph2(arch)
arch_data_edge_idx_list.append(edge_index)
arch_data_node_f_list.append(node_f)
if record_mutation == 'T':
candidates, dist_list, replicate_num, mutated_nums_list, mutated_arch_list \
= search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms,
mutation_rate=mutation_rate,
return_dist=True
)
cand_val_list = [cand[4] for cand in candidates]
mutate_list.append((dist_list, replicate_num, mutated_nums_list, mutated_arch_list, cand_val_list))
else:
candidates = search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms,
mutation_rate=mutation_rate
)
candiate_edge_list = []
candiate_node_list = []
for cand in candidates:
edge_index, node_f = search_space.nasbench2graph2((cand[1], cand[2]))
candiate_edge_list.append(edge_index)
candiate_node_list.append(node_f)
agent.fit(arch_data_edge_idx_list, arch_data_node_f_list, val_accuracy, logger=None)
acc_train = agent.pred(arch_data_edge_idx_list, arch_data_node_f_list)
acc_pred = agent.pred(candiate_edge_list, candiate_node_list)
candidate_np = acc_pred.cpu().numpy()
candidates_gt = [can[4] for can in candidates]
if query == 20:
candidate_np = np.array(candidates_gt)
if record_kt == 'T':
kt = get_kendalltau_coorlection(candidate_np.tolist(), candidates_gt)[0]
kt_list.append(kt)
sorted_indices = np.argsort(candidate_np)
kt_top_pred_list = []
kt_top_gt_list = []
for i in sorted_indices[:k]:
archtuple = candidates[i]
data.append(archtuple)
kt_top_pred_list.append(candidate_np[i])
kt_top_gt_list.append(archtuple[4])
kt_top_list.append(get_kendalltau_coorlection(kt_top_pred_list, kt_top_gt_list)[0])
if verbose:
top_5_loss = sorted([d[4] for d in data])[:min(5, len(data))]
logger.info('Query {}, training mean loss is {}'.format(query,
np.mean(np.abs(acc_train.cpu().numpy()-val_accuracy))))
logger.info('Query {}, top 5 val losses {}'.format(query, top_5_loss))
query += k
return data, {'type': 'gin_predictor_new', 'final_data': data, 'kt_top_list': kt_top_list,
'kt_list': kt_list, 'mutate_list': mutate_list}
def gin_predictor_new_2_nasbench_201(search_space,
dataname='cifar100',
num_init=10,
k=10,
total_queries=150,
acq_opt_type='mutation',
allow_isomorphisms=False,
verbose=1,
agent=None,
logger=None,
gpu='0',
lr=0.01,
candidate_nums=100,
epochs=1000,
record_kt='F',
record_mutation='F'
):
"""
Bayesian optimization with a neural network model
"""
if dataname == 'cifar10-valid':
rate = 100.
elif dataname == 'cifar100':
rate = 100.
elif dataname == 'ImageNet16-120':
rate = 100.
else:
raise NotImplementedError()
kt_list = []
mutate_list = []
device = torch.device('cuda:%d' % gpu)
data = search_space.generate_random_dataset(num=90,
allow_isomorphisms=allow_isomorphisms,
deterministic_loss=True)
query = num_init + k
search_agent = agent
if len(data) <= 10:
batch_size = 10
else:
batch_size = 16
agent = NasBenchGinPredictorTrainer(search_agent, lr=lr, device=device, epochs=epochs,
train_images=len(data), batch_size=batch_size, input_dim=8, rate=rate)
while query <= total_queries:
arch_data = [d[0] for d in data]
val_accuracy = np.array([d[4] for d in data])
arch_data_edge_idx_list = []
arch_data_node_f_list = []
for arch in arch_data:
edge_index, node_f = search_space.nasbench2graph2(arch)
arch_data_edge_idx_list.append(edge_index)
arch_data_node_f_list.append(node_f)
if query == 20:
agent.fit(arch_data_edge_idx_list, arch_data_node_f_list, val_accuracy, logger=None)
if record_mutation == 'T':
candidates, dist_list, replicate_num, mutated_nums_list, mutated_arch_list \
= search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms,
return_dist=True
)
cand_val_list = [cand[4] for cand in candidates]
mutate_list.append((dist_list, replicate_num, mutated_nums_list, mutated_arch_list, cand_val_list))
else:
candidates = search_space.get_candidates(data,
num=candidate_nums,
allow_isomorphisms=allow_isomorphisms
)
candiate_edge_list = []
candiate_node_list = []
for cand in candidates:
edge_index, node_f = search_space.nasbench2graph2(cand[0])
candiate_edge_list.append(edge_index)
candiate_node_list.append(node_f)
acc_train = agent.pred(arch_data_edge_idx_list, arch_data_node_f_list)
acc_pred = agent.pred(candiate_edge_list, candiate_node_list)
candidate_np = acc_pred.cpu().numpy()
candidates_gt = [can[4] for can in candidates]
if record_kt == 'T':
kt = get_kendalltau_coorlection(candidate_np.tolist(), candidates_gt)[0]
kt_list.append(kt)
sorted_indices = np.argsort(candidate_np)
for i in sorted_indices[:k]:
archtuple = candidates[i]
data.append(archtuple)
if verbose:
top_5_loss = sorted([d[4] for d in data])[:min(5, len(data))]
logger.info('Query {}, training mean loss is {}'.format(query,
np.mean(np.abs(acc_train.cpu().numpy()-val_accuracy))))
logger.info('Query {}, top 5 val losses {}'.format(query, top_5_loss))
query += k
return data, {'type': 'gin_predictor_new_2', 'final_data': data,
'kt_list': kt_list, 'mutate_list': mutate_list}
|
StarcoderdataPython
|
9637822
|
<reponame>Alecto3-D/testable-greeter
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from future.utils import iteritems
from datetime import datetime
from fnmatch import fnmatch
from twisted.internet import defer
from buildbot import config
from buildbot.changes import base
from buildbot.util import ascii2unicode
from buildbot.util import datetime2epoch
from buildbot.util import httpclientservice
from buildbot.util.logger import Logger
from buildbot.util.state import StateMixin
log = Logger()
HOSTED_BASE_URL = "https://api.github.com"
link_urls = {
"https": "clone_url",
"svn": "svn_url",
"git": "git_url",
"ssh": "ssh_url"
}
class PullRequestMixin(object):
def extractProperties(self, payload):
def flatten(properties, base, info_dict):
for k, v in iteritems(info_dict):
name = ".".join([base, k])
if isinstance(v, dict):
flatten(properties, name, v)
elif any([fnmatch(name, expr)
for expr in self.github_property_whitelist]):
properties[name] = v
properties = {}
flatten(properties, "github", payload)
return properties
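    # Example of the flattening above (illustrative): with
    # github_property_whitelist = ["github.base.*"], a payload such as
    # {"base": {"ref": "master", "sha": "abc123"}, "title": "Fix bug"} becomes
    # {"github.base.ref": "master", "github.base.sha": "abc123"}; "github.title"
    # is dropped because it matches no whitelist pattern.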
class GitHubPullrequestPoller(base.ReconfigurablePollingChangeSource,
StateMixin, PullRequestMixin):
compare_attrs = ("owner", "repo", "token", "branches", "pollInterval",
"category", "pollAtLaunch", "name")
db_class_name = 'GitHubPullrequestPoller'
def __init__(self, owner, repo, **kwargs):
name = kwargs.get("name")
if not name:
kwargs["name"] = "GitHubPullrequestPoller:" + owner + "/" + repo
super(GitHubPullrequestPoller, self).__init__(owner, repo, **kwargs)
def checkConfig(self,
owner,
repo,
branches=None,
category='pull',
baseURL=None,
pullrequest_filter=True,
token=None,
magic_link=False,
repository_type="https",
github_property_whitelist=None,
**kwargs):
if repository_type not in ["https", "svn", "git", "ssh"]:
config.error(
"repository_type must be one of {https, svn, git, ssh}")
base.ReconfigurablePollingChangeSource.checkConfig(
self, name=self.name, **kwargs)
@defer.inlineCallbacks
def reconfigService(self,
owner,
repo,
branches=None,
pollInterval=10 * 60,
category=None,
baseURL=None,
pullrequest_filter=True,
token=None,
pollAtLaunch=False,
magic_link=False,
repository_type="https",
github_property_whitelist=None,
**kwargs):
yield base.ReconfigurablePollingChangeSource.reconfigService(
self, name=self.name, **kwargs)
if baseURL is None:
baseURL = HOSTED_BASE_URL
if baseURL.endswith('/'):
baseURL = baseURL[:-1]
http_headers = {'User-Agent': 'Buildbot'}
if token is not None:
http_headers.update({'Authorization': 'token ' + token})
self._http = yield httpclientservice.HTTPClientService.getService(
self.master, baseURL, headers=http_headers)
self.token = token
self.owner = owner
self.repo = repo
self.branches = branches
self.github_property_whitelist = github_property_whitelist
self.pollInterval = pollInterval
self.pollAtLaunch = pollAtLaunch
self.repository_type = link_urls[repository_type]
self.magic_link = magic_link
if github_property_whitelist is None:
self.github_property_whitelist = []
if callable(pullrequest_filter):
self.pullrequest_filter = pullrequest_filter
else:
self.pullrequest_filter = (lambda _: pullrequest_filter)
self.category = category if callable(category) else ascii2unicode(
category)
def describe(self):
return "GitHubPullrequestPoller watching the "\
"GitHub repository %s/%s" % (
self.owner, self.repo)
@defer.inlineCallbacks
def _getPullInformation(self, pull_number):
result = yield self._http.get('/'.join(
['/repos', self.owner, self.repo, 'pulls', str(pull_number)]))
my_json = yield result.json()
defer.returnValue(my_json)
@defer.inlineCallbacks
def _getPulls(self):
log.debug("GitHubPullrequestPoller: polling "
"GitHub repository %s/%s, branches: %s" %
(self.owner, self.repo, self.branches))
result = yield self._http.get('/'.join(
['/repos', self.owner, self.repo, 'pulls']))
my_json = yield result.json()
defer.returnValue(my_json)
@defer.inlineCallbacks
def _getEmail(self, user):
result = yield self._http.get("/".join(['/users', user]))
my_json = yield result.json()
defer.returnValue(my_json["email"])
@defer.inlineCallbacks
def _getFiles(self, prnumber):
result = yield self._http.get("/".join([
'/repos', self.owner, self.repo, 'pulls', str(prnumber), 'files'
]))
my_json = yield result.json()
defer.returnValue([f["filename"] for f in my_json])
@defer.inlineCallbacks
def _getCurrentRev(self, prnumber):
# Get currently assigned revision of PR number
result = yield self._getStateObjectId()
rev = yield self.master.db.state.getState(result, 'pull_request%d' %
prnumber, None)
defer.returnValue(rev)
@defer.inlineCallbacks
def _setCurrentRev(self, prnumber, rev):
# Set the updated revision for PR number.
result = yield self._getStateObjectId()
yield self.master.db.state.setState(result,
'pull_request%d' % prnumber, rev)
@defer.inlineCallbacks
def _getStateObjectId(self):
# Return a deferred for object id in state db.
result = yield self.master.db.state.getObjectId(
'%s/%s' % (self.owner, self.repo), self.db_class_name)
defer.returnValue(result)
@defer.inlineCallbacks
def _processChanges(self, github_result):
for pr in github_result:
# Track PRs for specified branches
base_branch = pr['base']['ref']
prnumber = pr['number']
revision = pr['head']['sha']
# Check to see if the branch is set or matches
if self.branches is not None and base_branch not in self.branches:
continue
if (self.pullrequest_filter is not None and
not self.pullrequest_filter(pr)):
continue
current = yield self._getCurrentRev(prnumber)
if not current or current[0:12] != revision[0:12]:
# Access title, repo, html link, and comments
pr = yield self._getPullInformation(prnumber)
title = pr['title']
if self.magic_link:
branch = 'refs/pull/{:d}/merge'.format(prnumber)
repo = pr['base']['repo'][self.repository_type]
else:
branch = pr['head']['ref']
repo = pr['head']['repo'][self.repository_type]
revlink = pr['html_url']
comments = pr['body']
updated = datetime.strptime(pr['updated_at'],
'%Y-%m-%dT%H:%M:%SZ')
# update database
yield self._setCurrentRev(prnumber, revision)
author = pr['user']['login']
project = pr['base']['repo']['full_name']
commits = pr['commits']
dl = defer.DeferredList(
[self._getFiles(prnumber), self._getEmail(author)],
consumeErrors=True)
results = yield dl
failures = [r[1] for r in results if not r[0]]
if failures:
for failure in failures:
log.error("while processing changes for "
"Pullrequest {} revision {}".format(
prnumber, revision))
# Fail on the first error!
failures[0].raiseException()
[files, email] = [r[1] for r in results]
if email is not None and email != "null":
author += " <" + str(email) + ">"
properties = self.extractProperties(pr)
# emit the change
yield self.master.data.updates.addChange(
author=ascii2unicode(author),
revision=ascii2unicode(revision),
revlink=ascii2unicode(revlink),
comments=u'GitHub Pull Request #{0} ({1} commit{2})\n{3}\n{4}'.
                    format(prnumber, commits, 's'
                           if commits != 1 else '', title, comments),
when_timestamp=datetime2epoch(updated),
branch=ascii2unicode(branch),
category=self.category,
project=project,
repository=ascii2unicode(repo),
files=files,
properties=properties,
src=u'git')
@defer.inlineCallbacks
def poll(self):
result = yield self._getPulls()
yield self._processChanges(result)
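# Minimal master.cfg sketch (hedged; owner, repo and token values are placeholders):
#
#   from buildbot.plugins import changes
#   c['change_source'].append(
#       changes.GitHubPullrequestPoller(owner='example-org', repo='example-repo',
#                                       token='<token>', repository_type='https',
#                                       pollInterval=10 * 60, pollAtLaunch=True))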
|
StarcoderdataPython
|
6579668
|
<reponame>michaelarg/clAIre
# import the necessary packages
from __future__ import print_function
from imutils.video import WebcamVideoStream
from imutils.video import FPS
import argparse
import imutils
import sys
import numpy as np
import cv2
#opencv2 uses BGR
#Order: Red, Orange, Green in HSV
colour = [ ([0,100,100],[10,255,255]) , ([10,150,150],[15,255,255]) , ([50,255,255],[70,255,255]) ]
#redup = np.array[0,0,255]
#reddown = np.array[0,0,150]
vs = WebcamVideoStream(src=0).start()
fps = FPS().start()
font = cv2.FONT_HERSHEY_SIMPLEX
i=0
# loop over some frames...this time using the threaded stream
while(True):
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=400)
i= i + 1
#print(frame)
#print(type(frame))
#print(frame.shape)
#
hsvframe = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# frame = cv2.inRange(frame, redup, reddown)
#Red
redmask = cv2.inRange(hsvframe, np.array(colour[0][0]), np.array(colour[0][1])) #so in range tells us which pixels in our image fall into between the upper and lower bounds
orangemask = cv2.inRange(hsvframe, np.array(colour[1][0]), np.array(colour[1][1])) #so in range tells us which pixels in our image fall into between the upper and lower bounds
greenmask = cv2.inRange(hsvframe, np.array(colour[2][0]), np.array(colour[2][1])) #so in range tells us which pixels in our image fall into between the upper and lower bounds
## print(type(mask))
## print(mask)
## nozero = cv2.findNonZero(mask)
## print(nozero)
## print(len(nozero))
res = cv2.bitwise_and(frame,frame,mask=greenmask)
# print(min(frame[1][1]))
# cv2.putText(frame,"frame"+str(i), (50,50), font , 2, (255,255,255))
cv2.imshow("Frame", res)
# update the FPS counter
fps.update()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
cv2.destroyAllWindows()
vs.stop()
|
StarcoderdataPython
|
3501575
|
# coding:utf-8
from flask import Flask
from flask import render_template, request
app = Flask(__name__)
@app.route('/', methods=['get', 'post'])
def login_view():
# the methods that handle requests are called views, in flask
msg = ''
# form is a dictionary like attribute that holds the form data
if request.method == 'POST':
username = request.form["username"]
passwd = request.form["passwd"]
# static useless validation
if username == 'you' and passwd == '<PASSWORD>':
msg = 'Username and password are correct'
else:
msg = 'Username or password are incorrect'
return render_template('form.html', message=msg)
if __name__ == '__main__':
app.debug = True
app.run()
|
StarcoderdataPython
|
1806702
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh._legacy_charts import DataAdapter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDataAdapter(unittest.TestCase):
def setUp(self):
self._values = OrderedDict()
self._values['first'] = [2., 5., 3.]
self._values['second'] = [4., 1., 4.]
self._values['third'] = [6., 4., 3.]
def test_list(self):
values = list(self._values.values())
da = DataAdapter(values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_array(self):
values = np.array(list(self._values.values()))
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_pandas(self):
values = pd.DataFrame(self._values)
da = DataAdapter(values)
# TODO: THIS SHOULD BE FIXED..
#self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
# We expect data adapter index to be the same as the underlying pandas
# object and not the default created by DataAdapter
self.assertEqual(da.index, [0, 1, 2])
def test_ordered_dict(self):
da = DataAdapter(self._values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_blaze_data_no_fields(self):
import blaze
valuesdf = pd.DataFrame(self._values)
values = blaze.Data(valuesdf)
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, [0, 1, 2])
xs, _values = DataAdapter.get_index_and_data(values, None)
assert_array_equal([0,1,2], xs)
|
StarcoderdataPython
|
9673175
|
<reponame>DianQK/my_slack_bot_demo
import os
from bilibili.testv import fetchLatestTestVideos
import sys
RUN_TASKS = os.environ["RUN_TASKS"]
tasks = RUN_TASKS.split(',')
if 'testv' in tasks:
fetchLatestTestVideos()
if 'sspai' in tasks:
print('TODO')
|
StarcoderdataPython
|
4862713
|
<gh_stars>0
import pandas as pd
# Create dataframes df1 and df2
df1 = pd.DataFrame({
'key':['k1','k2','k3','k4','k5'],
'val1':[200, 500, 0, 500, 100],
'val2':[30, 50, 100, 20, 10]
})
df2 = pd.DataFrame({
'key':['k1','k3','k5','k7','k10'],
'val3':[1,2,3,4,5],
'val4':[6,7,8,8,10]
})
# Merge equivalent to a SQL left join
merge_df_left = pd.merge(left=df2, right=df1, how='left', left_on='key', right_on='key')
print('Merge - Left:\n', merge_df_left)
# Merge equivalent to a SQL right join
merge_df_right = pd.merge(left=df2, right=df1, how='right', left_on='key', right_on='key')
print('Merge - Right:\n', merge_df_right)
# Merge equivalent to a SQL inner join
merge_df_inner = pd.merge(left=df2, right=df1, how='inner', left_on='key', right_on='key')
print('Merge - Inner:\n', merge_df_inner)
# Merge equivalent to a SQL outer join
merge_df_outer = pd.merge(left=df2, right=df1, how='outer', left_on='key', right_on='key')
print('Merge - Outer:\n', merge_df_outer)
|
StarcoderdataPython
|
8130613
|
from robocorp_ls_core.ep_providers import * # Kept for backward compatibility
|
StarcoderdataPython
|
3427606
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 Akretion (http://www.akretion.com)
# @author <NAME> <<EMAIL>>
# Copyright 2016 Sodexis (http://sodexis.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api, _
import odoo.addons.decimal_precision as dp
class CreateRentalProduct(models.TransientModel):
_name = 'create.rental.product'
_description = 'Create the Rental Service Product'
@api.model
def _default_name(self):
assert self.env.context.get('active_model') == 'product.product',\
'Wrong underlying model, should be product.product'
hw_product = self.env['product.product'].browse(
self.env.context['active_id'])
return _('Rental of a %s') % hw_product.name
@api.model
def _default_code(self):
assert self.env.context.get('active_model') == 'product.product',\
'Wrong underlying model, should be product.product'
hw_product = self.env['product.product'].browse(
self.env.context['active_id'])
if hw_product.default_code:
return _('RENT-%s') % hw_product.default_code
else:
return ''
sale_price_per_day = fields.Float(
string='Rental Price per Day', required=True,
digits=dp.get_precision('Product Price'), default=1.0)
name = fields.Char(
string='Product Name', size=64, required=True,
default=_default_name)
default_code = fields.Char(
string='Default Code', size=16, default=_default_code)
categ_id = fields.Many2one(
'product.category', string='Product Category', required=True)
copy_image = fields.Boolean(string='Copy Product Image')
@api.model
def _prepare_rental_product(self):
assert self.env.context.get('active_model') == 'product.product',\
'Wrong underlying model, should be product.product'
hw_product_id = self.env.context.get('active_id')
assert hw_product_id, 'Active ID is not set'
pp_obj = self.env['product.product']
hw_product = pp_obj.browse(hw_product_id)
day_uom_id = self.env.ref('product.product_uom_day').id
vals = {
'type': 'service',
'sale_ok': True,
'purchase_ok': False,
'uom_id': day_uom_id,
'uom_po_id': day_uom_id,
'list_price': self.sale_price_per_day,
'name': self.name,
'default_code': self.default_code,
'rented_product_id': hw_product_id,
'must_have_dates': True,
'categ_id': self.categ_id.id,
'invoice_policy': 'order',
}
if self.copy_image:
vals['image'] = hw_product.image
return vals
@api.multi
def create_rental_product(self):
self.ensure_one()
pp_obj = self.env['product.product']
        # check that a rental product doesn't already exist?
product = pp_obj.create(self._prepare_rental_product())
action = {
'name': pp_obj._description,
'type': 'ir.actions.act_window',
'res_model': pp_obj._name,
'view_mode': 'form,tree,kanban',
'nodestroy': False, # Close the wizard pop-up
'target': 'current',
'res_id': product.id,
}
return action
|
StarcoderdataPython
|
221042
|
<reponame>bjpop/blip<gh_stars>100-1000
print(2 not in [1,2,3])
print(2 not in [4,5])
print(1 not in [1] not in [[1]])
print(3 not in [1] not in [[1]])
|
StarcoderdataPython
|
9645129
|
<filename>app/models.py<gh_stars>0
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
@login_manager.user_loader
def load_user(user_id):
return Admin.query.get(int(user_id))
class Admin(UserMixin,db.Model):
__tablename__ = 'admin'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email = db.Column(db.String(255),unique = True,index = True)
password_hash = db.Column(db.String(255))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self,password):
self.password_hash = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.password_hash,password)
def __repr__(self):
return f'{self.username}'
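    # Example (illustrative): the password property is write-only; assigning it stores
    # only a salted hash and verification goes through check_password_hash:
    #   admin = Admin(username='jane', email='jane@example.com')
    #   admin.password = '<PASSWORD>'       # stored as password_hash
    #   admin.verify_password('<PASSWORD>') # -> True
    #   admin.password                      # -> raises AttributeError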
class Blogpost(UserMixin,db.Model):
__tablename__ = 'blogposts'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String())
date = db.Column(db.String())
fake_date = db.Column(db.String(255))
category = db.Column(db.String(255))
paragraph1 = db.Column(db.String())
paragraph2 = db.Column(db.String())
paragraph3 = db.Column(db.String())
paragraph4 = db.Column(db.String())
blogpic_id = db.Column(db.Integer,db.ForeignKey('blogpics.id'))
comment = db.relationship("Comment",backref='blogpost',lazy="dynamic")
def __repr__(self):
return f'{self.title}'
def save_blogposts(self):
db.session.add(self)
db.session.commit()
def delete_blogposts(self):
db.session.delete(self)
db.session.commit()
class Blogpics(UserMixin,db.Model):
__tablename__ = 'blogpics'
id = db.Column(db.Integer,primary_key = True)
img1 = db.Column(db.String(255))
img2 = db.Column(db.String(255))
img3 = db.Column(db.String(255))
img4 = db.Column(db.String(255))
img5 = db.Column(db.String(255))
img6 = db.Column(db.String(255))
img7 = db.Column(db.String(255))
img8 = db.Column(db.String(255))
img9 = db.Column(db.String(255))
img10 = db.Column(db.String(255))
blogposts = db.relationship('Blogpost',backref='Blogpics',lazy="dynamic")
def __repr__(self):
return f'{self.img1}'
class Subscriber(UserMixin,db.Model):
__tablename__ = 'subscribers'
id = db.Column(db.Integer,primary_key = True)
email = db.Column(db.String(255),unique = True,index = True)
def __repr__(self):
return f'{self.email}'
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
commcontent = db.Column(db.String())
blogpost_id = db.Column(db.Integer,db.ForeignKey('blogposts.id'))
def save_comment(self):
db.session.add(self)
db.session.commit()
def delete_comment(self):
        comment = Comment.query.filter_by(id=self.id).first()
db.session.delete(comment)
db.session.commit()
@classmethod
def get_comments(cls, blogpost_id):
comments = Comment.query.filter_by(blogpost_id=blogpost_id).all()
return comments
|
StarcoderdataPython
|
4913920
|
<gh_stars>1000+
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sqlite3
import unicodedata
from multiprocessing import Pool
from pathlib import Path
from typing import Union, List, Tuple, Generator, Any, Optional
from tqdm import tqdm
from deeppavlov.core.commands.utils import expand_path
from deeppavlov.core.common.errors import ConfigError
from deeppavlov.core.common.registry import register
from deeppavlov.core.data.dataset_reader import DatasetReader
from deeppavlov.core.data.utils import download
logger = logging.getLogger(__name__)
@register('odqa_reader')
class ODQADataReader(DatasetReader):
"""Build a SQLite database from folder with txt files, json files or
`Wiki Extractor <https://github.com/attardi/wikiextractor>`_ files.
"""
def read(self, data_path: Union[Path, str], db_url: Optional[str] = None, *args,
**kwargs) -> None:
"""Build a SQLite database from provided files, download SQLite database from a provided URL,
or do nothing.
Args:
data_path: a directory/file with texts to create a database from
db_url: path to a database url
kwargs:
save_path: a path where a database should be saved to, or path to a ready database
dataset_format: initial data format; should be selected from ['txt', 'wiki', 'json']
Returns:
None
"""
logger.info('Reading files...')
try:
save_path = expand_path(kwargs['save_path'])
except KeyError:
raise ConfigError(
f'\"save_path\" attribute should be set for {self.__class__.__name__}\
in the JSON config.')
if save_path.exists() and save_path.with_suffix(f'{save_path.suffix}.done').exists():
return
try:
dataset_format = kwargs['dataset_format']
except KeyError:
raise ConfigError(
f'\"dataset_format\" attribute should be set for {self.__class__.__name__}\
in the JSON config.')
save_path.parent.mkdir(parents=True, exist_ok=True)
if db_url:
download_dir = save_path.parent
logger.info(f'Downloading database from {db_url} to {download_dir}')
download(download_dir, db_url, force_download=False)
return
self._build_db(save_path, dataset_format, expand_path(data_path))
def iter_files(self, path: Union[Path, str]) -> Generator[Path, Any, Any]:
"""Iterate over folder with files or a single file and generate file paths.
Args:
path: path to a folder or a file
Raises:
RuntimeError if the provided `path` doesn't exist
Yields:
file paths one by one
Returns:
None
"""
path = Path(path)
if path.is_file():
yield path
elif path.is_dir():
for item in path.iterdir():
yield from self.iter_files(item)
else:
raise RuntimeError("Path doesn't exist: {}".format(path))
def _build_db(self, save_path: Union[Path, str], dataset_format: str,
data_path: Union[Path, str],
num_workers: int = 8) -> None:
"""Build a SQLite database in parallel and save it to a pointed path.
Args:
save_path: a path where the ready database should be saved
dataset_format: a data format, should be selected from ['txt', 'json', 'wiki']
data_path: path to a folder/file from which to build a database
num_workers: a number of workers for parallel database building
Raises:
sqlite3.OperationalError if `save_path` doesn't exist.
RuntimeError if dataset_format is not in ['txt', 'json', 'wiki']
Returns:
None
"""
done_path = save_path.with_suffix(f'{save_path.suffix}.done')
if Path(save_path).exists():
Path(save_path).unlink()
if done_path.exists():
done_path.unlink()
logger.info('Building the database...')
try:
conn = sqlite3.connect(str(save_path))
except sqlite3.OperationalError as e:
e.args = e.args + ("Check that DB path exists.",)
raise e
c = conn.cursor()
sql_table = "CREATE TABLE documents (id PRIMARY KEY, text);"
c.execute(sql_table)
files = [f for f in self.iter_files(data_path)]
workers = Pool(num_workers)
if dataset_format == 'txt':
fn = self._get_file_contents
elif dataset_format == 'json':
fn = self._get_json_contents
elif dataset_format == 'wiki':
fn = self._get_wiki_contents
else:
raise RuntimeError('Unknown dataset format.')
with tqdm(total=len(files)) as pbar:
for data in tqdm(workers.imap_unordered(fn, files)):
try:
c.executemany("INSERT INTO documents VALUES (?,?)", data)
pbar.update()
except sqlite3.IntegrityError as e:
logger.warning(e)
conn.commit()
conn.close()
done_path.touch()
@staticmethod
def _get_file_contents(fpath: Union[Path, str]) -> List[Tuple[str, str]]:
"""Extract file contents from '.txt' file.
Args:
fpath: path to a '.txt' file.
Returns:
a list with tuple of normalized file name and file contents
"""
with open(fpath, encoding='utf-8') as fin:
text = fin.read()
normalized_text = unicodedata.normalize('NFD', text)
return [(fpath.name, normalized_text)]
@staticmethod
def _get_json_contents(fpath: Union[Path, str]) -> List[Tuple[str, str]]:
"""Extract file contents from '.json' file. JSON files should be formatted as list with dicts
        which contain 'title' and 'text' keys.
Args:
fpath: path to a '.json' file.
Returns:
a list with tuples of normalized file name and file contents
"""
docs = []
with open(fpath, encoding='utf-8') as fin:
for line in fin:
data = json.loads(line)
for doc in data:
if not doc:
continue
text = doc['text']
normalized_text = unicodedata.normalize('NFD', text)
docs.append((doc['title'], normalized_text))
return docs
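    # Illustrative input for _get_json_contents: each line of the file is a JSON list
    # of documents with 'title' and 'text' keys, e.g.
    #   [{"title": "Article A", "text": "Full text of A ..."},
    #    {"title": "Article B", "text": "Full text of B ..."}]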
@staticmethod
def _get_wiki_contents(fpath: Union[Path, str]) -> List[Tuple[str, str]]:
"""Extract file contents from wiki extractor formatted files.
Args:
fpath: path to a '.txt' file in wiki extractor format
Returns:
a list with tuples of normalized file name and file contents
"""
docs = []
with open(fpath, encoding='utf-8') as fin:
for line in fin:
doc = json.loads(line)
if not doc:
continue
text = doc['text']
normalized_text = unicodedata.normalize('NFD', text)
docs.append((doc['title'], normalized_text))
return docs
|
StarcoderdataPython
|
12843787
|
#!/usr/bin/python
"""
I wrote this in a couple afternoons while watching Netflix, so it can probably be better.
-jmag
"""
from slackclient import SlackClient
import sys, json, sqlite3, time, re, datetime
MENTION_REGEX = "^<@(|[WU][A-Z0-9]+?)>(.*)"
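# Example (illustrative): a message such as "<@U024BE7LH> pizza yes" matches
# MENTION_REGEX with group(1) == "U024BE7LH" (the mentioned user ID) and
# group(2) == " pizza yes" (the remainder of the command).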
class ConfigException(Exception):
pass
class ConnectionException(Exception):
pass
class YegsecDatabase:
def __init__(self, db_path):
self.path = db_path
self.conn = sqlite3.connect(db_path)
self.cursor = self.conn.cursor()
def confirm_user(self, user, month, year, pref):
self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
result = self.cursor.fetchone()
if not result:
self.cursor.execute("INSERT INTO users (user_id) VALUES (?)", (user,))
self.cursor.execute("SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
meeting_id_a = self.cursor.fetchone()
if meeting_id_a:
meeting_id = meeting_id_a[0]
            veg_bool = 1 if pref else 0
self.cursor.execute("SELECT * FROM confirmations WHERE meetup_id = ? AND user_id = ?", (meeting_id, user))
if(self.cursor.fetchone()):
return False
else:
self.cursor.execute("INSERT INTO confirmations (user_id, meetup_id, pizza_pref) VALUES (?, ?, ?)", (user, meeting_id, veg_bool))
self.yegsec_commit()
return True
else:
return False
def remove_confirm_user(self, user, month, year):
self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
result = self.cursor.fetchone()
#A user cannot remove a confirmation if they don't exist in the database already.
if not result:
return False
else:
self.cursor.execute("SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
meeting_id_a = self.cursor.fetchone()
if meeting_id_a:
meeting_id = meeting_id_a[0]
self.cursor.execute("DELETE FROM confirmations WHERE user_id = ? AND meetup_id = ?", (user, meeting_id))
self.yegsec_commit()
else:
return False
def yegsec_commit(self):
self.conn.commit()
#self.conn.close()
def get_summary(self):
result = self.cursor.execute("SELECT meetup_id FROM meetups")
results = {}
meetup_ids = []
meetup_id = self.cursor.fetchone()
while(meetup_id):
meetup_ids.append(meetup_id)
meetup_id = self.cursor.fetchone()
for meetup_id_a in meetup_ids:
meetup_id = meetup_id_a[0]
self.cursor.execute("SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 1", (meetup_id,))
veg_count = self.cursor.fetchone()
self.cursor.execute("SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 0", (meetup_id,))
other_count = self.cursor.fetchone()
self.cursor.execute("SELECT day_id, month_id, year_id FROM meetups WHERE meetup_id = ?", (meetup_id,))
date_result = self.cursor.fetchone()
results[meetup_id] = { "veg": veg_count[0],
"other": other_count[0],
"day": date_result[0],
"month": date_result[1],
"year": date_result[2]
}
return results
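# Hedged schema sketch (inferred from the queries above; not taken verbatim from the
# original repository — column types are assumptions, only the table and column names
# are implied by the code):
#
#   CREATE TABLE users (user_id TEXT PRIMARY KEY);
#   CREATE TABLE meetups (meetup_id INTEGER PRIMARY KEY, day_id INTEGER,
#                         month_id INTEGER, year_id INTEGER);
#   CREATE TABLE confirmations (user_id TEXT, meetup_id INTEGER, pizza_pref INTEGER);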
class YegsecBot:
def __init__(self, config):
db, token, rtm_delay = self.read_config(config)
self.db = YegsecDatabase(db)
self.bot = SlackClient(token)
self.rtm_delay = rtm_delay
if self.bot.rtm_connect(with_team_state=False):
self.bot_id = self.bot.api_call("auth.test")["user_id"]
try:
self.start()
except KeyboardInterrupt:
self.db.yegsec_commit()
else:
raise ConnectionException("Connection to Slack failed.")
def read_config(self, config_path):
f = open(config_path)
try:
frj = json.loads(f.read())
except:
raise ConfigException("Unable to read provided configuration: {}".format(config_path))
return frj['database'], frj['token'], frj['rtm_delay']
#Source: https://www.fullstackpython.com/blog/build-first-slack-bot-python.html
def parse_bot_commands(self, slack_events):
"""
Parses a list of events coming from the Slack RTM API to find bot commands.
        If a bot command is found, this function returns a tuple of command, channel and user.
        If it's not found, then this function returns None, None, None.
"""
for event in slack_events:
if event["type"] == "message" and not "subtype" in event:
user_id, message = self.parse_direct_mention(event["text"])
if user_id == self.bot_id:
#print(event)
return message, event["channel"], event["user"]
return None, None, None
def parse_direct_mention(self, message_text):
"""
Finds a direct mention (a mention that is at the beginning) in message text
and returns the user ID which was mentioned. If there is no direct mention, returns None
"""
matches = re.search(MENTION_REGEX, message_text)
# the first group contains the username, the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
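    # Worked example (illustrative, not from the original source): for an event text
    # like "<@U024BE7LH> add me next vegetarian", MENTION_REGEX captures
    # ("U024BE7LH", " add me next vegetarian"), so this method returns
    # ("U024BE7LH", "add me next vegetarian") after stripping the remainder.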
    def get_next_meet(self):
        # stub: returns a hard-coded (month, year) for the next meetup
        return 3, 2019
def add_user(self, command, channel, user):
"""
Main function of the bot. We use this command for adding user numbers and their preferred vegetarian options
to the database.
"""
rs = re.findall("add me for ([0-9]{1,2}), ?([0-9]{4}) (vegetarian|any)", command, re.IGNORECASE)
rsm = re.findall("add me next (vegetarian|any)", command, re.IGNORECASE)
if(len(rs) == 1 or len(rsm) == 1):
try:
if len(rs) == 1:
month = int(rs[0][0])
year = int(rs[0][1])
elif len(rsm) == 1:
month, year = self.get_next_meet()
rs = rsm
month_str = datetime.datetime(year, month, 1).strftime("%B")
                # rs[0] is a tuple for the "add me for <month>,<year>" form and a plain
                # string for the "add me next" form, so normalise before checking.
                pref_str = rs[0][2] if isinstance(rs[0], tuple) else rs[0]
                if "VEG" in pref_str.upper():
                    vegetarian = True
                    resp_veg = "vegetarian"
                else:
                    vegetarian = False
                    resp_veg = "non-vegetarian"
result = self.db.confirm_user(user, month, year, vegetarian)
if result:
return(":pizza::pizza::pizza:Thank you <@{}>, I will add you to the pizza numbers for the month {} for the year {} as a {} option:pizza::pizza::pizza:".format(user, month_str, year, resp_veg))
else:
return(":pizza::pizza::pizza:Sorry, <@{}> it looks like you've already been added for that month.:pizza::pizza::pizza:".format(user))
except:
return("Sorry, I tried to add you with that command, but I couldn't quite understand it. Please try again.")
def remove_user(self, command, channel, user):
"""
Another main function of the bot. We use this command for removing user numbers and their preferred vegetarian options
from the database.
"""
rs = re.findall("remove me for ([0-9]{1,2}), ?([0-9]{4})", command, re.IGNORECASE)
rsm = re.findall("remove me next", command, re.IGNORECASE)
if(len(rs) == 1 or len(rsm) == 1):
try:
if len(rs) == 1:
month = int(rs[0][0])
year = int(rs[0][1])
elif len(rsm) == 1:
month, year = self.get_next_meet()
rs = rsm
month_str = datetime.datetime(year, month, 1).strftime("%B")
self.db.remove_confirm_user(user, month, year)
return(":pizza::pizza::pizza:Thank you <@{}>, I will remove you to the pizza numbers for the month {} for the year {}:pizza::pizza::pizza:".format(user, month_str, year))
except:
return("Sorry, I tried to remove you with that command, but I couldn't quite understand it. Please try again.")
def get_summary(self):
result = self.db.get_summary()
response = ""
for meetup_id, meetup in result.items():
total_pizza_count = meetup['other'] + meetup['veg']
response += "*Summary*\nMeetup Date: `{}/{}/{}`\nTotal Pizza Count: `{}`\nNon-Vegetarian: `{}`\nVegetarian: `{}`\n\n".format(meetup['day'], meetup['month'], meetup['year'], total_pizza_count, meetup['other'], meetup['veg'])
return response
def get_help(self):
return "You can send me the following commands:\n\
To get added to the next meetup's pizza count do: `add me next [any|vegetarian]`\n\
To get added to a future meetup's pizza count do: `add me for [month],[year]`\n\
To get removed from the next meetup's pizza count do: `remove me next`\n\
To be removed from a future meetup's pizza count do: `remove me [month],[year]`"
def handle_command(self, command, channel, user):
"""
Executes bot command if the command is known
"""
print("Received command: {}".format(command))
# Default response is help text for the user
default_response = "Not sure what you mean. Try `{}`".format("help")
# Finds and executes the given command, filling in response
response = None
print("Command received: {}".format(command))
if command.startswith("add me for") or command.startswith("add me next"):
response = self.add_user(command, channel, user)
if command.startswith("remove me for") or command.startswith("remove me next"):
response = self.remove_user(command, channel, user)
if command.startswith("summary"):
response = self.get_summary()
if command.startswith("help"):
response = self.get_help()
        # Send the response back to the channel as an ephemeral message
        # so that only the requesting user can see it
self.bot.api_call(
"chat.postEphemeral",
channel=channel,
user=user,
text=response or default_response,
as_user=True,
)
def start(self):
"""
self.bot.api_call(
"chat.postMessage",
channel="general",
text="I'm alive!",
as_user=True
)
"""
while True:
command, channel, user = self.parse_bot_commands(self.bot.rtm_read())
if command:
self.handle_command(command, channel, user)
time.sleep(self.rtm_delay)
if __name__ == "__main__":
bot = YegsecBot("config.json")
|
StarcoderdataPython
|
5092815
|
<gh_stars>1-10
"""
cloudalbum/__init__.py
~~~~~~~~~~~~~~~~~~~~~~~
Environment configuration how to run application.
:description: CloudAlbum is a fully featured sample application for 'Moving to AWS serverless' training course
:copyright: © 2019 written by <NAME>, <NAME>.
:license: MIT, see LICENSE for more details.
"""
import os
import logging
import sys
import json
import datetime
from bson.objectid import ObjectId
from flask import Flask, jsonify, make_response # new
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from flask_bcrypt import Bcrypt
from cloudalbum.database import create_table
class JSONEncoder(json.JSONEncoder):
''' extend json-encoder class'''
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
if isinstance(o, set):
return list(o)
if isinstance(o, datetime.datetime):
return str(o)
return json.JSONEncoder.default(self, o)
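# Hedged usage sketch (added for illustration): with this encoder installed as
# app.json_encoder below, values such as ObjectId, set and datetime are serialized
# to strings/lists instead of raising TypeError, e.g.
#
#   json.dumps({'when': datetime.datetime.utcnow()}, cls=JSONEncoder)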
def create_app(script_info=None):
# instantiate the app
app = Flask(__name__)
flask_bcrypt = Bcrypt(app)
jwt = JWTManager(app)
app.json_encoder = JSONEncoder
# enable CORS
CORS(app, resources={r'/*': {'origins': '*'}})
# set config
app_settings = os.getenv('APP_SETTINGS')
app.config.from_object(app_settings)
# set logger to STDOUT
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.DEBUG)
    # Create database table if it does not exist
with app.app_context():
create_table()
# register blueprints
from cloudalbum.api.users import users_blueprint
app.register_blueprint(users_blueprint, url_prefix='/users')
from cloudalbum.api.photos import photos_blueprint
app.register_blueprint(photos_blueprint, url_prefix='/photos')
from cloudalbum.api.admin import admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
# shell context for flask cli
@app.shell_context_processor
def ctx():
return {'app': app}
return app
|
StarcoderdataPython
|
3404502
|
<reponame>4dn-dcic/tibanna_ff
from tibanna_ffcommon.portal_utils import (
TibannaSettings,
FormatExtensionMap,
)
from tibanna_cgap.zebra_utils import (
FourfrontStarter,
FourfrontUpdater,
ProcessedFileMetadata,
)
import pytest
from dcicutils import ff_utils
from tests.tibanna.zebra.conftest import (
valid_env,
)
from tests.tibanna.ffcommon.conftest import (
minimal_postrunjson_template
)
from tests.tibanna.zebra.conftest import (
post_new_processedfile,
post_new_qc
)
@valid_env
def test_fourfront_starter(start_run_event_md5):
starter = FourfrontStarter(**start_run_event_md5)
assert starter
assert 'arguments' in starter.inp.wf_meta
assert len(starter.inp.wf_meta['arguments']) == 2
assert starter.inp.wf_meta['arguments'][1]['argument_type'] == 'Output report file'
starter.run()
assert len(starter.output_argnames) == 1
@valid_env
def test_qclist_handling():
data = {'ff_meta': {'workflow': 'cgap:workflow_bwa-mem_no_unzip-check_v10'},
'config': {'log_bucket': 'somelogbucket'},
'postrunjson': minimal_postrunjson_template(),
'_tibanna': {'env': 'fourfront-cgap', 'settings': {'1': '1'}}}
updater = FourfrontUpdater(**data)
new_qc_object = next(updater.qc_template_generator())
# file w/ no quality_metric object
new_pf_uuid = post_new_processedfile(file_format='bam', key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid in updater.patch_items
assert updater.patch_items[new_pf_uuid]['quality_metric'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ quality_metric object of same type
existing_qc_uuid = post_new_qc('QualityMetricBamcheck', key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qc_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid in updater.patch_items
assert updater.patch_items[new_pf_uuid]['quality_metric'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ quality_metric object of different type
existing_qc_uuid = post_new_qc('QualityMetricWgsBamqc', key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qc_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid in updater.patch_items
new_qc_uuid = updater.patch_items[new_pf_uuid]['quality_metric']
assert 'quality_metric_qclist' in updater.post_items
assert new_qc_uuid in updater.post_items['quality_metric_qclist']
res = updater.post_items['quality_metric_qclist'][new_qc_uuid]
assert 'qc_list' in res
assert len(res['qc_list']) == 2
assert res['qc_list'][0]['qc_type'] == 'quality_metric_wgs_bamqc'
assert res['qc_list'][1]['qc_type'] == 'quality_metric_bamcheck'
assert res['qc_list'][0]['value'] == existing_qc_uuid
assert res['qc_list'][1]['value'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ qc list with only quality_metric object of different type
existing_qc_uuid = post_new_qc('QualityMetricWgsBamqc', key=updater.tibanna_settings.ff_keys)
existing_qclist = [{'qc_type': 'quality_metric_wgs_bamqc',
'value': existing_qc_uuid}]
existing_qclist_uuid = post_new_qc('QualityMetricQclist', qc_list=existing_qclist,
key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qclist_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid not in updater.patch_items
assert existing_qclist_uuid in updater.patch_items
assert 'qc_list' in updater.patch_items[existing_qclist_uuid]
assert len(updater.patch_items[existing_qclist_uuid]['qc_list']) == 2
res = updater.patch_items[existing_qclist_uuid]
assert res['qc_list'][0]['qc_type'] == 'quality_metric_wgs_bamqc'
assert res['qc_list'][1]['qc_type'] == 'quality_metric_bamcheck'
assert existing_qc_uuid in res['qc_list'][0]['value']
assert new_qc_object['uuid'] in res['qc_list'][1]['value']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qclist_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ qc list with only quality_metric object of same type
existing_qc_uuid = post_new_qc('QualityMetricWgsBamqc', key=updater.tibanna_settings.ff_keys)
existing_qclist = [{'qc_type': 'quality_metric_bamcheck',
'value': existing_qc_uuid}]
existing_qclist_uuid = post_new_qc('QualityMetricQclist', qc_list=existing_qclist,
key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qclist_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid not in updater.patch_items
assert existing_qclist_uuid in updater.patch_items
assert 'qc_list' in updater.patch_items[existing_qclist_uuid]
assert len(updater.patch_items[existing_qclist_uuid]['qc_list']) == 1
res = updater.patch_items[existing_qclist_uuid]
assert res['qc_list'][0]['qc_type'] == 'quality_metric_bamcheck'
assert res['qc_list'][0]['value'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qclist_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
|
StarcoderdataPython
|
198055
|
<reponame>gruiick/pyRogue<filename>chapters/rogue_test01.py
#!/usr/bin/env python3
# coding: utf-8
#
# $Id: rogue_test01.py 894 $
# SPDX-License-Identifier: BSD-2-Clause
#
"""
from http://www.roguebasin.com/index.php?title=Roguelike_Tutorial,_using_python3%2Btdl,_part_1
"""
import tdl
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 50
LIMIT_FPS = 20
def handle_keys():
global playerx, playery
"""
# realtime-based game
keypress = False
for event in tdl.event.get():
if event.type == 'KEYDOWN':
user_input = event
keypress = True
if not keypress:
return
"""
# turn-based game
user_input = tdl.event.key_wait()
if user_input.key == 'ENTER' and user_input.alt:
# Alt+Enter: toggle fullscreen
tdl.set_fullscreen(not tdl.get_fullscreen())
elif user_input.key == 'ESCAPE':
return True # exit game
# movement keys
if user_input.key == 'UP':
playery -= 1
elif user_input.key == 'DOWN':
playery += 1
elif user_input.key == 'LEFT':
playerx -= 1
elif user_input.key == 'RIGHT':
playerx += 1
tdl.set_font('dejavu10x10_gs_tc.png', greyscale=True, altLayout=True)
console = tdl.init(SCREEN_WIDTH, SCREEN_HEIGHT, title="Roguelike", fullscreen=False)
tdl.setFPS(LIMIT_FPS) # for real-time game, ignore if turn-based
playerx = SCREEN_WIDTH // 2
playery = SCREEN_HEIGHT // 2
while not tdl.event.is_window_closed():
console.draw_char(playerx, playery, '@', bg=None, fg=(255, 255, 255))
tdl.flush()
console.draw_char(playerx, playery, ' ', bg=None)
# handle keys and exit game if needed
exit_game = handle_keys()
if exit_game:
break
|
StarcoderdataPython
|
8173205
|
<reponame>BehaviorPredictionTestingPlatform/VerifAI
import carla
import sys
from agents.navigation.agent import *
from agents.navigation.controller import VehiclePIDController
from agents.tools.misc import distance_vehicle, draw_waypoints
import numpy as np
'''This agent doesn't move.'''
class BrakeAgent(Agent):
def __init__(self, vehicle, opt_dict=None):
super(BrakeAgent, self).__init__(vehicle)
def run_step(self):
control = carla.VehicleControl()
control.throttle = 0.0
control.brake = 1.0
control.hand_brake = True
return control
|
StarcoderdataPython
|
6424305
|
import fasteners, json, os, threading
thread_lock = threading.Lock()
def write(filename, content):
with open(filename, 'w+') as f:
f.write(content)
def read(filename):
content = '{}'
if os.path.exists(filename):
with open(filename, 'r') as f:
content = f.read()
return content
def append(filename, content):
with fasteners.InterProcessLock('{0}.lock'.format(filename)):
with open(filename, 'a+') as f:
f.write(content)
def update_results_thread(filename, info):
thread_lock.acquire()
with fasteners.InterProcessLock('{0}.lock'.format(filename)):
content = json.loads(read(filename))
name = info['name']
result = info['result']
refit_config = info['refit_config']
text = info['text']
seed = str(info['seed'])
infos = content[name] if name in content else dict()
infos[seed] = {'result': result, 'description': text, 'refit': refit_config}
content[name] = infos
write(filename, json.dumps(content, indent=4, sort_keys=True))
thread_lock.release()
def update_results(filename, info):
thread = threading.Thread(target = update_results_thread, args = (filename, info))
thread.start()
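# Illustrative sketch of the resulting file layout (derived from update_results_thread
# above; keys and values here are made up):
#
# {
#     "experiment_name": {
#         "42": {"result": 0.93, "description": "some text", "refit": {"param": 1}}
#     }
# }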
|
StarcoderdataPython
|
1895916
|
"""
Copyright 2015 - <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from fabric.api import local, lcd, task
# NOTE: Fill these in.
LK_PROJECT_BASE = os.path.expanduser("~/code/lk")
SOD_PROJECT_BASE = os.path.expanduser("~/code/sod")
OPEN_OCD_BASE = os.path.expanduser("~/code/openocd")
DARTUINO_BUILD_TARGET = "dartuinoP0-test"
DARTUINO_SOD_BUILD_TARGET = "dartuino-p0-dartino"
DISCO_BUILD_TARGET = "stm32f746g-disco-test"
EVAL_BUILD_TARGET = "stm32746g-eval2-test"
class LKTarget:
def __init__(self, repo_root, target_project, board_cfg, stlink_cfg, bin_dir):
build_subdir = "build-" + target_project
full_binary_path = os.path.join(repo_root, bin_dir, build_subdir, "lk.bin")
program_command_list = ["program", full_binary_path, "reset", "exit", "0x08000000"]
program_command = " ".join(program_command_list)
program_command = "\"" + program_command + "\""
flash_command_list = [
"openocd",
"-f", stlink_cfg,
"-f", board_cfg,
"-c", program_command
]
self.flash_command = " ".join(flash_command_list)
self.target_project = target_project
self.repo_root = repo_root
DiscoLKTarget = LKTarget(LK_PROJECT_BASE, DISCO_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2-1.cfg", "")
DartuinioTarget = LKTarget(LK_PROJECT_BASE, DARTUINO_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2.cfg", "")
EvalLKTarget = LKTarget(LK_PROJECT_BASE, EVAL_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2-1.cfg", "")
DiscoSODTarget = LKTarget(SOD_PROJECT_BASE, DISCO_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2-1.cfg", "out")
DartuinoSODTarget = LKTarget(SOD_PROJECT_BASE, DARTUINO_SOD_BUILD_TARGET, "tcl/board/stm32746g_eval.cfg", "tcl/interface/stlink-v2.cfg", "out")
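# Illustrative note (derived from LKTarget.__init__ above; paths depend on the local
# checkout): for DiscoLKTarget the generated flash command is roughly
#   openocd -f tcl/interface/stlink-v2-1.cfg -f tcl/board/stm32746g_eval.cfg \
#       -c "program ~/code/lk/build-stm32f746g-disco-test/lk.bin reset exit 0x08000000"
# and flash() runs it from OPEN_OCD_BASE.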
@task
def disco_do():
build(DiscoLKTarget)
flash(DiscoLKTarget)
@task
def dartuino_do():
build(DartuinioTarget)
flash(DartuinioTarget)
@task
def eval_do():
build(EvalLKTarget)
flash(EvalLKTarget)
@task
def sod_do():
build(DiscoSODTarget)
flash(DiscoSODTarget)
@task
def sod_dartuino_do():
build(DartuinoSODTarget)
flash(DartuinoSODTarget)
def flash(target):
with lcd(OPEN_OCD_BASE):
local(target.flash_command)
def build(target):
make_cmd = "make PROJECT=%s" % target.target_project
with lcd(target.repo_root):
local(make_cmd)
|
StarcoderdataPython
|
1751033
|
print('this is two/c.py')
|
StarcoderdataPython
|
6677225
|
from datetime import datetime
from resourse.error_log import ERROR_LOG
class MountJson:
def putDateTime(self):
now = datetime.now()
return self.putTag("dateTime",str(now.date()) + "T" + str(now.time()) + "Z")
def putTag(self,tag,value):
self.jsonBuffer[tag] = value
return self.jsonBuffer
    def putTagNow(self,tag,value): # immediately create a record with the current datetime and the given tag
self.putDateTime()
self.putTag(tag,value)
self.inserJson()
self.jsonBuffer = {}
def inserJson(self):
return self.json[self.type].append(self.jsonBuffer)
    def save_file(self):
        # note: this writes the Python repr of the dict (not strict JSON) to <type>.json
        with open(self.type + ".json", "w") as f:
            return f.write(str(self.json))
def resetJson(self):
self.jsonBuffer = {}
self.json = {
self.type : []
}
return self.jsonBuffer, self.json
def __init__(self,type):
if type == "data" or type == "metadata":
print("obj created")
self.type = type
self.resetJson()
else:
print("Fail to create")
|
StarcoderdataPython
|
3230619
|
<filename>REM/Tool/IDA 7.3/python/ida_diskio.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
IDA Plugin SDK API wrapper: diskio
"""
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_ida_diskio', [dirname(__file__)])
except ImportError:
import _ida_diskio
return _ida_diskio
if fp is not None:
try:
_mod = imp.load_module('_ida_diskio', fp, pathname, description)
finally:
fp.close()
return _mod
_ida_diskio = swig_import_helper()
del swig_import_helper
else:
import _ida_diskio
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import ida_idaapi
import sys
_BC695 = sys.modules["__main__"].IDAPYTHON_COMPAT_695_API
if _BC695:
def bc695redef(func):
ida_idaapi._BC695.replace_fun(func)
return func
def idadir(*args):
"""
idadir(subdir) -> char const *
Get IDA directory (if subdir==NULL) or the specified subdirectory (see
'IDA subdirectories' )
@param subdir (C++: const char *)
"""
return _ida_diskio.idadir(*args)
def getsysfile(*args):
"""
getsysfile(filename, subdir) -> char *
Search for IDA system file. This function searches for a file in:each
directory specified by IDAUSR%ida directory [+ subdir] and returns the
first match.
@param filename (C++: const char *)
@param subdir (C++: const char *)
@return: NULL if not found, otherwise a pointer to full file name.
"""
return _ida_diskio.getsysfile(*args)
CFG_SUBDIR = _ida_diskio.CFG_SUBDIR
IDC_SUBDIR = _ida_diskio.IDC_SUBDIR
IDS_SUBDIR = _ida_diskio.IDS_SUBDIR
IDP_SUBDIR = _ida_diskio.IDP_SUBDIR
LDR_SUBDIR = _ida_diskio.LDR_SUBDIR
SIG_SUBDIR = _ida_diskio.SIG_SUBDIR
TIL_SUBDIR = _ida_diskio.TIL_SUBDIR
PLG_SUBDIR = _ida_diskio.PLG_SUBDIR
THM_SUBDIR = _ida_diskio.THM_SUBDIR
def get_user_idadir(*args):
"""
get_user_idadir() -> char const *
Get user ida related directory.
- if $IDAUSR is defined:
- the first element in $IDAUSR
- else
- default user directory ($HOME/.idapro or %APPDATA%Hex-Rays/IDA Pro)
"""
return _ida_diskio.get_user_idadir(*args)
def get_ida_subdirs(*args):
"""
get_ida_subdirs(subdir, flags=0) -> int
Get list of directories in which to find a specific IDA resource (see
'IDA subdirectories' ). The order of the resulting list is as follows:
- [$IDAUSR/subdir (0..N entries)]
- $IDADIR/subdir
@param subdir: name of the resource to list (C++: const char *)
@param flags: Subdirectory modification flags bits (C++: int)
@return: number of directories appended to 'dirs'
"""
return _ida_diskio.get_ida_subdirs(*args)
IDA_SUBDIR_IDP = _ida_diskio.IDA_SUBDIR_IDP
"""
append the processor name as a subdirectory
"""
IDA_SUBDIR_IDADIR_FIRST = _ida_diskio.IDA_SUBDIR_IDADIR_FIRST
"""
$IDADIR/subdir will be first, not last
"""
IDA_SUBDIR_ONLY_EXISTING = _ida_diskio.IDA_SUBDIR_ONLY_EXISTING
"""
only existing directories will be present
"""
def get_special_folder(*args):
"""
get_special_folder(csidl) -> bool
Get a folder location by CSIDL (see 'Common CSIDLs' ). Path should be
of at least MAX_PATH size
@param csidl (C++: int)
"""
return _ida_diskio.get_special_folder(*args)
CSIDL_APPDATA = _ida_diskio.CSIDL_APPDATA
CSIDL_LOCAL_APPDATA = _ida_diskio.CSIDL_LOCAL_APPDATA
CSIDL_PROGRAM_FILES = _ida_diskio.CSIDL_PROGRAM_FILES
CSIDL_PROGRAM_FILES_COMMON = _ida_diskio.CSIDL_PROGRAM_FILES_COMMON
CSIDL_PROGRAM_FILESX86 = _ida_diskio.CSIDL_PROGRAM_FILESX86
def fopenWT(*args):
"""
fopenWT(file) -> FILE *
Open a new file for write in text mode, deny write. If a file exists,
it will be removed.
@param file (C++: const char *)
@return: NULL if failure
"""
return _ida_diskio.fopenWT(*args)
def fopenWB(*args):
"""
fopenWB(file) -> FILE *
Open a new file for write in binary mode, deny read/write. If a file
exists, it will be removed.
@param file (C++: const char *)
@return: NULL if failure
"""
return _ida_diskio.fopenWB(*args)
def fopenRT(*args):
"""
fopenRT(file) -> FILE *
Open a file for read in text mode, deny none.
@param file (C++: const char *)
@return: NULL if failure
"""
return _ida_diskio.fopenRT(*args)
def fopenRB(*args):
"""
fopenRB(file) -> FILE *
Open a file for read in binary mode, deny none.
@param file (C++: const char *)
@return: NULL if failure
"""
return _ida_diskio.fopenRB(*args)
def fopenM(*args):
"""
fopenM(file) -> FILE *
Open a file for read/write in binary mode, deny write.
@param file (C++: const char *)
@return: NULL if failure
"""
return _ida_diskio.fopenM(*args)
def fopenA(*args):
"""
fopenA(file) -> FILE *
Open a file for append in text mode, deny none.
@param file (C++: const char *)
@return: NULL if failure
"""
return _ida_diskio.fopenA(*args)
LINPUT_NONE = _ida_diskio.LINPUT_NONE
LINPUT_LOCAL = _ida_diskio.LINPUT_LOCAL
LINPUT_RFILE = _ida_diskio.LINPUT_RFILE
LINPUT_PROCMEM = _ida_diskio.LINPUT_PROCMEM
LINPUT_GENERIC = _ida_diskio.LINPUT_GENERIC
def qlgetz(*args):
"""
qlgetz(li, fpos) -> char *
Read a zero-terminated string from the input. If fpos == -1 then no
seek will be performed.
@param li (C++: linput_t *)
@param fpos (C++: int64)
"""
return _ida_diskio.qlgetz(*args)
def open_linput(*args):
"""
open_linput(file, remote) -> linput_t *
Open loader input.
@param file (C++: const char *)
@param remote (C++: bool)
"""
return _ida_diskio.open_linput(*args)
class generic_linput_t(object):
"""
Proxy of C++ generic_linput_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
filesize = _swig_property(_ida_diskio.generic_linput_t_filesize_get, _ida_diskio.generic_linput_t_filesize_set)
blocksize = _swig_property(_ida_diskio.generic_linput_t_blocksize_get, _ida_diskio.generic_linput_t_blocksize_set)
def read(self, *args):
"""
read(self, off, buffer, nbytes) -> ssize_t
"""
return _ida_diskio.generic_linput_t_read(self, *args)
__swig_destroy__ = _ida_diskio.delete_generic_linput_t
__del__ = lambda self : None;
generic_linput_t_swigregister = _ida_diskio.generic_linput_t_swigregister
generic_linput_t_swigregister(generic_linput_t)
def create_generic_linput(*args):
"""
create_generic_linput(gl) -> linput_t *
Create a generic linput
@param gl: linput description. this object will be destroyed by
close_linput() using "delete gl;" (C++: generic_linput_t
*)
"""
return _ida_diskio.create_generic_linput(*args)
def create_memory_linput(*args):
"""
create_memory_linput(start, size) -> linput_t *
Create a linput for process memory. This linput will use
read_dbg_memory() to read data.
@param start: starting address of the input (C++: ea_t)
@param size: size of the memory area to represent as linput if
unknown, may be passed as 0 (C++: asize_t)
"""
return _ida_diskio.create_memory_linput(*args)
def get_linput_type(*args):
"""
get_linput_type(li) -> linput_type_t
Get linput type.
@param li (C++: linput_t *)
"""
return _ida_diskio.get_linput_type(*args)
def eclose(*args):
"""
eclose(fp)
"""
return _ida_diskio.eclose(*args)
def enumerate_files(*args):
"""
enumerate_files(path, fname, callback) -> PyObject *
Enumerate files in the specified directory while the callback returns 0.
@param path: directory to enumerate files in
@param fname: mask of file names to enumerate
@param callback: a callable object that takes the filename as
its first argument and it returns 0 to continue
enumeration or non-zero to stop enumeration.
@return:
None in case of script errors
tuple(code, fname) : If the callback returns non-zero
"""
return _ida_diskio.enumerate_files(*args)
def create_bytearray_linput(*args):
"""
create_bytearray_linput(s) -> linput_t *
Trivial memory linput.
"""
return _ida_diskio.create_bytearray_linput(*args)
def close_linput(*args):
"""
close_linput(li)
Close loader input.
@param li (C++: linput_t *)
"""
return _ida_diskio.close_linput(*args)
#<pycode(py_diskio)>
#</pycode(py_diskio)>
if _BC695:
create_generic_linput64=create_generic_linput
generic_linput64_t=generic_linput_t
|
StarcoderdataPython
|
222065
|
from argparse import ArgumentParser
from pathlib import Path
from sys import (
exit,
stderr,
)
from typing import List
from src import (
Dataset,
MCV,
UrbanSound8k,
)
_DESCRIPTION = """
Generate noised audio for training noise-reduction models.
Based on UrbanSound8k and Mozilla Common Voice datasets
"""
def _validate_dir(
path: Path,
) -> bool:
return path.exists() and path.is_dir()
def _create_set(
out_path: Path,
mcv_filenames: List[str],
u8k_filenames: List[str],
**kwargs,
) -> None:
ds = Dataset(
out_path, **kwargs,
)
ds.create(
mcv_filenames,
u8k_filenames,
)
if __name__ == '__main__':
parser = ArgumentParser(description=_DESCRIPTION)
parser.add_argument(
'--mcv',
type=str,
help='Directory with MCV dataset',
)
parser.add_argument(
'--urban8k',
type=str,
        help='Directory with UrbanSound8K dataset',
)
parser.add_argument(
'--out',
type=str,
help='Directory for resulting files (must be empty)',
)
parser.add_argument(
'--sr',
type=int,
default=16000,
help='Sample rate',
)
parser.add_argument(
'--cores',
type=int,
default=None,
help='Number of cores to be used when generating dataset',
)
parser.add_argument(
'--mcv_val_size',
type=int,
default=1000,
help='Number of samples in validation set for Mozilla Common Voice dataset'
)
parser.add_argument(
'--u8k_val_size',
type=int,
default=200,
help='Number of samples in validation set for UrbanSound8k dataset'
)
args = parser.parse_args()
mcv_dir = Path(args.mcv)
urban8k_dir = Path(args.urban8k)
out_dir = Path(args.out)
sample_rate = args.sr
cores = args.cores
if not _validate_dir(mcv_dir):
stderr.write('Path to MCV doesn\'t exist or isn\'t a directory')
exit(1)
if not _validate_dir(urban8k_dir):
stderr.write('Path to UrbanSound8K doesn\'t exist or isn\'t a directory')
exit(1)
    if not _validate_dir(out_dir) or any(out_dir.iterdir()):
stderr.write('Out directory doesn\'t exist or isn\'t a directory or isn\'t empty')
exit(1)
print('Getting MCV train, val and test filenames...')
mcv = MCV(
mcv_dir,
args.mcv_val_size,
)
mcv_train_filenames, mcv_val_filenames = mcv.get_train_val_filenames()
mcv_test_filenames = mcv.get_test_filenames()
print('Getting U8K train, val and test filenames...')
u8k = UrbanSound8k(
urban8k_dir,
args.u8k_val_size,
)
u8k_train_filenames, u8k_val_filenames = u8k.get_train_val_filenames()
u8k_test_filenames = u8k.get_test_filenames()
print('Applying noise to val data...')
val_out_dir = out_dir / 'val'
val_out_dir.mkdir(
exist_ok=True,
parents=True,
)
_create_set(
val_out_dir,
mcv_val_filenames,
u8k_val_filenames,
sample_rate=sample_rate,
cores=cores,
)
print('Applying noise to test data...')
test_out_dir = out_dir / 'test'
test_out_dir.mkdir(
exist_ok=True,
parents=True,
)
_create_set(
test_out_dir,
mcv_test_filenames,
u8k_test_filenames,
sample_rate=sample_rate,
cores=cores,
)
print('Applying noise to train data...')
train_out_dir = out_dir / 'train'
train_out_dir.mkdir(
exist_ok=True,
parents=True,
)
_create_set(
train_out_dir,
mcv_train_filenames,
u8k_train_filenames,
sample_rate=sample_rate,
cores=cores,
)
print('DONE')
|
StarcoderdataPython
|
6507515
|
<gh_stars>0
"""
Copyright (C) 2019-2020 Zilliz. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import getopt
import sys
from pathlib import Path
import json
from flask import Flask
from flask_cors import CORS
from app import service as app_service
APP = Flask(__name__)
APP.register_blueprint(app_service.API)
CORS(APP, resources=r'/*')
def usage():
"""
help function
"""
    print('usage: python manage.py [options]')
print('default: develop mode')
print('-h: usage')
print('-r: production mode')
print('-i: ip address')
print('-p: http port')
print('-c: json config to be loaded')
if __name__ == '__main__':
IS_DEBUG = True
IP = "0.0.0.0"
PORT = 8080
JSON_CONFIG = None
try:
OPTS, ARGS = getopt.getopt(sys.argv[1:], 'hri:p:c:')
except getopt.GetoptError as _e:
print("Error '{}' occured. Arguments {}.".format(str(_e), _e.args))
usage()
sys.exit(2)
for opt, arg in OPTS:
if opt == '-h':
usage()
sys.exit()
elif opt == '-r':
IS_DEBUG = False
elif opt == '-i':
IP = arg
elif opt == "-p":
PORT = arg
elif opt == '-c':
JSON_CONFIG = arg
if JSON_CONFIG:
json_file = Path(JSON_CONFIG)
if not json_file.is_file():
print("error: config %s doesn't exist!" % (JSON_CONFIG))
sys.exit(0)
else:
with open(JSON_CONFIG, 'r') as f:
content = json.load(f)
status, code, message = app_service.load_data(content)
print(message)
if code != 200:
sys.exit(0)
if not IS_DEBUG:
from waitress import serve
serve(APP, host=IP, port=PORT)
else:
APP.debug = True
APP.run(host=IP, port=PORT)
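# Illustrative invocation (derived from the getopt options above; the config file
# name is hypothetical):
#   python manage.py -r -i 0.0.0.0 -p 8080 -c scene.json
# runs the service with waitress in production mode and loads scene.json at startup.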
|
StarcoderdataPython
|
5087701
|
<gh_stars>1-10
nums = [1, 2, 3, 4]
i = 0
while i < 3:
    nums[i] = nums[i] + 1
    i = i + 1
|
StarcoderdataPython
|
3559041
|
<gh_stars>1-10
#
# laaso/appmanager.py
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
'''
Module that provides application classes that depend on laaso.azure_tool.
'''
import laaso.azure_tool
import laaso.common
import laaso.identity
import laaso.util
class ManagerMixin():
'''
Mixin class used to add common Manager functionality
to Application classes.
'''
# Handy hook for unit testing
MANAGER_CLASS = laaso.azure_tool.Manager
# az_mgr_discard() checks these attributes.
# subclasses may overload this.
AZ_MGR_DISCARD_ATTRS = {'_az_mgr',
'az_mgr',
}
def subscription_defaults_generate(self, az_mgr=None):
'''
Generate a pared-down subscription_defaults.
Returns empty dict if there is no default defined.
'''
az_mgr = az_mgr or self.MANAGER_CLASS(**self.manager_kwargs())
try:
si = laaso.subscription_info_get(az_mgr.subscription_id, return_default=False)
except KeyError:
return dict()
ret = dict(si)
ret.pop('location_default', None)
ld_popped = ret.pop('location_defaults', dict())
if az_mgr.location:
ret['location_default'] = az_mgr.location
values = ld_popped.get(az_mgr.location, dict())
ldv = self.jinja_filter_data(values)
name_substitutions = self.name_substitutions_resolve(ldv.pop('name_substitutions', dict()))
if name_substitutions:
ldv['name_substitutions'] = name_substitutions
if ldv:
ret['location_defaults'] = {az_mgr.location : ldv}
name_substitutions = ret.pop('_name_substitutions', dict())
name_substitutions = self.name_substitutions_resolve(name_substitutions)
if name_substitutions:
ret['name_substitutions'] = name_substitutions
return {'subscription_defaults' : [ret]}
def scfg_dict_generate(self, az_mgr=None, **kwargs):
'''
Generate a new scfg dict for this application.
This specifically resolves MSI client IDs.
'''
az_mgr = az_mgr or self.MANAGER_CLASS(**self.manager_kwargs())
try:
kwargs.setdefault('subscription_default', self.subscription_id)
except AttributeError:
pass
ret = laaso.scfg.to_scfg_dict(**kwargs)
si = laaso.subscription_info_get(az_mgr.subscription_id)
for key in ('msi_client_id_default',
'pubkey_keyvault_client_id',
):
try:
val_pre = si[key]
except KeyError:
val_pre = laaso.scfg.get(key, '')
if val_pre:
val_post = laaso.identity.client_id_from_uami_str(val_pre, az_mgr)
if val_post:
ret['defaults'][key] = val_post
else:
self.logger.warning("%s.%s cannot convert key=%r val_pre=%r so not including it",
type(self).__name__, laaso.util.getframename(0),
key, val_pre)
ret['defaults'].pop(key, None)
return ret
def vm_image_exists_for_preflight(self, image_id, az_mgr=None):
'''
Return whether the given image_id exists. Used during preflighting.
Exists as a separate method to allow orchestrating apps
to replace this check logic.
'''
az_mgr = az_mgr or self.az_mgr_generate()
image_obj = az_mgr.vm_image_get_by_id(image_id)
if not image_obj:
self.logger.debug("%s image_id %r does not exist", self.mth(), image_id)
return False
return True
def az_mgr_generate(self, **kwargs):
'''
Generate and return a new Manager (MANAGER_CLASS)
'''
kg = getattr(self, 'manager_kwargs', None)
if callable(kg):
mk = kg() # pylint: disable=not-callable
else:
mk = dict()
for k in ('logger', 'subscription_id', 'tenant_id'):
try:
mk[k] = getattr(self, k)
except AttributeError:
continue
mk.update(kwargs)
ret = self.MANAGER_CLASS(**mk)
return ret
def az_mgr_discard(self):
'''
Discard references to Manager-like objects.
This allows reclaiming of unshared connection pools in SDK client classes.
Check attrs defined by AZ_MGR_DISCARD_ATTRS.
If they are instances of laaso.azure_tool.Manager or MANAGER_CLASS,
set them to None
'''
for attr in self.AZ_MGR_DISCARD_ATTRS:
curval = getattr(self, attr, None)
if isinstance(curval, (laaso.azure_tool.Manager, self.MANAGER_CLASS)):
setattr(self, attr, None)
class ApplicationWithManager(laaso.common.Application, ManagerMixin):
'''
Application that uses laaso.azure_tool.Manager
'''
# No specialization here
class ApplicationWithSubscriptionManager(laaso.common.ApplicationWithSubscription, ManagerMixin):
'''
ApplicationWithSubscription that uses laaso.azure_tool.Manager
'''
# No specialization here
class ApplicationWithResourceGroupManager(laaso.common.ApplicationWithResourceGroup, ManagerMixin):
'''
ApplicationWithResourceGroup that uses laaso.azure_tool.Manager
'''
# No specialization here
|
StarcoderdataPython
|
11318643
|
# Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Generators of synthetic datasets.
"""
import warnings
from ..synthetic import CheckerBoard as _CheckerBoard
class CheckerBoard(_CheckerBoard):
"""
.. warning::
Using ``CheckerBoard`` from ``verde.datasets`` is deprecated and will
be removed in Verde 2.0.0. Use ``verde.synthetic.CheckerBoard``
instead.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
warnings.warn(
"Using CheckerBoard from verde.datasets is deprecated and will be "
"removed in Verde 2.0.0. "
"Use verde.synthetic.CheckerBoard instead.",
FutureWarning,
)
|
StarcoderdataPython
|
1915455
|
<reponame>anthonyeden/pypco
"""PCO Services endpoints.
Generated by pypco_generator tool. Manual changes not recommended.
"""
#pylint: disable=C0304,R0903,C0111,C0321
from .base_endpoint import BaseEndpoint
# The base Services endpoint
class ServicesEndpoint(BaseEndpoint): pass
# All Services endpoints
class AttachmentTypes(ServicesEndpoint):
"""Create an Attachment Type for each type of file you might want only specific people to see. When you attach a file, you can specify an attachment type to then be able to link the file to a position."""
pass
class EmailTemplates(ServicesEndpoint):
"""A EmailTemplate Resource"""
pass
class Folders(ServicesEndpoint):
"""A folder is a container used to organize multiple Service Types or other Folders."""
pass
class Media(ServicesEndpoint):
"""A piece of media"""
pass
class People(ServicesEndpoint):
"""A person added to PCO Services."""
pass
class Series(ServicesEndpoint):
"""A Series can be specified for each plan to tie plans with similar messages together, even across Service Types.
*Note*: A series is not created until artwork is added from the plan. You can use `series_title` included in `Plan` attributes to get titles for series without artwork.
"""
pass
class ServiceTypes(ServicesEndpoint):
"""A Service Type is a container for plans."""
pass
class Songs(ServicesEndpoint):
"""A song"""
pass
class TagGroups(ServicesEndpoint):
"""A tag group contains tags"""
pass
class Teams(ServicesEndpoint):
"""A Team within a Service Type."""
pass
|
StarcoderdataPython
|
3353948
|
from klpyastro.plot import plottools
#from nose.tools import assert_equal
#### Not clear how to test plots
class TestPlot:
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup(self):
pass
def teardown(self):
pass
class TestSpPlot:
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup(self):
pass
def teardown(self):
pass
|
StarcoderdataPython
|
3442020
|
<filename>niphlem/input_data.py
import numpy as np
import json
import matplotlib.pyplot as mpl
import warnings
def get_lines(filename):
"""
Read in lines from file, stripping new line markers
Parameters
----------
filename : str, pathlike
Path to file.
Returns
-------
lines : list
List containing each line.
"""
lines = []
try:
fh = open(filename, 'r')
except OSError:
msg = 'Cannot open input file ' + filename
raise Warning(msg)
else:
# Get lines of file
for line in fh:
lines.append(line.rstrip('\n'))
fh.close()
return lines
def load_cmrr_info(filename):
"""
Load information log files from CMRR sequences.
Parameters
----------
filename : str, pathlike
Path to Information Log file.
Returns
-------
traces : ndarray
Time ticks of the scanner.
meta_info : dict
Dictionary with meta information about the info log file.
"""
# TODO: Add function to validate input file. For example, it should be
# a .log type file.
lines = get_lines(filename)
meta_info = dict()
# Get parameters for meta file and lines containing data
stt = 0
stp = 0
for i in range(len(lines)):
y = lines[i].split()
if len(y) == 0:
continue
elif y[0] == 'UUID':
meta_info['uuid'] = y[2]
elif y[0] == 'ScanDate':
meta_info['scan_date'] = y[2]
elif y[0] == 'LogVersion':
meta_info['log_version'] = y[2]
elif y[0] == 'NumVolumes':
n_vols = int(y[2])
meta_info['n_vols'] = n_vols
elif y[0] == 'NumSlices':
n_slices = int(y[2])
meta_info['n_slices'] = n_slices
elif y[0] == 'NumEchoes':
n_echoes = int(y[2])
meta_info['n_echoes'] = n_echoes
elif y[0] == 'FirstTime':
meta_info['init_physio'] = int(y[2])
elif y[0] == 'LastTime':
meta_info['end_physio'] = int(y[2])
# Inherent assumption that all lines starting with a number are data
if stt == 0:
try:
int(y[0])
stt = i
except ValueError:
continue
if stp == 0:
try:
int(y[0])
continue
except ValueError:
stp = i
# Pull data into numpy array
# traces = np.zeros((stp - stt, len(cols)))
traces = np.zeros((2, n_vols, n_slices, n_echoes), dtype=int)
for i in range(stt, stp):
y = lines[i].split()
ivol = int(y[0])
islice = int(y[1])
iecho = int(y[-1])
acq_start = int(y[2])
acq_end = int(y[3])
traces[:, ivol, islice, iecho] = [acq_start, acq_end]
meta_info['init_scan'] = int(traces.min())
meta_info['end_scan'] = int(traces.max())
    # TODO: Do we need this? The repetition time is something usually known
repetition_time = (meta_info['end_scan'] - meta_info['init_scan'])/n_vols
meta_info['repetition_time'] = np.round(repetition_time)
return traces, meta_info
def load_cmrr_data(filename, sig_type, info_dict, sync_scan=True):
"""
Load data log files from CMRR sequences.
Parameters
----------
filename : str, pathlike
Path to recording log file..
sig_type : str
Type of signal for use in dictionary
info_dict : dict
Dictionary with the meta information of the Info log file. It needs
to be compute before by using the function load_cmrr_info.
sync_scan : bool, optional
Whether we want to resample the signal to be synchronized
with the scanner times. The default is True.
Returns
-------
signal : ndarray
The recording signal, where the number of columns corresponds
to the number of channels (ECG: 4, PULS: 1, RESP: 1) and the rows to
observations.
info_dict : dict
Updated meta info of the physiological recording.
"""
from scipy.interpolate import interp1d
# TODO: Add checks of filename and info dict
info_dict = info_dict.copy()
lines = get_lines(filename)
# Get sampling rate and start of data
stt = 0
for i in range(len(lines)):
y = lines[i].split()
if len(y) == 0:
continue
if y[0] == 'SampleTime':
sample_rate = int(y[2])
# Inherent assumption that all lines starting with a number are data
if stt == 0:
try:
int(y[0])
stt = i
except ValueError:
continue
# Get number of channels (not particularly efficient, but thorough...)
if y[1] == 'PULS' or y[1] == 'RESP':
n_channels = 1
else:
n_channels = 0
for i in range(stt, len(lines)):
y = lines[i].split()
j = int(y[1][-1])
if j > n_channels:
n_channels = j
# Pull data into numpy array
n_samples = info_dict['end_physio'] - info_dict['init_physio'] + 1
full_signal = np.zeros((n_samples, n_channels))
time = np.arange(0, n_samples)
if n_channels == 1:
# Use separate loop for single channel to avoid repeated ifs for
# channel #
for i in range(stt, len(lines)):
y = lines[i].split()
k = int(int(y[0]) - info_dict['init_physio'])
full_signal[k, 0] = float(y[2])
time[k] = int(y[0])
else:
for i in range(stt, len(lines)):
y = lines[i].split()
j = int(int(y[1][-1])-1)
k = int(int(y[0]) - info_dict['init_physio'])
full_signal[k, j] = float(y[2])
time[k] = int(y[0])
if sync_scan:
new_time = np.arange(info_dict['init_scan'],
info_dict['end_scan'] + 1)
else:
new_time = np.arange(info_dict['init_physio'],
info_dict['end_physio'] + 1)
signal = []
for s_channel in full_signal.T:
# Use a mask to interpolate possible zero/nan artifacts
mask = (s_channel != 0.) & ~np.isnan(s_channel)
signal.append(interp1d(time[mask], s_channel[mask],
fill_value="extrapolate")(new_time))
signal = np.column_stack(signal)
info_dict[sig_type] = {}
info_dict[sig_type]['n_channels'] = n_channels
info_dict[sig_type]['sample_rate'] = sample_rate
return signal, info_dict
def proc_input(path,
info_file,
puls_file,
resp_file,
ecg_file,
meta_filename='meta.json',
sig_filename='signal',
show_signals=False):
"""
Extract relevant data from info, PULS, RESP, and ECG files; creates meta
file with info and .npy file with signal array
Parameters
----------
path : str, pathlike
Path to directories containing files.
info_file : str, pathlike
Info file name.
puls_file : str, pathlike
PULS file name.
resp_file : str, pathlike
RESP file name.
ecg_file : str, pathlike
ECG file name.
meta_filename : str, pathlike, optional
Filename to store meta info, default 'meta.json'
sig_filename : str, pathlike, optional
Filename to store signal array, default 'signal'
show_signals : bool, optional
Flag to show plots of signals, default False.
"""
cardiac_range = [0.75, 3.5] # Hz
respiratory_range = [0.01, 0.5] # Hz
# TODO: Take this as an input or extract somehow
sampling_frequency = 400 # Hz
# ensure path ends in /
if path[-1] != '/':
path = path + '/'
# get data from INFO file
traces, meta_info = load_cmrr_info(filename=path + info_file)
meta_info['frequency_info'] = {}
meta_info['frequency_info']['sampling_rate'] = sampling_frequency
meta_info['frequency_info']['cardiac_range'] = cardiac_range
meta_info['frequency_info']['respiratory_range'] = respiratory_range
# get data from PULS file
PULS, meta_info = \
load_cmrr_data(filename=path + puls_file,
sig_type='puls',
info_dict=meta_info,
sync_scan=True)
# get data from RESP file
RESP, meta_info = \
load_cmrr_data(filename=path + resp_file,
sig_type='resp',
info_dict=meta_info,
sync_scan=True)
# get data from ECG file
ECG, meta_info = \
load_cmrr_data(filename=path + ecg_file,
sig_type='ecg',
info_dict=meta_info,
sync_scan=True)
# store aligned signals in a single matrix, save to signal.npy
n_channels = meta_info['ecg']['n_channels']
signal = np.zeros((len(ECG), n_channels + 2))
signal[:, 0:n_channels] = ECG
signal[:, [n_channels]] = PULS
signal[:, [n_channels + 1]] = RESP
np.save(sig_filename, signal)
with open(meta_filename, 'w') as outfile:
json.dump(meta_info, outfile)
# plot signals if desired
if show_signals:
mpl.plot(PULS)
mpl.show()
mpl.plot(RESP)
mpl.show()
mpl.plot(ECG[:, 0], 'b')
mpl.plot(ECG[:, 1], 'r')
mpl.plot(ECG[:, 2], 'g')
mpl.plot(ECG[:, 3], 'k')
mpl.show()
def load_bids_physio(data_file, json_file, resample_freq=None, sync_scan=True):
"""
Load physiological data in BIDS format.
Parameters
----------
data_file : str, pathlike
Path to recording bids physio file.
json_file : str, pathlike
Path to the sidecar json file of the input bids physio.
resample_freq : float, optional
Frequency to resample the data. The default is None.
sync_scan : bool, optional
Whether we want the signal to be synchronized
with the scanner times. The default is True.
Returns
-------
signal : ndarray
The signal, where each columns corresponds to a particular
recording, whose names can wh be identfied in the meta_info
dictionary returned, and the rows to observations.
meta_info : dict
Meta information that at least contains the sampling frequency,
the start time of the signals, and the name of each signal column.
"""
from scipy.interpolate import interp1d
# Validate input data
if data_file.endswith("physio.tsv.gz") is False:
raise ValueError("Data file should end with physio.tsv.gz")
if json_file.endswith("physio.json") is False:
raise ValueError("Sidecar file should end with physio.json")
# Check that both files have the same name without extensions
if data_file.split(".tsv.gz")[0] != json_file.split(".json")[0]:
raise ValueError("data file and json file do not have the same "
"name (without extensions), which invalidates "
" BIDS specification")
# Load sidecar information
with open(json_file) as fp:
meta_info = json.load(fp)
# Validate fields in JSON file according to BIDS
req_fields = ['Columns', 'SamplingFrequency', 'StartTime']
if set(req_fields).issubset(set(meta_info.keys())) is False:
missing_fields = set(req_fields).difference(set(meta_info.keys()))
raise ValueError("The following required fields appear to be missing "
"in the BIDS JSON file: " + ', '.join(missing_fields)
)
# Load data file
data = np.loadtxt(data_file)
if data.ndim == 1:
data = data.reshape(-1, 1)
# Check that the number of columns in data is the same as the number of
# names in "Columns" of the json file. If not, a warning will be prompted.
if data.shape[1] != len(meta_info['Columns']):
warnings.warn("The number of columns in the data file does not "
" match the number of names in the metafield 'Columns'"
)
if resample_freq is None:
resample_freq = meta_info['SamplingFrequency']
else:
resample_freq = float(resample_freq)
# Define init and end time recording
n_obs = data.shape[0]
init_physio = meta_info['StartTime']
end_physio = init_physio + n_obs/meta_info['SamplingFrequency']
# Define time ticks then
time = np.linspace(init_physio, end_physio, num=n_obs, endpoint=False)
# Number of times, depending on whether we are resampling or not
n_resample = int(
np.round(n_obs * (resample_freq / meta_info['SamplingFrequency']))
)
new_time = np.linspace(init_physio, end_physio, num=n_resample,
endpoint=False)
if sync_scan:
new_num = sum(new_time >= 0)
# Resample to init time 0, keeping the same number of obs after 0
new_time = np.linspace(0, end_physio, num=new_num, endpoint=False)
meta_info['StartTime'] = 0.0
signal = []
for s_channel in data.T:
# Use a mask to interpolate possible zero/nan artifacts
mask = (s_channel != 0.) & ~np.isnan(s_channel)
signal.append(interp1d(time[mask], s_channel[mask],
fill_value="extrapolate")(new_time))
signal = np.column_stack(signal)
# Update field in meta information object
meta_info['SamplingFrequency'] = resample_freq
return signal, meta_info
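# Hedged usage sketch (illustrative only; file names are hypothetical):
#
#   signal, meta = load_bids_physio('sub-01_task-rest_physio.tsv.gz',
#                                   'sub-01_task-rest_physio.json',
#                                   resample_freq=100,
#                                   sync_scan=True)
#   # signal has one column per channel in the data file;
#   # meta['SamplingFrequency'] is updated to 100 and meta['StartTime'] to 0.0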
###############################################################################
#path = '/Users/andrew/Fellowship/projects/brainhack-physio-project/data/sample2/'
#info_file = 'Physio_sample2_Info.log'
#puls_file = 'Physio_sample2_PULS.log'
#resp_file = 'Physio_sample2_RESP.log'
#ecg_file = 'Physio_sample2_ECG.log'
#proc_input(path, info_file, puls_file, resp_file, ecg_file, show_signals=True)
|
StarcoderdataPython
|
8068253
|
<filename>fourier.py
"""
Calculates the Fourier coefficients for a given set of data points
"""
import matplotlib.pyplot as plt
import numpy as np
class Fourier:
def __init__(self, points, N):
# self.c stores all coefficients of the fourier series
# self.n stores the value of n that each coefficient corresponds to
# self.n == [0, 1, -1, 2, -2, 3, -3, ...]
self.c = np.zeros(2 * N + 1)
self.n = np.array([(n // 2) * (-1) ** (n % 2) for n in range(1, 2 * N + 2)])
self.L = points[-1, 0] - points[0, 0]
values = self.get_points_for_trapz(points)
self.integrate_coefficients(values)
def get_points_for_trapz(self, points):
"""Convert an array of [t, x] points to be ready for integration
Output is a 2D array with rows [t, c_0],
where each row corresponds to the value of the integrand at point t
These rows can then be integrated across via the trapezium rule
This will create rows up to the Nth coefficient of the Fourier series"""
ts = points[:, 0]
xs = points[:, 1]
c_n = np.array([xs * np.exp(-1j * n * ts * 2 * np.pi / self.L) for n in self.n])
integrand_values = np.array([ts, *c_n])
return integrand_values
def integrate_coefficients(self, integrand_values):
ts, values = integrand_values[0, :], integrand_values[1:, 0:]
coeffs = np.trapz(values, x=ts, axis=1)
coeffs *= (1 / self.L)
self.c = coeffs
def __call__(self, ts):
"""Takes an array, and evaluate the fourier series f(t) for each t in ts
Returns an array of f(t)
If the input is an float, return an array of length 1"""
if type(ts) != np.ndarray:
ts = np.array([ts])
fs = np.zeros_like(ts, dtype=np.complex_)
for i, t in enumerate(ts):
            # Synthesis uses the conjugate kernel (+1j) of the analysis integral above
            f = sum(self.c * np.exp(1j * self.n * t * 2 * np.pi / self.L))
fs[i] = f
return fs
if __name__ == "__main__":
ts = np.linspace(0, 6.28, 500)
points = np.array([ts, np.sin(ts - 2) + 1j * np.cos(ts)]).T
plt.plot(points[:,1].real, points[:,1].imag)
fourier = Fourier(points, 20)
f_points = fourier(ts)
plt.plot(f_points.real, f_points.imag)
plt.show()
|
StarcoderdataPython
|
8103775
|
from glob import glob
from shutil import move, rmtree
from os import listdir, path, walk, unlink
from subprocess import check_output, STDOUT, CalledProcessError
def run(*args, **kwargs):
try:
return check_output(*args, **kwargs, stderr=STDOUT)
except CalledProcessError as e:
raise Exception(e.output.decode())
def move_files(source, destination):
for _file in glob(f'{source}/*'):
move(_file, destination)
return True
def remove_files(source, ignore=()):
for _file in listdir(source):
if _file in ignore:
continue
file_path = f'{source}/{_file}'
if path.isdir(file_path):
rmtree(file_path)
else:
unlink(file_path)
return True
def replace_in_files(directory, replacements):
for root, dirs, files in walk(directory):
for name in files:
file_path = path.join(root, name)
with open(file_path) as file:
contents = file.read()
with open(file_path, 'w') as file:
contents = replace_in_str(contents, replacements)
file.write(contents)
def replace_in_str(string, replacements):
for k, v in replacements.items():
string = string.replace('{' + k + '}', v)
return string
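# Usage sketch: placeholders are written as {name} in the template files.
# replace_in_str("Hello {name}!", {"name": "world"}) -> "Hello world!"
# replace_in_files("./project", {"package": "my_pkg", "author": "Jane Doe"})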
|
StarcoderdataPython
|
4964972
|
<filename>IRRemote/listener/irservice.py
#!/usr/bin/env python
#-- @author: Makin, August 2015
from RPi import GPIO
import time
import os
import subprocess
import re
SAMPLE_SIZE = 256
IRpin = 14
GPIO.setmode(GPIO.BCM)
GPIO.setup(IRpin, GPIO.IN)
def kill():
proses = "omxplayer"
hasiltm = os.popen("ps -ef | grep "+proses).read()[:-1]
    print(hasiltm)
namauser = os.popen("whoami").read()[:-1]
prosesid = hasiltm.lstrip(namauser).lstrip(" ")[ :hasiltm.lstrip(namauser).lstrip(" ").find(" ") ]
os.system("kill -9 "+prosesid)
def kill2():
print (os.popen("date > /home/pi/remoteTV/killlog"))
proses = "omxplayer"
hasiltm = os.popen("ps ax | grep -v grep | grep omxplayer").read()[:-1]
pids = re.findall('(\\d+).+', hasiltm)
process = ""
for pid in pids:
process = process + pid + " "
    print(process)
os.system("sudo kill -9 "+process)
lastLow = 0.0
lastHigh = 0.0
directions = []
def method2():
lowNum =0
highNum = 0
while (True):
cc = 0
while (cc<SAMPLE_SIZE):
if (GPIO.input(IRpin)==1):
highNum += 1
else:
lowNum += 1
cc+= 1
#~ if (lowNum>highNum):
if (lowNum>=SAMPLE_SIZE):
print (highNum, lowNum)
kill2()
time.sleep(3)
highNum=0
lowNum=0
method2()
GPIO.cleanup()
|
StarcoderdataPython
|
5070535
|
import requests
print(f"requests version: {requests.__version__}")
print()
req_google = requests.get("https://www.google.com")
req_myfile = requests.get("https://raw.githubusercontent.com/Andrewjjj/CMPUT404Lab/main/Lab1/lab1.py")
print(req_myfile.content)
|
StarcoderdataPython
|
93075
|
<reponame>overholts/tuner
import os
import shutil
from pathlib import Path
def copy(source: Path, destination: Path):
os.makedirs(destination.parent, 0o755, exist_ok=True)
shutil.copy(str(source), str(destination))
def remove(target: Path):
os.remove(target)
|
StarcoderdataPython
|
9602251
|
<gh_stars>0
import math
__all__ = ['sito', 'fact', 'tot', 'primes', 'nwd', 'primo', 'nTyFibb',
'mniejszyRownyFibb', 'isPrime', 'nextPrime', 'tableOfPrimes']
def sito(u):
tab = [0 for i in range(u+1)]
for i in range(2, u+1):
tab[i] = i
for i in range(4, u+1, 2):
tab[i] = 2
    for i in range(3, math.floor(math.sqrt(u)) + 1):
if tab[i] == i:
for j in range(i*i, u+1, i):
if (tab[j] == j):
tab[j] = i
return tab
def fact(x, licz):
primes = []
counts = []
primes.append(licz[x])
counts.append(1)
x = x // licz[x]
while x > 1:
if licz[x] == primes[-1]:
counts[-1] += 1
x = x // licz[x]
else:
primes.append(licz[x])
counts.append(1)
x = x // licz[x]
wyn = [primes, counts]
return wyn
def primes(x, licz):
primes = []
while x > 1:
if licz[x] in primes:
x = x // licz[x]
else:
primes.append(licz[x])
x = x // licz[x]
return primes
def tot(x, licz):
p = primes(x, licz)
if x == 0:
return 0
else:
wyn = x
for i in p:
wyn *= (1-1/i)
return int(wyn)
def nwd(a, b):
while b > 0:
c = a % b
a = b
b = c
return a
def primo(x):
licz = sito(x)
wyn = []
for i in range(2, x):
if i == licz[i]:
wyn.append(i)
return wyn
def nTyFibb(n):
"""Zwraca n--tą liczbę Fibbonacciego"""
if n <= 1:
return 1
f = 1
fm1 = 0
for ii in range(1, n):
pom = f
f = fm1+f
fm1 = pom
return f
def mniejszyRownyFibb(n):
"""Zwraca NUMER największej liczby Fibbonacciego mniejszej lub równej n"""
if n <= 1:
return 0
numer = 0
fibb = 1
fibbm1 = 0
while fibb <= n:
pom = fibb
fibb = fibb+fibbm1
fibbm1 = pom
numer += 1
return numer
def isPrime(n):
"""Szybko sprawdza czy liczba jest pierwsza, algorytm z
https://github.com/bzglinicki/python-training/blob/main/Rozwiazania-zadan/1_Podstawy/nextPrime.py
"""
if n <= 1:
        return False  # These two checks could be combined:
if n <= 3:
return True # if n <= 3: return n > 1
if n % 2 == 0 or n % 3 == 0:
return False
i = 5
while i ** 2 <= n:
if n % i == 0 or n % (i + 2) == 0:
return False
i += 6
return True
def nextPrime(n):
"""Zwraca najbliższą liczbę pierwszą, większą od n"""
    # Not sure how fast this is, i.e. whether it could be done faster
if (n <= 1):
return 2
k = n
prime = False
while not prime:
k += 1
prime = isPrime(k)
return k
def tableOfPrimes(n):
"""Zwraca tablicę wypełnioną liczbami pierwszymi <= n"""
if n <= 1:
return []
    if n == 2:  # Handled separately so the loop below can step by 2 instead of 1, doubling the speed
return [2]
last = 3
res = [2]
while not last > n:
if isPrime(last):
res.append(last)
last += 2
return res
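# Usage sketch: `fact`, `primes` and `tot` all expect the smallest-prime-factor
# table produced by `sito` for an upper bound covering the queried value.
# licz = sito(100)
# fact(84, licz) -> [[2, 3, 7], [2, 1, 1]]   (84 = 2^2 * 3 * 7)
# tot(84, licz)  -> 24                        (Euler's totient)
# isPrime(97) -> True; nextPrime(97) -> 101; tableOfPrimes(20) -> [2, 3, 5, 7, 11, 13, 17, 19]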
|
StarcoderdataPython
|
124816
|
<reponame>matthewb66/blackduck-scan-directguidance
import re
import os
import tempfile
# import semver
import xml.etree.ElementTree as ET
from bdscan import globals, classComponent
class MyTreeBuilder(ET.TreeBuilder):
def comment(self, data):
self.start(ET.Comment, {})
self.data(data)
self.end(ET.Comment)
class MavenComponent(classComponent.Component):
def __init__(self, compid, org, name, version, ns):
super().__init__(compid, name, version, ns)
self.org = org
self.pm = 'maven'
self.pms = ['maven', 'gradle']
def get_http_name(self):
bdio_name = "http:" + re.sub(":", "/", self.compid)
return bdio_name
def get_projfile(self, entry, allpoms):
import urllib.parse
foundpom = ''
folderarr = entry.split('/')
if len(folderarr) < 3:
return ''
# folder = folderarr[-2]
folder = urllib.parse.unquote(folderarr[-2])
farr = folder.split(os.path.sep)
# 'http:maven/com.blackducksoftware.test/example-maven-travis/0.1.0-SNAPSHOT/example-maven-travis/maven'
# 'http:maven/com.blackducksoftware.test/example-maven-travis/0.1.0-SNAPSHOT/copilot-maven%2Fexample-maven-travis/maven'
if len(farr) > 1:
topfolder = farr[-2]
else:
topfolder = ''
for pom in allpoms:
arr = pom.split(os.path.sep)
if len(arr) >= 2 and arr[-2] == topfolder:
if os.path.isfile(pom):
foundpom = pom
break
elif topfolder == '':
foundpom = pom
break
return foundpom
@staticmethod
def normalise_dep(dep):
#
# Replace / with :
if dep.find('http:') == 0:
dep = dep.replace('http:', '')
return dep.replace('/', ':')
def check_ver_origin(self, ver):
if len(self.origins) > 0 and ver in self.origins.keys():
for over in self.origins[ver]:
if 'originName' in over and 'originId' in over and over['originName'] == self.ns:
# 'org.springframework:spring-aop:3.2.10.RELEASE'
corg, cname, cver = self.parse_compid(over['originId'])
# a_over = over['originId'].split(':')
if corg == self.org and cname == self.name:
return True
return False
def prepare_upgrade(self, upgrade_index):
if len(self.potentialupgrades) < upgrade_index:
return False
upgrade_version = self.potentialupgrades[upgrade_index]
pom_contents = ''
if not os.path.isfile('pom.xml'):
pom_contents = f'''<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>sec</groupId>
<artifactId>test</artifactId>
<version>1.0.0</version>
<packaging>pom</packaging>
<dependencies>
'''
# arr = self.compid.split(':')
# forge = arr[0]
groupid = self.org
artifactid = self.name
pom_contents += f''' <dependency>
<groupId>{groupid}</groupId>
<artifactId>{artifactid}</artifactId>
<version>{upgrade_version}</version>
</dependency>
'''
try:
with open('pom.xml', "a") as fp:
fp.write(pom_contents)
except Exception as e:
print(e)
return False
return True
def get_projfile_linenum(self, filename):
if not filename.endswith('pom.xml'):
return -1
def getline(comp, ver, filename):
compstring = f"<artifactId>{comp}</artifactId>".lower()
verstring = f"<version>{ver}</version>".lower()
try:
with open(filename, 'r') as f:
foundcomp = False
for (i, line) in enumerate(f):
if compstring in line.lower():
foundcomp = True
if foundcomp and (ver == '' or verstring in line.lower()):
return i
except Exception as e:
pass
return -1
# parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True))
ET.register_namespace('', "http://maven.apache.org/POM/4.0.0")
ET.register_namespace('xsi', "http://www.w3.org/2001/XMLSchema-instance")
tree = ET.parse(filename, parser=ET.XMLParser(target=MyTreeBuilder()))
root = tree.getroot()
nsmap = {'m': 'http://maven.apache.org/POM/4.0.0'}
for dep in root.findall('.//m:dependencies/m:dependency', nsmap):
groupId = dep.find('m:groupId', nsmap).text
artifactId = dep.find('m:artifactId', nsmap).text
version = ''
verentry = dep.find('m:version', nsmap)
if verentry is not None:
version = verentry.text
if artifactId == self.name and (version == '' or "${" in version):
return getline(self.name, '', filename)
if artifactId == self.name and version == self.version:
return getline(self.name, self.version, filename)
return -1
def do_upgrade_dependency(self):
files_to_patch = dict()
# dirname = tempfile.TemporaryDirectory()
tempdirname = tempfile.mkdtemp(prefix="snps-patch-" + self.name + "-" + self.version)
for package_file in self.projfiles:
# dir = os.path.sep.join(package_file.split(os.path.sep)[:-1])
parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True))
ET.register_namespace('', "http://maven.apache.org/POM/4.0.0")
ET.register_namespace('xsi', "http://www.w3.org/2001/XMLSchema-instance")
tree = ET.parse(package_file, parser=ET.XMLParser(target=MyTreeBuilder()))
root = tree.getroot()
nsmap = {'m': 'http://maven.apache.org/POM/4.0.0'}
# globals.printdebug(f"DEBUG: Search for maven dependency {component_name}@{component_version}")
for dep in root.findall('.//m:dependencies/m:dependency', nsmap):
groupId = dep.find('m:groupId', nsmap).text
artifactId = dep.find('m:artifactId', nsmap).text
verentry = dep.find('m:version', nsmap)
if artifactId == self.name:
if verentry is not None:
version = verentry.text
globals.printdebug(
f"DEBUG: Found GroupId={groupId} ArtifactId={artifactId} Version={version}")
verentry.text = self.goodupgrade
break
else:
# ToDo: Need to add version tag as it does not exist
new = ET.Element('version')
new.text = self.goodupgrade
dep.append(new)
break
# Change into sub-folder for packagefile
subtempdir = os.path.dirname(package_file)
os.makedirs(os.path.join(tempdirname, subtempdir), exist_ok=True)
xmlstr = ET.tostring(root, encoding='UTF-8', method='xml')
with open(os.path.join(tempdirname, package_file), "wb") as fp:
fp.write(xmlstr)
print(f"BD-Scan-Action: INFO: Updated Maven component in: {os.path.join(tempdirname, package_file)}")
files_to_patch[package_file] = os.path.join(tempdirname, package_file)
return files_to_patch
@staticmethod
def finalise_upgrade():
try:
with open('pom.xml', "a") as fp:
fp.write(''' </dependencies>
</project>
''')
# os.system('cat pom.xml')
except Exception as e:
print(e)
return
@staticmethod
def parse_compid(compid):
arr = re.split('[:/]', compid)
if len(arr) == 4:
return arr[1], arr[2], arr[3]
else:
return arr[0], arr[1], arr[2]
@staticmethod
def supports_direct_upgrades():
return True
|
StarcoderdataPython
|
138587
|
#! /usr/bin/env python
'''Make sure the Riak client is sane'''
import unittest
from test import BaseTest
from simhash_db import Client
class RiakTest(BaseTest, unittest.TestCase):
'''Test the Riak client'''
def make_client(self, name, num_blocks, num_bits):
return Client('riak', name, num_blocks, num_bits)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4933707
|
import threading
from configHandler import loadConfigData
from clientClass import Client
def main():
mainConfig = loadConfigData("../config.json")
PORT = mainConfig["PORT"]
SERVER_IP = mainConfig["SERVER_IP"]
DISCONNECT_MESSAGE = mainConfig["DISCONNECT_MESSAGE"]
SERVER_ADDRESS = (SERVER_IP, PORT)
NAME = input("What is your nickname?\n")
client = Client(PORT, SERVER_IP, DISCONNECT_MESSAGE, SERVER_ADDRESS, NAME)
sending_messages_thread = threading.Thread(target=send_messages, args=(client,))
sending_messages_thread.start()
receiving_messages_thread = threading.Thread(target=receive_messages, args=(client,))
receiving_messages_thread.start()
client.send(NAME, 2)
def send_messages(client):
while client.connected:
client.send(input())
def receive_messages(client):
while client.connected:
author, responseType, response = client.receive(1024)
print(f"[{author}] {response}")
if not responseType:
client.disconnect()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3279692
|
import sys
import argparse
import warnings
from processing.data_pipeline import DataPipeline
if __name__ == "__main__":
warnings.simplefilter("ignore")
parser = argparse.ArgumentParser(description="Implement the data pipeline")
parser.add_argument(
"--filename",
"-input",
default="./data/raw/pp-2021.csv",
help="The CSV price paid data file",
)
parser.add_argument(
"--output-folder",
"-output",
default="./data/processed/data",
help="To save the data as a newline delimited JSON format",
)
args = parser.parse_args()
try:
data_pipe = DataPipeline(args.filename)
except FileNotFoundError:
print(
"Oops! File not found. Ensure you saved the data in the data/raw directory"
)
sys.exit(1)
print("Head of the data: ")
print("-" * 20)
print(" ")
print(data_pipe.data().head(10))
print(" ")
print("-" * 20)
print("About to save data as a JSON file")
data_pipe.save_data_as_json(args.output_folder)
|
StarcoderdataPython
|
1724255
|
<reponame>StevenHuang2020/ML
import matplotlib.pyplot as plt
import numpy as np
from distributions import Binomial_distribution, Discrete_uniform_distribution
def plotDistributeBar(ax, data, label='', width=0.3, offset=0, title='Probability Distribution of true'):
ax.bar(np.arange(len(data))+offset,data,width=width,label=label)
fontSize = 12
ax.set_title(title,fontsize=fontSize)
plt.xlabel('Different Teeth Bins',fontsize=fontSize)
plt.ylabel('Probability',fontsize=fontSize)
plt.xticks(np.arange(len(data)))
def plorDataDis(true_data,uniform,bino):
ax = plt.subplot(1,1,1)
offset = 0
width=0.2
plotDistributeBar(ax, true_data, label='True data', offset=offset, width=width)
offset += width
plotDistributeBar(ax, uniform, label='Uniform data', offset=offset, width=width)
offset += width
print(bino)
plotDistributeBar(ax, bino, label='Binomial data', offset=offset, width=width)
plt.legend()
plt.show()
def get_klpq_div(p_probs, q_probs):
kl_div = 0.0
for pi, qi in zip(p_probs, q_probs):
kl_div += pi*np.log(pi/qi)
return kl_div
def get_klqp_div(p_probs, q_probs):
kl_div = 0.0
for pi, qi in zip(p_probs, q_probs):
kl_div += qi*np.log(qi/pi)
return kl_div
def plotKLResult(true_data,minP):
ax = plt.subplot(1,1,1)
offset = 0
width=0.2
plotDistributeBar(ax, true_data, label='True data', offset=offset, width=width)
pAll = [0.02,0.1,minP,0.8]
for p in pAll:
offset += width
dis = Binomial_distribution(N=len(true_data),p=p)
plotDistributeBar(ax, dis, label='Binomial '+ str(p), offset=offset, width=width)
plt.legend()
plt.show()
def testDiscretKL():
true_data = [0.02, 0.03, 0.15, 0.14, 0.13, 0.12, 0.09, 0.08, 0.1, 0.08, 0.06]
print('sum=', sum(true_data))
assert sum(true_data)==1.0
unif_data = Discrete_uniform_distribution(true_data,N=len(true_data))
bino_data = Binomial_distribution(N=len(true_data),p=0.3)
plorDataDis(true_data,unif_data,bino_data)
print('KL(True||Uniform): ', get_klpq_div(true_data,unif_data))
print('KL(True||Binomial): ', get_klpq_div(true_data,bino_data))
p = np.arange(0.02, 1.0, 0.02) #np.linspace(0, 1.0, 50)
klpq = [get_klpq_div(true_data,Binomial_distribution(N=len(true_data),p=i)) for i in p]
klqp = [get_klqp_div(true_data,Binomial_distribution(N=len(true_data),p=i)) for i in p]
print('minimal klpq,', np.argmin(klpq), np.min(klpq))
ax = plt.subplot(1,1,1)
plotDistribute(ax,p,klpq,label='KL(P||Q)')
plotDistribute(ax,p,klqp,label='KL(Q||P)')
plotDistribute(ax,p,np.array(klpq)-np.array(klqp),label='KL(P||Q)-KL(Q||P)')
plt.show()
plotKLResult(true_data, np.min(klpq))
def plotDistribute(ax,x,y,label='', title='Binomial P vs KL'):
ax.plot(x,y,label=label)
fontSize = 12
ax.set_title(title,fontsize=fontSize)
ax.legend()
plt.xlabel('Binomial P',fontsize=fontSize)
plt.ylabel('KL(P||Q) divergence',fontsize=fontSize)
#plt.show()
def main():
testDiscretKL()
if __name__=='__main__':
main()
|
StarcoderdataPython
|
1805036
|
<gh_stars>0
#!/usr/bin/env python3
from functools import partial, reduce
from itertools import compress, count, cycle, islice, starmap
from operator import mul
def get_input(filename):
with open(filename, "r") as input_file:
# returning list not a generator because I'll need to go through
# it several times for part two. Faster to load into memory once
# and use that copy repeatably then reading from file multiple
# times
return [l.strip() for l in input_file]
def terrain(tree_patterns):
for pattern in tree_patterns:
yield cycle(pattern)
def levels_to_travel(levels, down_travel):
# only want to pick the terrain levels (rows) that the sled travels
# Always pick the first row (so True) then the number of rows to
# skip is equal to the (down_travel - 1). Repeat this with cycle
# then to select the terrain travelled with compress
return compress(levels, cycle([True] + [False] * (down_travel - 1)))
def path(levels, right_travel):
for level, position in zip(levels, count(0, right_travel)):
# need next() because islice() returns an iterator even though
# we are only slicing a single item
yield next(islice(level, position, position + 1))
def trees(tree_patterns, down_travel, right_travel):
sled_path = path(
levels_to_travel(terrain(tree_patterns), down_travel), right_travel
)
return sum(point == "#" for point in sled_path)
def main():
tree_patterns = get_input("input.txt")
trees_hit = partial(trees, tree_patterns)
print("Part 1 solution:", trees_hit(1, 3))
# Part 2 slopes to calculate trees hit (down travel, right travel)
slopes = ((1, 1), (1, 3), (1, 5), (1, 7), (2, 1))
print("Part 2 solution:", reduce(mul, starmap(trees_hit, slopes)))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1849840
|
<reponame>cyware-labs/ioc-analyzer<filename>ioc_analyzer/py_whois/query.py
import socket
from ioc_analyzer.py_whois.parser import RawTextWhoIsParser
from ioc_analyzer.py_whois.tld import tld_whois_dict
DEFAULT_WHOIS_PORT = 43
class SocketWrapper(object):
NEW_LINE_CHAR = '\n'
def __init__(self, server, port=DEFAULT_WHOIS_PORT):
self.server = server
self.port = port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.server, self.port))
self.socket = s
def query(self, data):
data += self.NEW_LINE_CHAR
self.socket.send(data.encode())
message = ''
while True:
return_data = self.socket.recv(1000)
if not return_data:
break
message += return_data.decode('utf-8', 'ignore')
return message
class DirectDomainLookup(object):
REFER_KEYS = ['refer', 'Registrar WHOIS Server']
def get_whois_server(self, domain):
tld = domain.split('.')[-1]
whois_server = tld_whois_dict.get(tld)
if not whois_server:
raise Exception('Whois server not found')
return whois_server['host']
def get_details(self, domain, recursive=True, parse_data=True, whois_server=None):
if whois_server is None:
whois_server = self.get_whois_server(domain)
raw_data = SocketWrapper(whois_server).query(domain)
parsed_data = RawTextWhoIsParser().parse(raw_data)
if recursive:
for key in self.REFER_KEYS:
if key in parsed_data and whois_server != parsed_data[key]:
return self.get_details(domain, recursive=recursive,
parse_data=parse_data,
whois_server=parsed_data[key])
if parse_data:
return parsed_data
return raw_data
|
StarcoderdataPython
|
6603195
|
<gh_stars>0
r"""
>>> import sys
>>> import pretext
>>> myrepr = pretext.PrefixRepr()
>>> repr = myrepr.repr
>>> def _displayhook(value):
... if value is not None:
... sys.stdout.write(myrepr.repr(value))
>>> sys.displayhook = _displayhook
>>> u''
u''
>>> b''
b''
>>> bytes()
b''
>>> b'\0'
b'\x00'
>>> b"'"
b"'"
"""
def split(s):
"""
>>> import sys
>>> import pretext
>>> myrepr = pretext.PrefixRepr()
>>> repr = myrepr.repr
>>> def _displayhook(value):
... if value is not None:
... sys.stdout.write(myrepr.repr(value))
>>> sys.displayhook = _displayhook
>>> repr = myrepr
>>> split(u'a b c d e')
[u'a', u'b', u'c', u'd', u'e']
>>> split(b'a b c d e')
[b'a', b'b', b'c', b'd', b'e']
"""
return s.split()
class C(object):
def join(self, seq):
"""
>>> import sys
>>> import pretext
>>> myrepr = pretext.PrefixRepr()
>>> repr = myrepr.repr
>>> def _displayhook(value):
... if value is not None:
... sys.stdout.write(myrepr.repr(value))
>>> sys.displayhook = _displayhook
>>> c = C()
        >>> c.join([b'a', b'b', b'c', b'd', b'e'])
b'a!b!c!d!e'
"""
return b'!'.join(seq)
|
StarcoderdataPython
|
5162070
|
<filename>src/Client/View/MainFrame/TabFrame/FirstTabFrame/subWindows/viewAllWindow.py<gh_stars>10-100
# -*- coding:utf-8 -*-
from src.Client.Conf.config import *
from src.Client.MissionSystem import missionSystem
from src.Client.SystemTools.ConfFileRead import configFileRead
class ViewAllWindow():
"""
    GUI window that displays all missions.
"""
def __init__(self):
self.windowTitleVar = tkinter.StringVar()
self.missionIdVar = tkinter.StringVar()
self.missionNameVar = tkinter.StringVar()
self.missionRangeVar = tkinter.StringVar()
self.missionStateCodeVar = tkinter.StringVar()
self.missionNextTimeCodeVar = tkinter.StringVar()
self.missionLoopTimeCodeVar = tkinter.StringVar()
self.missionisFinish = tkinter.StringVar()
self.language()
self.missionSystemTools = missionSystem.MissionSystem()
def language(self):
"""
        Switch the UI language; not called externally for now (changes take effect after a restart).
:return:
"""
languageType = configFileRead.ConfigFileRead(fileName='./conf/user.ini').readFile("LANGUAGE", 'language')
if languageType == 'CN':
self.windowTitleVar.set('查看全部')
self.missionIdVar.set('任务id')
self.missionNameVar.set('任务名')
self.missionRangeVar.set('任务范围')
self.missionStateCodeVar.set('任务进度')
self.missionNextTimeCodeVar.set('下次任务')
self.missionLoopTimeCodeVar.set('循环次数')
self.missionisFinish.set('是否完成')
elif languageType == 'EN':
self.windowTitleVar.set('view all')
self.missionIdVar.set('mission id')
self.missionNameVar.set('mission name')
self.missionRangeVar.set('mission range')
self.missionStateCodeVar.set('state')
self.missionNextTimeCodeVar.set('next time')
self.missionLoopTimeCodeVar.set('loop time')
self.missionisFinish.set('isFinish')
else:
self.windowTitleVar.set('查看全部')
self.missionIdVar.set('任务id')
self.missionNameVar.set('任务名')
self.missionRangeVar.set('任务范围')
self.missionStateCodeVar.set('任务进度')
self.missionNextTimeCodeVar.set('下次任务')
self.missionLoopTimeCodeVar.set('循环次数')
self.missionisFinish.set('是否完成')
def window(self):
self.addWindow = tkinter.Toplevel()
screenWidth = self.addWindow.winfo_screenwidth()
screenHeight = self.addWindow.winfo_screenheight()
self.addWindow.geometry(
'550x320+' + str(int((screenWidth - 550) / 2)) + '+' + str(int((screenHeight - 320) / 2)))
self.addWindow.resizable(width=False, height=False)
self.addWindow.title(self.windowTitleVar.get())
self.addWindow.iconbitmap('images/icon.ico')
self.tree = ttk.Treeview(self.addWindow, columns=['1', '2', '3', '4', '5', '6', '7'], show='headings',
height=15)
self.tree.column('1', width=80, anchor='center')
self.tree.column('2', width=80, anchor='center')
self.tree.column('3', width=80, anchor='center')
self.tree.column('4', width=80, anchor='center')
self.tree.column('5', width=80, anchor='center')
self.tree.column('6', width=80, anchor='center')
self.tree.column('7', width=80, anchor='center')
self.tree.heading('1', text=self.missionIdVar.get())
self.tree.heading('2', text=self.missionNameVar.get())
self.tree.heading('3', text=self.missionRangeVar.get())
self.tree.heading('4', text=self.missionStateCodeVar.get())
self.tree.heading('5', text=self.missionNextTimeCodeVar.get())
self.tree.heading('6', text=self.missionLoopTimeCodeVar.get())
self.tree.heading('7', text=self.missionisFinish.get())
self.tree.place(x=0, y=0, anchor='nw')
list = missionSystem.MissionSystem().loadMission()
for each in list:
dataInList = [each['missionId'], each['bookName'], each['missionRange'], each['state'], each['nextTime'],
each['loopTime'], each['isFinish']]
self.tree.insert('', 'end', values=dataInList)
|
StarcoderdataPython
|
5056729
|
<reponame>thanhn9/prometheus-workshop
from fastapi import FastAPI
from starlette.responses import PlainTextResponse
import prometheus_client
app = FastAPI(title="Python Service")
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.get("/metrics")
def read_metrics():
return PlainTextResponse(prometheus_client.generate_latest())
|
StarcoderdataPython
|
3212983
|
from cpc_api.api import CPCApi
import matplotlib.pyplot as plt
def main():
api = CPCApi(legislature='2017-2022')
meluche = api.search_parliamentarians('Melenchon')
arr_face_de_meluche = api.picture(meluche.slug, pixels=512)
plt.imshow(arr_face_de_meluche)
plt.show()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
11243434
|
DATE_TIME_STRINGS = ("year", "month", "week", "day", "hour", "minutes", "tomorrow")
CASHTAG = "$"
CRYPTO_CURRENCIES = (
"BTC",
"ETH",
"ADA",
"SOL",
"DOT",
"XRP",
"LTC",
"NEO",
"EOS",
"BNB",
"HT",
"LINK",
"BCH",
"TRX",
"XTZ",
)
NASDAQ_TRADING_HOURS = ["09:30:00", "16:00:00"]
WEEKEND_DAYS = [5, 6]
POSITIVE_RETURNS_EMOJI = "🚀🤑📈"
NEGATIVE_RETURNS_EMOJI = "😭📉"
ZERO_RETURNS_EMOJI = "🤷♂️"
GIF_FILE_NAME = "random.gif"
POSITIVE_RETURN_TAGS = ["money", "rich", "cash", "dollars"]
ZERO_RETURN_TAGS = ["shrug", "math"]
NEGATIVE_RETURN_GIFS = [
"https://media.giphy.com/media/3orieUs03VUeeBa7Wo/giphy.gif",
"https://media.giphy.com/media/l2JdWPIgVK83qxB4Y/giphy.gif",
"https://media.giphy.com/media/ToMjGppB1bJC04ESjyE/giphy.gif",
"https://media.giphy.com/media/3orieVe5VYqTdT16qk/giphy.gif"
"https://media.giphy.com/media/S3n6idriKtiFbZyqve/giphy.gif",
"https://media.giphy.com/media/qBykyt7AiTOgM/giphy.gif",
"https://media.giphy.com/media/xThtatVgZVprKd3UEU/giphy.gif",
]
REPORT_KEYWORDS = ["report", "analyse", "analyze"]
REPORT_FIELDS = [
"Name",
"Industry",
"EBITDA",
"EVToEBITDA",
"PERatio",
"PriceToBookRatio",
"ReturnOnEquityTTM",
"PEGRatio",
"Beta",
"BookValue",
"EPS",
"DilutedEPSTTM",
"RevenuePerShareTTM",
"ProfitMargin",
"TrailingPE",
"52WeekHigh",
"52WeekLow",
]
REPORT_FILE_NAME = "report.png"
REPORT_RESPONSE = "Knowledge is power! 🧠💪 Here is your company report for $"
CRYPTO_REPORT_RESPONSE = (
"Knowledge is power! 🧠💪 Here is your crypto ratings report for $"
)
CONFIRMATION_MESSAGES = ["Sure thing buddy!", "You got it boss!", "Sounds good!"]
FMP_API_RATING_ENDPOINT = "https://financialmodelingprep.com/api/v3/company/rating/"
FMP_API_GET_PRICE_ENDPOINT = "https://financialmodelingprep.com/api/v3/quote/"
API_LIMIT_EXCEEDED_ERROR = (
"Our standard API call frequency is 5 calls per minute and 500 calls per day."
)
HELP_MESSAGE = (
"To create a reminder, mention me with one or more ticker "
"symbols and a date. E.g. 'Remind me of $BTC in 3 months'. "
"You can read about all my other features and implementation "
"at: http://cutt.ly/Rh8CoJt"
)
API_LIMIT_EXCEEDED_RESPONSE = (
"Whoopsies. It looks like my api limit was exceeded. Please try again later "
)
STOCK_NOT_FOUND_RESPONSE = (
"Sorry, I couldn't find any securities under that ticker 😓. "
"I only support NASDAQ stocks and a few cryptocurrencies: "
"https://www.nasdaq.com/market-activity/stocks/screener."
)
|
StarcoderdataPython
|
96547
|
import json
import requests
import logging
from datetime import date, timedelta
logging.basicConfig(level=logging.ERROR)
class EODAPI:
BASE_URL = "https://eodhistoricaldata.com/api/"
def __init__(self, apiToken):
self.apiToken = apiToken
# Do basic API call to EOD API and retry if fail
def doRequest(self, url, params={}):
# Use API key and JSON format as default
defaultParams = {"fmt":"json", "api_token":self.apiToken}
# Overwrite with given parameters
requestParams = {**defaultParams, **params}
try:
req = requests.get(url, params=requestParams)
if req.status_code == 200:
return 200, json.loads(req.content.decode("utf-8"))
else:
logging.error('Error, status code: {0}'.format(req.status_code))
# Not authorized or no API calls left, unknown ticker
return req.status_code, None
except Exception as e:
logging.error(str(e))
return 500, None
# Get information about API limit
def getUserData(self):
return self.doRequest(self.BASE_URL+'user')
# Get the fundamentals of a stock
def getFundamentals(self, symbol, exchange):
return self.doRequest(self.BASE_URL+'fundamentals/{0}.{1}'.format(symbol, exchange))
# Get all available exchanges
def getExchangeList(self):
return self.doRequest(self.BASE_URL+'exchanges-list')
# Get all tickers of an exchange
def getTickers(self, exchange):
return self.doRequest(self.BASE_URL+'exchange-symbol-list/{0}'.format(exchange))
# Get information about trading hours and holidays
def getExchangeDetails(self, exchange, start="", end=""):
params = {"from":start, "to":end}
return self.doRequest(self.BASE_URL+'exchange-details/{0}'.format(exchange), params)
# Get 15-20 minutes delayed 1m price data
# Provide symbols with exchanges: VOW3.F,AAPL.US,MO.US
def getRealTimeData(self, symbols):
params = {}
if type(symbols) == list:
if len(symbols) > 1:
# Use s parameter for more than one symbol
params = {"s":symbols[1:]}
symbol = symbols[0]
else:
# One symbol in a list
symbol = symbols[0]
else:
# A symbol as string
symbol = symbols
return self.doRequest(self.BASE_URL+'real-time/{0}'.format(symbol), params)
# Get options for a stock
def getOptions(self, symbol, exchange):
return self.doRequest(self.BASE_URL+'options/{0}.{1}'.format(symbol, exchange))
# All dividends for a symbol
def getAllDividends(self, symbol, exchange):
return self.doRequest(self.BASE_URL+'div/{0}.{1}'.format(symbol, exchange))
# All splits for a symbol
def getAllSplits(self, symbol, exchange):
return self.doRequest(self.BASE_URL+'splits/{0}.{1}'.format(symbol, exchange))
# Get historical quotes
# Default: Get all available data
def getHistoricalData(self, symbol, exchange, start="1970-01-01", end="2050-12-31"):
params = {"period":"d", "from":start, "to":end}
return self.doRequest(self.BASE_URL+'eod/{0}.{1}'.format(symbol, exchange), params)
# Get bulk data for one exchange
# Default: Get only quotes
# You can also request splits and dividends.
# Returned data [statusQ, statusS, statusD], {"quotes":[], "splits":[], "dividends":[]}
def getBulk(self, exchange, quotes=True, splits=False, dividends=False, date=None):
statusQ = 200
statusS = 200
statusD = 200
quotesData = []
splitsData = []
dividendsData = []
params = {}
if date != None:
params["date"] = date
if quotes:
statusQ, quotesData = self.doRequest(self.BASE_URL+'eod-bulk-last-day/{0}'.format(exchange), params)
if splits:
params["type"] = "splits"
statusS, splitsData = self.doRequest(self.BASE_URL+'eod-bulk-last-day/{0}'.format(exchange), params)
if dividends:
params["type"] = "dividends"
statusD, dividendsData = self.doRequest(self.BASE_URL+'eod-bulk-last-day/{0}'.format(exchange), params)
# Return combined data
return [statusQ, statusS, statusD], {"quotes":quotesData, "splits":splitsData, "dividends":dividendsData}
# Get IPOs
# Maximum 10 years data request in one API call
def getIPOs(self, start, end):
params = {"from":start, "to":end}
return self.doRequest(self.BASE_URL+'calendar/ipos', params)
# Get only upcoming IPOs
def getUpcomingIPOs(self):
dateFrom = date.today()
dateTo = dateFrom + timedelta(days=364*10) # Add nearly 10 years
todayString = dateFrom.strftime("%Y-%m-%d")
endString = dateTo.strftime("%Y-%m-%d")
return self.getIPOs(todayString, endString)
# Get insider transactions
# Use 'AAPL.US' as code to get all insider transactions of a single stock
def getInsiderTransactions(self, start="", end="", code="", limit=1000):
params = {"from":start, "to":end, "limit":limit}
if len(code) > 0:
params["code"] = code
return self.doRequest(self.BASE_URL+'insider-transactions', params)
# Get technical indicator data
def getSplitAdjustedQuotes(self, symbol, exchange, start="", end="", agg_period='d'):
params = {"from":start, "to":end, "function":"splitadjusted", "agg_period":agg_period}
return self.doRequest(self.BASE_URL+'technical/'+symbol+'.'+exchange, params)
|
StarcoderdataPython
|
6443617
|
#-*- coding: utf-8 -*-
#!/usr/bin/python3
"""
Copyright (c) 2020 LG Electronics Inc.
SPDX-License-Identifier: MIT
"""
__version__ = "0.4.7"
|
StarcoderdataPython
|
273590
|
"""This module contains the meta information of OrgGetNumImpactedDomains ExternalMethod."""
from ..ucscentralcoremeta import MethodMeta, MethodPropertyMeta
method_meta = MethodMeta("OrgGetNumImpactedDomains", "orgGetNumImpactedDomains", "Version142b")
prop_meta = {
"cookie": MethodPropertyMeta("Cookie", "cookie", "Xs:string", "Version142b", "InputOutput", False),
"in_config": MethodPropertyMeta("InConfig", "inConfig", "ConfigConfig", "Version142b", "Input", True),
"in_domain_group_dn": MethodPropertyMeta("InDomainGroupDn", "inDomainGroupDn", "ReferenceObject", "Version142b", "Input", False),
"in_firmware_type": MethodPropertyMeta("InFirmwareType", "inFirmwareType", "Xs:string", "Version142b", "Input", False),
"out_num_domains_impacted": MethodPropertyMeta("OutNumDomainsImpacted", "outNumDomainsImpacted", "Xs:unsignedInt", "Version142b", "Output", False),
"out_num_domains_subscribed": MethodPropertyMeta("OutNumDomainsSubscribed", "outNumDomainsSubscribed", "Xs:unsignedInt", "Version142b", "Output", False),
"out_num_domains_unsubscribed": MethodPropertyMeta("OutNumDomainsUnsubscribed", "outNumDomainsUnsubscribed", "Xs:unsignedInt", "Version142b", "Output", False),
}
prop_map = {
"cookie": "cookie",
"inConfig": "in_config",
"inDomainGroupDn": "in_domain_group_dn",
"inFirmwareType": "in_firmware_type",
"outNumDomainsImpacted": "out_num_domains_impacted",
"outNumDomainsSubscribed": "out_num_domains_subscribed",
"outNumDomainsUnsubscribed": "out_num_domains_unsubscribed",
}
|
StarcoderdataPython
|
11362331
|
# _read_h5ad.py
__module_name__ = "_read_h5ad.py"
__author__ = ", ".join(["<NAME>"])
__email__ = ", ".join(["<EMAIL>",])
# package imports #
# --------------- #
from anndata import read_h5ad
def _read_h5ad(h5ad_path, silent=False):
"""Wraps anndata.read_h5ad"""
adata = read_h5ad(h5ad_path)
if not silent:
print(adata)
return adata
|
StarcoderdataPython
|
3492638
|
<reponame>nsteins/crash-model<filename>src/showcase/app.py
import os
from flask import Flask, render_template, send_from_directory
app = Flask(__name__)
CONFIG_FILE = os.path.join('static', 'config.js')
@app.route('/data/<path:path>')
def static_files(path):
return send_from_directory('data', path)
@app.route('/', methods=['GET', 'POST'])
def index():
if 'CONFIG_FILE' in os.environ:
global CONFIG_FILE
CONFIG_FILE = os.environ['CONFIG_FILE']
return render_template(
'index.html',
mapbox_token=os.environ['MAPBOX_TOKEN'],
config_file=CONFIG_FILE
)
if __name__ == '__main__':
app.run(host='0.0.0.0')
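# Run sketch (assumed environment): MAPBOX_TOKEN must be set before the first
# request; CONFIG_FILE optionally overrides the default static/config.js.
#   MAPBOX_TOKEN=<token> CONFIG_FILE=static/custom_config.js python app.py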
|
StarcoderdataPython
|
11331884
|
"""
Parser for the Telnet protocol. (Not a complete implementation of the telnet
specification, but sufficient for a command line interface.)
Inspired by `Twisted.conch.telnet`.
"""
import struct
from typing import Callable, Generator
from .log import logger
__all__ = [
"TelnetProtocolParser",
]
def int2byte(number: int) -> bytes:
return bytes((number,))
# Telnet constants.
NOP = int2byte(0)
SGA = int2byte(3)
IAC = int2byte(255)
DO = int2byte(253)
DONT = int2byte(254)
LINEMODE = int2byte(34)
SB = int2byte(250)
WILL = int2byte(251)
WONT = int2byte(252)
MODE = int2byte(1)
SE = int2byte(240)
ECHO = int2byte(1)
NAWS = int2byte(31)
LINEMODE = int2byte(34)
SUPPRESS_GO_AHEAD = int2byte(3)
DM = int2byte(242)
BRK = int2byte(243)
IP = int2byte(244)
AO = int2byte(245)
AYT = int2byte(246)
EC = int2byte(247)
EL = int2byte(248)
GA = int2byte(249)
class TelnetProtocolParser:
"""
Parser for the Telnet protocol.
Usage::
def data_received(data):
print(data)
def size_received(rows, columns):
print(rows, columns)
p = TelnetProtocolParser(data_received, size_received)
p.feed(binary_data)
"""
def __init__(
self,
data_received_callback: Callable[[bytes], None],
size_received_callback: Callable[[int, int], None],
) -> None:
self.data_received_callback = data_received_callback
self.size_received_callback = size_received_callback
self._parser = self._parse_coroutine()
self._parser.send(None) # type: ignore
def received_data(self, data: bytes) -> None:
self.data_received_callback(data)
def do_received(self, data: bytes) -> None:
""" Received telnet DO command. """
logger.info("DO %r", data)
def dont_received(self, data: bytes) -> None:
""" Received telnet DONT command. """
logger.info("DONT %r", data)
def will_received(self, data: bytes) -> None:
""" Received telnet WILL command. """
logger.info("WILL %r", data)
def wont_received(self, data: bytes) -> None:
""" Received telnet WONT command. """
logger.info("WONT %r", data)
def command_received(self, command: bytes, data: bytes) -> None:
if command == DO:
self.do_received(data)
elif command == DONT:
self.dont_received(data)
elif command == WILL:
self.will_received(data)
elif command == WONT:
self.wont_received(data)
else:
logger.info("command received %r %r", command, data)
def naws(self, data: bytes) -> None:
"""
Received NAWS. (Window dimensions.)
"""
if len(data) == 4:
# NOTE: the first parameter of struct.unpack should be
# a 'str' object. Both on Py2/py3. This crashes on OSX
# otherwise.
columns, rows = struct.unpack(str("!HH"), data)
self.size_received_callback(rows, columns)
else:
logger.warning("Wrong number of NAWS bytes")
def negotiate(self, data: bytes) -> None:
"""
Got negotiate data.
"""
command, payload = data[0:1], data[1:]
if command == NAWS:
self.naws(payload)
else:
logger.info("Negotiate (%r got bytes)", len(data))
def _parse_coroutine(self) -> Generator[None, bytes, None]:
"""
Parser state machine.
Every 'yield' expression returns the next byte.
"""
while True:
d = yield
if d == int2byte(0):
pass # NOP
# Go to state escaped.
elif d == IAC:
d2 = yield
if d2 == IAC:
self.received_data(d2)
# Handle simple commands.
elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
self.command_received(d2, b"")
# Handle IAC-[DO/DONT/WILL/WONT] commands.
elif d2 in (DO, DONT, WILL, WONT):
d3 = yield
self.command_received(d2, d3)
# Subnegotiation
elif d2 == SB:
# Consume everything until next IAC-SE
data = []
while True:
d3 = yield
if d3 == IAC:
d4 = yield
if d4 == SE:
break
else:
data.append(d4)
else:
data.append(d3)
self.negotiate(b"".join(data))
else:
self.received_data(d)
def feed(self, data: bytes) -> None:
"""
Feed data to the parser.
"""
for b in data:
self._parser.send(int2byte(b))
|
StarcoderdataPython
|
11397919
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table(u'report_userprofile')
def backwards(self, orm):
# Adding model 'UserProfile'
db.create_table(u'report_userprofile', (
('bio', self.gf('django.db.models.fields.TextField')(null=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
('wants_newsletters', self.gf('django.db.models.fields.BooleanField')()),
('wants_alerts', self.gf('django.db.models.fields.BooleanField')()),
('position', self.gf('django.db.models.fields.TextField')(null=True)),
('has_optedout', self.gf('django.db.models.fields.BooleanField')()),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('wants_marketinfo', self.gf('django.db.models.fields.BooleanField')()),
))
db.send_create_signal(u'report', ['UserProfile'])
models = {
u'report.client': {
'Meta': {'object_name': 'Client'},
'clientcity': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'clientdma': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'clientdmmimage': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'clientemail': ('django.db.models.fields.EmailField', [], {'max_length': '50'}),
'clienthasoptedout': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'clienthitlistimage': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'clientname': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'clientshopimage': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'clientsocialimage': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'clientstate': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'clientutilityimage': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'clientwebsite': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'dataiumclientid': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'dataiumreportmonth': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'report.clientcrossshop': {
'Meta': {'object_name': 'ClientCrossshop'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shops'", 'to': u"orm['report.Client']"}),
'clientmodel': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'crossmodel': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'dataiumclientid': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shops': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'report.clienthitlist': {
'Meta': {'object_name': 'ClientHitList'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hitlist'", 'to': u"orm['report.Client']"}),
'dataiumclientid': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastviewed': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shopperindex': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'stocknumber': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'vehicle': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'report.clientmodelmomentum': {
'Meta': {'object_name': 'ClientModelMomentum'},
'active': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dmm'", 'to': u"orm['report.Client']"}),
'clientmodel': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'dataiumclientid': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'dmm': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'yearmonth': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'report.clientnewusedsplit': {
'Meta': {'object_name': 'ClientNewUsedSplit'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newused'", 'to': u"orm['report.Client']"}),
'dataiumclientid': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'newpercent': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'usedpercent': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'report.clientsearchkeyword': {
'Meta': {'object_name': 'ClientSearchKeyword'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'search'", 'to': u"orm['report.Client']"}),
'dataiumclientid': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'searchkeyword': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'searchkw_pageviews': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['report']
|
StarcoderdataPython
|
5163735
|
import time
import argparse
import sys
class Auxiliar:
dp = [[]]
def readFile(v):
try:
file = args.folder
except Exception as e:
print(e)
sys.exit("Directory Not Found, for help use the -h option")
try:
f = open(file, 'r')
N = int (f.readline())
for j in range (N):
element = f.readline()
v.append(int(element))
except Exception as e:
print(e)
sys.exit("Vector is not named in the correct way, for help use the -h option")
finally:
f.close()
def printInput(v):
print("\nVector is: ", v)
def printTime(initial, final, vector):
n = str (len(vector))
runtime = final - initial
print("For", n, "elements -> Taken time: ", str(runtime) + " s.")
def printOutput(vector, found):
if found:
print("\nVector can be partitioned")
else:
print("\nVector can not be partitioned")
exit()
set1 = []
set2 = []
x = len(vector)
currSum = int (sum(vector)/2)
print("Results: ", currSum)
while x > 0 and currSum >= 0:
if Auxiliar.dp[x-1][currSum]:
x = x - 1
set1.append(vector[x])
elif Auxiliar.dp[x-1][currSum-vector[x-1]]:
x = x - 1
currSum = currSum - vector[x]
set2.append(vector[x])
print("Set 1: ", set1)
print("Set 2: ", set2)
def canPartition(num):
if sum(num) % 2 != 0:
return False
else:
return True
def canPartitionTab(num):
s = sum(num)
s = int(s / 2)
n = len(num)
Auxiliar.dp = [[False for x in range(s+1)] for y in range(n+1)]
for i in range(0, n+1):
Auxiliar.dp[i][0] = True
for j in range(1, s+1):
Auxiliar.dp[0][j] = False
for i in range(1, n+1):
for j in range(1, s+1):
if j < num[i-1]:
Auxiliar.dp[i][j] = Auxiliar.dp[i - 1][j]
else:
Auxiliar.dp[i][j] = Auxiliar.dp[i-1][j] or Auxiliar.dp[i - 1][j - num[i-1]]
return Auxiliar.dp[n][s]
if __name__ == '__main__':
# Command line library implementation
parser = argparse.ArgumentParser(description = "Partition problem is to determine whether a given set can be partitioned into two subsets such that the sum of elements in both subsets is same. For further information, use the option '-h'.")
parser.add_argument('-di', '--input', action='store_true', help='Display input data')
parser.add_argument('-do', '--output', action='store_true', help='Display output data')
parser.add_argument('-dt', '--time', action='store_true', help='Display taken time in seconds')
parser.add_argument('-f', '--folder', metavar='', type=str, required=True, help='Read elements from a file')
args = parser.parse_args()
vector = []
readFile(vector)
if canPartition(vector):
initial=time.time()
found=canPartitionTab(vector)
final=time.time()
else:
print("Vector can not be partitioned")
# Control of options
if args.input: printInput(vector)
if args.output: printOutput(vector, found)
if args.time: printTime(initial, final, vector)
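# Example invocation (assuming this script is saved as partition.py and input.txt
# holds the element count on the first line followed by one integer per line):
#   python partition.py -f input.txt -di -do -dt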
|
StarcoderdataPython
|
5111832
|
<reponame>13thProgression/gold-blockchain<filename>chia/util/service_groups.py
from typing import Generator, KeysView
SERVICES_FOR_GROUP = {
"all": "gold_harvester gold_timelord_launcher gold_timelord gold_farmer gold_full_node gold_wallet".split(),
"node": "gold_full_node".split(),
"harvester": "gold_harvester".split(),
"farmer": "gold_harvester gold_farmer gold_full_node gold_wallet".split(),
"farmer-no-wallet": "gold_harvester gold_farmer gold_full_node".split(),
"farmer-only": "gold_farmer".split(),
"timelord": "gold_timelord_launcher gold_timelord gold_full_node".split(),
"timelord-only": "gold_timelord".split(),
"timelord-launcher-only": "gold_timelord_launcher".split(),
"wallet": "gold_wallet gold_full_node".split(),
"wallet-only": "gold_wallet".split(),
"introducer": "gold_introducer".split(),
"simulator": "gold_full_node_simulator".split(),
}
def all_groups() -> KeysView[str]:
return SERVICES_FOR_GROUP.keys()
def services_for_groups(groups) -> Generator[str, None, None]:
for group in groups:
for service in SERVICES_FOR_GROUP[group]:
yield service
def validate_service(service: str) -> bool:
return any(service in _ for _ in SERVICES_FOR_GROUP.values())
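# Usage sketch:
# list(services_for_groups(["farmer-only", "wallet-only"])) -> ['gold_farmer', 'gold_wallet']
# validate_service("gold_wallet") -> True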
|
StarcoderdataPython
|
1623154
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='top2']/div[@id='productName']|//div[@class='top2']/div[@class='name']",
'price' : "//div[@class='pm-2']/div[@class='detail2']/div[@class='row-cl-2']/div[@class='row-right']/strong",
'category' : "//div[@class='breadcrumb-new detail']/ul/li/a",
'description' : "//div[@class='detail-content description']/div[@class='item-info']",
'images' : "//a[@class='thumb']/@href",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'weshop.com.vn'
allowed_domains = ['weshop.com.vn']
start_urls = ['http://weshop.com.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
Rule(LinkExtractor(allow=['^((?!item).)+/item/((?!item).)+-\d+\.html$'], deny=['facebook']), 'parse_item'),
Rule(LinkExtractor(allow=['^((?!category).)+/category/((?!category).)+-\d+\.html($|\?page=\d+$)'], deny=['facebook']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
StarcoderdataPython
|