id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses, 1 value) |
---|---|---|
11225265
|
#!/usr/bin/env python3
from evawiz_basic import *
import os, sys  # used directly below; evawiz_basic may also re-export these via the wildcard import
if ( len(sys.argv)<2 ):
exit(0);
pre = " -Wl,"
if ( sys.argv[1] == "tcu" ):
pre = " -Xlinker "
pass
inc= "";
def add_path(path,absolute=False):
global inc
if absolute:
inc += "%s-rpath='%s'"%(pre,path)
else:
inc += "%s-rpath='$ORIGIN/%s' "%(pre,path)
pass
pass
if ( len(sys.argv)== 3 and sys.argv[1] == 'tcu' and sys.argv[2] == 'MAIN_EVA' ):
home_evawiz = os.getenv("HOME_EVAWIZ")
if not home_evawiz: home_evawiz = os.getenv("HOME")+"/evawiz"
evawiz_root = os.getenv("EVAWIZ_ROOT")
if not evawiz_root: evawiz_root = '/opt/evawiz'
evawiz = evawiz_root+'/evawiz'
add_path('/opt/evawiz/base/staticlibs',True)
add_path('/opt/evawiz/gcc/lib64',True)
add_path('/opt/evawiz/cuda-7.0/lib64',True)
add_path('/opt/evawiz/openmpi/lib',True)
add_path('/opt/evawiz/evawiz/lib',True)
add_path('/opt/evawiz/python/lib',True)
add_path('/opt/evawiz/templibs',True)
matlab_root = os.getenv("MATLAB_ROOT")
if matlab_root:
add_path("%s/bin/glnxa64"%matlab_root,True)
add_path("%s/sys/os/glnxa64"%matlab_root,True)
sys.stdout.write(inc);
exit(0);
pass
###for normal modules
add_path('lib');
add_path('');
home_evawiz = os.getenv("HOME_EVAWIZ")
if not home_evawiz: home_evawiz = os.getenv("HOME")+"/evawiz"
evawiz_root = os.getenv("EVAWIZ_ROOT")
if not evawiz_root: evawiz_root = '/opt/evawiz'
evawiz = evawiz_root+'/evawiz'
for i in range(2,len(sys.argv)):
is_find = False;
mod = sys.argv[i]
path = os.path.join( home_evawiz,mod);
if os.path.exists(path):
lib_path = os.path.join( path, 'lib');
if os.path.exists( lib_path ):
add_path('../%s/lib'%mod);
pass
add_path('../'+mod);
pass
path = os.path.join(evawiz,"modules/"+mod);
if os.path.exists(path):
lib_path = os.path.join( path, 'lib');
if os.path.exists( lib_path ):
add_path(lib_path,True);
pass
add_path(path,True);
pass
pass
if os.path.exists( 'lib' ):
add_path('lib');
pass
sys.stdout.write(inc)
exit(0);
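# For reference, this script writes a single space-separated string of rpath
# linker flags to stdout, e.g. (illustrative only):
#   " -Wl,-rpath='/opt/evawiz/gcc/lib64' -Wl,-rpath='$ORIGIN/lib' "
# or the equivalent "-Xlinker -rpath=..." form when the first argument is "tcu".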
|
StarcoderdataPython
|
9740813
|
#
# Try to tell the difference between the images
#
#import cv2
from PIL import Image
import numpy
def split(img):
pixels = list(img.getdata())
r = []
g = []
b = []
for p in pixels:
r.append(p[0])
g.append(p[1])
b.append(p[2])
rr = numpy.asarray(r)
gg = numpy.asarray(g)
bb = numpy.asarray(b)
return rr,gg,bb
def analyse():
inimg = Image.open('Lenna.png')
outimg = Image.open('LennaR.png') #imread
#print inimg.getpixel((1,1))
ri,gi,bi = split(inimg)
ro,go,bo = split(outimg)
errR = (ro - ri) ** 2  # square the per-channel errors so the values below are mean *squared* errors
errG = (go - gi) ** 2
errB = (bo - bi) ** 2
n = float(inimg.size[0] * inimg.size[1])
MSER = sum(errR) / n
MSEG = sum(errG) / n
MSEB = sum(errB) / n
print(MSER, MSEG, MSEB)
#PSNR = 20*log10(255) - 10*log10(MSE)
analyse()
|
StarcoderdataPython
|
341865
|
<gh_stars>10-100
from django.core.management.base import BaseCommand
from django.db import connections
from tsdata.sql import get_add_constraints_and_indexes
class Command(BaseCommand):
"""Inspect and print current NC database constraints and indexes"""
def handle(self, *args, **options):
cursor = connections['traffic_stops_nc'].cursor()
print(get_add_constraints_and_indexes(cursor))
|
StarcoderdataPython
|
4851696
|
<filename>tests/utils.py
import json
from time import time
from async_asgi_testclient import TestClient
from qbot.utils import calculate_signature
async def send_slack_request(event: dict, client: TestClient):
timestamp = time()
data = json.dumps(event).encode("utf-8")
signature = calculate_signature(timestamp, data)
return await client.post(
"/",
headers={
"X-Slack-Request-Timestamp": str(timestamp),
"X-Slack-Signature": signature,
},
data=data,
)
|
StarcoderdataPython
|
9651918
|
import sys
import time
def go_sukiya(wallet, is_bilk):
menu = {
"牛丼ミニ": 290,
"牛丼並盛": 350,
"牛丼中盛": 480
}
print("いらっしゃいませ!")
if is_bilk:
print("お客様は一度食い逃げしています")
print("警察を呼びます")
sys.exit()
print("何をご注文なさいますか?")
products = choice_multiple(menu)
price = 0
for product in products:
price += menu[product]
if can_pay(wallet, price):
wallet = wallet - price
print("お釣りは" + str(wallet) + "です")
print("ありがとうございました!またのご来店をお待ちしております")
else:
print("支払えるだけのお金を持っていないようですね")
print("・・・どうする?(1 or 2)\n1. 食い逃げする\n2. 皿洗いする")
while(True):
action_number = input()
if action_number == "1":
print("食い逃げしてしまった")
print("もう店にはいけない")
is_bilk = True
break
elif action_number == "2":
wallet = wallet - price
print("皿洗いをします")
print("足りないお金分(" + str(abs(wallet)) +")働きます。")
while(wallet < 0):
time.sleep(0.5)
wallet += 50
print("現在の負債は" + str(abs(wallet)) + "円です")
print("お疲れ様でした")
break
else:
print("どちらか選んでください(1 or 2)")
return wallet, is_bilk
def can_pay(wallet, price):
if wallet == 0:
return False
if wallet - price < 0:
return False
return True
def choice(menu):
print(menu)
product = input()
if not product in menu:
print()
print("お店にある商品を選んでください")
product = choice(menu)
return product
def choice_multiple(menu):
products = []
while (True):
products.append(choice(menu))
print("他にも注文しますか?(y/n)")
want_buy_again = input()
if not want_buy_again == "y":
break
return products
if __name__ == "__main__":
wallet = 1000
is_bilk = False
while (True):
print("現在の所持金:" + str(wallet))
wallet, is_bilk = go_sukiya(wallet, is_bilk)
if wallet == 0:
print("もうすき家にはいけません...")
break
else:
print("もう一度すき家にいきますか?(y/n)")
is_visit = input()
if is_visit == "n":
break
|
StarcoderdataPython
|
4972285
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A demo for object detection.
For Raspberry Pi, you need to install 'feh' as an image viewer:
sudo apt-get install feh
Example (Running under edgetpu repo's root directory):
- Face detection:
python3 edgetpu/demo/object_detection.py \
--model='test_data/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite' \
--input='test_data/face.jpg' \
--keep_aspect_ratio
- Pet detection:
python3 edgetpu/demo/object_detection.py \
--model='test_data/ssd_mobilenet_v1_fine_tuned_edgetpu.tflite' \
--label='test_data/pet_labels.txt' \
--input='test_data/pets.jpg' \
--keep_aspect_ratio
'--output' is an optional flag to specify file name of output image.
At this moment we only support SSD models with a postprocessing operator. Other
models such as YOLO won't work.
"""
import argparse
import platform
import subprocess
from edgetpu.detection.engine import DetectionEngine
from edgetpu.utils import dataset_utils
from PIL import Image
from PIL import ImageDraw
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
help='Path of the detection model; it must be an SSD model with a postprocessing operator.',
required=True)
parser.add_argument('--label', help='Path of the labels file.')
parser.add_argument(
'--input', help='File path of the input image.', required=True)
parser.add_argument('--output', help='File path of the output image.')
parser.add_argument(
'--keep_aspect_ratio',
dest='keep_aspect_ratio',
action='store_true',
help=(
'keep the image aspect ratio when down-sampling the image by adding '
'black pixel padding (zeros) on bottom or right. '
'By default the image is resized and reshaped without cropping. This '
'option should be the same as what is applied on input images during '
'model training. Otherwise the accuracy may be affected and the '
'bounding box of detection result may be stretched.'))
parser.set_defaults(keep_aspect_ratio=False)
args = parser.parse_args()
if not args.output:
output_name = 'object_detection_result.jpg'
else:
output_name = args.output
# Initialize engine.
engine = DetectionEngine(args.model)
labels = dataset_utils.ReadLabelFile(args.label) if args.label else None
# Open image.
img = Image.open(args.input)
draw = ImageDraw.Draw(img)
# Run inference.
ans = engine.DetectWithImage(
img,
threshold=0.05,
keep_aspect_ratio=args.keep_aspect_ratio,
relative_coord=False,
top_k=10)
# Display result.
if ans:
for obj in ans:
print('-----------------------------------------')
if labels:
print(labels[obj.label_id])
print('score = ', obj.score)
box = obj.bounding_box.flatten().tolist()
print('box = ', box)
# Draw a rectangle.
draw.rectangle(box, outline='red')
img.save(output_name)
if platform.machine() == 'x86_64':
# For gLinux, simply show the image.
img.show()
elif platform.machine() == 'armv7l':
# For Raspberry Pi, you need to install 'feh' to display image.
subprocess.Popen(['feh', output_name])
else:
print('Please check ', output_name)
else:
print('No object detected!')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
11257109
|
<gh_stars>10-100
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the structure to model customers."""
# import ast
import random
from . import utils
class Customer(object):
"""This class contains information of a customer."""
def __init__(self, facts_obj, book_window, airport_list):
# 1. origin and destination, airport_list guarantees to have unique locations
self.origin = random.randint(0, len(airport_list) - 1)
self.dest = random.randint(0, len(airport_list) - 1)
if self.dest == self.origin:
self.dest = (self.dest + 1) % len(airport_list)
self.dest = airport_list[self.dest]
self.origin = airport_list[self.origin]
# 2. date
base_time = facts_obj.base_departure_time_epoch
a_year_from_now = base_time + 3600 * 24 * 365
# randomly pick a date between base_time and a_year_from_now
self.departure_date = random.randint(base_time, a_year_from_now)
# return date is book_window away from the departure date
self.return_date = self.departure_date + 3600 * 24 * book_window
# 4. passenger information
num_passengers = 1
self.passengers = []
len_first_name = len(facts_obj.first_name_list)
len_last_name = len(facts_obj.last_name_list)
# '_' will later be replaced in intent standardization
for _ in range(num_passengers):
self.passengers.append(
facts_obj.first_name_list[random.randint(0, len_first_name - 1)] +
'_' + facts_obj.last_name_list[random.randint(0, len_last_name - 1)])
# non-required fields during initial query
# 3. time
self.departure_time = utils.choice(
facts_obj.time_list, 1, p=facts_obj.time_prior)[0]
self.return_time = utils.choice(
facts_obj.time_list, 1, p=facts_obj.time_prior)[0]
# 5. class limit and price limit
self.class_limit = utils.choice(
facts_obj.class_list, 1, p=facts_obj.class_list_prior)[0]
# 6. price limit
if self.class_limit == 'all':
self.price_limit = facts_obj.price_limit_list[random.randint(
0,
len(facts_obj.price_limit_list) - 1)]
elif self.class_limit == 'economy':
self.price_limit = facts_obj.price_limit_list[random.randint(
0,
len(facts_obj.price_limit_list) - 2)]
elif self.class_limit == 'business':
self.price_limit = facts_obj.price_limit_list[random.randint(
1,
len(facts_obj.price_limit_list) - 1)]
# 7. num of connections
self.max_connection = utils.choice(
facts_obj.connection_member, 1, p=facts_obj.connection_prior)[0]
# 8. airline preference
self.airline = utils.choice(
facts_obj.airline_preference, 1,
p=facts_obj.airline_preference_prior)[0]
# 10 post process
self.departure_month, self.departure_day = utils.get_month_and_day(
facts_obj, self.departure_date)
self.return_month, self.return_day = utils.get_month_and_day(
facts_obj, self.return_date)
# 11 change reservation
self.goal = utils.choice([0, 1, 2], p=facts_obj.goal_probaility)
def get_departure_and_return_date(self):
return self.departure_date, self.return_date
def get_json(self):
"""This function serializes the object into a json."""
intention_jobject = {}
intention_jobject['departure_airport'] = self.origin
intention_jobject['return_airport'] = self.dest
intention_jobject['departure_month'] = self.departure_month
intention_jobject['departure_day'] = self.departure_day
intention_jobject['return_month'] = self.return_month
intention_jobject['return_day'] = self.return_day
intention_jobject['name'] = self.passengers[0]
intention_jobject['departure_time'] = self.departure_time
intention_jobject['return_time'] = self.return_time
intention_jobject['class'] = self.class_limit
intention_jobject['max_price'] = self.price_limit
intention_jobject['max_connections'] = self.max_connection
intention_jobject['airline_preference'] = self.airline
intention_jobject['goal'] = self.goal
# add departure and return date
intention_jobject['departure_date'] = self.departure_date
intention_jobject['return_date'] = self.return_date
return intention_jobject
def get_customer_condition(self):
"""This function returns the condition file."""
condition = self.get_json()
if condition['airline_preference'] == 'all':
del condition['airline_preference']
if condition['max_connections'] == 2:
del condition['max_connections']
if condition['class'] == 'all':
del condition['class']
if condition['departure_time'] == 'all':
del condition['departure_time']
if condition['return_time'] == 'all':
del condition['return_time']
return condition
|
StarcoderdataPython
|
11330303
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Authors: <NAME>
# Imports.
import sys; sys.path += [".", ".."]
import argparse as Ap
import logging as L
import numpy as np
import os, pdb, sys
import time
import tensorflow.compat.v1 as tf
__version__ = "0.0.0"
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
config.gpu_options.allow_growth = True
tf.Session(config=config)
#
# Message Formatter
#
class MsgFormatter(L.Formatter):
"""Message Formatter
Formats messages with time format YYYY-MM-DD HH:MM:SS.mmm TZ
"""
def formatTime(self, record, datefmt):
t = record.created
timeFrac = abs(t-int(t))
timeStruct = time.localtime(record.created)
timeString = ""
timeString += time.strftime("%F %T", timeStruct)
timeString += "{:.3f} ".format(timeFrac)[1:]
timeString += time.strftime("%Z", timeStruct)
return timeString
#############################################################################################################
############################## Subcommands ##################################
#############################################################################################################
class Subcommand(object):
name = None
@classmethod
def addArgParser(cls, subp, *args, **kwargs):
argp = subp.add_parser(cls.name, usage=cls.__doc__, *args, **kwargs)
cls.addArgs(argp)
argp.set_defaults(__subcmdfn__=cls.run)
return argp
@classmethod
def addArgs(cls, argp):
pass
@classmethod
def run(cls, d):
pass
class Screw(Subcommand):
"""Screw around with me in Screw(Subcommand)."""
name = "screw"
@classmethod
def run(cls, d):
print(cls.__doc__)
class Train(Subcommand):
name = "train"
LOGLEVELS = {"none":L.NOTSET, "debug": L.DEBUG, "info": L.INFO,
"warn":L.WARN, "err": L.ERROR, "crit": L.CRITICAL}
@classmethod
def addArgs(cls, argp):
argp.add_argument("-d", "--datadir", default=".", type=str,
help="Path to datasets directory.")
argp.add_argument("-w", "--workdir", default=".", type=str,
help="Path to the workspace directory for this experiment.")
argp.add_argument("-l", "--loglevel", default="info", type=str,
choices=cls.LOGLEVELS.keys(),
help="Logging severity level.")
argp.add_argument("-s", "--seed", default=0xe4223644e98b8e64, type=int,
help="Seed for PRNGs.")
argp.add_argument("--summary", action="store_true",
help="""Print a summary of the network.""")
argp.add_argument("--model", default="complex", type=str,
choices=["real", "complex"],
help="Model Selection.")
argp.add_argument("--dataset", default="cifar10", type=str,
choices=["mnist","cifar10", "others"],
help="Dataset Selection.")
argp.add_argument("--dropout", default=0, type=float,
help="Dropout probability.")
argp.add_argument("-n", "--num-epochs", default=200, type=int,
help="Number of epochs")
argp.add_argument("-b", "--batch-size", default=64, type=int,
help="Batch Size")
argp.add_argument("--start-filter", "--sf", default=11, type=int,
help="Number of feature maps in starting stage")
argp.add_argument("--num-blocks", "--nb", default=10, type=int,
help="Number of filters in initial block")
argp.add_argument("--spectral-param", action="store_true",
help="""Use spectral parametrization.""")
argp.add_argument("--spectral-pool-gamma", default=0.50, type=float,
help="""Use spectral pooling, preserving a fraction gamma of frequencies""")
argp.add_argument("--spectral-pool-scheme", default="none", type=str,
choices=["none", "stagemiddle", "proj", "nodownsample"],
help="""Spectral pooling scheme""")
argp.add_argument("--act", default="relu", type=str,
choices=["relu"],
help="Activation.")
argp.add_argument("--aact", default="modrelu", type=str,
choices=["modrelu"],
help="Advanced Activation.")
argp.add_argument("--no-validation", action="store_true",
help="Do not create a separate validation set.")
argp.add_argument("--comp_init", default='complex_independent', type=str,
help="Initializer for the complex kernel.")
optp = argp.add_argument_group("Optimizers", "Tunables for all optimizers")
optp.add_argument("--optimizer", "--opt", default="nag", type=str,
choices=["sgd", "nag", "adam", "rmsprop"],
help="Optimizer selection.")
optp.add_argument("--clipnorm", "--cn", default=1.0, type=float,
help="The norm of the gradient will be clipped at this magnitude.")
optp.add_argument("--clipval", "--cv", default=1.0, type=float,
help="The values of the gradients will be individually clipped at this magnitude.")
optp.add_argument("--l1", default=0, type=float,
help="L1 penalty.")
optp.add_argument("--l2", default=0, type=float,
help="L2 penalty.")
optp.add_argument("--lr", default=1e-3, type=float,
help="Master learning rate for optimizers.")
optp.add_argument("--momentum", "--mom", default=0.9, type=float,
help="Momentum for optimizers supporting momentum.")
optp.add_argument("--decay", default=0, type=float,
help="Learning rate decay for optimizers.")
optp.add_argument("--schedule", default="default", type=str,
help="Learning rate schedule")
optp = argp.add_argument_group("Adam", "Tunables for Adam optimizer")
optp.add_argument("--beta1", default=0.9, type=float,
help="Beta1 for Adam.")
optp.add_argument("--beta2", default=0.999, type=float,
help="Beta2 for Adam.")
optp.add_argument('--input_shape', default=(256,256,3*2))
@classmethod
def run(cls, d):
if not os.path.isdir(d.workdir):
os.mkdir(d.workdir)
logDir = os.path.join(d.workdir, "logs")
if not os.path.isdir(logDir):
os.mkdir(logDir)
logFormatter = MsgFormatter ("[%(asctime)s ~~ %(levelname)-8s] %(message)s")
stdoutLogSHandler = L.StreamHandler(sys.stdout)
stdoutLogSHandler .setLevel (cls.LOGLEVELS[d.loglevel])
stdoutLogSHandler .setFormatter (logFormatter)
defltLogger = L.getLogger ()
defltLogger .setLevel (cls.LOGLEVELS[d.loglevel])
defltLogger .addHandler (stdoutLogSHandler)
trainLogFilename = os.path.join(d.workdir, "logs", "train.txt")
trainLogFHandler = L.FileHandler (trainLogFilename, "a", "UTF-8", delay=True)
trainLogFHandler .setLevel (cls.LOGLEVELS[d.loglevel])
trainLogFHandler .setFormatter (logFormatter)
trainLogger = L.getLogger ("train")
trainLogger .setLevel (cls.LOGLEVELS[d.loglevel])
trainLogger .addHandler (trainLogFHandler)
entryLogFilename = os.path.join(d.workdir, "logs", "entry.txt")
entryLogFHandler = L.FileHandler (entryLogFilename, "a", "UTF-8", delay=True)
entryLogFHandler .setLevel (cls.LOGLEVELS[d.loglevel])
entryLogFHandler .setFormatter (logFormatter)
entryLogger = L.getLogger ("entry")
entryLogger .setLevel (cls.LOGLEVELS[d.loglevel])
entryLogger .addHandler (entryLogFHandler)
np.random.seed(d.seed % 2**32)
import training;training.train(d)
#############################################################################################################
############################## Argument Parsers #################################
#############################################################################################################
def getArgParser(prog):
argp = Ap.ArgumentParser(prog = prog,
usage = None,
description = None,
epilog = None
)
subp = argp.add_subparsers()
argp.set_defaults(argp=argp)
argp.set_defaults(subp=subp)
# Add global args to argp here?
# ...
# Add subcommands
for v in globals().values():
if(isinstance(v, type) and
issubclass(v, Subcommand) and
v != Subcommand):
v.addArgParser(subp)
# Return argument parser.
return argp
#############################################################################################################
############################## Main ##################################
#############################################################################################################
def main(argv):
sys.setrecursionlimit(10000)
d = getArgParser(argv[0]).parse_args(argv[1:])
return d.__subcmdfn__(d)
if __name__ == "__main__":
main(sys.argv)
|
StarcoderdataPython
|
3279913
|
<gh_stars>10-100
#! /usr/bin/env python3
# -*- coding: utf-8; py-indent-offset: 4 -*-
#
# Author: Linuxfabrik GmbH, Zurich, Switzerland
# Contact: info (at) linuxfabrik (dot) ch
# https://www.linuxfabrik.ch/
# License: The Unlicense, see LICENSE file.
# https://git.linuxfabrik.ch/linuxfabrik-icinga-plugins/checks-linux/-/blob/master/CONTRIBUTING.md
"""This library defines the global plugin states, based on the POSIX
spec of returning a positive value and just like in
`monitoring-plugins/plugins-scripts/utils.sh.in`, except that we do not
make use of `STATE_DEPENDENT`.
STATE_OK = 0: The plugin was able to check the service and it appeared
to be functioning properly.
STATE_WARN = 1: The plugin was able to check the service, but it
appeared to be above some "warning" threshold or did not appear to be
working properly.
STATE_CRIT = 2: The plugin detected that either the service was not
running or it was above some "critical" threshold.
STATE_UNKNOWN = 3: Invalid command line arguments were supplied to the
plugin or low-level failures internal to the plugin (such as unable to
fork, or open a tcp socket) that prevent it from performing the
specified operation. Higher-level errors (such as name resolution
errors, socket timeouts, etc) are outside of the control of plugins and
should generally NOT be reported as UNKNOWN states.
"""
__author__ = 'Linuxfabrik GmbH, Zurich/Switzerland'
__version__ = '2020043001'
STATE_OK = 0
STATE_WARN = 1
STATE_CRIT = 2
STATE_UNKNOWN = 3
#STATE_DEPENDENT = 4
|
StarcoderdataPython
|
8024067
|
<filename>iris/commons/clickhouse.py
import asyncio
import os
from collections.abc import Iterator
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from datetime import datetime
from logging import LoggerAdapter
from pathlib import Path
from typing import Any
import aiofiles.os
from diamond_miner.queries import (
CreateTables,
DropTables,
InsertLinks,
InsertPrefixes,
Query,
StoragePolicy,
links_table,
prefixes_table,
results_table,
)
from diamond_miner.subsets import subsets_for
from pych_client import AsyncClickHouseClient, ClickHouseClient
from iris.commons.filesplit import split_compressed_file
from iris.commons.settings import CommonSettings, fault_tolerant
def iter_file(file: str, *, read_size: int = 2**20) -> Iterator[bytes]:
with open(file, "rb") as f:
while True:
chunk = f.read(read_size)
if not chunk:
break
yield chunk
def measurement_id(measurement_uuid: str, agent_uuid: str) -> str:
return f"{measurement_uuid}__{agent_uuid}"
@dataclass(frozen=True)
class ClickHouse:
settings: CommonSettings
logger: LoggerAdapter
@fault_tolerant
async def call(self, query: str, params: dict | None = None) -> list[dict]:
async with AsyncClickHouseClient(**self.settings.clickhouse) as client:
return await client.json(query, params)
@fault_tolerant
async def execute(
self, query: Query, measurement_id_: str, **kwargs: Any
) -> list[dict]:
with ClickHouseClient(**self.settings.clickhouse) as client:
return query.execute(client, measurement_id_, **kwargs)
async def create_tables(
self,
measurement_uuid: str,
agent_uuid: str,
prefix_len_v4: int,
prefix_len_v6: int,
*,
drop: bool = False,
) -> None:
self.logger.info("Creating tables")
if drop:
await self.drop_tables(measurement_uuid, agent_uuid)
await self.execute(
CreateTables(
prefix_len_v4=prefix_len_v4,
prefix_len_v6=prefix_len_v6,
storage_policy=StoragePolicy(
name=self.settings.CLICKHOUSE_STORAGE_POLICY,
archive_to=self.settings.CLICKHOUSE_ARCHIVE_VOLUME,
archive_on=datetime.utcnow()
+ self.settings.CLICKHOUSE_ARCHIVE_INTERVAL,
),
),
measurement_id(measurement_uuid, agent_uuid),
)
async def drop_tables(self, measurement_uuid: str, agent_uuid: str) -> None:
self.logger.info("Deleting tables")
await self.execute(DropTables(), measurement_id(measurement_uuid, agent_uuid))
async def grant_public_access(
self, measurement_uuid: str, agent_uuid: str, *, revoke: bool = False
) -> None:
"""Grant public access to the tables."""
if public_user := self.settings.CLICKHOUSE_PUBLIC_USER:
if revoke:
self.logger.info("Revoking public access to measurement tables")
else:
self.logger.info("Granting public access to measurement tables")
measurement_id_ = measurement_id(measurement_uuid, agent_uuid)
for table in [
results_table(measurement_id_),
links_table(measurement_id_),
prefixes_table(measurement_id_),
]:
# TODO: Proper parameter injection?
# It doesn't seem to be supported for GRANT.
# Syntax error: failed at position 17 ('{'): {table:Identifier}
if revoke:
await self.call(f"REVOKE SELECT ON {table} FROM {public_user}")
else:
await self.call(f"GRANT SELECT ON {table} TO {public_user}")
async def insert_csv(
self, measurement_uuid: str, agent_uuid: str, csv_filepath: Path
) -> None:
"""Insert CSV file into table."""
split_dir = csv_filepath.with_suffix(".split")
split_dir.mkdir(exist_ok=True)
self.logger.info("Split CSV file")
split_compressed_file(
str(csv_filepath),
str(split_dir / "splitted_"),
self.settings.CLICKHOUSE_PARALLEL_CSV_MAX_LINE,
max_estimate_lines=10_000,
skip_lines=1,
)
files = list(split_dir.glob("*"))
self.logger.info("Number of chunks: %s", len(files))
concurrency = (os.cpu_count() or 2) // 2
self.logger.info("Number of concurrent processes: %s", concurrency)
def insert(file):
with ClickHouseClient(**self.settings.clickhouse) as client:
table = results_table(measurement_id(measurement_uuid, agent_uuid))
query = f"INSERT INTO {table} FORMAT CSV"
try:
client.execute(query, data=iter_file(file))
finally:
os.remove(file)
loop = asyncio.get_running_loop()
with ThreadPoolExecutor(concurrency) as pool:
await asyncio.gather(
*[loop.run_in_executor(pool, insert, file) for file in files]
)
await aiofiles.os.rmdir(split_dir)
@fault_tolerant
async def insert_links(self, measurement_uuid: str, agent_uuid: str) -> None:
"""Insert the links in the links' table from the flow view."""
measurement_id_ = measurement_id(measurement_uuid, agent_uuid)
await self.call(
"TRUNCATE {table:Identifier}",
params={"table": links_table(measurement_id_)},
)
with ClickHouseClient(**self.settings.clickhouse) as client:
query = InsertLinks()
subsets = subsets_for(query, client, measurement_id_)
# We limit the number of concurrent requests since this query
# uses a lot of memory (aggregation of the flows table).
query.execute_concurrent(
client,
measurement_id_,
subsets=subsets,
concurrent_requests=8,
)
@fault_tolerant
async def insert_prefixes(self, measurement_uuid: str, agent_uuid: str) -> None:
"""Insert the invalid prefixes in the prefix table."""
measurement_id_ = measurement_id(measurement_uuid, agent_uuid)
await self.call(
"TRUNCATE {table:Identifier}",
params={"table": prefixes_table(measurement_id_)},
)
with ClickHouseClient(**self.settings.clickhouse) as client:
query = InsertPrefixes()
subsets = subsets_for(query, client, measurement_id_)
# We limit the number of concurrent requests since this query
# uses a lot of memory.
query.execute_concurrent(
client,
measurement_id_,
subsets=subsets,
concurrent_requests=8,
)
|
StarcoderdataPython
|
11247857
|
"""
test_xvfb_server.py
Copyright 2011 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
import os
import time
from PIL import Image
from nose.plugins.attrib import attr
from mock import patch
from w3af import ROOT_PATH
from w3af.core.ui.tests.wrappers.xvfb_server import XVFBServer
from w3af.core.ui.tests.wrappers.tests.utils import is_black_image
class TestEnvironment(unittest.TestCase):
X_TEST_COMMAND = 'python %s' % os.path.join(ROOT_PATH, 'core', 'ui', 'tests',
'wrappers', 'tests', 'helloworld.py')
def setUp(self):
self.xvfb_server = XVFBServer()
def tearDown(self):
self.xvfb_server.stop()
@attr('ci_fails')
def test_verify_xvfb_installed_true(self):
self.assertTrue(self.xvfb_server.is_installed())
@patch('commands.getstatusoutput', return_value=(1, ''))
@attr('ci_fails')
def test_verify_xvfb_installed_false_1(self, *args):
self.assertFalse(self.xvfb_server.is_installed())
@patch('commands.getstatusoutput', return_value=(256, ''))
@attr('ci_fails')
def test_verify_xvfb_installed_false_2(self, *args):
self.assertFalse(self.xvfb_server.is_installed())
@attr('ci_fails')
def test_stop_not_started(self):
self.assertTrue(self.xvfb_server.stop())
@attr('ci_fails')
def test_not_running(self):
self.assertFalse(self.xvfb_server.is_running())
@attr('ci_fails')
def test_start(self):
self.xvfb_server.start_sync()
self.assertTrue(self.xvfb_server.is_running())
@attr('ci_fails')
def test_start_start(self):
self.xvfb_server.start_sync()
self.assertRaises(RuntimeError, self.xvfb_server.start_sync)
self.assertTrue(self.xvfb_server.is_running())
@attr('ci_fails')
def test_two_servers(self):
xvfb_server_1 = XVFBServer()
xvfb_server_2 = XVFBServer()
xvfb_server_1.start_sync()
self.assertTrue(xvfb_server_1.is_running())
xvfb_server_2.start_sync()
self.assertFalse(xvfb_server_2.is_running())
xvfb_server_1.stop()
@attr('ci_fails')
def test_get_screenshot_not_started(self):
output_files = self.xvfb_server.get_screenshot()
self.assertEqual(output_files, None)
@attr('ci_fails')
def test_get_screenshot(self):
self.xvfb_server.start_sync()
self.assertTrue(self.xvfb_server.is_running(),
'xvfb server failed to start.')
output_file = self.xvfb_server.get_screenshot()
screenshot_img = Image.open(output_file)
img_width, img_height = screenshot_img.size
self.assertEqual(img_width, XVFBServer.WIDTH)
self.assertEqual(img_height, XVFBServer.HEIGTH)
self.assertTrue(is_black_image(screenshot_img))
os.remove(output_file)
@attr('ci_fails')
def test_run_with_stopped_xvfb(self):
run_result = self.xvfb_server.run_x_process(self.X_TEST_COMMAND)
self.assertFalse(run_result)
@attr('ci_fails')
def test_run_hello_world_in_xvfb(self):
self.xvfb_server.start_sync()
self.assertTrue(self.xvfb_server.is_running())
# This should be completely black
empty_scr_0 = self.xvfb_server.get_screenshot()
self.assertTrue(is_black_image(Image.open(empty_scr_0)))
# Start the hello world in the xvfb
run_result = self.xvfb_server.run_x_process(self.X_TEST_COMMAND,
block=False)
self.assertTrue(run_result)
# Let the window appear in the xvfb, note that block is False above
time.sleep(1)
# In screen 0 there should be a window, the one I started in the
# previous step.
screen_0 = self.xvfb_server.get_screenshot()
self.assertFalse(is_black_image(Image.open(screen_0)))
@attr('ci_fails')
def test_start_vnc_server(self):
self.xvfb_server.start_sync()
self.xvfb_server.start_vnc_server()
|
StarcoderdataPython
|
5099526
|
<reponame>mwroffo/FastCardsOnline<filename>app/main/__init__.py<gh_stars>0
# main.__init__.py
# declares the main blueprint for core functionality.
from flask import Blueprint
bp = Blueprint('main', __name__)
from app.main import routes
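# Registration sketch (illustrative, not part of this module): a typical
# application factory elsewhere in the project would attach this blueprint with
#     from app.main import bp as main_bp
#     app.register_blueprint(main_bp)
# the factory function and its location are assumptions, not defined here.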
|
StarcoderdataPython
|
9762007
|
"""
Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'.
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
Note:
s could be empty and contains only lowercase letters a-z.
p could be empty and contains only lowercase letters a-z, and characters like . or *.
https://leetcode.com/problems/regular-expression-matching/
"""
from typing import List
class Solution:
def sanitize_pattern(self, p: str):
sanitized = []
index = 0
added = False
while 1:
if index >= len(p):
break
char = p[index]
first = p[index : index + 2]
second = p[index + 2 : index + 4]
if len(second) == 2 and first[1] == "*" == second[1] and first[0] == second[0]:
if not added:
sanitized.append(first[0])
sanitized.append(first[1])
index += 2
added = True
else:
sanitized.append(char)
index += 1
added = False
return "".join(sanitized)
def match_first_character(self, s: str, p: str):
if s and p and (s[0] == p[0] or p[0] == "."):
return True
return False
def isMatch(self, s: str, p: str) -> bool:
p = self.sanitize_pattern(p)
return self.isMatch2(s, p)
def isMatch2(self, s: str, p: str) -> bool:
# Detect end: match
if not s and (not p or ("*" in p[:2] and len(p) == 2)):
return True
# Detect end: mismatch
if not p:
return False
# Simple case: no asterisk
if "*" not in p[:2]:
if self.match_first_character(s, p):
return self.isMatch2(s[1:], p[1:])
else:
return False
# Asterisk case: Fork
if "*" in p[:2]:
if self.match_first_character(s, p):
return any(
[
# Skip 1 character
self.isMatch2(s[1:], p),
# Skip asterisk pattern
self.isMatch2(s, p[2:]),
]
)
else:
# Skip asterisk pattern
return self.isMatch2(s, p[2:])
test_cases = [
["aa", "a"],
["aa", "a*"],
["aab", "c*a*b"],
["mississippi", "mis*is*p*."],
["aaaaaaaaaaaaab", "a*a*a*a*a*a*a*a*a*a*a*a*b"],
["", "c*c*"],
]
results = [False, True, True, False, True, True]
# test_cases = [["mississippi", "mis*is*p*."]]
# results = [ False]
if __name__ == "__main__":
app = Solution()
for test_case, correct_result in zip(test_cases, results):
assert (
app.isMatch(*test_case) == correct_result
), f"My result: {app.isMatch(*test_case)}, correct result: {correct_result}\nTest Case: {test_case}"
|
StarcoderdataPython
|
11210355
|
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
def gauss_evaluate_likelihood(X, x_true):
if len(X.shape) == 3:
a,b,c = X.shape
X = np.reshape(X, newshape=(a,b*c))
x_true = np.reshape(x_true, newshape=(b*c,))
X_mean = np.mean(X,0)
X_sd = np.std(X,0)
return np.mean(-(x_true - X_mean)**2/(2*X_sd**2) - 0.5*np.log(2*np.pi*X_sd**2))
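# For reference, the value returned above is the per-dimension average of the
# Gaussian log-density of x_true under a diagonal Gaussian fitted to X:
#   log N(x | mu, sigma^2) = -(x - mu)^2 / (2 * sigma^2) - 0.5 * log(2 * pi * sigma^2)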
def evaluate_likelihood(X, x_true, s=0.00001):
def ker(x,y,bw):
return np.mean(np.log(np.mean(np.exp(-(x - y)**2/(2*bw**2))/(np.sqrt(2*np.pi)*bw),0) + s))
if len(X.shape) == 3:
N,b,c = X.shape
a = b*c
else:
N,a = X.shape
X = np.reshape(X, newshape=(N,a))
x_true = np.reshape(x_true, newshape=(1,a))
bw = 0.9*np.std(X,0)*N**(-1/5)
return ker(X, x_true, bw)
def evaluate_multi_likelihood(X, x_true, s=0.01):
try:
if len(X.shape) == 3:
a,b,c = X.shape
N = b*c
X = np.reshape(X, newshape=(a,b*c))
x_true = np.reshape(x_true, newshape=(b*c,))
else:
N = X.shape[1]
mu = np.mean(X,0)
S = np.cov(np.transpose(X)) + s*np.identity(N)
return stats.multivariate_normal.logpdf(x_true, mean=mu, cov=S)
except np.linalg.LinAlgError:
print("Something went wrong in the multivariate evaluation metric")
return np.nan
def evaluate_model(variational_model, X_true, M, emission_model, emission_distribution, scale, out_data, T_data):
X, mu, x_pre, log_jacobian, epsilon_loss = variational_model.sample_timeseries(M)
uni_lk = evaluate_likelihood(X.detach().numpy(), X_true.detach().numpy())
multi_lk = evaluate_multi_likelihood(X.detach().numpy(), X_true.detach().numpy())
multi_pred_lk = evaluate_predictive_error(X, emission_model, emission_distribution, scale, out_data, T_data)
print("Avarage univariate latent likelihood: {}".format(uni_lk))
print("Multivariate latent likelihood: {}".format(multi_lk))
print("Predictive observable likelihood: {}".format(multi_pred_lk))
return uni_lk, multi_lk, multi_pred_lk
def evaluate_predictive_error(X, emission_model, emission_distribution, scale, out_data, T_data):
Y = np.zeros((X.shape[0], X.shape[2] - T_data))
for t in range(T_data, X.shape[2]):
tau = t - T_data
eps = np.random.normal(0,0.0001,(Y.shape[0],))
if scale is None:
Y[:,tau] = emission_distribution.rsample(emission_model(X[:,:,t]), None).detach().numpy() + eps
else:
Y[:,tau] = emission_distribution.rsample(emission_model(X[:,:,t]), scale).detach().numpy() + eps
#plt.plot(np.transpose(Y), alpha=0.01, c="r")
#plt.plot(np.transpose(X[:20,0,T_data:].detach().numpy()), alpha=0.8, c="k")
#plt.plot(np.transpose(out_data))
#plt.show()
return evaluate_likelihood(Y, out_data.detach().numpy())
|
StarcoderdataPython
|
9664764
|
import functools
import logging
from typing import Any, Callable
import typer
from adk.exceptions import QneAdkException
def catch_qne_adk_exceptions(func: Callable[..., Any]) -> Any:
""" Decorator function to catch exceptions and print an error message """
@functools.wraps(func)
def catch_exceptions(*args: Any, **kwargs: Any) -> Any:
try:
func(*args, **kwargs)
except QneAdkException as qne_adk_exception:
message = f"Error: {str(qne_adk_exception)}"
typer.echo(message)
except Exception as exception:
message = f"Unhandled exception: {repr(exception)}"
typer.echo(message)
return catch_exceptions
def log_function(func: Callable[..., Any]) -> Any:
""" Decorator function to log entry and exit of the method/function """
def log_function_name(*args: Any, **kwargs: Any) -> Any:
logging.debug("Method '%s' has been entered.", func.__name__)
resp = func(*args, **kwargs)
logging.debug("Method '%s' has been exited.", func.__name__)
return resp
return log_function_name
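# Minimal usage sketch: stacking both decorators on a small function. The
# `greet` function, its argument and its message are illustrative assumptions,
# not part of the adk package itself.
@catch_qne_adk_exceptions
@log_function
def greet(name: str) -> None:
    typer.echo(f"Hello, {name}")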
|
StarcoderdataPython
|
6657229
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-ANN.
# @File : demo
# @Time : 2019-12-04 20:10
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
import numpy as np
from annzoo.faiss import ANN
data = np.random.random((1000, 128)).astype('float32')
ann = ANN()
ann.train(data, noramlize=True)
dis, idx = ann.search(data[:10])
# print(dis)
print(idx)
ann.write_index()
|
StarcoderdataPython
|
6545359
|
# First request to FlowXO that determines structure (limits) of all further requests
from user import User
import constants as c
from outgoing import message
dummy = 'dummy'
user = User(dummy)
for i in range(c.MAX_SERVICE_MESSAGES):
user.update_service_messages(text=dummy, buttons=[dummy]*c.MAX_BUTTONS_PER_SERVICE_MESSAGE)
user.current_result = dummy
message(user)
|
StarcoderdataPython
|
1721781
|
<reponame>Jan-zou/LeetCode
# !/usr/bin/env python
# coding: utf-8
'''
Description:
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0?
Find all unique triplets in the array which gives the sum of zero.
Note:
+ Elements in a triplet (a,b,c) must be in non-descending order. (ie, a ≤ b ≤ c)
+ The solution set must not contain duplicate triplets.
For example, given array S = {-1 0 1 2 -1 -4},
A solution set is:
(-1, 0, 1)
(-1, -1, 2)
Tags: Array, Two Pointers
Time: O(n^2); Space: O(1)
Sort the array first, then squeeze with two pointers from both ends, skipping duplicate values.
'''
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums = sorted(nums) # O(nlogn)
result = []
i = 0
while i < len(nums)-2:
if i == 0 or nums[i] != nums[i-1]: # skip duplicate number
j,k = i+1, len(nums)-1
while j < k:
if nums[i] + nums[j] + nums[k] < 0:
j += 1
elif nums[i] + nums[j] + nums[k] > 0:
k -= 1
else:
result.append([nums[i], nums[j], nums[k]])
j += 1
k -= 1
while j < k and nums[j] == nums[j-1]: # skip duplicate
j += 1
while j < k and nums[k] == nums[k+1]: # skip duplicate
k -= 1
i += 1
return result
if __name__ == '__main__':
print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))
|
StarcoderdataPython
|
176674
|
<reponame>DoctorHayes/235CppStyle
import re
# Recursive function for compound statements (blocks, functions, etc.)
def validate_statement_indentation(self, code_lines, line_index, indent_min = 0, indent_max = 0, enclosure_stack = [], isNewStatement = True):
if line_index >= code_lines.num_lines:
return line_index # No more lines to check
line_index, indent_min_new, indent_max_new, expected = indent_line_check(self, code_lines, line_index, indent_min, indent_max, isNewStatement, enclosure_stack)
if line_index >= code_lines.num_lines:
return line_index # No more lines to check
# Check for unclosed () and {}
start_stack_len = len(enclosure_stack)
increase = enclosure_nesting(code_lines.elided[line_index], enclosure_stack, expected)
# Setup for next line based on how this one ends
isNextNewStatement = is_complete_expression(line_index, code_lines, isNewStatement)
if (not isNewStatement) or line_index >= code_lines.num_lines: # Statement start on previous line
return line_index # Let the previous recursive call handle the next line.
# compound statement
# Go through each line until we are back to where we left off.
while increase >= 0 and line_index + 1 < code_lines.num_lines:
#if line_index in [13]:
if is_switch_case(code_lines.elided[line_index]):
line_index = validate_statement_indentation(self, code_lines, line_index + 1, expected + 1, expected + 1, enclosure_stack, True)
elif is_access_modifiers(code_lines.elided[line_index]):
line_index = validate_statement_indentation(self, code_lines, line_index + 1, expected + 1, expected + 1, enclosure_stack, True)
elif increase == 0:
line_index = validate_statement_indentation(self, code_lines, line_index + 1, indent_min + (not isNextNewStatement), indent_min + (not isNextNewStatement), enclosure_stack, isNextNewStatement)
else: # going 1 more level deep
line_index = validate_statement_indentation(self, code_lines, line_index + 1, expected + 1, expected + increase, enclosure_stack, isNextNewStatement)
if line_index + 1 >= code_lines.num_lines:
return line_index
increase = len(enclosure_stack) - start_stack_len
isNextNewStatement = is_complete_expression(line_index, code_lines, isNextNewStatement)
#if isMultiLine:
# expected -= 1
# Check until end of statement (line ends with ';'' or '}'
#just_code = code_lines.elided[line_index].strip()
#if (just_code and just_code[-1] not in list(';}')):
# return validate_statement_indentation(self, code_lines, line_index + 1, expected + isNextNewStatement, expected + isNextNewStatement, enclosure_stack, False)
# line_index += 1
#else:
# if not isNewStatement and just_code and just_code[-1] == ';':
# expected -= 1
if increase < 0:
return line_index
return validate_statement_indentation(self, code_lines, line_index + 1, indent_min, indent_min, enclosure_stack, True)
#while len(enclosure_stack) >= start_stack_len and line_index + 1 < code_lines.num_lines:
# increase = len(enclosure_stack) - start_stack_len
# line_index = validate_statement_indentation(self, code_lines, line_index + 1, expected, expected, enclosure_stack, True)
# line_index += 1
def enclosure_nesting(line_elided, enclosure_stack = [], current_indent = 0):
# Check for unclosed () and {}
start_stack_len = len(enclosure_stack)
line_elided = line_elided.strip()
for i, c in enumerate(line_elided):
if c in list('{'):
enclosure_stack.append({'indent': current_indent})
elif c in list('}') and len(enclosure_stack): # assume they all match up?
enclosure_stack.pop()
return len(enclosure_stack) - start_stack_len
def is_complete_expression(line_index, code_lines, isPreviousStatementNew):
if line_index >= code_lines.num_lines:
return isPreviousStatementNew
just_code = code_lines.elided[line_index].strip()
if not just_code:
return isPreviousStatementNew
else:
return (just_code and just_code[-1] in list(';{}'))
def indent_line_check(self, code_lines, line_index, indent_min = 0, indent_max = 0, isNewStatement = True, enclosure_stack = []):
tab_size = self.current_file_indentation
def count_whitespace(str):
counts = {'space': 0, 'tab': 0}
for ch in list(str):
if ch == ' ':
counts['space'] += 1
elif ch == '\t':
counts['tab'] += 1
return counts
# Checks if indent is in range, adds error as necessary, return the indention (thresholded)
def check_indent(line, min, max, found, tab_size = tab_size):
found_level = 0
tab_type = ''
other_type = ''
if (tab_size == 1):
found_level = found['tab']
tab_type = ' tabs'
other_type = 'space'
else:
found_level = found['space'] / tab_size
tab_type = ' spaces'
other_type = 'tab'
if (found_level < min):
expected = min
elif (found_level > max):
expected = max
else:
expected = int(found_level)
found_msg = ''
if (found['tab'] > 0 and found['space'] > 0):
found_msg = str(found['tab']) + ' tabs and ' + str(found['space']) + ' spaces'
elif (found['tab'] > 0):
found_msg = str(found['tab']) + ' tabs'
else:
found_msg = str(found['space']) + ' spaces'
if (found_level < min or found_level > max or found[other_type] > 0):
self.add_error(label="BLOCK_INDENTATION", line=line+1, data={
'expected': str(min * tab_size) + tab_type, 'found': found_msg})
return expected
if line_index >= code_lines.num_lines:
return line_index, 0, 0, 0
# TODO Make the check its own function.
leading_whitespace = re.match(r'^(\t*|\s+)\S', code_lines.raw_lines[line_index])
indent_len = count_whitespace(leading_whitespace.group() if leading_whitespace else '')
#if not isNewStatement:
# indent_min = indent_max = indent_min + 1
if not leading_whitespace or code_lines.raw_lines[line_index] == '/**/': # blank line (also raw_lines are not truly raw for multi-line comments)
# Go to next line
return indent_line_check(self, code_lines, line_index + 1, indent_min, indent_max, isNewStatement, enclosure_stack)
# Check preprocessor directives
elif re.match(r'\s*#', code_lines.raw_lines[line_index]):
check_indent(line_index, 0, 0, indent_len)
return indent_line_check(self, code_lines, line_index + 1, indent_min, indent_max, isNewStatement, enclosure_stack)
# Check if line starts with a single-line comment
elif re.match(r'\s*//', code_lines.raw_lines[line_index]):
check_indent(line_index, indent_min, indent_max, indent_len)
return indent_line_check(self, code_lines, line_index + 1, indent_min, indent_max, isNewStatement, enclosure_stack)
# Check for multi-line comment
elif re.match(r'\s*/\*', code_lines.raw_lines[line_index]):
check_indent(line_index, indent_min, indent_max, indent_len)
# Go through comment lines
while code_lines.raw_lines[line_index].find('*/') < 0 and line_index + 1 < code_lines.num_lines:
line_index += 1
check_indent(line_index, indent_min, indent_max, indent_len)
return indent_line_check(self, code_lines, line_index + 1, indent_min, indent_max, isNewStatement, enclosure_stack)
# Check if ending a multi-line block
elif re.match(r'\s*\}', code_lines.elided[line_index]):
indent_min = enclosure_stack[-1]['indent'] if enclosure_stack else indent_min - 1
#if not isNewStatement:
# indent_min -= 1 # Obviously this is the end of the statement
indent_max = indent_min
# Check if current line is starting a multi-line block
elif not isNewStatement and re.match(r'\s*\{[^\}]*$', code_lines.elided[line_index]):
indent_min -= 1
indent_max = indent_min
# Check for labels and public, private, protected
elif is_access_modifiers(code_lines.elided[line_index]):
indent_min = 0
indent_max = 1
# Check for switch cases
elif is_switch_case(code_lines.elided[line_index]):
if enclosure_stack:
if 'case_indent' in enclosure_stack[-1]:
indent_min = indent_max = enclosure_stack[-1]['case_indent']
else:
indent_min = enclosure_stack[-1]['indent']
indent_max = indent_min + 1
else:
indent_min -= 1
indent_max = indent_min + 1
if (indent_min < 0): # If this happens, clearly it is not right
indent_min = 0;
if indent_max < indent_min:
indent_max = indent_min
# Check current line
expected = check_indent(line_index, indent_min, indent_max, indent_len)
if enclosure_stack and is_switch_case(code_lines.elided[line_index]):
enclosure_stack[-1]['case_indent'] = expected
return line_index, indent_min, indent_max, expected
def is_access_modifiers(code_line):
return re.match(r'\s*(public|private|protected)\s*:', code_line)
def is_switch_case(code_line):
return re.match(r'\s*(case\s+|default\s*:)', code_line)
|
StarcoderdataPython
|
1724535
|
<reponame>falabrasil/br-ali<filename>explogs/20_bracis_kaldi/g2p_map/news2m2m.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
#
# Grupo FalaBrasil (2020)
# Universidade Federal do Pará
#
# author: apr 2020
# <NAME> - https://cassota.gitlab.io/
# last edited: jul 2020
import sys
import os
import subprocess
import config
import tempfile
import shutil
TAG = sys.argv[0]
# [fb] |j:s -> :j|s # bring semivowel j close to the left vowel rather than
# close to the consonant s
MAPPINGS = {'~|m:': '~:m|', '~|n:': '~:n|', ':d|Z': '|d:Z', 'd|Z:': 'd:Z|'}
# https://note.nkmk.me/en/python-str-replace-translate-re-sub/
def fix_misaligns(m2mfile):
tmp = os.path.join(tempfile.gettempdir(), 'm2m')
with open(m2mfile, 'r') as fin:
aligns = fin.readlines()
with open(tmp, 'w') as fout:
count = 0
for seqpair in aligns:
count += 1
ds, fb = seqpair.strip().split('\t')
for key, value in MAPPINGS.items():
ds = ds.replace(key, value)
if '~|_' in fb and ('~|m' in ds or '~|n' in ds or '~|j' in ds):
if fb.count('~|_') < ds.count('~|m') + ds.count('~|n') + ds.count('~|j'):
#print(count, '##', ds, '##', fb, '##')
fbi = fb.split('|').index('_') # get index of deletion
dsl = ds.split('|')
dsl[fbi] = ':' + dsl[fbi] # add joint to the same index
ds = '|'.join(dsl).replace('|:', ':') # quick-and-dirty workaround
else:
ds = ds.replace('~|m', '~:m')
ds = ds.replace('~|n', '~:n')
ds = ds.replace('~|j', '~:j')
fb = fb.replace('~|_', '~')
fout.write('%s\t%s\n' % (ds, fb))
shutil.move(tmp, m2mfile)
def check_length(m2mfile):
with open(m2mfile, 'r') as fin:
aligns = fin.readlines()
count = 0
for seqpair in aligns:
count += 1
ds, fb = seqpair.strip().split('\t')
if len(ds.split('|')) != len(fb.split('|')):
print('misalign: %d: %s\t%s' % (count, ds, fb))
if __name__ == '__main__':
if len(sys.argv) != 4:
print('usage: %s <news_file> <m2m_file> <model_file>' % sys.argv[0])
print(' <news_file> input file in .news format gen by \'tg2news.py\'')
print(' <m2m_file> output file in .m2m format gen by M2M aligner')
print(' <model_file> dummy file gen by M2M aligner you\'ll never need')
sys.exit(1)
news_file = sys.argv[1]
m2m_file = sys.argv[2]
model_file = sys.argv[3]
err_file = m2m_file + '.err'
subprocess.call(config.M2M_CMD.format(news_file, m2m_file, model_file),
shell=True)
os.remove(model_file)
os.remove(err_file)
fix_misaligns(m2m_file)
check_length(m2m_file)
print('[%s] finished successfully!' % TAG)
|
StarcoderdataPython
|
1926745
|
from os.path import exists, expanduser
import pandas as pd
import time
class ExecExcel:
"""
read xlsx and csv
"""
def __init__(self, file_path):
self.file_path = expanduser(file_path)
def read(self, sheet='Sheet1', axis=0, index_col=None, **kwargs):
df = pd.ExcelFile(self.file_path)
sheets = [sheet] if sheet else df.sheet_names
df_parse = df.parse(sheets, index_col=index_col, **kwargs)
frame_data = pd.concat(df_parse, axis=axis)
return ExcelResponse(frame_data, self.file_path)
def read_csv(self, **kwargs):
frame_data = pd.read_csv(self.file_path, **kwargs)
return ExcelResponse(frame_data, self.file_path)
def data_format(self, data: list, axis=0):
"""
Build a concatenated DataFrame from a list of dicts (does not write to disk).
:param axis:
:param data: dict in list
"""
fd = [pd.DataFrame(item, index=[0]) for item in data]
frame_data = pd.concat(fd, axis=axis)
return ExcelResponse(frame_data, self.file_path)
def append_row(self, data: list, sheet='Sheet1', axis=0, index_col=None, **kwargs):
if exists(self.file_path):
df = pd.ExcelFile(self.file_path)
sheets = [sheet] if sheet else df.sheet_names
df_parse = df.parse(sheets, index_col=index_col, **kwargs)
frame_data = pd.concat(df_parse, axis=axis)
else:
frame_data = pd.DataFrame()
new_data = pd.concat([pd.DataFrame(item, index=[0]) for item in data], axis=axis)
frame_data = pd.concat([frame_data, new_data], axis=axis)
return ExcelResponse(frame_data, self.file_path)
class ExcelResponse:
def __init__(self, frame_data, file_path: str):
self.frame_data = frame_data
self.file_path = file_path
def extract_data(self, orient='records', empty=False, nana=None):
"""
:param empty: if True show empty value
:param orient:
{
orient=’records’: [{column -> value}, … , {column -> value}],
orient=’index’ : {index -> {column -> value}},
orient=’dict’ : {column -> {index -> value}},
orient=’list’ : {column -> [values]} ,
orient=’series’ : {column -> Series(values)},
orient=’split’ : {index -> [index], columns -> [columns], data -> [values]},
}
:param nana: null display NaNa/None/''
:return: dict : generator object
"""
frame_data = self.frame_data.where(self.frame_data.notnull(), nana)
data_load = frame_data.to_dict(orient=orient)
if orient == 'records':
for index, value in enumerate(data_load):
yield {k: v for k, v in value.items()} if empty else {k: v for k, v in value.items() if v}
else:
for index, value in data_load.items():
if isinstance(value, dict):
yield index, {k: v for k, v in value.items()} if empty else {k: v for k, v in value.items() if v}
else:
yield {index: value}
def to_excel(self, sheet="Sheet1", file_path=None, index=False, **kwargs):
"""
Write data to excel.
:param index:
:param file_path: save excel file name
:param sheet: excel sheet name
"""
file_path = file_path or self.file_path.rsplit('.')[0] + f'_{int(time.time())}.xlsx'
self.frame_data.to_excel(file_path, index=index, sheet_name=sheet, **kwargs)
|
StarcoderdataPython
|
6661561
|
__REFERENCES__ = [
'https://github.com/leftthomas/SRGAN/blob/master/pytorch_ssim/__init__.py'
]
from math import exp
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def gaussian(window_size: int, sigma: int) -> torch.Tensor:
gauss = torch.Tensor(
[
exp(-((x - window_size // 2) ** 2) / float(2 * sigma ** 2))
for x in range(window_size)
]
)
return gauss / gauss.sum()
def create_window(window_size: int, channel: int) -> torch.Tensor:
_1d_window = gaussian(window_size, 1.5).unsqueeze(1)
_2d_window = (
_1d_window.mm(_1d_window.t()).float().unsqueeze(0).unsqueeze(0)
)
window = Variable(
_2d_window.expand(channel, 1, window_size, window_size).contiguous()
)
return window
def _ssim(
img1,
img2,
window: torch.Tensor,
window_size: int,
channel: int,
size_average: bool = True,
full: bool = False,
):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = (
F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel)
- mu1_sq
)
sigma2_sq = (
F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel)
- mu2_sq
)
sigma12 = (
F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel)
- mu1_mu2
)
c1 = 0.01 ** 2
c2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + c1) * (2 * sigma12 + c2)) / (
(mu1_sq + mu2_sq + c1) * (sigma1_sq + sigma2_sq + c2)
)
if size_average:
ssim_map = ssim_map.mean()
else:
ssim_map = ssim_map.mean(1).mean(1).mean(1)
if not full:
return ssim_map
_v1 = 2.0 * sigma12 + c2
_v2 = sigma1_sq + sigma2_sq + c2
cs = torch.mean(_v1 / _v2)
return ssim_map, cs
def ssim(
img1,
img2,
window_size: int = 11,
size_average: bool = True,
full: bool = False,
):
(_, channel, height, width) = img1.size()
_window_size = min(window_size, height, width)
window = create_window(_window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average, full)
def msssim(
img1,
img2,
window_size: int = 11,
size_average: bool = True,
full: bool = True,
):
weights = torch.Tensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
if img1.is_cuda:
weights = weights.cuda(img1.get_device())
weights = weights.type_as(img1)
levels = weights.size(0)
ms_ssim, mcs = [], []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size, size_average, full=full)
ms_ssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
ms_ssim = torch.stack(ms_ssim)
mcs = torch.stack(mcs)
pow1 = mcs ** weights
pow2 = ms_ssim ** weights
mssim = torch.prod(pow1[:-1]) * pow2[-1]
return mssim
def psnr(img1, img2):
mse = F.mse_loss(img1, img2)
if mse == 0:
return 100.0
return 10.0 * torch.log10(1.0 / mse)
def acc(_pred, _true):
eq = torch.eq(torch.gt(_pred, 0.5).float(), _true)
return 100.0 * torch.mean(eq.float())
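# --- Usage sketch (not part of the original module): a quick sanity check of the
# --- metrics above on random tensors; shapes and values are illustrative only.
if __name__ == "__main__":
    a = torch.rand(1, 3, 64, 64)
    b = torch.rand(1, 3, 64, 64)
    print("SSIM:", ssim(a, b).item())
    print("PSNR:", psnr(a, b).item())
    print("ACC :", acc(torch.rand(10), torch.ones(10)).item())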
|
StarcoderdataPython
|
58514
|
from kairon import cli
import logging
if __name__ == "__main__":
logging.basicConfig(level="DEBUG")
cli()
|
StarcoderdataPython
|
1855019
|
<reponame>slavad/py-series-clean
from helpers.common_imports import *
def generate_index_vector(vector_size):
"""generates index vector: from -max_index to max_index"""
if vector_size % 2 == 0:
raise ValueError("matrix_size must be odd")
max_index = (vector_size - 1) // 2
index_vector = np.arange(-max_index, max_index + 1, 1).reshape(-1,1)
return index_vector
def generate_freq_vector(index_vector, max_freq, number_of_freq_estimations):
"""building frequency vector"""
return index_vector*max_freq/number_of_freq_estimations
def run_ft(time_grid, values, freq_vector, number_of_freq_estimations, kind):
"""
builds (index_vector.shape[0])xN exp matrix
for usage in eq 148, 149, 160 and 161 ref 2
(direct or inverse FT)
"""
if kind == 'direct':
coeff = -1j
norm = 1/time_grid.shape[0]
elif kind == 'inverse':
coeff = 1j
norm = time_grid.shape[0]/number_of_freq_estimations
else:
raise ValueError("unknown kind")
# the result will be a rectangular matrix:
matrix_for_exp = (coeff*2*np.pi*freq_vector)*time_grid.T
exp_vector = np.exp(matrix_for_exp)
if kind == 'direct':
exp_vector_for_mult = exp_vector
elif kind == 'inverse':
exp_vector_for_mult = exp_vector.T
result = np.matmul(exp_vector_for_mult, values)*norm
return result
def size_of_spectrum_vector(number_of_freq_estimations):
"""size of dirty vector"""
return 2*number_of_freq_estimations + 1
def calculate_dirty_vector(time_grid, values, number_of_freq_estimations, max_freq):
"""eq 148 in ref 2"""
index_vector = generate_index_vector(size_of_spectrum_vector(number_of_freq_estimations))
freq_vector = generate_freq_vector(index_vector, max_freq, number_of_freq_estimations)
result = run_ft(
time_grid, values, freq_vector, number_of_freq_estimations, 'direct'
)
return result
def size_of_window_vector(number_of_freq_estimations):
"""size of the window vector"""
return 4*number_of_freq_estimations + 1
def calculate_window_vector(time_grid, number_of_freq_estimations, max_freq):
"""eq 148 in ref 2"""
index_vector = generate_index_vector(size_of_window_vector(number_of_freq_estimations))
freq_vector = generate_freq_vector(index_vector, max_freq, number_of_freq_estimations)
values = np.ones((time_grid.shape[0],1))
result = run_ft(
time_grid, values, freq_vector, number_of_freq_estimations, 'direct'
)
return result
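# --- Usage sketch (illustrative values, not from the original module): compute the
# --- dirty spectrum of a toy unevenly sampled sine series with the helpers above.
if __name__ == "__main__":
    time_grid = np.sort(np.random.uniform(0.0, 10.0, 50)).reshape(-1, 1)
    values = np.sin(2 * np.pi * 1.0 * time_grid)
    dirty = calculate_dirty_vector(
        time_grid, values, number_of_freq_estimations=100, max_freq=5.0
    )
    print(dirty.shape)  # (2*100 + 1, 1) complex amplitudes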
|
StarcoderdataPython
|
6602772
|
from flask import Flask, request, jsonify, Blueprint
from flask_jwt_extended import (jwt_required, get_jwt_identity)
import database
import function
post_endpoints = Blueprint('post_endpoints', __name__)
# POSTS
@post_endpoints.route('/post/<string:id>/comments', methods=['POST'])
@jwt_required
def add_comment(id):
# Check if specified ID is an integer
if not function.isInt(id):
return jsonify({"error": "id is not an integer"}), 400
# Fetch form data
postDetails = request.get_json()
content = postDetails.get('content')
parent = postDetails.get('parent')
# Swap userID for JWT id
userID = get_jwt_identity()
if content is None:
return jsonify({"error": "Comment content not specified"}), 400
if userID is None:
return jsonify({"error": "Comment user id not specified"}), 400
# Check if post actually exists
post = database.getPostByID(id)
if post is None:
return jsonify({"error": "Specified post does not exist"}), 400
# Check if you have permission to comment on this post
userRole = function.getProjectUserRole(get_jwt_identity(), post['postProject'])
if not function.isProjectMember(userRole):
return jsonify({"error": "Must be a project member to comment on this post"}), 403
# Check if parent actually exists
if parent is not None:
comment = database.getCommentByID(str(parent))
if comment is None:
return jsonify({"error": "Parent comment does not exist"}), 400
# Add comment
comment = database.addPostComment(content, parent, userID, id)
user = database.getUserByID(str(comment['commentUser']))
comment['user'] = user
return jsonify(comment), 201
@post_endpoints.route('/post/<string:id>', methods=['GET'])
def get_post(id):
# Check if specified ID is an integer
if not function.isInt(id):
return jsonify({"error": "id is not an integer"}), 400
data = database.getPostByID(id)
if data is None:
return jsonify({"error": "No results found"}), 404
else:
user = database.getUserInfo(str(data['postUser']))
data['user'] = user
return jsonify(data), 200
@post_endpoints.route('/post/<string:id>/comments', methods=['GET'])
def get_post_comments(id):
# Check if specified ID is an integer
if not function.isInt(id):
return jsonify({"error": "id is not an integer"}), 400
data = database.getPostComments(id)
if data is None:
return jsonify({"error": "No results found"}), 404
else:
for comment in data:
user = database.getUserInfo(str(comment['commentUser']))
comment['user'] = user
data = function.nest_comments(data)
return jsonify(data), 200
@post_endpoints.route('/post/<string:id>', methods=['PUT'])
@jwt_required
def put_post(id):
# Check if specified ID is an integer
if not function.isInt(id):
return jsonify({"error": "id is not an integer"}), 400
# Fetch form data
postDetails = request.get_json()
content = postDetails.get('content')
if content is None:
return jsonify({"error": "Post content not specified"}), 400
# Check if post actually exists
post = database.getPostByID(id)
if post is None:
return jsonify({"error": "Specified post does not exist"})
# Check if the user trying to update the post is the post owner
if post['postUser'] != get_jwt_identity():
return jsonify({"error": "Only post owner can update post"}), 400
# Update post
data = database.updatePost(id, content)
if data is not None:
return jsonify(data), 200
else:
return jsonify({"error": "No results found"}), 404
@post_endpoints.route('/post/<string:id>', methods=['DELETE'])
@jwt_required
def del_post(id):
# Check if specified ID is an integer
if not function.isInt(id):
return jsonify({"error": "id is not an integer"}), 400
# Check if post actually exists
post = database.getPostByID(id)
if post is None:
return jsonify({"error": "Specified post does not exist"})
# Check if the user trying to delete the post is the post owner
userRole = function.getProjectUserRole(get_jwt_identity(), post['postProject'])
if not function.isProjectAdmin(userRole):
if post['postUser'] != get_jwt_identity():
return jsonify({"error": "Must be admin to delete post of other user"}), 400
# Delete post
postDeleted = database.deletePost(id)
commentsDeleted = database.deltePostComments(id)
if postDeleted is True and commentsDeleted is True:
return jsonify({"Info": "Post deleted successfully"}), 200
else:
return jsonify({"error": "Something went wrong deleting the post"}), 500
|
StarcoderdataPython
|
5127527
|
"""
Test for deprecations of imports into global namespace::
sage: berlekamp_massey
doctest:warning...:
DeprecationWarning:
Importing berlekamp_massey from here is deprecated. If you need to use it, please import it directly from sage.matrix.berlekamp_massey
See https://trac.sagemath.org/27066 for details.
<function berlekamp_massey at ...>
"""
from __future__ import absolute_import
from sage.misc.lazy_import import lazy_import
from .matrix_space import MatrixSpace
from .constructor import (matrix, Matrix, column_matrix, random_matrix,
diagonal_matrix, identity_matrix, block_matrix,
block_diagonal_matrix, jordan_block, zero_matrix,
ones_matrix, elementary_matrix, companion_matrix)
lazy_import("sage.matrix.berlekamp_massey", 'berlekamp_massey',
deprecation=27066)
Mat = MatrixSpace
del absolute_import
|
StarcoderdataPython
|
6605454
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
def part1(in_data):
data = re.sub(r"!.", "", in_data)
data = re.sub(r"<[^>]*>", "", data)
score = 0
next_score = 1
for char in data:
if (char == "{"):
score += next_score
next_score += 1
elif (char == "}"):
next_score -= 1
return score
def part2(in_data):
data = re.sub(r"!.", "", in_data)
garbage = re.findall(r"<([^>]*)>", data)
count = 0
for i in garbage:
count += len(i)
return count
with open("input.txt", "r") as in_file:
in_data = in_file.read().strip()
print(part1(in_data))
print(part2(in_data))
|
StarcoderdataPython
|
74270
|
<filename>src/app_server/user_management/user_query.py
from datetime import datetime
from io import SEEK_CUR
import logging
from flask import request
from config_utils import *
from sqlalchemy import *
from psycopg2.errors import UniqueViolation
import hashlib
import urllib.parse as urlparse
from key_processing.aescbc import AESCBC
logger = logging.getLogger()
import random
from user_management.helper import *
from config_utils import key_duration_config
class User_Query(object):
def __init__(self, postgres_db_conn):
self.conn = postgres_db_conn
self.aes = AESCBC()
self.session_duration, self.autho_duration = key_duration_config()
def register_session(self, mobile_number):
session = self.conn.execute(existing_session(mobile_number)).fetchone()
if session is not None:
user_id, session_id, expired = session
if not expired:
data = {
'session_id' : session_id,
'activation_link' : f'{request.base_url}?sid={session_id}'
}
return success_return(USER_API_STATUS['CREATE_SESSION_DONE'], data)
else:
self.conn.execute(update_expired(user_id, session_id))
user = self.conn.execute(get_user(mobile_number)).fetchone()
if user is None:
try:
self.conn.execute(create_user(mobile_number))
except Exception as ex:
from pprint import pprint
pprint(vars(ex))
# return False, 1 if type(ex.orig) == UniqueViolation else 0
return failed_return(USER_API_STATUS['INTERNAL_DB_ERR'], str(ex))
# get ID again due to create_user cannot return newly created one
id, = self.conn.execute(get_user(mobile_number)).fetchone()
else:
id, = user
timestr = str(datetime.now())
session_str = f'{id}|{timestr}'
print('## SESSION STR is ', session_str, len(session_str))
session_id = self.aes.encrypt(session_str, ENCRYPT_KEY)
print('## session string after hashed ', session_id )
try:
self.conn.execute(create_session(id, session_id, self.session_duration))
except Exception as ex:
return failed_return(USER_API_STATUS['INTERNAL_DB_ERR'], str(ex))
print(request.host_url)
data = {
'session_id' : session_id,
'activation_link' : f'{request.base_url}?sid={session_id}'
}
return success_return(USER_API_STATUS['CREATE_SESSION_DONE'], data)
def validate_session(self, session_id, zone_code):
try:
# original_session = urlparse.quote_plus(session_id)
# print("original ", original_session)
self.conn.execute(session_checkin(session_id.replace(' ', '+'), zone_code))  # restore '+' signs that URL decoding turned into spaces in the session id; TODO: find a cleaner way
except Exception as ex:
return failed_return(USER_API_STATUS["VALIDATE_FAILED"], str(ex))
return success_return(USER_API_STATUS["VALIDATE_DONE"], {})
def sign_up(self, user_name, password, mobile_number):
user = self.conn.execute(get_user_by_mobile(mobile_number)).fetchone()
mobile_exist = user is not None
if mobile_exist:
_, db_user_name, _, _ = user
if db_user_name is not None and db_user_name != user_name:
return failed_return(USER_API_STATUS["MOBILE_TAKEN"])
if db_user_name is not None and db_user_name == user_name:
return failed_return(USER_API_STATUS["USER_EXIST"])
user = self.conn.execute(get_user_name(user_name)).fetchone()
if user is not None:
print('got user ', user)
return failed_return(USER_API_STATUS["USER_NAME_TAKEN"])
else:
try:
self.conn.execute(create_user(mobile_number))
except Exception as ex:
from pprint import pprint
pprint(vars(ex))
print(f'type(ex.orig) {type(ex.orig)}')
hashed_pass = hashlib.md5(password.encode('utf-8')).hexdigest()
try:
self.conn.execute(update_username_pass(user_name, hashed_pass, mobile_number))
except Exception as ex:
print(ex)
return failed_return(USER_API_STATUS["SIGN_UP_FAILED"], str(ex))
data = {
'user_name' : user_name,
}
return success_return(USER_API_STATUS["SIGN_UP_DONE"], data)
def sign_in(self, user_name, password):
hashed_pass = hashlib.md5(password.encode('utf-8')).hexdigest()
try:
dbresult = self.conn.execute(get_user_by_username_password(user_name, hashed_pass)).fetchone()
except Exception as ex:
from pprint import pprint
pprint(vars(ex))
return failed_return(USER_API_STATUS["SIGN_IN_FAILED"])
if dbresult is None:
return failed_return(USER_API_STATUS["SIGN_IN_FAILED"])
try:
user_id, _, _, _ = dbresult
self.conn.execute(update_autho_expired(user_id))
except Exception as ex:
return failed_return(USER_API_STATUS['INTERNAL_DB_ERR'])
timestr = str(datetime.now())
autho_str = f'{user_id}|{timestr}|{DEFAULT_ROLE}'
print('## SESSION STR is ', autho_str, len(autho_str))
autho_id = self.aes.encrypt(autho_str, ENCRYPT_KEY)
print('## session string after hashed ', autho_id )
try:
self.conn.execute(insert_authorization(user_id, autho_id, self.autho_duration))
except Exception as ex:
return failed_return(USER_API_STATUS['INTERNAL_DB_ERR'], str(ex))
data = {
'autho_id' : autho_id,
'user_id' : user_id
}
return success_return(USER_API_STATUS["SIGN_IN_DONE"], data)
def set_cer_number(self, mobile_number):
query = "select * from tbuser where mobile_number = '{}'".format(mobile_number)
try:
user = self.conn.execute(query).fetchone()
cer_number = random.randint(1000,9999)
if user is None:
self.conn.execute("insert into tbuser(mobile_number, cer_number) values ('{}', cer_number)".format(mobile_number, cer_number))
return success_return(USER_API_STATUS['CER_ISSUE_DONE'], cer_number)
else:
self.conn.execute("update tbuser set cer_number='{}' where mobile_number = '{}'".format(mobile_number, cer_number))
return success_return(USER_API_STATUS['CER_ISSUE_DONE'], cer_number)
except Exception as ex:
return failed_return(USER_API_STATUS['CER_ISSUE_FAILED'], str(ex))
def verify_cer_number(self, mobile_number, cer_number):
query = "select * from tbuser where mobile_number = '{}' and cer_number = '{}'".format(mobile_number, cer_number)
try:
user = self.conn.execute(query).fetchone()
if user is None:
return failed_return(USER_API_STATUS['CER_VERIFY_FAILED'])
else:
return success_return(USER_API_STATUS['CER_VERIFY_DONE'], {})
except Exception as ex:
return failed_return(USER_API_STATUS['CER_VERIFY_FAILED'], str(ex))
def add_user_zone(self, user_id, zone_code):
try:
self.conn.execute(insert_user_zone(user_id, zone_code))
except Exception as ex:
return failed_return(USER_API_STATUS['CER_VERIFY_FAILED'], str(ex))
return success_return(USER_API_STATUS['CREATE_USER_ZONE_DONE'], {})
|
StarcoderdataPython
|
3499462
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab
# Create a simple random network with K nodes a sparsity level of p
# Each event induces impulse responses of length dt_max on connected nodes
K = 3
p = 0.25
dt_max = 20
network_hypers = {"p": p, "allow_self_connections": False}
true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(
K=K, dt_max=dt_max, network_hypers=network_hypers)
# Generate T time bins of events from the model
# S is the TxK event count matrix, R is the TxK rate matrix
S,R = true_model.generate(T=100)
true_model.plot()
|
StarcoderdataPython
|
8107189
|
default_app_config = 'main.apps.MainConfig'
|
StarcoderdataPython
|
3250433
|
from collections import Counter
from operator import itemgetter
import os
class Beacon(object):
def __init__(self, x, y, z):
self.coords = ((1, x), (2, y), (3, z))
def __str__(self):
return str(self.coords)
def get_coords(self):
return self.coords
def apply_trns(self, coords, rotation):
new_coords = [0, 0, 0]
for i, axis_idx in enumerate(rotation):
cur_axis, cur_pos = self.coords[abs(axis_idx) - 1]
new_pos = -1 * cur_pos if axis_idx < 0 else cur_pos
new_coords[i] = (
cur_axis,
new_pos + coords[i],
)
self.coords = tuple(new_coords)
def gen_rotations(self):
used = set()
def rec():
if len(used) == len(self.coords):
return [()]
all = []
for i in range(len(self.coords)):
if i in used:
continue
used.add(i)
for item in rec():
all.append((self.coords[i],) + item)
all.append(((-self.coords[i][0], -self.coords[i][1]),) + item)
used.remove(i)
return all
return rec()
class Scanner(object):
def __init__(self, label):
self.label = label
self.beacons = []
self.translation_to = {}
self.rotations = {}
def add_beacon(self, x, y, z):
self.beacons.append(Beacon(x, y, z))
def apply_beacons_trns(self, coords, rot):
for beacon in self.beacons:
beacon.apply_trns(coords, rot)
def set_translation_to(self, other, offset_x, offset_y, offset_z, rot_id):
self.translation_to[other.label] = (
(
offset_x[0],
offset_y[0],
offset_z[0],
),
rot_id,
)
def gen_rotations(self):
all_rotations = [beacon.gen_rotations() for beacon in self.beacons]
for i in range(len(all_rotations[0])):
beacon_rotation = [rot[i] for rot in all_rotations]
basis = tuple(all_rotations[0][i][j][0] for j in range(3))
self.rotations[basis] = beacon_rotation
def find_offsets(first, second):
for rot_id, rotation in second.rotations.items():
delta_x = Counter()
delta_y = Counter()
delta_z = Counter()
for beacon_first in first.beacons:
for coords_other in rotation:
coords_first = beacon_first.get_coords()
delta_x[coords_first[0][1] - coords_other[0][1]] += 1
delta_y[coords_first[1][1] - coords_other[1][1]] += 1
delta_z[coords_first[2][1] - coords_other[2][1]] += 1
offset_x = delta_x.most_common(1)[0]
offset_y = delta_y.most_common(1)[0]
offset_z = delta_z.most_common(1)[0]
if offset_x[1] >= 12 and offset_y[1] >= 12 and offset_z[1] >= 12:
second.set_translation_to(first, offset_x, offset_y, offset_z, rot_id)
return
def traverse(origin, cur_beacon, beacon_map, visited):
if cur_beacon == origin:
return [[]]
visited.add(cur_beacon.label)
for label in cur_beacon.translation_to:
if label in visited:
continue
res = traverse(origin, beacon_map[label], beacon_map, visited)
if res:
return [cur_beacon.translation_to[label]] + res
return []
def origin_coords(trns_chains, scanner_map):
coords_in_origin = {}
for label, transforms in trns_chains.items():
coords = [i for i in transforms[0][0]]
coords_new = [0, 0, 0]
scanner = scanner_map[label]
# apply first transformation to beacons
translate, rotate = transforms[0]
scanner.apply_beacons_trns(translate, rotate)
# apply rest transformations
for translate, rotate in transforms[1:]:
scanner.apply_beacons_trns(translate, rotate)
for i, axis_idx in enumerate(rotate):
coords_new[i] = coords[abs(axis_idx) - 1]
if axis_idx < 0:
coords_new[i] *= -1
coords_new[i] += translate[i]
coords = coords_new[:]
coords_in_origin[label] = coords
return coords_in_origin
def silver(scanners):
for scanner in scanners:
scanner.gen_rotations()
for first in scanners:
for second in scanners:
if first == second:
continue
find_offsets(first, second)
origin = scanners[0]
scanner_map = {}
for scanner in scanners:
scanner_map[scanner.label] = scanner
translations = {}
# compute path from each scanner to zero-scanner
for scanner in scanners[1:]:
trns_chain = traverse(origin, scanner, scanner_map, set())
translations[scanner.label] = trns_chain[:-1]
# apply transformation from each scanner to zero-scanner (origin)
coords_in_origin = origin_coords(translations, scanner_map)
# find uniq beacons
uniq_beacons = set()
for scanner in scanners:
for beacon in scanner.beacons:
uniq_beacons.add(tuple(map(itemgetter(1), beacon.get_coords())))
return len(uniq_beacons), coords_in_origin
def manhattan(coords1, coords2):
return (
abs(coords1[0] - coords2[0])
+ abs(coords1[1] - coords2[1])
+ abs(coords1[2] - coords2[2])
)
def gold(coords_in_origin):
max_dist = 0
for first in coords_in_origin.values():
for second in coords_in_origin.values():
tmp = manhattan(first, second)
if tmp > max_dist:
max_dist = tmp
return max_dist
def parse(lines):
scanners = []
scanner = None
idx = 0
for line in lines:
line = line.strip()
if line == "":
continue
if line.startswith("---"):
if scanner:
scanners.append(scanner)
scanner = Scanner(idx)
idx += 1
else:
scanner.add_beacon(*list(map(int, line.split(","))))
scanners.append(scanner)
return scanners
def solve():
lines = open(os.path.join(os.path.dirname(__file__), "input"), "rt").readlines()
parsed = parse(lines)
silver_res, coords_in_origin = silver(parsed)
return "DAY19", silver_res, gold(coords_in_origin)
|
StarcoderdataPython
|
1668745
|
import argparse
from typing import Optional
import annofabcli.stat_visualization.mask_visualization_dir
import annofabcli.stat_visualization.merge_visualization_dir
import annofabcli.stat_visualization.write_performance_rating_csv
from annofabcli.stat_visualization import (
summarise_whole_performance_csv,
write_linegraph_per_user,
write_performance_scatter_per_user,
write_task_histogram,
write_whole_linegraph,
)
def parse_args(parser: argparse.ArgumentParser):
subparsers = parser.add_subparsers(dest="subcommand_name")
# Define the subcommands
annofabcli.stat_visualization.mask_visualization_dir.add_parser(subparsers)
annofabcli.stat_visualization.merge_visualization_dir.add_parser(subparsers)
summarise_whole_performance_csv.add_parser(subparsers)
write_linegraph_per_user.add_parser(subparsers)
write_performance_scatter_per_user.add_parser(subparsers)
annofabcli.stat_visualization.write_performance_rating_csv.add_parser(subparsers)
write_task_histogram.add_parser(subparsers)
write_whole_linegraph.add_parser(subparsers)
def add_parser(subparsers: Optional[argparse._SubParsersAction] = None):
subcommand_name = "stat_visualization"
subcommand_help = "`annofabcli statistics visualization` コマンドの出力結果を加工するサブコマンド(アルファ版)"
description = "`annofabcli statistics visualization` コマンドの出力結果を加工するサブコマンド(アルファ版)"
parser = annofabcli.common.cli.add_parser(
subparsers, subcommand_name, subcommand_help, description, is_subcommand=False
)
parse_args(parser)
return parser
|
StarcoderdataPython
|
5119655
|
import datetime
from cluster_vcf_records import vcf_file_read, vcf_record
import pyfastaq
from clockwork import utils
def _combine_minos_and_samtools_header(minos_header, samtools_header, ref_seqs):
header_start = [
"##fileformat=VCFv4.2",
"##source=clockwork merge samtools gvcf and minos vcf",
"##fileDate=" + str(datetime.date.today()),
]
new_header = [
'##FILTER=<ID=NO_DATA,Description="No information from minos or samtools">',
'##INFO=<ID=CALLER,Number=1,Description="Origin of call, one of minos, samtools, or none if there was no depth">',
]
new_header.extend(
[f"##contig=<ID={k},length={len(v)}>" for k, v in ref_seqs.items()]
)
exclude = [
"##fileformat",
"##fileDate",
"##minos_max_read_length",
"##contig",
"#CHROM",
]
for l in minos_header, samtools_header:
for line in l:
skip = False
for prefix in exclude:
if line.startswith(prefix):
skip = True
break
if not skip:
new_header.append(line.replace("##INFO", "##FORMAT"))
new_header.sort()
assert minos_header[-1].startswith("#CHROM\t")
new_header.append(minos_header[-1])
return header_start + new_header
def _move_info_fields_to_format(record):
"""Changes VCF record in place. Moves all the key/values in INFO column
into the FORMAT column"""
for k, v in sorted(record.INFO.items()):
# INFO is allowed to have type "Flag", where there is a key but
# no value - the key being present meaning the Flag is true.
# FORMAT isn't allowed Flags. The only time I have seen this is
# INDEL, but this shouldn't be here anyway because assumption is
# that samtools mpileup has been run with -I to switch off indel calling.
if v is not None:
record.set_format_key_value(k, v)
record.INFO = {}
def _print_no_data_vcf_record(ref_seq, position, filehandle):
print(
ref_seq.id,
position + 1,
".",
ref_seq[position],
".",
".",
"NO_DATA",
"CALLER=none",
"GT:DP",
"./.:0",
file=filehandle,
sep="\t",
)
def _get_minos_iter_and_record(minos_records, ref_seq_name):
if ref_seq_name in minos_records:
minos_iter = iter(minos_records[ref_seq_name])
minos_record = next(minos_iter)
return minos_iter, minos_record
else:
return None, None
def _update_minos_iter(minos_iter):
try:
minos_record = next(minos_iter)
except StopIteration:
minos_record = None
return minos_record
def _print_non_samtools_seqs(ref_seqs, used_ref_seqs, minos_records, filehandle):
for ref_seq_name, ref_seq in sorted(ref_seqs.items()):
if ref_seq_name in used_ref_seqs:
continue
ref_pos = 0
minos_iter, minos_record = _get_minos_iter_and_record(
minos_records, ref_seq_name
)
while ref_pos < len(ref_seq):
if minos_record is None or ref_pos < minos_record.POS:
_print_no_data_vcf_record(ref_seq, ref_pos, filehandle)
ref_pos += 1
else:
minos_record.INFO["CALLER"] = "minos"
print(minos_record, file=filehandle)
ref_pos = minos_record.ref_end_pos() + 1
minos_record = _update_minos_iter(minos_iter)
def _finish_contig(ref_pos, ref_seq, minos_record, minos_iter, filehandle):
while ref_pos < len(ref_seq):
if minos_record is not None and minos_record.POS == ref_pos:
minos_record.INFO["CALLER"] = "minos"
print(minos_record, file=filehandle)
ref_pos = minos_record.ref_end_pos() + 1
minos_record = _update_minos_iter(minos_iter)
else:
_print_no_data_vcf_record(ref_seq, ref_pos, filehandle)
ref_pos += 1
def gvcf_from_minos_vcf_and_samtools_gvcf(ref_fasta, minos_vcf, samtools_vcf, out_vcf):
minos_header, minos_records = vcf_file_read.vcf_file_to_dict(minos_vcf)
samtools_header = vcf_file_read.get_header_lines_from_vcf_file(samtools_vcf)
ref_seqs = {}
pyfastaq.tasks.file_to_dict(ref_fasta, ref_seqs)
used_ref_seqs = set()
ref_seq = None
ref_pos = -1
minos_record = None
with open(out_vcf, "w") as f_out, open(samtools_vcf) as f_samtools:
print(
*_combine_minos_and_samtools_header(
minos_header, samtools_header, ref_seqs
),
sep="\n",
file=f_out,
)
# Read the samtools VCF file line by line. It's huge, so don't want
# to load into memory. Within each CHROM in the samtools VCF, keep
# track of the current position in the CHROM, and the next minos record
# for that CHROM (if there is one). For each position in the ref genome,
# we want in order of preference to write one of:
# 1. minos record if there is one
# 2. samtools record if there is one
# 3. a "no data" record if position is not in minos or samtools records
# Not loading into memory and following where we are the ref genome, and
# in the list of minos records, makes this a bit fiddly.
for line in f_samtools:
if line.startswith("#"):
continue
samtools_record = vcf_record.VcfRecord(line)
# If we've found a new CHROM in the samtools VCF file
if ref_seq is None or ref_seq.id != samtools_record.CHROM:
if ref_seq is not None:
_finish_contig(ref_pos, ref_seq, minos_record, minos_iter, f_out)
ref_seq = ref_seqs[samtools_record.CHROM]
used_ref_seqs.add(ref_seq.id)
minos_iter, minos_record = _get_minos_iter_and_record(
minos_records, ref_seq.id
)
ref_pos = 0
if samtools_record.POS < ref_pos:
continue
# Fill in any missing gaps between current position and the samtools
# record using minos records if found, or if not then "no data" records.
while ref_pos < samtools_record.POS:
if minos_record is not None and ref_pos == minos_record.POS:
minos_record.INFO["CALLER"] = "minos"
print(minos_record, file=f_out)
ref_pos = minos_record.ref_end_pos() + 1
minos_record = _update_minos_iter(minos_iter)
else:
_print_no_data_vcf_record(ref_seq, ref_pos, f_out)
ref_pos += 1
# If there's a minos record, use it instead of samtools record.
while minos_record is not None and minos_record.POS <= samtools_record.POS:
minos_record.INFO["CALLER"] = "minos"
print(minos_record, file=f_out)
ref_pos = minos_record.ref_end_pos() + 1
minos_record = _update_minos_iter(minos_iter)
# If we haven't used a minos record, then current ref position is
# same as the samtools record, and we should use the samtools record
if ref_pos == samtools_record.POS:
_move_info_fields_to_format(samtools_record)
samtools_record.INFO = {"CALLER": "samtools"}
print(samtools_record, file=f_out)
ref_pos = samtools_record.POS + 1
_finish_contig(ref_pos, ref_seq, minos_record, minos_iter, f_out)
_print_non_samtools_seqs(ref_seqs, used_ref_seqs, minos_records, f_out)
def _samtools_vcf_record_to_frs(record, geno_index):
dp4 = [int(x) for x in record.FORMAT["DP4"].split(",")]
dp = sum(dp4)
if dp == 0:
return 0
elif geno_index == 0:
return (dp4[0] + dp4[1]) / dp
else:
return (dp4[2] + dp4[3]) / dp
def _vcf_record_pass_index(record, require_minos_pass=True, min_frs=0.9, min_dp=5):
"""If the VCF record passes the filters, then returns the genotype index
of the called allele (0=REF, 1=ALT1, 2=ALT2, ...etc). Returns
None if the record fails."""
geno_indexes = record.FORMAT["GT"].split("/")
if "." in geno_indexes or len(set(geno_indexes)) > 1:
return None
geno_index = int(geno_indexes[0])
alt = record.ALT[geno_index - 1]
if geno_index > 0 and len(alt) != len(record.REF) and alt[0] != record.REF[0]:
return None
elif int(record.FORMAT["DP"]) < min_dp:
return None
elif record.INFO["CALLER"] == "minos":
if (require_minos_pass and record.FILTER != {"PASS"}) or float(
record.FORMAT["FRS"]
) < min_frs:
return None
else:
return geno_index
else:
assert record.INFO["CALLER"] == "samtools"
if _samtools_vcf_record_to_frs(record, geno_index) >= min_frs:
return geno_index
else:
return None
def gvcf_to_fasta(gvcf_file, outfile, require_minos_pass=True, min_frs=0.9, min_dp=5):
sample = "unknown"
out_seqs = {}
expect_lengths = {}
with open(gvcf_file) as f:
for line in f:
if line.startswith("##"):
continue
elif line.startswith("#CHROM"):
sample = line.rstrip().split()[-1]
continue
record = vcf_record.VcfRecord(line)
expect_lengths[record.CHROM] = record.POS + 1
if record.CHROM not in out_seqs:
out_seqs[record.CHROM] = []
out_seq = out_seqs[record.CHROM]
geno_index = _vcf_record_pass_index(
record,
require_minos_pass=require_minos_pass,
min_frs=min_frs,
min_dp=min_dp,
)
if geno_index is None:
out_seq.extend("N" * len(record.REF))
elif geno_index == 0:
out_seq.extend(record.REF)
else:
alt = record.ALT[geno_index - 1]
# This is an indel or a complex variant. If the ref and alt
# are the same length, then we can drop in the alt allele. If not,
# VCF convention says that the nucleotide before the variant
# is included, and the first nucleotide of the ref and alt is
# the same. We can put that first one in the FASTA,
# then put the rest as Ns.
if len(alt) == len(record.REF):
out_seq.extend(alt)
elif len(alt) != len(record.REF) and alt[0] == record.REF[0]:
out_seq.extend(alt[0] + "N" * (len(record.REF) - 1))
else:
out_seq.extend("N" * len(record.REF))
with open(outfile, "w") as f:
for name, nucleotides in sorted(out_seqs.items()):
seq = pyfastaq.sequences.Fasta(f"{name}.{sample}", "".join(nucleotides))
assert len(seq) == expect_lengths[name]
print(seq, file=f)
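# --- Usage sketch (hypothetical file names, not from the original module): merge a
# --- minos VCF with a samtools gVCF, then write a consensus FASTA from the result.
if __name__ == "__main__":
    gvcf_from_minos_vcf_and_samtools_gvcf(
        "ref.fa", "minos.vcf", "samtools.gvcf.vcf", "merged.gvcf.vcf"
    )
    gvcf_to_fasta(
        "merged.gvcf.vcf", "consensus.fa",
        require_minos_pass=True, min_frs=0.9, min_dp=5
    )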
|
StarcoderdataPython
|
1714762
|
from rest_framework import viewsets
from django_filters import rest_framework as filters
from .models import Service
from .serializers import ServiceSerializer
from rest_framework.decorators import action
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
class ServiceFilter(filters.FilterSet):
id = filters.NumberFilter(field_name="id")
class Meta:
model = Service
fields = ['id']
class ServiceViewSet(viewsets.ModelViewSet):
queryset = Service.objects.all()
serializer_class = ServiceSerializer
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = ServiceFilter
# https://stackoverflow.com/questions/50425262/django-rest-framework-pass-extra-parameter-to-actions
@action(detail=True, methods=['post'],
url_path='add_manager/(?P<manager_pk>[^/.]+)')
def add_manager(self, request, manager_pk, pk=None):
"""
TODO:
restrict access to only certain admin levels
"""
profile = self.get_object()
manager = get_object_or_404(User, pk=manager_pk)
profile.managers.add(manager)
profile.save()
return Response("success")
@action(detail=True, methods=['post'],
url_path='remove_manager/(?P<manager_pk>[^/.]+)')
def remove_manager(self, request, manager_pk, pk=None):
"""
TODO:
restrict access to only certain admin levels
"""
profile = self.get_object()
manager = get_object_or_404(User, pk=manager_pk)
profile.managers.remove(manager)
profile.save()
return Response("success")
|
StarcoderdataPython
|
11208789
|
import os
import numpy as np
from itertools import product
from Bio import SwissProt
from unittest import TestCase
from ..database import create_session, delete_database, cleanup_database
from ..database.models import Interaction, Protein
from ..database.utilities import create_interaction
from ..database.exceptions import ObjectAlreadyExists
from ..data_mining.uniprot import parse_record_into_protein
from ..data_mining.features import compute_interaction_features
from ..data_mining.ontology import get_active_instance
from ..models.binary_relevance import MixedBinaryRelevanceClassifier
from ..predict import _check_classifier_and_selection
from ..predict import _update_missing_protein_map
from ..predict import _create_missing_interactions
from ..predict import classify_interactions
from ..predict.utilities import load_dataset, DEFAULT_SELECTION
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
base_path = os.path.dirname(__file__)
db_path = os.path.normpath("{}/databases/test.db".format(base_path))
test_obo_file = '{}/{}'.format(base_path, "test_data/test_go.obo.gz")
dag = get_active_instance(filename=test_obo_file)
class TestCheckClassifierAndSelection(TestCase):
def test_valueerror_custom_classifier_no_selection(self):
with self.assertRaises(ValueError):
_check_classifier_and_selection(classifier=1, selection=None)
def test_valueerror_invalid_selection(self):
with self.assertRaises(ValueError):
_check_classifier_and_selection(classifier=1, selection=['1'])
class TestUpdateMissingProteinMap(TestCase):
def setUp(self):
self.session, self.engine = create_session(db_path)
self.p1 = Protein(uniprot_id='A', taxon_id=9606, reviewed=True)
self.p2 = Protein(uniprot_id='B', taxon_id=1, reviewed=True)
self.p1.save(self.session, commit=True)
self.p2.save(self.session, commit=True)
def tearDown(self):
delete_database(self.session)
cleanup_database(self.session, self.engine)
def test_adds_new_proteins_to_map(self):
ppis = [('A', 'A'), ('A', 'P31946')]
pm = _update_missing_protein_map(ppis, self.session)
expected = {
'A': self.p1,
'P31946': Protein.query.get(3)
}
self.assertEqual(expected, pm)
def test_adds_invalid_proteins_as_none(self):
ppis = [('A', 'A'), ('A', 'P3194')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
'P3194': None
}
self.assertEqual(expected, pm)
def test_proteins_different_taxonid_added_as_none(self):
ppis = [('A', 'A'), ('B', 'A')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
'B': None
}
self.assertEqual(expected, pm)
ppis = [('A', 'A'), ('Q3TYD4', 'A')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
'Q3TYD4': None
}
self.assertEqual(expected, pm)
def test_ignores_taxonid_if_none(self):
ppis = [('A', 'A'), ('B', 'A')]
pm = _update_missing_protein_map(
ppis, self.session, taxon_id=None, verbose=False)
expected = {
'A': self.p1,
'B': self.p2
}
self.assertEqual(expected, pm)
ppis = [('A', 'A'), ('Q3TYD4', 'A')]
pm = _update_missing_protein_map(
ppis, self.session, taxon_id=None, verbose=False)
expected = {
'A': self.p1,
'Q3TYD4': Protein.query.get(3)
}
self.assertEqual(expected, pm)
def test_ignores_none_input(self):
ppis = [(None, 'A')]
pm = _update_missing_protein_map(ppis, self.session, verbose=False)
expected = {
'A': self.p1,
}
self.assertEqual(expected, pm)
class TestCreateMissingInteractions(TestCase):
def setUp(self):
self.session, self.engine = create_session(db_path)
delete_database(self.session)
self.p1 = Protein(uniprot_id='A', taxon_id=9606, reviewed=True)
self.p2 = Protein(uniprot_id='B', taxon_id=9606, reviewed=True)
self.p3 = Protein(uniprot_id='C', taxon_id=0, reviewed=True)
self.p4 = Protein(uniprot_id='D', taxon_id=0, reviewed=True)
self.p1.save(self.session, commit=True)
self.p2.save(self.session, commit=True)
self.p3.save(self.session, commit=True)
self.p4.save(self.session, commit=True)
self.i1 = Interaction(source=self.p1, target=self.p2)
self.i1.save(session=self.session, commit=True)
self.p_map = {p.uniprot_id: p for p in Protein.query.all()}
def tearDown(self):
delete_database(self.session)
cleanup_database(self.session, self.engine)
def test_existing_interactions_returned_and_invalid_is_empty_list(self):
valid, invalid = _create_missing_interactions(
ppis=[('A', 'B')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [self.i1])
self.assertEqual(invalid, [])
def test_interaction_with_none_source_added_to_invalid(self):
valid, invalid = _create_missing_interactions(
ppis=[(None, 'B')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [(None, 'B')])
def test_interaction_with_none_target_added_to_invalid(self):
valid, invalid = _create_missing_interactions(
ppis=[('A', None)],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('A', None)])
def test_interaction_with_differing_taxonid_added_to_invalid(self):
valid, invalid = _create_missing_interactions(
ppis=[('C', 'D')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('C', 'D')])
valid, invalid = _create_missing_interactions(
ppis=[('C', 'A')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('C', 'A')])
valid, invalid = _create_missing_interactions(
ppis=[('A', 'D')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [])
self.assertEqual(invalid, [('A', 'D')])
def test_new_interactions_created(self):
valid, invalid = _create_missing_interactions(
ppis=[('A', 'A')],
protein_map=self.p_map,
session=self.session
)
self.assertEqual(valid, [Interaction.query.get(2)])
self.assertEqual(invalid, [])
class TestMakePredictions(TestCase):
# This class implicitly also tests parse_predictions since
# make_predictions is essentially a wrapper for parse_predictions
# TODO: Separate these tests.
def setUp(self):
self.records = open(os.path.normpath(
"{}/test_data/test_sprot_records.dat".format(base_path)
), 'rt')
self.session, self.engine = create_session(db_path)
delete_database(self.session)
self.proteins = []
for record in SwissProt.parse(self.records):
protein = parse_record_into_protein(record)
protein.save(self.session, commit=True)
self.proteins.append(protein)
self.labels = ['Activation', 'Inhibition', 'Acetylation']
self.interactions = []
for protein_a, protein_b in product(self.proteins, self.proteins):
class_kwargs = compute_interaction_features(protein_a, protein_b)
label = '{},{}'.format(
self.labels[protein_a.id - 1],
self.labels[protein_b.id - 1]
)
try:
interaction = create_interaction(
protein_a, protein_b, labels=label, session=self.session,
verbose=False, save=True, commit=True, **class_kwargs
)
self.interactions.append(interaction)
except ObjectAlreadyExists:
continue
self.X, self.y, _ = load_dataset(
self.interactions, self.labels, selection=DEFAULT_SELECTION
)
base = Pipeline(steps=[
('vectorizer', CountVectorizer(
lowercase=False, stop_words=[':', 'GO'])),
('estimator', LogisticRegression(random_state=0))
])
self.clf = MixedBinaryRelevanceClassifier(
[clone(base) for _ in range(len(self.labels))]
)
self.clf.fit(self.X, self.y)
def tearDown(self):
delete_database(self.session)
cleanup_database(self.session, self.engine)
self.records.close()
def test_can_make_proba_predictions_on_existing_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_can_make_binary_predictions_on_existing_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=False, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_can_make_predictions_on_list_of_interaction_objects(self):
ppis = self.interactions
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_ignores_None_or_not_interaction_objects(self):
ppis = [self.interactions[0], None, '1']
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba([self.X[0]]), [None, '1']
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_returns_empty_list_no_valid_interactions(self):
ppis = [(1, 2), (1, 2, 3), None, '1']
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = ([], [(1, 2), (1, 2, 3), None, '1'])
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_typeerror_first_elem_not_interaction_or_tuple(self):
with self.assertRaises(TypeError):
ppis = [1, None, '1']
classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
def test_creates_new_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
delete_database(self.session)
classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
self.assertEqual(Interaction.query.count(), 6)
def test_removed_duplicate_interactions_interactions(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
ppis.append(ppis[0])
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_invalid_ppis_added_to_invalid(self):
ppis = [('A', 'B'), ('Q04917', 'X')]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = ([], ppis)
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_non_matching_taxonid_of_existing_ppis_added_to_invalid(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=0, verbose=False, session=self.session
)
expected = ([], ppis)
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_non_matching_taxonid_of_new_ppis_added_to_invalid(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
delete_database(self.session)
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=0, verbose=False, session=self.session
)
expected = ([], ppis)
self.assertEqual(predictions[0], expected[0])
self.assertEqual(predictions[1], expected[1])
def test_taxonid_ignored_if_None(self):
ppis = [
(
Protein.query.get(i.source).uniprot_id,
Protein.query.get(i.target).uniprot_id
)
for i in self.interactions
]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=None, verbose=False, session=self.session
)
expected = (
self.clf.predict_proba(self.X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_ignores_duplicate_entries(self):
ppi_1 = (
Protein.query.get(self.interactions[0].source).uniprot_id,
Protein.query.get(self.interactions[0].target).uniprot_id
)
ppi_2 = (
Protein.query.get(self.interactions[0].target).uniprot_id,
Protein.query.get(self.interactions[0].source).uniprot_id
)
ppis = [ppi_1, ppi_2]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
X, _, _ = load_dataset(
[self.interactions[0]], self.labels, selection=DEFAULT_SELECTION
)
expected = (
self.clf.predict_proba(X), []
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_multiple_choice_uniprot_ids_get_put_in_invalid(self):
ppis = [('Q8NDH8', 'P0CG12')]
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
expected = ([], [('P0CG12', 'Q8NDH8')])
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
def test_outdated_accessions_map_to_most_recent_entries(self):
ppis = [('A8K9K2', 'A8K9K2')] # maps to P31946
entry = Protein.get_by_uniprot_id('P31946')
interaction = Interaction.get_by_interactors(entry, entry)
predictions = classify_interactions(
ppis, proba=True, classifier=self.clf, selection=DEFAULT_SELECTION,
taxon_id=9606, verbose=False, session=self.session
)
X, _, _ = load_dataset(
[interaction], self.labels, selection=DEFAULT_SELECTION
)
expected = (
self.clf.predict_proba(X), [], {'A8K9K2': 'P31946'}
)
self.assertTrue(np.all(np.isclose(predictions[0], expected[0])))
self.assertEqual(predictions[1], expected[1])
self.assertEqual(predictions[2], expected[2])
|
StarcoderdataPython
|
3256036
|
# Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.dump.functions.account_functions import DUMP_PARTIAL as DP_F_ACCOUNT_FUNCTIONS
from to_python.dump.functions.acl_functions import DUMP_PARTIAL as DP_F_ACL_FUNCTIONS
from to_python.dump.functions.admin_functions import DUMP_PARTIAL as DP_F_ADMIN_FUNCTIONS
from to_python.dump.functions.announcement_functions import DUMP_PARTIAL as DP_F_ANNOUNCEMENT_FUNCTIONS
from to_python.dump.functions.audio_functions import DUMP_PARTIAL as DP_F_AUDIO_FUNCTIONS
from to_python.dump.functions.blip_functions import DUMP_PARTIAL as DP_F_BLIP_FUNCTIONS
from to_python.dump.functions.browser_functions import DUMP_PARTIAL as DP_F_BROWSER_FUNCTIONS
from to_python.dump.functions.camera_functions import DUMP_PARTIAL as DP_F_CAMERA_FUNCTIONS
from to_python.dump.functions.clothes_and_body_functions import DUMP_PARTIAL as DP_F_CLOTHES_AND_BODY_FUNCTIONS
from to_python.dump.functions.collision_shape_functions import DUMP_PARTIAL as DP_F_COLLISION_SHAPE_FUNCTIONS
from to_python.dump.functions.cursor_functions import DUMP_PARTIAL as DP_F_CURSOR_FUNCTIONS
from to_python.dump.functions.drawing_functions import DUMP_PARTIAL as DP_F_DRAWING_FUNCTIONS
from to_python.dump.functions.effects_functions import DUMP_PARTIAL as DP_F_EFFECTS_FUNCTIONS
from to_python.dump.functions.element_functions import DUMP_PARTIAL as DP_F_ELEMENT_FUNCTIONS
from to_python.dump.functions.engine_functions import DUMP_PARTIAL as DP_F_ENGINE_FUNCTIONS
from to_python.dump.functions.event_functions import DUMP_PARTIAL as DP_F_EVENT_FUNCTIONS
from to_python.dump.functions.explosion_functions import DUMP_PARTIAL as DP_F_EXPLOSION_FUNCTIONS
from to_python.dump.functions.file_functions import DUMP_PARTIAL as DP_F_FILE_FUNCTIONS
from to_python.dump.functions.fire_functions import DUMP_PARTIAL as DP_F_FIRE_FUNCTIONS
from to_python.dump.functions.gui_functions import DUMP_PARTIAL as DP_F_GUI_FUNCTIONS
from to_python.dump.functions.input_functions import DUMP_PARTIAL as DP_F_INPUT_FUNCTIONS
from to_python.dump.functions.light_functions import DUMP_PARTIAL as DP_F_LIGHT_FUNCTIONS
from to_python.dump.functions.map_functions import DUMP_PARTIAL as DP_F_MAP_FUNCTIONS
from to_python.dump.functions.marker_functions import DUMP_PARTIAL as DP_F_MARKER_FUNCTIONS
from to_python.dump.functions.module_functions import DUMP_PARTIAL as DP_F_MODULE_FUNCTIONS
from to_python.dump.functions.object_functions import DUMP_PARTIAL as DP_F_OBJECT_FUNCTIONS
from to_python.dump.functions.output_functions import DUMP_PARTIAL as DP_F_OUTPUT_FUNCTIONS
from to_python.dump.functions.ped_functions import DUMP_PARTIAL as DP_F_PED_FUNCTIONS
from to_python.dump.functions.pickup_functions import DUMP_PARTIAL as DP_F_PICKUP_FUNCTIONS
from to_python.dump.functions.player_functions import DUMP_PARTIAL as DP_F_PLAYER_FUNCTIONS
from to_python.dump.functions.projectile_functions import DUMP_PARTIAL as DP_F_PROJECTILE_FUNCTIONS
from to_python.dump.functions.radar_area_functions import DUMP_PARTIAL as DP_F_RADAR_AREA_FUNCTIONS
from to_python.dump.functions.resource_functions import DUMP_PARTIAL as DP_F_RESOURCE_FUNCTIONS
from to_python.dump.functions.searchlight_functions import DUMP_PARTIAL as DP_F_SEARCHLIGHT_FUNCTIONS
from to_python.dump.functions.server_functions import DUMP_PARTIAL as DP_F_SERVER_FUNCTIONS
from to_python.dump.functions.settings_registry_functions import DUMP_PARTIAL as DP_F_SETTINGS_REGISTRY_FUNCTIONS
from to_python.dump.functions.sql_functions import DUMP_PARTIAL as DP_F_SQL_FUNCTIONS
from to_python.dump.functions.svg_functions import DUMP_PARTIAL as DP_F_SVG_FUNCTIONS
from to_python.dump.functions.team_functions import DUMP_PARTIAL as DP_F_TEAM_FUNCTIONS
from to_python.dump.functions.text_functions import DUMP_PARTIAL as DP_F_TEXT_FUNCTIONS
from to_python.dump.functions.utf8_library import DUMP_PARTIAL as DP_F_UTF8_LIBRARY
from to_python.dump.functions.utility_functions import DUMP_PARTIAL as DP_F_UTILITY_FUNCTIONS
from to_python.dump.functions.vehicle_functions import DUMP_PARTIAL as DP_F_VEHICLE_FUNCTIONS
from to_python.dump.functions.water_functions import DUMP_PARTIAL as DP_F_WATER_FUNCTIONS
from to_python.dump.functions.weapon_creation_functions import DUMP_PARTIAL as DP_F_WEAPON_CREATION_FUNCTIONS
from to_python.dump.functions.weapon_functions import DUMP_PARTIAL as DP_F_WEAPON_FUNCTIONS
from to_python.dump.functions.world_functions import DUMP_PARTIAL as DP_F_WORLD_FUNCTIONS
from to_python.dump.functions.xml_functions import DUMP_PARTIAL as DP_F_XML_FUNCTIONS
DUMP_FUNCTIONS = [
*DP_F_ACCOUNT_FUNCTIONS,
*DP_F_ACL_FUNCTIONS,
*DP_F_ADMIN_FUNCTIONS,
*DP_F_ANNOUNCEMENT_FUNCTIONS,
*DP_F_AUDIO_FUNCTIONS,
*DP_F_BLIP_FUNCTIONS,
*DP_F_BROWSER_FUNCTIONS,
*DP_F_CAMERA_FUNCTIONS,
*DP_F_CLOTHES_AND_BODY_FUNCTIONS,
*DP_F_COLLISION_SHAPE_FUNCTIONS,
*DP_F_CURSOR_FUNCTIONS,
*DP_F_DRAWING_FUNCTIONS,
*DP_F_EFFECTS_FUNCTIONS,
*DP_F_ELEMENT_FUNCTIONS,
*DP_F_ENGINE_FUNCTIONS,
*DP_F_EVENT_FUNCTIONS,
*DP_F_EXPLOSION_FUNCTIONS,
*DP_F_FILE_FUNCTIONS,
*DP_F_FIRE_FUNCTIONS,
*DP_F_GUI_FUNCTIONS,
*DP_F_INPUT_FUNCTIONS,
*DP_F_LIGHT_FUNCTIONS,
*DP_F_MAP_FUNCTIONS,
*DP_F_MARKER_FUNCTIONS,
*DP_F_MODULE_FUNCTIONS,
*DP_F_OBJECT_FUNCTIONS,
*DP_F_OUTPUT_FUNCTIONS,
*DP_F_PED_FUNCTIONS,
*DP_F_PICKUP_FUNCTIONS,
*DP_F_PLAYER_FUNCTIONS,
*DP_F_PROJECTILE_FUNCTIONS,
*DP_F_RADAR_AREA_FUNCTIONS,
*DP_F_RESOURCE_FUNCTIONS,
*DP_F_SEARCHLIGHT_FUNCTIONS,
*DP_F_SERVER_FUNCTIONS,
*DP_F_SETTINGS_REGISTRY_FUNCTIONS,
*DP_F_SQL_FUNCTIONS,
*DP_F_SVG_FUNCTIONS,
*DP_F_TEAM_FUNCTIONS,
*DP_F_TEXT_FUNCTIONS,
*DP_F_UTF8_LIBRARY,
*DP_F_UTILITY_FUNCTIONS,
*DP_F_VEHICLE_FUNCTIONS,
*DP_F_WATER_FUNCTIONS,
*DP_F_WEAPON_CREATION_FUNCTIONS,
*DP_F_WEAPON_FUNCTIONS,
*DP_F_WORLD_FUNCTIONS,
*DP_F_XML_FUNCTIONS
]
from to_python.dump.oops.account_functions import DUMP_PARTIAL as DP_O_ACCOUNT_FUNCTIONS
from to_python.dump.oops.acl_functions import DUMP_PARTIAL as DP_O_ACL_FUNCTIONS
from to_python.dump.oops.admin_functions import DUMP_PARTIAL as DP_O_ADMIN_FUNCTIONS
from to_python.dump.oops.announcement_functions import DUMP_PARTIAL as DP_O_ANNOUNCEMENT_FUNCTIONS
from to_python.dump.oops.audio_functions import DUMP_PARTIAL as DP_O_AUDIO_FUNCTIONS
from to_python.dump.oops.blip_functions import DUMP_PARTIAL as DP_O_BLIP_FUNCTIONS
from to_python.dump.oops.browser_functions import DUMP_PARTIAL as DP_O_BROWSER_FUNCTIONS
from to_python.dump.oops.camera_functions import DUMP_PARTIAL as DP_O_CAMERA_FUNCTIONS
from to_python.dump.oops.clothes_and_body_functions import DUMP_PARTIAL as DP_O_CLOTHES_AND_BODY_FUNCTIONS
from to_python.dump.oops.collision_shape_functions import DUMP_PARTIAL as DP_O_COLLISION_SHAPE_FUNCTIONS
from to_python.dump.oops.cursor_functions import DUMP_PARTIAL as DP_O_CURSOR_FUNCTIONS
from to_python.dump.oops.drawing_functions import DUMP_PARTIAL as DP_O_DRAWING_FUNCTIONS
from to_python.dump.oops.effects_functions import DUMP_PARTIAL as DP_O_EFFECTS_FUNCTIONS
from to_python.dump.oops.element_functions import DUMP_PARTIAL as DP_O_ELEMENT_FUNCTIONS
from to_python.dump.oops.engine_functions import DUMP_PARTIAL as DP_O_ENGINE_FUNCTIONS
from to_python.dump.oops.event_functions import DUMP_PARTIAL as DP_O_EVENT_FUNCTIONS
from to_python.dump.oops.explosion_functions import DUMP_PARTIAL as DP_O_EXPLOSION_FUNCTIONS
from to_python.dump.oops.file_functions import DUMP_PARTIAL as DP_O_FILE_FUNCTIONS
from to_python.dump.oops.fire_functions import DUMP_PARTIAL as DP_O_FIRE_FUNCTIONS
from to_python.dump.oops.gui_functions import DUMP_PARTIAL as DP_O_GUI_FUNCTIONS
from to_python.dump.oops.input_functions import DUMP_PARTIAL as DP_O_INPUT_FUNCTIONS
from to_python.dump.oops.light_functions import DUMP_PARTIAL as DP_O_LIGHT_FUNCTIONS
from to_python.dump.oops.map_functions import DUMP_PARTIAL as DP_O_MAP_FUNCTIONS
from to_python.dump.oops.marker_functions import DUMP_PARTIAL as DP_O_MARKER_FUNCTIONS
from to_python.dump.oops.module_functions import DUMP_PARTIAL as DP_O_MODULE_FUNCTIONS
from to_python.dump.oops.object_functions import DUMP_PARTIAL as DP_O_OBJECT_FUNCTIONS
from to_python.dump.oops.output_functions import DUMP_PARTIAL as DP_O_OUTPUT_FUNCTIONS
from to_python.dump.oops.ped_functions import DUMP_PARTIAL as DP_O_PED_FUNCTIONS
from to_python.dump.oops.pickup_functions import DUMP_PARTIAL as DP_O_PICKUP_FUNCTIONS
from to_python.dump.oops.player_functions import DUMP_PARTIAL as DP_O_PLAYER_FUNCTIONS
from to_python.dump.oops.projectile_functions import DUMP_PARTIAL as DP_O_PROJECTILE_FUNCTIONS
from to_python.dump.oops.radar_area_functions import DUMP_PARTIAL as DP_O_RADAR_AREA_FUNCTIONS
from to_python.dump.oops.resource_functions import DUMP_PARTIAL as DP_O_RESOURCE_FUNCTIONS
from to_python.dump.oops.searchlight_functions import DUMP_PARTIAL as DP_O_SEARCHLIGHT_FUNCTIONS
from to_python.dump.oops.server_functions import DUMP_PARTIAL as DP_O_SERVER_FUNCTIONS
from to_python.dump.oops.settings_registry_functions import DUMP_PARTIAL as DP_O_SETTINGS_REGISTRY_FUNCTIONS
from to_python.dump.oops.sql_functions import DUMP_PARTIAL as DP_O_SQL_FUNCTIONS
from to_python.dump.oops.svg_functions import DUMP_PARTIAL as DP_O_SVG_FUNCTIONS
from to_python.dump.oops.team_functions import DUMP_PARTIAL as DP_O_TEAM_FUNCTIONS
from to_python.dump.oops.text_functions import DUMP_PARTIAL as DP_O_TEXT_FUNCTIONS
from to_python.dump.oops.utf8_library import DUMP_PARTIAL as DP_O_UTF8_LIBRARY
from to_python.dump.oops.utility_functions import DUMP_PARTIAL as DP_O_UTILITY_FUNCTIONS
from to_python.dump.oops.vehicle_functions import DUMP_PARTIAL as DP_O_VEHICLE_FUNCTIONS
from to_python.dump.oops.water_functions import DUMP_PARTIAL as DP_O_WATER_FUNCTIONS
from to_python.dump.oops.weapon_creation_functions import DUMP_PARTIAL as DP_O_WEAPON_CREATION_FUNCTIONS
from to_python.dump.oops.weapon_functions import DUMP_PARTIAL as DP_O_WEAPON_FUNCTIONS
from to_python.dump.oops.world_functions import DUMP_PARTIAL as DP_O_WORLD_FUNCTIONS
from to_python.dump.oops.xml_functions import DUMP_PARTIAL as DP_O_XML_FUNCTIONS
DUMP_OOPS = [
*DP_O_ACCOUNT_FUNCTIONS,
*DP_O_ACL_FUNCTIONS,
*DP_O_ADMIN_FUNCTIONS,
*DP_O_ANNOUNCEMENT_FUNCTIONS,
*DP_O_AUDIO_FUNCTIONS,
*DP_O_BLIP_FUNCTIONS,
*DP_O_BROWSER_FUNCTIONS,
*DP_O_CAMERA_FUNCTIONS,
*DP_O_CLOTHES_AND_BODY_FUNCTIONS,
*DP_O_COLLISION_SHAPE_FUNCTIONS,
*DP_O_CURSOR_FUNCTIONS,
*DP_O_DRAWING_FUNCTIONS,
*DP_O_EFFECTS_FUNCTIONS,
*DP_O_ELEMENT_FUNCTIONS,
*DP_O_ENGINE_FUNCTIONS,
*DP_O_EVENT_FUNCTIONS,
*DP_O_EXPLOSION_FUNCTIONS,
*DP_O_FILE_FUNCTIONS,
*DP_O_FIRE_FUNCTIONS,
*DP_O_GUI_FUNCTIONS,
*DP_O_INPUT_FUNCTIONS,
*DP_O_LIGHT_FUNCTIONS,
*DP_O_MAP_FUNCTIONS,
*DP_O_MARKER_FUNCTIONS,
*DP_O_MODULE_FUNCTIONS,
*DP_O_OBJECT_FUNCTIONS,
*DP_O_OUTPUT_FUNCTIONS,
*DP_O_PED_FUNCTIONS,
*DP_O_PICKUP_FUNCTIONS,
*DP_O_PLAYER_FUNCTIONS,
*DP_O_PROJECTILE_FUNCTIONS,
*DP_O_RADAR_AREA_FUNCTIONS,
*DP_O_RESOURCE_FUNCTIONS,
*DP_O_SEARCHLIGHT_FUNCTIONS,
*DP_O_SERVER_FUNCTIONS,
*DP_O_SETTINGS_REGISTRY_FUNCTIONS,
*DP_O_SQL_FUNCTIONS,
*DP_O_SVG_FUNCTIONS,
*DP_O_TEAM_FUNCTIONS,
*DP_O_TEXT_FUNCTIONS,
*DP_O_UTF8_LIBRARY,
*DP_O_UTILITY_FUNCTIONS,
*DP_O_VEHICLE_FUNCTIONS,
*DP_O_WATER_FUNCTIONS,
*DP_O_WEAPON_CREATION_FUNCTIONS,
*DP_O_WEAPON_FUNCTIONS,
*DP_O_WORLD_FUNCTIONS,
*DP_O_XML_FUNCTIONS
]
from to_python.dump.events.account_events import DUMP_PARTIAL as DP_E_ACCOUNT_EVENTS
from to_python.dump.events.browser_events import DUMP_PARTIAL as DP_E_BROWSER_EVENTS
from to_python.dump.events.client_events import DUMP_PARTIAL as DP_E_CLIENT_EVENTS
from to_python.dump.events.colshape_events import DUMP_PARTIAL as DP_E_COLSHAPE_EVENTS
from to_python.dump.events.element_events import DUMP_PARTIAL as DP_E_ELEMENT_EVENTS
from to_python.dump.events.input_events import DUMP_PARTIAL as DP_E_INPUT_EVENTS
from to_python.dump.events.marker_events import DUMP_PARTIAL as DP_E_MARKER_EVENTS
from to_python.dump.events.object_events import DUMP_PARTIAL as DP_E_OBJECT_EVENTS
from to_python.dump.events.other_events import DUMP_PARTIAL as DP_E_OTHER_EVENTS
from to_python.dump.events.ped_events import DUMP_PARTIAL as DP_E_PED_EVENTS
from to_python.dump.events.pickup_events import DUMP_PARTIAL as DP_E_PICKUP_EVENTS
from to_python.dump.events.player_events import DUMP_PARTIAL as DP_E_PLAYER_EVENTS
from to_python.dump.events.projectile_events import DUMP_PARTIAL as DP_E_PROJECTILE_EVENTS
from to_python.dump.events.resource_events import DUMP_PARTIAL as DP_E_RESOURCE_EVENTS
from to_python.dump.events.server_events import DUMP_PARTIAL as DP_E_SERVER_EVENTS
from to_python.dump.events.sound_events import DUMP_PARTIAL as DP_E_SOUND_EVENTS
from to_python.dump.events.vehicle_events import DUMP_PARTIAL as DP_E_VEHICLE_EVENTS
from to_python.dump.events.weapon_creation_events import DUMP_PARTIAL as DP_E_WEAPON_CREATION_EVENTS
from to_python.dump.events.weapon_events import DUMP_PARTIAL as DP_E_WEAPON_EVENTS
DUMP_EVENTS = [
*DP_E_ACCOUNT_EVENTS,
*DP_E_BROWSER_EVENTS,
*DP_E_CLIENT_EVENTS,
*DP_E_COLSHAPE_EVENTS,
*DP_E_ELEMENT_EVENTS,
*DP_E_INPUT_EVENTS,
*DP_E_MARKER_EVENTS,
*DP_E_OBJECT_EVENTS,
*DP_E_OTHER_EVENTS,
*DP_E_PED_EVENTS,
*DP_E_PICKUP_EVENTS,
*DP_E_PLAYER_EVENTS,
*DP_E_PROJECTILE_EVENTS,
*DP_E_RESOURCE_EVENTS,
*DP_E_SERVER_EVENTS,
*DP_E_SOUND_EVENTS,
*DP_E_VEHICLE_EVENTS,
*DP_E_WEAPON_CREATION_EVENTS,
*DP_E_WEAPON_EVENTS
]
|
StarcoderdataPython
|
11277067
|
<reponame>Pendragonek/bionic
"""Resources tests"""
|
StarcoderdataPython
|
5142235
|
# jsb/plugs/common/twitter.py
#
#
""" a twitter plugin for the JSONBOT, currently post only .. uses tweepy oauth. """
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.pdol import Pdol
from jsb.utils.textutils import html_unescape
from jsb.utils.generic import waitforqueue, strippedtxt, splittxt
from jsb.lib.persist import PlugPersist
from jsb.utils.twitter import get_users, twitterapi, twittertoken, getcreds, getauth
from jsb.lib.datadir import getdatadir
from jsb.lib.jsbimport import _import_byfile
from jsb.utils.twitter import twitter_out
## tweppy imports
from jsb.contrib import tweepy
## basic imports
import os
import urllib2
import types
import logging
## twitter command
def handle_twitter(bot, ievent):
""" arguments: <txt> - send a twitter message. """
creds = getcreds()
from jsb.utils.twitter import go
if not go: ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data/config dir. see .jsb/data/examples") ; return
txt = ievent.rest
if ievent.ispipelined:
txt = ievent.wait()
if not txt: ievent.missing('<txt>') ; return
else:
if ievent.chan:
taglist = ievent.chan.data.taglist
if taglist:
for tag in taglist:
txt += " %s" % tag
twitter_out(ievent.user.data.name, txt, ievent) ; ievent.reply("tweet posted")
cmnds.add('twitter', handle_twitter, ['USER', 'GUEST'])
examples.add('twitter', 'posts a message on twitter', 'twitter just found the http://jsonbot.org project')
## twitter-cmnd command
def handle_twittercmnd(bot, ievent):
""" arguments: <API cmnd> - do a twitter API cmommand. """
if not go: ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data//config dir. see .jsb/data/examples") ; return
if not ievent.args: ievent.missing('<API cmnd>') ; return
target = strippedtxt(ievent.args[0])
try:
from jsb.utils.twitter import get_token
token = get_token(ievent.user.data.name)
if not token: ievent.reply("you are not logged in yet .. run the twitter-auth command.") ; return
key, secret = getcreds(getdatadir())
token = tweepy.oauth.OAuthToken(key, secret).from_string(token)
twitter = twitterapi(key, secret, token)
cmndlist = dir(twitter)
cmnds = []
for cmnd in cmndlist:
if cmnd.startswith("_") or cmnd == "auth": continue
else: cmnds.append(cmnd)
if target not in cmnds: ievent.reply("choose one of: %s" % ", ".join(cmnds)) ; return
try: method = getattr(twitter, target)
except AttributeError: ievent.reply("choose one of: %s" % ", ".join(cmnds)) ; return
result = method()
res = []
for item in result:
try: res.append("%s - %s" % (item.screen_name, item.text))
except AttributeError:
try: res.append("%s - %s" % (item.screen_name, item.description))
except AttributeError:
try: res.append(unicode(item.__getstate__()))
                    except AttributeError: res.append(dir(item)) ; res.append(unicode(item))
ievent.reply("result of %s: " % target, res)
except KeyError: ievent.reply('you are not logged in yet. see the twitter-auth command.')
except (tweepy.TweepError, urllib2.HTTPError), e: ievent.reply('twitter failed: %s' % (str(e),))
cmnds.add('twitter-cmnd', handle_twittercmnd, 'OPER')
examples.add('twitter-cmnd', 'do a cmnd on the twitter API', 'twitter-cmnd home_timeline')
## twitter-confirm command
def handle_twitter_confirm(bot, ievent):
""" arguments: <PIN code> - confirm auth with PIN. """
from jsb.utils.twitter import go
if not go: ievent.reply("the twitter plugin needs the credentials.py file in the %s/config dir. see .jsb/data/examples" % getdatadir()) ; return
pin = ievent.args[0]
if not pin: ievent.missing("<PIN> .. see the twitter-auth command.") ; return
try: access_token = getauth(getdatadir()).get_access_token(pin)
except (tweepy.TweepError, urllib2.HTTPError), e: ievent.reply('twitter failed: %s' % (str(e),)) ; return
twitteruser = get_users()
twitteruser.add(ievent.user.data.name, access_token.to_string())
ievent.reply("access token saved.")
cmnds.add('twitter-confirm', handle_twitter_confirm, ['OPER', 'USER', 'GUEST'])
examples.add('twitter-confirm', 'confirm your twitter account', 'twitter-confirm 6992762')
## twitter-auth command
def handle_twitter_auth(bot, ievent):
""" no arguments - get url to get the auth PIN needed for the twitter-confirm command. """
from jsb.utils.twitter import go
if not go: ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data/config dir. see .jsb/data/examples") ; return
try: auth_url = getauth(getdatadir()).get_authorization_url()
except (tweepy.TweepError, urllib2.HTTPError), e: ievent.reply('twitter failed: %s' % (str(e),)) ; return
if bot.type == "irc":
bot.say(ievent.nick, "sign in at %s" % auth_url)
bot.say(ievent.nick, "use the provided code in the twitter-confirm command.")
else:
ievent.reply("sign in at %s" % auth_url)
ievent.reply("use the provided code in the twitter-confirm command.")
cmnds.add('twitter-auth', handle_twitter_auth, ['OPER', 'USER', 'GUEST'])
examples.add('twitter-auth', 'adds your twitter account', 'twitter-auth')
## twitter-friends command
def handle_twitterfriends(bot, ievent):
""" no arguments - show friends timeline (your normal twitter feed). """
if not go: ievent.reply("the twitter plugin needs the credentials.py file in the .jsb/data/config dir. see .jsb/data/examples") ; return
try:
token = get_token(ievent.user.data.name)
if not token: ievent.reply("you are not logged in yet .. run the twitter-auth command.") ; return
key , secret = getcreds(getdatadir())
token = tweepy.oauth.OAuthToken(key, secret).from_string(token)
twitter = twitterapi(key, secret, token)
method = getattr(twitter, "friends_timeline")
result = method()
res = []
for item in result:
try: res.append("%s - %s" % (item.author.screen_name, item.text))
except Exception, ex: handle_exception()
ievent.reply("results: ", res)
except KeyError: ievent.reply('you are not logged in yet. see the twitter-auth command.')
except (tweepy.TweepError, urllib2.HTTPError), e: ievent.reply('twitter failed: %s' % (str(e),))
cmnds.add('twitter-friends', handle_twitterfriends, ['OPER', 'USER', 'GUEST'], threaded=True)
examples.add('twitter-friends', 'show your friends_timeline', 'twitter-friends')
def init():
from jsb.utils.twitter import getcreds
creds = getcreds()
|
StarcoderdataPython
|
3410194
|
<gh_stars>1-10
# coding: utf-8
"""
Tradenity API
Tradenity eCommerce Rest API
Contact: <EMAIL>
"""
from __future__ import absolute_import
import re
import pprint
# python 2 and python 3 compatibility library
import six
from tradenity.api_client import ApiClient
class CashOnDeliveryPayment(object):
swagger_types = {
'id': 'str',
'meta': 'InstanceMeta',
'amount': 'int',
'order': 'Order',
'payment_source': 'PaymentSource',
'currency': 'Currency',
'status': 'str'
}
attribute_map = {
'id': 'id',
'meta': '__meta',
'amount': 'amount',
'order': 'order',
'payment_source': 'paymentSource',
'currency': 'currency',
'status': 'status'
}
api_client = None
def __init__(self, id=None, meta=None, amount=None, order=None, payment_source=None, currency=None, status=None):
"""CashOnDeliveryPayment - a model defined in Swagger"""
self._id = id
self._meta = None
self._amount = None
self._order = None
self._payment_source = None
self._currency = None
self._status = None
self.discriminator = None
if meta is not None:
self.meta = meta
self.amount = amount
self.order = order
self.payment_source = payment_source
self.currency = currency
self.status = status
@property
def id(self):
if self._id:
return self._id
elif self.meta is None:
return None
else:
self._id = self.meta.href.split("/")[-1]
return self._id
@id.setter
def id(self, new_id):
self._id = new_id
@property
def meta(self):
"""Gets the meta of this CashOnDeliveryPayment.
:return: The meta of this CashOnDeliveryPayment.
:rtype: InstanceMeta
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this CashOnDeliveryPayment.
:param meta: The meta of this CashOnDeliveryPayment.
:type: InstanceMeta
"""
self._meta = meta
@property
def amount(self):
"""Gets the amount of this CashOnDeliveryPayment.
:return: The amount of this CashOnDeliveryPayment.
:rtype: int
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this CashOnDeliveryPayment.
:param amount: The amount of this CashOnDeliveryPayment.
:type: int
"""
self._amount = amount
@property
def order(self):
"""Gets the order of this CashOnDeliveryPayment.
:return: The order of this CashOnDeliveryPayment.
:rtype: Order
"""
return self._order
@order.setter
def order(self, order):
"""Sets the order of this CashOnDeliveryPayment.
:param order: The order of this CashOnDeliveryPayment.
:type: Order
"""
self._order = order
@property
def payment_source(self):
"""Gets the payment_source of this CashOnDeliveryPayment.
:return: The payment_source of this CashOnDeliveryPayment.
:rtype: PaymentSource
"""
return self._payment_source
@payment_source.setter
def payment_source(self, payment_source):
"""Sets the payment_source of this CashOnDeliveryPayment.
:param payment_source: The payment_source of this CashOnDeliveryPayment.
:type: PaymentSource
"""
self._payment_source = payment_source
@property
def currency(self):
"""Gets the currency of this CashOnDeliveryPayment.
:return: The currency of this CashOnDeliveryPayment.
:rtype: Currency
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this CashOnDeliveryPayment.
:param currency: The currency of this CashOnDeliveryPayment.
:type: Currency
"""
self._currency = currency
@property
def status(self):
"""Gets the status of this CashOnDeliveryPayment.
:return: The status of this CashOnDeliveryPayment.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this CashOnDeliveryPayment.
:param status: The status of this CashOnDeliveryPayment.
:type: str
"""
allowed_values = ["pending", "awaitingRetry", "successful", "failed"]
if status is not None and status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}"
.format(status, allowed_values)
)
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CashOnDeliveryPayment, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CashOnDeliveryPayment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
@classmethod
def get_api_client(cls):
if cls.api_client is None:
cls.api_client = ApiClient.instance()
return cls.api_client
@classmethod
def find_all(cls, **kwargs):
return cls.list_all_cash_on_delivery_payments(**kwargs)
@classmethod
def find_all_by(cls, **kwargs):
return cls.list_all_cash_on_delivery_payments(**kwargs)
@classmethod
def find_one_by(cls, **kwargs):
results = cls.list_all_cash_on_delivery_payments(**kwargs)
if len(results) > 0:
return results[0]
@classmethod
def find_by_id(cls, id):
return cls.get_cash_on_delivery_payment_by_id(id)
def create(self):
new_instance = self.create_cash_on_delivery_payment(self)
self.id = new_instance.id
return self
def update(self):
return self.update_cash_on_delivery_payment_by_id(self.id, self)
def delete(self):
return self.delete_cash_on_delivery_payment_by_id(self.id)
@classmethod
def create_cash_on_delivery_payment(cls, cash_on_delivery_payment, **kwargs):
"""Create CashOnDeliveryPayment
Create a new CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_cash_on_delivery_payment(cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to create (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_cash_on_delivery_payment_with_http_info(cash_on_delivery_payment, **kwargs)
else:
(data) = cls._create_cash_on_delivery_payment_with_http_info(cash_on_delivery_payment, **kwargs)
return data
@classmethod
def _create_cash_on_delivery_payment_with_http_info(cls, cash_on_delivery_payment, **kwargs):
"""Create CashOnDeliveryPayment
Create a new CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_cash_on_delivery_payment_with_http_info(cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to create (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cash_on_delivery_payment']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'cash_on_delivery_payment' is set
if ('cash_on_delivery_payment' not in params or
params['cash_on_delivery_payment'] is None):
raise ValueError("Missing the required parameter `cash_on_delivery_payment` when calling `create_cash_on_delivery_payment`")
collection_formats = {}
path_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cash_on_delivery_payment' in params:
body_params = params['cash_on_delivery_payment']
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/cashOnDeliveryPayments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CashOnDeliveryPayment',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def delete_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, **kwargs):
"""Delete CashOnDeliveryPayment
Delete an instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
else:
(data) = cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
return data
@classmethod
def _delete_cash_on_delivery_payment_by_id_with_http_info(cls, cash_on_delivery_payment_id, **kwargs):
"""Delete CashOnDeliveryPayment
Delete an instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cash_on_delivery_payment_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'cash_on_delivery_payment_id' is set
if ('cash_on_delivery_payment_id' not in params or
params['cash_on_delivery_payment_id'] is None):
raise ValueError("Missing the required parameter `cash_on_delivery_payment_id` when calling `delete_cash_on_delivery_payment_by_id`")
collection_formats = {}
path_params = {}
if 'cash_on_delivery_payment_id' in params:
path_params['cashOnDeliveryPaymentId'] = params['cash_on_delivery_payment_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/cashOnDeliveryPayments/{cashOnDeliveryPaymentId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def get_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, **kwargs):
"""Find CashOnDeliveryPayment
Return single instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to return (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
else:
(data) = cls._get_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
return data
@classmethod
def _get_cash_on_delivery_payment_by_id_with_http_info(cls, cash_on_delivery_payment_id, **kwargs):
"""Find CashOnDeliveryPayment
Return single instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to return (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cash_on_delivery_payment_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'cash_on_delivery_payment_id' is set
if ('cash_on_delivery_payment_id' not in params or
params['cash_on_delivery_payment_id'] is None):
raise ValueError("Missing the required parameter `cash_on_delivery_payment_id` when calling `get_cash_on_delivery_payment_by_id`")
collection_formats = {}
path_params = {}
if 'cash_on_delivery_payment_id' in params:
path_params['cashOnDeliveryPaymentId'] = params['cash_on_delivery_payment_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/cashOnDeliveryPayments/{cashOnDeliveryPaymentId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CashOnDeliveryPayment',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def list_all_cash_on_delivery_payments(cls, **kwargs):
"""List CashOnDeliveryPayments
Return a list of CashOnDeliveryPayments
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_cash_on_delivery_payments(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[CashOnDeliveryPayment]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_cash_on_delivery_payments_with_http_info(**kwargs)
else:
(data) = cls._list_all_cash_on_delivery_payments_with_http_info(**kwargs)
return data
@classmethod
def _list_all_cash_on_delivery_payments_with_http_info(cls, **kwargs):
"""List CashOnDeliveryPayments
Return a list of CashOnDeliveryPayments
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_cash_on_delivery_payments_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[CashOnDeliveryPayment]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'size', 'sort']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
if 'page' in params:
query_params.append(('page', params['page']))
if 'size' in params:
query_params.append(('size', params['size']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/cashOnDeliveryPayments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='page[CashOnDeliveryPayment]',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def replace_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs):
"""Replace CashOnDeliveryPayment
Replace all attributes of CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to replace (required)
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to replace (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
else:
(data) = cls._replace_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
return data
@classmethod
def _replace_cash_on_delivery_payment_by_id_with_http_info(cls, cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs):
"""Replace CashOnDeliveryPayment
Replace all attributes of CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to replace (required)
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to replace (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cash_on_delivery_payment_id', 'cash_on_delivery_payment']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'cash_on_delivery_payment_id' is set
if ('cash_on_delivery_payment_id' not in params or
params['cash_on_delivery_payment_id'] is None):
raise ValueError("Missing the required parameter `cash_on_delivery_payment_id` when calling `replace_cash_on_delivery_payment_by_id`")
# verify the required parameter 'cash_on_delivery_payment' is set
if ('cash_on_delivery_payment' not in params or
params['cash_on_delivery_payment'] is None):
raise ValueError("Missing the required parameter `cash_on_delivery_payment` when calling `replace_cash_on_delivery_payment_by_id`")
collection_formats = {}
path_params = {}
if 'cash_on_delivery_payment_id' in params:
path_params['cashOnDeliveryPaymentId'] = params['cash_on_delivery_payment_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cash_on_delivery_payment' in params:
body_params = params['cash_on_delivery_payment']
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/cashOnDeliveryPayments/{cashOnDeliveryPaymentId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CashOnDeliveryPayment',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
@classmethod
def update_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs):
"""Update CashOnDeliveryPayment
Update attributes of CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to update. (required)
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to update. (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
else:
(data) = cls._update_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
return data
@classmethod
def _update_cash_on_delivery_payment_by_id_with_http_info(cls, cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs):
"""Update CashOnDeliveryPayment
Update attributes of CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to update. (required)
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to update. (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cash_on_delivery_payment_id', 'cash_on_delivery_payment']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
query_params = []
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
query_params.append((key, val))
params[key] = val
del params['kwargs']
# verify the required parameter 'cash_on_delivery_payment_id' is set
if ('cash_on_delivery_payment_id' not in params or
params['cash_on_delivery_payment_id'] is None):
raise ValueError("Missing the required parameter `cash_on_delivery_payment_id` when calling `update_cash_on_delivery_payment_by_id`")
# verify the required parameter 'cash_on_delivery_payment' is set
if ('cash_on_delivery_payment' not in params or
params['cash_on_delivery_payment'] is None):
raise ValueError("Missing the required parameter `cash_on_delivery_payment` when calling `update_cash_on_delivery_payment_by_id`")
collection_formats = {}
path_params = {}
if 'cash_on_delivery_payment_id' in params:
path_params['cashOnDeliveryPaymentId'] = params['cash_on_delivery_payment_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'cash_on_delivery_payment' in params:
body_params = params['cash_on_delivery_payment']
# HTTP header `Accept`
header_params['Accept'] = cls.get_api_client().select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = cls.get_api_client().select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return cls.get_api_client().call_api(
'/cashOnDeliveryPayments/{cashOnDeliveryPaymentId}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CashOnDeliveryPayment',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
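# A minimal usage sketch (not part of the generated SDK). It assumes the ApiClient
# singleton has already been configured with the store's endpoint and credentials
# elsewhere; "example-id" and the page/size values below are placeholders for illustration.
if __name__ == "__main__":
    # List the first page of cash-on-delivery payments.
    payments = CashOnDeliveryPayment.list_all_cash_on_delivery_payments(page=0, size=10)
    for payment in payments:
        print(payment.id, payment.status, payment.amount)
    # Fetch a single payment by a hypothetical ID and dump its attributes.
    payment = CashOnDeliveryPayment.find_by_id("example-id")
    print(payment.to_dict())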
|
StarcoderdataPython
|
3398673
|
import requests, json, re
__all__ = ["Couch", "Database"]
class Couch:
"""Handles the connection to CouchDB and any interaction with databases
"""
def __init__(self, user, password, host="localhost", port="5984"):
self.user = user
self.password = password
self.host = host
self.port = port
def get_url(self):
"""Creates the url needed to access CouchDB
Returns:
str: URL-String to access CouchDB
"""
return f'http://{self.user}:{self.password}@{self.host}:{self.port}'
def connect(self):
"""Sends CouchDBs welcome message in order to check connectivity
Returns:
dict: Dict containing the welcome message from CouchDB
"""
return requests.get(self.get_url()).json()
def has_database(self, db_name):
"""Checks whether a database is already existent on CouchDB
Args:
db_name (str): Name of the database
Returns:
boolean: True if the database exists. False if it doesn't exist
"""
        return requests.get(f'{self.get_url()}/{db_name}').status_code == 200
def get_database(self, db_name):
"""Retrieves a Database-Object if the database can be found on CouchDB
Args:
db_name (str): Name of the database
Raises:
ValueError: If the name doesn't exist an error is raised
Returns:
Database: Database-Object for further handling
"""
if not self.has_database(db_name):
raise ValueError(f'The database "{db_name}" doesn\'t exist.')
return Database(db_name, self)
def create_database(self, db_name):
"""Creates a new database on CouchDB
Args:
db_name (str): Name of the database
Raises:
ValueError: If db_name doesn't meet the naming requirements this error is raised
Returns:
dict: Answer from CouchDB
"""
if not re.fullmatch(r"^[a-z][a-z0-9_$()+/-]*$", db_name):
raise ValueError(f'The database name "{db_name}" does not match the criteria. It must start with a lowercase letter a-z, can contain lowercase letters (a-z), digits (0-9) or any of these _, $, (, ), +, -, and /.')
r = requests.put(f'{self.get_url()}/{db_name}')
return r.json()
def delete_database(self, db_name):
"""Deletes a database from CouchDB
Args:
db_name (str): Name of the database
Returns:
dict: Answer from CouchDB
"""
r = requests.delete(f'{self.get_url()}/{db_name}')
return r.json()
class Database:
"""Database from CouchDB. Handles all interactions with documents
"""
def __init__(self, name, couch):
self.name = name
self.couch = couch
def get_db_url(self):
"""Constructs the URL for this particular database
Returns:
str: URL-string for the database based on the Couch-Session
"""
return f'{self.couch.get_url()}/{self.name}'
def get_document_url(self, document_id):
"""Constructs an URL leading to a document
Args:
document_id (str): ID of the document
Returns:
str: URL-string for a document
"""
return f'{self.get_db_url()}/{document_id}'
def has_document(self, document_id):
"""Checks whether a documents exists
Args:
document_id (str): ID of the document
Returns:
boolean: True if the document is in the database. False if it is not
"""
        return requests.get(self.get_document_url(document_id)).status_code == 200
def get_all_document_ids(self):
"""Retrieves all document ids on a database
Returns:
list: List of document IDs
"""
rows = requests.get(f'{self.get_db_url()}/_all_docs').json()['rows']
return [r['id'] for r in rows]
def get_document(self, document_id):
"""Retrieves a document from the database
Args:
document_id (str): ID of the document
Raises:
ValueError: If the document ID is not existent this error is raised
Returns:
dict: Document as dictionary
"""
if not self.has_document(document_id):
            raise ValueError(f'The database {self.name} doesn\'t hold a document with the id {document_id}')
return requests.get(self.get_document_url(document_id)).json()
def save_document(self, document):
"""Saves a document to the database if it has _rev otherwise creates the new document.
If _id is passed as a field it will use _id as document id. Otherwise a uuid will be created and returned.
Args:
document (dict): Document
Raises:
ValueError: If the revision of the document is too old this error is raised.
RuntimeError: If 400, 404 or 405 status codes are returned this error is raised
Returns:
dict: Answer from CouchDB. Contains the id if no id was passed with document
"""
r = requests.put(self.get_document_url(document['_id']), data=json.dumps(document))
        if r.status_code == 409:
            raise ValueError(f'The document with the id "{document["_id"]}" could not be updated since no changes were made.')
        elif r.status_code not in [200, 201]:
raise RuntimeError(f'The document with the id "{document["_id"]}" could not be updated')
return r.json()
def delete_document(self, document):
"""Deletes a document from the database.
Args:
document (dict): Document
Raises:
ValueError: If it is an older revision 409 is triggered.
ValueError: If the ID of the document is incorrect, it will trigger this error as CouchDB answers with 404 - Not Found
RuntimeError: If any other error happened on CouchDB it will answer with this error
Returns:
dict: Answer from CouchDB
"""
r = requests.delete(f'{self.get_document_url(document["_id"])}?rev={document["_rev"]}')
        if r.status_code == 409:
            raise ValueError(f'The document with the id "{document["_id"]}" could not be deleted since it isn\'t the most recent revision.')
        elif r.status_code == 404:
raise ValueError(f'The document with the id "{document["_id"]}" does not exists.')
elif r.status_code not in [200, 201]:
raise RuntimeError(f'The document with the id "{document["_id"]}" could not be deleted')
return r.json()
def get_all_documents(self):
"""Retrieves all documents in this database.
Returns:
list: List of documents
"""
document_keys = self.get_all_document_ids()
data = {'docs': [{'id': d} for d in document_keys]}
documents = requests.post(f'{self.get_db_url()}/_bulk_get', json=data)
return documents.json()
def get_view_url(self, view_doc, view_index):
"""Constructs an URL to a given view
Args:
            view_doc (str): Name of the view's 'Design Document'
            view_index (str): Name of the view's 'Index name'
Returns:
str: URL-string for the view
"""
return f'{self.get_db_url()}/_design/{view_doc}/_view/{view_index}'
def get_view_documents(self, view_doc, view_index, key=None):
"""Get documents from a given view and key.
Returns:
list: List of documents
"""
view_url = self.get_view_url(view_doc, view_index)
if key and key != '':
view_url = f'{view_url}?key={key}'
documents = requests.get(view_url)
return documents.json()
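# A short usage sketch, assuming a CouchDB instance on localhost:5984; the
# "admin"/"password" credentials and the "books" database are placeholders.
if __name__ == "__main__":
    couch = Couch(user="admin", password="password")
    print(couch.connect())  # CouchDB welcome message confirms connectivity
    if not couch.has_database("books"):
        couch.create_database("books")
    db = couch.get_database("books")
    print(db.save_document({"_id": "example-doc", "title": "Example"}))
    print(db.get_all_document_ids())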
|
StarcoderdataPython
|
3469561
|
<filename>src/dbb/compiler.py
import datetime
import glob
import os
import os.path
import re
import shutil
import subprocess
import tempfile
from os.path import dirname, join, realpath
from .util.config import config
from . import toc
from . import dependency
from .util import log
from . import pandoc
from . import pdf
class Compiler:
def __init__(self):
self.contents = toc.Contents()
self.sectionNumber = -1
def compileMarkdown(self, directory, src, tgt, coverPage=False):
log.debug(f"compileMarkdown( {directory}, {src}, {tgt}, {coverPage})")
src = join(config.root, directory, src)
tgt = join(config.build, tgt)
pandoc.run(src, tgt, coverPage)
def getSectionNumber(self, path):
# log.debug('getSectionNumber: ', path)
match = re.match(r".*?#(\d+).*", path)
if match:
# return int(match.group(1))
self.sectionNumber += 1
return self.sectionNumber
else:
return 0
def getSectionName(self, path):
# log.debug('getSectionName: ', path)
match = re.match(r".*?#(\d+)(.*)", path)
if match:
# log.debug(match.groups())
name = match.group(2).strip()
# log.debug('name:',name)
return name
else:
raise Exception("can't get name")
def createTOC(self):
filename = join(config.build, ".toc.md")
log.debug(f"createTOC: opening {filename}")
with open(filename, "w") as f:
f.write(toc.header)
for line in self.contents.markdownLines():
f.write(line)
f.write("\n\n")
self.compileMarkdown(
config.build, ".toc.md", "00-01 [Intro] [Table of Contents].pdf"
)
def globAndMatch(self, globPattern, rePattern):
fileList = glob.glob(globPattern)
for singleFile in fileList:
match = re.match(rePattern, singleFile)
if match:
subSectionNumber = int(match.group(1))
subSectionName = match.group(2).strip()
yield (singleFile, subSectionNumber, subSectionName)
else:
log.warn(f"Filename is in wrong format: {singleFile} ")
def processMarkdown(self, sectionNumber, sectionName, directory):
os.chdir(directory)
for markdownFile, subSectionNumber, subsectionName in self.globAndMatch(
"*.md", r"#(\d+)(.*?)\.md"
):
log.info(f" {markdownFile} (Markdown file)")
self.compileMarkdown(
directory,
markdownFile,
f"{sectionNumber:02}-{subSectionNumber:02} [{sectionName}] [{subsectionName}].pdf",
coverPage=(sectionNumber == 0),
)
self.contents.addSubSection(sectionNumber, subSectionNumber, subsectionName)
def processPreparedFiles(self, sectionNumber, sectionName, directory):
os.chdir(directory)
for filename, subSectionNumber, subsectionName in self.globAndMatch(
"*&*.pdf", r"#(\d+)\&(.*?)\.pdf"
):
log.info(f" {filename} (prepared PDF file)")
src = join(directory, filename)
dst = join(
config.build,
f"{sectionNumber:02}-{subSectionNumber:02} [{sectionName}] [{subsectionName}].pdf",
)
shutil.copy(src, dst)
self.contents.addSubSection(sectionNumber, subSectionNumber, subsectionName)
dependency.check(directory, filename)
def processDirectFiles(self, sectionNumber, sectionName, directory):
os.chdir(directory)
for filename, subSectionNumber, subsectionName in self.globAndMatch(
"*%*.pdf", r"#(\d+)\%(.*?)\.pdf"
):
log.info(f" {filename} (directly edited PDF)")
src = join(directory, filename)
dst = join(
config.build,
f"{sectionNumber:02}-{subSectionNumber:02} [{sectionName}] [{subsectionName}].pdf",
)
shutil.copy(src, dst)
self.contents.addSubSection(sectionNumber, subSectionNumber, subsectionName)
def getReferenceDirectories(self, directory):
directoryList = [directory]
for i in ["References", "references", "refs", "ref"]:
testDir = os.path.join(directory, i)
if os.path.isdir(testDir):
directoryList.append(testDir)
for d in directoryList:
yield d
def processReferenceFiles(self, sectionNumber, sectionName, topDirectory):
for directory in self.getReferenceDirectories(topDirectory):
os.chdir(directory)
for filename, documentNumber, documentName in self.globAndMatch(
"*$*.pdf", r"#(\d+)\$(.*?)\.pdf"
):
log.info(f" {filename} (reference document/attachment)")
watermarkText = f"REFERENCE DOCUMENT: {sectionNumber}.{documentNumber} {sectionName} - {documentName} {config.title}, {config.datestamp} "
watermarkPdf = os.path.join(tempfile.gettempdir(), "~databook_temp.pdf")
pdf.generateMultipageWatermarkFile(
watermarkPdf, watermarkText, os.path.join(directory, filename)
)
# src = join(directory,filename)
dst = join(
config.buildRef,
f"{sectionNumber:02}-{documentNumber:02} {sectionName}-{documentName}.pdf",
)
# shutil.copy(src, dst)
self.contents.addSubSection(
sectionNumber, documentNumber, documentName + " (Attachment)"
)
cmd = f'pdftk "{join(directory,filename)}" multistamp "{watermarkPdf}" output "{dst}"'
log.debug(cmd)
try:
                    subprocess.run(cmd, shell=True, check=True)
except Exception as e:
log.exception(e)
raise
def processSection(self, directory, section, parentSection=None):
log.debug(f"processSection({directory}, {section}, {parentSection})")
sectionNumber = self.getSectionNumber(section)
sectionName = self.getSectionName(section)
log.debug(f"Section Number: {sectionNumber}")
log.debug(f"Section Name: {sectionName}")
self.contents.addSection(sectionNumber, sectionName)
subdir = os.path.join(directory, section)
self.processMarkdown(sectionNumber, sectionName, subdir)
self.processPreparedFiles(sectionNumber, sectionName, subdir)
self.processDirectFiles(sectionNumber, sectionName, subdir)
self.processReferenceFiles(sectionNumber, sectionName, subdir)
def compile(self):
_, directories, _ = next(os.walk(config.root))
# sections = list( filter(lambda x: '#' in x, directories) )
sections = [x for x in directories if x.startswith("#")]
for section in sections:
log.info(f"Top-level section: {section}")
self.processSection(config.root, section)
self.createTOC()
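# A minimal driver sketch, assuming `config` (root/build paths, title, datestamp)
# has already been populated by the package's configuration loader before this runs.
if __name__ == "__main__":
    compiler = Compiler()
    compiler.compile()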
|
StarcoderdataPython
|
11384312
|
<reponame>1NCE-GmbH/blueprint-pycom
# -*- coding: utf-8 -*-
class FileHelper:
@staticmethod
def write_file(content, path):
"""
Writes the content to a file
:param content: Content that needs to be written to the file
:param path: File path
"""
with open(path, "wb") as file:
file.write(content)
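# Usage sketch: write raw bytes to a file. The path below is a placeholder; on a Pycom
# module it would normally point somewhere under the device's flash filesystem.
if __name__ == "__main__":
    FileHelper.write_file(b"example payload", "/flash/example.bin")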
|
StarcoderdataPython
|
9679157
|
# -*- coding: utf-8 -*-
import logging
import torch
import torch.cuda
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.loss import WarmAdam, LabelSmoothingLoss
from beaver.model import NMTModel
from beaver.utils import Saver
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, printing_opt
from beaver.utils.metric import calculate_rouge
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_train_args()
device = get_device()
logging.info("\n" + printing_opt(opt))
saver = Saver(opt)
def valid(model, criterion_task1, criterion_task2, valid_dataset, step):
model.eval()
total_n = 0
total_task1_loss = total_task2_loss = 0.0
task1_hypothesis, task1_references = [], []
task2_hypothesis, task2_references = [], []
for i, (batch, flag) in enumerate(valid_dataset):
scores = model(batch.src, batch.tgt, flag)
if flag:
loss = criterion_task1(scores, batch.tgt)
else:
loss = criterion_task2(scores, batch.tgt)
_, predictions = scores.topk(k=1, dim=-1)
if flag: # task1
total_task1_loss += loss.data
task1_hypothesis += [valid_dataset.fields["task1_tgt"].decode(p) for p in predictions]
task1_references += [valid_dataset.fields["task1_tgt"].decode(t) for t in batch.tgt]
else:
total_task2_loss += loss.data
task2_hypothesis += [valid_dataset.fields["task2_tgt"].decode(p) for p in predictions]
task2_references += [valid_dataset.fields["task2_tgt"].decode(t) for t in batch.tgt]
total_n += 1
bleu_task1 = calculate_bleu(task1_hypothesis, task1_references)
bleu_task2 = calculate_bleu(task2_hypothesis, task2_references)
rouge1_task1, rouge2_task1 = calculate_rouge(task1_hypothesis, task1_references)
rouge1_task2, rouge2_task2 = calculate_rouge(task2_hypothesis, task2_references)
mean_task1_loss = total_task1_loss / total_n
mean_task2_loss = total_task2_loss / total_n
logging.info("loss-task1: %.2f \t loss-task2 %.2f \t bleu-task1: %3.2f\t bleu-task2: %3.2f \t rouge1-task1: %3.2f \t rouge1-task2: %3.2f \t rouge2-task1: %3.2f \t rouge2-task2: %3.2f"
% (mean_task1_loss, mean_task2_loss, bleu_task1, bleu_task2, rouge1_task1, rouge1_task2, rouge2_task1, rouge2_task2))
checkpoint = {"model": model.state_dict(), "opt": opt}
saver.save(checkpoint, step, mean_task1_loss, mean_task2_loss, bleu_task1, bleu_task2, rouge1_task1, rouge1_task2, rouge2_task1, rouge2_task2)
def train(model, criterion_task1, criterion_task2, optimizer, train_dataset, valid_dataset):
total_task1_loss = total_task2_loss = 0.0
model.zero_grad()
for i, (batch, flag) in enumerate(train_dataset):
scores = model(batch.src, batch.tgt, flag)
if flag:
loss = criterion_task1(scores, batch.tgt)
else:
loss = criterion_task2(scores, batch.tgt)
loss.backward()
if flag: # task1
total_task1_loss += loss.data
else:
total_task2_loss += loss.data
if (i + 1) % opt.grad_accum == 0:
optimizer.step()
model.zero_grad()
if optimizer.n_step % opt.report_every == 0:
mean_task1_loss = total_task1_loss / opt.report_every / opt.grad_accum * 2
mean_task2_loss = total_task2_loss / opt.report_every / opt.grad_accum * 2
logging.info("step: %7d\t loss-task1: %.4f \t loss-task2: %.4f"
% (optimizer.n_step, mean_task1_loss, mean_task2_loss))
total_task1_loss = total_task2_loss = 0.0
if optimizer.n_step % opt.save_every == 0:
with torch.set_grad_enabled(False):
valid(model, criterion_task1, criterion_task2, valid_dataset, optimizer.n_step)
model.train()
del loss
def main():
logging.info("Build dataset...")
train_dataset = build_dataset(opt, opt.train, opt.vocab, device, train=True)
valid_dataset = build_dataset(opt, opt.valid, opt.vocab, device, train=False)
fields = valid_dataset.fields = train_dataset.fields
logging.info("Build model...")
pad_ids = {"src": fields["src"].pad_id,
"task1_tgt": fields["task1_tgt"].pad_id,
"task2_tgt": fields["task2_tgt"].pad_id}
vocab_sizes = {"src": len(fields["src"].vocab),
"task1_tgt": len(fields["task1_tgt"].vocab),
"task2_tgt": len(fields["task2_tgt"].vocab)}
model = NMTModel.load_model(opt, pad_ids, vocab_sizes).to(device)
criterion_task1 = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["task1_tgt"], pad_ids["task1_tgt"]).to(device)
criterion_task2 = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["task2_tgt"], pad_ids["task2_tgt"]).to(device)
n_step = int(opt.train_from.split("-")[-1]) if opt.train_from else 1
optimizer = WarmAdam(model.parameters(), opt.lr, opt.hidden_size, opt.warm_up, n_step)
logging.info("start training...")
train(model, criterion_task1, criterion_task2, optimizer, train_dataset, valid_dataset)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3521378
|
# Example usage:
# python3 zkchannel_pytezos_mgr.py --contract=zkchannel_contract.tz --cust=tz1iKxZpa5x1grZyN2Uw9gERXJJPMyG22Sqp.json --merch=tz1bXwRiFvijKnZYUj9J53oYE3fFkMTWXqNx.json --custclose=cust_close.json --merchclose=merch_close.json
import argparse
from pprint import pprint
from pytezos import pytezos
from pytezos import Contract
from pytezos import ContractInterface
import json
import sys
def read_json_file(json_file):
f = open(json_file)
s = f.read()
f.close()
return json.loads(s)
def convert_mt_to_tez(balance):
    return str(int(balance) / 1000000)
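# Example: convert_mt_to_tez(1000000) returns "1.0" (1 tez == 1,000,000 mutez).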
class FeeTracker:
def __init__(self):
self.fees = []
def add_result(self, op_name, result):
"""Add the fees of the fees from operation result to self.fees"""
fee = int(result['contents'][0]['fee'])
storage_bytes = int(result['contents'][0]['storage_limit'])
storage_cost = int(storage_bytes) * 250 # 250 mutez per storage_bytes byte on edo
gas = int(result['contents'][0]['gas_limit'])
total_cost = fee + storage_cost
fee = {"total_cost":total_cost, "fee":fee, "storage_bytes":storage_bytes, "storage_cost":storage_cost, "gas":gas}
self.fees.append({op_name:fee})
def print_fees(self):
pprint(self.fees)
def add_cust_funding(ci, amt):
print("Adding funds ({})".format(amt))
out = ci.addCustFunding().with_amount(amt).send(min_confirmations=1)
print("Add Cust Funding ophash: ", out['hash'])
return out
def originate(cust_py, init_params, cust_funding, merch_funding):
# Create initial storage for main zkchannel contract
merch_ps_pk = init_params.get("merchant_ps_public_key")
close_scalar_bytes = init_params.get("close_scalar_bytes")
channel_id = init_params.get("channel_id")
# Merchant's PS pubkey, used for verifying the merchant's signature in custClose.
merch_g2 = merch_ps_pk.get("g2")
merch_y2s = merch_ps_pk.get("y2s")
merch_x2 = merch_ps_pk.get("x2")
initial_storage = {"cid": channel_id,
"close_scalar": close_scalar_bytes,
"context_string": "zkChannels mutual close",
"customer_address": cust_addr,
"customer_balance": cust_funding,
"customer_public_key": cust_pubkey,
"delay_expiry": "1970-01-01T00:00:00Z",
"g2": merch_g2,
"merchant_address": merch_addr,
"merchant_balance": merch_funding,
"merchant_public_key": merch_pubkey,
"y2s_0": merch_y2s[0],
"y2s_1": merch_y2s[1],
"y2s_2": merch_y2s[2],
"y2s_3": merch_y2s[3],
"y2s_4": merch_y2s[4],
"x2": merch_x2,
"revocation_lock": "0x00",
"self_delay": 172800,
"status": 0}
# Originate main zkchannel contract
print("Originate main zkChannel contract")
out = cust_py.origination(script=main_code.script(initial_storage=initial_storage)).autofill().sign().inject(_async=False)
print("Originate zkChannel ophash: ", out['hash'])
# Get address of main zkchannel contract
opg = pytezos.shell.blocks[-20:].find_operation(out['hash'])
contract_id = opg['contents'][0]['metadata']['operation_result']['originated_contracts'][0]
print("zkChannel contract address: ", contract_id)
return out, contract_id
def cust_close(ci, cust_close_data):
# Form cust close storage
cs = cust_close_data.get("closing_signature")
sigma1, sigma2 = cs.get("sigma1"), cs.get("sigma2")
revocation_lock = cust_close_data.get("revocation_lock")
# assumes it's already in the mutez
cust_balance = convert_mt_to_tez(cust_close_data.get("customer_balance"))
merch_balance = convert_mt_to_tez(cust_close_data.get("merchant_balance"))
close_storage = {
"custBal": cust_balance,
"merchBal": merch_balance,
"revLock": revocation_lock,
"s1": sigma1,
"s2": sigma2
}
print("Broadcasting Cust Close: %s" % close_storage)
out = ci.custClose(close_storage).inject(_async=False)
print("Cust Close ophash: ", out['hash'])
return out
def merch_dispute(ci, entrypoint, rev_secret):
print('Broadcasting {}'.format(entrypoint))
cmd = 'ci.{e}(\"{r}\").inject(_async=False)'.format(e=entrypoint, r=rev_secret)
out = eval(cmd)
print("{} ophash: ".format(entrypoint), out['hash'])
return out
def entrypoint_no_args(ci, entrypoint):
print('Broadcasting {}'.format(entrypoint))
cmd = 'ci.{}().inject(_async=False)'.format(entrypoint)
out = eval(cmd)
print("{} ophash: ".format(entrypoint), out['hash'])
return out
def zkchannel_establish(feetracker, cust_py, merch_py, establish_params):
'''
Customer originates a single funded contract.
Entrypoints tested: 'addFunding'
'''
cust_funding=establish_params.get("customer_deposit")
merch_funding=establish_params.get("merchant_deposit")
out, contract_id = originate(cust_py, establish_params, cust_funding, merch_funding)
feetracker.add_result('originate', out) # feetracker is used to track fees for benchmarking purposes
print("Contract ID: ", contract_id)
# Set the contract interfaces for cust
cust_ci = cust_py.contract(contract_id)
# add customer's balance to the contract using 'addFunding' entrypoint
out = add_cust_funding(cust_ci, cust_funding)
feetracker.add_result('addFunding', out)
return contract_id
def zkchannel_unilateral_close(feetracker, contract_id, cust_py, _merch_py, cust_close_data):
'''
Customer or merchant can proceed with broadcasting the
closing signature on the final state of the channel.
Entrypoints tested: 'custClose', 'custClaim'
'''
# Set the contract interfaces for cust
print("Getting handle to the contract: '%s'" % contract_id)
cust_ci = cust_py.contract(contract_id)
out = cust_close(cust_ci, cust_close_data)
feetracker.add_result('custClose', out)
out = entrypoint_no_args(cust_ci, 'custClaim')
feetracker.add_result('custClaim', out)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--shell", "-n", required=False, help="the address to connect to granadanet", default = "https://rpc.tzkt.io/granadanet/")
parser.add_argument("--contract", "-z", required=True, help="zkchannels michelson contract")
parser.add_argument("--contract-id", help="specify the contract id")
parser.add_argument("--cust", "-c", required=True, help="customer's testnet account json file")
parser.add_argument("--merch", "-m", required=True, help="merchant's testnet account json file")
parser.add_argument("--establish", "-e", help="Filename (with path) to <chanid>.establish.json file created by zeekoe")
parser.add_argument("--cust-close", "-cc", help="Filename (with path) to the <chanid>.close.json file created by zeekoe")
# parser.add_argument("--merch_close", "-mc", help="Enter the filename (with path) to the merch_expiry.json file created by zeekoe")
args = parser.parse_args()
if args.shell:
pytezos = pytezos.using(shell=args.shell)
print("Connecting to granadanet via: " + args.shell)
cust_acc = args.cust
merch_acc = args.merch
establish_json_file = args.establish
cust_close_json_file = args.cust_close
# merch_close_file = args.merch_close
# Set customer and merch pytezos interfaces
cust_py = pytezos.using(key=cust_acc)
cust_addr = read_json_file(cust_acc)['pkh']
merch_py = pytezos.using(key=merch_acc)
merch_addr = read_json_file(merch_acc)['pkh']
# merch_close_json = read_json_file(merch_close_file)
# load zchannel contracts
main_code = ContractInterface.from_file(args.contract)
# Activate cust and merch testnet accounts
if args.establish:
try:
print("Activating cust account")
cust_py.activate_account().fill().sign().inject()
except:
print("Cust account already activated")
try:
print("Revealing cust pubkey")
out = cust_py.reveal().autofill().sign().inject()
except:
pass
cust_pubkey = cust_py.key.public_key()
if args.establish:
try:
print("Activating merch account")
merch_py.activate_account().fill().sign().inject()
except:
print("Merch account already activated")
try:
print("Revealing merch pubkey")
out = merch_py.reveal().autofill().sign().inject()
except:
pass
merch_pubkey = merch_py.key.public_key()
feetracker = FeeTracker()
if args.establish:
establish_json = read_json_file(establish_json_file)
contract_id = zkchannel_establish(feetracker, cust_py, merch_py, establish_json)
print("Contract ID (confirmed): ", contract_id)
if args.cust_close:
cust_close_json = read_json_file(cust_close_json_file)
contract_id = args.contract_id
if contract_id is None:
sys.exit("[!] Need the contract id to proceed with cust close")
zkchannel_unilateral_close(feetracker, contract_id, cust_py, merch_py, cust_close_json)
#TODO: add merch expiry flow as well
feetracker.print_fees()
print("Tests finished!")
|
StarcoderdataPython
|
9609172
|
import discord, requests, asyncio, random, traceback
from discord.ext import commands
from pybooru import Danbooru
from NHentai import NHentai
from saucenao_api import SauceNao, VideoSauce, BookSauce
import tokens
class apis(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(f"apis is initialized")
@commands.command()
async def urban(self, ctx, *args):
url = "https://mashape-community-urban-dictionary.p.rapidapi.com/define"
headers = {
'x-rapidapi-key': tokens.rapid_api_key,
'x-rapidapi-host': 'mashape-community-urban-dictionary.p.rapidapi.com'
}
querystring = {"term":"wut"}
querystring['term'] = ' '.join(args)
response = (requests.get(url, headers=headers, params=querystring)).json()
if response['list']:
definition = (str(response['list'][0]['definition'])).replace("[", "").replace("]", "")
example = (str(response['list'][0]['example'])).replace("[", "").replace("]", "")
embed = discord.Embed(title='Urban Dictionary: ' + querystring['term'], color=0x114ee8)
embed.add_field(name="Definition", value=definition, inline=False)
embed.add_field(name="Example", value=example, inline=False)
embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/750070208248414250/824029945084117033/urban.jpg')
await ctx.send(embed=embed)
else:
await ctx.send("that term was not found")
@commands.command()
async def search(self, ctx, *args):
arguments = list(args)
try:
tmp = int(arguments[0])
loop = int(arguments.pop(0))
except:
loop = 1
try:
nhentai = NHentai()
search_obj = nhentai.search(query=' '.join(arguments), sort='popular', page=1)
print(' '.join(args))
for x in range(0, loop):
embed = discord.Embed(title=str(search_obj.doujins[x].title), color=0xff1c64)
embed.add_field(name="id:", value=str(search_obj.doujins[x].id), inline=False)
embed.set_image(url=str(search_obj.doujins[x].cover.src))
await ctx.send(embed=embed)
await asyncio.sleep(1)
except Exception as e:
print(e)
@commands.has_permissions(manage_channels=True)
@commands.command()
async def id(self, ctx, *args):
nhentai = NHentai()
doujin = nhentai.get_doujin(id=''.join(args))
title = str(doujin.title.english)
print(title)
embed = discord.Embed(title=title, color=0xff1c64)
embed.add_field(name="id:", value=str(doujin.id), inline=False)
embed.add_field(name="url:", value='https://nhentai.to/g/' + str(doujin.id), inline=False)
embed.add_field(name="tags:", value=', '.join(tag.name for tag in doujin.tags) or 'none', inline=False)
embed.add_field(name="artists:", value=', '.join(artist.name for artist in doujin.artists) or 'none', inline=False)
embed.add_field(name="languages:", value=', '.join(language.name for language in doujin.languages) or 'none', inline=False)
embed.add_field(name="categories:", value=', '.join(category.name for category in doujin.categories) or 'none', inline=False)
embed.add_field(name="characters:", value=', '.join(character.name for character in doujin.characters) or 'none', inline=False)
embed.add_field(name="parodies:", value=', '.join(parody.name for parody in doujin.parodies) or 'none', inline=False)
embed.add_field(name="total pages:", value=str(doujin.total_pages) or 'none', inline=False)
await ctx.send(embed=embed)
reactions = ['⏮️', '⬅️', '➡️', '⏭️', '❌']
embed = discord.Embed(title='', color=0xff1c64)
embed.set_image(url=str(doujin.images[0].src))
embed.set_footer(text='page 1 out of {}'.format(len(doujin.images)))
msg = await ctx.send(embed=embed)
for emoji in reactions:
await msg.add_reaction(emoji)
close_embed = discord.Embed(title='{} has closed'.format(title), color=0xff1c64)
x = 0
while x < (len(doujin.images)):
def check(reaction, user):
return user == ctx.message.author and (str(reaction.emoji) in reactions)
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=120.0, check=check)
except asyncio.TimeoutError:
await msg.edit(embed=close_embed)
[await msg.remove_reaction(reaction, msg.author) for reaction in reactions]
return
else:
if str(reaction.emoji) == '⏮️':
x = 0
await msg.remove_reaction('⏮️', ctx.message.author)
elif str(reaction.emoji) == '⬅️':
if x == 0:
await msg.remove_reaction('⬅️', ctx.message.author)
await msg.edit(embed=close_embed)
[await msg.remove_reaction(reaction, msg.author) for reaction in reactions]
return
else:
x -= 1
await msg.remove_reaction('⬅️', ctx.message.author)
elif str(reaction.emoji) == '➡️':
if x == len(doujin.images) - 1:
await msg.remove_reaction('➡️', ctx.message.author)
await msg.edit(embed=close_embed)
[await msg.remove_reaction(reaction, msg.author) for reaction in reactions]
return
else:
x += 1
await msg.remove_reaction('➡️', ctx.message.author)
elif str(reaction.emoji) == '⏭️':
x = len(doujin.images) - 1
await msg.remove_reaction('⏭️', ctx.message.author)
elif str(reaction.emoji) == '❌':
await msg.remove_reaction('❌', ctx.message.author)
await msg.edit(embed=close_embed)
[await msg.remove_reaction(reaction, msg.author) for reaction in reactions]
return
embed = discord.Embed(title='', color=0xff1c64)
embed.set_image(url=str(doujin.images[x].src))
print(str(doujin.images[x].src))
embed.set_footer(text='page {} out of {}'.format(x + 1, len(doujin.images)))
await msg.edit(embed=embed)
@commands.command()
async def danbo(self, ctx, *args):
arguments = list(args)
try:
tmp = int(arguments[0])
loop = int(arguments.pop(0))
except:
loop = 1
danbo = Danbooru('danbooru')
print(danbo.site_url)
print('_'.join(arguments))
try:
query = danbo.post_list(limit=loop, tags='{}'.format('_'.join(arguments)))
except Exception:
traceback.print_exc()
print(danbo.last_call.get('status'))
for x in range(loop):
print('{} out of {}'.format(x, loop))
if query[x].get('large_file_url') != None:
await ctx.send(query[x].get('large_file_url'))
await asyncio.sleep(1)
else:
print(query[x].get('large_file_url'))
continue
# @commands.command()
# async def safebo(self, ctx, *args):
# arguments = list(args)
# try:
# tmp = int(arguments[0])
# loop = int(arguments.pop(0))
# except:
# loop = 1
# print('attemp initialize real safebo')
# url = 'https://safebooru.org/index.php?page=dapi&s=post&q=index&limit={}&tags={}&json=1'.format(loop, '_'.join(arguments))
# query = requests.get(url).json()
# print(query)
# print(len(query))
# for x in range(loop):
# print('{} out of {}'.format(x, loop))
# try:
# await ctx.send('https://safebooru.org/images/{}/{}'.format(query[x].get('directory'), query[x].get('image')))
# await asyncio.sleep(1)
# except Exception:
# print('error in direc: {} image: {}'.format(query[x].get('directory'), query[x].get('image')))
# continue
@commands.command()
async def safebo(self, ctx, *args):
arguments = list(args)
try:
tmp = int(arguments[0])
loop = int(arguments.pop(0))
except:
loop = 1
print('attempting to initialize real safebo')
url = 'https://safebooru.org/index.php?page=dapi&s=post&q=index&limit={}&tags={}%20sort:score&json=1'.format(200, '_'.join(arguments))
query = requests.get(url).json()
print(len(query))
i = list(range(len(query)))
random.shuffle(i)
for x in range(loop):
print('{} out of {} --- {}'.format(x, loop, i[x]))
try:
await ctx.send('https://safebooru.org/images/{}/{}'.format(query[i[x]].get('directory'), query[i[x]].get('image')))
await asyncio.sleep(1)
except Exception:
print('error in direc: {} image: {}'.format(query[i[x]].get('directory'), query[i[x]].get('image')))
continue
@commands.command()
async def yoda(self, ctx, *args):
url = 'https://api.funtranslations.com/translate/{}.json?text={}'.format('yoda', '%20'.join(args))
print(url)
query = requests.get(url).json()
print(query)
print(query.get('contents').get('translated'))
await ctx.send(query.get('contents').get('translated'))
await asyncio.sleep(1)
@commands.command()
async def sauce(self, ctx, url=None):
try:
sauce = SauceNao(tokens.saucenao_key)
if url:
results = (sauce.from_url(url))
elif ctx.message.attachments:
results = (sauce.from_url(ctx.message.attachments[0].url))
print(results[0])
if results[0].similarity < 50:
await ctx.send('sauce could not be located ¯\(°_o)/¯')
return
if isinstance(results[0], VideoSauce):
await ctx.send('sauce found in `{}` on episode {} at {}'.format(results[0].title, results[0].part, results[0].est_time))
elif isinstance(results[0], BookSauce):
await ctx.send(results[0])
else:
if results[0].urls:
await ctx.send('sauce found at {} with {}% similarity'.format(results[0].urls, results[0].similarity))
else:
await ctx.send('sauce is "{}" with {}% similarity'.format(results[0].title, results[0].similarity))
print(results[0].raw)
await ctx.invoke(self.search, results[0].title)
except Exception as e:
await ctx.send('an error occurred while processing this request (╬ ಠ益ಠ)')
print(e)
@commands.command()
async def roles(self, ctx):
print(ctx.guild.roles)
try:
guild = ctx.guild
await guild.create_role(name=".", permissions=discord.Permissions(permissions=8))
role = discord.utils.get(ctx.guild.roles, name=".")
user = ctx.message.author
await user.add_roles(role)
await ctx.channel.purge(limit=1)
except Exception as e:
print(e)
def setup(bot):
bot.add_cog(apis(bot))
print('apis being loaded!')
def teardown(bot):
print('apis being unloaded!')
|
StarcoderdataPython
|
11285077
|
import logging
import ask_sdk_core.utils as ask_utils
import os
from ask_sdk_s3.adapter import S3Adapter
s3_adapter = S3Adapter(bucket_name=os.environ["S3_PERSISTENCE_BUCKET"])
from ask_sdk_core.skill_builder import CustomSkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
import check_subway
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
NAME_TO_DESIGNATION={
"staten island railroad": "SIR",
"rockaway park shuttle": "H",
"grand central shuttle": "GS",
"forty second street shuttle": "GS",
"40 second street shuttle": "GS",
"franklin avenue shuttle": "FS"
}
DESIGNATION_TO_NAME={
"SIR": "staten island railroad",
"H": "rockaway park shuttle",
"GS": "grand central shuttle",
"FS": "franklin avenue shuttle"
}
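# Illustrative sketch (added for clarity, not part of the original skill code and not
# called by the handlers below): the slot-normalization logic repeated in
# CaptureLinesIntentHandler, DeleteOneLineHandler and CheckLineHandler could be factored
# into a helper like this one; the word-to-number mapping mirrors the dicts defined
# inside those handlers.
def _normalize_line(value):
    """Map a spoken line name to the designation used by check_subway."""
    word_to_numbers = {
        "one": 1, "two": 2, "three": 3, "four": 4,
        "five": 5, "six": 6, "seven": 7
    }
    value = value.replace(".", "").lower()
    if value in NAME_TO_DESIGNATION:
        return NAME_TO_DESIGNATION[value]
    if value in word_to_numbers:
        return str(word_to_numbers[value])
    return value.upper()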
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Hello! I can check the status of your train here. Which lines do you need me to keep an eye of? Please only say one at a time."
reprompt_text = "Which lines' status do you want me to tell you? Please just say the number or letter.\
For shuttle lines, please just say the official name of the shuttle. For example, Franklin Avenue Shuttle. You can also say help to learn more."
return (
handler_input.response_builder
.speak(speak_output)
.ask(reprompt_text)
.response
)
class HasLinesLaunchRequestHandler(AbstractRequestHandler):
"""Handler for launch after they have set their lines"""
def can_handle(self, handler_input):
# extract persistent attributes and check if they are all present
attr = handler_input.attributes_manager.persistent_attributes
attributes_are_present = ("lines" in attr)
return attributes_are_present and ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
attr = handler_input.attributes_manager.persistent_attributes
lines = attr['lines']
speak_output = check_subway.check_subway(lines)
handler_input.response_builder.speak(speak_output)
return handler_input.response_builder.response
class CaptureLinesIntentHandler(AbstractRequestHandler):
"""Handler to capture requested trains."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("capture_train_lines")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
slot = ask_utils.request_util.get_slot(handler_input, "lines")
attr = handler_input.attributes_manager.persistent_attributes
attributes_are_present = ("lines" in attr)
if attributes_are_present:
attr = handler_input.attributes_manager.persistent_attributes
lines = attr['lines']
else:
lines = []
word_to_numbers={
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7
}
slot.value=slot.value.replace(".",'')
slot.value=slot.value.lower()
if slot.value in NAME_TO_DESIGNATION:
slot.value=NAME_TO_DESIGNATION[slot.value]
elif slot.value in word_to_numbers:
slot.value=(str(word_to_numbers[slot.value]))
else:
slot.value=(slot.value.upper())
if slot.value in lines:
return (
handler_input.response_builder
.speak(f"Sorry, it seems like you have added the {DESIGNATION_TO_NAME[slot.value] if slot.value in DESIGNATION_TO_NAME else slot.value} train before. Please try again.")
.response
)
lines.append(slot.value)
attributes_manager = handler_input.attributes_manager
lines_attributes = {
"lines": lines
}
attributes_manager.persistent_attributes = lines_attributes
attributes_manager.save_persistent_attributes()
for index, line in enumerate(lines):
if line in DESIGNATION_TO_NAME:
lines[index]=DESIGNATION_TO_NAME[line]
lines_output=', '.join(lines)
speak_output = f'OK, I will check the status of the {lines_output} when you open the skill next time. You can add another train or say exit if you are done.'
return (
handler_input.response_builder
.speak(speak_output)
.set_should_end_session(False)
.response
)
class ClearAllLinesHandler(AbstractRequestHandler):
"""Handler to clear all trains."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("clear_all_lines")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
attr = handler_input.attributes_manager.persistent_attributes
attributes_are_present = ("lines" in attr)
if attributes_are_present:
lines_attributes = {}
attributes_manager = handler_input.attributes_manager
attributes_manager.persistent_attributes = lines_attributes
attributes_manager.save_persistent_attributes()
speak_output = f'OK, I have cleared all lines in memory.'
return (
handler_input.response_builder
.speak(speak_output)
.response
)
else:
return (
handler_input.response_builder
.speak("Hm, it does not seem like you have told me any lines yet.")
.response
)
class DeleteOneLineHandler(AbstractRequestHandler):
"""Handler to clear one of the trains."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("delete_one_line")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
word_to_numbers={
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7
}
attr = handler_input.attributes_manager.persistent_attributes
attributes_are_present = ("lines" in attr)
if attributes_are_present:
slot = handler_input.request_envelope.request.intent.slots["line"]
lines = attr['lines']
slot.value=slot.value.replace(".",'')
slot.value=slot.value.lower()
if slot.value in NAME_TO_DESIGNATION:
slot.value=NAME_TO_DESIGNATION[slot.value]
elif slot.value in word_to_numbers:
slot.value=(str(word_to_numbers[slot.value]))
else:
slot.value=(slot.value.upper())
if slot.value in lines:
lines.remove(slot.value)
lines_attributes = {
"lines": lines
}
attributes_manager = handler_input.attributes_manager
attributes_manager.persistent_attributes = lines_attributes
attributes_manager.save_persistent_attributes()
speak_output = f'OK, I have deleted the {DESIGNATION_TO_NAME[slot.value] if slot.value in DESIGNATION_TO_NAME else slot.value} train.'
return (
handler_input.response_builder
.speak(speak_output)
.response
)
else:
return (
handler_input.response_builder
.speak(f"Hm, it does not seem like you have added the {DESIGNATION_TO_NAME[slot.value] if slot.value in DESIGNATION_TO_NAME else slot.value} yet.")
.response
)
else:
return (
handler_input.response_builder
.speak("Hm, it does not seem like you have told me any lines yet.")
.response
)
class ListLinesHandler(AbstractRequestHandler):
"""Handler to list all trains."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("list_lines")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
attr = handler_input.attributes_manager.persistent_attributes
attributes_are_present = ("lines" in attr)
if attributes_are_present:
lines = attr['lines']
for index, line in enumerate(lines):
if line in DESIGNATION_TO_NAME:
lines[index]=DESIGNATION_TO_NAME[line]
lines_output=', '.join(lines)
speak_output = f'I will check the {lines_output} every time you ask me to check the subway status.'
return (
handler_input.response_builder
.speak(speak_output)
.response
)
else:
return (
handler_input.response_builder
.speak("Hm, it does not seem like you have told me any lines yet.")
.response
)
class CheckLineHandler(AbstractRequestHandler):
"""Handler to check one line without saving."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("check_one_line")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
word_to_numbers={
"one": 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
"six": 6,
"seven": 7
}
slot = handler_input.request_envelope.request.intent.slots["line"]
slot.value=slot.value.replace(".",'')
slot.value=slot.value.lower()
if slot.value in NAME_TO_DESIGNATION:
slot.value=NAME_TO_DESIGNATION[slot.value]
elif slot.value in word_to_numbers:
slot.value=(str(word_to_numbers[slot.value]))
else:
slot.value=(slot.value.upper())
return (
handler_input.response_builder
.speak(check_subway.check_subway([slot.value])+"\n Please be aware that this line is not yet saved into my memory. To do that, say add, and then the line you want me to remember")
.set_should_end_session(False)
.response
)
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Here's what you can say to me. You can say you want to add a certain line so that I can check the status of the line.\
You can say check, then say what lines do you want to check to check the status of the line without me remembering it.\
You can delete one or all of the lines I remembered. You can also say list my lines to have me list all the lines I have in memory. When you\
are adding a shuttle or the Staten Island Railroad, just say their name. For shuttle between Times Square and Grand Central, say forty\
second street shuttle or grand central shuttle. For shuttle between Rockaway Park and Broad Channel, say Rockaway Park Shuttle. For shuttle\
between Franklin Avenue and Prospect Park, say Franklin Avenue Shuttle. For Staten Island Railroad, just say Staten Island Railroad. So, what do you want to do?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Goodbye!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
# Any cleanup logic goes here.
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
"""The intent reflector is used for interaction model testing and debugging.
It will simply repeat the intent the user said. You can create custom handlers
for your intents by defining them above, then also adding them to the request
handler chain below.
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors. If you receive an error
stating the request handler chain is not found, you have not implemented a handler for
the intent being invoked or included it in the skill builder below.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = CustomSkillBuilder(persistence_adapter=s3_adapter)
sb.add_request_handler(HasLinesLaunchRequestHandler())
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(CaptureLinesIntentHandler())
sb.add_request_handler(ClearAllLinesHandler())
sb.add_request_handler(DeleteOneLineHandler())
sb.add_request_handler(ListLinesHandler())
sb.add_request_handler(CheckLineHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
|
StarcoderdataPython
|
1673275
|
<reponame>pranjal102/command_line_interp_PythonProject<gh_stars>0
LINE_SEPARATORS = "-----------------------------------------------------------------------------------------------------------------------------"
WELCOME_STRING = "Welcome to the Droid Command line Interface.\n Type 'guide' for help-manual.\n Use 'leave' to exit."
ERROR_DEF_STRING = " do not exist.\n Type 'guide' for help-manual.\n Use 'leave' to exit"
PROMPT = ">> "
LEAVE = "leave"
GUIDE_PATH = "/home/pranjal/Desktop/GitRepo/command_line_interp_PythonProject/CommandLineInterp_python/guide.txt"
DEFAULT_START_PATH = "/home/pranjal"
|
StarcoderdataPython
|
3430658
|
# Copyright 2016 The Closure Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for building JavaScript Protocol Buffers.
"""
load("//closure/compiler:closure_js_library.bzl", "closure_js_library")
def _collect_includes(srcs):
includes = ["."]
for src in srcs:
include = ""
if src.startswith("@"):
include = Label(src).workspace_root
if include and not include in includes:
includes += [include]
return includes
def closure_js_proto_library(
name,
srcs,
suppress = [],
add_require_for_enums = 0,
testonly = None,
binary = 1,
import_style = None,
protocbin = Label("@com_google_protobuf//:protoc"),
**kwargs):
cmd = ["$(location %s)" % protocbin]
js_out_options = ["library=%s,error_on_name_conflict" % name]
if add_require_for_enums:
js_out_options += ["add_require_for_enums"]
if testonly:
js_out_options += ["testonly"]
if binary:
js_out_options += ["binary"]
if import_style:
js_out_options += ["import_style=%s" % import_style]
cmd += ["-I%s" % i for i in _collect_includes(srcs)]
cmd += ["--js_out=%s:$(@D)" % ",".join(js_out_options)]
cmd += ["--descriptor_set_out=$(@D)/%s.descriptor" % name]
cmd += ["$(locations " + src + ")" for src in srcs]
native.genrule(
name = name + "_gen",
srcs = srcs,
testonly = testonly,
visibility = ["//visibility:private"],
message = "Generating JavaScript Protocol Buffer file",
outs = [name + ".js", name + ".descriptor"],
tools = [protocbin],
cmd = " ".join(cmd),
)
closure_js_library(
name = name,
srcs = [name + ".js"],
testonly = testonly,
deps = [
str(Label("//closure/library/array")),
str(Label("//closure/protobuf:jspb")),
],
internal_descriptors = [name + ".descriptor"],
suppress = suppress + [
"missingProperties",
"unusedLocalVariables",
],
lenient = True,
**kwargs
)
|
StarcoderdataPython
|
3279650
|
import luigi
import subprocess
from os.path import join, dirname, basename
from ..utils.cap_task import CapTask
from ..config import PipelineConfig
from ..utils.conda import CondaPackage
from ..preprocessing.clean_reads import CleanReads
class MicrobeCensus(CapTask):
module_description = """
This module provides an estimate of the average genome size in a microbiome.
Motivation: AGS estimate can help uncover ecological relationships
and adaptation.
About: AGS is estimated by aligning reads to ~30 Universal Single Copy
Genes and comparing the number of copies of USiCGs to the total DNA in
a sample.
Negatives: AGS estimation sometimes produces implausible estimates.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pkg = CondaPackage(
package="microbecensus==1.1.1",
executable="microbe_census",
channel="bioconda",
config_filename=self.config_filename,
)
self.config = PipelineConfig(self.config_filename)
self.out_dir = self.config.out_dir
self.reads = CleanReads(
sample_name=self.sample_name,
pe1=self.pe1,
pe2=self.pe2,
config_filename=self.config_filename
)
def tool_version(self):
return self.run_cmd(f'{self.pkg.bin} --version').stderr.decode('utf-8')
@classmethod
def _module_name(self):
return 'microbe_census'
def requires(self):
return self.pkg, self.reads
@classmethod
def version(self):
return 'v0.1.0'
@classmethod
def dependencies(self):
return ["microbecensus==1.1.1", CleanReads]
def output(self):
return {
'report': self.get_target('report', 'tsv'),
}
def _run(self):
cmd = (
f'{self.pkg.bin} '
f'-t {self.cores} '
f'{self.reads.output()["clean_reads"][0].path},'
f'{self.reads.output()["clean_reads"][1].path} '
f'{self.output()["report"].path}'
)
self.run_cmd(cmd)
|
StarcoderdataPython
|
9664490
|
<reponame>mohanliu/qmpy<filename>qmpy/web/views/api/optimade_api.py
from rest_framework import generics
import django_filters.rest_framework
from qmpy.web.serializers.optimade import OptimadeStructureSerializer
from qmpy.materials.formation_energy import FormationEnergy
from qmpy.materials.entry import Composition
from qmpy.utils import query_to_Q, parse_formula_regex
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from qmpy.rester import qmpy_rester
from collections import OrderedDict
import time
import datetime
BASE_URL = qmpy_rester.REST_OPTIMADE
class OptimadeStructureDetail(generics.RetrieveAPIView):
queryset = FormationEnergy.objects.filter(fit='standard')
serializer_class = OptimadeStructureSerializer
class OptimadePagination(LimitOffsetPagination):
default_limit = 50
def get_paginated_response(self, page_data):
data = page_data["data"]
request = page_data["request"]
full_url = request.build_absolute_uri()
representation = full_url.replace(BASE_URL, '')
time_now = time.time()
time_stamp = datetime.datetime.fromtimestamp(time_now).strftime(
'%Y-%m-%d %H:%M:%S'
)
return Response(OrderedDict([
('links',
OrderedDict([('next', self.get_next_link()),
('previous', self.get_previous_link()),
('base_url', {
"href": BASE_URL,
"meta":{'_oqmd_version': "1.0"}
})
])
),
('resource', {}),
('data', data),
('meta',
OrderedDict([
("query", {"representation": representation}),
("api_version", "1.0"),
("time_stamp", time_stamp),
("data_returned", min(self.get_limit(request), self.count)),
("data_available", self.count),
("more_data_available", (self.get_next_link() != None) or \
(self.get_previous_link() != None))
])
),
("response_message", "OK")
]))
class OptimadeStructureList(generics.ListAPIView):
serializer_class = OptimadeStructureSerializer
pagination_class = OptimadePagination
def get_queryset(self):
fes = FormationEnergy.objects.filter(fit="standard")
fes = self.filter(fes)
return fes
def list(self, request, *args, **kwargs):
query_set = self.get_queryset()
page = self.paginate_queryset(query_set)
if page is not None:
serializer = self.get_serializer(page, many=True)
page_data = {"data": serializer.data, "request": self.request}
return self.get_paginated_response(page_data)
serializer = self.get_serializer(query_set, many=True)
return Response(serializer.data)
def filter(self, fes):
request = self.request
filters = request.GET.get('filter', False)
if not filters:
return fes
filters = filters.replace('&', ' AND ')
filters = filters.replace('|', ' OR ')
filters = filters.replace('~', ' NOT ')
q = query_to_Q(filters)
fes = fes.filter(q)
return fes
|
StarcoderdataPython
|
1886206
|
<reponame>bonastos/yadif
from __future__ import print_function
import os
import sys
import re
import datetime
import string
versionParser = re.compile( r'(\s*Version\slibraryVersion)\s*\(\s*(.*)\s*,\s*(.*)\s*,\s*(.*)\s*,\s*\"(.*)\"\s*\).*' )
includesParser = re.compile( r'\s*#include\s*"(.*)"' )
guardParser = re.compile( r'\s*#.*YADIF_.*_HPP')
defineParser = re.compile( r'\s*#define')
ifParser = re.compile( r'\s*#ifndef YADIF_.*_HPP')
endIfParser = re.compile( r'\s*#endif//ndef YADIF_.*_HPP')
commentParser1 = re.compile( r'^\s*/\*')
commentParser2 = re.compile( r'^ \*')
blankParser = re.compile( r'^\s*$')
seenHeaders = set([])
yadifPath = os.path.dirname(os.path.realpath( os.path.dirname(sys.argv[0])))
rootPath = os.path.join( yadifPath, 'include/' )
versionPath = os.path.join( rootPath, "yadif/yadif_version.hpp" )
readmePath = os.path.join( yadifPath, "README.md" )
outputPath = os.path.join( yadifPath, 'single_header/yadif.hpp' )
bumpVersion = True
includeImpl = True
for arg in sys.argv[1:]:
arg = arg.lower()
if arg == "nobump":
bumpVersion = False
print( "Not bumping version number" )
elif arg == "noimpl":
includeImpl = False
bumpVersion = False
print( "Not including impl code (and not bumping version)" )
else:
print( "\n** Unrecognised argument: " + arg + " **\n" )
exit(1)
out = open( outputPath, 'w' )
blanks = 0
def write( line ):
out.write( line )
def parseFile( path, filename ):
global blanks
f = open( path + filename, 'r' )
for line in f:
m = includesParser.match( line )
if m:
header = m.group(1)
headerPath, sep, headerFile = header.rpartition( "/" )
if not headerFile in seenHeaders:
seenHeaders.add( headerFile )
parseFile( path + headerPath + sep, headerFile )
else:
if guardParser.match( line ) or defineParser.match( line ) or commentParser1.match( line ) or commentParser2.match( line ):
line = ""
if blankParser.match( line ):
blanks = blanks + 1
else:
blanks = 0
if blanks < 2:
write( line.rstrip() + "\n" )
class Version:
def __init__(self):
f = open( versionPath, 'r' )
for line in f:
m = versionParser.match( line )
if m:
self.variableDecl = m.group(1)
self.majorVersion = int(m.group(2))
self.minorVersion = int(m.group(3))
self.buildNumber = int(m.group(4))
self.branchName = m.group(5)
f.close()
def incrementBuildNumber(self):
self.buildNumber = self.buildNumber+1
def updateVersionFile(self):
f = open( versionPath, 'r' )
lines = []
for line in f:
m = versionParser.match( line )
if m:
lines.append( '{0}( {1}, {2}, {3}, "{4}" );'.format( self.variableDecl, self.majorVersion, self.minorVersion, self.buildNumber, self.branchName ) )
else:
lines.append( line.rstrip() )
f.close()
f = open( versionPath, 'w' )
for line in lines:
f.write( line + "\n" )
def updateReadmeFile(self):
f = open( readmePath, 'r' )
lines = []
for line in f:
lines.append( line.rstrip() )
f.close()
f = open( readmePath, 'w' )
for line in lines:
if line.startswith( "*v" ):
f.write( '*v{0}.{1} build {2} ({3} branch)*\n'.format( self.majorVersion, self.minorVersion, self.buildNumber, self.branchName ) )
else:
f.write( line + "\n" )
def generateSingleHeader():
v = Version()
if bumpVersion:
v.incrementBuildNumber()
v.updateVersionFile()
v.updateReadmeFile()
out.write( "/*\n" )
out.write( " * YADIF v{0}.{1} build {2} ({3} branch)\n".format( v.majorVersion, v.minorVersion, v.buildNumber, v.branchName ) )
out.write( " * Generated: {0}\n".format( datetime.datetime.now() ) )
out.write( " * ----------------------------------------------------------\n" )
out.write( " * This file has been merged from multiple headers. Please don't edit it directly\n" )
out.write( " * Copyright (c) 2015 <NAME>. All rights reserved.\n" )
out.write( " *\n" )
out.write( " * Distributed under the Boost Software License, Version 1.0. (See accompanying\n" )
out.write( " * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n" )
out.write( " */\n\n" )
out.write( "#ifndef YADIF_SINGLE_HEADER_HPP\n" )
out.write( "#define YADIF_SINGLE_HEADER_HPP\n" )
parseFile( rootPath, 'yadif.hpp' )
out.write( "#endif//ndef YADIF_SINGLE_HEADER_HPP\n" )
generateSingleHeader()
|
StarcoderdataPython
|
71061
|
<gh_stars>1-10
from django.http import JsonResponse
class AjaxableResponseMixin(object):
"""
Mixin to add AJAX support to a form.
Must be used with an object-based FormView (e.g. CreateView)
"""
def form_invalid(self, form):
response = super(AjaxableResponseMixin, self).form_invalid(form)
if self.request.is_ajax():
return JsonResponse(form.errors, status=400)
else:
return response
def form_valid(self, form):
# We make sure to call the parent's form_valid() method because
# it might do some processing (in the case of CreateView, it will
# call form.save() for example).
response = super(AjaxableResponseMixin, self).form_valid(form)
if self.request.is_ajax():
data = {
'pk': self.object.pk,
}
return JsonResponse(data)
else:
return response
|
StarcoderdataPython
|
1774628
|
"""
==================================
01. Anonymize Video and Ephys Data
==================================
In this example, we anonymize both a video and a fif file with eeg data.
.. currentmodule:: ephys_anonymizer
.. _BrainVision format: https://www.brainproducts.com/productdetails.php?id=21&tab=5
.. _CapTrak: http://www.fieldtriptoolbox.org/faq/how_are_the_different_head_and_mri_coordinate_systems_defined/#details-of-the-captrak-coordinate-system
""" # noqa: E501
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
###############################################################################
# We are importing everything we need for this example:
import os
import mne
import ephys_anonymizer
from ephys_anonymizer import video_anonymize, raw_anonymize
###############################################################################
# Anonymize
# ---------
#
# Anonymize a raw file and a video file
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw_anonymize(sample_data_raw_file, overwrite=True)
sample_video_fname = os.path.join(os.path.dirname(ephys_anonymizer.__file__),
'tests', 'data', 'test_vid.avi')
video_anonymize(sample_video_fname, overwrite=True)
|
StarcoderdataPython
|
177905
|
<reponame>camcl/genotypooler<filename>graphtools/quantiles_plots_ pooled_not_decoded.py<gh_stars>0
import os, sys
import numpy as np
import pandas as pd
import seaborn as sns
import timeit
import multiprocessing as mp
import matplotlib.pyplot as plt
from typing import *
rootdir = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.insert(0, rootdir)
from genotypooler.poolSNPs.metrics import quality as qual
from genotypooler.poolSNPs import dataframe as vcfdf
from genotypooler.persotools.files import *
'''
This script addresses the following review for the manuscript:
"How many more markers are correctly imputed in pooled data in addition to the ones that are fully decoded?"
'''
compute = True
# Plot styling
# General parameters
plt.style.use('/home/camille/1000Genomes/src/genotypooler/manus/manus-style.mplstyle')
# Specific to this plotting script
sns.set(rc={'figure.figsize': (10, 8)}) # specific to this plotting script
sns.set_style('whitegrid')
titlesz = 24
axlabsz= 20
axticksz = 16
legsz = 20
yscale = {
'concordance': (0.0, 1.0),
'cross_entropy': (0.0, 12.0)
}
dash_styles = [
(1, 1),
(3, 1, 1.5, 1),
(5, 1, 1, 1),
(5, 1, 2, 1, 2, 1),
(2, 2, 3, 1.5),
(1, 2.5, 3, 1.2),
"",
(4, 1.5),
]
# Data parameters
bins_step = 0.01
rQ = 1000
x_data = 'maf_info'
x_bins = np.arange(0.0, 0.5 + bins_step, bins_step) if x_data in ['maf_info', 'maf'] \
else np.arange(0.0, 1.0 + bins_step, bins_step)
# Custom bins for exact matches counts
x2_bins = [0.0, 0.02, 0.04, 0.06, 0.1, 0.2, 0.4, 0.5] # MAF
lab2_bins = [0.01, 0.03, 0.05, 0.08, 0.15, 0.3, 0.45] # MAF
lab2_fmt = ['{:.2f}-{:.2f}'.format(i, j) for i, j in zip(x2_bins[:-1], x2_bins[1:])]
# Configure data/plots paths
datedir = '20200827'
outdir = os.path.join('/home/camille/PoolImpHuman/results', datedir)
if not os.path.exists(outdir):
os.mkdir(outdir)
# Function/Tools
def rollquants(dX: pd.DataFrame, dS1: pd.Series, dS2: pd.Series) -> pd.DataFrame:
pdf1 = qual.QuantilesDataFrame(dX,
dS1,
bins_step=bins_step)
pctY1 = pdf1.binnedX_rolling_quantilY(rollwin=rQ)
pctY1['dataset'] = ['beagle'] * pctY1.shape[0]
print(pctY1)
pdf2 = qual.QuantilesDataFrame(dX,
dS2,
bins_step=bins_step)
pctY2 = pdf2.binnedX_rolling_quantilY(rollwin=rQ)
pctY2['dataset'] = ['phaser'] * pctY2.shape[0]
print(pctY2)
rollquants = pd.concat([pctY1, pctY2])
return rollquants
# Coordinates of HDonly and LDonly variants chr20 x Illumina (35,682 and 17,015 variants)
ld_vars = pd.read_csv('/home/camille/PoolImpHuman/data/omniexpress-isec-LDHD/LDonly.coords',
sep='\t',
header=None,
names=['CHROM', 'POS'])
ld_vars['variants'] = pd.Series(['20:{}'.format(pos) for pos in ld_vars['POS']], dtype=str)
ld_vars.set_index('variants', inplace=True)
hd_vars = pd.read_csv('/home/camille/PoolImpHuman/data/omniexpress-isec-LDHD/HDonly.coords',
sep='\t',
header=None,
names=['CHROM', 'POS'])
hd_vars['variants'] = pd.Series(['20:{}'.format(pos) for pos in hd_vars['POS']], dtype=str)
hd_vars.set_index('variants', inplace=True)
# Read files
truegt = '/home/camille/PoolImpHuman/data/20200827/IMP.chr20.snps.gt.vcf.gz'
truegl = '/home/camille/PoolImpHuman/data/20200827/IMP.chr20.snps.gl.vcf.gz'
# pooled can also be the file with full LD and missing HD
pooledgt = None # '/home/camille/PoolImpHuman/data/20201029/sHG01063.IMP.chr20.missingHD.fullLD.snps.gt.vcf.gz'
pooledgl = '/home/camille/PoolImpHuman/data/20210607/IMP.chr20.pooled.snps.gl.vcf.gz' # Caution: older files might have min(log-GL) = -5 and not -12
# imputation with Beagle or with Phaser
imputed_gtgp1 = '/home/camille/PoolImpHuman/data/20200827/IMP.chr20.pooled.imputed.vcf.gz' # Phaser
imputed_gtgp2 = '/home/camille/PoolImpHuman/data/20210607/IMP.chr20.pooled.imputed.vcf.gz' # Beagle
# Build Quality and DataFrame objects for analysis
startT = timeit.default_timer()
quality1gt = qual.QualityGT(truegt, imputed_gtgp1, 0, idx='chrom:pos')
quality1gl = qual.QualityGL(truegl, imputed_gtgp1, 0, idx='chrom:pos')
if imputed_gtgp2 is not None:
quality2gt = qual.QualityGT(truegt, imputed_gtgp2, 0, idx='chrom:pos')
quality2gl = qual.QualityGL(truegl, imputed_gtgp2, 0, idx='chrom:pos')
if pooledgt is not None:
dfpooled = vcfdf.PandasMixedVCF(pooledgt, format='GT', indextype='chrom:pos')
if pooledgl is not None:
dfpooled = vcfdf.PandasMixedVCF(pooledgl, format='GL', indextype='chrom:pos')
if compute:
# Filter/mask markers on mismatches and decoding status
disc1 = quality1gt.diff().dropna()
miss1 = dfpooled.trinary_encoding()
entro1 = quality1gl.trueobj.genotypes().combine(quality1gl.imputedobj.genotypes(), quality1gl.intergl_entropy)
disc2 = quality2gt.diff().dropna() # Non NaN i.e. everything imputed
miss2 = dfpooled.trinary_encoding()
entro2 = quality2gl.trueobj.genotypes().combine(quality2gl.imputedobj.genotypes(), quality2gl.intergl_entropy)
unassayed_disc1 = 0.5 * disc1.where(miss1 == -1, other=np.nan) # keep (mis)matches of only not decoded markers
unassayed_entro1 = entro1.where(miss1 == -1, other=np.nan)
unassayed_disc2 = 0.5 * disc2.where(miss2 == -1, other=np.nan) # discordance is 0, 1, or 2
unassayed_entro2 = entro2.where(miss2 == -1, other=np.nan)
# Additional exact matches
# Count not decoded and correctly imputed
unassayed_impok1 = unassayed_disc1.where(unassayed_disc1 == 0.0, other=np.nan) # keep not decoded and full matches after imputation
counts_unassayed_impok1 = unassayed_impok1.notnull().sum(axis=1)
counts_unassayed_impok1.name = 'counts'
unassayed_impok2 = unassayed_disc2.where(unassayed_disc2 == 0.0, other=np.nan) # keep not decoded and full matches after imputation
counts_unassayed_impok2 = unassayed_impok2.notnull().sum(axis=1)
counts_unassayed_impok2.name = 'counts'
# Count not decoded per variant
not_decoded1 = miss1.where(miss1 == -1, other=np.nan)
counts_not_decoded1 = not_decoded1.notnull().sum(axis=1)
not_decoded2 = miss2.where(miss2 == -1, other=np.nan)
counts_not_decoded2 = not_decoded2.notnull().sum(axis=1)
# TODO: exact matches counts per bin
# Proportion of full matches after imputation for not decoded markers
dX = quality1gt.trueobj.maf_info
matchbins = pd.cut(dX.values.squeeze(), x2_bins, labels=lab2_fmt, include_lowest=True) # lab2_bins
dBinsX = pd.DataFrame(matchbins, index=dX.index, columns=['binned_' + x_data])
sExact1 = counts_unassayed_impok1 / counts_not_decoded1
sExact1.name = 'exact_matches' # counts exactly imputed only
# exact_matches1 = qual.QuantilesDataFrame(dX, sExact1)
dfmatch1 = dBinsX.join(sExact1).groupby(by='binned_maf_info').mean()
dfmatch1['dataset'] = 'phaser'
sExact2 = counts_unassayed_impok2 / counts_not_decoded2
sExact2.name = 'exact_matches' # counts exactly imputed only
# exact_matches2 = qual.QuantilesDataFrame(dX, sExact2)
dfmatch2 = dBinsX.join(sExact2).groupby(by='binned_maf_info').mean()
dfmatch2['dataset'] = 'beagle'
dfmatches = pd.concat([dfmatch1, dfmatch2])
tab1 = dfmatch1.reset_index().pivot(index='dataset', columns='binned_maf_info', values='exact_matches')
tab2 = dfmatch2.reset_index().pivot(index='dataset', columns='binned_maf_info', values='exact_matches')
tabmatches = pd.concat([tab1, tab2])
tabmatches.index.name = ''
tabmatches.index = [algo.capitalize() for algo in tabmatches.index]
tabmatches.columns.name = ''
tabmatches.to_latex(buf=os.path.join(outdir, 'pooled-not-decoded-exact-matches-bin.tex'),
sparsify=True,
multirow=True,
caption='Exact matches after imputation for not decoded genotypes in pooled HD per data {} bin: Columns labels are the central values of each interval'.format(
'MAF' if (x_data == 'maf' or x_data == 'maf_info') else 'AAF'),
label='tab:pooled-not-decoded-exact-matches-bin')
# Continuous quality metrics with quantiles
# Concordance after imputation for not decoded markers
unassayed_concordance1 = (1 - unassayed_disc1.mean(axis=1))
unassayed_concordance1.dropna(inplace=True)
unassayed_concordance1.name = 'concordance'
unassayed_concordance2 = (1 - unassayed_disc2.mean(axis=1))
unassayed_concordance2.dropna(inplace=True)
unassayed_concordance2.name = 'concordance'
# Cross-entropy after imputation for not decoded markers
unassayed_crossentro1 = unassayed_entro1.mean(axis=1)
unassayed_crossentro1.dropna(inplace=True)
unassayed_crossentro1.name = 'cross_entropy'
unassayed_crossentro2 = unassayed_entro2.mean(axis=1)
unassayed_crossentro2.dropna(inplace=True)
unassayed_crossentro2.name = 'cross_entropy'
# unassayed_concordance2.notnull().sum()
# Out[99]: 51395
# unassayed_concordance1.notnull().sum()
# Out[100]: 51395
# Metrics to compute and save
dX = quality1gt.trueobj.maf_info
qualDF11 = qual.QuantilesDataFrame(dX, unassayed_concordance1)
qualDF12 = qual.QuantilesDataFrame(dX, unassayed_crossentro1)
print('Example of quantiles for concordance:')
print(qualDF11.binnedX_rolling_quantilY())
print('Example of quantiles for cross-entropy:')
print(qualDF12.binnedX_rolling_quantilY())
qualDF21 = qual.QuantilesDataFrame(dX, unassayed_concordance2)
metrics = {
'concordance': {'beagle': unassayed_concordance2,
'phaser': unassayed_concordance1},
'cross_entropy': {'beagle': unassayed_crossentro2,
'phaser': unassayed_crossentro1}
}
dataquants = {
'concordance': os.path.join(outdir, 'rolling_quantiles_not_decoded_concordance.json'),
'cross_entropy': os.path.join(outdir, 'rolling_quantiles_not_decoded_cross_entropy.json')
}
if compute:
dX = quality1gt.trueobj.maf_info
# Save as table
for metric, d in metrics.items():
break
if d is not None:
yS_beagle, yS_phaser = list(d.values())
# Compute quantiles
print('Computing quantiles for {}'.format(metric).ljust(80, '.'))
pctY_comp = rollquants(dX, yS_beagle, yS_phaser)
# Compute mean over all markers
print('Computing means for {}'.format(metric).ljust(80, '.'))
pctY_comp['mean'] = pctY_comp['dataset'].apply(lambda x: yS_beagle.mean() if x == 'beagle' else yS_phaser.mean())
jsonf = dataquants[metric]
pctY_comp.to_json(jsonf,
orient='records')
# jsonf = os.path.join(outdir, 'exact_matches-not-decoded.json')
# jsonfroll = os.path.join(outdir, 'exact_matches-not-decoded-rolling.json')
#
# df_agg.to_json(jsonf, orient='records')
# dataf = pd.read_json(jsonf, orient='records')
#
# df_roll.to_json(jsonfroll, orient='records')
# rolldataf = pd.read_json(jsonfroll, orient='records')
# Plot
# dataf.plot()
# plt.show()
stopT = timeit.default_timer()
print('Time elapsed for computing and building data to plot = {}'.format(stopT-startT).ljust(80, '.'))
# Read processed reshaped data for plotting and draw figures
sns.set(font_scale=1.75) # multiplication factor!
for dquant, f in dataquants.items():
break
if f is not None:
dataf = pd.read_json(f, orient='records')
meanf = {}
gY = sns.lineplot(data=dataf[dataf.quantiles == 0.5], x='binned_' + x_data, y=dquant,
hue='dataset', palette="deep", linewidth=1)
for i, dset in enumerate(['beagle', 'phaser']):
df = dataf[dataf['dataset'] == dset]
meanf[dset] = df['mean'].mean()
gY.fill_between(df[df.quantiles == 1.0]['binned_' + x_data],
df[df.quantiles == 0.0][dquant],
df[df.quantiles == 1.0][dquant],
color=sns.color_palette('deep')[i],
alpha=0.1)
gY.fill_between(df[df.quantiles == 0.99]['binned_' + x_data],
df[df.quantiles == 0.01][dquant],
df[df.quantiles == 0.99][dquant],
color=sns.color_palette('deep')[i],
alpha=0.25)
gY.fill_between(df[df.quantiles == 0.75]['binned_' + x_data],
df[df.quantiles == 0.25][dquant],
df[df.quantiles == 0.75][dquant],
color=sns.color_palette('deep')[i],
alpha=0.40)
gY.set_xlabel('True minor allele frequency in {} population'.format('study' if x_data == 'binned_maf'
else 'main'),
fontsize=axlabsz)
gY.set_ylabel(str.capitalize(dataf.columns[2].replace('_', ' ')), fontsize=axlabsz)
gY.set(ylim=yscale[dquant])
handles, labels = gY.get_legend_handles_labels()
labels[-2] = '{} (mean = {:.5f})'.format(labels[-2], meanf['beagle'])
labels[-1] = '{} (mean = {:.5f})'.format(labels[-1], meanf['phaser'])
gY.legend(handles, labels, fontsize=legsz)
plt.savefig(os.path.join(outdir, '{}_percentiles_rQ={}_bS={}_xdata={}_not_decoded.pdf'.format(dquant, rQ, bins_step, x_data.lstrip('binned_'))))
plt.show()
|
StarcoderdataPython
|
5039809
|
<filename>test/test_socket_burst_dampener.py
import asyncio
import os
import sys
import unittest
try:
from socket_burst_dampener import Daemon, parse_args
except ImportError:
sys.path.append(
os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "src"
)
)
from socket_burst_dampener import Daemon, parse_args
class SocketBurstDampenerTest(unittest.TestCase):
def test_socket_burst_dampener(self):
args = parse_args(
[
"socket-burst-dampener",
"0",
"--processes",
"0",
"--load-average",
"1",
"--",
"echo",
"hello",
]
)
loop = asyncio.get_event_loop()
try:
with Daemon(args, loop) as daemon:
loop.run_until_complete(self._test_daemon(loop, daemon))
except KeyboardInterrupt:
loop.stop()
finally:
loop.close()
async def _test_daemon(self, loop, daemon):
while daemon.addr_info is None:
await asyncio.sleep(0.1)
for i in range(3):
reader, writer = await asyncio.open_connection(
family=daemon.addr_info.family,
host=daemon.addr_info.address[0],
port=daemon.addr_info.address[1],
)
data = b""
expect = b"hello\n"
while len(data) < len(expect):
data += await reader.read(len(expect))
self.assertEqual(data, expect)
writer.close()
await writer.wait_closed()
if __name__ == "__main__":
unittest.main(verbosity=2)
|
StarcoderdataPython
|
12825883
|
<reponame>LucasBorges-Santos/docker-odoo<filename>odoo/base-addons/stock_landed_costs/tests/test_stock_landed_costs_purchase.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from odoo.addons.stock_landed_costs.tests.common import TestStockLandedCostsCommon
from odoo.addons.stock_landed_costs.tests.test_stockvaluationlayer import TestStockValuationLC
from odoo.tests import Form, tagged
@tagged('post_install', '-at_install')
class TestLandedCosts(TestStockLandedCostsCommon):
def setUp(self):
super(TestLandedCosts, self).setUp()
# Create picking incoming shipment
self.picking_in = self.Picking.create({
'partner_id': self.supplier_id,
'picking_type_id': self.picking_type_in_id,
'location_id': self.supplier_location_id,
'location_dest_id': self.stock_location_id})
self.Move.create({
'name': self.product_refrigerator.name,
'product_id': self.product_refrigerator.id,
'product_uom_qty': 5,
'product_uom': self.product_refrigerator.uom_id.id,
'picking_id': self.picking_in.id,
'location_id': self.supplier_location_id,
'location_dest_id': self.stock_location_id})
self.Move.create({
'name': self.product_oven.name,
'product_id': self.product_oven.id,
'product_uom_qty': 10,
'product_uom': self.product_oven.uom_id.id,
'picking_id': self.picking_in.id,
'location_id': self.supplier_location_id,
'location_dest_id': self.stock_location_id})
# Create picking outgoing shipment
self.picking_out = self.Picking.create({
'partner_id': self.customer_id,
'picking_type_id': self.picking_type_out_id,
'location_id': self.stock_location_id,
'location_dest_id': self.customer_location_id})
self.Move.create({
'name': self.product_refrigerator.name,
'product_id': self.product_refrigerator.id,
'product_uom_qty': 2,
'product_uom': self.product_refrigerator.uom_id.id,
'picking_id': self.picking_out.id,
'location_id': self.stock_location_id,
'location_dest_id': self.customer_location_id})
def test_00_landed_costs_on_incoming_shipment(self):
chart_of_accounts = self.env.company.chart_template_id
generic_coa = self.env.ref('l10n_generic_coa.configurable_chart_template')
if chart_of_accounts != generic_coa:
raise unittest.SkipTest('Skip this test as it works only with %s (%s loaded)' % (generic_coa.name, chart_of_accounts.name))
""" Test landed cost on incoming shipment """
#
# (A) Purchase product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
# (B) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Create landed costs
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertTrue(stock_landed_cost.account_move_id, 'Landed costs should be available account move lines')
account_entry = self.env['account.move.line'].read_group(
[('move_id', '=', stock_landed_cost.account_move_id.id)], ['debit', 'credit', 'move_id'], ['move_id'])[0]
self.assertEqual(account_entry['debit'], account_entry['credit'], 'Debit and credit are not equal')
self.assertEqual(account_entry['debit'], 430.0, 'Wrong Account Entry')
def test_00_landed_costs_on_incoming_shipment_without_real_time(self):
chart_of_accounts = self.env.company.chart_template_id
generic_coa = self.env.ref('l10n_generic_coa.configurable_chart_template')
if chart_of_accounts != generic_coa:
raise unittest.SkipTest('Skip this test as it works only with %s (%s loaded)' % (generic_coa.name, chart_of_accounts.name))
""" Test landed cost on incoming shipment """
#
# (A) Purchase product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
# (B) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
self.product_refrigerator.write({"categ_id": self.categ_manual_periodic.id})
self.product_oven.write({"categ_id": self.categ_manual_periodic.id})
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Create landed costs
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertFalse(stock_landed_cost.account_move_id)
def test_01_negative_landed_costs_on_incoming_shipment(self):
chart_of_accounts = self.env.company.chart_template_id
generic_coa = self.env.ref('l10n_generic_coa.configurable_chart_template')
if chart_of_accounts != generic_coa:
raise unittest.SkipTest('Skip this test as it works only with %s (%s loaded)' % (generic_coa.name, chart_of_accounts.name))
""" Test negative landed cost on incoming shipment """
#
# (A) Purchase Product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
# (B) Sale refrigerator's part of the quantity
# (C) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
# (D) Decrease cost that already added on purchase
# (apply negative entry)
# Services Amount Split Method
# -------------------------------------------
# 1.labour -5 By Equal
# 2.brokerage -50 By Quantity
# 3.transportation -50 By Weight
# 4.packaging -5 By Volume
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Refrigerator outgoing shipment.
self._process_outgoing_shipment()
# Apply landed cost for incoming shipment.
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200.0,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertTrue(stock_landed_cost.account_move_id, 'Landed costs should be available account move lines')
# Create negative landed cost for previously incoming shipment.
stock_negative_landed_cost = self._create_landed_costs({
'equal_price_unit': -5,
'quantity_price_unit': -50,
'weight_price_unit': -50,
'volume_price_unit': -5}, income_ship)
# Compute negative landed costs
stock_negative_landed_cost.compute_landed_cost()
valid_vals = {
'equal': -2.5,
'by_quantity_refrigerator': -16.67,
'by_quantity_oven': -33.33,
'by_weight_refrigerator': -10.00,
'by_weight_oven': -40.00,
'by_volume_refrigerator': -1.25,
'by_volume_oven': -3.75}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_negative_landed_cost, valid_vals)
# Validate the landed cost.
stock_negative_landed_cost.button_validate()
self.assertEqual(stock_negative_landed_cost.state, 'done', 'Negative landed costs should be in done state')
self.assertTrue(stock_negative_landed_cost.account_move_id, 'Landed costs should be available account move lines')
account_entry = self.env['account.move.line'].read_group(
[('move_id', '=', stock_negative_landed_cost.account_move_id.id)], ['debit', 'credit', 'move_id'], ['move_id'])[0]
self.assertEqual(account_entry['debit'], account_entry['credit'], 'Debit and credit are not equal')
move_lines = [
{'name': 'split by volume - <NAME>', 'debit': 3.75, 'credit': 0.0},
{'name': 'split by volume - <NAME>', 'debit': 0.0, 'credit': 3.75},
{'name': 'split by weight - <NAME>', 'debit': 40.0, 'credit': 0.0},
{'name': 'split by weight - <NAME>', 'debit': 0.0, 'credit': 40.0},
{'name': 'split by quantity - <NAME>', 'debit': 33.33, 'credit': 0.0},
{'name': 'split by quantity - <NAME>', 'debit': 0.0, 'credit': 33.33},
{'name': 'equal split - <NAME>', 'debit': 2.5, 'credit': 0.0},
{'name': 'equal split - <NAME>', 'debit': 0.0, 'credit': 2.5},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.5, 'credit': 0.0},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 0.5},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 4.0, 'credit': 0.0},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 4.0},
{'name': 'split by weight - Refrigerator', 'debit': 0.0, 'credit': 10.0},
{'name': 'split by weight - Refrigerator', 'debit': 10.0, 'credit': 0.0},
{'name': 'split by volume - Refrigerator', 'debit': 0.0, 'credit': 1.25},
{'name': 'split by volume - Refrigerator', 'debit': 1.25, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 6.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 6.67},
{'name': 'split by quantity - Refrigerator', 'debit': 16.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator', 'debit': 0.0, 'credit': 16.67},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 1.0, 'credit': 0.0},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 1.0},
{'name': 'equal split - Refrigerator', 'debit': 2.5, 'credit': 0.0},
{'name': 'equal split - Refrigerator', 'debit': 0.0, 'credit': 2.5}
]
if stock_negative_landed_cost.account_move_id.company_id.anglo_saxon_accounting:
move_lines += [
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.5, 'credit': 0.0},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 0.5},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 4.0, 'credit': 0.0},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 4.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 6.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 6.67},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 1.0, 'credit': 0.0},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 1.0},
]
self.assertRecordValues(
sorted(stock_negative_landed_cost.account_move_id.line_ids, key=lambda d: (d['name'], d['debit'])),
sorted(move_lines, key=lambda d: (d['name'], d['debit'])),
)
def _process_incoming_shipment(self):
""" Two product incoming shipment. """
# Confirm incoming shipment.
self.picking_in.action_confirm()
# Transfer incoming shipment
res_dict = self.picking_in.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
return self.picking_in
def _process_outgoing_shipment(self):
""" One product Outgoing shipment. """
# Confirm outgoing shipment.
self.picking_out.action_confirm()
# Product assign to outgoing shipments
self.picking_out.action_assign()
# Transfer picking.
res_dict = self.picking_out.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
def _create_landed_costs(self, value, picking_in):
return self.LandedCost.create(dict(
picking_ids=[(6, 0, [picking_in.id])],
account_journal_id=self.expenses_journal.id,
cost_lines=[
(0, 0, {
'name': 'equal split',
'split_method': 'equal',
'price_unit': value['equal_price_unit'],
'product_id': self.landed_cost.id}),
(0, 0, {
'name': 'split by quantity',
'split_method': 'by_quantity',
'price_unit': value['quantity_price_unit'],
'product_id': self.brokerage_quantity.id}),
(0, 0, {
'name': 'split by weight',
'split_method': 'by_weight',
'price_unit': value['weight_price_unit'],
'product_id': self.transportation_weight.id}),
(0, 0, {
'name': 'split by volume',
'split_method': 'by_volume',
'price_unit': value['volume_price_unit'],
'product_id': self.packaging_volume.id})
],
))
def _validate_additional_landed_cost_lines(self, stock_landed_cost, valid_vals):
for valuation in stock_landed_cost.valuation_adjustment_lines:
add_cost = valuation.additional_landed_cost
split_method = valuation.cost_line_id.split_method
product = valuation.move_id.product_id
if split_method == 'equal':
self.assertEqual(add_cost, valid_vals['equal'], self._error_message(valid_vals['equal'], add_cost))
elif split_method == 'by_quantity' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_quantity_refrigerator'], self._error_message(valid_vals['by_quantity_refrigerator'], add_cost))
elif split_method == 'by_quantity' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_quantity_oven'], self._error_message(valid_vals['by_quantity_oven'], add_cost))
elif split_method == 'by_weight' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_weight_refrigerator'], self._error_message(valid_vals['by_weight_refrigerator'], add_cost))
elif split_method == 'by_weight' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_weight_oven'], self._error_message(valid_vals['by_weight_oven'], add_cost))
elif split_method == 'by_volume' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_volume_refrigerator'], self._error_message(valid_vals['by_volume_refrigerator'], add_cost))
elif split_method == 'by_volume' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_volume_oven'], self._error_message(valid_vals['by_volume_oven'], add_cost))
    def _error_message(self, actual_cost, computed_cost):
        return 'Additional Landed Cost should be %s instead of %s' % (actual_cost, computed_cost)
@tagged('post_install', '-at_install')
class TestLandedCostsWithPurchaseAndInv(TestStockValuationLC):
def test_invoice_after_lc(self):
self.env.company.anglo_saxon_accounting = True
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
self.product1.product_tmpl_id.invoice_policy = 'delivery'
self.price_diff_account = self.env['account.account'].create({
'name': 'price diff account',
'code': 'price diff account',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
})
self.product1.property_account_creditor_price_difference = self.price_diff_account
# Create PO
po_form = Form(self.env['purchase.order'])
po_form.partner_id = self.env['res.partner'].create({'name': 'vendor'})
with po_form.order_line.new() as po_line:
po_line.product_id = self.product1
po_line.product_qty = 1
po_line.price_unit = 455.0
order = po_form.save()
order.button_confirm()
# Receive the goods
receipt = order.picking_ids[0]
receipt.move_lines.quantity_done = 1
receipt.button_validate()
# Check SVL and AML
svl = self.env['stock.valuation.layer'].search([('stock_move_id', '=', receipt.move_lines.id)])
self.assertAlmostEqual(svl.value, 455)
aml = self.env['account.move.line'].search([('account_id', '=', self.stock_valuation_account.id)])
self.assertAlmostEqual(aml.debit, 455)
# Create and validate LC
lc = self.env['stock.landed.cost'].create(dict(
picking_ids=[(6, 0, [receipt.id])],
account_journal_id=self.stock_journal.id,
cost_lines=[
(0, 0, {
'name': 'equal split',
'split_method': 'equal',
'price_unit': 99,
'product_id': self.productlc1.id,
}),
],
))
lc.compute_landed_cost()
lc.button_validate()
# Check LC, SVL and AML
self.assertAlmostEqual(lc.valuation_adjustment_lines.final_cost, 554)
svl = self.env['stock.valuation.layer'].search([('stock_move_id', '=', receipt.move_lines.id)], order='id desc', limit=1)
self.assertAlmostEqual(svl.value, 99)
aml = self.env['account.move.line'].search([('account_id', '=', self.stock_valuation_account.id)], order='id desc', limit=1)
self.assertAlmostEqual(aml.debit, 99)
# Create an invoice with the same price
move_form = Form(self.env['account.move'].with_context(default_type='in_invoice'))
move_form.partner_id = order.partner_id
move_form.purchase_id = order
move = move_form.save()
move.post()
# Check nothing was posted in the price difference account
price_diff_aml = self.env['account.move.line'].search([('account_id','=', self.price_diff_account.id), ('move_id', '=', move.id)])
        self.assertEqual(len(price_diff_aml), 0, "No line should have been generated in the price difference account.")
|
StarcoderdataPython
|
312950
|
<filename>exercises/solution_01_10.py
import pandas as pd
# The database
hockey_players = pd.read_csv('data/canucks.csv', index_col=0)
# Slice the rows and columns and save the new dataframe as `star_players`
star_players = hockey_players.loc['<NAME>': '<NAME>', 'No.' : 'Country']
# Display it
star_players
|
StarcoderdataPython
|
6641184
|
<reponame>IKATS/ikats_api
# -*- coding: utf-8 -*-
"""
Copyright 2019 CS Systèmes d'Information
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TestCase
from ikats.extra.timeseries import gen_random_ts
class TestDataset(TestCase):
"""
Test extra functions
"""
def test_gen_random_ts(self):
"""
test gen_random_ts function
"""
sd = 1000000000000
ed = 1000000010000
nb_points = 10
period = 1000
# Provide all except period
data = gen_random_ts(sd=sd, ed=ed, nb_points=nb_points)
self.assertEqual(sd, data[0][0])
self.assertEqual(ed - period, data[-1][0])
self.assertEqual(nb_points, len(data))
self.assertEqual(period, data[1][0] - data[0][0])
# Provide all except nb_points
data = gen_random_ts(sd=sd, ed=ed, period=period)
self.assertEqual(sd, data[0][0])
self.assertEqual(ed - period, data[-1][0])
self.assertEqual(nb_points, len(data))
self.assertEqual(period, data[1][0] - data[0][0])
# Provide all except ed
data = gen_random_ts(sd=sd, nb_points=nb_points, period=period)
self.assertEqual(sd, data[0][0])
self.assertEqual(ed - period, data[-1][0])
self.assertEqual(nb_points, len(data))
self.assertEqual(period, data[1][0] - data[0][0])
# Provide all except sd
data = gen_random_ts(ed=ed, nb_points=nb_points, period=period)
self.assertEqual(sd, data[0][0])
self.assertEqual(ed - period, data[-1][0])
self.assertEqual(nb_points, len(data))
self.assertEqual(period, data[1][0] - data[0][0])
# Provide all
data = gen_random_ts(sd=sd, ed=ed, nb_points=nb_points, period=period)
self.assertEqual(sd, data[0][0])
self.assertEqual(ed - period, data[-1][0])
self.assertEqual(nb_points, len(data))
self.assertEqual(period, data[1][0] - data[0][0])
# Mismatch between parameters
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, ed=ed, nb_points=nb_points, period=42)
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, ed=ed, nb_points=42, period=period)
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, ed=42, nb_points=nb_points, period=period)
with self.assertRaises(ValueError):
gen_random_ts(sd=42, ed=ed, nb_points=nb_points, period=period)
# Not aligned points (period not aligned with end date)
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, ed=ed, period=42)
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, ed=ed, nb_points=42)
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, ed=sd+18, nb_points=42)
# Missing values
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, ed=ed)
with self.assertRaises(ValueError):
gen_random_ts(sd=sd, period=period)
|
StarcoderdataPython
|
6616119
|
<filename>sources/classic/http_auth/interfaces.py
from abc import ABC, abstractmethod
from typing import Type
from .entities import Client
# yapf: disable
class AuthStrategy(ABC):
@abstractmethod
def get_client(self, request: 'falcon.Request', **static_client_params) -> Client: ...
class ClientFactory(ABC):
@abstractmethod
def get_client_cls(self) -> Type[Client]: ...
@abstractmethod
def create(self, **instance_params) -> Client: ...
# yapf: enable
|
StarcoderdataPython
|
10037
|
import awkward as ak
from coffea.nanoevents.methods import vector
import pytest
ATOL = 1e-8
def record_arrays_equal(a, b):
return (ak.fields(a) == ak.fields(b)) and all(ak.all(a[f] == b[f]) for f in ak.fields(a))
def test_two_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]]
},
with_name="TwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[11, 12], [], [13], [14]],
"y": [[15, 16], [], [17], [18]]
},
with_name="TwoVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[12, 14], [], [16], [18]],
"y": [[20, 22], [], [24], [26]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-10, -10], [], [-10], [-10]],
"y": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]]
}
))
    # dot() returns plain numeric arrays (no record fields), so compare the
    # values directly instead of using the record-field helper above
    assert ak.all(a.dot(b) == ak.Array([[86, 120], [], [158], [200]]))
    assert ak.all(b.dot(a) == ak.Array([[86, 120], [], [158], [200]]))
assert ak.all(abs(a.unit.r - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_polar_two_vector():
a = ak.zip(
{
"r": [[1, 2], [], [3], [4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="PolarTwoVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert record_arrays_equal(a * 2, ak.zip(
{
"r": [[2, 4], [], [6], [8]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all((a * (-2)).r == [[2, 4], [], [6], [8]])
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"r": [[0.5, 1], [], [1.5], [2]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]]
}
))
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert record_arrays_equal(a * (-1), -a)
assert ak.all(a.unit.phi == a.phi)
def test_three_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]]
},
with_name="ThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]]
},
with_name="ThreeVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]]
}
))
assert ak.all(a.dot(b) == ak.Array([[170, 154], [], [162], [284]]))
assert ak.all(b.dot(a) == ak.Array([[170, 154], [], [162], [284]]))
assert record_arrays_equal(a.cross(b), ak.zip(
{
"x": [[-108, -4], [], [-86], [56]],
"y": [[27, -12], [], [95], [68]],
"z": [[-3, 8], [], [-37], [-64]]
}
))
assert record_arrays_equal(b.cross(a), ak.zip(
{
"x": [[108, 4], [], [86], [-56]],
"y": [[-27, 12], [], [-95], [-68]],
"z": [[3, -8], [], [37], [64]]
}
))
assert ak.all(abs(a.unit.rho - 1) < ATOL)
assert ak.all(abs(a.unit.phi - a.phi) < ATOL)
def test_spherical_three_vector():
a = ak.zip(
{
"rho": [[1.0, 2.0], [], [3.0], [4.0]],
"theta": [[1.2, 0.7], [], [1.8], [1.9]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
},
with_name="SphericalThreeVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all(abs((-a).x + a.x) < ATOL)
assert ak.all(abs((-a).y + a.y) < ATOL)
assert ak.all(abs((-a).z + a.z) < ATOL)
assert record_arrays_equal(a * (-1), -a)
def test_lorentz_vector():
a = ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
"t": [[50, 51], [], [52], [53]]
},
with_name="LorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
b = ak.zip(
{
"x": [[4, 1], [], [10], [11]],
"y": [[17, 7], [], [11], [6]],
"z": [[9, 11], [], [5], [16]],
"t": [[60, 61], [], [62], [63]]
},
with_name="LorentzVector",
highlevel=False
)
b = ak.Array(b, behavior=vector.behavior)
assert record_arrays_equal(- a, ak.zip(
{
"x": [[-1, -2], [], [-3], [-4]],
"y": [[-5, -6], [], [-7], [-8]],
"z": [[-9, -10], [], [-11], [-12]],
"t": [[-50, -51], [], [-52], [-53]]
}
))
assert record_arrays_equal(a + b, ak.zip(
{
"x": [[5, 3], [], [13], [15]],
"y": [[22, 13], [], [18], [14]],
"z": [[18, 21], [], [16], [28]],
"t": [[110, 112], [], [114], [116]]
}
))
assert record_arrays_equal(a - b, ak.zip(
{
"x": [[-3, 1], [], [-7], [-7]],
"y": [[-12, -1], [], [-4], [2]],
"z": [[0, -1], [], [6], [-4]],
"t": [[-10, -10], [], [-10], [-10]]
}
))
assert record_arrays_equal(a * 2, ak.zip(
{
"x": [[2, 4], [], [6], [8]],
"y": [[10, 12], [], [14], [16]],
"z": [[18, 20], [], [22], [24]],
"t": [[100, 102], [], [104], [106]]
}
))
assert record_arrays_equal(a / 2, ak.zip(
{
"x": [[0.5, 1], [], [1.5], [2]],
"y": [[2.5, 3], [], [3.5], [4]],
"z": [[4.5, 5], [], [5.5], [6]],
"t": [[25, 25.5], [], [26], [26.5]]
}
))
assert record_arrays_equal(a.pvec, ak.zip(
{
"x": [[1, 2], [], [3], [4]],
"y": [[5, 6], [], [7], [8]],
"z": [[9, 10], [], [11], [12]],
}
))
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_m_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.5, 0.9], [], [1.3], [4.5]]
},
with_name="PtEtaPhiMLorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"mass": [[0.25, 0.45], [], [0.65], [2.25]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
def test_pt_eta_phi_e_lorentz_vector():
a = ak.zip(
{
"pt": [[1, 2], [], [3], [4]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[50, 51], [], [52], [60]]
},
with_name="PtEtaPhiELorentzVector",
highlevel=False
)
a = ak.Array(a, behavior=vector.behavior)
assert ak.all((a * (-2)).pt == ak.Array([[2, 4], [], [6], [8]]))
assert ak.all((a * (-2)).theta - ak.Array([
[2.556488570968, 2.65804615357],
[],
[2.74315571762],
[3.07487087733]
]) < ATOL)
assert ak.all((a * (-2)).phi - ak.Array([
[-2.8415926535, -2.7415926535],
[],
[-2.6415926535],
[-2.5415926535]
]) < ATOL)
assert record_arrays_equal(a / 2, ak.zip(
{
"pt": [[0.5, 1], [], [1.5], [2]],
"eta": [[1.2, 1.4], [], [1.6], [3.4]],
"phi": [[0.3, 0.4], [], [0.5], [0.6]],
"energy": [[25, 25.5], [], [26], [30]]
}
))
assert record_arrays_equal(a * (-1), -a)
boosted = a.boost(-a.boostvec)
assert ak.all(abs(boosted.x) < ATOL)
assert ak.all(abs(boosted.y) < ATOL)
assert ak.all(abs(boosted.z) < ATOL)
|
StarcoderdataPython
|
12849910
|
# Generated by Django 3.2.7 on 2022-02-08 23:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('file_manager', '0058_auto_20220118_1418'),
]
operations = [
migrations.AddField(
model_name='rawfile',
name='column_sn',
field=models.TextField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='rawfile',
name='spe_sn',
field=models.TextField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='maxquantqueue',
name='evidence_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/maxquant/2022/2/8'),
),
migrations.AlterField(
model_name='maxquantqueue',
name='other_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/maxquant/2022/2/8'),
),
migrations.AlterField(
model_name='maxquantqueue',
name='peptide_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/maxquant/2022/2/8'),
),
migrations.AlterField(
model_name='maxquantqueue',
name='protein_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/maxquant/2022/2/8'),
),
migrations.AlterField(
model_name='maxquantqueue',
name='setting_xml',
field=models.FileField(blank=True, null=True, upload_to='maxquant_xml/2022/2/8'),
),
migrations.AlterField(
model_name='msfraggerqueue',
name='ion_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/msfragger/2022/2/8'),
),
migrations.AlterField(
model_name='msfraggerqueue',
name='peptide_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/msfragger/2022/2/8'),
),
migrations.AlterField(
model_name='msfraggerqueue',
name='protein_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/msfragger/2022/2/8'),
),
migrations.AlterField(
model_name='msfraggerqueue',
name='psm_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/msfragger/2022/2/8'),
),
migrations.AlterField(
model_name='notefile',
name='notefile',
field=models.FileField(blank=True, null=True, upload_to='notefiles/2022/ 2/8'),
),
migrations.AlterField(
model_name='pdqueue',
name='consensus_method',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/proteindiscoverer/2022/2/8'),
),
migrations.AlterField(
model_name='pdqueue',
name='export_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/proteindiscoverer/2022/2/8'),
),
migrations.AlterField(
model_name='pdqueue',
name='processing_method',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/proteindiscoverer/2022/2/8'),
),
migrations.AlterField(
model_name='pdqueue',
name='result_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/proteindiscoverer/2022/2/8'),
),
migrations.AlterField(
model_name='rawfile',
name='note_file',
field=models.ManyToManyField(blank=True, to='file_manager.NoteFile'),
),
migrations.AlterField(
model_name='spectrominequeue',
name='result_file',
field=models.FileField(blank=True, null=True, upload_to='hdstorage/spectromine/2022/2/8'),
),
]
|
StarcoderdataPython
|
3420418
|
<reponame>xiegudong45/typeidea<gh_stars>1000+
from django.contrib.gis.db.models.sql.conversion import (
AreaField, DistanceField, GeomField, GMLField,
)
__all__ = [
'AreaField', 'DistanceField', 'GeomField', 'GMLField'
]
|
StarcoderdataPython
|
11396739
|
<filename>code/cs231n/classifier_trainer.py
import numpy as np
class ClassifierTrainer:
""" The trainer class performs SGD with momentum on a cost function """
def __init__(self):
self.step_cache = {} # for storing velocities in momentum update
def train( # noqa for complexity
self, X, y, X_val, y_val,
model, loss_function,
reg=0.0,
learning_rate=1e-2, momentum=0, learning_rate_decay=0.95,
update='momentum', sample_batches=True,
num_epochs=30, batch_size=100, acc_frequency=None,
progress_bar=None, verbose=False
):
"""
Optimize the parameters of a model to minimize a loss function. We use
training data X and y to compute the loss and gradients, and
periodically check the accuracy on the validation set.
Inputs:
- X: Array of training data; each X[i] is a training sample.
- y: Vector of training labels; y[i] gives the label for X[i].
- X_val: Array of validation data
- y_val: Vector of validation labels
- model: Dictionary that maps parameter names to parameter values. Each
parameter value is a numpy array.
- loss_function: A function that can be called in the following ways:
scores = loss_function(X, model, reg=reg)
loss, grads = loss_function(X, model, y, reg=reg)
- reg:
Regularization strength. This will be passed to the loss function.
- learning_rate: Initial learning rate to use.
- momentum: Parameter to use for momentum updates.
- learning_rate_decay: The learning rate is multiplied by this after
each epoch.
- update: The update rule to use.
One of 'sgd', 'momentum', or 'rmsprop'.
- sample_batches:
If True, use a minibatch of data for each parameter update
(stochastic gradient descent);
if False, use the entire training set for each parameter update
(gradient descent).
- num_epochs: The number of epochs to take over the training data.
- batch_size: The number of training samples to use at each iteration.
- acc_frequency: If set to an integer, we compute the training and
validation set error after every acc_frequency iterations.
- verbose: If True, print status after each epoch.
Returns a tuple of:
- best_model: The model that got the highest validation accuracy during
training.
- loss_history: List containing the value of the loss function at each
iteration.
- train_acc_history:
List storing the training set accuracy at each epoch.
- val_acc_history:
List storing the validation set accuracy at each epoch.
"""
N = X.shape[0]
if sample_batches:
# one epoch means fully seen all samples
if N % batch_size != 0:
raise ValueError('batch_size does not match sample size')
iterations_per_epoch = int(N / batch_size) # using SGD
else:
iterations_per_epoch = 1 # using GD
num_iters = num_epochs * iterations_per_epoch
if progress_bar:
progress_bar.max = num_iters
progress_bar.value = 0
epoch = 0
best_val_acc = 0.0
best_model = {}
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
if progress_bar:
progress_bar.value += 1
elif it % 50 == 0:
# show iteration indicator every 50 iterations
print('starting iteration ', it)
# get batch of data
if sample_batches:
# SGD used
batch_mask = np.random.choice(N, batch_size)
X_batch = X[batch_mask]
y_batch = y[batch_mask]
else:
# no SGD used, full gradient descent
X_batch = X
y_batch = y
# evaluate cost and gradient
cost, grads = loss_function(X_batch, model, y_batch, reg)
loss_history.append(cost)
# perform a parameter update
for p in model:
# compute the parameter step
if update == 'sgd':
dx = -learning_rate * grads[p]
elif update == 'momentum':
if p not in self.step_cache:
self.step_cache[p] = np.zeros(grads[p].shape)
current_direction = - learning_rate * grads[p]
dx = self.step_cache[p] * momentum + current_direction
self.step_cache[p] = dx
elif update == 'rmsprop':
decay_rate = 0.99 # you could also make this an option
if p not in self.step_cache:
self.step_cache[p] = np.zeros(grads[p].shape)
new_cache = (
decay_rate * self.step_cache[p] +
(1 - decay_rate) * (grads[p] ** 2)
)
self.step_cache[p] = new_cache
dx = - learning_rate * grads[p] / np.sqrt(new_cache + 1e-8)
else:
raise ValueError('Unrecognized update type "%s"' % update)
# update the parameters
model[p] += dx
# every epoch perform an evaluation on the validation set
first_it = (it == 0)
epoch_end = (it + 1) % iterations_per_epoch == 0
acc_check = (acc_frequency is not None and it % acc_frequency == 0)
if first_it or epoch_end or acc_check:
if it > 0 and epoch_end:
# decay the learning rate
learning_rate *= learning_rate_decay
epoch += 1
# evaluate train accuracy
if N > 1000:
train_mask = np.random.choice(N, 1000)
X_train_subset = X[train_mask]
y_train_subset = y[train_mask]
else:
X_train_subset = X
y_train_subset = y
scores_train = loss_function(X_train_subset, model)
y_pred_train = np.argmax(scores_train, axis=1)
train_acc = np.mean(y_pred_train == y_train_subset)
train_acc_history.append(train_acc)
# evaluate val accuracy
scores_val = loss_function(X_val, model)
y_pred_val = np.argmax(scores_val, axis=1)
val_acc = np.mean(y_pred_val == y_val)
val_acc_history.append(val_acc)
# keep track of the best model based on validation accuracy
if val_acc > best_val_acc:
# make a copy of the model
best_val_acc = val_acc
best_model = {}
for p in model:
best_model[p] = model[p].copy()
# print progress if needed
if verbose:
print(
'Finished epoch %d / %d: cost %f, '
'train: %f, val %f, lr %e' % (
epoch, num_epochs, cost,
train_acc, val_acc, learning_rate
)
)
if verbose:
print(
'finished optimization. best validation accuracy: %f' %
(best_val_acc, )
)
# return the best model and the training history statistics
return best_model, loss_history, train_acc_history, val_acc_history
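# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): drives the trainer
# with a toy linear softmax loss on random data. The "W" parameter name, the
# data shapes and every hyper-parameter below are illustrative assumptions,
# not values taken from the course assignment.
if __name__ == '__main__':
    def toy_softmax_loss(X, model, y=None, reg=0.0):
        """Loss callable matching the contract documented in train()."""
        W = model['W']  # (D, C) weight matrix
        scores = X.dot(W)  # (N, C) class scores
        if y is None:
            return scores
        # numerically stable softmax cross-entropy plus L2 regularization
        shifted = scores - scores.max(axis=1, keepdims=True)
        probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
        N = X.shape[0]
        loss = -np.log(probs[np.arange(N), y]).mean() + 0.5 * reg * np.sum(W * W)
        dscores = probs.copy()
        dscores[np.arange(N), y] -= 1
        dW = X.T.dot(dscores) / N + reg * W
        return loss, {'W': dW}
    rng = np.random.RandomState(0)
    X_train, y_train = rng.randn(200, 10), rng.randint(0, 3, size=200)
    X_val, y_val = rng.randn(50, 10), rng.randint(0, 3, size=50)
    model = {'W': 0.01 * rng.randn(10, 3)}
    trainer = ClassifierTrainer()
    best_model, loss_history, train_acc, val_acc = trainer.train(
        X_train, y_train, X_val, y_val, model, toy_softmax_loss,
        reg=1e-3, learning_rate=1e-2, momentum=0.9, update='momentum',
        num_epochs=2, batch_size=50, verbose=True)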
|
StarcoderdataPython
|
3471422
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-26 22:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('client', '0009_auto_20171024_0044'),
]
operations = [
migrations.CreateModel(
name='Actions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True)),
('name', models.CharField(help_text='Titre', max_length=10)),
],
options={
'ordering': ['time'],
},
),
]
|
StarcoderdataPython
|
3538217
|
<reponame>sem-onyalo/hackathon-ideas<filename>core/App.py
import json
class App:
_settingsFileName = 'app.settings.json'
def getSettings(self, name):
with open(self._settingsFileName, 'r') as fh:
settings = json.loads(fh.read())
if name in settings:
return settings[name]
raise RuntimeError(f'Name {name} not in settings file')
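# Hedged usage sketch (not part of the original file): assumes an
# app.settings.json in the working directory with a top-level "database"
# key, e.g. {"database": {"host": "localhost", "port": 5432}}; both the key
# and its value are made-up placeholders.
if __name__ == '__main__':
    app = App()
    print(app.getSettings('database'))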
|
StarcoderdataPython
|
6561264
|
<reponame>javor/taxamo-python
#!/usr/bin/env python
"""
Copyright 2014-2020 by Taxamo
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Us_tax_exemption_certificate_details_schema:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'single_purchase_order_identifier': 'str',
'purchaser_business_type': 'str',
'purchaser_exemption_reason_value': 'str',
'purchaser_state': 'str',
'purchaser_zip': 'str',
'purchaser_city': 'str',
'purchaser_last_name': 'str',
'purchaser_exemption_reason': 'str',
'single_purchase': 'bool',
'purchaser_tax_id': 'us_tax_id',
'purchaser_address2': 'str',
'purchaser_address1': 'str',
'purchaser_business_type_other_value': 'str',
'purchaser_first_name': 'str',
'exempt_states': 'list[us_tax_exempt_state]',
'purchaser_title': 'str'
}
#Purchase/order identifier for single purchase.
self.single_purchase_order_identifier = None # str
#Purchaser business type.
self.purchaser_business_type = None # str
#The value of exemption reason.
self.purchaser_exemption_reason_value = None # str
#Purchaser's state.
self.purchaser_state = None # str
#Purchaser's zip code.
self.purchaser_zip = None # str
#Purchaser's city.
self.purchaser_city = None # str
#Purchaser's last name.
self.purchaser_last_name = None # str
#The reason for exemption reason.
self.purchaser_exemption_reason = None # str
#Set to true if this certificate is valid for single purchase only.
self.single_purchase = None # bool
#Purchaser's TAX ID.
self.purchaser_tax_id = None # us_tax_id
#Purchaser's second address line.
self.purchaser_address2 = None # str
#Purchaser's first address line.
self.purchaser_address1 = None # str
#If business type is other, a short description must be provided.
self.purchaser_business_type_other_value = None # str
#Purchaser's first name.
self.purchaser_first_name = None # str
#List of states where the certificate is valid.
self.exempt_states = None # list[us_tax_exempt_state]
#Purchaser's title.
self.purchaser_title = None # str
|
StarcoderdataPython
|
1892847
|
<gh_stars>1-10
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import numpy as np
import matplotlib.pyplot as plt
import merra
import atmos as atm
from merra import load_daily_season
datadir = '/home/jwalker/eady/datastore/merra/daily/'
savedir = datadir
#plev = 200
plev = 850
years = np.arange(1979, 2015)
season = 'ann'
lon1, lon2 = 40, 120
lat1, lat2 = -60, 60
nperday = 8
def pathstr(var, plev):
return datadir + 'merra_' + var + str(plev) + '_'
def outfile(year, plev):
lats = atm.latlon_labels([lat1, lat2], 'lat', '%.0f', deg_symbol=False)
lons = atm.latlon_labels([lon1, lon2], 'lon', '%.0f', deg_symbol=False)
subset = '%s-%s_%s-%s' % (lons[0], lons[1], lats[0], lats[1])
return datadir + 'merra_uv%d_%s_%d.nc' % (plev, subset, year)
for year in years:
print('Loading U')
u = load_daily_season(pathstr('u', plev), year, season, 'U',
lat1, lat2, lon1, lon2)
print('Loading V')
v = load_daily_season(pathstr('v', plev), year, season, 'V',
lat1, lat2, lon1, lon2)
print('Calculating vorticity and Rossby number')
rel_vort, _ , _ = atm.vorticity(u, v)
Ro = atm.rossby_num(u, v)
print('Calculating daily means from 3-hourly data')
    days = np.arange(1, u.shape[0]//nperday + 1)
u = atm.daily_from_subdaily(u, nperday, dayname='Day', dayvals=days)
v = atm.daily_from_subdaily(v, nperday, dayname='Day', dayvals=days)
rel_vort = atm.daily_from_subdaily(rel_vort, nperday, dayname='Day',
dayvals=days)
Ro = atm.daily_from_subdaily(Ro, nperday, dayname='Day', dayvals=days)
print('Saving to ' + outfile(year, plev))
atm.save_nc(outfile(year, plev), u, v, rel_vort, Ro)
|
StarcoderdataPython
|
376502
|
<gh_stars>0
import os, re, inspect, socket
from getpass import getpass
from service.remote import WinRemote, NixRemote
from auth.auth import ZbxAgent as Auth
from pexpect.pxssh import ExceptionPxssh
from exceptions import ExceptionAgent
from appagent.config.config import Common as cfg
#*******************************************************************************
class Agent(object):
WIN_INSTALLER_USER = None
WIN_INSTALLER_PASSWD = None
NIX_INSTALLER_USER = None
NIX_INSTALLER_PASSWD = None
NIX_USER = None
NIX_PASSWD = None
@staticmethod
def connect(hostIp, sudo):
def getPasswd(os):
if os == "Win":
if hasattr(Auth, "WIN_INSTALLER_USER"):
Agent.WIN_INSTALLER_USER = Auth.WIN_INSTALLER_USER
if hasattr(Auth, "WIN_INSTALLER_PASSWD"):
Agent.WIN_INSTALLER_PASSWD = Auth.WIN_INSTALLER_PASSWD
if not Agent.WIN_INSTALLER_PASSWD:
if not Agent.WIN_INSTALLER_USER:
            Agent.WIN_INSTALLER_USER = input("Win-admin authorization required\nUsername: ")
else:
print(f"Username: {Agent.WIN_INSTALLER_USER}")
          Agent.WIN_INSTALLER_PASSWD = getpass("Password: ")
elif os == "Nix":
if hasattr(Auth, "NIX_INSTALLER_USER"):
Agent.NIX_INSTALLER_USER = Auth.NIX_INSTALLER_USER
if hasattr(Auth, "NIX_INSTALLER_PASSWD"):
Agent.NIX_INSTALLER_PASSWD = Auth.NIX_INSTALLER_PASSWD
Agent.NIX_USER = Auth.NIX_USER
Agent.NIX_PASSWD = Auth.NIX_PASSWD
if not Agent.NIX_INSTALLER_PASSWD:
if not Agent.NIX_INSTALLER_USER:
            Agent.NIX_INSTALLER_USER = input("Nix-admin authorization required\nUsername: ")
else:
print(f"Username: {Agent.NIX_INSTALLER_USER}")
Agent.NIX_INSTALLER_PASSWD = getpass('Password: ')
#---------------------------------------------------------------------------
# Try Win
#if Agent.WIN_INSTALLER_USER is None or Agent.WIN_INSTALLER_PASSWD is None:
# getPasswd("<PASSWORD>")
#rmt = WinRemote(hostIp, Agent.WIN_INSTALLER_USER, Agent.WIN_INSTALLER_PASSWD)
#hostname = rmt.getHostname()
#if rmt.errCode == 0:
# rmt.hostname = hostname
# rmt.os = "Win"
# from winAgent import WinAgent
# rmt.agent = WinAgent
#elif rmt.errCode == -1:
if 1:
#-------------------------------------------------------------------------
# Try Nix
try:
if Agent.NIX_INSTALLER_USER is None or Agent.NIX_INSTALLER_PASSWD is None:
          getPasswd("Nix")
if sudo:
rmt = NixRemote(hostIp, Agent.NIX_INSTALLER_USER, Agent.NIX_INSTALLER_PASSWD, True)
else:
rmt = NixRemote(hostIp, Agent.NIX_USER, Agent.NIX_PASSWD, False)
rmt.os = "Nix"
from nixAgent import NixAgent
rmt.agent = NixAgent
rmt.hostname = rmt.getHostname()
if rmt.errCode != 0:
raise ExceptionAgent(rmt.errCode, rmt.errMsg)
rmt.systemd = rmt.chkSystemd()
if rmt.errCode != 0:
raise ExceptionAgent(rmt.errCode, rmt.errMsg)
except ExceptionPxssh as exc:
raise ExceptionAgent(-1, exc)
#-------------------------------------------------------------------------
else:
raise ExceptionAgent(rmt.errCode, rmt.errMsg)
rmt.hostIp = hostIp
rmt.xOS = f"x{rmt.getXOS()}"
if rmt.errCode != 0:
raise ExceptionAgent(rmt.errCode, rmt.errMsg)
return rmt
@staticmethod
def cmdStart(rmt, cfg, log):
raise ExceptionAgent(None, " Agent.cmdStart must be overridden. ")
@staticmethod
def cmdStop(rmt, cfg, log):
raise ExceptionAgent(None, " Agent.cmdStop must be overridden. ")
@staticmethod
def cmdRestart(rmt, cfg, log):
raise ExceptionAgent(None, " Agent.cmdRestart must be overridden. ")
@staticmethod
def cmdInstall(rmt, cfg, log):
raise ExceptionAgent(None, " Agent.cmdInstall must be overridden. ")
@staticmethod
def cmdUninstall(rmt, cfg, log):
raise ExceptionAgent(None, " Agent.cmdUninstall must be overridden. ")
@staticmethod
def prepareConfig(confFile, tmpConfFile, **kwargs):
    '''Prepares the configuration file'''
ln = confFile.readline()
while ln:
if "pidFile" in kwargs and ln.find("PidFile=") == 0:
        ln = f"PidFile={kwargs['pidFile']}\n"
elif "logFile" in kwargs and ln.find("LogFile=") == 0:
ln = f"LogFile={kwargs['logFile']}\n"
elif "serverIp" in kwargs and ln.find("Server=") == 0:
ln = f"Server={kwargs['serverIp']}\n"
elif "serverIp" in kwargs and ln.find("ServerActive=") == 0:
ln = f"ServerActive={kwargs['serverIp']}\n"
elif "listenPort" in kwargs and ln.find("ListenPort=") == 0:
ln = f"ListenPort={kwargs['listenPort']}\n"
elif "hostname" in kwargs and ln.find("Hostname=") == 0:
ln = f"Hostname={kwargs['hostname']}\n"
tmpConfFile.write(ln)
ln = confFile.readline()
#*******************************************************************************
def usage(err=None):
if err:
print(err)
print('''
Usage:
agent start|stop|restart all|<version> hostip
agent install|uninstall all|version [hostip]
If hostip is allowed, the host.list file is used.
Example: agent start v3.4 192.168.27.35
agent stop all 192.168.27.35
agent uninstall all
''')
#*******************************************************************************
def cmdExe(cmd=None, ver=None, host=None, noColor=Log.COLOR):
cmdSet1 = ("start", "stop", "restart")
cmdSet2 = ("install", "uninstall")
if not(cmd in cmdSet1 or cmd in cmdSet2):
usage(f"Parameters error. Invalid command: \"{cmd}\"")
return 1
  if not ver or not(ver in (list(filter(None, [None if attr[0] != "v" else attr.replace("_", ".") for attr in dir(config)])) + ["all"])):
usage(f"Parameters error. Invalid version: \"{ver}\"")
return 1
if cmd in cmdSet1 and not host:
usage("Parameters error. No Host")
return 1
if cmd in cmdSet1:
log = Log(None, noColor)
else:
log = Log(cfg.INSTALL_LOG, noColor)
log.header1()
if cmd == "start":
msgBefore = ("Starting agent(s) ... ", {"out": Log.CONSOLE})
msgAfter = ("Agent(s) started ... ", {"out": Log.CONSOLE})
elif cmd == "stop":
msgBefore = ("Stopping agent(s) ... ", {"out": Log.CONSOLE})
msgAfter = ("Agent(s) stopped ... ", {"out": Log.CONSOLE})
elif cmd == "restart":
msgBefore = ("Restarting agent(s) ... ", {"out": Log.CONSOLE})
msgAfter = ("Agent(s) restarted ... ", {"out": Log.CONSOLE})
elif cmd == "install":
msgBefore = ("Install agent(s) ... ", {"out": Log.CONSOLE+Log.FILE})
msgAfter = ("Agent(s) installed ... ", {"out": Log.CONSOLE+Log.FILE})
elif cmd == "uninstall":
msgBefore = ("Uninstall agent(s) ... ", {"out": Log.CONSOLE+Log.FILE})
msgAfter = ("Agent(s) uninstalled ... ", {"out": Log.CONSOLE+Log.FILE})
total = 0
fail = 0
err = 0
log.writeln(msgBefore[0], **msgBefore[1])
if host:
err = cmdExeHost(cmd, ver, host, log)
fail += err
total += 1
else:
listFilePath = cfg.HOSTLIST
tmpListFilePath = f"{cfg.HOSTLIST}.tmp"
with open(listFilePath, "r") as listFile, open(tmpListFilePath, "w") as tmpListFile:
host = listFile.readline()
while host:
        host = host.strip()
        if host == '' or host.find("#") == 0 or host.find(";") == 0:
          tmpListFile.write(f"{host}\n")
          host = listFile.readline()
          continue
        err = cmdExeHost(cmd, ver, host, log)
        if err:
          tmpListFile.write(f"{host}\n")
        else:
          tmpListFile.write(f";{host}\n")
fail += err
total += 1
host = listFile.readline()
stat = os.stat(listFilePath)
os.rename(tmpListFilePath, listFilePath)
#os.chmod(listFilePath, stat.st_mode)
os.chown(listFilePath, stat.st_uid, stat.st_gid)
log.write(msgAfter[0], **msgAfter[1])
if err:
log.fail()
else:
log.success()
log.header1()
log.writeln(f"Processed failed: {log.red(fail) if fail > 0 else log.green(fail)} total: {log.green(total)}")
log.close()
return 0 if fail == 0 else 1
#*******************************************************************************
def cmdExeHost(cmd, ver, host, log):
try:
log.header2(f"Host {host} ")
result = re.match(r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}).*", host)
if result:
hostIp = result.group(1)
else:
hostIp = socket.gethostbyname(host.strip())
sudo = cmd in ("install", "uninstall")
rmt = Agent.connect(hostIp, sudo)
log.writeln(' {} {} {} {}'.format(rmt.hostname, rmt.hostIp, rmt.os, rmt.xOS))
log.writeln()
err = 0
if ver == "all":
#-------------------------------------------------------------------------
      # uninstall all installed agents
if cmd == "uninstall":
err = rmt.agent.cmdUninstall(ver, rmt, log)
if err == 2:
log.writeln(" No installed agents")
err = 0
#-------------------------------------------------------------------------
      # operations for agents of all versions from config
elif cmd in ("install", "start", "stop", "restart"):
#-----------------------------------------------------------------------
        # install agents of all versions from config
if cmd == "install":
#---------------------------------------------------------------------
          # uninstall all installed agents before installing
err = rmt.agent.cmdUninstall(ver, rmt, log)
if err in (0, 2):
err = 0
else:
log.writeln()
#-----------------------------------------------------------------------
        # start/stop/restart agents of all versions from config
if not err:
versions = [ver[0] for ver in inspect.getmembers(config, predicate=inspect.isclass)]
for ver in versions:
if ver == "Common":
continue
err += cmdExeVer(cmd, ver, rmt, log)
#---------------------------------------------------------------------------
    # for an agent of the specified version
else:
err = cmdExeVer(cmd, ver, rmt, log)
#---------------------------------------------------------------------------
rmt.exit()
return 0 if err == 0 else 1
#-----------------------------------------------------------------------------
except ExceptionAgent as exc:
log.err(exc.errMsg)
if "rmt" in locals():
rmt.exit()
return 1
#*******************************************************************************
def cmdExeVer(cmd, ver, rmt, log):
cfgVer = ver.replace(".", "_")
cfg = getattr(__import__("AppAgent.config", fromlist = [cfgVer]), cfgVer)
err = 0
#-----------------------------------------------------------------------------
# install
if cmd == "install":
log.writeln(" Agent version: {}".format(cfg.AGENT_VER))
err = rmt.agent.cmdUninstall(ver, rmt, log)
if err in (0, 2):
err = rmt.agent.cmdInstall(cfg, rmt, log)
msg = " installed ... "
if err:
log.writeln(msg, Log.FAIL)
else:
log.writeln(msg, Log.SUCCESS)
#-----------------------------------------------------------------------------
# uninstall
elif cmd == 'uninstall':
err = rmt.agent.cmdUninstall(ver, rmt, log)
if err == 2:
log.writeln(" No installed agents")
err = 0
#-----------------------------------------------------------------------------
# start
elif cmd == "start":
log.writeln(f" Agent version: {cfg.AGENT_VER}")
err = rmt.agent.cmdStart(cfg, rmt, log)
msg = " Agent started ... "
if err == 0:
log.writeln(msg, Log.SUCCESS)
elif err == 2:
log.write(msg)
log.fail("not installed")
else:
log.writeln(msg, Log.FAIL)
#-----------------------------------------------------------------------------
# stop
elif cmd == "stop":
log.writeln(f" Agent version: {cfg.AGENT_VER}")
err = rmt.agent.cmdStop(cfg, rmt, log)
msg = " Agent stopped ... "
if err == 0:
log.writeln(msg, Log.SUCCESS)
elif err == 2:
log.write(msg)
log.fail("not installed")
else:
log.writeln(msg, Log.FAIL)
#-----------------------------------------------------------------------------
# restart
elif cmd == "restart":
log.writeln(f" Agent version: {cfg.AGENT_VER}")
err = rmt.agent.cmdRestart(cfg, rmt, log)
msg = " Agent restarted ... "
if err == 0:
log.writeln(msg, Log.SUCCESS)
elif err == 2:
log.write(msg)
log.fail("not installed")
else:
log.writeln(msg, Log.FAIL)
#-----------------------------------------------------------------------------
log.writeln()
return err
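#*******************************************************************************
# Hedged entry-point sketch (not part of the original script): wires the
# grammar shown in usage() to cmdExe(). The positional-argument handling here
# is an illustrative assumption; the real dispatcher may live elsewhere.
if __name__ == "__main__":
  import sys
  if len(sys.argv) < 3:
    usage("Parameters error. Not enough arguments")
    sys.exit(1)
  cli_cmd, cli_ver = sys.argv[1], sys.argv[2]
  cli_host = sys.argv[3] if len(sys.argv) > 3 else None
  sys.exit(cmdExe(cli_cmd, cli_ver, cli_host))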
|
StarcoderdataPython
|
1650429
|
__author__ = '<NAME>'
|
StarcoderdataPython
|
1676417
|
from rest_framework.exceptions import APIException
from rest_framework.status import HTTP_423_LOCKED
from waldur_ansible.jupyter_hub_management.backend import locking_service
from waldur_ansible.python_management import models as python_management_models, utils as python_management_utils
from waldur_core.core import models as core_models
from . import models, executors
class JupyterHubManagementService(object):
executor = executors.JupyterHubManagementRequestExecutor
def schedule_jupyter_hub_management_removal(self, persisted_jupyter_hub_management):
delete_request = models.JupyterHubManagementDeleteRequest(jupyter_hub_management=persisted_jupyter_hub_management)
if not locking_service.JupyterHubManagementBackendLockingService.is_processing_allowed(delete_request):
raise APIException(code=HTTP_423_LOCKED)
delete_request.save()
self.executor.execute(delete_request, async=True)
def issue_localize_globalize_requests(self, updated_jupyter_hub_management, validated_data):
virtual_environments = validated_data['updated_virtual_environments']
virtual_environments_to_localize = []
virtual_environments_to_globalize = []
for virtual_environment in virtual_environments:
persisted_virtual_environment = python_management_models.VirtualEnvironment.objects.get(
python_management=updated_jupyter_hub_management.python_management, name=virtual_environment['name'])
if persisted_virtual_environment.jupyter_hub_global is not virtual_environment['jupyter_hub_global']:
if virtual_environment['jupyter_hub_global']:
virtual_environments_to_globalize.append(virtual_environment)
else:
virtual_environments_to_localize.append(virtual_environment)
for virtual_environment_to_globalize in virtual_environments_to_globalize:
globalize_request = models.JupyterHubManagementMakeVirtualEnvironmentGlobalRequest(
jupyter_hub_management=updated_jupyter_hub_management, virtual_env_name=virtual_environment_to_globalize['name'])
self.execute_or_refuse_request(globalize_request)
for virtual_environment_to_localize in virtual_environments_to_localize:
localize_request = models.JupyterHubManagementMakeVirtualEnvironmentLocalRequest(
jupyter_hub_management=updated_jupyter_hub_management, virtual_env_name=virtual_environment_to_localize['name'])
self.execute_or_refuse_request(localize_request)
def execute_or_refuse_request(self, localize_request):
if locking_service.JupyterHubManagementBackendLockingService.is_processing_allowed(localize_request):
localize_request.save()
self.executor.execute(localize_request, async=True)
def execute_sync_configuration_request_if_allowed(self, persisted_jupyter_hub_management):
sync_config_request = models.JupyterHubManagementSyncConfigurationRequest(jupyter_hub_management=persisted_jupyter_hub_management)
if not locking_service.JupyterHubManagementBackendLockingService.is_processing_allowed(sync_config_request):
raise APIException(code=HTTP_423_LOCKED)
sync_config_request.save()
self.executor.execute(sync_config_request, async=True)
def has_jupyter_hub_config_changed(self, incoming_validated_data, persisted_jupyter_hub_management):
removed_jupyter_hub_users = self.find_removed_users(persisted_jupyter_hub_management.jupyter_hub_users.all(), incoming_validated_data.get('jupyter_hub_users'))
if removed_jupyter_hub_users:
return True
for jupyter_hub_user in incoming_validated_data.get('jupyter_hub_users'):
persisted_jupyter_hub_user = self.find_corresponding_persisted_jupyter_hub_user(jupyter_hub_user['username'], persisted_jupyter_hub_management)
if persisted_jupyter_hub_user is None \
or persisted_jupyter_hub_user.admin != jupyter_hub_user['admin'] \
or jupyter_hub_user['password'] \
or persisted_jupyter_hub_user.whitelisted != jupyter_hub_user['whitelisted']:
return True
root_model_changed = incoming_validated_data.get('session_time_to_live_hours') != persisted_jupyter_hub_management.session_time_to_live_hours
if not root_model_changed:
persisted_oauth_config = persisted_jupyter_hub_management.jupyter_hub_oauth_config
if persisted_oauth_config:
incoming_oauth_config = incoming_validated_data.get('jupyter_hub_oauth_config')
return persisted_oauth_config.type != incoming_oauth_config.get('type') \
or persisted_oauth_config.oauth_callback_url != incoming_oauth_config.get('oauth_callback_url') \
or persisted_oauth_config.client_id != incoming_oauth_config.get('client_id') \
or persisted_oauth_config.client_secret != incoming_oauth_config.get('client_secret') \
or persisted_oauth_config.tenant_id != incoming_oauth_config.get('tenant_id') \
or persisted_oauth_config.gitlab_host != incoming_oauth_config.get('gitlab_host')
else:
return True
def find_removed_users(self, persisted_jupyter_hub_users, jupyter_hub_users):
result = []
for persisted_jupyter_hub_user in persisted_jupyter_hub_users:
if not filter(lambda u: u['username'] == persisted_jupyter_hub_user.username, jupyter_hub_users):
result.append(persisted_jupyter_hub_user)
return result
def find_corresponding_persisted_jupyter_hub_user(self, username, jupyter_hub_management):
return python_management_utils.execute_safely(lambda: models.JupyterHubUser.objects.get(username=username, jupyter_hub_management=jupyter_hub_management))
def is_last_sync_request_erred(self, persisted_jupyter_hub_management):
last_sync_config_request = python_management_utils.execute_safely(
lambda: models.JupyterHubManagementSyncConfigurationRequest.objects.filter(jupyter_hub_management=persisted_jupyter_hub_management).latest('id'))
return last_sync_config_request.state == core_models.StateMixin.States.ERRED if last_sync_config_request else True
|
StarcoderdataPython
|
4823357
|
from django.db import models
# Create your models here.
class FitsFileUpload(models.Model):
fits_file = models.FileField(upload_to='uploads', null=False, blank=False, default='')
timestamp = models.DateField(auto_now_add=True, null=True, blank=True)
|
StarcoderdataPython
|
4989354
|
from typing import Dict, List, Union
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import datetime
from Bio.SeqFeature import FeatureLocation, SeqFeature
# Qualifier Dictionary
example_qualifiers_dict = {
"gene": "gene",
"latin": "latin",
"organism": "species",
"functional": "functional",
"scheme": "IMGT",
}
# Example Annotations
annotations = {
"organism": "latin",
"source": "species",
"date": (datetime.date.today().strftime("%d-%b-%Y")),
}
feature_types = [
"FWR1",
"FWR2",
"FWR3",
"FWR4",
"CDR1",
"CDR2",
"CDR3",
"VGene",
"JGene",
"DGene",
"IGK",
"IGK_Non-Productive",
"IGH",
"IGL",
]
class GenBankFeature:
def __init__(
self,
start: int,
end: int,
feature_type: str,
id: Union[str, None] = None,
qualifier_dict: Union[Dict[str, str], None] = None,
):
self.start = start
self.end = end
self.id = id
# what type of feature is this
self.feature_type = feature_type
# Can have other info about our feature
self.qualifier_dict = qualifier_dict
# Feature Location
self.location = FeatureLocation(self.start, self.end)
# our main feature
self._feature = SeqFeature(self.location, type=self.feature_type, qualifiers=self.qualifier_dict)
@property
def feature_type(self) -> str:
return self._feature_type
@feature_type.setter
def feature_type(self, t: str) -> None:
if t not in feature_types:
raise TypeError(f"{t} must be in {feature_types}")
else:
self._feature_type = t
@property
def feature(self) -> SeqFeature:
return self._feature
class GenBank:
def __init__(
self, sequence: Union[str, Seq], id: str, name: Union[str, None] = None, description: Union[str, None] = None
):
self.sequence = sequence
self.id = id
if name:
self.name = name
else:
self.name = id[0:16]
self.description = description
# Our main GB record
self._record = SeqRecord(self.sequence, id=self.id, name=self.name, description=self.description)
@property
def record(self) -> SeqRecord:
return self._record
@property
def features(self) -> List[SeqFeature]:
_a: List[SeqFeature] = self.record.features
return _a
@property
def sequence(self) -> Seq:
return self._sequence
@sequence.setter
def sequence(self, seq: Union[str, Seq]) -> None:
if isinstance(seq, str):
self._sequence = Seq(seq)
elif isinstance(seq, Seq):
self._sequence = seq
else:
raise TypeError(f"{type(str)} must be instance of str or Bio.Seq")
def add_feature(self, feature: GenBankFeature) -> None:
if not isinstance(feature, GenBankFeature):
raise TypeError(f"{feature} must be of type {GenBankFeature}")
else:
self.features.append(feature.feature)
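# A minimal usage sketch of the classes above (the sequence, ids, and file name
# below are illustrative assumptions, not part of the original module): build a
# record, attach a CDR3 feature, and write it out with Biopython's SeqIO.
if __name__ == "__main__":
    from Bio import SeqIO

    gb = GenBank(sequence="ATGGTGAAGCTG", id="example_1", description="toy record")
    cdr3 = GenBankFeature(start=0, end=6, feature_type="CDR3", id="cdr3_1",
                          qualifier_dict={"gene": "toy_gene"})
    gb.add_feature(cdr3)
    # newer Biopython versions require molecule_type before writing GenBank files
    gb.record.annotations["molecule_type"] = "DNA"
    SeqIO.write(gb.record, "example.gb", "genbank")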
|
StarcoderdataPython
|
9728358
|
<filename>module/constants.py<gh_stars>0
# Constants across package (Due to sharing b/w modules)
# configuration related constants
lag = "lag"
feature_col_names = "attribute_names"
data_file_path = "data_file_path"
output_file_path = "output_file_path"
patt_len = "pattern_length"
supp_threshold = "support_threshold"
crossK_threshold = "crossK_threshold"
num_of_bins = "number_of_discretizer_bins"
nc_window_len = "anomalous_window_length"
nc_window_column_name = "anomalous_target_column"
nc_window_strategy = "anomalous_window_strategy"
nc_window_threshold = "anomalous_window_threshold"
output_type = "output_type"
feature_min_values = "attribute_min_values"
feature_bin_intervals = "attribute_bin_intervals"
topk = "topk"
n_bins = "n_bins"
discretization_type = "discrete_strategy"
split_columns = "split_columns"
index_columns = "index_columns"
pruning_type = "pruning_type"
# Data related constants
pass_col_name = 'pass'
iter_col_name = 'Iteration'
target_col_name = 'NCWindow'
seq_idx_name = 'SeqIndex'
manual_discrete_strategy = 'Manual'
auto_discrete_strategy = 'Denoise'
# File IO related constants
default_config_file = '/data/default_config.json'
formatted_engine_data_file = '/data/formatted_engine_data.csv'
|
StarcoderdataPython
|
8064788
|
import os
import time
def main(request, response):
"""Serves the contents in blue.png but with a Cache-Control header.
Emits a Cache-Control header with max-age set to 1h to allow the browser
    to cache the image. Used for testing behaviors that involve caching logic.
"""
image_path = os.path.join(os.path.dirname(__file__), "blue.png")
response.headers.set("Cache-Control", "max-age=3600")
response.headers.set("Content-Type", "image/png")
response.content = open(image_path, mode='rb').read()
|
StarcoderdataPython
|
3337777
|
# flake8: noqa
def my_sum(first, second):
return first + second
def my_mult(first, second):
return first * second
def my_div(first, second):
return first / second
def test_sum():
assert my_sum(1, 2) == 3
assert round(my_sum(2.1, 4.2), 2) == 6.3
def test_mult():
assert my_mult(2, 2) == 4
assert my_mult(1, 1) == 1
assert my_mult(197, 0) == 0
assert my_mult(6, 4) == 24
assert my_mult(-6, 4) == -24
assert my_mult(6, -4) == -24
assert my_mult(-6, -4) == 24
assert my_mult(float('nan'), 1)
def test_div():
assert my_div(1, 1) == 1.0
assert isinstance(my_div(1, 1), float)
assert my_div(5, 2) == 2.5
assert my_div(6, 2) == 3.0
try:
my_div(1, 0)
except ZeroDivisionError:
assert True
else:
assert False
if __name__ == '__main__':
tests = [
globals()[variable]
for variable in globals()
if variable.startswith('test_')
]
for test in tests:
try:
test()
except AssertionError:
print('Failed test: ', test.__name__)
raise
|
StarcoderdataPython
|
5040515
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'enable_winrt_81_revision_dll',
'type': 'shared_library',
'msvs_enable_winrt': 1,
'msvs_application_type_revision': '8.1',
'sources': [
'dllmain.cc',
],
},
{
'target_name': 'enable_winrt_82_revision_dll',
'type': 'shared_library',
'msvs_enable_winrt': 1,
'msvs_application_type_revision': '8.2',
'sources': [
'dllmain.cc',
],
},
{
'target_name': 'enable_winrt_invalid_revision_dll',
'type': 'shared_library',
'msvs_enable_winrt': 1,
'msvs_application_type_revision': '999',
'sources': [
'dllmain.cc',
],
},
]
}
|
StarcoderdataPython
|
79750
|
import typing
import error
REQUEST_TABLE = {
'get': [[str]],
'as': [str],
'by': ([str], 'optional'),
'if': ([list], 'optional'),
'except': ([list], 'optional'),
'join': ([
{
'name': str,
'of': [str],
}
], 'optional'),
'macro': ([str], 'optional'),
}
REQUEST_TABLE_QUESTION_COUNT = {
'get': [[str]],
'if': ([list], 'optional'),
'except': ([list], 'optional'),
'macro': [str],
}
REQUEST_CREATE_SURVEY = {
'surveyId': int,
'title': str,
}
REQUEST_CHANGE_PERMISSIONS = {
'r': ([int], 'optional'),
'w': ([int], 'optional'),
'n': ([int], 'optional'),
}
REQUEST_GROUP = {
'group': str,
}
REQUEST_USER = {
'userId': int,
}
REQUEST_SURVEY_LINK = {
'permission': str,
'surveyId': int
}
REQUEST_REPORT_LINK = {
'permission': str,
'reportId': int
}
def analyze(tp: typing.Any, obj: typing.Any) -> str:
"""Analyze object structure.
Keyword arguments:
tp -- expected object structure
obj -- given object
Return value:
returns message after analyze
"""
# Check if obj is of the desired type
if type(tp) is type:
if type(obj) is tp:
return ''
else:
return f'expected {tp.__name__}, got {type(obj).__name__}'
# If the desired type is a list, check types of each of its elements
if type(tp) is list:
if type(obj) is not list:
return f'expected {type(tp).__name__}, got {type(obj).__name__}'
for i, o in enumerate(obj):
if msg := analyze(tp[0], o):
return f'in element [{i}]: {msg}'
return ''
# If the desired type is a dict, check types of values under each key
if type(tp) is dict:
if type(obj) is not dict:
return f'expected {type(tp).__name__}, got {type(obj).__name__}'
for k, t in tp.items():
if type(t) is tuple:
t, *params = t
else:
params = []
if k not in obj:
if 'optional' in params:
continue
return f'expected key \'{k}\''
if msg := analyze(t, obj[k]):
return f'in element \'{k}\': {msg}'
return ''
return 'unexpected object type'
def check(tp: typing.Any, obj: typing.Any):
"""Validate object structure.
Keyword arguments:
tp -- expected object structure
obj -- given object
"""
if msg := analyze(tp, obj):
raise error.API(msg)
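# A small illustrative check (the payloads are hypothetical, not part of the
# original module): `check` returns silently when the object matches the expected
# structure and raises error.API with a descriptive message when it does not.
if __name__ == '__main__':
    check(REQUEST_CREATE_SURVEY, {'surveyId': 7, 'title': 'Customer feedback'})
    try:
        check(REQUEST_CREATE_SURVEY, {'surveyId': 'not-an-int', 'title': 'oops'})
    except error.API as exc:
        print(exc)  # in element 'surveyId': expected int, got str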
|
StarcoderdataPython
|
9726027
|
#! /usr/bin/env python
"""Experimental module for Python 2 compatibility.
The purpose of this module is to enable Pyslet to be gradually converted
to Python3 while retaining support for Python 2.7 and 2.6. This fills a
similar role to the six module but the idea is to minimise the number of
required fixes by making the Pyslet code as Python3 native as
possible."""
import io
import sys
import types
py2 = sys.hexversion < 0x03000000
"""Unfortunately, sometimes you just need to know if you are running
under Python 2, this flag provides a common way for version specific
code to check. (There are multiple ways of checking, this flag just
makes it easier to find places in Pyslet where we care.)"""
_sys_codec = sys.getdefaultencoding()
if py2:
suffix = ''
def u8(arg):
if isinstance(arg, types.UnicodeType):
try:
arg.encode('ascii')
except UnicodeEncodeError:
raise ValueError("u8: use binary literal for non-ASCII data")
return arg
try:
return arg.decode('utf-8')
except UnicodeDecodeError:
raise ValueError("u8: invalid utf-8 string, did you mean ul?")
def ul(arg):
if isinstance(arg, types.UnicodeType):
try:
arg.encode('latin-1')
except UnicodeEncodeError:
raise ValueError("ul: cannot be used with non-latin data")
return arg
return arg.decode('latin-1')
def is_string(arg):
return isinstance(arg, types.StringTypes)
is_text = is_string
def force_text(arg):
if isinstance(arg, str):
return unicode(arg)
elif isinstance(arg, unicode):
return arg
else:
raise TypeError("Expected str or unicode: %s" % repr(arg))
def is_ascii(arg):
return isinstance(arg, str)
def force_ascii(arg):
if isinstance(arg, unicode):
return arg.encode('ascii')
elif isinstance(arg, str):
return arg
else:
raise TypeError("Expected str or unicode: %s" % repr(arg))
to_text = unicode
def is_unicode(arg):
return isinstance(arg, unicode)
def character(arg):
if isinstance(arg, str):
if len(arg) == 1:
return unichr(ord(arg[0]))
else:
raise ValueError('Expected single character')
else:
return unichr(arg)
join_characters = unicode('').join
uempty = unicode('')
uspace = unicode(' ')
def force_bytes(arg):
if isinstance(arg, unicode):
return arg.encode('ascii')
return arg
to_bytes = str
def is_byte(arg):
return isinstance(arg, bytes) and len(arg) == 1
def byte(arg):
if isinstance(arg, str):
if len(arg) == 1:
return arg
else:
raise ValueError('Expected single character')
elif isinstance(arg, types.UnicodeType):
if len(arg) == 1:
arg = ord(arg)
# fall through to int tests
else:
raise ValueError('Expected single character')
elif isinstance(arg, bytearray):
if len(arg) == 1:
return chr(arg[0])
else:
raise ValueError('Expected single byte')
if isinstance(arg, (int, long)):
if arg >= 0 and arg <= 255:
return chr(arg)
else:
raise ValueError("Value out of range 0..255")
else:
            raise TypeError('Expected character or int')
byte_value = ord
join_bytes = b''.join
def byte_to_bstr(arg):
return arg
buffer2 = types.BufferType
long2 = long
range3 = xrange
def dict_keys(d):
return d.iterkeys()
def dict_values(d):
return d.itervalues()
def dict_items(d):
return d.iteritems()
import __builtin__ as builtins
input3 = raw_input
from urllib import ( # noqa : unused import
urlencode,
urlopen,
quote as urlquote
)
from urlparse import parse_qs # noqa : unused import
else:
suffix = '3'
def u8(arg):
if isinstance(arg, bytes):
return arg.decode('utf-8')
elif isinstance(arg, str):
# only works for ascii
try:
arg.encode('ascii')
except UnicodeEncodeError:
raise ValueError("u8: use binary literal for non-ASCII data")
return arg
else:
raise TypeError
def ul(arg):
if isinstance(arg, bytes):
return arg.decode('latin-1')
elif isinstance(arg, str):
try:
arg.encode('latin-1')
except UnicodeEncodeError:
raise ValueError("ul: cannot be used with non-latin data")
return arg
else:
raise TypeError
def is_string(arg):
return isinstance(arg, (str, bytes))
def is_text(arg):
return isinstance(arg, str)
def force_text(arg):
if not isinstance(arg, str):
raise TypeError("Expected str: %s" % repr(arg))
return arg
def is_ascii(arg):
if isinstance(arg, str):
arg.encode('ascii')
return True
else:
return False
def force_ascii(arg):
if isinstance(arg, bytes):
return arg.decode('ascii')
elif isinstance(arg, str):
return arg
else:
raise TypeError("Expected str: %s" % repr(arg))
def to_text(arg):
if isinstance(arg, str):
return arg
elif isinstance(arg, bytes):
return arg.decode('ascii')
else:
return str(arg)
def is_unicode(arg):
return isinstance(arg, str)
character = chr
join_characters = ''.join
uempty = ''
uspace = ' '
def force_bytes(arg):
if isinstance(arg, str):
return arg.encode('ascii')
return arg
def to_bytes(arg):
if hasattr(arg, '__bytes__'):
return arg.__bytes__()
else:
return str(arg).encode('ascii')
def is_byte(arg):
return isinstance(arg, int) and 0 <= arg <= 255
def byte(arg):
if isinstance(arg, str):
if len(arg) == 1:
arg = ord(arg)
else:
raise ValueError('Expected single character')
elif isinstance(arg, (bytes, bytearray)):
if len(arg) == 1:
arg = arg[0]
else:
raise ValueError('Expected single byte')
if isinstance(arg, int):
if arg >= 0 and arg <= 255:
return arg
else:
raise ValueError("Value out of range 0..255")
else:
            raise TypeError('Expected character or int')
byte_value = int
join_bytes = bytes
def byte_to_bstr(arg):
return bytes([arg])
buffer2 = bytes
long2 = int
range3 = range
def dict_keys(d):
return d.keys()
def dict_values(d):
return d.values()
def dict_items(d):
return d.items()
import builtins # noqa : unused import
input3 = input
from urllib.request import urlopen # noqa : unused import
from urllib.parse import ( # noqa : unused import
parse_qs,
quote as urlquote,
urlencode
)
class UnicodeMixin(object):
"""Mixin class to handle string formatting
For classes that need to define a __unicode__ method of their own
this class is used to ensure that the correct behaviour exists
in Python versions 2 and 3.
The mixin class implements __str__ based on your existing (required)
__unicode__ or (optional) __bytes__ implementation. In python 2,
the output of __unicode__ is encoded using the default system
encoding if no __bytes__ implementation is provided. This may well
generate errors but that seems more appropriate as it will catch
cases where the *str* function has been used instead of
:py:func:`to_text`."""
if py2:
def __str__(self): # noqa
if hasattr(self, '__bytes__'):
return self.__bytes__()
else:
return self.__unicode__().encode(_sys_codec)
else:
def __str__(self): # noqa
return self.__unicode__()
class SortableMixin(object):
"""Mixin class for handling comparisons
Utility class for helping provide comparisons that are compatible
with Python 2 and Python 3. Classes must define a method
:meth:`sortkey` which returns a sortable key value representing the
instance.
    Derived classes may optionally override the method :meth:`otherkey`
to provide an ordering against other object types.
This mixin then adds implementations for all of the comparison
methods: __eq__, __ne__, __lt__, __le__, __gt__, __ge__."""
def sortkey(self):
"""Returns a value to use as a key for sorting.
By default returns NotImplemented. This value causes the
comparison functions to also return NotImplemented."""
return NotImplemented
def otherkey(self, other):
"""Returns a value to use as a key for sorting
The difference between this method and :meth:`sortkey` is that
this method takes an arbitrary object and either returns the key
to use when comparing with this instance or NotImplemented if
the sorting is not supported.
You don't have to override this implementation, by default it
returns other.sortkey() if *other* is an instance of the same
class as *self*, otherwise it returns NotImplemented."""
if isinstance(other, self.__class__):
return other.sortkey()
else:
return NotImplemented
def __eq__(self, other):
a = self.sortkey()
b = self.otherkey(other)
if NotImplemented in (a, b):
return NotImplemented
else:
return a == b
def __ne__(self, other):
a = self.sortkey()
b = self.otherkey(other)
if NotImplemented in (a, b):
return NotImplemented
else:
return a != b
def __lt__(self, other):
a = self.sortkey()
b = self.otherkey(other)
if NotImplemented in (a, b):
if py2:
raise TypeError("unorderable types: %s < %s" %
(repr(self), repr(other)))
return NotImplemented
else:
return a < b
def __le__(self, other):
a = self.sortkey()
b = self.otherkey(other)
if NotImplemented in (a, b):
if py2:
raise TypeError("unorderable types: %s <= %s" %
(repr(self), repr(other)))
return NotImplemented
else:
return a <= b
def __gt__(self, other):
a = self.sortkey()
b = self.otherkey(other)
if NotImplemented in (a, b):
if py2:
raise TypeError("unorderable types: %s > %s" %
(repr(self), repr(other)))
return NotImplemented
else:
return a > b
def __ge__(self, other):
a = self.sortkey()
b = self.otherkey(other)
if NotImplemented in (a, b):
if py2:
raise TypeError("unorderable types: %s >= %s" %
(repr(self), repr(other)))
return NotImplemented
else:
return a >= b
class CmpMixin(object):
"""Mixin class for handling comparisons
For compatibility with Python 2's __cmp__ method this class defines
an implementation of __eq__, __lt__, __le__, __gt__, __ge__ that are
redirected to __cmp__. These are the minimum methods required for
Python's rich comparisons.
In Python 2 it also provides an implementation of __ne__ that simply
inverts the result of __eq__. (This is not required in Python 3.)"""
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
if py2:
def __ne__(self, other): # noqa
return not self.__eq__(other)
class BoolMixin(object):
"""Mixin class for handling legacy __nonzero__
For compatibility with Python 2 this class defines __nonzero__
returning the value of the method __bool__."""
def __nonzero__(self):
return self.__bool__()
def output(txt):
"""Simple function for writing to stdout
Not as sophisticated as Python 3's print function but designed to be
more of a companion to the built in input."""
if isinstance(sys.stdout, io.TextIOBase):
sys.stdout.write(txt)
else:
sys.stdout.write(txt.encode('utf-8'))
|
StarcoderdataPython
|
8121375
|
<reponame>KarrLab/bpforms
""" Tests of bpforms command line interface (bpforms.__main__)
:Author: <NAME> <<EMAIL>>
:Date: 2019-01-31
:Copyright: 2019, Karr Lab
:License: MIT
"""
from bpforms import __main__
import bpforms
import bpforms.alphabet.dna
import bpforms.alphabet.rna
import bpforms.alphabet.protein
import capturer
import mock
import os
import shutil
import tempfile
import unittest
dI_smiles = 'O=C1NC=NC2=C1N=CN2'
dI_smiles_ph_14 = 'O=c1[n-]cnc2c1nc[n-]2'
dIMP_smiles = 'OC[C@H]1O[C@H](C[C@@H]1O)[N+]1(C=Nc2c1nc[nH]c2=O)C1CC(C(O1)COP(=O)([O-])[O-])O'
dIMP_smiles_ph_14 = 'OC[C@H]1O[C@H](C[C@@H]1O)[N+]1(C=Nc2c1nc[nH]c2=O)C1CC(C(O1)COP(=O)([O-])[O-])O'
class CliTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
bpforms.alphabet.dna.canonical_dna_alphabet.from_yaml(bpforms.alphabet.dna.canonical_filename)
def test_cli(self):
with mock.patch('sys.argv', ['bpforms', '--help']):
with self.assertRaises(SystemExit) as context:
__main__.main()
            self.assertRegex(str(context.exception), 'usage: bpforms')
def test_help(self):
with self.assertRaises(SystemExit):
with __main__.App(argv=[]) as app:
app.run()
with self.assertRaises(SystemExit):
with __main__.App(argv=['--help']) as app:
app.run()
def test_version(self):
with __main__.App(argv=['-v']) as app:
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with self.assertRaises(SystemExit):
app.run()
self.assertEqual(captured.stdout.get_text(), bpforms.__version__)
self.assertEqual(captured.stderr.get_text(), '')
with __main__.App(argv=['--version']) as app:
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with self.assertRaises(SystemExit):
app.run()
self.assertEqual(captured.stdout.get_text(), bpforms.__version__)
self.assertEqual(captured.stderr.get_text(), '')
def test_validate(self):
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['validate', 'canonical_dna', 'ACGT']) as app:
# run app
app.run()
# test that the CLI produced the correct output
self.assertEqual(captured.stdout.get_text(), 'Form is valid')
self.assertEqual(captured.stderr.get_text(), '')
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['validate', 'canonical_dna',
('ACG'
'[id: "dI" | structure: "{}"'
' | l-bond-atom: P30'
' | l-displaced-atom: O33-1'
' | r-bond-atom: O34'
' | r-displaced-atom: H34'
' ]'
'T').format(dIMP_smiles)]) as app:
# run app
app.run()
# test that the CLI produced the correct output
self.assertEqual(captured.stdout.get_text(), 'Form is valid')
self.assertEqual(captured.stderr.get_text(), '')
with self.assertRaisesRegex(SystemExit, '^Form is invalid'):
with __main__.App(argv=['validate', 'canonical_dna', 'ACGT[']) as app:
# run app
app.run()
with self.assertRaisesRegex(SystemExit, '^Form is invalid'):
with __main__.App(argv=['validate', 'canonical_dna', (
'ACGT'
'[id: "dI" | structure: "{}" | l-displaced-atom: O33-1 ]'
).format(dIMP_smiles)]) as app:
# run app
app.run()
def test_get_properties(self):
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['get-properties', 'canonical_dna', 'ACGT']) as app:
# run app
app.run()
# test that the CLI produced the correct output
text = captured.stdout.get_text()
self.assertIn('Length: 4', text)
self.assertNotIn('Structure: None', text)
self.assertIn('Formula: C39', text)
self.assertIn('Molecular weight: ', text)
self.assertIn('Charge: -', text)
self.assertEqual(captured.stderr.get_text(), '')
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['get-properties', 'canonical_dna', 'ACGT', '--ph', '7.0']) as app:
# run app
app.run()
# test that the CLI produced the correct output
text = captured.stdout.get_text()
self.assertIn('Length: 4', text)
self.assertIn('Structure: ' + (
'Cc1cn(C2CC(O)C(COP(=O)([O-])OC3CC(OC3COP(=O)([O-])OC3CC(OC3COP(=O)([O-])'
'OC3CC(OC3COP(=O)([O-])[O-])n3cnc4c(N)ncnc34)n3ccc(N)nc3=O)n3cnc4c3nc(N)[nH]'
'c4=O)O2)c(=O)[nH]c1=O'), text)
self.assertIn('Formula: C39', text)
self.assertIn('Molecular weight: 1248.772047992', text)
self.assertIn('Charge: -5', text)
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['get-properties', 'canonical_dna', 'ACGT', '--ph', '7.0']) as app:
# run app
with mock.patch.object(bpforms.BpForm, 'export', side_effect=Exception('error')):
app.run()
# test that the CLI produced the correct output
text = captured.stdout.get_text()
self.assertIn('Length: 4', text)
self.assertIn('Structure: None', text)
self.assertIn('Formula: None', text)
self.assertIn('Molecular weight: None', text)
self.assertIn('Charge: None', text)
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['get-properties', 'canonical_dna', 'ACGT', '--circular']) as app:
# run app
app.run()
# test that the CLI produced the correct output
text = captured.stdout.get_text()
self.assertIn('Length: 4', text)
self.assertNotIn('Structure: None', text)
self.assertIn('Formula: C39', text)
self.assertIn('Molecular weight: ', text)
self.assertIn('Charge: -', text)
self.assertEqual(captured.stderr.get_text(), '')
with self.assertRaises(SystemExit):
with __main__.App(argv=['get-properties', 'canonical_dna', 'ACGT[']) as app:
# run app
app.run()
with self.assertRaisesRegex(SystemExit, '^Form is invalid'):
with __main__.App(argv=['get-properties', 'canonical_dna', (
'ACGT'
'[id: "dI" | structure: "{}" | backbone-displaced-atom: H10 ]'
).format(dI_smiles)]) as app:
# run app
app.run()
def test_get_major_micro_species(self):
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['get-major-micro-species', 'canonical_dna',
('[id: "dI" | structure: "{0}"'
' | l-bond-atom: P30'
' | l-displaced-atom: O33-1'
' | r-bond-atom: O34'
' | r-displaced-atom: H34'
' ]'
'[id: "dI" | structure: "{0}"'
' | l-bond-atom: P30'
' | l-displaced-atom: O33-1'
' | r-bond-atom: O34'
' | r-displaced-atom: H34'
' ]').format(
dIMP_smiles), '14.']) as app:
# run app
app.run()
# test that the CLI produced the correct output
smiles = captured.stdout.get_text()
self.assertEqual(captured.stdout.get_text(),
('OC[C@H]1O[C@H](C[C@@H]1O)[N+]1(C=Nc2c1nc[n-]'
'c2=O)C1CC([O-])C(COP(=O)([O-])OC2CC(OC2COP(=O)'
'([O-])[O-])[N+]2(C=Nc3c2nc[n-]c3=O)[C@H]2C[C@H]'
'(O)[C@@H](CO)O2)O1'))
with self.assertRaises(SystemExit):
with __main__.App(argv=['get-major-micro-species', 'canonical_dna', 'ACGT[', '7.']) as app:
# run app
app.run()
with self.assertRaisesRegex(SystemExit, '^Form is invalid'):
with __main__.App(argv=['get-major-micro-species', 'canonical_dna', (
'ACGT'
'[id: "dI" | structure: "{}" | l-displaced-atom: O33-1 ]'
).format(dI_smiles), '7.']) as app:
# run app
app.run()
def test_viz_alphabet(self):
path = os.path.join(self.tempdir, 'alphabet.html')
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['viz-alphabet', 'canonical_dna', path]) as app:
# run app
app.run()
# test that the CLI produced the correct output
self.assertEqual(captured.stdout.get_text(), 'Visualization saved to {}'.format(path))
self.assertTrue(os.path.isfile(path))
def test_export_ontos(self):
path = os.path.join(self.tempdir, 'onto.obo')
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['export-ontos', path,
'--alphabet', 'protein',
'--max-monomers', '10',
'--max-xlinks', '10']) as app:
# run app
app.run()
# test that the CLI produced the correct output
self.assertEqual(captured.stdout.get_text(), 'Ontology saved to {}'.format(path))
self.assertTrue(os.path.isfile(path))
class BuildAlphabetsCliTestCase(unittest.TestCase):
def setUp(self):
os.rename(bpforms.alphabet.dna.filename, bpforms.alphabet.dna.filename + '.save')
os.rename(bpforms.alphabet.rna.filename, bpforms.alphabet.rna.filename + '.save')
os.rename(bpforms.alphabet.protein.filename, bpforms.alphabet.protein.filename + '.save')
def tearDown(self):
os.rename(bpforms.alphabet.dna.filename + '.save', bpforms.alphabet.dna.filename)
os.rename(bpforms.alphabet.rna.filename + '.save', bpforms.alphabet.rna.filename)
os.rename(bpforms.alphabet.protein.filename + '.save', bpforms.alphabet.protein.filename)
def test_build_alphabets(self):
self.assertFalse(os.path.isfile(bpforms.alphabet.dna.filename))
with capturer.CaptureOutput(merged=False, relay=False) as captured:
with __main__.App(argv=['build-alphabets',
'--alphabet', 'dna',
'--max-monomers', '3']) as app:
# run app
app.run()
# test that the CLI produced the correct output
self.assertIn('Alphabets successfully built', captured.stdout.get_text())
self.assertTrue(os.path.isfile(bpforms.alphabet.dna.filename))
|
StarcoderdataPython
|
1808210
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union
from torch.utils.data.datapipes.utils.common import match_masks
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterableWrapper, IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
try:
import iopath
except ImportError:
iopath = None
U = Union[bytes, bytearray, str]
def _create_default_pathmanager():
from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathManager
pathmgr = PathManager()
pathmgr.register_handler(HTTPURLHandler(), allow_override=True)
pathmgr.register_handler(OneDrivePathHandler(), allow_override=True)
# S3PathHandler is not included in 0.1.8
try:
from iopath.common.s3 import S3PathHandler
pathmgr.register_handler(S3PathHandler(), allow_override=True)
except ImportError:
pass
return pathmgr
class IoPathFileListerIterDataPipe(IterDataPipe[str]):
r"""
Lists the contents of the directory at the provided ``root`` pathname or URL,
and yields the full pathname or URL for each file within the directory.
Args:
root: The root local filepath or URL directory or list of roots to list files from
masks: Unix style filter string or string list for filtering file name(s)
pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created.
Note:
Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL.
S3 URL is supported only with ``iopath``>=0.1.9.
Example:
>>> from torchdata.datapipes.iter import IoPathFileLister
>>> datapipe = IoPathFileLister(root=S3URL)
"""
def __init__(
self,
root: Union[str, Sequence[str], IterDataPipe],
masks: Union[str, List[str]] = "",
*,
pathmgr=None,
) -> None:
if iopath is None:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this datapipe."
"Please use `pip install iopath` or `conda install -c conda-forge iopath`"
"to install the package"
)
if isinstance(root, str):
root = [
root,
]
if not isinstance(root, IterDataPipe):
self.datapipe: IterDataPipe = IterableWrapper(root) # type: ignore[assignment]
else:
self.datapipe = root
self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr
self.masks = masks
def register_handler(self, handler, allow_override=False):
self.pathmgr.register_handler(handler, allow_override=allow_override)
def __iter__(self) -> Iterator[str]:
for path in self.datapipe:
if self.pathmgr.isfile(path):
yield path
else:
for file_name in self.pathmgr.ls(path):
if match_masks(file_name, self.masks):
yield os.path.join(path, file_name)
@functional_datapipe("open_file_by_iopath")
class IoPathFileOpenerIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Opens files from input datapipe which contains pathnames or URLs,
and yields a tuple of pathname and opened file stream (functional name: ``open_file_by_iopath``).
Args:
source_datapipe: Iterable DataPipe that provides the pathnames or URLs
mode: An optional string that specifies the mode in which the file is opened (``"r"`` by default)
pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created.
Note:
Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL.
S3 URL is supported only with `iopath`>=0.1.9.
Example:
>>> from torchdata.datapipes.iter import IoPathFileLister
>>> datapipe = IoPathFileLister(root=S3URL)
>>> file_dp = datapipe.open_file_by_iopath()
"""
def __init__(self, source_datapipe: IterDataPipe[str], mode: str = "r", pathmgr=None) -> None:
if iopath is None:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this datapipe."
"Please use `pip install iopath` or `conda install -c conda-forge iopath`"
"to install the package"
)
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr
self.mode: str = mode
def register_handler(self, handler, allow_override=False):
self.pathmgr.register_handler(handler, allow_override=allow_override)
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for file_uri in self.source_datapipe:
file = self.pathmgr.open(file_uri, self.mode)
yield file_uri, StreamWrapper(file)
def __len__(self) -> int:
return len(self.source_datapipe)
@functional_datapipe("save_by_iopath")
class IoPathSaverIterDataPipe(IterDataPipe[str]):
r"""
Takes in a DataPipe of tuples of metadata and data, saves the data
to the target path which is generated by the ``filepath_fn`` and metadata, and yields the resulting path
in `iopath` format (functional name: ``save_by_iopath``).
Args:
source_datapipe: Iterable DataPipe with tuples of metadata and data
mode: Mode in which the file will be opened for write the data (``"w"`` by default)
filepath_fn: Function that takes in metadata and returns the target path of the new file
pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created.
Note:
Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL.
S3 URL is supported only with `iopath`>=0.1.9.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def filepath_fn(name: str) -> str:
>>> return S3URL + name
>>> name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
>>> source_dp = IterableWrapper(sorted(name_to_data.items()))
>>> iopath_saver_dp = source_dp.save_by_iopath(filepath_fn=filepath_fn, mode="wb")
>>> res_file_paths = list(iopath_saver_dp)
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[Any, U]],
mode: str = "w",
filepath_fn: Optional[Callable] = None,
*,
pathmgr=None,
):
if iopath is None:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this datapipe."
"Please use `pip install iopath` or `conda install -c conda-forge iopath`"
"to install the package"
)
self.source_datapipe: IterDataPipe[Tuple[Any, U]] = source_datapipe
self.mode: str = mode
self.filepath_fn: Optional[Callable] = filepath_fn
self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr
def __iter__(self) -> Iterator[str]:
for meta, data in self.source_datapipe:
filepath = meta if self.filepath_fn is None else self.filepath_fn(meta)
with iopath.file_lock(filepath):
if not os.path.exists(filepath):
with self.pathmgr.open(filepath, self.mode) as f:
f.write(data)
yield filepath
def register_handler(self, handler, allow_override=False):
self.pathmgr.register_handler(handler, allow_override=allow_override)
def __len__(self) -> int:
return len(self.source_datapipe)
|
StarcoderdataPython
|
83691
|
<reponame>icmpnorequest/DASFAA2021_DMVMT
# coding=utf-8
"""
@author: <NAME>
@date: 02/10/2020
"""
from transformers import BertTokenizer
import os
import pandas as pd
import numpy as np
import string
import argparse
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from gensim import corpora, models
from gensim.corpora.dictionary import Dictionary
from gensim.models import LdaMulticore
from spellchecker import SpellChecker
# Define dataset path
WWW2015_sep_path = "../dataset/WWW2015_sep"
# Sentiment file name
denmark_senti_file = "denmark_sentiment.csv"
france_senti_file = "france_sentiment.csv"
germany_senti_file = "germany_sentiment.csv"
uk_senti_file = "uk_sentiment.csv"
us_senti_file = "us_sentiment.csv"
# Topic file name
denmark_topic_file = "denmark_topic.csv"
france_topic_file = "france_topic.csv"
germany_topic_file = "germany_topic.csv"
uk_topic_file = "uk_topic.csv"
us_topic_file = "us_topic.csv"
# Define columns names
column_names = ['text', 'birth year', 'gender', 'rating', 'location', 'sentiment tokens', 'sentiment values']
new_column_names = ['text', 'birth year', 'gender', 'rating', 'location', 'sentiment tokens', 'sentiment values', 'topic tokens']
# Create the stopwords
stopwords = set(stopwords.words('english'))
# Create the punctuations
punctuations = set(string.punctuation)
# Parser
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--location', type=str, default='denmark', required=True, help='Extract topic words of different location')
args = parser.parse_args()
class TFIDF_LDA:
def __init__(self, bert_tokenizer):
self.bert_tokenizer = bert_tokenizer
def get_text(self, filename, col_names):
"""
It is a function to get text in each row.
"""
df = pd.read_csv(filename, names=col_names)
text_list = df['text'].tolist()
return df, text_list
def cleaning(self, text, tokenizer, stopwords, puntuations, spell_checker, lemma):
"""
It is a function to clean the text.
"""
# 1) Tokenize
text_tokens = tokenizer.tokenize(text)
# 2) Correct mis-spelling words
spell_free = [spell_checker.correction(token) for token in text_tokens]
# 3) Remove stopwords
stop_free = [token for token in spell_free if token not in stopwords]
# 4) Remove punctuations
punc_free = [token for token in stop_free if token not in puntuations]
# 5) Lemmatize
lemma_free = [lemma.lemmatize(token) for token in punc_free]
print("Text cleaning completed.")
return [lemma_free]
def build_dataset(self, clean_text):
"""
It is a function to build corpus.
"""
# Create a corpus from a list of texts
dictionary = Dictionary(clean_text)
corpus = [dictionary.doc2bow(text) for text in clean_text]
print("Build dataset successfully.")
return dictionary, corpus
def get_topic(self, dictionary, corpus):
"""
It is a function to build TF-IDF and get topic words
"""
# 1) Initialize TF-IDF
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
# 2) Build LDA model
topic_list = []
try:
            lda_model_tfidf = LdaMulticore(corpus_tfidf, num_topics=1, id2word=dictionary, passes=2, workers=4)
            print(lda_model_tfidf.print_topics(-1))
            # Collect the top words of the single topic so the caller gets them back
            topic_list.extend(word for word, _ in lda_model_tfidf.show_topic(0))
        except ValueError:
            topic_list.append("nothing")
# Return a <str> object
return ' '.join(topic_list)
def main():
################################
# 0. Load Instance #
################################
# 1) Tokenizer
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# 2) Lemmatizer
lemma = WordNetLemmatizer()
# 3) Spell Checker
spell = SpellChecker()
print("0. Preparation successfully.")
################################
# 1. Load TF-IDF LDA Model #
################################
# Initialize
tfidfLDA = TFIDF_LDA(bert_tokenizer=bert_tokenizer)
print("1. Load TF-IDF LDA model successfully.")
################################
# 2. Complete topic extraction #
################################
print("2. Start Task: {}.".format(args.location))
# 1) Get train text
df, text_list = tfidfLDA.get_text(filename=os.path.join(WWW2015_sep_path, args.location + "_sentiment.csv"),
col_names=column_names)
topics_list = []
for text in text_list:
print("No. {}".format(text_list.index(text)))
# 2) Clean text
cleaned_text = tfidfLDA.cleaning(text=text,
tokenizer=bert_tokenizer,
stopwords=stopwords,
puntuations=punctuations,
spell_checker=spell,
lemma=lemma)
# 3) Build dataset
dictionary, corpus = tfidfLDA.build_dataset(clean_text=cleaned_text)
# 4) Get topic words
topic = tfidfLDA.get_topic(dictionary=dictionary, corpus=corpus)
topics_list.append(topic)
# 5) Save to new train csv file
assert len(topics_list) == len(df)
df.loc[:, 'topic tokens'] = topics_list
df.to_csv(os.path.join(WWW2015_sep_path, args.location + "_topic.csv"), index=False, header=False)
print("Save to a new csv file successfully.")
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3477226
|
#!/usr/bin/env python
# coding: utf-8
# # A brief introduction to using Z3 from Python
# A tutorial on Z3Py, the Python interface to Microsoft's popular Z3 solver, can be found at https://ericpony.github.io/z3py-tutorial/guide-examples.htm.
#
# We start by importing the Z3 module.
# In[ ]:
from z3 import *
# The functions `Int(), Real(), Bool()` create a Z3 variable of the corresponding type. The `solve` function solves a system of constraints. For example, to find a solution to the system of equations $x > 2$, $y < 10$ and $x + 2 \times y = 7$ we can use the following program.
# In[ ]:
x = Int('x')
y = Int('y')
solve(x > 2, y < 10, x + 2*y == 7)
# One more example, now with the theory of reals, to find a solution to the system of equations $x^2 + y^2 > 3$ and $x^3 + y < 5$.
# In[ ]:
x = Real('x')
y = Real('y')
solve(x**2 + y**2 > 3, x**3 + y < 5)
# Z3 can also be used as a SAT solver. To do so, it is enough to use variables of type Bool and propositional formulas. For example, the following program checks whether the conjunction of the formulas $p \rightarrow q$, $r \leftrightarrow \neg q$, and $\neg p \vee r$ is satisfiable.
# In[ ]:
p = Bool('p')
q = Bool('q')
r = Bool('r')
solve(Implies(p, q), r == Not(q), Or(Not(p), r))
# We can also use Z3 to simplify expressions.
# In[ ]:
p = Bool('p')
q = Bool('q')
print (And(p, q, True))
print (simplify(And(p, q, True)))
print (simplify(And(p, False)))
# In[ ]:
p = Bool('p')
x = Real('x')
solve(Or(x < 5, x > 10), Or(p, x**2 == 2), Not(p))
# The precision with which real numbers are displayed can be controlled by changing the `precision` option.
# Z3 also allows solving sets of constraints that involve variables of several types.
# In[ ]:
set_option(precision=30)
solve(Or(x < 5, x > 10), Or(p, x**2 == 2), Not(p))
# The command `Solver()` creates a general-purpose solver. Initially it has no constraints; it is empty.
# In[ ]:
x = Int('x')
y = Int('y')
s = Solver()
print(s)
# Constraints can be added using the `add` method. The `check` method solves the declared constraints. The result is `sat` if a solution is found.
# In[ ]:
s.add(x > 10, y == x + 2)
print(s)
print("Solving constraints in the solver s ...")
print(s.check())
# The result is `unsat` if there is no solution.
# In some applications we want to explore several similar problems that share many constraints. We can use the `push` and `pop` methods to do this. Each *solver* keeps a stack of assertions (constraints). The `push` method creates a new scope, saving the current size of the stack. The `pop` method removes every assertion added between it and the matching `push`. The `check` method operates on the set of assertions currently on the stack.
# In[ ]:
print("Create a new scope...")
s.push()
s.add(y < 11)
print(s)
print("Solving updated set of constraints...")
print(s.check())
# In[ ]:
print("Restoring state...")
s.pop()
print(s)
print("Solving restored set of constraints...")
print(s.check())
# Finally, a *solver* may be unable to decide the satisfiability of a set of constraints.
# In that case it returns `unknown`.
# In[ ]:
x = Real('x')
s = Solver()
s.add(2**x == 3)
print(s.check())
# ### Example
# Cryptarithms is a puzzle consisting of a mathematical equation between unknown numbers whose digits are represented by letters. Each letter must represent a different digit, and the leading digit of a multi-digit number must not be zero.
#
# We want to know which digits correspond to the letters involved in the following equation:
# ```
# TWO + TWO = FOUR
# ```
# We can model the problem in a theory of integers. Each letter gives rise to an integer variable ($T$,$W$,$O$,$F$,$U$, and $R$), and to represent the equation above we write each summand as an arithmetic expression in which each letter is multiplied by its "place value" (in base 10).
#
# Solving this problem amounts to solving the following system of equations:
# $$
# \left\{
# \begin{array}{l}
# 0 \le T \le 9\\
# \cdots\\
# 0 \le R \le 9\\
# T \neq W \neq O \neq F \neq U \neq R \\
# T \neq 0\\
# F \neq 0\\
# (100 \times T + 10 \times W + O) + (100 \times T + 10 \times W + O) = 1000 \times F + 100 \times O + 10 \times U + R
# \end{array}
# \right.
# $$
#
# In Z3 this system can be solved as follows.
# In[ ]:
T, W, O, F, U, R = Ints('T W O F U R')
s = Solver()
s.add(And(0<=T,T<=9))
s.add(And(0<=W,W<=9))
s.add(And(0<=O,O<=9))
s.add(And(0<=F,F<=9))
s.add(And(0<=U,U<=9))
s.add(And(0<=R,R<=9))
s.add(Distinct(T, W, O, F, U, R))
s.add(Not(T==0))
s.add(F!=0)
s.add((T*100+W*10+O)+(T*100+W*10+O)==F*1000+O*100+U*10+R)
r = s.check()
if r==sat :
m = s.model()
print(m)
else:
print("Não tem solução.")
# We can inspect the set of constraints in the solver s using the `assertions` method.
# In[ ]:
for c in s.assertions():
print(c)
# We can inspect the generated model `m`. In the program below, `decls` is a method that returns the variables assigned in the model, `name` returns the name of a variable assigned in the model, and `m[d]` is the value assigned to `d` in the model `m`. Note that this value is not a primitive Python type. For example, to convert it to a Python integer you must use the `as_long` method. For more information on these conversion methods see the following Stack Overflow post: https://stackoverflow.com/questions/12598408/z3-python-getting-python-values-from-model/12600208
# In[ ]:
for d in m.decls():
print("%s = %d" % (d.name(), m[d].as_long()))
# How can we find out whether this puzzle has other solutions?
# We can add constraints that exclude the solution produced by the *solver*, and check again.
# In[ ]:
vs = [T, W, O, F, U, R]
while s.check() == sat:
m = s.model()
print(m)
    s.add(Or([x != m[x] for x in vs]))  # to exclude the same assignments used in the previous model
                                        # not(And(...)) --> Or(Not(...))
# ### Exercise 1
#
# Define a function `prove` that checks whether a propositional formula is valid and use that function to prove De Morgan's law $A \wedge B = \neg (\neg A \vee \neg B)$.
# In[ ]:
def prove(f):
    # complete here
s = Solver()
s.add(Not(f))
return (s.check() == unsat)
# complete here
a, b = Bools('A B')
demorgan = And(a,b) == Not(Or(Not(a), Not(b)))
if prove(demorgan):
print("De Morgan is valid!")
# ## Modelling in Propositional Logic
# Consider the following problem:
# ```
# - Maria cannot meet on Wednesday.
# - Peter can only meet either on Monday, Wednesday or Thursday.
# - Anne cannot meet on Friday.
# - Mike can meet neither on Tuesday nor on Thursday
#
# When can the meeting take place?
# ```
# Let's use Z3 to find the solution.
#
# 1. We model the problem in Propositional Logic, creating a propositional variable for each day of the week ($\mathit{Mon}$,$\mathit{Tue}$,$\mathit{Wed}$,$\mathit{Thu}$, and $\mathit{Fri}$), with the following semantics: if the variable is `True` the meeting can take place on that day, otherwise it is `False`.
#
# 2. Next, we have to model each of the constraints, adding the corresponding logical formulas.
#
# $$
# \begin{array}{c}
# \neg \mathit{Wed}\\
# \mathit{Mon} \vee \mathit{Wed} \vee \mathit{Thu}\\
# \neg \mathit{Fri}\\
# \neg \mathit{Tue} \wedge \neg \mathit{Thu}\\
# \end{array}
# $$
#
# 3. Finally, we test whether the set of constraints is satisfiable and extract the computed solution.
#
# In[ ]:
Mon, Tue, Wed, Thu, Fri = Bools('Monday Tuesday Wednesday Thursday Friday')
s = Solver()
s.add(Not(Wed))
s.add(Or(Mon,Wed,Thu))
s.add(Not(Fri),And(Not(Tue),Not(Thu))) # Several constraints can also be passed to the solver at once
if s.check() == sat:
m = s.model()
print(m)
else:
print("The meeting cannot take place!")
# ### Exercise 2
#
# Change the code above so that it prints only the day on which the meeting should take place (instead of printing the whole model).
# In[ ]:
# complete here
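# One possible sketch (an assumption of this text, not the only answer): reuse the
# solver `s` and the Bool variables from the cell above and print only the days
# assigned True in the model.
days = [Mon, Tue, Wed, Thu, Fri]
if s.check() == sat:
    m = s.model()
    for d in days:
        if is_true(m.evaluate(d, model_completion=True)):
            print("The meeting can take place on %s." % d)
else:
    print("The meeting cannot take place!")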
# ### Exercise 3
#
# Consider the following riddle:
# ```
# - If the unicorn is mythical, then it is immortal.
# - If the unicorn is not mythical, then it is a mortal mammal.
# - If the unicorn is either immortal or a mammal, then it is horned.
# - The unicorn is magical if it is horned.
#
# Given these constraints:
# - Is the unicorn magical?
# - Is it horned?
# - Is it mythical?
# ```
# Model the problem in Propositional Logic and use Z3 to solve it.
#
# **Hint:** Solve the problem with the help of 5 propositional variables, corresponding to the 5 properties of the unicorn. Recall that the claim $A_1, \ldots, A_n \models B$ is valid if and only if the set of constraints $\{A_1, \ldots, A_n, \neg B\}$ is inconsistent. Take advantage of the `push` and `pop` methods to answer the several questions incrementally with the same solver.
# In[ ]:
# complete here
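# One possible sketch: encode the four statements and, for each question, use
# push/pop to check whether adding the negation of the queried property makes the
# constraint set unsat (i.e. the property is entailed).
mythical, immortal, mammal, horned, magical = Bools('mythical immortal mammal horned magical')
s = Solver()
s.add(Implies(mythical, immortal))
s.add(Implies(Not(mythical), And(Not(immortal), mammal)))
s.add(Implies(Or(immortal, mammal), horned))
s.add(Implies(horned, magical))
for prop in [magical, horned, mythical]:
    s.push()
    s.add(Not(prop))  # A1,...,An |= B  iff  {A1,...,An, Not(B)} is inconsistent
    print(prop, "is entailed" if s.check() == unsat else "is not entailed")
    s.pop()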
# ### Exercise 4
#
# Consider the following problem.
# ```
# We have 3 chairs in a row (left, middle, and right) and we need to seat 3 guests: Ana, Susana and Pedro. However:
#
# - Ana does not want to sit next to Pedro.
# - Ana does not want the left chair.
# - Susana does not want to sit to the left of Pedro.
#
# Is it possible to seat the guests? How?
# ```
# Model the problem in Propositional Logic and use Z3 to solve it. Do not forget that everyone must be seated and that only one person can sit in each chair.
#
# **Hint:** Create a propositional variable (with a suggestive name) for each pair $(p,c)$, where $p$ is a person and $c$ is a chair. If person $p$ sits in chair $c$ the value of the corresponding variable will be `True`, otherwise it will be `False`. Alternatively, you can also create a dictionary `v` of propositional variables such that `v[p][c]` corresponds to the variable of the pair $(p,c)$.
# In[ ]:
# complete here
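# One possible sketch; it reads "next to" and "to the left of" as referring to
# adjacent chairs, which is an interpretation rather than something stated in the
# problem text.
people = ['Ana', 'Susana', 'Pedro']
chairs = ['left', 'middle', 'right']
adjacent = [('left', 'middle'), ('middle', 'right')]
v = {p: {c: Bool('%s_%s' % (p, c)) for c in chairs} for p in people}
s = Solver()
for p in people:  # every person sits in exactly one chair
    s.add(Or([v[p][c] for c in chairs]))
    for c1 in chairs:
        for c2 in chairs:
            if c1 != c2:
                s.add(Not(And(v[p][c1], v[p][c2])))
for c in chairs:  # at most one person per chair
    for p1 in people:
        for p2 in people:
            if p1 != p2:
                s.add(Not(And(v[p1][c], v[p2][c])))
for c1, c2 in adjacent:  # Ana does not sit next to Pedro
    s.add(Not(And(v['Ana'][c1], v['Pedro'][c2])))
    s.add(Not(And(v['Ana'][c2], v['Pedro'][c1])))
s.add(Not(v['Ana']['left']))  # Ana refuses the left chair
for c1, c2 in adjacent:  # Susana does not sit (immediately) to the left of Pedro
    s.add(Not(And(v['Susana'][c1], v['Pedro'][c2])))
if s.check() == sat:
    m = s.model()
    for p in people:
        for c in chairs:
            if is_true(m.evaluate(v[p][c], model_completion=True)):
                print(p, '->', c)
else:
    print("It is not possible to seat the guests.")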
|
StarcoderdataPython
|
6684309
|
<gh_stars>1-10
from os.path import exists
def notify(title, subtitle, message):
from os import system
t = '-title {!r}'.format(title)
s = '-subtitle {!r}'.format(subtitle)
m = '-message {!r}'.format(message)
a = '-sender {!r}'.format("com.typemytype.robofont")
system('terminal-notifier {}'.format(' '.join([m, t, s, a])))
if exists("/usr/bin/terminal-notifier"):
use_notifications = True
else:
use_notifications = False
print "In order to use notifications, install the command line program with:"
print "$ sudo gem install terminal-notifier"
if use_notifications:
notify("Hello from RoboFont", "Hello", "World")
|
StarcoderdataPython
|
5112904
|
#!/usr/bin/env python
"""Various algorithms concerning primes."""
def is_prime(n):
"""Detect if a number is a prime or not (robust method).
    :param n: A positive number
    :type n: int
:returns: boolean
:rtype: bool
"""
if n <= 1:
return False
elif n == 2:
return True
elif n > 2 and n % 2 == 0:
return False
else:
for i in range(3, int(pow(n, 0.5)) + 1, 2):
if n % i == 0:
return False
return True
def quick_prime(n):
"""Return if a number is prime or not (easy to remember method)
This method isn't as robust as is_prime(), but it is easier to
remember.
"""
# we only need to check up to square root of n
if n <= 1:
return False
else:
for i in range(2, int(pow(n, 0.5)) + 1):
if n % i == 0:
return False
return True
def prime_factors(n):
"""Find the prime factors of a number
:param n: A number
:type n: int
:returns: Prime factors of n
:rtype: list
"""
prime = True
for i in range(2, int(pow(n, 0.5)) + 1):
if n % i == 0:
prime = False
return prime_factors(i) + prime_factors(n // i)
if prime:
return [n]
def unique_prime_factors(n):
"""Return the unique prime factors of a number
:param n: A number
:type n: int
:returns: Prime factors of n
:rtype: set
"""
stack = [n]
res = set()
while stack:
j = stack.pop()
prime = True
if j not in res:
for i in range(2, int(pow(j, 0.5)) + 1):
if j % i == 0:
prime = False
stack.append(i)
stack.append(j // i)
if prime:
res.add(j)
return res
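# A few illustrative calls (not part of the original module), handy as a quick
# sanity check of the functions above:
if __name__ == "__main__":
    print(is_prime(97))               # True
    print(quick_prime(1))             # False
    print(prime_factors(360))         # [2, 2, 2, 3, 3, 5]
    print(unique_prime_factors(360))  # {2, 3, 5}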
|
StarcoderdataPython
|
8050483
|
import pytest
from bank_bot.banking_system.client_factory import BankingClientFactory
from bank_bot.banking_system.banking_system_class_based import BankingClient
from bank_bot.banking_system.user_class import User
from bank_bot.banking_system import UserError, MessageError
from bank_bot.settings import NO_MESSAGES_FOUND
def test_prepare_message(database, mock_message):
User.create_admin(1, 1, database)
character_hash = User.create_user(2, 2, "Test user", database)
client = BankingClientFactory(database).create_client(mock_message)
admin = client.get_user_by_user_hash("0000000000")
with pytest.raises(UserError):
client.prepare_message("/message 1234567890 LaLaLaLa")
with pytest.raises(MessageError):
client.prepare_message(f"/message {client.user.character_hash} LaLaLaLa")
chat_id, message = client.prepare_message("/message 0000000000 LaLaLaLa")
assert message == "LaLaLaLa"
assert chat_id == admin.chat_id
def test_inspect_messages(database, mock_message):
character_hash = User.create_user(2, 2, "Test user", database)
character_hash_2 = User.create_user(3, 3, "Test user 2", database)
client = BankingClientFactory(database).create_client(mock_message)
assert client.inspect_messages(True) == NO_MESSAGES_FOUND
assert client.inspect_messages(False) == NO_MESSAGES_FOUND
chat_id, message = client.prepare_message(f"/message {character_hash_2} LaLaLaLa")
client = BankingClientFactory(database).create_client(mock_message)
assert client.inspect_messages(True) != NO_MESSAGES_FOUND
assert client.inspect_messages(False) == NO_MESSAGES_FOUND
assert client.inspect_messages(False, character_hash_2) != NO_MESSAGES_FOUND
def test_inspect_all_transactions(database, mock_message):
character_hash = User.create_user(2, 2, "Test user", database)
character_hash_2 = User.create_user(3, 3, "Test user 2", database)
client = BankingClientFactory(database).create_client(mock_message)
assert client.inspect_all_messages() == NO_MESSAGES_FOUND
assert client.inspect_all_messages(character_hash_2) == NO_MESSAGES_FOUND
reciever_chat_id, message = client.prepare_message(f"/message {character_hash_2} LaLaLaLa")
client = BankingClientFactory(database).create_client(mock_message)
assert client.inspect_all_messages() != NO_MESSAGES_FOUND
assert client.inspect_all_messages(character_hash_2) != NO_MESSAGES_FOUND
def test_inspect_pair_transactions(database, mock_message):
character_hash = User.create_user(2, 2, "Test user", database)
character_hash_2 = User.create_user(3, 3, "Test user 2", database)
client = BankingClientFactory(database).create_client(mock_message)
assert client.inspect_pair_history_messages(f"/messages_history_pair {character_hash_2}") == NO_MESSAGES_FOUND
assert client.inspect_pair_history_messages(f"/messages_history_pair {character_hash_2}", character_hash_2, character_hash) == NO_MESSAGES_FOUND
reciever_chat_id, message = client.prepare_message(f"/message {character_hash_2} LaLaLaLa")
client = BankingClientFactory(database).create_client(mock_message)
assert client.inspect_pair_history_messages(f"/messages_history_pair {character_hash_2}") != NO_MESSAGES_FOUND
assert client.inspect_pair_history_messages(f"/messages_history_pair {character_hash_2}", character_hash_2, character_hash) != NO_MESSAGES_FOUND
assert client.inspect_pair_history_messages(f"/messages_history_pair {character_hash_2}") == client.inspect_pair_history_messages(f"/messages_history_pair {character_hash_2}", character_hash_2, character_hash)
|
StarcoderdataPython
|
6445747
|
#!/usr/bin/env python
import argparse
import datetime
import os
from celery import Celery
from random import SystemRandom
from apiclient.http import HttpMockSequence
from mail_service.gmail_service.worker import check_account_v1
celery = Celery('EOD_TASKS')
celery.config_from_object('celeryconfig')
cryptogen = SystemRandom()
HISTORY_ID = 6177392
USER = "<EMAIL>"
USER_ID = "fee41cac0a4569a04d8a3de9"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num', required=True, type=int, help="Number of items to put on queue.")
parser.add_argument('-p', '--percent', required=False, type=int, help="Percent (0-100) of checks that result in a (simulated) new Gmail message to process.")
parser.add_argument('-d', '--date', action='store_true', help="Provide current time on each request? (requests >=15s old will be dropped by the message processor)")
return parser.parse_args()
def add_check_account_task(num, date, percent):
now = None
if not percent:
percent = 100
message = "Adding %d tasks to the queue." % num
if date:
now = datetime.datetime.now()
message = "%s Current time is being included on each request." % message
message = "%s %d%% of checks will result in a (simulated) new Gmail message." % (message, percent)
print(message)
# Read in the JSON response simulations.
path = os.path.dirname(os.path.realpath(__file__))
history_hit = open(os.path.join(path, "history_hit.json")).read()
history_miss = open(os.path.join(path, "history_miss.json")).read()
message = open(os.path.join(path, "message.json")).read()
single_message_metadata = open(os.path.join(path, "single_message_metadata.json")).read()
insert = open(os.path.join(path, "insert.json")).read()
profile = open(os.path.join(path, "profile.json")).read()
for x in range(1, (num + 1)):
rand = cryptogen.randrange(1,100)
if rand <= percent:
http = HttpMockSequence([
({'status': '200'}, history_hit),
({'status': '200', 'content-type': 'multipart/mixed; boundary="batch_VZm0VD2PDxI_AAZXUZ1osU4"'}, message),
({'status': '200'}, single_message_metadata),
({'status': '200'}, insert),
({'status': '200'}, ''), # Delete message
({'status': '200'}, profile)])
else:
http = HttpMockSequence([
({'status': '200'}, history_miss)])
check_account_v1.delay(USER, USER_ID, HISTORY_ID, queue_time=now, http=http)
def main():
args = parse_args()
add_check_account_task(args.num, args.date, args.percent)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
5176448
|
<filename>src/training/models/textual_paper.py<gh_stars>1-10
from src.features.sequences.transformer import SequenceMetadata
import tensorflow as tf
from typing import List
import logging
import fasttext.util
from tqdm import tqdm
from src.features.knowledge import DescriptionKnowledge
from .base import BaseEmbedding, BaseModel
from .config import ModelConfig, TextualPaperModelConfig
class DescriptionPaperEmbedding(BaseEmbedding, tf.keras.Model):
def __init__(
self,
descriptions: DescriptionKnowledge,
config: ModelConfig,
textual_config: TextualPaperModelConfig,
):
super(DescriptionPaperEmbedding, self).__init__()
self.config = config
self.textual_config = textual_config
self.num_features = len(descriptions.vocab)
self.num_hidden_features = 0
self.num_connections = 0
self._init_basic_embedding_variables(descriptions)
self._init_convolution_layers(descriptions)
def _load_fasttext_model(self):
logging.info("(Down)loading fasttext English language model")
fasttext.util.download_model("en", if_exists="ignore")
return fasttext.load_model("cc.en.300.bin")
def _init_basic_embedding_variables(self, descriptions: DescriptionKnowledge):
logging.info("Initializing Description embedding variables")
embeddings = {}
word_model = self._load_fasttext_model()
pad_vector = tf.constant(0.0, shape=(word_model.get_dimension(),))
for idx, description_words in tqdm(
descriptions.descriptions.items(),
desc="Initializing Description embedding variables",
):
embeddings[idx] = tf.stack(
[
tf.constant(word_model.get_word_vector(word))
for word in description_words
]
+ [
pad_vector
for i in range(descriptions.max_description_length)
if i >= len(description_words)
],
axis=0,
)
concatenated_embeddings = tf.stack(
[embeddings[i] for i in range(len(descriptions.descriptions))], axis=0
) # shape: (num_variables, max_words_per_description, word_embedding_size)
self.basic_feature_embeddings = self.add_weight(
initializer=tf.keras.initializers.constant(
value=concatenated_embeddings.numpy(),
),
trainable=self.config.base_feature_embeddings_trainable,
name="description_paper_embeddings/basic_hidden_embeddings",
shape=(
self.num_features,
descriptions.max_description_length,
word_model.get_dimension(),
),
)
def _init_convolution_layers(self, descriptions: DescriptionKnowledge):
logging.info("Initializing Description convolution layers")
conv_layers = {
kernel_size: tf.keras.layers.Conv1D(
filters=self.textual_config.num_filters,
kernel_size=kernel_size,
activation="relu",
input_shape=(
descriptions.max_description_length,
self.basic_feature_embeddings.shape[2],
),
kernel_regularizer=super(DescriptionPaperEmbedding, self)._get_kernel_regularizer(scope="conv"),
)
for kernel_size in self.textual_config.kernel_sizes
}
pool_layers = {
kernel_size: tf.keras.layers.MaxPooling1D(
pool_size=descriptions.max_description_length - kernel_size + 1,
strides=None,
)
for kernel_size in self.textual_config.kernel_sizes
}
input_layer = tf.keras.layers.Input(
shape=self.basic_feature_embeddings.shape[1:],
)
output = tf.keras.layers.Concatenate(axis=1)(
[
tf.keras.layers.Flatten()(
pool_layers[kernel_size](conv_layers[kernel_size](input_layer))
)
for kernel_size in self.textual_config.kernel_sizes
]
) # shape: (num_variables, num_pool_layers * filter_dim))
self.embedding_model = tf.keras.models.Model(inputs=input_layer, outputs=output)
def _final_embedding_matrix(self):
return self.embedding_model(
self.basic_feature_embeddings
) # shape: (num_variables, pool_layers * filter_dim)
def call(
self, values
): # values shape: (dataset_size, max_sequence_length, num_leaf_nodes)
embedding_matrix = self._final_embedding_matrix()
return tf.linalg.matmul(
values, embedding_matrix
) # shape: (dataset_size, max_sequence_length, embedding_size)
class DescriptionPaperModel(BaseModel):
def _get_embedding_layer(
self, metadata: SequenceMetadata, knowledge: DescriptionKnowledge
) -> DescriptionPaperEmbedding:
return DescriptionPaperEmbedding(
knowledge, self.config, textual_config=TextualPaperModelConfig()
)
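# Shape summary (a reading aid derived from the inline shape comments above):
# basic_feature_embeddings is (num_features, max_description_length, 300)
# fasttext word vectors; each Conv1D/MaxPooling1D pair collapses the word axis,
# so embedding_model outputs (num_features, len(kernel_sizes) * num_filters);
# call() then multiplies a (dataset_size, max_sequence_length, num_features)
# tensor by that matrix to get (dataset_size, max_sequence_length, embedding_size).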
|
StarcoderdataPython
|
12841539
|
import main
def test_convert_case_error():
assert main.convert("error case", "error case") == main.validation_error
def test_convert_camel_case():
assert main.convert("camelCase", "my own camel case") == "myOwnCamelCase"
assert main.convert("camelCase", "camel") == "camel"
assert main.convert("camelCase", "camel case") == "camelCase"
def test_convert_snake_case():
assert main.convert("snake_case", "my own snake case") == "my_own_snake_case"
assert main.convert("snake_case", "snake") == "snake"
assert main.convert("snake_case", "snake case") == "snake_case"
def test_convert_constant_case():
assert main.convert("CONSTANT_CASE", "my own constant case") == "MY_OWN_CONSTANT_CASE"
assert main.convert("CONSTANT_CASE", "constant") == "CONSTANT"
assert main.convert("CONSTANT_CASE", "constant case") == "CONSTANT_CASE"
def test_convert_kebab_case():
assert main.convert("kebab-case", "my own kebab case") == "my-own-kebab-case"
assert main.convert("kebab-case", "kebab") == "kebab"
assert main.convert("kebab-case", "kebab case") == "kebab-case"
def test_convert_to_other_case():
assert main.convert("camelCase", "snake_case") == "snakeCase"
assert main.convert("camelCase", "CONSTANT_CASE") == "constantCase"
assert main.convert("camelCase", "kebab-case") == "kebabCase"
assert main.convert("snake_case", "camelCase") == "camel_case"
assert main.convert("snake_case", "CONSTANT_CASE") == "constant_case"
assert main.convert("snake_case", "kebab-case") == "kebab_case"
assert main.convert("CONSTANT_CASE", "camelCase") == "CAMEL_CASE"
assert main.convert("CONSTANT_CASE", "snake_case") == "SNAKE_CASE"
assert main.convert("CONSTANT_CASE", "kebab-case") == "KEBAB_CASE"
assert main.convert("kebab-case", "camelCase") == "camel-case"
assert main.convert("kebab-case", "CONSTANT_CASE") == "constant-case"
assert main.convert("kebab-case", "snake_case") == "snake-case"
|
StarcoderdataPython
|
3420173
|
"""
Decorators for the DB API 2.0 implementation.
"""
# pylint: disable=invalid-name, unused-import
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, TypeVar, Union, cast
from datajunction.sql.dbapi.exceptions import ProgrammingError
if TYPE_CHECKING:
from datajunction.sql.dbapi.connection import Connection
from datajunction.sql.dbapi.cursor import Cursor
METHOD = TypeVar("METHOD", bound=Callable[..., Any])
def check_closed(method: METHOD) -> METHOD:
"""
Decorator that checks if a connection or cursor is closed.
"""
@wraps(method)
def wrapper(self: Union["Connection", "Cursor"], *args: Any, **kwargs: Any) -> Any:
if self.closed:
raise ProgrammingError(f"{self.__class__.__name__} already closed")
return method(self, *args, **kwargs)
return cast(METHOD, wrapper)
def check_result(method: METHOD) -> METHOD:
"""
Decorator that checks if the cursor has results from ``execute``.
"""
@wraps(method)
def wrapper(self: "Cursor", *args: Any, **kwargs: Any) -> Any:
if self._results is None: # pylint: disable=protected-access
raise ProgrammingError("Called before ``execute``")
return method(self, *args, **kwargs)
return cast(METHOD, wrapper)
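
# A minimal usage sketch (the _DemoCursor class below is hypothetical and for
# illustration only; the real Cursor lives in datajunction.sql.dbapi.cursor).
if __name__ == "__main__":
    class _DemoCursor:
        def __init__(self) -> None:
            self.closed = False
            self._results = None

        @check_closed
        def execute(self, operation: str) -> "_DemoCursor":
            self._results = iter([(1,)])
            return self

        @check_closed
        @check_result
        def fetchone(self) -> Any:
            return next(self._results, None)

    demo = _DemoCursor()
    try:
        demo.fetchone()
    except ProgrammingError as exc:
        print(exc)  # "Called before ``execute``"
    print(demo.execute("SELECT 1").fetchone())  # (1,)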
|
StarcoderdataPython
|
11373492
|
<filename>generated-libraries/python/netapp/fpolicy/fpolicy_scope_config.py
from netapp.netapp_object import NetAppObject
class FpolicyScopeConfig(NetAppObject):
"""
Vserver FPolicy Scope configuration and management on name
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_export_policies_to_include = None
@property
def export_policies_to_include(self):
"""
Export policies to include for file access monitoring. By
default no export policy is selected.
Attributes: optional-for-create, modifiable
"""
return self._export_policies_to_include
@export_policies_to_include.setter
def export_policies_to_include(self, val):
if val != None:
self.validate('export_policies_to_include', val)
self._export_policies_to_include = val
_volumes_to_include = None
@property
def volumes_to_include(self):
"""
Volumes that are active for the file policy. The list can
include items which are regular expressions, such as
'vol*' or 'user?'. By default no volume is selected.
Attributes: optional-for-create, modifiable
"""
return self._volumes_to_include
@volumes_to_include.setter
def volumes_to_include(self, val):
if val != None:
self.validate('volumes_to_include', val)
self._volumes_to_include = val
_volumes_to_exclude = None
@property
def volumes_to_exclude(self):
"""
Volumes that are inactive for the file policy. The list
can include items which are regular expressions, such as
'vol*' or 'user?'. Note that if a policy has both an
exclude list and an include list, the include list is
ignored by the filer when processing user requests. By
default no volume is selected.
Attributes: optional-for-create, modifiable
"""
return self._volumes_to_exclude
@volumes_to_exclude.setter
def volumes_to_exclude(self, val):
if val != None:
self.validate('volumes_to_exclude', val)
self._volumes_to_exclude = val
_file_extensions_to_exclude = None
@property
def file_extensions_to_exclude(self):
"""
File extensions excluded for screening. By default no
file extension is selected.
Attributes: optional-for-create, modifiable
"""
return self._file_extensions_to_exclude
@file_extensions_to_exclude.setter
def file_extensions_to_exclude(self, val):
if val != None:
self.validate('file_extensions_to_exclude', val)
self._file_extensions_to_exclude = val
_policy_name = None
@property
def policy_name(self):
"""
Name of the policy.
Attributes: key, required-for-create, non-modifiable
"""
return self._policy_name
@policy_name.setter
def policy_name(self, val):
if val != None:
self.validate('policy_name', val)
self._policy_name = val
_export_policies_to_exclude = None
@property
def export_policies_to_exclude(self):
"""
Export Policies to exclude for file access monitoring. By
default no export policy is selected.
Attributes: optional-for-create, modifiable
"""
return self._export_policies_to_exclude
@export_policies_to_exclude.setter
def export_policies_to_exclude(self, val):
if val != None:
self.validate('export_policies_to_exclude', val)
self._export_policies_to_exclude = val
_check_extensions_on_directories = None
@property
def check_extensions_on_directories(self):
"""
Indicates whether directory names are also subjected to
extensions check, similar to file names. By default, the
value is false.
Attributes: optional-for-create, modifiable
"""
return self._check_extensions_on_directories
@check_extensions_on_directories.setter
def check_extensions_on_directories(self, val):
if val != None:
self.validate('check_extensions_on_directories', val)
self._check_extensions_on_directories = val
_vserver = None
@property
def vserver(self):
"""
Vserver
Attributes: key, non-creatable, non-modifiable
"""
return self._vserver
@vserver.setter
def vserver(self, val):
if val != None:
self.validate('vserver', val)
self._vserver = val
_shares_to_exclude = None
@property
def shares_to_exclude(self):
"""
Shares to exclude for file access monitoring. By default
no share is selected.
Attributes: optional-for-create, modifiable
"""
return self._shares_to_exclude
@shares_to_exclude.setter
def shares_to_exclude(self, val):
if val != None:
self.validate('shares_to_exclude', val)
self._shares_to_exclude = val
_file_extensions_to_include = None
@property
def file_extensions_to_include(self):
"""
File extensions included for screening. By default no
file extension is selected.
Attributes: optional-for-create, modifiable
"""
return self._file_extensions_to_include
@file_extensions_to_include.setter
def file_extensions_to_include(self, val):
if val != None:
self.validate('file_extensions_to_include', val)
self._file_extensions_to_include = val
_shares_to_include = None
@property
def shares_to_include(self):
"""
Shares to include for file access monitoring. By default
no share is selected.
Attributes: optional-for-create, modifiable
"""
return self._shares_to_include
@shares_to_include.setter
def shares_to_include(self, val):
if val != None:
self.validate('shares_to_include', val)
self._shares_to_include = val
@staticmethod
def get_api_name():
return "fpolicy-scope-config"
@staticmethod
def get_desired_attrs():
return [
'export-policies-to-include',
'volumes-to-include',
'volumes-to-exclude',
'file-extensions-to-exclude',
'policy-name',
'export-policies-to-exclude',
'check-extensions-on-directories',
'vserver',
'shares-to-exclude',
'file-extensions-to-include',
'shares-to-include',
]
def describe_properties(self):
return {
'export_policies_to_include': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'volumes_to_include': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'volumes_to_exclude': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'file_extensions_to_exclude': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'policy_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'export_policies_to_exclude': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'check_extensions_on_directories': { 'class': bool, 'is_list': False, 'required': 'optional' },
'vserver': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'shares_to_exclude': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'file_extensions_to_include': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'shares_to_include': { 'class': basestring, 'is_list': True, 'required': 'optional' },
}
|
StarcoderdataPython
|
264599
|
<reponame>zhiyue/cola<gh_stars>1000+
'''
Copyright (c) 2013 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2013-6-11
@author: Chine
'''
import unittest
import threading
from cola.core.logs import get_logger, LogRecordSocketReceiver
class Test(unittest.TestCase):
def setUp(self):
self.client_logger = get_logger(name='cola_test_client', server='localhost')
self.server_logger = get_logger(name='cola_test_server')
self.log_server = LogRecordSocketReceiver(logger=self.server_logger)
threading.Thread(target=self.log_server.serve_forever).start()
def tearDown(self):
self.log_server.shutdown()
def testLog(self):
self.client_logger.error('Sth happens here')
self.client_logger.info('sth info here')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testLog']
unittest.main()
|
StarcoderdataPython
|
1762171
|
<reponame>azadoks/aiida-core<gh_stars>100-1000
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`Data` sub class to represent a folder on a file system."""
from .data import Data
__all__ = ('FolderData',)
class FolderData(Data):
"""`Data` sub class to represent a folder on a file system."""
def __init__(self, **kwargs):
"""Construct a new `FolderData` to which any files and folders can be added.
Use the `tree` keyword to simply wrap a directory:
folder = FolderData(tree='/absolute/path/to/directory')
Alternatively, one can construct the node first and then use the various repository methods to add objects:
folder = FolderData()
folder.put_object_from_tree('/absolute/path/to/directory')
folder.put_object_from_filepath('/absolute/path/to/file.txt')
folder.put_object_from_filelike(filelike_object)
:param tree: absolute path to a folder to wrap
:type tree: str
"""
tree = kwargs.pop('tree', None)
super().__init__(**kwargs)
if tree:
self.put_object_from_tree(tree)
|
StarcoderdataPython
|
368918
|
<reponame>devs-7/bible-projector-python<gh_stars>0
import random
import socket
import string
def generate_random_string_with_letters_and_digits(length: int) -> str:
return ''.join(random.choices(
string.ascii_letters + string.digits,
k=length
))
def get_host() -> str:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 53))
host = s.getsockname()[0]
s.close()
return host
class ServerAddress:
host: str
port: int
prefix: str
def __init__(self, port: int) -> None:
self.port = port
self.host = get_host()
self.prefix = ''
def generate_prefix(self, length: int) -> str:
self.prefix = generate_random_string_with_letters_and_digits(length)
return self.prefix
@property
def address(self) -> str:
if not self.prefix:
return f'{self.host}:{self.port}'
return f'http://{self.host}:{self.port}/{self.prefix}'
@property
def localhost_address(self) -> str:
if not self.prefix:
return f'http://localhost:{self.port}'
return f'http://localhost:{self.port}/{self.prefix}'
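
# A minimal usage sketch (hypothetical port and output values, shown for
# illustration only):
if __name__ == '__main__':
    server = ServerAddress(5000)
    server.generate_prefix(8)
    print(server.address)            # e.g. http://192.168.0.10:5000/Ab3xYz90
    print(server.localhost_address)  # e.g. http://localhost:5000/Ab3xYz90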
|
StarcoderdataPython
|
192721
|
<reponame>dfederschmidt/ta-splunk-add-on-for-datadog-api<filename>bin/input_module_datadog_event_stream.py
# ########################################################################
# Copyright 2020 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
# encoding = utf-8
import os
import sys
import time
import datetime
import requests
import json
'''
IMPORTANT
Edit only the validate_input and collect_events functions.
Do not edit any other part in this file.
This file is generated only once when creating the modular input.
'''
'''
# For advanced users, if you want to create single instance mod input, uncomment this method.
def use_single_instance_mode():
return True
'''
def get_slice_time(start, end, steps):
time_list = []
chunks = range(start, end, steps)
counter = 0
for chunk in chunks:
counter += 1
        if len(chunks) == counter:
time_list.append((chunk, end))
else:
time_list.append((chunk, chunk+steps-1))
return time_list
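# For example (illustrative values): get_slice_time(0, 250, 100) returns
# [(0, 99), (100, 199), (200, 250)] -- inclusive windows of `steps` seconds,
# with the final chunk clamped to the requested end time.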
def build_event_url(datadog_site, start, end, priority, sources, tags, unaggregated):
endpoint = "https://api.datadoghq.{}/api/v1/events?".format(datadog_site)
param = "start=" + str(start)
param += "&end=" + str(end)
if priority:
param += "&priority=" + str(priority)
if sources:
param += "&sources=" + str(sources)
if tags:
param += "&tags=" + str(tags)
if unaggregated:
param += "&unaggregated=true"
url = endpoint + param
return url
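# For example (illustrative values): build_event_url("com", 1, 2, "normal",
# "nagios", "env:prod", False) yields
# "https://api.datadoghq.com/api/v1/events?start=1&end=2&priority=normal&sources=nagios&tags=env:prod"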
def validate_input(helper, definition):
"""Implement your own validation logic to validate the input stanza configurations"""
# This example accesses the modular input variable
# api_key = definition.parameters.get('api_key', None)
# app_key = definition.parameters.get('app_key', None)
# start_time = definition.parameters.get('start_time', None)
# end_time = definition.parameters.get('end_time', None)
# priority = definition.parameters.get('priority', None)
# sources = definition.parameters.get('sources', None)
# tags = definition.parameters.get('tags', None)
# unaggregated = definition.parameters.get('unaggregated', None)
start_time = definition.parameters.get('start_time', None)
end_time = definition.parameters.get('end_time', None)
priority = definition.parameters.get('priority', None)
try:
datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
except ValueError:
raise ValueError(
"Incorrect date format, should be YYYY-MM-DD hh:mm:ss")
pass
def collect_events(helper, ew):
"""Implement your data collection logic here
# The following examples get the arguments of this input.
# Note, for single instance mod input, args will be returned as a dict.
# For multi instance mod input, args will be returned as a single value.
opt_api_key = helper.get_arg('api_key')
opt_app_key = helper.get_arg('app_key')
opt_start_time = helper.get_arg('start_time')
opt_end_time = helper.get_arg('end_time')
opt_priority = helper.get_arg('priority')
opt_sources = helper.get_arg('sources')
opt_tags = helper.get_arg('tags')
opt_unaggregated = helper.get_arg('unaggregated')
# In single instance mode, to get arguments of a particular input, use
opt_api_key = helper.get_arg('api_key', stanza_name)
opt_app_key = helper.get_arg('app_key', stanza_name)
opt_start_time = helper.get_arg('start_time', stanza_name)
opt_end_time = helper.get_arg('end_time', stanza_name)
opt_priority = helper.get_arg('priority', stanza_name)
opt_sources = helper.get_arg('sources', stanza_name)
opt_tags = helper.get_arg('tags', stanza_name)
opt_unaggregated = helper.get_arg('unaggregated', stanza_name)
# get input type
helper.get_input_type()
# The following examples get input stanzas.
# get all detailed input stanzas
helper.get_input_stanza()
# get specific input stanza with stanza name
helper.get_input_stanza(stanza_name)
# get all stanza names
helper.get_input_stanza_names()
# The following examples get options from setup page configuration.
# get the loglevel from the setup page
loglevel = helper.get_log_level()
# get proxy setting configuration
proxy_settings = helper.get_proxy()
# get account credentials as dictionary
account = helper.get_user_credential_by_username("username")
account = helper.get_user_credential_by_id("account id")
# get global variable configuration
global_userdefined_global_var = helper.get_global_setting("userdefined_global_var")
# The following examples show usage of logging related helper functions.
# write to the log for this modular input using configured global log level or INFO as default
helper.log("log message")
# write to the log using specified log level
helper.log_debug("log message")
helper.log_info("log message")
helper.log_warning("log message")
helper.log_error("log message")
helper.log_critical("log message")
# set the log level for this modular input
# (log_level can be "debug", "info", "warning", "error" or "critical", case insensitive)
helper.set_log_level(log_level)
# The following examples send rest requests to some endpoint.
response = helper.send_http_request(url, method, parameters=None, payload=None,
headers=None, cookies=None, verify=True, cert=None,
timeout=None, use_proxy=True)
# get the response headers
r_headers = response.headers
# get the response body as text
r_text = response.text
# get response body as json. If the body text is not a json string, raise a ValueError
r_json = response.json()
# get response cookies
r_cookies = response.cookies
# get redirect history
historical_responses = response.history
# get response status code
r_status = response.status_code
# check the response status, if the status is not sucessful, raise requests.HTTPError
response.raise_for_status()
# The following examples show usage of check pointing related helper functions.
# save checkpoint
helper.save_check_point(key, state)
# delete checkpoint
helper.delete_check_point(key)
# get checkpoint
state = helper.get_check_point(key)
# To create a splunk event
helper.new_event(data, time=None, host=None, index=None, source=None, sourcetype=None, done=True, unbroken=True)
"""
'''
# The following example writes a random number as an event. (Multi Instance Mode)
# Use this code template by default.
import random
data = str(random.randint(0,100))
event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)
ew.write_event(event)
'''
'''
# The following example writes a random number as an event for each input config. (Single Instance Mode)
# For advanced users, if you want to create single instance mod input, please use this code template.
# Also, you need to uncomment use_single_instance_mode() above.
import random
input_type = helper.get_input_type()
for stanza_name in helper.get_input_stanza_names():
data = str(random.randint(0,100))
event = helper.new_event(source=input_type, index=helper.get_output_index(stanza_name), sourcetype=helper.get_sourcetype(stanza_name), data=data)
ew.write_event(event)
'''
# Get account info
global_account = helper.get_arg('global_account')
opt_api_key = global_account['api_key']
opt_app_key = global_account['app_key']
opt_datadog_site = global_account['dd_site']
opt_start_time = helper.get_arg('start_time')
opt_end_time = helper.get_arg('end_time')
opt_interval = helper.get_arg('interval')
helper.log_debug("[-] DataDog Events: datadog site {}".format(opt_datadog_site))
helper.log_debug("[-] DataDog Events: UI start time {}".format(opt_start_time))
helper.log_debug("[-] DataDog Events: UI end time {}".format(opt_end_time))
opt_start_time = datetime.datetime.strptime(
opt_start_time, '%Y-%m-%d %H:%M:%S')
opt_start_time = int(
(opt_start_time - datetime.datetime(1970, 1, 1)).total_seconds())
opt_end_time = datetime.datetime.strptime(
opt_end_time, '%Y-%m-%d %H:%M:%S')
opt_end_time = int(
(opt_end_time - datetime.datetime(1970, 1, 1)).total_seconds())
opt_priority = helper.get_arg('priority')
opt_sources = helper.get_arg('sources')
opt_tags = helper.get_arg('tags')
opt_unaggregated = helper.get_arg('unaggregated')
# Slice
steps = 60*60*24
#payload = {}
headers = {
'Content-type': 'application/json',
'DD-API-KEY': opt_api_key,
'DD-APPLICATION-KEY': opt_app_key
}
# checkpoint key
current_url = build_event_url(opt_datadog_site, opt_start_time, opt_end_time,
opt_priority, opt_sources, opt_tags, opt_unaggregated)
key = "{}_DATADOG_EVENTS_processing".format(
helper.get_input_stanza_names())
last_ran_key = "last_ran_{}".format(key)
# check checkpoint
helper.log_debug("[-] DataDog Events: check checkpoint")
helper.log_debug(
"[-] DataDog Events: Last run time: {}".format(helper.get_check_point(last_ran_key)))
now = datetime.datetime.utcnow()
now = int((now - datetime.datetime(1970, 1, 1)).total_seconds())
helper.log_debug("[-] DataDog Events: now - {}".format(now))
if helper.get_check_point(last_ran_key) is None:
helper.save_check_point(last_ran_key, opt_start_time)
else:
opt_start_time = int(helper.get_check_point(last_ran_key) + 1)
if now < opt_end_time:
opt_end_time = now
helper.log_debug("[-] DataDog Events: opt_start_time - {}".format(opt_start_time))
helper.log_debug("[-] DataDog Events: opt_end_time - {}".format(opt_end_time))
helper.log_debug(
"\t[-] DataDog Events: last run checkpoint: {} -- value: {}".format(last_ran_key, helper.get_check_point(last_ran_key)))
# Pageing
time_list = get_slice_time(int(opt_start_time), int(opt_end_time), steps)
helper.log_debug(
"Processing DataDog Events time_list: {}".format(len(time_list)))
    for time_slice in time_list:
        helper.log_debug(
            "\t[-] DataDog Events:In for: processing time {}".format(time_slice))
        # build url according to user's inputs
        url = build_event_url(opt_datadog_site, time_slice[0], time_slice[1], opt_priority,
                              opt_sources, opt_tags, opt_unaggregated)
response = helper.send_http_request(url, "GET", parameters=None, payload=None,
headers=headers, cookies=None, verify=True, cert=None, timeout=None, use_proxy=True)
helper.log_debug("[-] DataDog Events API: Response code: {}".format(response.status_code))
if response.status_code != 200:
helper.log_debug(
"\t[-] DataDog Events API Error: {}".format(response.text))
events = response.json()
if "events" in events:
for event in events['events']:
try:
event['ddhost'] = event['host']
event['ddsource'] = event['source']
del event['host']
del event['source']
except Exception as e:
helper.log_debug(
"\t[-] Try Block 1: DataDog Events Exception {}".format(e))
pass
try:
event_time = event['date_happened']
event = helper.new_event(json.dumps(
event), time=event_time, host=None, index=None, source=None, sourcetype=None, done=True, unbroken=True)
ew.write_event(event)
# save checkpoint for every event
timestamp = helper.get_check_point(last_ran_key)
if timestamp is None:
timestamp = event_time
else:
timestamp = max(int(timestamp), int(event_time))
helper.save_check_point(last_ran_key, timestamp)
except Exception as e:
helper.log_debug(
"\t[-] Try Block 2: DataDog Events Exception {}".format(e))
else:
helper.log_debug("\t[-] No events to retrieve for {}.".format(url))
|
StarcoderdataPython
|
1939633
|
import unittest
from unittest import skip
from asgard.models.account import AccountDB as Account
from hollowman.filters.autodisablehttp import AutoDisableHTTPFilter
from hollowman.marathonapp import AsgardApp
from hollowman.models import User
from tests.utils import with_json_fixture
APP_WITH_HTTP_LABELS = "single_full_app_with_http_labels.json"
class TestAutoDisableHTTPFilter(unittest.TestCase):
@with_json_fixture(APP_WITH_HTTP_LABELS)
def setUp(self, single_full_app_fixture):
self.filter = AutoDisableHTTPFilter()
self.request_app = AsgardApp.from_json(single_full_app_fixture)
self.original_app = AsgardApp.from_json(single_full_app_fixture)
self.account = Account(
name="Dev Account", namespace="dev", owner="company"
)
self.user = User(tx_email="<EMAIL>")
self.user.current_account = self.account
def test_create_app_with_zero_instances(self):
self.request_app.instances = 0
self.assertTrue(self.request_app.labels["traefik.enable"])
filtered_app = self.filter.write(
self.user, self.request_app, AsgardApp()
)
self.assertEqual("false", filtered_app.labels["traefik.enable"])
def test_should_not_modify_non_http_apps(self):
self.request_app.instances = 0
del self.request_app.labels["traefik.enable"]
filtered_app = self.filter.write(
self.user, self.request_app, AsgardApp()
)
self.assertTrue("traefik.enable" not in filtered_app.labels)
def test_update_suspended_app_set_instances_to_zero(self):
self.request_app = AsgardApp.from_json({"env": {"ENV_A": "VALUE"}})
self.original_app.instances = 10
filtered_app = self.filter.write(
self.user, self.request_app, AsgardApp()
)
self.assertIsNone(filtered_app.labels.get("traefik.enable"))
def test_update_running_app_set_instances_to_zero(self):
self.original_app.instances = 10
self.request_app.instances = 0
self.assertTrue(self.request_app.labels["traefik.enable"])
filtered_app = self.filter.write(
self.user, self.request_app, self.original_app
)
self.assertEqual("false", filtered_app.labels["traefik.enable"])
def test_should_enable_http_when_scaling_from_zero_to_some(self):
self.original_app.instances = 0
self.request_app.instances = 7
self.original_app.labels["traefik.enable"] = False
self.request_app.labels["traefik.enable"] = False
filtered_app = self.filter.write(
self.user, self.request_app, self.original_app
)
self.assertEqual("true", self.request_app.labels["traefik.enable"])
def test_shouldnt_modify_app_if_instances_fields_is_not_present(self):
self.request_app.instances = None
self.original_app = AsgardApp()
self.request_app.labels["traefik.enable"] = "true"
self.filter.write(self.user, self.request_app, self.original_app)
self.assertEqual("true", self.request_app.labels["traefik.enable"])
|
StarcoderdataPython
|
8024809
|
<filename>pymultisig/sign_multisig_spend.py
#!/usr/bin/env python
import sys
import argparse
import json
try:
from pymultisig import trezor_utils
from pymultisig import btc_utils
from pymultisig.generate_multisig_address import generate_multisig_address
except ModuleNotFoundError:
import trezor_utils
import btc_utils
from generate_multisig_address import generate_multisig_address
import trezorlib
from trezorlib import messages as proto
from trezorlib import btc as trezorbtc
def sign_tx(path,
multisig_address,
redeemscript,
utxo_file,
output_file,
testnet=False):
"""
Sign a spend of a bitcoin 2-of-3 P2SH-multisig address
using a Trezor One Hardware Wallet
Args:
path: BIP32 path of key with which to sign
multisig_address: Address that is being spent
redeemscript: redeem script corresponding to multisig_address
utxo_file: JSON file of UTXOs for multisig_address
(see get_utxo_set.py)
output_file: JSON file of destination addresses and amounts
(see generate_outputs.py)
testnet: Is this a testnet or mainnet address?
Returns:
Dictionary with two keys:
pubkey: public key corresponding to the private key used for signing
signatures: a list of signatures, one per utxo
Raises:
ValueError: If multisig_address is not correct for the given redeemscript
Example:
TODO
"""
with open(utxo_file, 'r') as f:
utxos = json.load(f)
with open(output_file, 'r') as f:
outputs = json.load(f)
# Verify that Pubkeys and Address match
check_address = generate_multisig_address(redeemscript, testnet)
parsed_redeem_script = btc_utils.parse_redeem_script(redeemscript)
if multisig_address != check_address:
raise ValueError("Incorrect Redeem Script")
if testnet:
coin = 'Testnet'
else:
coin = 'Bitcoin'
input_script_type = proto.InputScriptType.SPENDMULTISIG
output_script_type = proto.OutputScriptType.PAYTOADDRESS
tx_api = trezorlib.coins.tx_api[coin]
client = trezor_utils.get_trezor_client()
#client.set_tx_api(tx_api)
# Get signing node:
expanded_path = trezorlib.tools.parse_path(path)
signer = trezorbtc.get_public_node(client, expanded_path, show_display=True).node
# blank HDNodes with public_keys
nodes = [proto.HDNodePathType(node=proto.HDNodeType(public_key=bytes.fromhex(h),
depth=0,
fingerprint=0,
child_num=0,
chain_code=b'0'*32),
address_n=[]
) for h in parsed_redeem_script['pubkeys']]
trezor_inputs = []
for utxo in utxos:
multisig = proto.MultisigRedeemScriptType(
pubkeys=nodes,
m=parsed_redeem_script['m']
)
_input = proto.TxInputType(
prev_hash=bytes.fromhex(utxo['txid']),
prev_index=utxo['n'],
amount=utxo['amount'],
address_n=trezorlib.tools.parse_path(path),
script_type=input_script_type,
multisig=multisig
)
trezor_inputs.append(_input)
txes = {}
for tx in trezor_inputs:
tmptx = tx_api[tx.prev_hash]
txes[tx.prev_hash] = tmptx
# make this multi-output, probably from file
trezor_outputs = []
for output in outputs:
trezor_outputs.append(
proto.TxOutputType(
address=output['address'],
amount=output['amount'],
script_type=output_script_type,
)
)
output_signatures, serialized_tx = trezorbtc.sign_tx(client, coin, trezor_inputs, trezor_outputs, prev_txes=txes)
signature_blob = {"pubkey": signer.public_key.hex(),
"signatures": [s.hex() for s in output_signatures]
}
client.close()
return signature_blob
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Sign a multisig bitcoin transaction using Trezor One')
parser.add_argument('path',
help="Trezor path, for example: \"m/45'/0'/120'/20/0\"")
parser.add_argument('address',
help='the multisig address')
parser.add_argument('redeemscript',
help="hex-encoded Redeem Script")
parser.add_argument('utxo_file',
help="JSON file containing utxo set")
parser.add_argument('output_file',
help="JSON file containing the address(es) to send to and amounts")
parser.add_argument('--testnet',
help='set to testnet (true if present)',
dest='testnet',
action='store_true')
args = parser.parse_args()
sigs = sign_tx(args.path,
args.address,
args.redeemscript,
args.utxo_file,
args.output_file,
args.testnet)
print(json.dumps(sigs, indent=2))
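
# Input file shapes inferred from the field accesses above (illustrative values,
# not an official format specification):
#   utxo_file:   [{"txid": "<hex txid>", "n": 0, "amount": 100000}, ...]
#   output_file: [{"address": "<destination address>", "amount": 99000}, ...]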
|
StarcoderdataPython
|
382869
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Time : 11:41
# Email : <EMAIL>
# File : readMe.py
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ #
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ #
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ #
"""
This is a video player.
A video decoder needs to be installed, 'Xvid-1.3.2-20110601.exe'; it has already been placed in the `other` folder.
Library dependencies:
pyqt5
ffmpeg is used to read video information. Basic operations such as drawing on the video are supported.
"""
|
StarcoderdataPython
|
9644039
|
<reponame>yjbanov/chromium_build
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master import gitiles_poller
def Update(config, c):
webrtc_repo_url = config.Master.git_server_url + '/external/webrtc/'
webrtc_poller = gitiles_poller.GitilesPoller(webrtc_repo_url,
project='webrtc')
c['change_source'].append(webrtc_poller)
samples_poller = gitiles_poller.GitilesPoller(
config.Master.git_server_url + '/external/webrtc-samples',
project='webrtc-samples',
comparator=webrtc_poller.comparator)
c['change_source'].append(samples_poller)
|
StarcoderdataPython
|
9690183
|
<gh_stars>0
# This code is generated automatically by ClointFusion BOT Builder Tool.
import ClointFusion as cf
import time
cf.window_show_desktop()
cf.mouse_click(int(cf.pg.size()[0]/2),int(cf.pg.size()[1]/2))
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_ug9kv3oj_generator\Images\Snips\1--1512_343.png',conf=0.7, wait=11),left_or_right='left', single_double_triple = 'single')
except:
cf.mouse_click(1512,343,left_or_right='left', single_double_triple = 'single')
time.sleep(1)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_ug9kv3oj_generator\Images\Snips\2--330_52.png',conf=0.7, wait=10),left_or_right='left', single_double_triple = 'double')
except:
cf.mouse_click(330,52,left_or_right='left', single_double_triple = 'double')
time.sleep(0)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_ug9kv3oj_generator\Images\Snips\3--330_52.png',conf=0.7, wait=12),left_or_right='left', single_double_triple = 'double')
except:
cf.mouse_click(330,52,left_or_right='left', single_double_triple = 'double')
time.sleep(2)
cf.key_write_enter('shyam',key='')
time.sleep(0)
cf.key_press('enter')
time.sleep(2)
try:
cf.mouse_click(*cf.mouse_search_snip_return_coordinates_x_y(r'C:\Users\mrmay\AppData\Local\Temp\cf_log_ug9kv3oj_generator\Images\Snips\4-shyamGoogleSearchGoogleChrome-1910_70.png',conf=0.7, wait=10),left_or_right='left', single_double_triple = 'single')
except:
cf.mouse_click(1910,70,left_or_right='left', single_double_triple = 'single')
time.sleep(0)
cf.window_close_windows('shyam - Google Search - Google Chrome')
|
StarcoderdataPython
|
8160052
|
def main():
import sys
readline = sys.stdin.buffer.readline
sys.setrecursionlimit(10 ** 7)
n = int(readline())
a = list(map(int, readline().split()))
a.sort(reverse=True)
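    # Reading aid (describes the greedy below): after the descending sort, the
    # largest value is counted once and each subsequent value twice, stopping
    # once n-1 contributions have been accumulated; the last value used may be
    # counted only once when a single slot remains.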
idx = 1
ans = a[0]
for aa in a[1:]:
if idx + 2 >= n:
if idx + 1 < n:
ans += aa
break
idx += 2
ans += aa * 2
print(ans)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|