id | text | dataset_id |
---|---|---|
84219 | from aerosandbox.common import ExplicitAnalysis
import aerosandbox.numpy as np
import subprocess
from pathlib import Path
from aerosandbox.geometry import Airplane
from aerosandbox.performance import OperatingPoint
from typing import Union, List, Dict
import tempfile
import warnings
class AVL(ExplicitAnalysis):
"""
An interface to AVL, a 3D vortex lattice aerodynamics code developed by <NAME> at MIT.
Requires AVL to be on your computer; AVL is available here: https://web.mit.edu/drela/Public/web/avl/
It is recommended (but not required) that you add AVL to your system PATH environment variable such that it can
be called with the command `avl`. If this is not the case, you need to specify the path to your AVL
executable using the `avl_command` argument of the constructor.
Usage example:
>>>avl = asb.AVL(
>>> airplane=my_airplane,
>>> op_point=asb.OperatingPoint(
>>> velocity=100, # m/s
>>> alpha=5, # deg
>>> beta=4, # deg
>>> p=0.01, # rad/sec
>>> q=0.02, # rad/sec
>>> r=0.03, # rad/sec
>>> )
>>>)
>>>outputs = avl.run()
"""
def __init__(self,
airplane: Airplane,
op_point: OperatingPoint = OperatingPoint(),
avl_command: str = "avl",
verbose: bool = False,
working_directory: str = None,
):
"""
Interface to AVL.
Args:
airplane: The airplane object you wish to analyze.
op_point: The operating point you wish to analyze at.
avl_command: The command-line argument to call AVL.
* If AVL is on your system PATH, then you can just leave this as "avl".
* If AVL is not on your system PATH, then you should provide a filepath to the AVL executable.
Note that AVL is not on your PATH by default. To tell if AVL is on your system PATH, open up a
terminal and type "avl".
* If the AVL menu appears, it's on your PATH.
* If you get something like "'avl' is not recognized as an internal or external command..." or
"Command 'avl' not found, did you mean...", then it is not on your PATH and you'll need to
specify the location of your AVL executable as a string.
To add AVL to your path, modify your system's environment variables. (Google how to do this for your OS.)
verbose: Whether or not to print AVL's console output while it runs.
working_directory: A directory to write intermediate files into, which is useful for debugging. If None (default), a temporary directory is used and discarded afterwards.
"""
self.airplane = airplane
self.op_point = op_point
self.avl_command = avl_command
self.verbose = verbose
self.working_directory = working_directory
def run(self) -> Dict:
return self._run_avl()
def _default_keystroke_file_contents(self) -> List[str]:
run_file_contents = []
# Disable graphics
run_file_contents += [
"plop",
"g",
"",
]
# Enter oper mode
run_file_contents += [
"oper",
]
# Set parameters
run_file_contents += [
"m"
f"mn {self.op_point.mach()}",
f"v {self.op_point.velocity}",
f"d {self.op_point.atmosphere.density()}",
"g 9.81",
""
]
# Set analysis state
p_bar = self.op_point.p * self.airplane.b_ref / (2 * self.op_point.velocity)
q_bar = self.op_point.q * self.airplane.c_ref / (2 * self.op_point.velocity)
r_bar = self.op_point.r * self.airplane.b_ref / (2 * self.op_point.velocity)
run_file_contents += [
f"a a {self.op_point.alpha}",
f"b b {self.op_point.beta}",
f"r r {p_bar}",
f"p p {q_bar}",
f"y y {r_bar}"
]
return run_file_contents
def _run_avl(self,
run_command: str = None,
) -> Dict[str, np.ndarray]:
"""
Private function to run AVL.
Args:
    run_command: A string with any AVL keystroke inputs that you'd like. By default, you start off within the OPER
        menu. All of the inputs indicated in the constructor have been set already, but you can override them here
        (for this run only) if you want.

Returns: A dictionary containing all of your results.
"""
with tempfile.TemporaryDirectory() as directory:
directory = Path(directory)
### Alternatively, work in another directory:
if self.working_directory is not None:
directory = Path(self.working_directory) # For debugging
# Designate an intermediate file for file I/O
output_filename = "output.txt"
with open(directory / output_filename, "w+") as f:
pass
# Handle the airplane file
airplane_file = "airplane.avl"
self.airplane.write_avl(directory / airplane_file)
# Handle the run file
keystroke_file_contents = self._default_keystroke_file_contents()
if run_command is not None:
keystroke_file_contents += [run_command]
keystroke_file_contents += [
"x",
"st",
f"{output_filename}",
"o",
"",
"",
"quit"
]
keystroke_file = "keystroke_file.txt"
with open(directory / keystroke_file, "w+") as f:
f.write(
"\n".join(keystroke_file_contents)
)
command = f'{self.avl_command} {airplane_file} < {keystroke_file}'
### Execute
subprocess.call(
command,
shell=True,
cwd=directory,
stdout=None if self.verbose else subprocess.DEVNULL
)
##### Parse the output file
# Read the file
with open(directory / output_filename, "r") as f:
output_data = f.read()
# Trim off the first few lines that contain name, # of panels, etc.
output_data = "\n".join(output_data.split("\n")[8:])
### Iterate through the string to find all the numeric values, based on where "=" appears.
values = []
index = output_data.find("=")
while index != -1:
output_data = output_data[index + 1:]
number = output_data[:12].split("\n")[0]
number = float(number)
values.append(number)
index = output_data.find("=")
### Record the keys associated with those values:
keys = [
"Sref",
"Cref",
"Bref",
"Xref",
"Yref",
"Zref",
"alpha",
"pb/2V",
"p'b/2V",
"beta",
"qc/2V",
"mach",
"rb/2V",
"r'b/2V",
"CX", # Note: these refer to "CXtot", etc. in AVL, but the "tot" is redundant.
"Cl",
"Cl'",
"CY",
"Cm",
"CZ",
"Cn",
"Cn'",
"CL",
"CD",
"CDvis",
"CDind",
"CLff",
"CDff",
"Cyff",
"e",
"CLa",
"CLb",
"CYa",
"CYb",
"Cla",
"Clb",
"Cma",
"Cmb",
"Cna",
"Cnb",
"CLp",
"CLq",
"CLr",
"CYp",
"CYq",
"CYr",
"Clp",
"Clq",
"Clr",
"Cmp",
"Cmq",
"Cmr",
"Cnp",
"Cnq",
"Cnr",
"Xnp",
"Clb Cnr / Clr Cnb"
]
if len(values) != 57 and len(values) != 56: # Sometimes the spiral mode term is inexplicably not displayed by AVL
raise RuntimeError(
"AVL could not run for some reason!\n"
"Investigate by turning on the `verbose` flag and looking at the output.\n"
"(Common culprit: angular rates too high.)"
)
res = {
k: v
for k, v in zip(
keys, values
)
}
##### Add a few more outputs for ease of use
res["p"] = res["pb/2V"] * (2 * self.op_point.velocity / self.airplane.b_ref)
res["q"] = res["qc/2V"] * (2 * self.op_point.velocity / self.airplane.c_ref)
res["r"] = res["rb/2V"] * (2 * self.op_point.velocity / self.airplane.b_ref)
return res
if __name__ == '__main__':
### Import Vanilla Airplane
import aerosandbox as asb
from pathlib import Path
geometry_folder = Path(asb.__file__).parent.parent / "tutorial" / "04 - Geometry" / "example_geometry"
import sys
sys.path.insert(0, str(geometry_folder))
from vanilla import airplane as vanilla
### Do the AVL run
avl = AVL(
airplane=vanilla,
op_point=OperatingPoint(
atmosphere=asb.Atmosphere(altitude=0),
velocity=1,
alpha=0.433476,
beta=0,
p=0,
q=0,
r=0,
),
)
res = avl.run()
for k, v in res.items():
print(f"{str(k).rjust(10)} : {v}")
| StarcoderdataPython |
190034 | <filename>scanpy/external/pp/_harmony_integrate.py
"""
Use harmony to integrate cells from different experiments.
"""
from typing import Optional
from anndata import AnnData
def harmony_integrate(
adata: AnnData,
key: str,
basis: str = "X_pca",
adjusted_basis: str = "X_pca_harmony",
**kwargs,
):
"""\
Use harmonypy [Korunsky19]_ to integrate different experiments.
Harmony [Korunsky19]_ is an algorithm for integrating single-cell
data from multiple experiments. This function uses the python
port of Harmony, ``harmonypy``, to integrate single-cell data
stored in an AnnData object. As Harmony works by adjusting the
principal components, this function should be run after performing
PCA but before computing the neighbor graph, as illustrated in the
example below.
Parameters
----------
adata
The annotated data matrix.
key
The name of the column in ``adata.obs`` that differentiates
among experiments/batches.
basis
The name of the field in ``adata.obsm`` where the PCA table is
stored. Defaults to ``'X_pca'``, which is the default for
``sc.tl.pca()``.
adjusted_basis
The name of the field in ``adata.obsm`` where the adjusted PCA
table will be stored after running this function. Defaults to
``X_pca_harmony``.
kwargs
Any additional arguments will be passed to
``harmonypy.run_harmony()``.
Returns
-------
Updates adata with the field ``adata.obsm[obsm_out_field]``,
containing principal components adjusted by Harmony such that
different experiments are integrated.
Example
-------
First, load libraries and example dataset, and preprocess.
>>> import scanpy as sc
>>> import scanpy.external as sce
>>> adata = sc.datasets.pbmc3k()
>>> sc.pp.recipe_zheng17(adata)
>>> sc.tl.pca(adata)
We now arbitrarily assign a batch metadata variable to each cell
for the sake of example, but during real usage there would already
be a column in ``adata.obs`` giving the experiment each cell came
from.
>>> adata.obs['batch'] = 1350*['a'] + 1350*['b']
Finally, run harmony. Afterwards, there will be a new table in
``adata.obsm`` containing the adjusted PC's.
>>> sce.pp.harmony_integrate(adata, 'batch')
>>> 'X_pca_harmony' in adata.obsm
True
"""
try:
import harmonypy
except ImportError:
raise ImportError("\nplease install harmonypy:\n\n\tpip install harmonypy")
harmony_out = harmonypy.run_harmony(adata.obsm[basis], adata.obs, key, **kwargs)
adata.obsm[adjusted_basis] = harmony_out.Z_corr.T
| StarcoderdataPython |
3374085 | from LECA.consensus import consensus_ages
import cPickle as pickle
import sys, os
### This program will create the consensus (mode) age calls
### by trimming databases that oversplit co-orthologous groups.
###
### **Note: if this script does not find the file LDORESULTS, it will
### silently calculate a consensus without it, so make sure this path is
### correct.
############# User input #######################
INFILE = "binAges_<SPECIES>.csv"
LDORESULTS = "../Errors/Oversplitting/<SPECIES>_LDO_results.p"
FALSEPOSITIVES = "../Errors/Losses/FalsePos_<SPECIES>.p"
TAXON = "<SPECIES>"
## Set one or other to None to create a consensus output without
## filtering algorithms by oversplitting or false positive criteria
#FALSEPOSITIVES = None
#LDORESULTS = None
############ Don't change #######################
with open("../OtherInput/ageLists.p") as f:
ageLists = pickle.load(f)
assert TAXON in ageLists, "Taxon %s not found in age order file" % TAXON
AGES = ageLists[TAXON]
if os.path.exists(LDORESULTS):
for line in consensus_ages(INFILE,AGES,LDORESULTS,FALSEPOSITIVES):
print line
else:
for line in consensus_ages(INFILE,AGES,LDO_dict=None,lossTaxa_dict=FALSEPOSITIVES):
print line
| StarcoderdataPython |
1790474 | <reponame>camille1874/FinQA
#coding:utf8
import jieba
import jieba.posseg as pseg
import os,sys
'''
initialize jieba Segment
'''
def jieba_initialize():
jieba.load_userdict(os.path.dirname(os.path.split(os.path.realpath(__file__))[0])+'/resources/QAattrdic.txt')
jieba.initialize()
'''
Segment words by jieba
'''
def wordSegment(text):
text = text.strip()
seg_list = jieba.cut(text)
result = " ".join(seg_list)
return result
'''
POS Tagging
'''
def postag(text):
words = pseg.cut(text)
# for w in words:
# print w.word, w.flag
return words
'''
process xiaohuangji corpus
'''
def xiaohuangji_textprocess(fr_path,fw_path):
fr = open(fr_path,'r')
fw = open(fw_path,'a')
line = fr.readline()
i = 0
while line:
if line[0] == 'E':
question = fr.readline()[2:].strip()
answer = fr.readline()[2:]
print (question)
print (answer)
if len(question)<20 and len(answer)<30:
i +=1
qa_pair = question+":::"+answer
fw.write(qa_pair)
line = fr.readline()
fw.close()
fr.close()
print ('Finished')
'''
q:::a text processing
'''
def tp2(fr_path,fw_path):
fr = open(fr_path,'r')
fw = open(fw_path,'a')
line = fr.readline()
while line:
flag = 0
words = pseg.cut(line)
for w in words:
print (w.word + w.flag)
if w.flag == 'nr':
flag = 1
if flag == 0:
fw.write(line)
line = fr.readline()
fw.close()
fr.close()
print ('Finished')
'''
Load baike attribute names
'''
def load_baikeattr_name(attrdic):
fr = open(attrdic,'r')
attr = []
line = fr.readline()
while line:
attr.append(line.strip())
line = fr.readline()
fr.close()
return attr
'''
Synonyms Analysis,return word in baike attr
word 原始词
synsdic 同义词典
attr 属性
'''
def load_synonyms_word_inattr(word,synsdic,attr):
fr = open(synsdic,'r')
tar_word = ''
line = fr.readline().strip()
while line:
words = line.split(" ")
if word in words:
for w in words:
if w in attr:
tar_word = w
break
if tar_word != '':
break
line = fr.readline()
fr.close()
if tar_word == '':
tar_word = 'Empty'
return tar_word
| StarcoderdataPython |
1791801 | # coding=UTF-8
# ex:ts=4:sw=4:et=on
# Copyright (c) 2013, <NAME>
# All rights reserved.
# Complete license can be found in the LICENSE file.
__version__ = "0.8.4"
| StarcoderdataPython |
3333173 | from decimal import Decimal
class TradeResult(object):
def __init__(
self,
received: float,
remains: float,
order_id: int,
funds: dict,
):
self.received = received
self.remains = remains
self.order_id = order_id
self.funds = funds
@property
def received(self) -> Decimal:
return self._received
@received.setter
def received(self, value: float):
self._received = Decimal(value)
@property
def remains(self) -> Decimal:
return self._remains
@remains.setter
def remains(self, value: float):
self._remains = Decimal(value)
@property
def order_id(self) -> int:
return self._order_id
@order_id.setter
def order_id(self, value: int):
self._order_id = int(value)
@property
def funds(self) -> dict:
return self._funds
@funds.setter
def funds(self, value: dict):
self._funds = value
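# A minimal usage sketch (hypothetical values; not part of the original module):
if __name__ == '__main__':
    result = TradeResult(received=0.5, remains=0.25, order_id=12345,
                         funds={'usd': 100.0, 'btc': 1.5})
    # Float inputs are converted to Decimal by the property setters
    print(result.received + result.remains)  # 0.75, as a Decimal
    print(result.funds['btc'])               # 1.5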
| StarcoderdataPython |
1614999 | # coding: utf-8
from __future__ import unicode_literals
import logging
from wxpy.utils import handle_response
from .chat import Chat
logger = logging.getLogger(__name__)
class User(Chat):
"""
Base class for friends (:class:`Friend`), group chat members (:class:`Member`), and official accounts (:class:`MP`)
"""
def __init__(self, raw, bot):
super(User, self).__init__(raw, bot)
@property
def remark_name(self):
"""
Remark name (alias) of the user
"""
return self.raw.get('RemarkName')
@handle_response()
def set_remark_name(self, remark_name):
"""
Set or modify the friend's remark name
:param remark_name: the new remark name
"""
logger.info('setting remark name for {}: {}'.format(self, remark_name))
return self.bot.core.set_alias(userName=self.user_name, alias=remark_name)
@property
def sex(self):
"""
Gender; currently one of::
    # Male
    MALE = 1
    # Female
    FEMALE = 2
`None` if not set
"""
return self.raw.get('Sex')
@property
def province(self):
"""
Province
"""
return self.raw.get('Province')
@property
def city(self):
"""
City
"""
return self.raw.get('City')
@property
def signature(self):
"""
Personal signature
"""
return self.raw.get('Signature')
@property
def is_friend(self):
"""
Check whether the current user is a friend of the bot
:return: the corresponding friend object if so, otherwise False
"""
if self.bot:
try:
friends = self.bot.friends()
index = friends.index(self)
return friends[index]
except ValueError:
return False
def add(self, verify_content=''):
"""
Add the current user as a friend
:param verify_content: verification message (text)
"""
return self.bot.add_friend(user=self, verify_content=verify_content)
def accept(self, verify_content=''):
"""
Accept the current user as a friend
:param verify_content: verification message (text)
:return: the new friend object
:rtype: :class:`wxpy.Friend`
"""
return self.bot.accept_friend(user=self, verify_content=verify_content)
| StarcoderdataPython |
37709 | """
Start local development server
"""
import argparse
import logging
import shlex
import subprocess
import webbrowser
from contextlib import suppress
from http.server import HTTPServer, SimpleHTTPRequestHandler
from pathlib import Path
from ssl import wrap_socket
from tempfile import NamedTemporaryFile
from threading import Thread
from livereload.server import LogFormatter, Server
from watchdog.observers import Observer
from watchdog.tricks import ShellCommandTrick
import build
PARCEL_CLI = "./node_modules/.bin/parcel"
BUNDLER_COMMAND = f"{PARCEL_CLI} watch --no-hmr src/*.html"
LIVERELOAD_DELAY = 0.1
ROOT_DIR = "dist/"
PATHS_TO_WATCH_FOR_THEMATIQUES = (
"build.py",
"mistune_toc.py",
"contenus/meta/*.md",
"contenus/thematiques/*.md",
"templates/thematique.html",
)
PATHS_TO_WATCH_FOR_INDEX = (
"build.py",
"contenus/conseils/*.md",
"contenus/meta/*.md",
"contenus/questions/*.md",
"contenus/réponses/*.md",
"contenus/statuts/*.md",
"contenus/suivi/*.md",
"templates/index.html",
)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--address", default="0.0.0.0")
parser.add_argument("--port", type=int, default=None)
parser.add_argument("--ssl", action="store_true")
parser.add_argument("--ssl-cert", default="cert.pem")
parser.add_argument("--ssl-key", default="key.pem")
parser.add_argument("--open", action="store_true")
parser.add_argument("--watch", action="store_true")
return parser.parse_args()
def serve(address, port, open_, watch, ssl, ssl_cert, ssl_key, bundler_watch_filename):
    if ssl:
        return serve_https(
            address=address,
            port=port or 8443,
            open_=open_,
            watch=watch,
            ssl_cert=ssl_cert,
            ssl_key=ssl_key,
        )
    else:
        return serve_http(
            address=address,
            port=port or 5500,
            open_=open_,
            watch=watch,
            bundler_watch_filename=bundler_watch_filename,
        )
class CustomServer(Server):
"""
Custom server with logger that decodes bytes in logs
"""
def _setup_logging(self):
super()._setup_logging()
logger = logging.getLogger("livereload")
formatter = self.BytesFormatter()
for handler in logger.handlers:
handler.setFormatter(formatter)
class BytesFormatter(LogFormatter):
def format(self, record):
if isinstance(record.msg, bytes):
with suppress(UnicodeDecodeError):
record.msg = record.msg.decode("utf-8")
return super().format(record)
def serve_http(address, port, open_, watch, bundler_watch_filename):
server = CustomServer()
if watch:
for path in PATHS_TO_WATCH_FOR_THEMATIQUES:
server.watch(path, build.thematiques, delay="forever")
for path in PATHS_TO_WATCH_FOR_INDEX:
server.watch(path, build.index, delay="forever")
server.watch(bundler_watch_filename, delay=LIVERELOAD_DELAY)
server.serve(
host=address,
port=port,
root=ROOT_DIR,
open_url_delay=0.1 if open_ else None,
)
def serve_https(address, port, open_, watch, ssl_cert, ssl_key):
class MyHTTPRequestHandler(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=ROOT_DIR, **kwargs)
def log_request(self, *args, **kwargs):
pass
class BuildThematiquesEventHandler(ShellCommandTrick):
def __init__(self):
super().__init__(
shell_command="python3 build.py thematiques",
wait_for_process=True,
drop_during_process=True,
)
def on_any_event(self, event):
if event.event_type == "modified" and not event.is_directory:
super().on_any_event(event)
class BuildIndexEventHandler(ShellCommandTrick):
def __init__(self):
super().__init__(
shell_command="python3 build.py index",
wait_for_process=True,
drop_during_process=True,
)
def on_any_event(self, event):
if event.event_type == "modified" and not event.is_directory:
super().on_any_event(event)
if watch:
observer = Observer()
thematiques_handler = BuildThematiquesEventHandler()
for pattern in PATHS_TO_WATCH_FOR_THEMATIQUES:
directory = Path(pattern).parts[0]
observer.schedule(thematiques_handler, directory, recursive=True)
index_handler = BuildIndexEventHandler()
for pattern in PATHS_TO_WATCH_FOR_INDEX:
directory = Path(pattern).parts[0]
observer.schedule(index_handler, directory, recursive=True)
observer.start()
url = f"https://{address}:{port}/"
print(f"Listening on {url}")
if open_:
webbrowser.open(url)
logging.getLogger()
httpd = HTTPServer((address, port), MyHTTPRequestHandler)
httpd.socket = wrap_socket(
httpd.socket, certfile=ssl_cert, keyfile=ssl_key, server_side=True
)
httpd.serve_forever()
class BundlerThread(Thread):
def __init__(self, watch_file):
super().__init__()
self.watch_file = watch_file
self.daemon = True
def run(self):
proc = subprocess.Popen(shlex.split(BUNDLER_COMMAND), stdout=subprocess.PIPE)
while True:
for line_bytes in proc.stdout:
line = line_bytes.decode("utf-8")
print(line)
if line.startswith("✨ Built in"):
self.trigger_livereload()
def trigger_livereload(self):
self.watch_file.truncate(0)
if __name__ == "__main__":
args = parse_args()
with NamedTemporaryFile(delete=True) as bundler_watch_file:
bundler_thread = BundlerThread(watch_file=bundler_watch_file)
bundler_thread.start()
serve(
address=args.address,
port=args.port,
open_=args.open,
watch=args.watch,
ssl=args.ssl,
ssl_cert=args.ssl_cert,
ssl_key=args.ssl_key,
bundler_watch_filename=bundler_watch_file.name,
)
| StarcoderdataPython |
143464 | <reponame>Anancha/OpenCV-Python-Tutorial
# -*- coding: utf-8 -*-
# @Time : 2017/7/17 下午12:03
# @Author : play4fun
# @File : 画圆圈.py
# @Software: PyCharm
"""
画圆圈.py: randomly overlaid circles in different colors
"""
from time import sleep
import cv2
import numpy as np
def click_event(event, x, y, flags, param):
'''
Left-click on the window to print the coordinates
:param event:
:param x:
:param y:
:param flags:
:param param:
:return:
'''
if event == cv2.EVENT_LBUTTONDOWN:
print(x, y, flags, param)
cv2.namedWindow('Canvas', cv2.WINDOW_GUI_EXPANDED)
cv2.setMouseCallback("Canvas", click_event)
canvas = np.zeros((300, 300, 3), dtype="uint8")
while True:
try:
for i in range(0, 25):
radius = np.random.randint(5, high=200)
color = np.random.randint(0, high=256, size=(3,)).tolist()
pt = np.random.randint(0, high=300, size=(2,))
cv2.circle(canvas, tuple(pt), radius, color, -1)
cv2.imshow("Canvas", canvas)
key = cv2.waitKey(1000)  # wait 1 second
if key == ord('q'):
break
else:
# sleep(1)
continue
except KeyboardInterrupt as e:
print('KeyboardInterrupt', e)
finally:
cv2.imwrite('random-circles2.jpg', canvas)
| StarcoderdataPython |
1738802 | <filename>adv/zace.py
from core.advbase import *
from slot.d import *
from slot.a import *
def module():
return Zace
class Zace(Adv):
a1 = ('s',0.2)
conf = {}
conf['slots.a'] = Resounding_Rendition()+Jewels_of_the_Sun()
conf['acl'] = """
`dragon
`s3, not self.s3_buff
`s1
`s2
`fs, x=5
"""
coab = ['Ieyasu','Wand','Tiki']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv)
| StarcoderdataPython |
177963 | <gh_stars>0
# Reimplementing min() without using the built-in min() function: read two values num1 and num2 from the user and output the lower one.
num1 = int(input('Enter a value: '))
num2 = int(input('Enter a value: '))
if num1 <= num2:
print(num1)
else:
print(num2)
| StarcoderdataPython |
78445 | import pandas as pd
import itertools
from sklearn import preprocessing

cat_features = ['col1', 'co2', 'col3', 'col4', 'col5']

def combine_colums(df, cat_features):
    df_combine = pd.DataFrame(index=df.index)
    for colA, colB in itertools.combinations(cat_features, 2):
        new_col_name = '_'.join([colA, colB])
        # Convert both columns to strings and combine them into one interaction feature
        new_values = df[colA].map(str) + "_" + df[colB].map(str)
        # Label-encode the combined string values
        encoder = preprocessing.LabelEncoder()
        df_combine[new_col_name] = encoder.fit_transform(new_values)
    return df_combine
| StarcoderdataPython |
1601702 | <filename>blog_app/api/errors/invalid_arguments_for_creation_error.py
class InvalidArgumentsForCreationException(Exception):
code = 422
def __init__(self, errors):
Exception.__init__(self)
self.errors = errors
def to_dict(self):
return {
"success": False,
"errors": self.errors
}
| StarcoderdataPython |
3293784 | <filename>Deployment Files/WeatherWear/Combos/views.py
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from . import WeatherWear as ww
import tensorflow as tf
import requests, ast
from users.models import UserProfile
from os import path
from google.cloud import storage
valuedic = {}
siteinfo = {}
key = 'key'
#This function will generate the homepage/dashboard for the logged in user
def index(request, username):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
#If the user logged out on another device, this prevents the current device from getting an error by retrieving necessary info
if not path.exists(f"environement/Userdata/{request.user.username}/{request.user.username}.index"):
bucketname = 'bucketname'
storage_client = storage.Client.from_service_account_json('json file')
bucket = storage_client.get_bucket(bucketname)
blob = bucket.blob(f'{user.username}/{user.username}.data-00000-of-00001')
blob.download_to_filename(f'environment/Userdata/{user.username}/{user.username}.data-00000-of-00001')
blob = bucket.blob(f'{user.username}/{user.username}.index')
blob.download_to_filename(f'environment/Userdata/{user.username}/{user.username}.index')
#This will get weather information to display about the logged in user
entries = UserProfile.objects.filter(user=request.user)
entry = entries.first()
city = entry.city
country = entry.country
response = requests.get(f'https://api.openweathermap.org/data/2.5/weather?q={city},{country}&appid={key}&units=metric')
content = response.content
dictver = content.decode("UTF-8")
weatherdata = ast.literal_eval(dictver)
weather = [weatherdata['main']['feels_like'],weatherdata['weather'][0]['main'], weatherdata['name'], weatherdata['sys']['country']]
#These function calls will get information to feed into the machine to predict what to wear
weatherinfo = ww.getweatherinfo(request)
alldata = ww.makeset(weatherinfo)
ww.makemodel(request)
numresults, stringresults = ww.predict(alldata)
#If no results happen, general recommendations will be given
if len(numresults) == 0:
numresults, stringresults = ww.getbackup(alldata)
ww.restartmodel(request)
#This value is stored and will be used later
valuedic[request.user.username] = numresults
siteinfo[request.user.username] = (stringresults,weather)
#The NN is cleared and results are passed into the html file
ww.model = tf.keras.models.Sequential()
tf.keras.backend.clear_session()
return render(request, 'Combos/index.html', {
'clothes': stringresults,
'weatherinfo': weather
})
#This function handles removing certain combinations
def remove(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
if request.method == "POST":
#Bad items and all items are retrieved and will be used
baditems = request.POST.getlist('want')
allitems = valuedic[request.user.username]
#These two lists are made to store the indicies of the good combos and bad combos
goodlist = []
badlist = []
#The two lists are filled based on the data from the form
for num in baditems:
badlist.append(int(num)-1)
for i in range(len(allitems)):
if i not in badlist:
goodlist.append(i)
#Data is transferred from the two lists into one list and their corresponding number (1 or 0) is stored in the other
newdata1 = []
newdata2 = []
for i in range(len(badlist)):
newdata1.append(allitems[badlist[i]])
newdata2.append(0)
for i in range(len(goodlist)):
newdata1.append(allitems[goodlist[i]])
newdata2.append(1)
#The retrain function is called and then the user will be taken back to the homepage
ww.wt.retrain(newdata1, newdata2, request)
return HttpResponseRedirect(reverse("index"))
#This function is similar to index but instead it will go straight to the backup options
def getbackup(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
if request.method == "POST":
entries = UserProfile.objects.filter(user=request.user)
entry = entries.first()
city = entry.city
country = entry.country
response = requests.get(f'https://api.openweathermap.org/data/2.5/weather?q={city},{country}&appid={key}&units=metric')
content = response.content
dictver = content.decode("UTF-8")
weatherdata = ast.literal_eval(dictver)
weather = [weatherdata['main']['feels_like'],weatherdata['weather'][0]['main'], weatherdata['name'], weatherdata['sys']['country']]
weatherinfo = ww.getweatherinfo(request)
alldata = ww.makeset(weatherinfo)
ww.makemodel(request)
numresults, stringresults = ww.getbackup(alldata)
ww.restartmodel(request)
valuedic[request.user.username] = numresults
#siteinfo[request.user.username] = (stringresults,weather)
ww.model = tf.keras.models.Sequential()
tf.keras.backend.clear_session()
return render(request, 'Combos/index.html', {
'clothes': stringresults,
'weatherinfo': weather
})
#This function will add a good item to the NN
def trainnew(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
if request.method == "POST":
spec = request.POST.getlist('si')
under = request.POST.getlist('fl')
pant = request.POST.getlist('p')
outer = request.POST.getlist('sl')
coat = request.POST.getlist('wc')
#All items are listed with one hot encoding
good = [0,0,0,0,0,0,0,0,0,0,0,0,0]
#The user will be brought back to the same page if they did not fill out an option
if spec[0] == 'Choose...' or under[0] == 'Choose...' or pant[0] == 'Choose...' or outer[0] == 'Choose...' or coat[0] == 'Choose...':
clothes = siteinfo[request.user.username][0]
weather = siteinfo[request.user.username][1]
return render(request, 'Combos/index.html', {
'note':'Please select one option from each section',
'clothes': clothes,
'weatherinfo': weather
})
#A list is made and will contain all of the responses
nums = []
nums.append(int(spec[0]))
nums.append(int(under[0]))
nums.append(int(pant[0]))
nums.append(int(outer[0]))
nums.append(int(coat[0]))
#Each num in nums contains the index of the clothing item they want to wear
for num in nums:
if num != -1:
good[num] = 1.0
#Weather info is retrieved and appended to clothing combo
weatherinfo = ww.getweatherinfo(request)
good = good + weatherinfo
#1 is made for another list to represent a good combo
result = [1]
#The two lists are passed in as well as the request in order to retrain the NN
ww.wt.retrain([good], [result], request)
return HttpResponseRedirect(reverse("index"))
#This will render the how to use page for existing users if they are logged in
def howtouse(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
return render(request, 'Combos/help.html')
#This will render the clothing help page if the user is logged in
def clothes(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
return render(request, 'Combos/clothing.html')
#This will load the loading page before getting to the dashboard if the user is logged in.
def loading(request):
if not request.user.is_authenticated:
return HttpResponseRedirect(reverse("login"))
return render(request, 'Combos/loading.html')
| StarcoderdataPython |
4827109 | value = "not-none"
<caret>if value is None:
print("None")
else:
print("Not none") | StarcoderdataPython |
3251015 | from django.shortcuts import render
from django.contrib.auth.models import User, Group
from .models import Pytanie
from rest_framework import viewsets
from rest_framework import permissions
from .serializers import UserSerializer, GroupSerializer,PytanieSerializer
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
queryset =User.objects.all().order_by('-date_joined')
serializer_class=UserSerializer
permission_classes=[permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
queryset =Group.objects.all()
serializer_class=GroupSerializer
permission_classes=[permissions.IsAuthenticated]
class PytanieViewSet(viewsets.ModelViewSet):
queryset =Pytanie.objects.all()
serializer_class=PytanieSerializer
permission_classes=[permissions.IsAuthenticated]
| StarcoderdataPython |
163786 | """
Code that goes along with the Airflow located at:
http://airflow.readthedocs.org/en/latest/tutorial.html
"""
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from taxi import get_taxi_data, transform_taxi_data, load_taxi_data, get_position_taxi
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2018, 5, 24),
'end_date': datetime(2018, 4, 24),
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG(
'taxi', default_args=default_args,
schedule_interval='0 */10 * * 1',
catchup=False
)
# t1, t2 and t3 are examples of tasks created by instantiating operators
extract_data_taxis = PythonOperator(
task_id='extract_data_taxis',
python_callable=get_taxi_data,
provide_context=True,
op_args=[
'taxi_mtl',
'taxis'
],
dag=dag
)
extract_data_ads = PythonOperator(
task_id='extract_data_ads',
python_callable=get_taxi_data,
provide_context=True,
op_args=[
'taxi_mtl',
'ads'
],
dag=dag
)
extract_data_vehicles = PythonOperator(
task_id='extract_data_vehicles',
python_callable=get_taxi_data,
provide_context=True,
op_args=[
'taxi_mtl',
'vehicles'
],
dag=dag
)
extract_data_positions = PythonOperator(
task_id='extract_data_positions',
python_callable=get_position_taxi,
provide_context=True,
op_args=[
'taxi_mtl'
],
dag=dag
)
transform_data = PythonOperator(
task_id='transform_data_taxi',
python_callable=transform_taxi_data,
provide_context=True,
dag=dag
)
load_data = PythonOperator(
task_id='load_data_taxi',
python_callable=load_taxi_data,
provide_context=True,
op_args=[
'home_taxi_mtl'
],
dag=dag
)
extract_data_ads >> transform_data >> load_data
extract_data_taxis >> transform_data
extract_data_vehicles >> transform_data
extract_data_positions >> transform_data
| StarcoderdataPython |
3348016 | # <NAME>
# 1351040
import numpy as np
import cv2
class LBP:
def compute(self, img, keypoints):
img = np.asarray(img)
img = (1 << 7) * (img[0:-2, 0:-2] >= img[1:-1, 1:-1]) \
+ (1 << 6) * (img[0:-2, 1:-1] >= img[1:-1, 1:-1]) \
+ (1 << 5) * (img[0:-2, 2:] >= img[1:-1, 1:-1]) \
+ (1 << 4) * (img[1:-1, 2:] >= img[1:-1, 1:-1]) \
+ (1 << 3) * (img[2:, 2:] >= img[1:-1, 1:-1]) \
+ (1 << 2) * (img[2:, 1:-1] >= img[1:-1, 1:-1]) \
+ (1 << 1) * (img[2:, :-2] >= img[1:-1, 1:-1]) \
+ (1 << 0) * (img[1:-1, :-2] >= img[1:-1, 1:-1])
res = []
for x in keypoints:
rows = int(x.pt[1] - 1)
cols = int(x.pt[0] - 1)
size = int(x.size)
rows_range = (max(0, rows - size), min(img.shape[0], rows + size + 1))
cols_range = (max(0, cols - size), min(img.shape[1], cols + size + 1))
window = img[rows_range[0]:rows_range[1], cols_range[0]:cols_range[1]].flatten()
hist = np.histogram(window, bins=range(257))
res.append(hist[0])
res = np.array(res)
res = np.reshape(res, (-1, 256))
res = np.uint8(res)
return keypoints, res
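# A minimal usage sketch (not part of the original module): detect ORB keypoints on a
# grayscale image and compute a 256-bin LBP histogram descriptor around each of them.
# 'image.png' is a hypothetical input path.
if __name__ == '__main__':
    img = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)
    keypoints = cv2.ORB_create().detect(img, None)
    keypoints, descriptors = LBP().compute(img, keypoints)
    print(descriptors.shape)  # (number of keypoints, 256)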
| StarcoderdataPython |
3394342 | """Aula 7 - Operadores aritméticos.
+ = Soma
- = Subtração
* = Multiplicação
/ = Divisão
** = Potenciação
// = Divisão Inteira
% = Resto da Divisão
Ordem de Precedência dos Operadores
1° = ()
2° = **
3° = *, /, //, %
4° = +, -
Dica: end = '' (não quebra a linha)
\n (Quebra a linha)"""
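# A small demo sketch (not part of the original lesson file) illustrating the operators
# and the precedence rules described in the docstring above:
a, b = 7, 3
print(a + b, a - b, a * b, a / b)  # 10 4 21 2.3333333333333335
print(a ** b, a // b, a % b)       # 343 2 1
print(2 + 3 * 4 ** 2)              # 50 -> ** first, then *, then + and -
print('same line, ', end='')       # end='' does not break the line
print('then\na new line')          # \n breaks the line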
| StarcoderdataPython |
144998 | import os
import re
import sys, time
import numpy as np
final=''#global vars to save results of op
fresult=''#global vars to save results of for
fcall=''#global vars to save results of call
def check(newcontext):
nc=newcontext
#TODO:cannot deal with multiple problems,need help
lk=nc.count('(')
rk=nc.count(')')
ll=nc.count('[')
rl=nc.count(']')
ld=nc.count('{')
rd=nc.count('}')
kc=lk-rk
lc=ll-rl
dc=ld-rd
while kc>0:
nc+=')'
kc-=1
while lc>0:
nc+=']'
lc-=1
while dc>0:
nc+='}'
dc-=1
'''
if tryflag==1:
i=0
for i in range(0,len(trycache)):
if trycache[i]!=' ':
break
nc=nc+'\n'+trycache[:i]+'except Exception:\n'+trycache[:i]+' '+'pass'
'''
return nc
def recheck(l):
line=l
line=re.sub('return ','',line)
line=re.sub('\[\'.*\'\]','',line)
line=re.sub('\[\".*\"\]','',line)
line=re.sub('\(\'.*\'\)','',line)
line=re.sub('\(\".*\"\)','',line)
line=re.sub('\[[0-9\.\-\s\:]+\]','',line)
line=re.sub('\([0-9\.\-\s\:]+\)','',line)
line=re.sub('\{[0-9\.\-\s\:]+\}','',line)
line=re.sub('\[.*[\+\:]+.*\]','',line)
line=re.sub('\+\=','=',line)
#line=re.sub(' ','',line)
line=re.sub('r\'.*\'\,*\s*','',line)
line=re.sub('b\'.*\'\,*\s*','',line)
line=re.sub('rb\'.*\'\,*\s*','',line)
line=re.sub('f\'.*\'\,*\s*','',line)
line=re.sub('\'.*\'\,*\s*','',line)
line=re.sub('\".*\"\,*\s*','',line)
line=re.sub('r\".*\"\,*\s*','',line)
line=re.sub('b\".*\"\,*\s*','',line)
line=re.sub('rb\".*\"\,*\s*','',line)
line=re.sub('f\".*\"\,*\s*','',line)
line=re.sub('\(\)','',line)
line=re.sub('\{\}','',line)
line=re.sub('\[\]','',line)
#line=recheck(line)
line=line.strip()
return line
def del_arg_op(op):
starti=endi=0
for i in range(0,len(op)):
if op[i]=='(':
starti=i
elif op[i]==')':
endi=i
return op[:starti]+'-->'+op[starti+1:endi]+op[endi+1:len(op)]
def dealarg_for(ty):
#print "yes!"
starti=endi=0
left=right=0
ret=''
for i in range(0,len(ty)):
if ty[i]=='(':
if left==right:
starti=i
left=left+1
else:
left=left+1
elif ty[i]==')':
if left==right+1:
endi=i
right=right+1
#print left,right,starti,endi
if starti+1<endi:
#print "okkk",y[starti+1:endi]+" --> "+y[:starti]
#print "here!",ty,starti+1,endi,left,right
ret=ret+ty[:starti]+"-->"+ty[starti+1:endi]
#print ret
break
else:
right=right+1
#ret=ret[:-3]
return ret+ty[(endi+1):len(ty)]
def dealarg_call(ty):
#print "yes!"
starti=endi=0
left=right=0
ret=''
for i in range(0,len(ty)):
if ty[i]=='(':
if left==right:
starti=i
left=left+1
else:
left=left+1
elif ty[i]==')':
if left==right+1:
endi=i
right=right+1
#print left,right,starti,endi
if starti+1<endi:
#print "okkk",y[starti+1:endi]+" --> "+y[:starti]
#print "here!",ty,starti+1,endi,left,right
ret=ret+ty[:starti]+"-->"+ty[starti+1:endi]+ty[endi+1:len(ty)]
#print ret
break
else:
right=right+1
#ret=ret[:-3]
if ret=='':
return ty
else:
return ret
def dealarg(ty):
starti=endi=0
for i in range(0,len(ty)):
if ty[i]=='(':
starti=i
break
i=len(ty)-1
while(i>0):
if ty[i]==')':
endi=i
break
i=i-1
return ty[:starti]+"-->"+ty[starti+1:endi]+ty[endi+1:len(ty)]
#apart from consdering data-flow relationship, also consider which var is more relevant to target api, so the order of list is inverse to arg.
def dealist(ty):
starti=endi=0
for i in range(0,len(ty)):
if ty[i]=='[':
starti=i
break
i=len(ty)-1
while(i>0):
if ty[i]==']':
endi=i
break
i=i-1
return ty[:starti]+'-->'+ty[starti+1:endi]
def deallist(ty):
#print "yes"
starti=endi=0
for i in range(0,len(ty)):
if ty[i]=='[':
starti=i
elif ty[i]==']':
endi=i
return ty[:starti]+"-->"+ty[starti+1:endi]
def del_multi_arg(ty):
si=ei=0
for i in range(0,len(ty)):
if ty[i]=='(':
si=i
break
i=len(ty)-1
while(i>-1):
if ty[i]==')':
ei=i
break
i=i-1
args=ty[si+1:ei]
#print "args:",args
larg=args.split(',')
sarg=''
for arg in larg:
if '=' in arg:
lr=arg.split('=')
sarg=sarg+lr[1]+'-->'+lr[0]+'|'
else:
sarg=sarg+arg+'|'
sarg=sarg[:-1]
return sarg+'-->'+ty[:si]
def addty(ty,i,lsy):
ret=''
#print ty,i,lsy
if len(lsy)==1:
ret = ty
#print "ret:",ret,"\n"
return ret
else:
for j in range(0,i):
ret=ret+lsy[j]+'-->'
ret=ret+ty+"-->"
for j in range(i+1,len(lsy)):
ret=ret+lsy[j]+'-->'
ret=ret[:-3]
#print "ret:",ret,"\n"
return ret
def delop(op):
lsop=op.split('-->')
global final
for i in range(0,len(lsop)):
ty=lsop[i]
if re.match('[_a-zA-Z0-9\.\[\]\|]+\(.*\)',ty) and ',' in ty and '=' in ty:
#print "yes!",ty
ty=del_multi_arg(ty)
#print "multi_arg:",ty
op=addty(ty,i,lsop)
#print "new op:",op
final=op
delop(op)
elif ',' in ty:
ty=re.sub(',','|',ty)
#print "a|b:",ty
op=addty(ty,i,lsop)
#print "new op:",op
final=op
delop(op)
elif re.match('[_a-zA-Z0-9\.\[\]\|]+\(.*=.*\)',ty):
ty=del_arg_op(ty)
#print "call-op:",ty
op=addty(ty,i,lsop)
#print "new op:",op
final=op
delop(op)
elif '=' in ty:
lr=ty.split('=')
ty=lr[1]+'-->'+lr[0]
#print "deal with op:",ty
op=addty(ty,i,lsop)
final=op
#print "new op:",op
delop(op)
elif re.match('[_a-zA-Z0-9\.\[\]]+\(.*\)',ty):
ty=dealarg_for(ty)
#print "deal with arg:",ty
op=addty(ty,i,lsop)
#print "new op:",op
final=op
delop(op)
elif re.match('[_a-zA-Z0-9\.\[\]]+\[.*\]',ty):
ty=dealist(ty)
#print "deal with list:",ty
op=addty(ty,i,lsop)
#print "new op:",op
final=op
delop(op)
elif '.' in ty:
ty=re.sub('\.','-->',ty)
#print "deal with point:",ty
op=addty(ty,i,lsop)
#print "new op:",op
final=op
delop(op)
def GetMiddleStr(content,startStr,endStr):
startIndex = content.index(startStr)
if startIndex>=0:
startIndex += len(startStr)
endIndex = content.index(endStr)
return content[startIndex:endIndex]
def prex(x):
x=re.sub(' ','',x)
if re.match('\(.*,.*\)\,[a-zA-Z0-9_\'\"\(\)|]+',x) or re.match('[a-zA-Z0-9_\'\"\(\)|]+\,\(.*,.*\)',x) or re.match('\(.*,.*\)\,\(.*,.*\)',x):
x=re.sub('[\(\)]+','',x)
#print "yes:",x
return x
def dealtuple(ty):
my=re.sub(' ','',ty)
my=my[1:-1]
lsmy=my.split(",")
ret=''
for i in lsmy:
ret=ret+i+"|"
ret=ret[:-1]
#print "ret1:",ret
return ret
def deald(ty):
return re.sub(',','|',ty)
def dealcall(ty):
#print "ty:",ty
#print "re:",re.sub('\.','-->',ty)
return re.sub('\.','-->',ty)
def rbl(tempy):
ls=0
rs=0
ln=0
rn=0
ret=0
for i in range(0,len(tempy)):
if tempy[i]=='(':
ls=i
ln+=1
elif tempy[i]==')':
rs=i
rn+=1
if rn>ln:
ret=1
return ret
elif rs<ls:
ret=1
return ret
return ret
def findcircle_call(tempy):
global fcall
if tempy.count('(') != tempy.count(')') or rbl(tempy)!=0:
#global fcall
fcall=''
return
tempy=recheck(tempy)
ls=tempy.split('-->')
for i in range(0,len(ls)):
ty=ls[i]
#print ty
ty=re.sub(' ','',ty)
if ',' in ty:
#print 'yes!',ty
ty=re.sub(',','|',ty)
#print 'later',ty
tempy=addty(ty,i,ls)
fcall=tempy
#print 2,ty,tempy
findcircle_call(tempy)
elif '.' in ty and not re.match('.*\(.*\..*\).*',ty):
#print "ty1",ty
ty=re.sub('\.','-->',ty)
tempy=addty(ty,i,ls)
#print 3,ty,tempy
#global final
fcall=tempy
findcircle_call(tempy)
elif re.match('.*[a-zA-Z0-9_]+\(.*[a-zA-Z0-9_\'\"\(\)\|\-\>\:\[\]\,\.]+\).*',ty) and re.match('.*\(.*[a-zA-Z0-9_]+.*\).*',ty):
ty=re.sub('\(\)','',ty)
ty=re.sub('\(\[\]\)','',ty)
if not (re.match('.*[a-zA-Z0-9_]+\(.*[a-zA-Z0-9_\'\"\(\)\|\-\>\:\[\]\,\.]+\).*',ty) and re.match('.*\(.*[a-zA-Z0-9_]+.*\).*',ty)):
tempy=addty(ty,i,ls)
final=tempy
#print "4.1",ty,tempy
findcircle_call(tempy)
continue
#print ty
ty=dealarg_call(ty)
tempy=addty(ty,i,ls)
#print 4,ty,tempy
#global final
fcall=tempy
findcircle_call(tempy)
elif '.' in ty :
#print "ty2",ty
ty=re.sub('\.','-->',ty)
tempy=addty(ty,i,ls)
#print 5,ty,tempy
#global final
fcall=tempy
findcircle_call(tempy)
elif re.match('[a-zA-Z0-9_]+\[[a-zA-Z0-9_]+\]',ty):
ty=deallist(ty)
tempy=addty(ty,i,ls)
fcall=tempy
#print 6,ty,tempy
findcircle_call(tempy)
#return tempy
def del_call(line):
#print(line)
calls=re.findall('[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\)',line)
#print(calls)
call=''
if len(calls)>0:
call=calls[0]
else:
return call
call=re.sub('\(\'.*\'\)','',call)
call=re.sub('\(\".*\"\)','',call)
call=re.sub('\[\'.*\'\]','',call)
call=re.sub('\[\".*\"\]','',call)
call=re.sub('\(\)','',call)
call=re.sub('\([0-9]+\)','',call)
call=re.sub('\[[0-9:\-]+\]','',call)
call=call.strip()
call=re.sub(' ','',call)
call=recheck(call)
findcircle_call(call)
#print(fcall,'\n')
return fcall
def findcircle(tempy):
global fresult
#print "temp:",tempy
lsy=tempy.split("-->")
#print "lsy:",lsy
for i in range(0,len(lsy)):
ty=lsy[i]
ty=ty.strip()
#print "i:",i,ty
if re.match(r'\(.*,.*\)',ty):
#print "matchtuple:",ty
ty=dealtuple(ty)
#print "addty"
tempy=addty(ty,i,lsy)
fresult=tempy
#print fresult
findcircle(tempy)
elif ',' in ty and '\',\'' not in ty:
#print "matchmulti"
#print "2:",ty,i,lsy
ty=deald(ty)
tempy=addty(ty,i,lsy)
#print "yes!",ty,tempy
fresult=tempy
#print fresult
findcircle(tempy)
elif re.match('.*[a-zA-Z0-9_]+\(.*[a-zA-Z0-9_\'\"\(\)\|\-\>\:]+\).*',ty):
#print "matcharg:",ty
ty=dealarg_for(ty)
#print "addty"
tempy=addty(ty,i,lsy)
fresult=tempy
#print fresult
#print "1:",ty,i,lsy
findcircle(tempy)
elif '.' in ty and '\'\.\'' not in ty:
#print "matchpoint"
ty=dealcall(ty)
tempy=addty(ty,i,lsy)
#print "yes!",tempy
fresult=tempy
#print fresult
findcircle(tempy)
elif re.match('.*\[\'.*\'\].*',ty) or re.match('.*\[\".*\"\].*',ty) or re.match('.*\[[0-9:]+\].*',ty):
#print "yes:",ty
tempy=re.sub('\[.*\]','',ty)
#print "new:",tyy
fresult=tempy
#print fresult
findcircle(tempy)
#elif re.match('[a-zA-Z0-9_]+',ty):
#print "result:",tempy,"\n"
#global fresult
#print "tempy:",ty,tempy
#fresult=tempy
#print lsy
#if ty==lsy[len(lsy)-1]:
#break
#findcircle(tempy)
#return tempy
#fresult=tempy
#return tempy
def delfor(line):
#if re.match('.*\[.*for\s.*\sin\s.*\].*',line):
#return
#forp=line.find('for ')
#print forp
#print line[forp+4:]
#ls=line[forp+4:].split(" in ")
#print ls
#x=ls[0]
#if len(ls) < 2:
#return
#ls2=ls[1].split(":\n")
#print ls2
#y=ls2[0]
#print x
#print y
ops=re.findall('for\s[_a-zA-Z0-9\.\,\s]+\sin\s[_a-zA-Z0-9\,\.\[\]\(\)\{\}\s]+',line)
#print(ops)
s=''
if len(ops)>0:
s=ops[0]
#s=recheck(s)
else:
return s
if s.endswith(','):
s=s[:-1]
if (s.endswith(']') and s.count('[')<s.count(']')) or (s.endswith(')') and s.count('(')<s.count(')')) or (s.endswith('}') and s.count('{')<s.count('}')):
s=s[:-1]
s=recheck(s)
if s.strip().endswith('in'):
return ''
#print(s)
try:
x=GetMiddleStr(s,'for ',' in ')
except Exception:
return ''
#y=GetMiddleStr(line,'in',':')
x=x.strip()
y=s.split(' in ')[1].strip()
#print('x,y')
#print(x,y)
#print "x:",x
#print "START"+"#"+str(num)
#print(line[:-1])
y=re.sub(' ','',y)
x=re.sub(' ','',x)
x=re.sub('\(\)','',x)
y=re.sub('\(\)','',y)
y=re.sub('\[\'.*\'\]','',y)
y=re.sub('\[\".*\"\]','',y)
y=re.sub('\(\'.*\'\)','',y)
y=re.sub('\(\".*\"\)','',y)
y=re.sub('\[[0-9:]+\]','',y)
y=re.sub('\([0-9:]+\)','',y)
y=re.sub('\[.*[\+\:]+.*\]','',y)
y=re.sub('\+\=','',y)
y=re.sub('r\'.*\'\,','',y)
x=re.sub('\[\'.*\'\]','',x)
x=re.sub('\[\".*\"\]','',x)
x=re.sub('\(\'.*\'\)','',x)
x=re.sub('\(\".*\"\)','',x)
x=re.sub('\[[0-9:]+\]','',x)
x=re.sub('\([0-9:]+\)','',x)
x=re.sub('\[.*[\+\:]+.*\]','',x)
x=re.sub('\+\=','',x)
x=re.sub('r\'.*\'\,','',x)
#print(x,y)
#TODO:meici xu t<NAME>
y=recheck2(y)
findcircle(y)
global fresult
if fresult=='':
rety=y
else:
rety=fresult
fresult=''
x=prex(x)
findcircle(x)
if fresult=='':
retx=x
else:
retx=fresult
#print "result:",rety,"-->",retx,"\n"
fresult=''
forx=rety+"-->"+retx
#if forx.count('-->') >10:
#s="START:\n"+line+rety+"-->"+retx+"\n"+"END\n"
s2=rety+"-->"+retx+"\n"
#print(s)
#print(s2)
return s2
def finalcheck(s):
s=re.sub('\*\*','',s)
s=re.sub('\*args','args',s)
s=re.sub('[\+\/\*]','|',s)
s=re.sub('\n','',s)
if s.count('-->')==1:
ls=s.split('-->')
if ls[0]==ls[1]:
s=''
return s
class ShowProcess():
i = 0
max_steps = 0
max_arrow = 50
infoDone = 'done'
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
num_arrow = int(self.i * self.max_arrow / self.max_steps)
num_line = self.max_arrow - num_arrow
percent = self.i * 100.0 / self.max_steps
process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
+ '%.2f' % percent + '%' + '\r'
sys.stdout.write(process_bar)
sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
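# Usage sketch for ShowProcess (illustrative only; not part of the original script):
#   process_bar = ShowProcess(100, 'Finished')
#   for _ in range(100):
#       process_bar.show_process()  # advances the bar by one step
#       time.sleep(0.01)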
def recheck2(l):
line=l
line=re.sub('return ','',line)
line=re.sub('\[.*\]','',line)
line=re.sub('\(.*\)','',line)
line=re.sub('\{.*\}','',line)
line=re.sub('\+\=','=',line)
#line=re.sub(' ','',line)
line=re.sub('r\'.*\'\,*\s*','',line)
line=re.sub('b\'.*\'\,*\s*','',line)
line=re.sub('rb\'.*\'\,*\s*','',line)
line=re.sub('f\'.*\'\,*\s*','',line)
line=re.sub('\'.*\'\,*\s*','',line)
line=re.sub('\".*\"\,*\s*','',line)
line=re.sub('r\".*\"\,*\s*','',line)
line=re.sub('b\".*\"\,*\s*','',line)
line=re.sub('rb\".*\"\,*\s*','',line)
line=re.sub('f\".*\"\,*\s*','',line)
#line=recheck(line)
line=line.strip()
return line
def get_current_dataflow2(current_context,caller):
dataflows=[]
lines=current_context.split('\n')
#process_bar = ShowProcess(len(lines), 'Start to deal with the file')
for line in lines:
if (not caller in line) and (caller!='__all__') :
continue
if not ('.' in line and '(' in line):
continue
line=line.strip()
if line == '' or line.endswith('='):
continue
#print('NOTE!',line)
tpline=line
if line.startswith('#') or line.startswith('def ') or line.startswith('class '):
continue
elif 'lambda' in line:
continue
elif re.match('.*=\s*[0-9\.\:\-]+',line):
continue
line2=re.sub(' ','',line)
if re.match('.*=\'.*\'.*',line2) or re.match('.*=\".*\".*',line2) or re.match('.*=[0-9\.]+.*',line2) or re.match('.*=None.*',line2) or re.match('.*=True.*',line2) or re.match('.*=False.*',line2) or "==" in line2 or line2.endswith('='):
#print('yes!')
continue
#print(tpline,line)
line=re.sub('#.*','',line)
if '=' in line:
#print(line)
#print('yes!')
line=recheck2(line)
if line.endswith('='):
continue
text = re.compile(r".*[a-zA-Z]$")
if not text.match(line):
continue
ops=re.findall('[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}]+\s*=\s*[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}\*\/\-\%\*\,\=\s\+]+',line)
if len(ops)==0:
continue
line=ops[0]
line=re.sub('[\+\-\/\*]+','|',line)
#print('op',tpline,line)
ls=line.split('=')
x=ls[0]
y=ls[1]
x=re.sub('\.','-->',x)
y=re.sub('\.','-->',y)
tf=y+'-->'+x
#print(tf)
opps=re.findall('[\(\{\)\}\[\]\'\"]',tf)
if len(opps)!=0:
continue
tf=tf.strip()
if tf!='' and not tf in dataflows:
dataflows.append(tf)
elif re.match('.*for\s.*\sin\s.*',line):
line=recheck(line)
#print('FOR_EXPR')
#print(file,tpline)
fors=delfor(line)
#print('FOR DATAFLOW:')
#print(str(fors),'\n')
tff=str(fors)
tff=finalcheck(tff)
#print('for',tpline)
#print(tff)
opps=re.findall('[\(\{\)\}\[\]\'\"]',tff)
if len(opps)!=0:
continue
tff=tff.strip()
if tff!='' and not tff in dataflows:
dataflows.append(tff)
#print(tff)
#with open('tmp_dataflow/for_expr.txt','a+') as ff:
#ff.write(file+'#'+str(num)+": "+tpline+'\n'+str(fors)+'\n\n')
elif re.match('.*[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\).*',line) and not line.startswith('def ') and not line.startswith('class '):
#print(file)
#print(line,'\n')
#line=recheck(line)
#print(line)
#cas=del_call(line)
#print('CALL DATAFLOW:')
#print(cas,'\n')
#cas=finalcheck(cas)
calls=re.findall('[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\)',line)
call=''
if len(calls)>0:
call=calls[0]
else:
continue
line=recheck2(call)
line=re.sub('[\+\-\/]+','|',line)
#print('call',tpline,line)
cas=re.sub('\.','-->',line)
#print(cas)
opps=re.findall('[\(\{\)\}\[\]\'\"]',cas)
if len(opps)!=0:
continue
if not '-->' in cas:
continue
cas=cas.strip()
if cas!='' and not cas in dataflows:
dataflows.append(cas)
#print(cas)
#callflow.append(ls2.strip())
#with open('tmp_dataflow/call_expr.txt','a+') as fc:
#fc.write(file+'#'+str(num)+'\n'+line+'\n')
#process_bar.show_process()
newflows=[]
oldflows=dataflows
lens=5*len(dataflows)
used=[0]*lens
for i in range(0,len(dataflows)):
#flag=0
current_flow_end=dataflows[i].split('-->')[-1]
current_flow_head=dataflows[i].split('-->')[0]
if current_flow_end==current_flow_head:
continue
for j in range(i,len(dataflows)):
#print(j,len(dataflows))
current_flow_end=dataflows[i].split('-->')[-1]
next_flow_head=dataflows[j].split('-->')[0]
s1=current_flow_end+'|'
s2='|'+current_flow_end
s3=next_flow_head+'|'
s4='|'+next_flow_head
if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
y=dataflows[j].replace(next_flow_head,'',1)
#y=re.sub(next_flow_head,'',dataflows[j])
newflow=dataflows[i]+y
#print('yes1!')
#print(i,current_flow_end,next_flow_head,s1,s2)
#print(next_flow_head)
#print(dataflows[i])
#print(dataflows[j])
#print(y)
#print(newflow)
if not newflow in newflows:
tmp=[i,newflow]
newflows.append(tmp)
#if not newflow in dataflows:
#dataflows.append(newflow)
#print(newflow)
#dataflows[i]=newflow
#print('yes!')
#print(dataflows[i],' , ',dataflows[j])
#print(newflow)
#i=i-1
#used[j]=1
#del dataflows[j]
#j=j-1
#flag=1
elif s3 in current_flow_end or s4 in current_flow_end:
#x=re.sub(current_flow_end,'',dataflows[i])
x=dataflows[i].replace(current_flow_end,'')
#print('flow_end:',current_flow_end)
#print('xxxx',x)
newflow=x+dataflows[j]
#dataflows[i]=newflow
#print('yes2!')
#print(dataflows[i])
#print(dataflows[j])
#print(x)
#print(newflow)
if not newflow in newflows:
tmp=[i,newflow]
newflows.append(tmp)
#if not newflow in dataflows:
#dataflows.append(newflow)
#print(newflow)
#dataflows[i]=newflow
#print('yes2!')
#print(dataflows[i],' , ',dataflows[j])
#print(newflow)
#i=i-1
#used[j]=1
#del dataflows[j]
#j=j-1
#flag=1
#print('\n')
updateflow=[]
for i in range(0,len(newflows)):
#flag=0
pos=newflows[i][0]
flow=newflows[i][1]
for j in range(pos+1,len(dataflows)):
#print(j,len(dataflows))
current_flow_end=flow.split('-->')[-1]
next_flow_head=dataflows[j].split('-->')[0]
s1=current_flow_end+'|'
s2='|'+current_flow_end
s3=next_flow_head+'|'
s4='|'+next_flow_head
if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
y=dataflows[j].replace(next_flow_head,'',1)
#y=re.sub(next_flow_head,'',dataflows[j])
newflow=flow+y
if not newflow in updateflow:
#print('yes!',newflow)
updateflow.append(newflow)
elif s3 in current_flow_end or s4 in current_flow_end:
#x=re.sub(current_flow_end,'',dataflows[i])
x=flow.replace(current_flow_end,'')
#print('flow_end:',current_flow_end)
#print('xxxx',x)
newflow=x+dataflows[j]
if not newflow in updateflow:
#print('yes!',newflow)
updateflow.append(newflow)
for i in range(0,len(newflows)):
flow=newflows[i][1]
dataflows.append(flow)
#process_bar.show_process()
retflow=[]
for flow in dataflows:
if 'unknown_api' in flow:
retflow.append(flow)
if caller=='__all__':
return dataflows
else:
return retflow
def get_current_dataflow(current_context,caller):
dataflows=[]
lines=current_context.split('\n')
#process_bar = ShowProcess(len(lines), 'Start to deal with the file')
for line in lines:
if (not caller in line) and (caller!='__all__') :
continue
if line.strip()=='':
continue
#print('NOTE!',line)
tpline=line.strip()
line=line.strip()
if line.startswith('#') or line.startswith('def ') or line.startswith('class '):
continue
elif line.endswith('(') or line.endswith('[') or line.endswith('{'):
line=line[:-1]
elif line.startswith(')') or line.startswith('}') or line.startswith(']'):
continue
elif line.count('(') != line.count(')') or line.count('[') != line.count(']') or line.count('{') != line.count('}'):
continue
elif 'lambda' in line:
continue
elif re.match('.*=\s*[0-9\.]+',line.strip()):
continue
line2=re.sub(' ','',line)
if re.match('.*=\'.*\'.*',line2) or re.match('.*=\".*\".*',line2) or re.match('.*=[0-9\.]+.*',line2) or re.match('.*=None.*',line2) or re.match('.*=True.*',line2) or re.match('.*=False.*',line2) or re.match('.*=\{\}.*',line2) or re.match('.*=\(\).*',line2) or re.match('.*=\[\].*',line2) or "==" in line2 or line2.endswith('='):
#print('yes!')
continue
line=re.sub('#.*','',line)
if '=' in line:
#print(line)
#print('yes!')
line=recheck(line)
if line.endswith('='):
continue
if line.endswith(',') or line.endswith(':') or line.endswith('+') or line.endswith('-') or line.endswith('*') or line.endswith('/'):
line=line[:-1].strip()
#print(line)
ops=re.findall('[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}]+\s*=\s*[_a-zA-Z0-9\.\[\]\"\'\(\)\{\}\*\/\-\%\*\,\=\s\+]+',line)
#print(ops)
if len(ops)>0:
s=ops[0]
s=recheck(s)
rs=s.split('=')[1]
ps=re.findall('[\,\-\+\*\/\%]+',rs)
if len(ps)==0 and rs.count(' ')>1:
#print('ignored\n')
continue
elif s.endswith(')') and s.count(')')-s.count('(')==1:
s=s[:-1]
elif s.endswith(', )'):
s=s[:-3]+')'
s=re.sub('\)\,.*$','',s)
s=check(s)
if s.count('(') != s.count(')') or s.count('[') != s.count(']') or s.count('{') != s.count('}'):
#print('ignored\n')
continue
else:
#s=re.sub('\)\,.*$','',s)
#print(s)
s=re.sub(' ','',s)
delop(s)
#print(file)
#print(s,final,'\n')
#print('OP DATAFLOW:')
#print(final,'\n')
tf=final
tf=finalcheck(tf)
if tf!='' and not tf in dataflows:
dataflows.append(tf)
#print(tf)
#with open('tmp_dataflow/op_expr.txt','a+') as fo:
#fo.write(file+'#'+str(num)+": "+tpline+'\n'+s+'\n'+final+'\n\n')
elif re.match('.*for\s.*\sin\s.*',line):
line=recheck(line)
#print('FOR_EXPR')
#print(file,tpline)
fors=delfor(line)
#print('FOR DATAFLOW:')
#print(str(fors),'\n')
tff=str(fors)
tff=finalcheck(tff)
if tff!='' and not tff in dataflows:
dataflows.append(tff)
#print(tff)
#with open('tmp_dataflow/for_expr.txt','a+') as ff:
#ff.write(file+'#'+str(num)+": "+tpline+'\n'+str(fors)+'\n\n')
elif re.match('.*[_a-zA-Z0-9\.\[\]\'\"\(\)\{\}\,\:]+\(.*\).*',line) and not line.startswith('def ') and not line.startswith('class '):
#print(file)
#print(line,'\n')
#line=recheck(line)
#print(line)
cas=del_call(line)
#print('CALL DATAFLOW:')
#print(cas,'\n')
cas=finalcheck(cas)
if cas!='' and not cas in dataflows:
dataflows.append(cas)
#print(cas)
#callflow.append(ls2.strip())
#with open('tmp_dataflow/call_expr.txt','a+') as fc:
#fc.write(file+'#'+str(num)+'\n'+line+'\n')
#process_bar.show_process()
newflows=[]
oldflows=dataflows
lens=5*len(dataflows)
used=[0]*lens
for i in range(0,len(dataflows)):
#flag=0
current_flow_end=dataflows[i].split('-->')[-1]
current_flow_head=dataflows[i].split('-->')[0]
if current_flow_end==current_flow_head:
continue
for j in range(i,len(dataflows)):
#print(j,len(dataflows))
current_flow_end=dataflows[i].split('-->')[-1]
next_flow_head=dataflows[j].split('-->')[0]
s1=current_flow_end+'|'
s2='|'+current_flow_end
s3=next_flow_head+'|'
s4='|'+next_flow_head
if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
y=dataflows[j].replace(next_flow_head,'',1)
#y=re.sub(next_flow_head,'',dataflows[j])
newflow=dataflows[i]+y
#print('yes1!')
#print(i,current_flow_end,next_flow_head,s1,s2)
#print(next_flow_head)
#print(dataflows[i])
#print(dataflows[j])
#print(y)
#print(newflow)
if not newflow in newflows:
tmp=[i,newflow]
newflows.append(tmp)
#if not newflow in dataflows:
#dataflows.append(newflow)
#print(newflow)
#dataflows[i]=newflow
#print('yes!')
#print(dataflows[i],' , ',dataflows[j])
#print(newflow)
#i=i-1
#used[j]=1
#del dataflows[j]
#j=j-1
#flag=1
elif s3 in current_flow_end or s4 in current_flow_end:
#x=re.sub(current_flow_end,'',dataflows[i])
x=dataflows[i].replace(current_flow_end,'')
#print('flow_end:',current_flow_end)
#print('xxxx',x)
newflow=x+dataflows[j]
#dataflows[i]=newflow
#print('yes2!')
#print(dataflows[i])
#print(dataflows[j])
#print(x)
#print(newflow)
if not newflow in newflows:
tmp=[i,newflow]
newflows.append(tmp)
#if not newflow in dataflows:
#dataflows.append(newflow)
#print(newflow)
#dataflows[i]=newflow
#print('yes2!')
#print(dataflows[i],' , ',dataflows[j])
#print(newflow)
#i=i-1
#used[j]=1
#del dataflows[j]
#j=j-1
#flag=1
'''
if flag==0 and used[i]==0:
if not dataflows[i] in newflows:
newflows.append(dataflows[i])
if flag==1:
i=i-1
'''
#print('\n')
updateflow=[]
for i in range(0,len(newflows)):
#flag=0
pos=newflows[i][0]
flow=newflows[i][1]
for j in range(pos+1,len(dataflows)):
#print(j,len(dataflows))
current_flow_end=flow.split('-->')[-1]
next_flow_head=dataflows[j].split('-->')[0]
s1=current_flow_end+'|'
s2='|'+current_flow_end
s3=next_flow_head+'|'
s4='|'+next_flow_head
if current_flow_end == next_flow_head or s1 in next_flow_head or s2 in next_flow_head:
y=dataflows[j].replace(next_flow_head,'',1)
#y=re.sub(next_flow_head,'',dataflows[j])
newflow=flow+y
if not newflow in updateflow:
#print('yes!',newflow)
updateflow.append(newflow)
elif s3 in current_flow_end or s4 in current_flow_end:
#x=re.sub(current_flow_end,'',dataflows[i])
x=flow.replace(current_flow_end,'')
#print('flow_end:',current_flow_end)
#print('xxxx',x)
newflow=x+dataflows[j]
if not newflow in updateflow:
#print('yes!',newflow)
updateflow.append(newflow)
for i in range(0,len(newflows)):
flow=newflows[i][1]
dataflows.append(flow)
#process_bar.show_process()
retflow=[]
for flow in dataflows:
if 'unknown_api' in flow:
retflow.append(flow)
if caller=='__all__':
return dataflows
else:
return retflow
def lcs(X, Y):
# find the length of the strings
m = len(X)
n = len(Y)
L = [[None]*(n + 1) for i in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0 :
L[i][j] = 0
elif X[i-1] == Y[j-1]:
L[i][j] = L[i-1][j-1]+1
else:
L[i][j] = max(L[i-1][j], L[i][j-1])
# L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]
return L[m][n]
# end of function lcs
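# Worked example (illustration only; the strings are not from the original code):
# for X = "dataflow" and Y = "data" the longest common subsequence is "data",
# so lcs("dataflow", "data") == 4, while lcs("abc", "xyz") == 0.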
def get_sim_score(api,token,d):
lcsn=lcs(api,token)
lcsn=float(lcsn)
ret=float((lcsn*2.0) / (float(d)*float(len(api)+len(token))))
#print(api,token,ret)
return ret
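# Worked example (hypothetical values, for illustration only): with
# api="readline", token="read" and distance d=1, lcs() returns 4, so the score
# is (4*2) / (1*(8+4)) = 8/12 ~= 0.67; doubling the distance to d=2 halves it
# to ~0.33, i.e. tokens further away from the API slot contribute less.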
def get_tosim_score(api,maxflow):
if ' ' in maxflow:
flows=maxflow.split(' ')
for flow in flows:
if 'unknown_api' in flow:
mfx=flow
break
else:
mfx=maxflow
ls=mfx.split('-->')
apindex=len(ls)
for k in range(0,len(ls)):
if 'unknown_api' in ls[k]:
apindex=k
tosim=0.0
for i in range(0,len(ls)):
if i!=apindex:
sim_score=get_sim_score(api,ls[i],abs(apindex-i))
tosim+=sim_score
tosim=float(tosim/float(len(ls)))
#print(tosim)
return tosim
def standard(scsk):
scs=scsk
data=[]
for k in scs.keys():
scs[k]=pow(10,scs[k])
data.append(scs[k])
lenth = len(data)
if lenth==0:
return scsk
total = sum(data)
ave = float(total)/lenth
tempsum = sum([pow(data[i] - ave,2) for i in range(lenth)])
tempsum = pow(float(tempsum)/lenth,0.5)
try:
for k in scs.keys():
scs[k] = (scs[k] - ave)/tempsum
scs[k] = 1 / (1 + np.exp(-scs[k]))
except Exception:
return scsk
return scs
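# Worked example (illustrative numbers, not from the original code): for
# scs = {'a': 1.0, 'b': -1.0} the values first become 10**1 = 10 and 10**-1 = 0.1,
# their z-scores are +1 and -1, and the final sigmoid maps them to roughly
# 0.73 and 0.27, so every score ends up squashed into the (0, 1) range.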
def get_ngram_scores(flows,apis,callee):
s=''
#print(apis)
#print(flows)
ngramscore={}
for flow in flows:
s=s+flow+'\n'
with open('output/test.txt','w+') as f:
f.write(s)
#print(s)
#os.chdir('dataflow/')
os.system('srilm-1.7.2/lm/bin/i686-m64/ngram -ppl output/test.txt -order 4 -lm trainfile.lm -debug 2 > output/'+callee+'.ppl')
with open('output/'+callee+'.ppl',encoding='ISO-8859-1') as f:
lines=f.readlines()
for key in apis:
flag=0
for i in range(0,len(lines)):
kname=lines[i].strip().split(' ')
for item in kname:
if item==key:
flag=1
break
if flag==1:
#print(lines[i])
j=i+1
while 'logprob=' not in lines[j]:
j=j+1
score=re.findall('logprob=\s[0-9\-\.]+',lines[j])
ngramscore[key]=float(score[0][9:])
break
if flag==0:
ngramscore[key]=0.0
#ngramscore=standard(ngramscore)
#print(ngramscore)
#ngramscore=sorted(ngramscore.items(), key=lambda x: x[1], reverse=True)
#print(ngramscore)
os.system('rm output/'+callee+'.ppl')
#os.chdir('../')
return ngramscore
def get_ngram_score(apis,current_dataflow,baseflag,basetype,callee):
flows=[]
if baseflag==1:
for api in apis:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
continue
#print(api)
flow=basetype+' '+api
flows.append(flow)
#ngram_score=get_basetype_score(flow)
else:
#print(current_dataflow)
#print(apis)
for flow in current_dataflow:
for api in apis:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
continue
flow1=re.sub('unknown_api',api,flow)
#print(flow1)
flow2=re.sub('-->',' ',flow1)
#print(flow2)
flows.append(flow2)
#print(flows,apis,callee)
dataflow_ngram_scores=get_ngram_scores(flows,apis,callee)
#print('data1',dataflow_ngram_scores)
return dataflow_ngram_scores
def get_api_scores(apis,maxflow,current_dataflow,ft,callee):
dataflow_ngram_score={}
basetypes=['int','str','float','list','dict','set','tuple','buffer','frozenset','complex','bool','unicode','bytes','bytearray']
basetype=''
baseflag=0
for bt in basetypes:
if bt==ft:
#print(bt,api)
basetype=bt
if re.match('List\[.*\]',ft):
#print('list',api)
basetype='list'
ft='list'
elif re.match('Dict\[.*\]',ft):
#print('dict',api)
basetype='dict'
ft='dict'
if basetype!='':
baseflag=1
dataflow_ngram_scores=get_ngram_score(apis,current_dataflow,baseflag,ft,callee)
#print("data",dataflow_ngram_scores)
final_scores={}
tosim_scores={}
for api in apis:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
continue
tosim_scores[api]=get_tosim_score(api,maxflow)
tosim_scores=standard(tosim_scores)
#tosim_scores = sorted(tosim_scores.items(),key = lambda x:x[1],reverse = True)
#print(tosim_scores)
#for k in tosim_scores.keys():
#final_scores[k]=0.5+float(dataflow_ngram_scores[k]+tosim_scores[k])/4.0
dataflow_ngram_scores=sorted(dataflow_ngram_scores.items(), key=lambda x: x[1], reverse=True)
tosim_scores = sorted(tosim_scores.items(),key = lambda x:x[1],reverse = True)
#final_scores= sorted(final_scores.items(),key = lambda x:x[1],reverse = True)
#print(final_scores)
print("NGRAM-SCORE: ",dataflow_ngram_scores[:20])
print("SIMILAR-SCORE: ",tosim_scores[:20])
#print("ADD-SCORE: ",final_scores[:20])
#return final_scores
drank=21
nrank=21
if len(dataflow_ngram_scores)<20:
k=len(dataflow_ngram_scores)
else:
k=20
for i in range(0,k):
if dataflow_ngram_scores[i][0]==callee:
drank=i+1
if tosim_scores[i][0]==callee:
nrank=i+1
print(drank,nrank)
return drank,nrank
def get_dataflow_scores(apis,maxflow,current_dataflow,ft,callee):
dataflow_ngram_score={}
basetypes=['int','str','float','list','dict','set','tuple','buffer','frozenset','complex','bool','unicode','bytes','bytearray']
basetype=''
baseflag=0
for bt in basetypes:
if bt==ft:
#print(bt,api)
basetype=bt
if re.match('List\[.*\]',ft):
#print('list',api)
basetype='list'
ft='list'
elif re.match('Dict\[.*\]',ft):
#print('dict',api)
basetype='dict'
ft='dict'
if basetype!='':
baseflag=1
dataflow_ngram_scores=get_ngram_score(apis,current_dataflow,baseflag,ft,callee)
return dataflow_ngram_scores
def get_tosim_scores(apis,maxflow,current_dataflow,ft,callee):
tosim_scores={}
for api in apis:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
continue
tosim_scores[api]=get_tosim_score(api,maxflow)
#tosim_scores=standard(tosim_scores)
return tosim_scores
import boto3
from EOSS.aws.utils import dev_client, prod_client, user_input, pprint
class Cluster:
def __init__(self, dev=False):
if dev:
self.client = dev_client('ecs')
else:
self.client = prod_client('ecs')
self.cluster_name = 'evaluator-cluster'
def get_or_create_cluster(self):
cluster_arn = self.does_cluster_exist(self.cluster_name)
if cluster_arn is None:
if not user_input('\n\n evaluator-cluster IS ABOUT TO BE CREATED, WOULD YOU LIKE TO CONTINUE (yes/no): '):
exit(0)
response = self.client.create_cluster(
clusterName=self.cluster_name,
capacityProviders=['FARGATE'],
tags=[
{'key': 'name', 'value': 'evaluator-cluster'}
]
)
print('--> CLUSTER CREATE REQUEST RESPONSE', response)
return response['cluster']['clusterArn']
else:
print('---> evaluator-cluster ALREADY EXISTS WITH ARN ', cluster_arn)
return cluster_arn
def does_cluster_exist(self, cluster_name):
print('\n\n ---> CHECKING IF CLUSTER EXISTS: ', cluster_name)
list_cluster_response = self.client.list_clusters()
if 'clusterArns' not in list_cluster_response:
print('--> NO CLUSTERS')
return None
cluster_arns = list_cluster_response['clusterArns']
clusters = self.client.describe_clusters(clusters=cluster_arns, include=['ATTACHMENTS', 'SETTINGS'])['clusters']
for cluster in clusters:
pprint(cluster)
if cluster['clusterName'] == cluster_name:
return cluster['clusterArn']
return None
# _____ _____ _
# | __ \ / ____| (_)
# | |__) | ___ _ __ ___ ___ __ __ ___ | (___ ___ _ __ __ __ _ ___ ___ ___
# | _ / / _ \| '_ ` _ \ / _ \\ \ / // _ \ \___ \ / _ \| '__|\ \ / /| | / __|/ _ \/ __|
# | | \ \| __/| | | | | || (_) |\ V /| __/ ____) || __/| | \ V / | || (__| __/\__ \
# |_| \_\\___||_| |_| |_| \___/ \_/ \___| |_____/ \___||_| \_/ |_| \___|\___||___/
def remove_services(self):
print('\n\n---------- REMOVING CLUSTER SERVICES ----------')
# 1. Get all the services in the evaluator cluster
service_arns = self.get_cluster_service_arns()
if not service_arns:
return 0
# 2. Stop all the tasks for each service in the cluster
service_details = self.get_cluster_service_descriptions(service_arns)
print('\n\n', service_details)
if not user_input('---> Above are the services to be removed. Would you like to continue (yes/no): '):
exit(0)
for details in service_details:
self.stop_service_tasks(details)
self.update_service_desired_task_count(details)
self.delete_service(details)
print('--- FINISHED\n\n')
# Returns a list of service ARNs running on evaluator-cluster
def get_cluster_service_arns(self):
# Check to see if the cluster exists first
cluster_arn = self.does_cluster_exist(self.cluster_name)
if cluster_arn is None:
return []
response = self.client.list_services(
cluster=self.cluster_name,
launchType='FARGATE',
)
if 'serviceArns' not in response:
return []
else:
return response['serviceArns']
# Returns full info of all cluster services
def get_cluster_service_descriptions(self, service_arns):
response = self.client.describe_services(
cluster=self.cluster_name,
services=service_arns,
include=[
'TAGS',
]
)
if 'services' not in response:
return []
else:
return response['services']
def stop_service_tasks(self, service_details):
# 1. List all tasks and filter on service
list_tasks_response = self.client.list_tasks(
cluster=self.cluster_name,
serviceName=service_details['serviceName'],
launchType='FARGATE'
)
if 'taskArns' not in list_tasks_response:
# The service has no tasks
return
task_arns = list_tasks_response['taskArns']
# 2. Stop returned tasks
for task_arn in task_arns:
stop_task_response = self.client.stop_task(task=task_arn)
return
def update_service_desired_task_count(self, service_details, count=0):
response = self.client.update_service(
cluster=self.cluster_name,
service=service_details['serviceName'],
desiredCount=count
)
print('---> UPDATING SERVICE DESIRED TASK COUNT')
return
def delete_service(self, service_details):
response = self.client.delete_service(
cluster=self.cluster_name,
service=service_details['serviceName'],
force=True
)
print('---> DELETING SERVICE', response)
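# Minimal usage sketch (not part of the original module; the dev flag and the
# call order below are assumptions based on the methods above):
#
#   cluster = Cluster(dev=True)
#   arn = cluster.get_or_create_cluster()   # creates evaluator-cluster if it is missing
#   cluster.remove_services()               # later, tear down all FARGATE services on it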
import signnow
import json
if __name__ == "__main__":
signnow.Config(
client_id="0fccdbc73581ca0f9bf8c379e6a96813",
client_secret="<KEY>",
base_url="https://api-eval.signnow.com",
)
# Enter your own credentials
username = ""
password = ""
# Create the access_token for the user
print "Creating access token:"
access_token = signnow.OAuth2.request_token(username, password, "*")
print username + "'s access token: " + access_token["access_token"]
print "The access token's scope: " + access_token["scope"]
print "\n"
# Get the users root folder
print "Getting users root folder:"
root_folder = signnow.Folder.root_folder(access_token["access_token"])
print "Folder name:", root_folder["name"]
print "Folder id:", root_folder["id"]
print "Number of documents in the folder:", root_folder["total_documents"]
print "\n"
# Get the documents folder with its first 50 documents, that are signed, and in descending order by created date.
print 'Getting "Documents" folder:'
documents_folder_id = [
document
for document in root_folder["folders"]
if document["name"] == "Documents"
][0]["id"]
filter_object = {"filters": "signing-status", "filter-values": "signed"}
sort_object = {"sortby": "created", "order": "desc"}
documents_folder = signnow.Folder.get(
access_token["access_token"],
documents_folder_id,
50,
0,
filter_object,
sort_object,
)
print "Folder name:", documents_folder["name"]
print "Folder id:", documents_folder["id"]
print "Total documents that meet criteria:", documents_folder["total_documents"]
print "Number of documents returned:", len(documents_folder["documents"])
print "\n"
# Obtain all documents from Documents folder in groups of 20
print 'Getting all documents in "Documents" folder:'
offset = 0
documents_folder = signnow.Folder.get(
access_token["access_token"], documents_folder_id, 20, offset
)
print "Folder name:", documents_folder["name"]
total_documents = documents_folder["total_documents"]
print "Total number of documents:", total_documents
documents_list = documents_folder["documents"]
while len(documents_list) < total_documents:
offset += 20
documents_folder = signnow.Folder.get(
access_token["access_token"], documents_folder_id, 20, offset
)
documents_list.extend(documents_folder["documents"])
print "The number of documents in my compiled list is equal to total documents:", len(
documents_list
) == total_documents
# 3rdparty/pymdown-extensions/tools/gen_gemoji.py
"""Generate gemoji data."""
import sys
import os
import json
current_dir = os.path.dirname(os.path.abspath(__file__))
U_JOIN = 0x200d
U_VARIATION_SELECTOR_16 = 0xfe0f
U_EXTRA = (U_JOIN, U_VARIATION_SELECTOR_16)
if sys.maxunicode == 0xFFFF:
# For ease of supporting, just require uniseq for both narrow and wide PY27.
def get_code_points(s):
"""Get the Unicode code points."""
pt = []
def is_full_point(p, point):
"""
Check if we have a full code point.
Surrogates are stored in point.
"""
v = ord(p)
if 0xD800 <= v <= 0xDBFF:
del point[:]
point.append(p)
return False
if point and 0xDC00 <= v <= 0xDFFF:
point.append(p)
return True
del point[:]
return True
return [(''.join(pt) if pt else c) for c in s if is_full_point(c, pt)]
def get_ord(c):
"""Get Unicode ordinal number."""
if len(c) == 2:
high, low = [ord(p) for p in c]
ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
else:
ordinal = ord(c)
return ordinal
else:
def get_code_points(s):
"""Get the Unicode code points."""
return [c for c in s]
def get_ord(c):
"""Get Unicode ordinal number."""
return ord(c)
def get_unicode(value):
"""Get Unicode."""
uc = '-'.join(
['%04x' % get_ord(point) for point in get_code_points(value['emoji']) if get_ord(point) not in U_EXTRA]
)
uc_alt = '-'.join(
['%04x' % get_ord(point) for point in get_code_points(value['emoji'])]
)
if uc == uc_alt:
uc_alt = None
return uc, uc_alt
def get_gemoji_specific(value):
"""Get alternate Unicode form or return the original."""
return value['aliases'][0]
def parse(repo, tag):
"""Save test files."""
# Load emoji database
with open(os.path.join(current_dir, 'tags', repo, repo, 'db', 'emoji.json'), 'r') as f:
emojis = json.loads(f.read())
emoji_db = {}
shortnames = set()
aliases = {}
for v in emojis:
short = v['aliases'][0]
shortnames.add(':%s:' % short)
if 'emoji' in v:
uc, uc_alt = get_unicode(v)
emoji_db[':%s:' % short] = {
'name': v.get('description', short),
'unicode': uc,
'category': v['category']
}
if uc_alt:
emoji_db[':%s:' % short]['unicode_alt'] = uc_alt
else:
emoji_db[':%s:' % short] = {
'name': v.get('description', short)
}
for alias in v['aliases'][1:]:
aliases[':%s:' % alias] = ':%s:' % short
# Save test files
for test in ('png', 'entities'):
with open('../tests/extensions/emoji/gemoji (%s).txt' % test, 'w') as f:
f.write('# Emojis\n')
count = 0
for emoji in sorted(shortnames):
f.write(''.join('%s %s<br>\n' % (emoji[1:-1], emoji)))
count += 1
if test != 'png' and count == 10:
break
with open(os.path.join(current_dir, 'tags', repo, repo, 'LICENSE'), 'r') as f:
license_content = f.read()
# Write out essential info
with open('../pymdownx/gemoji_db.py', 'w') as f:
# Dump emoji db to file and strip out PY2 unicode specifiers
f.write('"""Gemoji autogen.\n\nGenerated from gemoji source. Do not edit by hand.\n\n%s"""\n' % license_content)
f.write('from __future__ import unicode_literals\n')
f.write('version = "%s"\n' % tag)
f.write('name = "gemoji"\n')
f.write('emoji = %s\n' % json.dumps(emoji_db, sort_keys=True, indent=4, separators=(',', ': ')))
f.write('aliases = %s\n' % json.dumps(aliases, sort_keys=True, indent=4, separators=(',', ': ')))
from .base import AST
from .nodes import *
from .suite import *
# achievements/admin.py
from models import Achievement, Category, Trophy, CollectionAchievement, Progress, ProgressAchievement, Task, TaskAchievement, TaskProgress
from django.contrib import admin
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.db import models
# set display and search field for category table
class CategoryAdmin(admin.ModelAdmin):
list_display=['name', 'parent_category']
search_fields = ('name', 'parent_category')
# ModelForm for validating, if an user has reached the achievement
class AchievementAdminForm(forms.ModelForm):
class Meta:
model = Achievement
def clean(self):
users = self.cleaned_data.get('users')
progress = Progress.objects.filter(progress_achievement__id = self.instance.id)
taskprogress = TaskProgress.objects.filter(task_achievement__id = self.instance.id)
task_accomplished_user = []
progress_accomplished_user = []
# check, if achievement already exists
if self.instance.id:
# check, if achievement has any users
if users:
# check, if achievement is one of the sub types
try:
progressachievement = ProgressAchievement.objects.get(id = self.instance.id)
except:
try:
taskachievement = TaskAchievement.objects.get(id = self.instance.id)
except:
try:
collectionachievement = CollectionAchievement.objects.get(id = self.instance.id)
except:
# if achievement is not one of them, it can be saved, because there are no requirements, which have to be checked
return self.cleaned_data
else:
# check, if user in CollectionAchievement has accomplished all achievements, which are required in the CollectionAchievement
for achievement in collectionachievement.achievements.all():
for user in users:
if not user in achievement.users.all():
raise ValidationError('This User has not earned this achievement yet')
return self.cleaned_data
else:
# check, if there is any TaskProgress for this TaskAchievement
if not taskprogress:
raise ValidationError('This User has not earned this achievement yet')
else:
for pro in taskprogress:
if pro.user in users:
# check, if user has accomplished all required tasks
if not pro.completed_tasks.count() == taskachievement.tasks.count():
raise ValidationError('This User has not earned this achievement yet')
else:
# check, if users contains only 1 entry
# if not, the user of the accomplished achievement will be saved in an array
if not users.count() == 1:
task_accomplished_user.append(pro.user)
else:
return self.cleaned_data
else:
# check, if TaskProgress contains only 1 entry
if taskprogress.count() == 1:
raise ValidationError('This User has not earned this achievement yet')
# check, if amount of entries in array, which contains the user of the accomplished achievements,
# is the same as the amount of entries of users list
if not len(task_accomplished_user) == users.count():
raise ValidationError('This User has not earned this achievement yet')
else:
return self.cleaned_data
else:
# check, if there is any Progress for this ProgressAchievement
if not progress:
raise ValidationError('This User has not earned this achievement yet')
else:
for pro in progress:
if pro.user in users:
# check, if user has accomplished the required amount
if not pro.achieved_amount == progressachievement.required_amount:
raise ValidationError('This User has not earned this achievement yet')
else:
# check, if users contains only 1 entry
# if not, the user of the accomplished achievement will be saved in an array
if not users.count() == 1:
progress_accomplished_user.append(pro.user)
else:
return self.cleaned_data
else:
# check, if TaskProgress contains only 1 entry
if progress.count() == 1:
raise ValidationError('This User has not earned this achievement yet')
# check, if amount of entries in array, which contains the user of the accomplished achievements,
# is the same as the amount of entries of users list
if not len(progress_accomplished_user) == users.count():
raise ValidationError('This User has not earned this achievement yet')
else:
return self.cleaned_data
else:
return self.cleaned_data
else:
return self.cleaned_data
# set display and search field for achievement table
# include AchievementAdminForm
# set ManyToManyField users to FilteredSelectMultiple
class AchievementAdmin(admin.ModelAdmin):
form = AchievementAdminForm
list_display=['name', 'description', 'category']
search_fields = ('name', 'category')
formfield_overrides = {
models.ManyToManyField: {'widget': FilteredSelectMultiple("user", False)}
}
# set display field for progress table
class ProgressAdmin(admin.ModelAdmin):
list_display=['progress_achievement', 'achieved_amount', 'user']
# ModelForm for validating, if an user has reached the ProgressAchievement
class ProgressAchievementAdminForm(forms.ModelForm):
class Meta:
model = ProgressAchievement
def clean(self):
users = self.cleaned_data.get('users')
required_amount = self.cleaned_data.get('required_amount')
progress = Progress.objects.filter(progress_achievement__id = self.instance.id)
accomplished_user = []
if self.instance.id:
if users:
# check, if there is any Progress for this ProgressAchievement
if not progress:
raise ValidationError('This User has not earned this achievement yet')
else:
for pro in progress:
if pro.user in users:
# check, if user has accomplished the required amount
if not pro.achieved_amount == required_amount:
raise ValidationError('This User has not earned this achievement yet')
else:
# check, if users contains only 1 entry
# if not, the user of the accomplished achievement will be saved in an array
if not users.count() == 1:
accomplished_user.append(pro.user)
else:
return self.cleaned_data
else:
# check, if TaskProgress contains only 1 entry
if progress.count() == 1:
raise ValidationError('This User has not earned this achievement yet')
# check, if amount of entries in array, which contains the user of the accomplished achievements,
# is the same as the amount of entries of users list
if not len(accomplished_user) == users.count():
raise ValidationError('This User has not earned this achievement yet')
else:
return self.cleaned_data
else:
return self.cleaned_data
# if ProgressAchievement is new, it cannot be accomplished yet
elif users:
raise ValidationError('You can not add user for this achievement yet')
else:
return self.cleaned_data
# set display and search field for ProgressAchievement table
# include ProgressAchievementAdminForm
# set ManyToManyField users to FilteredSelectMultiple
class ProgressAchievementAdmin(admin.ModelAdmin):
form = ProgressAchievementAdminForm
list_display=['name', 'description', 'category']
search_fields = ('name', 'category')
formfield_overrides = {
models.ManyToManyField: {'widget': FilteredSelectMultiple("user", False)}
}
# ModelForm for validating, if an user has reached the TaskAchievement
class TaskAchievementAdminForm(forms.ModelForm):
class Meta:
model = TaskAchievement
def clean(self):
users = self.cleaned_data.get('users')
tasks = self.cleaned_data.get('tasks')
progress = TaskProgress.objects.filter(task_achievement__id = self.instance.id)
accomplished_user = []
if self.instance.id:
if users:
# check, if there is any TaskProgress for this TaskAchievement
if not progress:
raise ValidationError('This User has not earned this achievement yet')
else:
for pro in progress:
if pro.user in users:
# check, if user has accomplished all required tasks
if not pro.completed_tasks.count() == tasks.count():
raise ValidationError('This User has not earned this achievement yet')
else:
# check, if users contains only 1 entry
# if not, the user of the accomplished achievement will be saved in an array
if not users.count() == 1:
accomplished_user.append(pro.user)
else:
return self.cleaned_data
else:
# check, if TaskProgress contains only 1 entry
if progress.count() == 1:
raise ValidationError('This User has not earned this achievement yet')
# check, if amount of entries in array, which contains the user of the accomplished achievements,
# is the same as the amount of entries of users list
if not len(accomplished_user) == users.count():
raise ValidationError('This User has not earned this achievement yet')
else:
return self.cleaned_data
else:
return self.cleaned_data
# if TaskAchievement is new, it cannot be accomplished yet
elif users:
raise ValidationError('You can not add user for this achievement yet')
else:
return self.cleaned_data
# set display and search field for TaskAchievement table
# include TaskAchievementAdminForm
# set ManyToManyField tasks to FilteredSelectMultiple
# set ManyToManyField users to FilteredSelectMultiple
class TaskAchievementAdmin(admin.ModelAdmin):
form = TaskAchievementAdminForm
list_display=['name', 'description', 'category']
search_fields = ('name', 'category')
formfield_overrides = {
models.ManyToManyField: {'widget': FilteredSelectMultiple("tasks", False)}
}
formfield_overrides = {
models.ManyToManyField: {'widget': FilteredSelectMultiple("users", False)}
}
# ModelForm for validating, if an user has reached the CollectionAchievement
class CollectionAchievementAdminForm(forms.ModelForm):
class Meta:
model = CollectionAchievement
def clean(self):
users = self.cleaned_data.get('users')
achievements = self.cleaned_data.get('achievements')
if users:
# check, if user in CollectionAchievement has accomplished all achievements, which are required in the CollectionAchievement
for achievement in achievements:
for user in users:
if not user in achievement.users.all():
raise ValidationError('This User has not earned this achievement yet')
return self.cleaned_data
else:
return self.cleaned_data
# set display and search field for CollectionAchievement table
# include CollectionAchievementAdminForm
# set ManyToManyField achievements to FilteredSelectMultiple
class CollectionAchievementAdmin(admin.ModelAdmin):
form = CollectionAchievementAdminForm
list_display=['name', 'description', 'category']
search_fields = ('name', 'category')
formfield_overrides = {
models.ManyToManyField: {'widget': FilteredSelectMultiple("achievements", False)}
}
# set display field for Task table
class TaskAdmin(admin.ModelAdmin):
list_display=['name', 'description']
# set display field for TaskProgress table
# # set ManyToManyField tasks to FilteredSelectMultiple
class TaskProgressAdmin(admin.ModelAdmin):
list_display=['task_achievement', 'user']
formfield_overrides = {
models.ManyToManyField: {'widget': FilteredSelectMultiple("tasks", False)}
}
# set display field for Trophy table
class TrophyAdmin(admin.ModelAdmin):
list_display=['achievement', 'position']
admin.site.register(Achievement, AchievementAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProgressAchievement, ProgressAchievementAdmin)
admin.site.register(Progress, ProgressAdmin)
admin.site.register(TaskAchievement, TaskAchievementAdmin)
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskProgress, TaskProgressAdmin)
admin.site.register(Trophy, TrophyAdmin)
admin.site.register(CollectionAchievement, CollectionAchievementAdmin)
print("Hello world")
# ===========================================================
# File Name: pixel_distance.py
# Author: <NAME>, Georgia Institute of Technology
# Creation Date: 04-25-2019
#
# This file is made available under
# the terms of the BSD license (see the COPYING file).
# ===========================================================
from scipy.spatial import distance_matrix
import numpy as np
import cv2
def px_dist_matches(kpts1, kpts2, geo_info, thresh):
"""
    Inputs:
        kpts1: np.array (Nx2) of keypoint coordinates from image 1
        kpts2: np.array (Mx2) of keypoint coordinates from image 2
        geo_info: dict holding the homography 'H' from image 1 to image 2 and the two image sizes
        thresh: maximum pixel distance for two keypoints to be considered a match
    Returns:
        (kpts1_matched, kpts2_matched, distances, n_relevant_kpts1, n_relevant_kpts2)
"""
homog_1_to_2 = geo_info['H']
if kpts1.ndim > 2:
kpts1 = kpts1[:,:2]
kpts2 = kpts2[:,:2]
kpts1_clean, kpts2_clean = extract_relevant_keypoints(kpts1, kpts2, geo_info)
min_kpts = min(kpts1_clean.shape[0], kpts2_clean.shape[0])
if len(kpts1_clean) == 0 or len(kpts2_clean) == 0 or kpts1_clean is None or kpts2_clean is None:
return np.array([]),np.array([]), np.array([]), 0, 0
kpts1_transformed = transform_points(kpts1_clean, homog_1_to_2)
kpt_distances = distance_matrix(kpts1_transformed, kpts2_clean)
match_indices = perform_greedy_matching(kpt_distances, thresh = thresh)
match_indices = np.array(match_indices)
if match_indices.shape[0] == 0:
return np.array([]),np.array([]), np.array([]), len(kpts1_clean), len(kpts2_clean)
kpts1_matched = kpts1_clean[match_indices[:,0],:]
kpts2_matched = kpts2_clean[match_indices[:,1],:]
dist = kpt_distances[match_indices[:,0],match_indices[:,1]]
return kpts1_matched, kpts2_matched, dist, len(kpts1_clean), len(kpts2_clean)
def extract_relevant_keypoints(kpts1, kpts2, geo_info):
# Helper Homogeneous Vectors
img1_h = geo_info['ima_size'][0]
img1_w = geo_info['ima_size'][1]
img2_h = geo_info['imb_size'][0]
img2_w = geo_info['imb_size'][1]
homog_1_to_2 = geo_info['H']
homog_2_to_1 = np.linalg.inv(homog_1_to_2)
kpts1_in2 = transform_points(kpts1, homog_1_to_2)
kpts2_in1 = transform_points(kpts2, homog_2_to_1)
indx_kpt1= np.where((kpts1_in2[:,0]<=img2_h) & (kpts1_in2[:,0]>=0) & (kpts1_in2[:,1]<=img2_w) & (kpts1_in2[:,1]>=0))
indx_kpt2= np.where((kpts2_in1[:,0]<=img1_h) & (kpts2_in1[:,0]>=0) & (kpts2_in1[:,1]<=img1_w) & (kpts2_in1[:,1]>=0))
return (kpts1[indx_kpt1[0],:], kpts2[indx_kpt2[0],:])
def perform_greedy_matching(kpt_distance_matrix, thresh):
num_kpt1, num_kpt2 = kpt_distance_matrix.shape
pair_dists = []
for i in range(num_kpt1):
for j in range(num_kpt2):
pair_dists += [(i,j,kpt_distance_matrix[i,j])]
pair_dists = np.array(pair_dists)
inds = np.argsort(pair_dists[:,2])
pair_dists = pair_dists[inds]
matches = []
while pair_dists.size > 0:
if pair_dists[0,2] > thresh:
return matches
a,b = pair_dists[0,:2]
matches += [(int(a),int(b))]
pair_dists = pair_dists[1:]
col0_nondup = np.logical_not(pair_dists[:,0]==a)
col1_nondup = np.logical_not(pair_dists[:,1]==b)
non_dup = np.logical_and(col0_nondup,col1_nondup)
pair_dists = pair_dists[non_dup]
return matches
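# Worked example (toy numbers, for illustration only): for the 2x2 distance
# matrix [[0.5, 3.0], [2.0, 0.7]] with thresh=1.0, the candidate pairs are
# sorted by distance, (0, 0) is matched first, then (1, 1); every remaining
# pair either reuses an already matched keypoint or exceeds the threshold,
# so the function returns [(0, 0), (1, 1)].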
def transform_points(kpts, homog):
"""
Args:
- kpts: Numpy n-d array of shape (N,2), representing keypoints detected
in an image
Returns:
- kpts_trans: np array of shape (N,2) representing kpts transformed
by the homograph
"""
kpts_homogeneous = cv2.convertPointsToHomogeneous(kpts)
# (N,1,3)->(N,3) because cv2 adds intermediate axis
kpts_homogeneous = np.squeeze(kpts_homogeneous,axis=1).T
kpts_homogeneous_transformed = np.matmul(homog, kpts_homogeneous).T
kpts_transformed = cv2.convertPointsFromHomogeneous(kpts_homogeneous_transformed)
# (N,1,3)->(N,3) because cv2 has weird axis
kpts_trans = np.squeeze(kpts_transformed,axis=1)
return kpts_trans
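# Illustrative sanity check (not part of the original module): with the identity
# homography the points come back unchanged, e.g.
#
#   pts = np.array([[10.0, 20.0], [30.0, 40.0]], dtype=np.float32)
#   transform_points(pts, np.eye(3))   # -> the same (2, 2) array of points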
# tests/test_entropy_encoders/test_arithmetic_coding.py
from typing import List, Sequence
import hypothesis.strategies as st
from entropy_encoders import arithmetic_coding
from hypothesis import given
EOF = "\n"
text_strategy = st.text(st.characters(blacklist_characters=EOF),
max_size=10**9)
@given(st.lists(text_strategy))
def test_list_of_strings(symbol_list: List):
symbol_list += EOF
enc = arithmetic_coding.encode(symbol_list, EOF)
dec = arithmetic_coding.decode(enc)
assert symbol_list == dec
def test_handwritten():
pt = {
"R": 0.4,
"G": 0.5,
"B": 0.1,
}
string = list("GGB")
enc = arithmetic_coding.encode(string, "B", probability_table=pt)
assert enc.decimal == "83"
dec = arithmetic_coding.decode(enc)
if isinstance(string, str):
dec = "".join(dec)
assert string == dec
# Utils/custom_data_augmenter.py
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
def rotate_segmentation_data(images, masks, percent):
num_of_images = images.shape[0]
    # include the original instances in the final list of augmented data
images_rotated, masks_rotated = list(images), list(masks)
for idx in range(0, num_of_images, int(1/percent)): # 1/percent is the step size
# rotate the image and its mask by degrees in [90, 270, 360]
for angle in [90, 270, 360]:
image_rotated = ImageDataGenerator().apply_transform(images[idx], {'theta': angle})
mask_rotated = ImageDataGenerator().apply_transform(masks[idx], {'theta': angle})
images_rotated.append(image_rotated)
masks_rotated.append(mask_rotated)
images_rotated = np.array(images_rotated)
masks_rotated = np.array(masks_rotated)
return images_rotated, masks_rotated
def fliped_segmentation_data_horizontally(images, masks, percent):
num_of_images = images.shape[0]
    # include the original instances in the final list of augmented data
images_fliped_horizontally, masks_fliped_horizontally = list(images), list(masks)
for idx in range(0, num_of_images, int(1/percent)): # 1/percent is the step size
# flip the image and its mask horizontally
image_fliped_horizontally = ImageDataGenerator().apply_transform(images[idx], {'flip_horizontal': True})
mask_fliped_horizontally = ImageDataGenerator().apply_transform(masks[idx], {'flip_horizontal': True})
images_fliped_horizontally.append(image_fliped_horizontally)
masks_fliped_horizontally.append(mask_fliped_horizontally)
images_fliped_horizontally = np.array(images_fliped_horizontally)
masks_fliped_horizontally = np.array(masks_fliped_horizontally)
return images_fliped_horizontally, masks_fliped_horizontally
def fliped_segmentation_data_vertically(images, masks, percent):
num_of_images = images.shape[0]
    # include the original instances in the final list of augmented data
images_fliped_vertically, masks_fliped_vertically = list(images), list(masks)
for idx in range(0, num_of_images, int(1/percent)): # 1/percent is the step size
# flip the image and its mask vertically
image_fliped_vertically = ImageDataGenerator().apply_transform(images[idx], {'flip_vertical': True})
mask_fliped_vertically = ImageDataGenerator().apply_transform(masks[idx], {'flip_vertical': True})
images_fliped_vertically.append(image_fliped_vertically)
masks_fliped_vertically.append(mask_fliped_vertically)
images_fliped_vertically = np.array(images_fliped_vertically)
masks_fliped_vertically = np.array(masks_fliped_vertically)
return images_fliped_vertically, masks_fliped_vertically
def augment_segmentation_data(images, masks, rotate=False, flip_horizontal=False, flip_vertical=False,
rotate_percent=1, flip_horizontal_percent=1, flip_vertical_percent=1):
augmented_images, augmented_masks = images.copy(), masks.copy()
if rotate:
augmented_images, augmented_masks = rotate_segmentation_data(augmented_images, augmented_masks, rotate_percent)
if flip_horizontal:
augmented_images, augmented_masks = fliped_segmentation_data_horizontally(augmented_images, augmented_masks, flip_horizontal_percent)
if flip_vertical:
        augmented_images, augmented_masks = fliped_segmentation_data_vertically(augmented_images, augmented_masks, flip_vertical_percent)
return augmented_images, augmented_masks
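# Minimal usage sketch (array shapes below are assumptions for illustration;
# any (N, H, W, C) image/mask arrays should work):
#
#   images = np.random.rand(8, 128, 128, 3)
#   masks = np.random.randint(0, 2, size=(8, 128, 128, 1)).astype('float32')
#   aug_images, aug_masks = augment_segmentation_data(
#       images, masks, rotate=True, flip_horizontal=True,
#       rotate_percent=0.5, flip_horizontal_percent=0.5)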
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import StepLR
import torchvision
import torchvision.transforms as transforms
from torchvision import models
import tensorly as tl
import tensorly
from itertools import chain
from tensorly.decomposition import parafac, partial_tucker, matrix_product_state
import os
import matplotlib.pyplot as plt
import numpy as np
import time
def cp_decomposition_conv_layer(layer, rank):
l, f, v, h = parafac(layer.weight.data, rank=rank)[1]
factors = [l, f, v, h]
#print([f.shape for f in factors])
pointwise_s_to_r_layer = torch.nn.Conv2d(
in_channels=f.shape[0],
out_channels=f.shape[1],
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=False)
depthwise_vertical_layer = torch.nn.Conv2d(
in_channels=v.shape[1],
out_channels=v.shape[1],
kernel_size=(v.shape[0], 1),
stride=1, padding=(layer.padding[0], 0),
dilation=layer.dilation,
groups=v.shape[1],
bias=False)
depthwise_horizontal_layer = torch.nn.Conv2d(
in_channels=h.shape[1],
out_channels=h.shape[1],
kernel_size=(1, h.shape[0]),
stride=layer.stride,
padding=(0, layer.padding[0]),
dilation=layer.dilation,
groups=h.shape[1],
bias=False)
pointwise_r_to_t_layer = torch.nn.Conv2d(
in_channels=l.shape[1],
out_channels=l.shape[0],
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=True)
pointwise_r_to_t_layer.bias.data = layer.bias.data
depthwise_horizontal_layer.weight.data = torch.transpose(h, 1, 0).unsqueeze(1).unsqueeze(1)
depthwise_vertical_layer.weight.data = torch.transpose(v, 1, 0).unsqueeze(1).unsqueeze(-1)
pointwise_s_to_r_layer.weight.data = torch.transpose(f, 1, 0).unsqueeze(-1).unsqueeze(-1)
pointwise_r_to_t_layer.weight.data = l.unsqueeze(-1).unsqueeze(-1)
new_layers = [pointwise_s_to_r_layer, depthwise_vertical_layer,
depthwise_horizontal_layer, pointwise_r_to_t_layer]
#for l in new_layers:
# print(l.weight.data.shape)
return nn.Sequential(*new_layers)
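# Usage sketch (the layer sizes and rank below are hypothetical, and this assumes
# the TensorLy PyTorch backend is active, e.g. tl.set_backend('pytorch'); the rank
# is normally hand-picked or estimated and trades accuracy for compression):
#
#   conv = torch.nn.Conv2d(64, 128, kernel_size=3, padding=1)
#   compressed = cp_decomposition_conv_layer(conv, rank=16)
#   out = compressed(torch.randn(1, 64, 32, 32))   # same output shape as conv would give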
def tucker_decomposition_conv_layer(layer, ranks):
core, [last, first] = partial_tucker(layer.weight.data, modes=[0, 1], ranks=ranks, init='svd')
#print(core.shape, last.shape, first.shape)
# A pointwise convolution that reduces the channels from S to R3
first_layer = torch.nn.Conv2d(in_channels=first.shape[0],
out_channels=first.shape[1], kernel_size=1,
stride=1, padding=0, dilation=layer.dilation, bias=False)
# A regular 2D convolution layer with R3 input channels
# and R3 output channels
core_layer = torch.nn.Conv2d(in_channels=core.shape[1],
out_channels=core.shape[0], kernel_size=layer.kernel_size,
stride=layer.stride, padding=layer.padding, dilation=layer.dilation, bias=False)
# A pointwise convolution that increases the channels from R4 to T
last_layer = torch.nn.Conv2d(in_channels=last.shape[1], \
out_channels=last.shape[0], kernel_size=1, stride=1,
padding=0, dilation=layer.dilation, bias=True)
last_layer.bias.data = layer.bias.data
first_layer.weight.data = torch.transpose(first, 1, 0).unsqueeze(-1).unsqueeze(-1)
last_layer.weight.data = last.unsqueeze(-1).unsqueeze(-1)
core_layer.weight.data = core
new_layers = [first_layer, core_layer, last_layer]
#for l in new_layers:
# print(l.weight.data.shape)
return nn.Sequential(*new_layers)
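# Usage sketch (ranks are hypothetical; the two values are the reduced output- and
# input-channel dimensions of the core tensor, again assuming the TensorLy
# PyTorch backend):
#
#   conv = torch.nn.Conv2d(64, 128, kernel_size=3, padding=1)
#   compressed = tucker_decomposition_conv_layer(conv, ranks=[32, 16])
#   out = compressed(torch.randn(1, 64, 32, 32))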
def tt_decomposition_conv_layer(layer, ranks):
data = layer.weight.data
data2D = tl.base.unfold(data, 0)
first, last = matrix_product_state(data2D, rank=ranks)
factors = [first, last]
#print([f.shape for f in factors])
first = first.reshape(data.shape[0], ranks, 1, 1)
last = last.reshape(ranks, data.shape[1], layer.kernel_size[0], layer.kernel_size[1])
pointwise_s_to_r_layer = torch.nn.Conv2d(
in_channels=last.shape[1],
out_channels=last.shape[0],
kernel_size=layer.kernel_size,
stride=layer.stride,
padding=layer.padding,
dilation=layer.dilation,
bias=False)
pointwise_r_to_t_layer = torch.nn.Conv2d(
in_channels=first.shape[1],
out_channels=first.shape[0],
kernel_size=1,
stride=1,
padding=0,
dilation=layer.dilation,
bias=True)
pointwise_r_to_t_layer.bias.data = layer.bias.data
pointwise_s_to_r_layer.weight.data = last
pointwise_r_to_t_layer.weight.data = first
new_layers = [pointwise_s_to_r_layer, pointwise_r_to_t_layer]
#for l in new_layers:
# print(l.weight.data.shape)
    return nn.Sequential(*new_layers)
"""
Useful semantics "macro" instructions built on top of
the primitives.
"""
from __future__ import absolute_import
from cdsl.operands import Operand
from cdsl.typevar import TypeVar
from cdsl.instructions import Instruction, InstructionGroup
from base.types import b1
from base.immediates import imm64
from cdsl.ast import Var
from cdsl.xform import Rtl
from semantics.primitives import bv_from_imm64, bvite
import base.formats # noqa
GROUP = InstructionGroup("primitive_macros", "Semantic macros instruction set")
AnyBV = TypeVar('AnyBV', bitvecs=True, doc="")
x = Var('x')
y = Var('y')
imm = Var('imm')
a = Var('a')
#
# Bool-to-bv1
#
BV1 = TypeVar("BV1", bitvecs=(1, 1), doc="")
bv1_op = Operand('bv1_op', BV1, doc="")
cond_op = Operand("cond", b1, doc="")
bool2bv = Instruction(
'bool2bv', r"""Convert a b1 value to a 1-bit BV""",
ins=cond_op, outs=bv1_op)
v1 = Var('v1')
v2 = Var('v2')
bvone = Var('bvone')
bvzero = Var('bvzero')
bool2bv.set_semantics(
v1 << bool2bv(v2),
Rtl(
bvone << bv_from_imm64(imm64(1)),
bvzero << bv_from_imm64(imm64(0)),
v1 << bvite(v2, bvone, bvzero)
))
GROUP.close()
from player import Player
class Batter(Player):
''' A batter and all of his stats as collected from various means and
manipulated in the base Player class most likely
'''
def __init__(self, name, name_display, id):
"""
Return a batter object
:param name:
        :param name_display: display form of the batter's name
:param id: The mlbgame id for this batter, more useful than name for stats work
"""
super(Batter,self).__init__(name, name_display, id)
        # cache the number of hits the last time the stats were calculated
        self.hits_today = 0
# initialize the containers for all the stats we will be keeping
self._columns = ['row','s_rbi','s_r','s_so','s_h','s_bb','so','h','ao','hbp','bb','slg','obp','ops','avg']
def __repr__(self):
return 'Batter(%s)' % self.name_display
def get_hits(self, index):
'''
Returns the hits for this batter for the index to be used for testing algorithms
This could be made easier, but for now the last entry (yesterday) is -1, etc
:param index: the requested date index (from the start day) of the hits
:return: The number of hits for the batter
'''
        return self._df["h"].iloc[index]
# A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2007-2021 NV Access Limited, Babbage B.V.
from typing import Dict
from utils.displayString import DisplayStringStrEnum
class IsCurrent(DisplayStringStrEnum):
"""Values to use within NVDA to denote 'current' values.
These describe if an item is the current item within a particular kind of selection.
EG aria-current
"""
NO = "false"
YES = "true"
PAGE = "page"
STEP = "step"
LOCATION = "location"
DATE = "date"
TIME = "time"
@property
def _displayStringLabels(self):
return _isCurrentLabels
@property
def displayString(self):
try:
return super().displayString
except KeyError:
return self.YES.displayString
#: Text to use for 'current' values. These describe if an item is the current item
#: within a particular kind of selection. EG aria-current
_isCurrentLabels: Dict[IsCurrent, str] = {
IsCurrent.NO: "", # There is nothing extra to say for items that are not current.
# Translators: Presented when an item is marked as current in a collection of items
IsCurrent.YES: _("current"),
# Translators: Presented when a page item is marked as current in a collection of page items
IsCurrent.PAGE: _("current page"),
# Translators: Presented when a step item is marked as current in a collection of step items
IsCurrent.STEP: _("current step"),
# Translators: Presented when a location item is marked as current in a collection of location items
IsCurrent.LOCATION: _("current location"),
# Translators: Presented when a date item is marked as current in a collection of date items
IsCurrent.DATE: _("current date"),
# Translators: Presented when a time item is marked as current in a collection of time items
IsCurrent.TIME: _("current time"),
}
import pygame
class Score():
"""表示分数的类"""
def __init__(self, init_settings, screen):
"""导入屏幕和设定"""
self.screen = screen
self.init_settings = init_settings
"""导入图片资源"""
self.images = [
pygame.image.load('resources/sprites/font_048.png'),
pygame.image.load('resources/sprites/font_049.png'),
pygame.image.load('resources/sprites/font_050.png'),
pygame.image.load('resources/sprites/font_051.png'),
pygame.image.load('resources/sprites/font_052.png'),
pygame.image.load('resources/sprites/font_053.png'),
pygame.image.load('resources/sprites/font_054.png'),
pygame.image.load('resources/sprites/font_055.png'),
pygame.image.load('resources/sprites/font_056.png'),
pygame.image.load('resources/sprites/font_057.png')
]
def showScore(self, score):
"""显示分数"""
# 计算图片位置
scoreList = [int(i) for i in list(str(score))]
total_width = 0
for digit in scoreList:
total_width += self.images[digit].get_width()
pos_x = (self.init_settings.screen_width - total_width) // 2
pos_y = int(0.2 * self.init_settings.screen_height)
        # blit each digit in turn
for digit in scoreList:
self.screen.blit(self.images[digit], (pos_x, pos_y))
pos_x += self.images[digit].get_width()
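# Minimal usage sketch (object names are assumptions; any settings object exposing
# screen_width and screen_height works):
#
#   score_board = Score(init_settings, screen)
#   score_board.showScore(128)   # draws "128" centred near the top of the screen
#   pygame.display.flip()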
# -*- coding: utf-8 -*-
"""
reNamer, Author <NAME>(https://github.com/Eshleron/reNamer)
Requirements:
- json
- os
- pathlib
- random
- sys
- time
- PyQt5
Python:
- 3.5.4
This file (reName.py) is part of reNamer.
"""
import json
import os
from pathlib import Path
import random
import sys
import time
from PyQt5.QtWidgets import QApplication, QMessageBox, QFileDialog
import gui
class MainApplication(gui.MainWindow):
def __init__(self, parent=None):
super(MainApplication, self).__init__(parent)
'''Connect gui elements to functions'''
self.ui.pick_folder.clicked.connect(self.pick_folder)
self.ui.rnd_name.toggled.connect(self.random_file_name)
self.ui.not_rnd_name.toggled.connect(self.not_random_file_name)
self.ui.file_type.toggled.connect(self.type_name)
self.ui.file_set_name.toggled.connect(self.increment_name)
'''Variables'''
self.name = ''
self.start_value = ''
self.increment = ''
self.folder = ''
self.extensions_dict = {}
self.file_list = []
self.qty_files = 0
def reconnect(self, new_handler):
"""
First, deletes previous connection with handlers for specified signal.
Second, connects specified signal with new_handler.
"""
button = self.ui.launch.clicked
try:
button.disconnect()
except TypeError:
pass
button.connect(new_handler)
def random_file_name(self):
self.ui.file_type.setCheckable(False)
self.ui.file_type.setEnabled(False)
self.ui.file_set_name.setCheckable(False)
self.ui.file_set_name.setEnabled(False)
self.ui.set_basis.setEnabled(False)
self.ui.set_start_value.setEnabled(False)
self.ui.set_increment.setEnabled(False)
self.reconnect(self.random_rename)
def not_random_file_name(self):
self.ui.file_type.setCheckable(True)
self.ui.file_type.setEnabled(True)
self.ui.file_set_name.setCheckable(True)
self.ui.file_set_name.setEnabled(True)
def type_name(self):
self.ui.set_basis.setEnabled(False)
self.ui.set_start_value.setEnabled(False)
self.ui.set_increment.setEnabled(False)
self.reconnect(self.type_rename)
def increment_name(self):
self.ui.set_basis.setEnabled(True)
self.ui.set_start_value.setEnabled(True)
self.ui.set_increment.setEnabled(True)
self.reconnect(self.increment_rename)
def unique_file_name(self, extension):
"""
        Counts how many files share the given extension so that each file can later
        be given a unique, per-extension name. The counter dict has the form:
        {'.docx': 2, '.css': 1, '.html': 1}
"""
try:
if self.extensions_dict[extension] != '':
self.extensions_dict[extension] += 1
except KeyError:
self.extensions_dict[extension] = 1
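    # Worked example (illustrative): after calling unique_file_name('.css') twice
    # and unique_file_name('.html') once, self.extensions_dict is
    # {'.css': 2, '.html': 1}; type_rename() later uses these counts as the new
    # file names and decrements them per renamed file.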
def pick_folder(self):
self.folder = str(QFileDialog.getExistingDirectory(self.ui, "Select Directory"))
self.check_folder()
def check_folder(self):
"""This function counts total files and checks if target folder is empty/not empty."""
self.file_list = []
self.extensions_dict = {}
self.qty_files = 0
if self.folder:
for file in os.walk(self.folder):
self.file_list.append(file)
'''Count quantity of files'''
if self.file_list[0][2]:
for file in self.file_list[0][2]:
extension = str(Path(file).suffix)
self.unique_file_name(extension)
self.qty_files += 1
self.ui.show_path.setStyleSheet('color: green')
self.ui.show_path.setText(self.folder)
self.ui.progress_bar.setRange(1, self.qty_files)
'''Creating visual effect of refilling the bar'''
self.ui.progress_bar.setValue(1)
time.sleep(.01)
else:
'''Check if target folder is empty'''
self.ui.show_path.setStyleSheet('color: #b22900')
self.ui.show_path.setText('Target folder may be empty!\n' + self.folder)
self.ui.show_extensions.hide()
self.ui.show_extensions.setText('')
'''Print dict values'''
dict_values = json.dumps(self.extensions_dict)
if dict_values != '{}':
self.ui.show_extensions.setText(dict_values)
self.ui.show_extensions.show()
def generic_rename(self, name='', start_value='', increment='', func=''):
"""General function for all other types of renaming functions."""
if self.folder:
self.check_folder()
address = self.file_list[0][0]
pb_value = 0
if func == 'increment':
self.name = name
self.start_value = int(start_value) - 1
self.increment = int(increment)
for file in self.file_list[0][2]:
extension = str(Path(file).suffix)
folder_path = address + '/'
obj = folder_path + file
if func == 'type':
self.start_value = ''
self.name = self.extensions_dict[extension]
self.extensions_dict[extension] -= 1
elif func == 'rnd':
self.name = random.randint(100000, 10000000)
elif func == 'increment':
self.start_value += self.increment
try:
os.rename(obj, folder_path + str(self.name) + str(self.start_value) + extension)
pb_value += 1
self.ui.progress_bar.setValue(pb_value)
except FileNotFoundError:
QMessageBox.warning(self.ui, "Warning!", "File not found.")
except FileExistsError:
QMessageBox.warning(self.ui, "Warning", "File already exists.")
def type_rename(self):
self.generic_rename(func='type')
def random_rename(self):
self.generic_rename(func='rnd')
def increment_rename(self):
basis = self.ui.set_basis.text()
start_value = self.ui.set_start_value.text()
increment = self.ui.set_increment.text()
self.generic_rename(name=basis, start_value=start_value, increment=increment, func='increment')
def main():
app = QApplication(sys.argv)
win = MainApplication()
win.show()
app.exec_()
if __name__ == '__main__':
sys.exit(main())
# examples/distributed_dl/distributed_ml.py
from ray_on_aml.core import Ray_On_AML
from azureml.core import Run
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import ray.train.torch
from ray import train
from ray.train import Trainer
from ray import tune
# from ray.tune import Callback
import torch
import torch.nn as nn
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
from torch.optim import Adam
import numpy as np
def train_func(config):
cuda = torch.device('cuda')
n = 100
# create a toy dataset
# data : X - dim = (n, 4)
# target : Y - dim = (n, 1)
X = torch.Tensor(np.random.normal(0, 1, size=(n, 4))).detach().to(cuda)
Y = torch.Tensor(np.random.uniform(0, 1, size=(n, 1))).detach().to(cuda)
# toy neural network : 1-layer
# wrap the model in DDP
model = ray.train.torch.prepare_model(nn.Linear(4, 1))
criterion = nn.MSELoss()
optimizer = Adam(model.parameters(), lr=3e-4)
for epoch in range(config["num_epochs"]):
y = model.forward(X)
# compute loss
loss = criterion(y, Y)
print("epoch ", epoch, " loss ", loss)
# back-propagate loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
# To fetch non-DDP state_dict
# w/o DDP: model.state_dict()
# w/ DDP: model.module.state_dict()
# See: https://github.com/ray-project/ray/issues/20915
state_dict = model.state_dict()
consume_prefix_in_state_dict_if_present(state_dict, "module.")
train.save_checkpoint(epoch=epoch, model_weights=state_dict)
if __name__ == "__main__":
run = Run.get_context()
ws = run.experiment.workspace
ray_on_aml =Ray_On_AML()
ray = ray_on_aml.getRay()
if ray: #in the headnode
print("head node detected")
print("test distributed DL trainining")
print("resources for ray cluster ", ray.cluster_resources())
trainer = Trainer(backend="torch", num_workers=2,use_gpu =True)
trainer.start()
trainer.run(train_func, config={"num_epochs": 5})
trainer.shutdown()
print(trainer.latest_checkpoint)
else:
print("in worker node")
from celery.utils.log import get_task_logger
from wikimetrics.api import CohortService
from report import ReportNode
from metric_report import MetricReport
__all__ = ['MultiProjectMetricReport']
task_logger = get_task_logger(__name__)
class MultiProjectMetricReport(ReportNode):
"""
A node responsbile for running a single metric on a potentially
project-heterogenous cohort. This just abstracts away the task
of grouping the cohort by project and calling a MetricReport on
each project-homogenous list of user_ids.
"""
show_in_ui = False
def __init__(self, cohort, metric, *args, **kwargs):
"""
Parameters:
metric : an instance of a Metric class
cohort : a logical cohort object
args : should include any parameters needed by ReportNode
kwargs : should include any parameters needed by ReportNode
"""
super(MultiProjectMetricReport, self).__init__(*args, **kwargs)
cohort_service = CohortService()
self.children = []
for project, user_ids in cohort_service.get_users_by_project(cohort):
# note that user_ids is actually just an iterator
self.children.append(
MetricReport(metric, cohort.id, user_ids, project, *args, **kwargs)
)
def finish(self, metric_results):
merged_individual_results = {}
for res in metric_results:
merged_individual_results.update(res)
return merged_individual_results
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; -*-
"""Module providing access to third party resources.
Attributes:
LCSI (dict): mapping from verb to a set of classes
BROWN_CLUSTERS (dict): mapping from word to a set of
Brown clusters
    CONNS (set): set of explicit connectives
CONNTOK2CONN (defaultdict): mapping from connective to its
enumerated tokens
CONNTOKS (set): set of tokens which can be part of a connective
INQUIRER (dict): mapping from word to General Inquirer class
STEMMED_INQUIRER (dict): mapping from stemmed word to General Inquirer class
W2V (dict): word2vec embeddings
"""
##################################################################
# Imports
from __future__ import absolute_import, print_function
from dsenser.constants import ENCODING, DFLT_BROWN_PATH, DFLT_ECONN_PATH, \
DFLT_INQUIRER_PATH, DFLT_LCSI_PATH, DFLT_MPQA_PATH, DFLT_W2V_PATH
from dsenser.scorer.conn_head_mapper import ConnHeadMapper
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
import codecs
import gc
import re
import sys
##################################################################
# Constants
BAR_RE = re.compile(r'\|')
CHM = ConnHeadMapper()
ELLIPSIS_RE = re.compile(r"[.][.]+")
EQ_RE = re.compile("=+")
HASH_RE = re.compile("\s*#\s*")
SPACE_RE = re.compile("\s+")
TAB_RE = re.compile("\t+")
PSTEMMER = PorterStemmer()
WORD1 = "word1"
POL = "priorpolarity"
POL_IDX = 0
INTENS = "type"
INTENS_IDX = 1
POS = "pos1"
POS_IDX = 2
NEGATIONS = set(["cannot", "not", "none", "nothing",
"nowhere", "neither", "nor", "nobody",
"hardly", "scarcely", "barely", "never",
"n't", "noone", "havent", "hasnt",
"hadnt", "cant", "couldnt", "shouldnt",
"wont", "wouldnt", "dont", "doesnt",
"didnt", "isnt", "arent", "aint", "no"
])
##################################################################
# Methods
def load_conns(a_fname):
"""Load explicit connectives from file.
Args:
a_fname (str): file containing connectives
Returns:
set: set of loaded connectives
"""
ret = set()
iconn = None
with codecs.open(a_fname, 'r', ENCODING,
errors="replace") as ifile:
for iline in ifile:
iline = iline.strip().lower()
if not iline:
continue
iconn = tuple(tuple(itok.split()) for itok
in ELLIPSIS_RE.split(iline))
ret.add(iconn,)
return ret
def conn2str(a_conn):
"""Convert connective tuple to string.
Args:
a_conn (tuple):
tuple of connective tokens
Returns:
str: connective string
"""
return '_'.join(itok for ipart in a_conn for itok in ipart)
def load_LCSI(a_fname):
"""Load LCSI verb classes from file.
Args:
a_fname (str): file containing LCSI data
Returns:
dict:
mapping from verb to a set of classes
"""
ret = dict()
iword = iclasses = iclass_str = None
with codecs.open(a_fname, 'r', ENCODING,
errors="replace") as ifile:
for iline in ifile:
iline = iline.strip()
if not iline:
continue
iword, iclass_str = SPACE_RE.split(iline, 1)
iword = iword.lower()
iclasses = set(HASH_RE.split(iclass_str))
if iword in ret:
ret[iword].update(iclasses)
else:
ret[iword] = iclasses
return ret
def load_BROWN(a_fname):
"""Load BROWN clusters from file.
Args:
a_fname (str): file containing Brown clusters
Returns:
dict:
mapping from word to a set of Brown clusters
"""
ret = defaultdict(set)
iword = iclass = None
print("Loading {:s}... ".format(a_fname), end="", file=sys.stderr)
with codecs.open(a_fname, 'r', ENCODING,
errors="replace") as ifile:
for iline in ifile:
iline = iline.strip()
if not iline:
continue
iclass, iword, _ = SPACE_RE.split(iline, 2)
iword = iword.lower()
ret[iword].add(iclass)
print("done", file=sys.stderr)
# convert defaultdict back to the normal one
return {k: '|'.join(cls) for k, cls in ret.iteritems()}
def load_INQUIRER(a_fname):
"""Load Inquirer data from file.
Args:
a_fname (str): file containing Inquirer data
Returns:
tuple:
mapping from word to Inquirer classes and the same mapping for stemmed
words
"""
ret = dict()
stem_ret = dict()
iword = iclass = None
with codecs.open(a_fname, 'r', ENCODING,
errors="replace") as ifile:
for iline in ifile:
iline = iline.strip()
if not iline:
continue
iword, iclass = TAB_RE.split(iline, 1)
iword = iword.strip().lower()
iclass = [bool(el) if el else False for el in BAR_RE.split(iclass)]
ret[iword] = iclass
stem_ret[PSTEMMER.stem(iword)] = iclass
# convert defaultdict back to the normal one
return (ret, stem_ret)
def load_MPQA(a_fname):
"""Load MPQA data from file.
Args:
a_fname (str): file containing MPQA data
Returns:
dict:
        mapping from word to a (polarity, intensity, part-of-speech) tuple
        taken from the MPQA subjectivity lexicon
"""
ret = defaultdict(lambda: [None] * 3)
attrs = None
with codecs.open(a_fname, 'r', ENCODING,
errors="replace") as ifile:
for iline in ifile:
iline = iline.strip()
if not iline:
continue
attrs = dict(EQ_RE.split(iword.lower(), 1)
for iword in SPACE_RE.split(iline)
if EQ_RE.search(iword))
ret[attrs[WORD1]] = (attrs[POL], attrs[INTENS], attrs[POS])
# convert defaultdict back to the normal one
return dict(ret.iteritems())
def load_W2V(a_fname):
"""Load Word2Vec data from file.
Args:
a_fname (str): file containing W2V data
Returns:
      Word2Vec:
        the loaded gensim word2vec embedding model
"""
from gensim.models.word2vec import Word2Vec
print("Loading {:s}... ".format(a_fname), end="", file=sys.stderr)
w2v = Word2Vec.load_word2vec_format(a_fname, binary=True)
print("done", file=sys.stderr)
return w2v
##################################################################
# Class
class LoadOnDemand(object):
"""Custom class for deferring loading of huge resources.
Loads resources only if they are actually used.
Attributes:
resource (object or None): loaded resource
cmd (method): method to load the resource
args (list): arguments to pass to ``cmd``
kwargs (dict): keyword arguments to pass to ``cmd``
"""
def __init__(self, a_cmd, *a_args, **a_kwargs):
"""Class cosntructor.
Args:
a_cmd (method): custom method to load the resource
args (list): arguments to pass to ``a_cmd``
kwargs (dict): keyword arguments to pass to ``a_cmd``
"""
self.resource = None
self.cmd = a_cmd
self.args = a_args
self.kwargs = a_kwargs
def __contains__(self, a_name):
"""Proxy method for looking up a word in the resource.
Args:
a_name (str): word to look up in the resource
Note:
forwards the request to the underlying resource
"""
self.load()
return a_name in self.resource
def __getitem__(self, a_name):
"""Proxy method for accessing the resource.
Args:
a_name (str): word to look up in the resource
Note:
forwards the request to the underlying resource
"""
# initialize the resource if needed
self.load()
return self.resource.__getitem__(a_name)
def load(self):
"""Force loading the resource.
Note:
loads the resource
"""
if self.resource is None:
self.resource = self.cmd(*self.args, **self.kwargs)
return self.resource
def unload(self):
"""Unload the resource.
Note:
unloads the resource
"""
if self.resource is not None:
print("Unloading resource '{:s}'...".format(repr(self.resource)),
file=sys.stderr)
del self.resource
self.resource = None
gc.collect()
##################################################################
# Resources
LCSI = load_LCSI(DFLT_LCSI_PATH)
BROWN_CLUSTERS = LoadOnDemand(load_BROWN, DFLT_BROWN_PATH)
CONNS = load_conns(DFLT_ECONN_PATH)
CONNTOK2CONN = defaultdict(list)
itok = None
for iconn in CONNS:
for i, ipart in enumerate(iconn):
itok = ipart[0]
CONNTOK2CONN[itok].append((i, iconn))
for iconns in CONNTOK2CONN.itervalues():
iconns.sort(key=lambda el: el[0])
CONNTOKS = set(CONNTOK2CONN.keys())
INQUIRER, STEMMED_INQUIRER = load_INQUIRER(DFLT_INQUIRER_PATH)
MPQA = load_MPQA(DFLT_MPQA_PATH)
W2V = LoadOnDemand(load_W2V, DFLT_W2V_PATH)
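# Minimal usage sketch for LoadOnDemand (illustrative only; "house" is an arbitrary word):
# the wrapped loader runs the first time the resource is touched and can be released again.
#
# >>> "house" in BROWN_CLUSTERS      # first access triggers load_BROWN(DFLT_BROWN_PATH)
# >>> BROWN_CLUSTERS["house"]        # '|'-joined Brown cluster ids for the word
# >>> BROWN_CLUSTERS.unload()        # free the memory; the next access reloads the file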
import collections
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
USE_WANDB = False # if enabled, logs data on wandb server
class ReplayBuffer:
def __init__(self, buffer_limit):
self.buffer = collections.deque(maxlen=buffer_limit)
def put(self, transition):
self.buffer.append(transition)
def sample_chunk(self, batch_size, chunk_size):
start_idx = np.random.randint(0, len(self.buffer) - chunk_size, batch_size)
s_lst, a_lst, r_lst, s_prime_lst, done_lst = [], [], [], [], []
for idx in start_idx:
for chunk_step in range(idx, idx + chunk_size):
s, a, r, s_prime, done = self.buffer[chunk_step]
s_lst.append(s)
a_lst.append(a)
r_lst.append(r)
s_prime_lst.append(s_prime)
done_lst.append(done)
n_agents, obs_size = len(s_lst[0]), len(s_lst[0][0])
return torch.tensor(s_lst, dtype=torch.float).view(batch_size, chunk_size, n_agents, obs_size), \
torch.tensor(a_lst, dtype=torch.float).view(batch_size, chunk_size, n_agents), \
torch.tensor(r_lst, dtype=torch.float).view(batch_size, chunk_size, n_agents), \
torch.tensor(s_prime_lst, dtype=torch.float).view(batch_size, chunk_size, n_agents, obs_size), \
torch.tensor(done_lst, dtype=torch.float).view(batch_size, chunk_size, 1)
def size(self):
return len(self.buffer)
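# Illustrative sketch of the buffer contract (shapes follow sample_chunk above); the
# transition variables are placeholders for values produced in the rollout loop below.
#
# >>> buffer = ReplayBuffer(buffer_limit=50000)
# >>> buffer.put((state, action, reward, next_state, [int(all(done))]))
# >>> s, a, r, s_prime, d = buffer.sample_chunk(batch_size=32, chunk_size=10)
# >>> s.shape    # torch.Size([32, 10, n_agents, obs_size])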
class MixNet(nn.Module):
def __init__(self, observation_space, hidden_dim=32, hx_size=64, recurrent=False):
super(MixNet, self).__init__()
state_size = sum([_.shape[0] for _ in observation_space])
self.hidden_dim = hidden_dim
self.hx_size = hx_size
self.n_agents = len(observation_space)
self.recurrent = recurrent
hyper_net_input_size = state_size
if self.recurrent:
self.gru = nn.GRUCell(state_size, self.hx_size)
hyper_net_input_size = self.hx_size
self.hyper_net_weight_1 = nn.Linear(hyper_net_input_size, self.n_agents * hidden_dim)
self.hyper_net_weight_2 = nn.Linear(hyper_net_input_size, hidden_dim)
self.hyper_net_bias_1 = nn.Linear(hyper_net_input_size, hidden_dim)
self.hyper_net_bias_2 = nn.Sequential(nn.Linear(hyper_net_input_size, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, 1))
def forward(self, q_values, observations, hidden):
batch_size, n_agents, obs_size = observations.shape
state = observations.view(batch_size, n_agents * obs_size)
x = state
if self.recurrent:
hidden = self.gru(x, hidden)
x = hidden
weight_1 = torch.abs(self.hyper_net_weight_1(x))
weight_1 = weight_1.view(batch_size, self.hidden_dim, n_agents)
bias_1 = self.hyper_net_bias_1(x).unsqueeze(-1)
weight_2 = torch.abs(self.hyper_net_weight_2(x))
bias_2 = self.hyper_net_bias_2(x)
x = torch.bmm(weight_1, q_values.unsqueeze(-1)) + bias_1
x = torch.relu(x)
x = (weight_2.unsqueeze(-1) * x).sum(dim=1) + bias_2
return x, hidden
def init_hidden(self, batch_size=1):
return torch.zeros((batch_size, self.hx_size))
class QNet(nn.Module):
def __init__(self, observation_space, action_space, recurrent=False):
super(QNet, self).__init__()
self.num_agents = len(observation_space)
self.recurrent = recurrent
self.hx_size = 32
for agent_i in range(self.num_agents):
n_obs = observation_space[agent_i].shape[0]
setattr(self, 'agent_feature_{}'.format(agent_i), nn.Sequential(nn.Linear(n_obs, 128),
nn.ReLU(),
nn.Linear(128, self.hx_size),
nn.ReLU()))
if recurrent:
setattr(self, 'agent_gru_{}'.format(agent_i), nn.GRUCell(self.hx_size, self.hx_size))
setattr(self, 'agent_q_{}'.format(agent_i), nn.Linear(self.hx_size, action_space[agent_i].n))
def forward(self, obs, hidden):
q_values = [torch.empty(obs.shape[0], )] * self.num_agents
next_hidden = [torch.empty(obs.shape[0], 1, self.hx_size, )] * self.num_agents
for agent_i in range(self.num_agents):
x = getattr(self, 'agent_feature_{}'.format(agent_i))(obs[:, agent_i, :])
if self.recurrent:
x = getattr(self, 'agent_gru_{}'.format(agent_i))(x, hidden[:, agent_i, :])
next_hidden[agent_i] = x.unsqueeze(1)
q_values[agent_i] = getattr(self, 'agent_q_{}'.format(agent_i))(x).unsqueeze(1)
return torch.cat(q_values, dim=1), torch.cat(next_hidden, dim=1)
def sample_action(self, obs, hidden, epsilon):
out, hidden = self.forward(obs, hidden)
mask = (torch.rand((out.shape[0],)) <= epsilon)
action = torch.empty((out.shape[0], out.shape[1],))
action[mask] = torch.randint(0, out.shape[2], action[mask].shape).float()
action[~mask] = out[~mask].argmax(dim=2).float()
return action, hidden
def init_hidden(self, batch_size=1):
return torch.zeros((batch_size, self.num_agents, self.hx_size))
def train(q, q_target, mix_net, mix_net_target, memory, optimizer, gamma, batch_size, update_iter=10, chunk_size=10,
grad_clip_norm=5):
_chunk_size = chunk_size if q.recurrent else 1
for _ in range(update_iter):
s, a, r, s_prime, done = memory.sample_chunk(batch_size, _chunk_size)
hidden = q.init_hidden(batch_size)
target_hidden = q_target.init_hidden(batch_size)
mix_net_target_hidden = mix_net_target.init_hidden(batch_size)
mix_net_hidden = [torch.empty_like(mix_net_target_hidden) for _ in range(_chunk_size + 1)]
mix_net_hidden[0] = mix_net_target.init_hidden(batch_size)
loss = 0
for step_i in range(_chunk_size):
q_out, hidden = q(s[:, step_i, :, :], hidden)
q_a = q_out.gather(2, a[:, step_i, :].unsqueeze(-1).long()).squeeze(-1)
pred_q, next_mix_net_hidden = mix_net(q_a, s[:, step_i, :, :], mix_net_hidden[step_i])
max_q_prime, target_hidden = q_target(s_prime[:, step_i, :, :], target_hidden.detach())
max_q_prime = max_q_prime.max(dim=2)[0].squeeze(-1)
q_prime_total, mix_net_target_hidden = mix_net_target(max_q_prime, s_prime[:, step_i, :, :],
mix_net_target_hidden.detach())
            target_q = r[:, step_i, :].sum(dim=1, keepdim=True) + (gamma * q_prime_total * (1 - done[:, step_i]))
loss += F.smooth_l1_loss(pred_q, target_q.detach())
done_mask = done[:, step_i].squeeze(-1).bool()
hidden[done_mask] = q.init_hidden(len(hidden[done_mask]))
target_hidden[done_mask] = q_target.init_hidden(len(target_hidden[done_mask]))
mix_net_hidden[step_i + 1][~done_mask] = next_mix_net_hidden[~done_mask]
mix_net_hidden[step_i + 1][done_mask] = mix_net.init_hidden(len(mix_net_hidden[step_i][done_mask]))
mix_net_target_hidden[done_mask] = mix_net_target.init_hidden(len(mix_net_target_hidden[done_mask]))
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(q.parameters(), grad_clip_norm, norm_type=2)
torch.nn.utils.clip_grad_norm_(mix_net.parameters(), grad_clip_norm, norm_type=2)
optimizer.step()
def test(env, num_episodes, q):
score = 0
for episode_i in range(num_episodes):
state = env.reset()
done = [False for _ in range(env.n_agents)]
with torch.no_grad():
hidden = q.init_hidden()
while not all(done):
action, hidden = q.sample_action(torch.Tensor(state).unsqueeze(0), hidden, epsilon=0)
next_state, reward, done, info = env.step(action[0].data.cpu().numpy().tolist())
score += sum(reward)
state = next_state
return score / num_episodes
def main(env_name, lr, gamma, batch_size, buffer_limit, log_interval, max_episodes,
max_epsilon, min_epsilon, test_episodes, warm_up_steps, update_iter, chunk_size,
update_target_interval, recurrent):
# create env.
env = gym.make(env_name)
test_env = gym.make(env_name)
memory = ReplayBuffer(buffer_limit)
# create networks
q = QNet(env.observation_space, env.action_space, recurrent)
q_target = QNet(env.observation_space, env.action_space, recurrent)
q_target.load_state_dict(q.state_dict())
mix_net = MixNet(env.observation_space, recurrent=recurrent)
mix_net_target = MixNet(env.observation_space, recurrent=recurrent)
mix_net_target.load_state_dict(mix_net.state_dict())
optimizer = optim.Adam([{'params': q.parameters()}, {'params': mix_net.parameters()}], lr=lr)
score = 0
for episode_i in range(max_episodes):
epsilon = max(min_epsilon, max_epsilon - (max_epsilon - min_epsilon) * (episode_i / (0.6 * max_episodes)))
state = env.reset()
done = [False for _ in range(env.n_agents)]
with torch.no_grad():
hidden = q.init_hidden()
while not all(done):
action, hidden = q.sample_action(torch.Tensor(state).unsqueeze(0), hidden, epsilon)
action = action[0].data.cpu().numpy().tolist()
next_state, reward, done, info = env.step(action)
memory.put((state, action, (np.array(reward)).tolist(), next_state, [int(all(done))]))
score += sum(reward)
state = next_state
if memory.size() > warm_up_steps:
train(q, q_target, mix_net, mix_net_target, memory, optimizer, gamma, batch_size, update_iter, chunk_size)
        if episode_i % update_target_interval == 0:  # periodically sync the target networks
q_target.load_state_dict(q.state_dict())
mix_net_target.load_state_dict(mix_net.state_dict())
if episode_i % log_interval == 0 and episode_i != 0:
test_score = test(test_env, test_episodes, q)
train_score = score / log_interval
print("#{:<10}/{} episodes , avg train score : {:.1f}, test score: {:.1f} n_buffer : {}, eps : {:.1f}"
.format(episode_i, max_episodes, train_score, test_score, memory.size(), epsilon))
if USE_WANDB:
wandb.log({'episode': episode_i, 'test-score': test_score,
'buffer-size': memory.size(), 'epsilon': epsilon, 'train-score': train_score})
score = 0
env.close()
test_env.close()
if __name__ == '__main__':
# Lets gather arguments
import argparse
parser = argparse.ArgumentParser(description='Qmix')
parser.add_argument('--env-name', required=False, default='ma_gym:Checkers-v0')
parser.add_argument('--seed', type=int, default=1, required=False)
parser.add_argument('--no-recurrent', action='store_true')
parser.add_argument('--max-episodes', type=int, default=10000, required=False)
# Process arguments
args = parser.parse_args()
kwargs = {'env_name': args.env_name,
'lr': 0.001,
'batch_size': 32,
'gamma': 0.99,
'buffer_limit': 50000,
'update_target_interval': 20,
'log_interval': 100,
'max_episodes': args.max_episodes,
'max_epsilon': 0.9,
'min_epsilon': 0.1,
'test_episodes': 5,
'warm_up_steps': 2000,
'update_iter': 10,
'chunk_size': 10, # if not recurrent, internally, we use chunk_size of 1 and no gru cell is used.
'recurrent': not args.no_recurrent}
if USE_WANDB:
import wandb
wandb.init(project='minimal-marl', config={'algo': 'qmix', **kwargs})
main(**kwargs)
import os
import unittest
import k3ut
import k3utfjson
import json
dd = k3ut.dd
this_base = os.path.dirname(__file__)
class TestUTFJson(unittest.TestCase):
def test_load(self):
self.assertEqual(None, k3utfjson.load(None))
self.assertEqual({}, k3utfjson.load('{}'))
# load unicode, result in utf-8
self.assertEqual('我', k3utfjson.load('"\\u6211"'))
self.assertEqual(str, type(k3utfjson.load('"\\u6211"')))
# unicode and string in a dictionary.
obj = '{"a": "\u6211", "b": "1"}'
rst = k3utfjson.load(obj)
self.assertEqual({"a": b"\xe6\x88\x91".decode("utf-8"), "b": "1"}, rst)
self.assertEqual(str, type(rst["a"]))
self.assertEqual(str, type(rst["b"]))
# load utf-8, result in str
rst = k3utfjson.load(b"\xe6\x88\x91")
self.assertEqual('我', rst)
self.assertEqual(str, type(rst))
# load gbk, result in str, in gbk encoding
gbk = b'\xb6\xd4\xd5\xbd\xc6\xbd\xcc\xa8\xb9\xd9\xb7\xbd\xd7\xee\xd0\xc2\xb0\xe6'
self.assertEqual('对战平台官方最新版', k3utfjson.load(gbk, encoding="gbk"))
self.assertEqual(str, type(k3utfjson.load(gbk, encoding="gbk")))
# load any
s = '"\xbb"'
rst = k3utfjson.load(s)
self.assertEqual('\xbb', rst)
self.assertEqual(str, type(rst))
def test_load_backslash_x_encoded(self):
s = '"\x61"'
self.assertEqual('a', k3utfjson.load(s))
s = '"\x61"'
self.assertEqual('a', k3utfjson.load(s))
s = b'\xe6\x88\x91'
self.assertEqual('我', k3utfjson.load(s))
self.assertRaises(json.JSONDecodeError, k3utfjson.load, '"\\"')
self.assertRaises(json.JSONDecodeError, k3utfjson.load, '"\\x"')
self.assertRaises(json.JSONDecodeError, k3utfjson.load, '"\\x6"')
def test_load_decode(self):
self.assertEqual('我', k3utfjson.load('"我"'))
self.assertEqual(u'我', k3utfjson.load('"我"', encoding='utf-8'))
self.assertEqual(str, type(k3utfjson.load('"我"', encoding='utf-8')))
self.assertEqual({'a': u"我"}, k3utfjson.load('{"a": "\\u6211"}'))
self.assertEqual({'a': u"我"}, k3utfjson.load('{"a": "我"}', encoding='utf-8'))
self.assertEqual({'a': "我"}, k3utfjson.load('{"a": "我"}'))
self.assertEqual({'a': "我"}, k3utfjson.load('{"a": "我"}'))
self.assertEqual(["我"], k3utfjson.load('["我"]'))
def test_dump(self):
self.assertEqual('null', k3utfjson.dump(None))
self.assertEqual('{}', k3utfjson.dump({}))
self.assertRaises(TypeError, k3utfjson.dump, b"\xe6\x88\x91", encoding=None)
self.assertRaises(TypeError, k3utfjson.dump, {b"\xe6\x88\x91": 1}, encoding=None)
self.assertRaises(TypeError, k3utfjson.dump, {1: b"\xe6\x88\x91"}, encoding=None)
self.assertRaises(TypeError, k3utfjson.dump, [b"\xe6\x88\x91"], encoding=None)
self.assertRaises(TypeError, k3utfjson.dump, [(b"\xe6\x88\x91",)], encoding=None)
self.assertEqual('"\\u6211"', k3utfjson.dump(u'我', encoding=None))
self.assertEqual("\"" + b'\xb6\xd4'.decode('gbk') + "\"", k3utfjson.dump(u'对', encoding='gbk'))
self.assertEqual("\"" + b"\xe6\x88\x91".decode("utf-8") + "\"", k3utfjson.dump('我', encoding='utf-8'))
self.assertEqual("\"" + b"\xe6\x88\x91".decode("utf-8") + "\"", k3utfjson.dump(u'我'))
self.assertEqual("\"" + b"\xe6\x88\x91".decode("utf-8") + "\"", k3utfjson.dump('我'))
# by default unicode are encoded
self.assertEqual("{\"" + b"\xe6\x88\x91".decode("utf-8") + "\": \"" + b"\xe6\x88\x91".decode("utf-8") + "\"}"
, k3utfjson.dump({"我": "我"}))
self.assertEqual("{\"" + b"\xe6\x88\x91".decode("utf-8") + "\": \"" + b"\xe6\x88\x91".decode("utf-8") + "\"}"
, k3utfjson.dump({"我": u"我"}))
self.assertEqual("{\"" + b"\xe6\x88\x91".decode("utf-8") + "\": \"" + b"\xe6\x88\x91".decode("utf-8") + "\"}"
, k3utfjson.dump({u"我": "我"}))
self.assertEqual("{\"" + b"\xe6\x88\x91".decode("utf-8") + "\": \"" + b"\xe6\x88\x91".decode("utf-8") + "\"}"
, k3utfjson.dump({u"我": u"我"}))
self.assertEqual("[\""+b"\xe6\x88\x91".decode("utf-8") + "\"]",
k3utfjson.dump((u"我",)))
self.assertEqual('{"\\u6211": "\\u6211"}', k3utfjson.dump({u"我": u"我"}, encoding=None))
self.assertEqual('"\\""', k3utfjson.dump('"'))
# encoded chars and unicode chars in one string
self.assertEqual('/aaa\xe7\x89\x88\xe6\x9c\xac/jfkdsl\x01',
k3utfjson.load('"\/aaa\xe7\x89\x88\xe6\x9c\xac\/jfkdsl\\u0001"'))
self.assertEqual(
'{\n "我": "我"\n}', k3utfjson.dump({"我": "我"}, indent=2))
self.assertEqual(
'{\n "我": "我"\n}', k3utfjson.dump({"我": "我"}, indent=4))
import argparse
import pickle
import numpy as np
import torch
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Matern
from model.unconditional_flow import UnconditionalFlow
from model.concrete_dropout import DropoutNet
def add_baseline_args(parser):
# Model params
parser.add_argument('--baseline', type=str, choices=["gp", "dropout"])
parser.add_argument('--kernel', type=str, default='matern', choices=['rbf', 'matern'])
parser.add_argument('--gp_length_scale', type=float, default=1.0)
parser.add_argument('--gp_alpha', type=float, default=1.0)
parser.add_argument('--hidden_units', type=int, default=100)
def get_baseline(args):
path_args = '{}/args.pickle'.format(args.teacher_model)
path_check = '{}/check/checkpoint.pt'.format(args.teacher_model)
with open(path_args, 'rb') as f:
teacher_args = pickle.load(f)
teacher_model = UnconditionalFlow(num_flows=teacher_args.num_flows,
actnorm=teacher_args.actnorm,
affine=teacher_args.affine,
scale_fn_str=teacher_args.scale_fn,
hidden_units=teacher_args.hidden_units,
activation=teacher_args.activation,
range_flow=teacher_args.range_flow,
augment_size=teacher_args.augment_size,
base_dist=teacher_args.base_dist)
checkpoint = torch.load(path_check)
teacher_model.load_state_dict(checkpoint['model'])
print('Loaded weights for teacher model at {}/{} epochs'.format(checkpoint['current_epoch'], teacher_args.epochs))
if args.baseline == "gp":
if args.kernel == 'matern':
kernel = ConstantKernel(1.0, (1e-3, 1e3)) * Matern(args.gp_length_scale, (1e-1, 10.0), nu=1.5)
elif args.kernel == 'rbf':
kernel = ConstantKernel(1.0, (1e-3, 1e3)) * RBF(args.gp_length_scale, (1e-3, 1e3)) # more flexibility
model = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10, alpha=args.gp_alpha)
else:
if args.cond_trans.lower().startswith("split") or args.cond_trans.lower().startswith("multiply"):
cond_size = 1
else:
cond_size = 2
l = 1e-4 # Lengthscale
wr = l**2. / args.train_samples
dr = 2. / args.train_samples
model = DropoutNet(input_size=cond_size,
output_size=2,
hidden_units=args.hidden_units,
weight_regularizer=wr,
dropout_regularizer=dr)
return model, teacher_model, teacher_args.dataset
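# Illustrative wiring sketch (hypothetical driver script; it assumes the remaining attributes
# used above -- teacher_model, cond_trans, train_samples -- are added to `args` elsewhere):
#
# >>> parser = argparse.ArgumentParser()
# >>> add_baseline_args(parser)
# >>> args = parser.parse_args(['--baseline', 'gp', '--kernel', 'rbf'])
# >>> model, teacher, dataset_name = get_baseline(args)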
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello_world():
context = {
'text': 'hello zhiliao, hello ketang'
}
return render_template('index.html', **context)
@app.template_filter('cut')
def cut(value):
value = value.replace('hello', 'welcome')
return value
if __name__ == '__main__':
app.run()
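# Usage note (illustrative): the 'cut' filter registered above is applied inside the Jinja
# template; a hypothetical templates/index.html could render the context like this:
#
# <p>{{ text|cut }}</p>  ->  "welcome zhiliao, welcome ketang"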
"""Implements Document Object Model Level 2 Style Sheets
http://www.w3.org/TR/2000/PR-DOM-Level-2-Style-20000927/stylesheets.html
"""
__all__ = ['MediaList', 'MediaQuery', 'StyleSheet', 'StyleSheetList']
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from .medialist import *
from .mediaquery import *
from .stylesheet import *
from .stylesheetlist import *
# Given an array of meeting time intervals [[s1,e1],[s2,e2],...] (si < ei), where each entry
# has a start and an end time, determine whether a person could attend all of the meetings.
# Example 1:
# Input: [[0,30],[5,10],[15,20]]
# Output: false
# Example 2:
# Input: [[7,10],[2,4]]
# Output: true
# Source: LeetCode (LeetCode-CN)
# Link: https://leetcode-cn.com/problems/meeting-rooms
# Copyright belongs to LeetCode. For commercial reprints please contact LeetCode for official
# authorization; for non-commercial reprints please credit the source.
from typing import List
class Solution:
def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
ln = len(intervals)
if ln == 0:
return True
rev = []
intervals.sort(key=lambda x: x[0])
for i, x in enumerate(intervals):
if i == 0:
rev = x[::]
else:
if rev[1] > x[0]:
return False
else:
rev = x[::]
        return True
from dataclasses import dataclass
from datetime import date, datetime
import mock
from pdfminer.layout import LTChar, LTCurve, LTFigure, LTImage, LTTextBoxHorizontal, LTTextLineHorizontal
from typing import List
from rdr_service.services.consent import files
from tests.helpers.unittest_base import BaseTestCase
class ConsentFileParsingTest(BaseTestCase):
def __init__(self, *args, **kwargs):
super(ConsentFileParsingTest, self).__init__(*args, **kwargs)
self.uses_database = False
def test_vibrent_primary_consent(self):
for consent_example in self._get_vibrent_primary_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.expected_to_be_va_file, consent_file.get_is_va_consent())
def test_vibrent_cabor_consent(self):
for consent_example in self._get_vibrent_cabor_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
def test_vibrent_ehr_consent(self):
for consent_example in self._get_vibrent_ehr_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.expected_to_be_va_file, consent_file.get_is_va_consent())
def test_vibrent_gror_consent(self):
for consent_example in self._get_vibrent_gror_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.has_yes_selected, consent_file.is_confirmation_selected())
def test_vibrent_primary_update_consent(self):
for consent_example in self._get_vibrent_primary_update_test_data():
consent_file = consent_example.file
self.assertEqual(consent_example.expected_signature, consent_file.get_signature_on_file())
self.assertEqual(consent_example.expected_sign_date, consent_file.get_date_signed())
self.assertEqual(consent_example.has_yes_selected, consent_file.is_agreement_selected())
self.assertEqual(consent_example.expected_to_be_va_file, consent_file.get_is_va_consent())
def _get_primary_consent_elements(self):
return [
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='understand the information in this form. All of my questions\n'
),
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='have been answered. I freely and willingly choose to take part in\n'
),
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='the All of Us Research Program.\n'
)
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='Sign Your Full Name: \n')
]
)
]
def _get_vibrent_primary_test_data(self) -> List['PrimaryConsentTestData']:
"""
Builds a list of PDFs that represent the different layouts of Vibrent's primary consent
that have been encountered. Add to this if the code incorrectly parses any Vibrent primary pdf
"""
test_data = []
# elements that usually appear on the signature page
description_elements = self._get_primary_consent_elements()
# Build basic file with signature of Test Name and signing date of August 17, 2019
pdf = self._build_pdf(pages=[
[
*description_elements,
                self._build_form_element(text='Test Name', bbox=(116, 147, 517, 169)),
self._build_form_element(text='Aug 17, 2019', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
                expected_signature='Test Name',
expected_sign_date=date(2019, 8, 17)
)
)
# Build an older style of primary layout, with signature box higher up on the page
pdf = self._build_pdf(pages=[
[
*description_elements,
self._build_form_element(text='Nick', bbox=(116, 585, 517, 605)),
self._build_form_element(text='Dec 25, 2017', bbox=(116, 565, 266, 585))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature='Nick',
expected_sign_date=date(2017, 12, 25)
)
)
# Build basic VA primary file
pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='you will get care at a VA facility')
]
)
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature=None,
expected_sign_date=None,
expected_to_be_va_file=True
)
)
# Build file with an empty text element instead of a signature and date
pdf = self._build_pdf(pages=[
[
*description_elements,
self._build_form_element(text='', bbox=(116, 147, 521, 171)),
self._build_form_element(text='', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature=None,
expected_sign_date=None
)
)
# Build consent with an image instead of a typed signature
pdf = self._build_pdf(pages=[
[
*description_elements,
self._build_form_element(
bbox=(200, 125, 400, 191),
children=[
self._build_pdf_element(cls=LTImage, bbox=(200, 125, 400, 191))
]
),
self._build_form_element(text='December 7, 2018', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature=True,
expected_sign_date=date(2018, 12, 7)
)
)
# Build older style consent with different signature description formatting
pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='this form. All of my questions have been answered. I freely and\n'
),
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='willingly choose to take part in the All of Us Research Program.\n'
),
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
children=[
self._build_pdf_element(LTTextLineHorizontal, text='Sign Your \n'),
self._build_pdf_element(LTTextLineHorizontal, text='Full Name: \n')
]
)
]
),
self._build_form_element(text='2018 Participant', bbox=(116, 147, 521, 171)),
self._build_form_element(text='Feb 19, 2018', bbox=(116, 96, 521, 120))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature='2018 Participant',
expected_sign_date=date(2018, 2, 19)
)
)
# Build Spanish version of the Primary file
pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='Decido participar libremente y por voluntad propia'
)
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(
cls=LTTextLineHorizontal,
children=[
self._build_pdf_element(LTTextLineHorizontal, text='Firme con su nombre completo:')
]
)
]
),
self._build_form_element(text='Spanish Participant', bbox=(116, 147, 517, 169)),
self._build_form_element(text='Mar 3, 2021', bbox=(116, 97, 266, 119))
]
])
test_data.append(
PrimaryConsentTestData(
file=files.VibrentPrimaryConsentFile(pdf=pdf, blob=mock.MagicMock()),
expected_signature='Spanish Participant',
expected_sign_date=date(2021, 3, 3)
)
)
return test_data
def _get_vibrent_cabor_test_data(self) -> List['ConsentTestData']:
"""Builds a list of PDFs that represent the different layouts of Vibrent's CaBOR consent"""
basic_cabor_pdf = self._build_pdf(pages=[
[
self._build_form_element(text='Test cabor', bbox=(116, 100, 517, 140)),
self._build_form_element(text='April 27, 2020', bbox=(500, 100, 600, 140))
]
])
basic_cabor_case = ConsentTestData(
file=files.VibrentCaborConsentFile(pdf=basic_cabor_pdf, blob=mock.MagicMock()),
expected_signature='Test cabor',
expected_sign_date=date(2020, 4, 27)
)
older_cabor_pdf = self._build_pdf(pages=[
[
self._build_form_element(text='2017 Cabor', bbox=(150, 150, 350, 188)),
self._build_form_element(text='Sep 8, 2017', bbox=(434, 153, 527, 182))
]
])
older_cabor_case = ConsentTestData(
file=files.VibrentCaborConsentFile(pdf=older_cabor_pdf, blob=mock.MagicMock()),
expected_signature='2017 Cabor',
expected_sign_date=date(2017, 9, 8)
)
return [basic_cabor_case, older_cabor_case]
def _get_vibrent_ehr_test_data(self) -> List['EhrConsentTestData']:
six_empty_pages = [[], [], [], [], [], []] # The EHR signature is expected to be on the 7th page
basic_ehr_pdf = self._build_pdf(pages=[
*six_empty_pages,
[
self._build_form_element(text='Test ehr', bbox=(125, 150, 450, 180)),
self._build_form_element(text='Dec 21, 2019', bbox=(125, 100, 450, 130))
]
])
basic_ehr_case = EhrConsentTestData(
file=files.VibrentEhrConsentFile(pdf=basic_ehr_pdf, blob=mock.MagicMock()),
expected_signature='Test ehr',
expected_sign_date=date(2019, 12, 21)
)
va_ehr_pdf = self._build_pdf(pages=[
*six_empty_pages,
[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='We may ask you to go to a local clinic to be measured'
),
self._build_form_element(text='Test va ehr', bbox=(125, 150, 450, 180)),
self._build_form_element(text='Oct 10, 2020', bbox=(125, 100, 450, 130))
]
])
va_ehr_case = EhrConsentTestData(
file=files.VibrentEhrConsentFile(pdf=va_ehr_pdf, blob=mock.MagicMock()),
expected_signature='Test va ehr',
expected_sign_date=date(2020, 10, 10),
expected_to_be_va_file=True
)
return [basic_ehr_case, va_ehr_case]
def _get_vibrent_gror_test_data(self) -> List['GrorConsentTestData']:
# The GROR signature is expected to be on the 10th page
nine_empty_pages = [
[], [], [], [], [], [], [], [], []
]
basic_gror_pdf = self._build_pdf(pages=[
*nine_empty_pages,
[
self._build_form_element(
children=[self._build_pdf_element(LTCurve)],
bbox=(65, 470, 75, 480)
),
self._build_form_element(text='Test gror', bbox=(140, 150, 450, 180)),
self._build_form_element(text='Jan 1st, 2021', bbox=(125, 100, 450, 130))
]
])
basic_gror_case = GrorConsentTestData(
file=files.VibrentGrorConsentFile(pdf=basic_gror_pdf, blob=mock.MagicMock()),
expected_signature='Test gror',
expected_sign_date=date(2021, 1, 1),
has_yes_selected=True
)
gror_missing_check = self._build_pdf(pages=[
*nine_empty_pages,
[
self._build_form_element(text='no confirmation', bbox=(140, 150, 450, 180)),
self._build_form_element(text='Feb 1st, 2021', bbox=(125, 100, 450, 130))
]
])
no_confirmation_case = GrorConsentTestData(
file=files.VibrentGrorConsentFile(pdf=gror_missing_check, blob=mock.MagicMock()),
expected_signature='no confirmation',
expected_sign_date=date(2021, 2, 1),
has_yes_selected=False
)
spanish_gror_pdf = self._build_pdf(pages=[
*nine_empty_pages,
[
self._build_pdf_element(
cls=LTTextLineHorizontal,
text='¿Desea conocer alguno de sus resultados de ADN?'
),
self._build_form_element(
children=[self._build_pdf_element(LTCurve)],
bbox=(30, 478, 40, 488)
),
self._build_form_element(text='spanish gror', bbox=(140, 150, 450, 180)),
self._build_form_element(text='May 1st, 2018', bbox=(125, 100, 450, 130))
]
])
spanish_gror_case = GrorConsentTestData(
file=files.VibrentGrorConsentFile(pdf=spanish_gror_pdf, blob=mock.MagicMock()),
expected_signature='spanish gror',
expected_sign_date=date(2018, 5, 1),
has_yes_selected=True
)
return [basic_gror_case, no_confirmation_case, spanish_gror_case]
def _get_vibrent_primary_update_test_data(self) -> List['PrimaryUpdateConsentTestData']:
basic_update_pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='Do you agree to this updated consent?')
]
),
self._build_form_element(
children=[self._build_pdf_element(LTChar, text='4')],
bbox=(34, 669, 45, 683)
),
self._build_form_element(text='Test update', bbox=(116, 146, 521, 170)),
self._build_form_element(text='Jan 1st, 2021', bbox=(116, 96, 521, 120))
]
])
basic_update_case = PrimaryUpdateConsentTestData(
file=files.VibrentPrimaryConsentUpdateFile(
pdf=basic_update_pdf,
blob=mock.MagicMock(),
consent_date=datetime.now()
),
expected_signature='Test update',
expected_sign_date=date(2021, 1, 1),
has_yes_selected=True,
expected_to_be_va_file=False
)
va_update_pdf = self._build_pdf(pages=[
[
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='Do you agree to this updated consent?')
]
),
self._build_pdf_element(
cls=LTTextBoxHorizontal,
children=[
self._build_pdf_element(cls=LTTextLineHorizontal, text='you will get care at a VA facility')
]
),
self._build_form_element(text='Test update', bbox=(116, 146, 521, 170)),
self._build_form_element(text='Jan 1st, 2021', bbox=(116, 96, 521, 120))
]
])
va_update_case = PrimaryUpdateConsentTestData(
file=files.VibrentPrimaryConsentUpdateFile(
pdf=va_update_pdf,
blob=mock.MagicMock(),
consent_date=datetime.now()
),
expected_signature='Test update',
expected_sign_date=date(2021, 1, 1),
has_yes_selected=False,
expected_to_be_va_file=True
)
# Build basic primary file for older version of PrimaryUpdate
pdf = self._build_pdf(pages=[
[
*self._get_primary_consent_elements(),
                self._build_form_element(text='Test Name', bbox=(116, 147, 517, 169)),
self._build_form_element(text='Aug 9, 2020', bbox=(116, 97, 266, 119))
]
])
older_update_case = PrimaryUpdateConsentTestData(
file=files.VibrentPrimaryConsentUpdateFile(
pdf=pdf,
blob=mock.MagicMock(),
consent_date=datetime(2020, 8, 9)
),
expected_signature='Test Name',
expected_sign_date=date(2020, 8, 9),
has_yes_selected=True
)
return [basic_update_case, va_update_case, older_update_case]
@classmethod
def _build_pdf(cls, pages) -> files.Pdf:
"""
Builds a consent_files.Pdf object
:param pages A list where each item represents a page,
and each item is a list of pdf elements for what should be on that page
"""
page_mocks = []
for page_elements in pages:
page_mock = mock.MagicMock()
page_mock.__iter__.return_value = page_elements
page_mocks.append(page_mock)
return files.Pdf(pages=page_mocks)
def _build_pdf_element(self, cls, text: str = None, children: list = None, bbox=None):
"""Create a generic pdf element to add to the page"""
element = mock.MagicMock(spec=cls)
self._set_bbox(bbox, element)
if children:
element.__iter__.return_value = children
if hasattr(element, 'get_text'):
if text is None:
get_text_result = ''.join([child.get_text() for child in children])
else:
get_text_result = text
element.get_text.return_value = get_text_result
return element
def _build_form_element(self, bbox, text: str = None, children: list = None):
"""
Form elements don't have a get_text method, and (at least with the Vibrent PDFs) any text within them is
laid out character by character
"""
element = mock.MagicMock(spec=LTFigure)
self._set_bbox(bbox, element)
if children:
element.__iter__.return_value = children
else:
char_list = []
for char_str in text:
char_element = mock.MagicMock(spec=LTChar)
char_element.get_text.return_value = char_str
char_list.append(char_element)
if text == '':
char_element = mock.MagicMock(spec=LTChar)
char_element.get_text.return_value = ''
char_list.append(char_element)
element.__iter__.return_value = char_list
return element
def _set_bbox(self, bbox, element_mock):
"""Set the data for a PDF element's bounding box on the Mock object"""
if not bbox:
left, bottom = self.fake.random_int(), self.fake.random_int()
right, top = self.fake.random_int() + left, self.fake.random_int() + bottom
bbox = (left, bottom, right, top)
(x0, y0, x1, y1) = bbox
element_mock.x0 = x0
element_mock.y0 = y0
element_mock.x1 = x1
element_mock.y1 = y1
element_mock.width = x1-x0
element_mock.height = y1-y0
element_mock.bbox = bbox
@dataclass
class ConsentTestData:
file: files.ConsentFile
expected_signature: str or bool # Text of the signature, or True if it's an image
expected_sign_date: date or None
@dataclass
class PrimaryConsentTestData(ConsentTestData):
file: files.PrimaryConsentFile
expected_to_be_va_file: bool = False
@dataclass
class EhrConsentTestData(ConsentTestData):
file: files.EhrConsentFile
expected_to_be_va_file: bool = False
@dataclass
class GrorConsentTestData(ConsentTestData):
file: files.GrorConsentFile
has_yes_selected: bool = False
@dataclass
class PrimaryUpdateConsentTestData(ConsentTestData):
file: files.PrimaryConsentUpdateFile
has_yes_selected: bool = False
expected_to_be_va_file: bool = False
#!/usr/bin/env python3
"""
Utilities related to matplotlib text objects.
"""
import matplotlib.patheffects as mpatheffects
import matplotlib.text as mtext
from . import ic # noqa: F401
def _transfer_text(src, dest):
"""
Transfer the input text object properties and content to the destination
text object. Then clear the input object text.
"""
text = src.get_text()
dest.set_color(src.get_color()) # not a font property
dest.set_fontproperties(src.get_fontproperties()) # size, weight, etc.
if not text.strip(): # WARNING: must test strip() (see _align_axis_labels())
return
dest.set_text(text)
src.set_text('')
def _update_text(text, props=None, **kwargs):
"""
Add a monkey patch for ``Text.update`` with pseudo "border" and "bbox"
    properties without wrapping the entire class. This facilitates inset titles.
"""
props = props or {}
props = props.copy() # shallow copy
props.update(kwargs)
# Update border
border = props.pop('border', None)
bordercolor = props.pop('bordercolor', 'w')
borderinvert = props.pop('borderinvert', False)
borderwidth = props.pop('borderwidth', 2)
borderstyle = props.pop('borderstyle', 'miter')
if border:
facecolor, bgcolor = text.get_color(), bordercolor
if borderinvert:
facecolor, bgcolor = bgcolor, facecolor
kw = {
'linewidth': borderwidth,
'foreground': bgcolor,
'joinstyle': borderstyle,
}
text.set_color(facecolor)
text.set_path_effects(
[mpatheffects.Stroke(**kw), mpatheffects.Normal()],
)
elif border is False:
text.set_path_effects(None)
# Update bounding box
# NOTE: We use '_title_pad' and '_title_above' for both titles and a-b-c
# labels because always want to keep them aligned.
# NOTE: For some reason using pad / 10 results in perfect alignment for
# med-large labels. Tried scaling to be font size relative but never works.
pad = text.axes._title_pad / 10 # default pad
bbox = props.pop('bbox', None)
bboxcolor = props.pop('bboxcolor', 'w')
bboxstyle = props.pop('bboxstyle', 'round')
bboxalpha = props.pop('bboxalpha', 0.5)
bboxpad = props.pop('bboxpad', None)
bboxpad = pad if bboxpad is None else bboxpad
if bbox is None:
pass
elif isinstance(bbox, dict): # *native* matplotlib usage
props['bbox'] = bbox
elif not bbox:
props['bbox'] = None # disable the bbox
else:
props['bbox'] = {
'edgecolor': 'black',
'facecolor': bboxcolor,
'boxstyle': bboxstyle,
'alpha': bboxalpha,
'pad': bboxpad,
}
return mtext.Text.update(text, props)
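# Minimal usage sketch (illustrative; `ax` is a hypothetical matplotlib/proplot axes):
#
# >>> t = ax.set_title('Inset title')
# >>> _update_text(t, border=True, bordercolor='k', borderwidth=2)   # outlined text
# >>> _update_text(t, bbox=True, bboxcolor='w', bboxalpha=0.5)       # boxed text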
# Copyright (c) 2010-2013, Regents of the University of California.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
from openvisualizer.utils import buf2int, hex2buf
log = logging.getLogger('SixLowPanFrag')
log.setLevel(logging.INFO)
log.addHandler(logging.NullHandler())
# ============================ parameters ======================================
class ReassembleEntry(object):
def __init__(self, wanted, received, frag):
self.total_bytes = wanted
self.recvd_bytes = received
self.fragments = frag
class Fragmentor(object):
"""
    Class which performs fragmentation and reassembly of 6LoWPAN packets for transport over IEEE 802.15.4 networks.
    This class implements the following RFCs:
* *https://tools.ietf.org/html/rfc4944*
Transmission of IPv6 Packets over IEEE 802.15.4 Networks.
"""
FRAG1_DISPATCH = 0xC0
FRAGN_DISPATCH = 0xE0
FRAG_DISPATCH_MASK = 0xF8
FRAG_SIZE_MASK = 0x7FF
# If L2 security is not active in the network we can use up to 96 bytes of payload per fragment.
# Since openvisualizer is not aware of the security configuration of the network, we use by default a smaller
# fragment payload size.
MAX_FRAGMENT_SIZE = 80
FRAG1_HDR_SIZE = 4
FRAGN_HDR_SIZE = 5
def __init__(self, tag=1):
self.reassemble_buffer = dict()
self.datagram_tag = tag
def do_reassemble(self, lowpan_pkt):
reassembled_pkt = None
# parse fragmentation header
dispatch = lowpan_pkt[0] & self.FRAG_DISPATCH_MASK
datagram_size = buf2int(lowpan_pkt[:2]) & self.FRAG_SIZE_MASK
if dispatch not in [self.FRAG1_DISPATCH, self.FRAGN_DISPATCH]:
return lowpan_pkt
# extract fragmentation tag
datagram_tag = buf2int(lowpan_pkt[2:4])
if dispatch == self.FRAG1_DISPATCH:
payload = lowpan_pkt[4:]
offset = 0
else:
payload = lowpan_pkt[5:]
offset = lowpan_pkt[4]
if datagram_tag in self.reassemble_buffer:
entry = self.reassemble_buffer[datagram_tag]
entry.recvd_bytes += len(payload)
entry.fragments.append((offset, payload))
else:
new_entry = ReassembleEntry(datagram_size, len(payload), [(offset, payload)])
self.reassemble_buffer[datagram_tag] = new_entry
# check if we can reassemble
num_of_frags = 0
used_tag = 0
for tag, entry in self.reassemble_buffer.items():
if entry.total_bytes == entry.recvd_bytes:
frags = sorted(entry.fragments, key=lambda frag: frag[0])
used_tag = tag
num_of_frags = len(frags)
reassembled_pkt = []
for frag in frags:
reassembled_pkt.extend(frag[1])
del self.reassemble_buffer[tag]
if reassembled_pkt is not None:
log.success("[GATEWAY] Reassembled {} frags with tag {} into an IPv6 packet of size {}".format(
num_of_frags, used_tag, len(reassembled_pkt)))
return reassembled_pkt
def do_fragment(self, ip6_pkt):
fragment_list = []
original_length = len(ip6_pkt)
if len(ip6_pkt) <= self.MAX_FRAGMENT_SIZE + self.FRAGN_HDR_SIZE:
return [ip6_pkt]
while len(ip6_pkt) > 0:
frag_header = []
fragment = []
datagram_tag = hex2buf("{:04x}".format(self.datagram_tag))
if len(ip6_pkt) > self.MAX_FRAGMENT_SIZE:
frag_len = self.MAX_FRAGMENT_SIZE
else:
frag_len = len(ip6_pkt)
if len(fragment_list) == 0:
# first fragment
dispatch_size = hex2buf("{:02x}".format((self.FRAG1_DISPATCH << 8) | original_length))
frag_header.extend(dispatch_size)
frag_header.extend(datagram_tag)
else:
# subsequent fragment
dispatch_size = hex2buf("{:02x}".format((self.FRAGN_DISPATCH << 8) | original_length))
                offset = [len(fragment_list) * (self.MAX_FRAGMENT_SIZE // 8)]  # datagram_offset is expressed in units of 8 octets
frag_header.extend(dispatch_size)
frag_header.extend(datagram_tag)
frag_header.extend(offset)
fragment.extend(frag_header)
fragment.extend(ip6_pkt[:frag_len])
fragment_list.append(fragment)
ip6_pkt = ip6_pkt[frag_len:]
# increment the tag for the new set of fragments
self.datagram_tag += 1
log.info("[GATEWAY] Fragmenting incoming IPv6 packet (size: {}) into {} fragments with tag {}".format(
original_length, len(fragment_list), self.datagram_tag - 1))
return fragment_list
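# Illustrative round-trip sketch (assumes `ipv6_packet` is a list of byte values longer than
# one fragment, as handled elsewhere in openvisualizer):
#
# >>> frag = Fragmentor(tag=1)
# >>> fragments = frag.do_fragment(ipv6_packet)     # list of 6LoWPAN fragments
# >>> reassembled = None
# >>> for f in fragments:
# ...     reassembled = frag.do_reassemble(f)       # returns the packet once all parts arrive
# >>> assert reassembled == ipv6_packet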
"""
# f_it package
"""
from .fit import FIt
from .version import version as __version__ # noqa: F401
from .version import version_tuple as __version_info__ # noqa: F401
__all__ = ["FIt"]
'''
<NAME>'s birthday is in next month. This time he is planning to invite N of his friends. He wants to distribute some chocolates to all of his friends after party. He went to a shop to buy a packet of chocolates.
At chocolate shop, each packet is having different number of chocolates. He wants to buy such a packet which contains number of chocolates, which can be distributed equally among all of his friends.
Help Mr. X to buy such a packet.
Input:
First line contains T, number of test cases.
Each test case contains two integers, N and M. where is N is number of friends and M is number number of chocolates in a packet.
Output:
In each test case output "Yes" if he can buy that packet and "No" if he can't buy that packet.
Constraints:
1<=T<=20
1<=N<=100
1<=M<=10^5
SAMPLE INPUT
2
5 14
3 21
SAMPLE OUTPUT
No
Yes
Explanation
Test Case 1:
There is no way such that he can distribute 14 chocolates among 5 friends equally.
Test Case 2:
There are 21 chocolates and 3 friends, so he can distribute chocolates eqally. Each friend will get 7 chocolates.
'''
t = int(input())
for _ in range(t):
    n, m = map(int, input().split())
    print("Yes" if m % n == 0 else "No")
from nltk import word_tokenize, pos_tag
from nltk.corpus import wordnet as wn
import progressbar
class SemanticChecker:
def __init__(self, req, minScore=0.75, maxScore=1.0):
self.requirements = req
self.similarities = []
self.threshold = minScore
self.maxScore = maxScore
self.valueRanges = {}
def penn_to_wn(self, tag):
""" Convert between a Penn Treebank tag to a simplified Wordnet tag """
result = None
for l in ('N', 'V', 'J', 'R'):
if tag.startswith(l):
result = l.lower()
if l == 'J':
result = 'a'
break
return result
def tagged_to_synset(self, word, tag):
wn_tag = self.penn_to_wn(tag)
if wn_tag is None:
return None
try:
return wn.synsets(word, wn_tag)[0]
except:
return None
def getSimilarity(self, sentence1, sentence2):
""" compute the sentence similarity using Wordnet """
# Tokenize and tag
sentence1 = pos_tag(word_tokenize(sentence1))
sentence2 = pos_tag(word_tokenize(sentence2))
# Get the synsets for the tagged words
synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]
synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]
# Filter out the Nones
synsets1 = [ss for ss in synsets1 if ss]
synsets2 = [ss for ss in synsets2 if ss]
score, count = 0.0, 0
# For each word in the first sentence
for synset in synsets1:
# Get the similarity value of the most similar word in the other sentence
            best_score = None
            for ss in synsets2:
                sc = synset.path_similarity(ss)
                if sc is not None and (best_score is None or sc > best_score):
                    best_score = sc
# Check that the similarity could have been computed
if best_score is not None:
score += best_score
count += 1
# Average the values
if count > 0:
score /= count
else:
score = 0
return score
def check(self):
similarities = []
counter=0
self.valueRanges={}
print("Using threshold %s to %s" % (self.threshold, self.maxScore))
with progressbar.ProgressBar(max_value=len(self.requirements)) as bar:
for reqkey1, req1 in self.requirements.items():
for reqkey2, req2 in self.requirements.items():
if reqkey1 != reqkey2 and not self.isDuplicatePair(similarities, [req1, req2]):
score = self.getSimilarity(req1.getFullText(), req2.getFullText())
self.updateRanges(score)
if score >= self.threshold and score <= self.maxScore:
similarities.append( { "reqs": [req1, req2], "score": score } )
counter = counter + 1
bar.update(counter)
return (similarities, self.valueRanges)
def isDuplicatePair(self, pool, newPair):
result = False
for item in pool:
s = item['score']
pair = item['reqs']
if newPair[0] in pair and newPair[1] in pair:
result = True
break
return result
def updateRanges(self, score):
if score < 0:
score = 0
v = int(score*10)
if v not in self.valueRanges.keys():
self.valueRanges[v] = 0
self.valueRanges[v] += 1
def prettyPrint(self, similarities):
for s in similarities:
print("Req1: %s\nReq2: %s\n*** Score: %s\n" % (s['reqs'][0], s['reqs'][1], s['score']))
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import httplib2
import json
import urllib
# returns a name escaped so it can be used in a url.
def getSafeName(name):
safeChars = '@:./'
return urllib.quote(name, safeChars)
def bootstrap(key, namespace, triggerName, main, actualParameters):
try:
result = main(actualParameters)
http = httplib2.Http()
url = 'https://openwhisk.ng.bluemix.net/api/v1/namespaces/%(namespace)s/triggers/%(name)s' % {
'namespace': urllib.quote(namespace),
'name': getSafeName(triggerName)
}
headers = {'Content-type': 'application/json' }
auth = base64.encodestring(key).replace('\n', '')
headers['Authorization'] = 'Basic %s' % auth
payload = json.dumps(result)
response, content = http.request(url, 'POST', headers=headers, body=payload)
except:
pass
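# Illustrative example of the escaping above ('@', ':', '.', and '/' are kept as-is):
#
# >>> getSafeName('my namespace/trigger name')
# 'my%20namespace/trigger%20name'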
from app.core import App
DEFAULT_CYKIT_ADDRESS = 'localhost'
DEFAULT_CYKIT_PORT = 5151
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from heatclient.common import http
from heatclient import exc
from heatclient.openstack.common import jsonutils
from keystoneclient.v2_0 import client as ksclient
def script_keystone_client(token=None):
if token:
ksclient.Client(auth_url='http://no.where',
insecure=False,
tenant_id='tenant_id',
token=token).AndReturn(FakeKeystone(token))
else:
ksclient.Client(auth_url='http://no.where',
insecure=False,
password='password',
tenant_name='tenant_name',
username='username').AndReturn(FakeKeystone(
'abcd1234'))
def script_heat_list(url=None):
if url is None:
url = '/stacks?'
resp_dict = {"stacks": [
{
"id": "1",
"stack_name": "teststack",
"stack_status": 'CREATE_COMPLETE',
"creation_time": "2012-10-25T01:58:47Z"
},
{
"id": "2",
"stack_name": "teststack2",
"stack_status": 'IN_PROGRESS',
"creation_time": "2012-10-25T01:58:47Z"
}]
}
resp = FakeHTTPResponse(200,
'success, you',
{'content-type': 'application/json'},
jsonutils.dumps(resp_dict))
http.HTTPClient.json_request('GET', url).AndReturn((resp, resp_dict))
def script_heat_normal_error():
resp_dict = {
"explanation": "The resource could not be found.",
"code": 404,
"error": {
"message": "The Stack (bad) could not be found.",
"type": "StackNotFound",
"traceback": "",
},
"title": "Not Found"
}
resp = FakeHTTPResponse(400,
'The resource could not be found',
{'content-type': 'application/json'},
jsonutils.dumps(resp_dict))
http.HTTPClient.json_request('GET', '/stacks/bad').AndRaise(
exc.from_response(resp))
def script_heat_error(resp_string):
resp = FakeHTTPResponse(400,
'The resource could not be found',
{'content-type': 'application/json'},
resp_string)
http.HTTPClient.json_request('GET', '/stacks/bad').AndRaise(
exc.from_response(resp))
def fake_headers():
return {'X-Auth-Token': '<KEY>',
'Content-Type': 'application/json',
'Accept': 'application/json',
'User-Agent': 'python-heatclient'}
class FakeServiceCatalog():
def url_for(self, endpoint_type, service_type):
return 'http://192.168.1.5:8004/v1/f14b41234'
class FakeKeystone():
service_catalog = FakeServiceCatalog()
def __init__(self, auth_token):
self.auth_token = auth_token
class FakeRaw():
version = 110
class FakeHTTPResponse():
version = 1.1
def __init__(self, status_code, reason, headers, content):
self.headers = headers
self.content = content
self.status_code = status_code
self.reason = reason
self.raw = FakeRaw()
def getheader(self, name, default=None):
return self.headers.get(name, default)
def getheaders(self):
return self.headers.items()
def read(self, amt=None):
b = self.content
self.content = None
return b
def iter_content(self, chunksize):
return self.content
def json(self):
return jsonutils.loads(self.content)
class NaturalNumbers:
def __init__(self):
pass
def get_first_n_for(self, n): # Ejemplo
"""
Obtener los primeros n naturales en una lista con for
"""
first_n = [] # Se declara una lista donde almacenaremos los numeros
for i in range(n): # Se itera sobre range que genera un rango de 0 a n
first_n.append(i) # Almacenamos la variable del ciclo en la lista con append
print("FIRST n (n={}) FOR: {}".format(n, first_n))
return first_n # Regresamos la lista
    def get_first_n_while(self, n): # Example
        """
        Get the first n natural numbers in a list using a while loop
        """
        first_n = [] # Declare a list where the numbers will be stored
        n_count = 0 # Initialize a counter to track which iteration we are on inside the loop
        while n_count < n: # Loop termination condition
            first_n.append(n_count) # Append the counter (loop tally) to the list
            n_count += 1 # Add one to the counter at the end of the iteration; otherwise n_count would never reach n and we would loop forever
        print(f"FIRST n (n={n}) WHILE: {first_n}")
        return first_n
    def get_first_n_pair_for(self, n): # Exercise
        """
        Get the first n even numbers in a list using a for loop
        """
return []
    def get_first_n_pair_while(self, n): # Exercise
        """
        Get the first n even numbers in a list using a while loop
        """
return []
    def get_factorial_for(self, n): # Exercise
        """
        Get the factorial of n using a for loop, returns an int
        """
return 0
    def get_factorial_while(self, n): # Exercise
        """
        Get the factorial of n using a while loop, returns an int
        """
return 0
    def get_factorial_recursive(self, n): # Example
        """
        Get the factorial of n recursively, returns an int
        """
if n <= 1:
return 1
return n * self.get_factorial_recursive(n-1)
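    # Worked illustration of get_factorial_recursive above (added comment):
    #   get_factorial_recursive(4) = 4 * get_factorial_recursive(3)
    #                              = 4 * 3 * get_factorial_recursive(2)
    #                              = 4 * 3 * 2 * get_factorial_recursive(1)
    #                              = 4 * 3 * 2 * 1 = 24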
    def get_n_pow_2_for(self, n): # Example
        """
        Get the squares of the first n numbers using a for loop, returns a list
        """
n_pow_2 = []
for i in range(n):
n_pow_2.append(
i ** 2
)
print(f"FIRST n (n={n}) POW 2: {n_pow_2}")
return n_pow_2
    def get_n_pow_2_while(self, n): # Exercise
        """
        Get the squares of the first n numbers using a while loop, returns a list
        """
return []
    def get_n_sum_recursive(self, n): # Example
        """
        Get the sum of the first n numbers recursively, returns an int
        """
if n <= 0:
return 0
return n + self.get_n_sum_recursive(n-1)
    def get_n_sum_for(self, n): # Exercise
        """
        Get the sum of the first n numbers using a for loop, returns an int
        """
return 0
    def get_n_sum_while(self, n): # Exercise
        """
        Get the sum of the first n numbers using a while loop, returns an int
        """
return 0 | StarcoderdataPython |
4817214 | <reponame>maorp/NeuralGraph<filename>utils/sdf_utils.py<gh_stars>100-1000
import numpy as np
def scale_grid(xyz, x_scale, y_scale, z_scale, disp=1.0):
X, Y, Z = xyz
X = X*x_scale
Y = Y*y_scale
Z = Z*z_scale
points = np.concatenate((X[np.newaxis, ...], Y[np.newaxis, ...], Z[np.newaxis, ...]), axis=0)
points = points.reshape(3, -1).astype(np.float32)
return 2.0 * points - disp
def sample_grid_points(dim, disp=1.0, use_supersampling=False, num_supersamples=2):
# Generate regular input.
if use_supersampling:
edges = dim-1
supersamples = dim + edges * num_supersamples
coords_x = scale_grid(np.mgrid[:supersamples, :dim, :dim], disp/(supersamples-1.0), disp/(dim-1.0), disp/(dim-1.0), disp)
coords_y = scale_grid(np.mgrid[:dim, :supersamples, :dim], disp/(dim-1.0), disp/(supersamples-1.0), disp/(dim-1.0), disp)
coords_z = scale_grid(np.mgrid[:dim, :dim, :supersamples], disp/(dim-1.0), disp/(dim-1.0), disp/(supersamples-1.0), disp)
points_x = coords_x.reshape(3, -1).astype(np.float32)
points_y = coords_y.reshape(3, -1).astype(np.float32)
points_z = coords_z.reshape(3, -1).astype(np.float32)
points = np.concatenate((points_x, points_y, points_z), axis=1)
else:
coords = scale_grid(np.mgrid[:dim, :dim, :dim], disp/(dim-1.0), disp/(dim-1.0), disp/(dim-1.0), disp)
points = coords.reshape(3, -1).astype(np.float32)
return points
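# (Added note) scale_grid maps integer grid indices into [-disp, disp]: indices are first
# scaled into [0, disp] and then remapped by 2*x - disp. sample_grid_points therefore
# returns a float32 array of shape (3, dim**3) of grid coordinates, or the union of three
# axis-wise supersampled grids when use_supersampling is True.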
| StarcoderdataPython |
151240 | import dash
from utils.code_and_show import example_app
dash.register_page(
__name__, description="Compare three regression models to predict revenue"
)
filename = __name__.split("pages.")[1]
notes = """
#### Plotly Documentation:
- [Visualize regression in scikit-learn](https://plotly.com/python/ml-regression/)
#### Contributed by:
This example app was contributed by [Plotly](https://plotly.com/python/)
"""
layout = example_app(filename, notes=notes)
| StarcoderdataPython |
1746779 | <reponame>feiwencaho/sharezone
from api.service import dao
from api.utils import map
from api.const import GeoTableId
from django.db import transaction
def publish(user, **kwargs):
"""
    Publish a demand
:param user:
:param kwargs:
:return:
"""
with transaction.atomic():
demand = dao.demand.create(user=user, **kwargs)
lat = kwargs.get('lat')
lng = kwargs.get('lng')
poi_data = {
'demand_title': kwargs.get('title'),
'demand_id': demand.id,
'demand_uid': user.id
}
# 1/0
        # Create POI data
poi_id = map.create_poi(GeoTableId.DEMAND, lat, lng, poi_data)
demand.poi_id = poi_id
demand.geotable_id = GeoTableId.DEMAND
demand.save()
return demand
def get_demands(**kwargs):
return dao.demand.get_demands(**kwargs)
| StarcoderdataPython |
1645970 | '''
<NAME>
difficulty: 35%
run time: 0:00
answer: 168
***
115 Counting Block Combinations II
NOTE: This is a more difficult version of Problem 114.
A row measuring n units in length has red blocks with a minimum length of m units placed on it, such that any two red blocks (which are allowed to be different lengths) are separated by at least one black square.
Let the fill-count function, F(m, n), represent the number of ways that a row can be filled.
For example, F(3, 29) = 673135 and F(3, 30) = 1089155.
That is, for m = 3, it can be seen that n = 30 is the smallest value for which the fill-count function first exceeds one million.
In the same way, for m = 10, it can be verified that F(10, 56) = 880711 and F(10, 57) = 1148904, so n = 57 is the least value for which the fill-count function first exceeds one million.
For m = 50, find the least value of n for which the fill-count function first exceeds one million.
'''
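# (Added annotation, informal reading) F_r(red, m, length) counts the fillings of a row of
# `length` units whose first block is red (requiring length >= m) or black; the initial
# count = 1 covers the case where that first block fills the rest of the row. Results are
# memoised in `mem` keyed only by (red, length) -- not by m -- which is why `mem` is
# cleared before each new value of m below.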
mem = {}
def F(m, length):
return F_r(True, m, length) + F_r(False, m, length)
def F_r(red, m, length):
if red:
if length == m:
return 1
elif length < m:
return 0
else:
if length == 1:
return 1
elif length < 1:
return 0
if (red, length) in mem:
return mem[(red, length)]
count = 1
if red:
for i in range(m, length+1):
count += F_r(False, m, length-i)
else:
for i in range(1, length+1):
count += F_r(True, m, length-i)
mem[(red,length)] = count
return count
assert F(3,7) == 17
mem = {}
assert F(10,57) == 1148904
mem = {}
for n in range(100,1000):
if F(50, n) > 10**6:
print(n)
break
mem = {}
assert F(50, 167) <= 10**6 and F(50, 168) > 10**6
| StarcoderdataPython |
1613077 | from __future__ import annotations
from copy import deepcopy
from typing import Tuple, Callable
import numpy as np
from IMLearn import BaseEstimator
def cross_validate(estimator: BaseEstimator, X: np.ndarray, y: np.ndarray,
scoring: Callable[[np.ndarray, np.ndarray, ...], float],
cv: int = 5) -> Tuple[float, float]:
"""
Evaluate metric by cross-validation for given estimator
Parameters
----------
estimator: BaseEstimator
Initialized estimator to use for fitting the data
X: ndarray of shape (n_samples, n_features)
Input data to fit
y: ndarray of shape (n_samples, )
Responses of input data to fit to
scoring: Callable[[np.ndarray, np.ndarray, ...], float]
Callable to use for evaluating the performance of the cross-validated model.
When called, the scoring function receives the true- and predicted values for each sample
and potentially additional arguments. The function returns the score for given input.
cv: int
Specify the number of folds.
Returns
-------
train_score: float
Average train score over folds
validation_score: float
Average validation score over folds
"""
X_parts = np.array_split(X, cv)
y_parts = np.array_split(y, cv)
train_sum, validation_sum = 0, 0
for k in range(cv):
X_k_fold = np.concatenate(
[part for j, part in enumerate(X_parts) if k != j])
y_k_fold = np.concatenate(
[part for j, part in enumerate(y_parts) if k != j])
estimator.fit(X_k_fold, y_k_fold)
train_sum += scoring(y_k_fold, estimator.predict(X_k_fold))
validation_sum += scoring(y_parts[k], estimator.predict(X_parts[k]))
return train_sum / cv, validation_sum / cv
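# Hedged usage sketch (added; the names below are illustrative, not part of IMLearn):
#
#   def mse(y_true, y_pred):
#       return float(np.mean((y_true - y_pred) ** 2))
#
#   train_err, val_err = cross_validate(my_estimator, X, y, mse, cv=5)
#
# where `my_estimator` is any unfitted BaseEstimator; each fold refits the estimator on the
# other cv-1 folds and scores both the training folds and the held-out fold, averaging over folds.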
| StarcoderdataPython |
3289236 | from rlil.nn import RLNetwork
from .approximation import Approximation
class VNetwork(Approximation):
def __init__(
self,
model,
optimizer,
name='v',
**kwargs
):
model = VModule(model)
super().__init__(
model,
optimizer,
name=name,
**kwargs
)
class VModule(RLNetwork):
def forward(self, states):
return super().forward(states).squeeze(-1)
| StarcoderdataPython |
3399307 | <gh_stars>1000+
import json
from django.test import TestCase
from suggestion.models import Study
from suggestion.algorithm.abstract_algorithm import AbstractSuggestionAlgorithm
from suggestion.algorithm.skopt_bayesian_optimization import SkoptBayesianOptimization
class RandomSearchAlgorithmTest(TestCase):
def setUp(self):
study_configuration_json = {
"goal":
"MAXIMIZE",
"maxTrials":
5,
"maxParallelTrials":
1,
"randomInitTrials":
1,
"params": [{
"parameterName": "hidden1",
"type": "INTEGER",
"minValue": 1,
"maxValue": 10,
"scalingType": "LINEAR"
}, {
"parameterName": "learning_rate",
"type": "DOUBLE",
"minValue": 0.01,
"maxValue": 0.5,
"scalingType": "LINEAR"
}]
}
study_configuration = json.dumps(study_configuration_json)
self.study = Study.create("SkoptBayesianOptimizationStudy", study_configuration)
def tearDown(self):
pass
def test_init(self):
instance = SkoptBayesianOptimization()
self.assertTrue(isinstance(instance, AbstractSuggestionAlgorithm))
self.assertEqual(instance.__class__, SkoptBayesianOptimization)
def test_get_new_suggestions(self):
algorithm = SkoptBayesianOptimization()
new_trials = algorithm.get_new_suggestions(
self.study.id, number=1)
new_trial = new_trials[0]
new_parameter_values_json = json.loads(new_trial.parameter_values)
self.assertTrue(10 >= new_parameter_values_json["hidden1"] >= 1)
self.assertTrue(0.5 >= new_parameter_values_json["learning_rate"] >= 0.01)
def test_get_multiple_new_suggestions(self):
algorithm = SkoptBayesianOptimization()
# Assert getting one trial
new_trials = algorithm.get_new_suggestions(
self.study.id, number=1)
self.assertEqual(len(new_trials), 1)
# Assert getting multiple trials
new_trials = algorithm.get_new_suggestions(
self.study.id, number=10)
self.assertEqual(len(new_trials), 10)
| StarcoderdataPython |
1638549 | from ConnectSignal.Lambda import (
connect_slider_moved_abstract,
connect_slider_released_abstract,
connect_def_str_lineedit_abstract,
connect_name_change_abstract
)
from ConnectSignal.ConnectMacros import (
connect_colour,
connect_fill_pattern,
connect_dash,
connect_o_arrow,
connect_d_arrow
)
def connect_circle(scene):
"""Connect signals in the circle tab."""
scene.ui.circle_def_str.editingFinished.connect(
lambda: connect_def_str_lineedit_abstract(scene, scene.ui.circle_def_str))
scene.ui.circle_name.editingFinished.connect(
lambda: connect_name_change_abstract(scene.ui.circle_name, scene))
scene.ui.circle_line_width_slider.sliderMoved.connect(
lambda x: connect_slider_moved_abstract(x, scene, ['line'], 'line_width', lambda x: x / 10.0,
scene.ui.circle_line_width_spin))
scene.ui.circle_line_width_slider.sliderReleased.connect(
lambda: connect_slider_released_abstract(scene))
scene.ui.circle_double_distance_slider.sliderMoved.connect(
lambda x: connect_slider_moved_abstract(x, scene, ['line', 'double'], 'distance', lambda x: x / 10.0,
scene.ui.circle_double_distance_spin))
scene.ui.circle_double_distance_slider.sliderReleased.connect(
lambda: connect_slider_released_abstract(scene))
connect_fill_pattern(scene, ['fill', 'pattern'],
scene.ui.circle_pattern_type,
scene.ui.circle_pattern_distance_spin,
scene.ui.circle_pattern_distance_slider,
scene.ui.circle_pattern_size_spin,
scene.ui.circle_pattern_size_slider,
scene.ui.circle_pattern_rotation_spin,
scene.ui.circle_pattern_rotation_slider,
scene.ui.circle_pattern_xshift_spin,
scene.ui.circle_pattern_xshift_slider,
scene.ui.circle_pattern_yshift_spin,
scene.ui.circle_pattern_yshift_slider)
connect_colour(scene, ['fill', 'colour'],
scene.ui.circle_marker_colour_name,
scene.ui.circle_marker_colour_mix_name,
scene.ui.circle_marker_colour_mixratio_spin,
scene.ui.circle_marker_colour_mixratio_slider,
scene.ui.circle_marker_colour_strength_spin,
scene.ui.circle_marker_colour_strength_slider)
connect_colour(scene, ['line', 'colour'],
scene.ui.circle_border_colour_name,
scene.ui.circle_border_colour_mix_name,
scene.ui.circle_border_colour_mixratio_spin,
scene.ui.circle_border_colour_mixratio_slider,
scene.ui.circle_border_colour_strength_spin,
scene.ui.circle_border_colour_strength_slider)
connect_colour(scene, ['line', 'double', 'colour'],
scene.ui.circle_double_colour_name,
scene.ui.circle_double_colour_mix_name,
scene.ui.circle_double_colour_mixratio_spin,
scene.ui.circle_double_colour_mixratio_slider,
scene.ui.circle_double_colour_strength_spin,
scene.ui.circle_double_colour_strength_slider)
connect_o_arrow(scene,
scene.ui.circle_o_tip,
scene.ui.circle_o_side,
scene.ui.circle_o_reversed,
scene.ui.circle_o_length_spin,
scene.ui.circle_o_length_slider,
scene.ui.circle_o_width_spin,
scene.ui.circle_o_width_slider)
connect_d_arrow(scene,
scene.ui.circle_d_tip,
scene.ui.circle_d_side,
scene.ui.circle_d_reversed,
scene.ui.circle_d_length_spin,
scene.ui.circle_d_length_slider,
scene.ui.circle_d_width_spin,
scene.ui.circle_d_width_slider)
    connect_dash(scene, ['line', 'dash'], scene.ui.circle_line_stroke, scene.ui.circle_custom_dash)
| StarcoderdataPython |
3317589 | <filename>code/sample_1-2-8.py
x = [int(i) for i in input().split()]
print(x)
| StarcoderdataPython |
37638 | """
Iterator --> yield
"""
class CommodityController:
def __init__(self):
self.__commoditys = []
def add_commodity(self, cmd):
self.__commoditys.append(cmd)
def __iter__(self):
index = 0
yield self.__commoditys[index]
index += 1
yield self.__commoditys[index]
index += 1
yield self.__commoditys[index]
controller = CommodityController()
controller.add_commodity("屠龙刀")
controller.add_commodity("倚天剑")
controller.add_commodity("芭比娃娃")
for item in controller:
print(item)
# iterator = controller.__iter__()
# while True:
# try:
# item = iterator.__next__()
# print(item)
# except StopIteration:
# break
| StarcoderdataPython |
1725304 | #encoding:utf8
from pymongo import MongoClient
from flask import Flask, request, jsonify
DB_COUNT = 32
dbs = {}
DBNAME = "replay"
COLLECTION = "data"
def _hash(hash_str):
s = 0
for i in range(1, len(hash_str)+1):
c = ord(hash_str[i-1])
s = s + c * i
return (s % DB_COUNT) + 1
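# Quick illustration of the sharding scheme above (added comment; the id below is made up):
#   _hash("abc") == (97*1 + 98*2 + 99*3) % 32 + 1 == 590 % 32 + 1 == 15
# so _get_collection("abc") would resolve to collection "data" in database "replay15";
# every replay id deterministically lands in one of the 32 "replayN" databases.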
def _get_collection(replay_id):
id = _hash(replay_id)
name = DBNAME + str(id)
db = dbs[name]
return db[COLLECTION]
def get_replay(replay_id):
collection = _get_collection(replay_id)
t = collection.find_one({"replay_id":replay_id})
return t
def init():
client = MongoClient("localhost", 27017)
for i in range(1, DB_COUNT+1):
name = DBNAME + str(i)
dbs[name] = client[name]
init()
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/get_replay')
def web_get_replay():
replay_id = request.args.get("replay_id", "")
replay_id = replay_id.encode("utf8")
ret = get_replay(replay_id)
if not ret:
return jsonify({})
return jsonify(**ret)
if __name__ == '__main__':
app.run(debug=True)
| StarcoderdataPython |
3265877 | from django.contrib.auth.backends import ModelBackend
from app.core.models import Customer
class CustomerUserBackend(ModelBackend):
    def authenticate(self, username=None, password=None, t_password=None, **kwargs):
UserModel = Customer
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
try:
user = UserModel._default_manager.get_by_natural_key(username)
if user.check_password(password, t_password):
return user
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a non-existing user (#20760).
UserModel().set_password(password)
def get_user(self, user_id):
UserModel = Customer
try:
return UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
| StarcoderdataPython |
101774 | <filename>pwtools/test/test_parameter_study.py
import os
import numpy as np
from pwtools import comb, batch, common, sql
from pwtools.test.tools import all_types_equal, assert_all_types_equal
from .testenv import testdir
pj = os.path.join
def check_key_in_file(lines, key, file_target):
"""If line "key=<value>" is found in file, then convert the string
containing <value> to Python type and assert value==file_target.
"""
for ll in lines:
if ll.strip().startswith(key):
file_val_str = ll.split('=')[1].strip()
print("check_key_in_file: key={0}, "
"file_val_str={1}, file_target={2}".format(key, file_val_str, file_target))
# hack to convert string from file to correct type, failed
# conversion raises ValueError
ret = False
for converter in [repr, str, int, float]:
try:
file_val = converter(file_val_str)
ok = True
except ValueError:
ok = False
if ok:
ret = all_types_equal(file_target, file_val)
if ret:
break
assert ret, ("not equal: file_target={}, "
"file_val={}".format(file_target, file_val))
def check_generated(calc_root, machine_dct, params_lst, revision):
"""Check consistency of calc database values, replacement params in
`params_lst` and all written files.
"""
dbfn = pj(calc_root, 'calc.db')
db = sql.SQLiteDB(dbfn, table='calc')
print("database content:")
print(db.get_dict("select * from calc"))
db_colnames = [x[0] for x in db.get_header()]
for idx,hostname_str in db.execute("select idx,hostname from calc \
where revision==?", (revision,)).fetchall():
for hostname in hostname_str.split(','):
machine = machine_dct[hostname]
calc_dir = pj(calc_root, 'calc_%s' %machine.hostname, str(idx))
for base in ['pw.in', machine.get_jobfile_basename()]:
fn = pj(calc_dir, base)
assert os.path.exists(fn)
lines = common.file_readlines(fn)
# assemble all possible replacements in one list of SQLEntry
# instances, some things are redundantely checked twice ...
sql_lst = params_lst[idx] + list(machine.get_sql_record().values())
for db_key in db_colnames:
db_val = db.get_single("select %s from calc "
"where idx==?" %db_key,
(idx,))
if db_val is not None:
sql_lst.append(sql.SQLEntry(key=db_key, sqlval=db_val))
# for each replacement key, check if they are correctly placed
# in the database (if applicable) and in the written files
for sqlentry in sql_lst:
if sqlentry.key in db_colnames:
db_val = db.get_single("select %s from calc "
"where idx==?" \
%sqlentry.key, (idx,))
assert_all_types_equal(db_val, sqlentry.sqlval)
else:
db_val = 'NOT_DEFINED_IN_DB'
print("check_generated: idx={0}, sqlentry.key={1}, "
"sqlentry.sqlval={2}, db_val={3}".format(idx, sqlentry.key,
sqlentry.sqlval,
db_val))
check_key_in_file(lines, sqlentry.key, sqlentry.sqlval)
db.finish()
def test_parameter_study():
templ_dir = 'files/calc.templ'
calc_root = pj(testdir, 'calc_test_param_study')
# filename: FileTemplate built from that internally
host0 = batch.Machine(hostname='host0',
subcmd='qsub_host0',
home='/home/host0/user',
scratch='/tmp/host0',
filename='files/calc.templ/job.host0')
# template: provide FileTemplate directly
host1 = batch.Machine(hostname='host1',
subcmd='qsub_host1',
home='/home/host1/user',
scratch='/tmp/host1',
template=batch.FileTemplate(basename='job.host1',
templ_dir=templ_dir))
# use template text here instead of a file
host2_txt = """
subcmd=XXXSUBCMD
scratch=XXXSCRATCH
home=XXXHOME
calc_name=XXXCALC_NAME
idx=XXXIDX
revision=XXXREVISION
study_name=XXXSTUDY_NAME
"""
host2 = batch.Machine(hostname='host2',
subcmd='qsub_host2',
home='/home/host2/user',
scratch='/tmp/host2',
template=batch.FileTemplate(basename='job.host2',
txt=host2_txt))
study_name = 'convergence'
templates = [batch.FileTemplate(basename='pw.in', templ_dir=templ_dir)]
param0 = sql.sql_column(key='param0', lst=[25.0, 50.0])
param1 = sql.sql_column(key='param1', lst=['2x2x2','3x3x3','4x4x4'])
param2 = sql.sql_column(key='param2', lst=[77,88,99,111])
# only needed for this test
machine_dct = {'host0': host0,
'host1': host1,
'host2': host2,
}
nparam0 = len(param0)
nparam1 = len(param1)
nparam2 = len(param2)
#------------------------------------------------------------------------
# revision=0
#------------------------------------------------------------------------
params_lst0 = comb.nested_loops([param0])
calc = batch.ParameterStudy(machines=host0,
templates=templates,
params_lst=params_lst0,
study_name=study_name,
calc_root=calc_root)
# same as mode='w' + backup=True
calc.write_input(mode='a', backup=True)
check_generated(calc_root, machine_dct, params_lst0, revision=0)
#------------------------------------------------------------------------
# revision=0, no backup, erase all
#------------------------------------------------------------------------
params_lst0 = comb.nested_loops([param0])
calc = batch.ParameterStudy(machines=host0,
templates=templates,
params_lst=params_lst0,
study_name=study_name,
calc_root=calc_root)
calc.write_input(mode='w', backup=False)
check_generated(calc_root, machine_dct, params_lst0, revision=0)
assert not os.path.exists(pj(calc_root, 'calc_host0.0'))
assert not os.path.exists(pj(calc_root, 'calc.db.0'))
# only calc_foo/0 ... calc_foo/{N-1}
for ii in range(nparam0):
assert os.path.exists(pj(calc_root, 'calc_host0/%i' %ii))
for jj in range(1,5):
assert not os.path.exists(pj(calc_root, 'calc_host0/%i' %(ii+jj,)))
#------------------------------------------------------------------------
# revision=0, backup, then erase all
#------------------------------------------------------------------------
params_lst0 = comb.nested_loops([param0])
calc = batch.ParameterStudy(machines=host0,
templates=templates,
params_lst=params_lst0,
study_name=study_name,
calc_root=calc_root)
calc.write_input(mode='w', backup=True)
check_generated(calc_root, machine_dct, params_lst0, revision=0)
assert os.path.exists(pj(calc_root, 'calc_host0.0'))
assert os.path.exists(pj(calc_root, 'calc.db.0'))
# only calc_foo/0 ... calc_foo/{N-1}
for ii in range(nparam0):
assert os.path.exists(pj(calc_root, 'calc_host0/%i' %ii))
for jj in range(1,5):
assert not os.path.exists(pj(calc_root, 'calc_host0/%i' %(ii+jj,)))
#------------------------------------------------------------------------
# revision=1, backup and extend
#------------------------------------------------------------------------
params_lst1 = comb.nested_loops([param1,param2])
calc = batch.ParameterStudy(machines=[host0,host1,host2],
templates=templates,
params_lst=params_lst1,
study_name=study_name,
calc_root=calc_root)
calc.write_input(mode='a', backup=True)
assert os.path.exists(pj(calc_root, 'calc_host0.1'))
assert os.path.exists(pj(calc_root, 'calc.db.1'))
for ii in range(nparam0 + nparam1*nparam2):
assert os.path.exists(pj(calc_root, 'calc_host0/%i' %ii))
for ii in range(nparam0):
assert not os.path.exists(pj(calc_root, 'calc_host1/%i' %ii))
assert not os.path.exists(pj(calc_root, 'calc_host2/%i' %ii))
for ii in range(nparam0+1, nparam1*nparam2):
assert os.path.exists(pj(calc_root, 'calc_host1/%i' %ii))
assert os.path.exists(pj(calc_root, 'calc_host2/%i' %ii))
# excl_push
excl_fn = pj(calc_root, 'excl_push')
# ['0', '1', '2', ...]
assert common.file_read(excl_fn).split() == \
[str(x) for x in range(len(params_lst0))]
# sum params_lstm b/c we use `idx` from calc.db and that counts params_lst0
# + params_lst1, i.e. all paramseter sets from revision=0 up to now
check_generated(calc_root, machine_dct, params_lst0+params_lst1, revision=1)
def test_default_repl_keys():
batch.default_repl_keys()
| StarcoderdataPython |
3254011 | """
You are given two integer arrays nums1 and nums2, sorted in non-decreasing order, and two integers m and n,
representing the number of elements in nums1 and nums2 respectively.
Merge nums1 and nums2 into a single array sorted in non-decreasing order.
The final sorted array should not be returned by the function, but instead be stored inside the array nums1.
To accommodate this, nums1 has a length of m + n, where the first m elements denote the elements that should be merged,
and the last n elements are set to 0 and should be ignored. nums2 has a length of n.
"""
def merge_sorted(nums1, m, nums2, n):
nums1[:m + n] = sorted(nums1[:m] + nums2)
return nums1
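# Note (added): sorted(nums1[:m] + nums2) costs O((m+n) log(m+n)). The classic in-place
# alternative walks both arrays backwards with two pointers in O(m+n); a hedged sketch
# (not part of the original solution):
#
#   def merge_two_pointer(nums1, m, nums2, n):
#       i, j, k = m - 1, n - 1, m + n - 1
#       while j >= 0:
#           if i >= 0 and nums1[i] > nums2[j]:
#               nums1[k] = nums1[i]
#               i -= 1
#           else:
#               nums1[k] = nums2[j]
#               j -= 1
#           k -= 1
#       return nums1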
def main():
nums1 = [1, 2, 3, 0, 0, 0]
m = 3
nums2 = [2, 5, 6]
n = 3
print(f"First array: {nums1}")
print(f"Second array: {nums2}")
result = merge_sorted(nums1, m, nums2, n)
print(f"Sorted array: {result}")
if __name__ == '__main__':
main()
"""
Output:
First array: [1, 2, 3, 0, 0, 0]
Second array: [2, 5, 6]
Sorted array: [1, 2, 2, 3, 5, 6]
"""
| StarcoderdataPython |
1622322 | import numpy
from scipy.misc import imread
from matplotlib import pyplot as plt
from PIL import Image
from PIL import ImageDraw
def upload_recognized_text_lines(file_orf):
color = 0
heights = []
blocks = []
min_x = 100000
min_y = 100000
max_x = 0
max_y = 0
lines = []
for line in open(file_orf):
if ";" in line:
y, x, dy, dx = line.split(";")[0].split()
y, x, dy, dx = int(y), int(x), int(dy), int(dx)
min_x = min(x, min_x)
min_y = min(y, min_y)
max_x = max(x + dx, max_x)
max_y = max(y + dy, max_y)
blocks += [ (x, y, x + dx, y + dy) ]
heights += [dx]
elif heights:
heights.sort()
quarter = len(heights) / 4
mean, std = numpy.mean(heights[quarter:-quarter]), numpy.std(heights[quarter:-quarter])
print mean, std, heights
lines += [((mean, std), (min_x, min_y, max_x, max_y), blocks)]
heights = []
blocks = []
min_x = 100000
min_y = 100000
max_x = 0
max_y = 0
if heights:
heights.sort()
quarter = len(heights) / 4
mean, std = numpy.mean(heights[quarter:-quarter]), numpy.std(heights[quarter:-quarter])
print mean, std, heights
lines += [((mean, std), (min_x, min_y, max_x, max_y), blocks)]
return lines
"""
original_image = Image.open("2.pbm")
original_image = original_image.convert("RGB")
draw = ImageDraw.Draw(original_image)
lines = upload_recognized_text_lines("1.orf")
for height_params, borders, blocks in lines:
min_x, min_y, max_x, max_y = borders
import random
color= (random.randint(0,255),random.randint(0,255),random.randint(0,255))
draw.rectangle((min_y, min_x, max_y, max_x), outline=color )
del draw
original_image.save("2.png")
exit()
"""
def build_profile(img, borders, axis):
profile = numpy.zeros(borders[axis][1] - borders[axis][0])
counter_axis_length = borders[1 - axis][1] - borders[1 - axis][0]
if axis:
for x in xrange(borders[axis][0], borders[axis][1]):
profile[x - borders[axis][0]] = sum(img[borders[1 - axis][0] : borders[1 - axis][1], x]) / float(counter_axis_length)
else:
for x in xrange(borders[axis][0], borders[axis][1]):
profile[x - borders[axis][0]] = sum(img[x, borders[1 - axis][0] : borders[1 - axis][1]]) / float(counter_axis_length)
return profile
def get_borders(img, axis):
MAX_ABSOLUTE_TRASH_SIZE = 10
if axis:
start = 0
for x in xrange(img.shape[1]):
if sum(img[:, x]) > MAX_ABSOLUTE_TRASH_SIZE:
start = x
break
end = img.shape[1]
for x in xrange(img.shape[1] - 1, -1, -1):
if sum(img[:, x]) > MAX_ABSOLUTE_TRASH_SIZE:
end = x + 1
break
else:
start = 0
for x in xrange(img.shape[0]):
if sum(img[x, :]) > MAX_ABSOLUTE_TRASH_SIZE:
start = x
break
end = img.shape[0]
for x in xrange(img.shape[0] - 1, -1, -1):
if sum(img[x, :]) > MAX_ABSOLUTE_TRASH_SIZE:
end = x + 1
break
start = max(0, start - MAX_ABSOLUTE_TRASH_SIZE)
end = min(end + MAX_ABSOLUTE_TRASH_SIZE, img.shape[axis])
return (start, end)
def empty_intervals(profile):
intervals = []
start = -1
for end in xrange(len(profile)):
if profile[end] > 0:
if start > -1:
if start != 0:
intervals.append((end - start, start))
start = -1
elif start == -1:
start = end
#if start > -1:
# intervals.append((len(profile) - start, start))
return intervals
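# (Added annotation) split_on_major_blocks below implements a recursive XY-cut: for each
# axis it builds an ink profile of the current block, collects empty gaps wider than
# MIN_BORDER_WIDTH, splits the block at the centre of every gap along the axis with the
# widest gap, draws the resulting sub-blocks, and recurses into each of them.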
def split_on_major_blocks(img, draw, block, MIN_BORDER_WIDTH):
by_axis = [[], []]
max_lengths = [-1, -1]
for axis in xrange(2):
profile = build_profile(img, block, axis)
intervals = empty_intervals(profile)
intervals = [(length, start) for length, start in intervals if length >= MIN_BORDER_WIDTH[axis]]
by_axis[axis] = intervals
if intervals:
max_lengths[axis] = max(intervals)[0]
axis2choose = max_lengths.index(max(max_lengths))
if max_lengths[axis2choose] == -1:
return
intervals = by_axis[axis2choose]
axis = axis2choose
intervals_centers_abs = [start + length / 2 + block[axis][0] for length, start in intervals]
intervals_centers_abs.sort()
new_blocks = []
if not axis:
intervals_centers_abs = [block[0][0]] + intervals_centers_abs + [block[0][1]]
for border_index in xrange(1, len(intervals_centers_abs)):
new_blocks += [((intervals_centers_abs[border_index - 1], intervals_centers_abs[border_index]), block[1])]
else:
intervals_centers_abs = [block[1][0]] + intervals_centers_abs + [block[1][1]]
for border_index in xrange(1, len(intervals_centers_abs)):
new_blocks += [(block[0], (intervals_centers_abs[border_index - 1], intervals_centers_abs[border_index]))]
for new_block in new_blocks:
draw.line((new_block[1][0], new_block[0][0], new_block[1][1], new_block[0][0] ) , width = 20 )
draw.line((new_block[1][0], new_block[0][0], new_block[1][0], new_block[0][1] ) , width = 20 )
draw.line((new_block[1][1], new_block[0][0], new_block[1][1], new_block[0][1] ) , width = 20 )
draw.line((new_block[1][0], new_block[0][1], new_block[1][1], new_block[0][1] ) , width = 20 )
#draw.rectangle((new_block[1][0], new_block[0][0], new_block[1][1], new_block[0][1]), outline=0, width = 3)
split_on_major_blocks(img, draw, new_block, MIN_BORDER_WIDTH)
def rotate(image, angle, color, filter=Image.NEAREST):
if image.mode == "P" or filter == Image.NEAREST:
matte = Image.new("1", image.size, 1) # mask
else:
matte = Image.new("L", image.size, 255) # true matte
bg = Image.new(image.mode, image.size, color)
bg.paste(image.rotate(angle, filter),
matte.rotate(angle, filter))
return bg
def adjust_rotation(original_image):
best_angle = 0
max_vert_space = 0
for rotation in xrange(-5, 5, 1):
img = rotate(original_image, rotation, "white")
#img = original_image.rotate(rotation)
mat = img.load()
empty_count = 0
for y in xrange(img.size[1]):
inked = 0
for x in xrange(img.size[0]):
if mat[x, y] != 255:
inked += 1
if inked >= 10:
break
if inked < 10:
empty_count += 1
if empty_count > max_vert_space:
best_angle = rotation
max_vert_space = empty_count
return best_angle
import os
img_path = "chemtxt/tiff_scrappler/imgs/"
out_path = "rotated/"
processed = 0
for fname in os.listdir(img_path):
if not fname.endswith(".tif"):
continue
original_image = Image.open(img_path + fname)
rotate_on_angle = adjust_rotation(original_image)
if abs(rotate_on_angle) > 2:
print "rotate_on_angle",rotate_on_angle, fname
original_image = rotate(original_image, rotate_on_angle, 255)
original_image.save(out_path + fname)
continue
original_image_mat = original_image.load()
draw = ImageDraw.Draw(original_image)
img = numpy.zeros((original_image.size[1], original_image.size[0]))
for x in xrange(img.shape[0]):
for y in xrange(img.shape[1]):
if original_image_mat[y, x] == 0:
img[x, y] = 1
initial_block = (get_borders(img, 0), get_borders(img, 1))
MIN_BORDER_WIDTH = ((initial_block[0][1] - initial_block[0][0]) / 100, (initial_block[1][1] - initial_block[1][0]) / 20)
draw.rectangle((initial_block[1][0], initial_block[0][0], initial_block[1][1], initial_block[0][1]), outline=0)
split_on_major_blocks(img, draw, initial_block, MIN_BORDER_WIDTH)
del draw
original_image.save("processed/" + fname)
| StarcoderdataPython |
70031 | from django.conf.urls import url
from . import constants, views # isort:skip
urlpatterns = [
url(
r'^create-alias/$',
views.create_alias_view,
name=constants.CREATE_ALIAS_URL_NAME,
),
url(
r'^aliases/$',
views.CategoryListView.as_view(),
name=constants.CATEGORY_LIST_URL_NAME,
),
url(
r'^aliases/category/(?P<category_pk>\d+)/$',
views.AliasListView.as_view(),
name=constants.LIST_ALIASES_URL_NAME,
),
url(
r'^aliases/(?P<pk>\d+)/usage/$',
views.alias_usage_view,
name=constants.USAGE_ALIAS_URL_NAME,
),
url(
r'^detach-alias/(?P<plugin_pk>\d+)/$',
views.detach_alias_plugin_view,
name=constants.DETACH_ALIAS_PLUGIN_URL_NAME,
),
url(
r'^delete-alias/(?P<pk>\d+)/$',
views.delete_alias_view,
name=constants.DELETE_ALIAS_URL_NAME,
),
url(
r'^set-alias-position/$',
views.set_alias_position_view,
name=constants.SET_ALIAS_POSITION_URL_NAME,
),
url(
r'^select2/$',
views.AliasSelect2View.as_view(),
name=constants.SELECT2_ALIAS_URL_NAME,
),
]
| StarcoderdataPython |
3350825 | <reponame>rmaguire31/sisr
"""PyTorch Dataset utilities for SiSR super-resolution dataset
"""
import os
import glob
import random
import logging
import torchvision.transforms.functional as TF
from PIL import Image
from torch.utils.data import Dataset as BaseDataset
__all__ = 'Dataset', 'JointRandomTransform'
logger = logging.getLogger(__name__)
class Dataset(BaseDataset):
"""Paired dataset of input and target images
"""
FILE_EXTENSIONS = {'png', 'PNG', 'jpg', 'JPG'}
def __init__(self, data_dir, transform=None):
self.transform = transform
filenames = set()
for file_extension in self.FILE_EXTENSIONS:
input_glob = os.path.join(
data_dir,
'inputs',
'*.%s' % file_extension)
target_glob = os.path.join(
data_dir,
'targets',
'*.%s' % file_extension)
input_filenames = glob.glob(input_glob)
target_filenames = glob.glob(target_glob)
input_basenames = {
os.path.basename(f)
for f in input_filenames
if os.path.isfile(f)}
target_basenames = {
os.path.basename(f)
for f in target_filenames
if os.path.isfile(f)}
basenames = input_basenames & target_basenames
input_filenames = sorted(
f for f in input_filenames
if os.path.basename(f) in basenames)
target_filenames = sorted(
f for f in target_filenames
if os.path.basename(f) in basenames)
filenames.update(set(zip(input_filenames, target_filenames)))
self.filenames = sorted(filenames)
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
input_filename, target_filename = self.filenames[idx]
# Open PIL Images
input_img = Image.open(input_filename)
target_img = Image.open(target_filename)
if self.transform is not None:
input_img, target_img = self.transform(input_img, target_img)
return input_img, target_img
class JointRandomTransform:
"""Apply the same to two input and target images with different scales
Applies random crop, flip and rotation
"""
def __init__(self, input_size=None):
if input_size is None:
self.crop_width = self.crop_height = None
else:
self.crop_width, self.crop_height = input_size
def __call__(self, input, target):
# Random patch extraction
if self.crop_width is not None and self.crop_height is not None:
width, height = input.size
scaled_width, scaled_height = target.size
# Determine image scale factor
scale = scaled_width / width
if scale != scaled_height / height:
logger.warning("Input and target image have different aspect "
"ratios: %r, %r",
input.size, target.size)
if not scale.is_integer():
logger.warning("Target image size is not an integer multiple "
"of input image size: %r, %r",
input.size, target.size)
scale = int(scale)
# Random top, left position for patch
left = random.randrange(0, width - self.crop_width)
top = random.randrange(0, height - self.crop_height)
# Crop
input = TF.crop(input, top, left,
self.crop_height, self.crop_width)
target = TF.crop(target, scale * top, scale * left,
scale * self.crop_height, scale * self.crop_width)
# Random horizontal flip and rotation
width, height = input.size
if width == height:
angle = random.randrange(0, 360, 90)
else:
angle = random.randrange(0, 360, 180)
flip = random.randint(0, 1)
if angle:
input = TF.rotate(input, angle)
target = TF.rotate(target, angle)
if flip:
input = TF.hflip(input)
target = TF.hflip(target)
# Convert to tensor
input = TF.to_tensor(input)
target = TF.to_tensor(target)
return input, target
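# Hedged usage sketch (added; the directory name and patch size are illustrative):
#
#   dataset = Dataset('data/train', transform=JointRandomTransform(input_size=(48, 48)))
#   lr, hr = dataset[0]  # 48x48 input patch and the matching scaled target patch
#
# The same random crop position, flip and rotation are applied to both images, so the
# low- and high-resolution tensors stay spatially aligned.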
| StarcoderdataPython |
3230274 | <filename>reservation_rest_api.py
from flask import Flask, request
from reservation_service import get_qnode, read_data, register, delete_namespace
import json
import logging
from tabulate import tabulate
app = Flask(__name__)
ALLOWED_EXTENSIONS = {'xls', 'yaml', 'csv', 'json'}
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# logging.basicConfig(format=FORMAT, stream=sys.stdout, level=logging.DEBUG)
# set up logging to file - see previous section for more details
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(name)s %(lineno)d -- %(message)s",
datefmt='%m-%d %H:%M:%S',
filename='reservation_rest_api.log',
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# # set a format which is simpler for console use
formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s %(lineno)d -- %(message)s", '%m-%d %H:%M:%S')
# # tell the handler to use this format
console.setFormatter(formatter)
# # add the handler to the root logger
logging.getLogger('').addHandler(console)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/<namespace>', methods=['GET'])
def get_ns_list(namespace):
data = read_data()
if data:
table = []
headers = ['Satellite', 'Satellite URI', 'Latest qnode number', 'Prefix', 'num_of_0']
if namespace == 'all':
logger.debug('return all namespaces')
for k, v in data.items():
table.append([k, v['uri'], v['latest'], v['prefix'], v['num_of_0']])
else:
if namespace in data.keys():
logger.debug('return ' + namespace + ' namespace')
table.append([namespace, data[namespace]['uri'], data[namespace]['latest'],
data[namespace]['prefix'], data[namespace]['num_of_0']])
else:
raise Exception('Namespace does not exist in satellite.')
return tabulate(table, headers, tablefmt="psql")
return 'There is no satellite. Please register your satellite at first.'
@app.route('/<namespace>/reservation', methods=['GET', 'POST'])
def get_qnode_by_ns(namespace):
if namespace:
data = get_qnode(namespace)
if data:
logger.debug('reserve a qnode in ' + namespace + ' namespace')
return json.dumps({'Latest qnode': data}, indent=2)
else:
raise Exception('Please register your satellite at first.')
return 'Welcome to the reservation service.'
@app.route('/delete', methods=['GET', 'POST'])
def delete_ns():
namespace = request.values.get('namespace')
if namespace:
flag = delete_namespace(namespace)
if flag:
logger.debug('delete ' + namespace + ' namespace success.')
return 'Success'
else:
raise Exception('Namespace does not exist in satellite.')
return 'Welcome to the reservation service.'
@app.route('/register', methods=['GET', 'POST'])
def register_ns():
namespace = request.values.get('namespace')
uri = request.values.get('uri')
prefix = request.values.get('prefix')
num_of_0 = request.values.get('num_of_0')
if not num_of_0:
num_of_0 = 7
if namespace and uri and prefix:
flag = register(namespace, uri, prefix, num_of_0)
if flag:
logger.debug('register ' + namespace + ' namespace success.')
return 'Register successfully and you are ready to use this satellite. '
else:
raise Exception('This satellite already exists.')
return 'Welcome to the reservation service.'
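# Hedged usage sketch for the routes above (added; host, port and parameter values are illustrative):
#
#   curl "http://127.0.0.1:5000/all"
#   curl "http://127.0.0.1:5000/register" --data "namespace=wikidata&uri=http://example.org&prefix=Q"
#   curl "http://127.0.0.1:5000/wikidata/reservation"
#   curl "http://127.0.0.1:5000/delete" --data "namespace=wikidata"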
if __name__ == '__main__':
app.run() | StarcoderdataPython |
3244742 | # (c) 2012, <NAME> <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
author:
- <NAME> <<EMAIL>>
lookup: fileglob
version_added: historical
short_description: return a list of matched files
description:
- Given a shell glob (?, * [1-9]) return a list of files that matched from the local filesystem.
- Uses the python glob library to accomplish this .
options:
_raw:
description:
- the list of path globs to match
type: list
element_type: string
required: True
notes:
- The first top relative path to match will be used using normal lookup search paths, i.e if in a role and looking for files/*
the files/ directory in the role will be chosen over files/ directory in play.
EXAMPLES:
- name: "copy configs"
copy: src={{ item }} dest=/etc/conf.d/
with_fileglob:
- 'files/conf.d/*.conf'
- name: "list all yaml files"
debug: msg="{{ lookup('fileglob', ['/etc/*.yml', 'vars/*.yml', 'vars/*/*.yml' ]) }}"
RETURN:
_list:
description:
- list of files matched
type: list
element_type: string
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import glob
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
class LookupModule(LookupBase):
GLOBS = frozenset(['?', '*', '['])
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
# find smallest unglobbed match
min_spot = len(term)
for symbol in self.GLOBS:
x = term.find(symbol, 0, min_spot)
if x > 0:
min_spot = x
if min_spot == len(term):
dwimmed_path = self.find_file_in_search_path(variables, 'files', term)
ret.append(dwimmed_path)
else:
term_root = term[:min_spot]
dwimmed_path = self.find_file_in_search_path(variables, 'files', os.path.dirname(term_root))
globbed = glob.glob(to_bytes(os.path.join(dwimmed_path, os.path.basename(term_root)) + term[min_spot:], errors='surrogate_or_strict'))
ret.extend(to_text(g, errors='surrogate_or_strict') for g in globbed if os.path.isfile(g))
return ret
| StarcoderdataPython |
1784955 | import numpy as np
from basics.orig import update_S, update_V, solve_U, E
from utils.math_utils import U_converged
from utils.metrics import nmi_acc
def iteration(X, U, V, labels, p, logger):
N = len(X)
C = len(V)
gamma, epsilon = p.gamma, p.epsilon
capped = p.capped or True
S = np.ones((N, C))
t = 0
while True:
new_U = solve_U(S, X, V, gamma)
delta, converged = U_converged(new_U, U)
U = new_U
V = update_V(S, U, X)
S = update_S(X, V, epsilon, capped)
metric_now = nmi_acc(U, labels)
E_now = E(U, V, X, gamma, epsilon, capped)
if converged:
break
logger.log_middle(E_now, metric_now)
t += 1
return U, V, t, metric_now
| StarcoderdataPython |
191355 | <gh_stars>0
"""Config flow for MitBlod integration."""
import pymitblod
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.typing import ConfigType
from homeassistant.config_entries import ConfigFlow, CONN_CLASS_CLOUD_POLL
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD
)
from .const import (
CONF_IDENTIFICATION,
CONF_INSTITUTION,
DOMAIN,
CONF_ADDITIONAL_DATA,
MITBLOD_SCHEMA,
USERDATA_SCHEMA,
_LOGGER
)
async def validate_login(hass:HomeAssistant, user_input:dict):
institution_enum = pymitblod.Institutions.get_enum_with(value=user_input[CONF_INSTITUTION])
patient = pymitblod.MitBlod(
identification=user_input[CONF_IDENTIFICATION],
password=user_input[CONF_PASSWORD],
institution=institution_enum
)
    return await hass.async_add_executor_job(patient.can_login)
async def get_mitblod_name(hass:HomeAssistant, user_input:dict):
institution_enum = pymitblod.Institutions.get_enum_with(value=user_input[CONF_INSTITUTION])
patient = pymitblod.MitBlod(
identification=user_input[CONF_IDENTIFICATION],
password=user_input[CONF_PASSWORD],
institution=institution_enum
)
    return await hass.async_add_executor_job(patient.mitblod_name)
class MitBlodFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for MitBlod."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_CLOUD_POLL
def __init__(self) -> None:
super().__init__()
self._init_data = {}
self._additional_data = {}
async def async_step_user(self, user_input:dict=None) -> FlowResult:
"""Handle a flow initiated by the user."""
errors={}
if user_input is not None:
if await validate_login(hass=self.hass, user_input=user_input):
self._init_data = user_input
return await self.async_step_additional()
return self.async_show_form(step_id="user", data_schema=MITBLOD_SCHEMA, errors=errors)
async def async_step_additional(self, user_input:dict=None) -> FlowResult:
errors = {}
if user_input is not None:
self._additional_data = user_input
return await self.async_step_finish()
return self.async_show_form(step_id="additional", data_schema=USERDATA_SCHEMA, errors=errors)
async def async_step_finish(self) -> FlowResult:
data = {**self._init_data, **self._additional_data}
        name = data[CONF_NAME] if CONF_NAME in self._additional_data else await get_mitblod_name(self.hass, self._init_data)
return self.async_create_entry(title=f"{name} at {data[CONF_INSTITUTION]}", data=data) | StarcoderdataPython |
1701074 |
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pylab as plt
import sys, os
sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..'))
from AI_scientist.util import plot_matrices, make_dir, get_args, Early_Stopping, record_data
from AI_scientist.settings.filepath import variational_model_PATH
from AI_scientist.pytorch.net import Net
from AI_scientist.variational.variational_meta_learning import get_tasks, plot_individual_tasks_bounce
seed = 1
np.random.seed(seed)
# In[ ]:
num_train_tasks = 100
num_test_tasks = 100
input_size = 1
task_id_list = [
# "latent-linear",
# "polynomial-3",
# "Legendre-3",
# "M-sawtooth",
# "M-sin",
# "M-Gaussian",
# "M-tanh",
# "M-softplus",
"bounce-states",
# "bounce-images",
]
task_settings = {
"zdim": 1,
"z_settings": ["Gaussian", (0, 1)],
"num_layers": 1,
"xlim": (-4, 4),
"activation": "softplus",
"input_size": input_size,
"test_size": 0.2,
"num_examples": 2000,
}
tasks_train, tasks_test = get_tasks(task_id_list, num_train_tasks, num_test_tasks, task_settings = task_settings, render = False)
# In[ ]:
plot_individual_tasks_bounce(tasks_train, num_examples_show = 40, num_tasks_show = 9)
# In[3]:
tasks_train, tasks_test = get_tasks(task_id_list, num_train_tasks, num_test_tasks, task_settings = task_settings)
epochs = 1000
for i in range(epochs):
################
#Train with training tasks:
################
for task_key, task in tasks_train.items():
((X_train, y_train), (X_test, y_test)), info = task
################
# Evaluation with testing tasks
################
# In[ ]:
((X_train, y_train), (X_test, y_test)), info = tasks_train['master_tanh_10']
# In[5]:
plt.plot(X_train.data.numpy(), y_train.data.numpy(), ".")
| StarcoderdataPython |
26280 | # Copyright (c) 2021 <NAME>. All Rights Reserved.
import pymel.core as pm
import piper_config as pcfg
import piper.mayapy.util as myu
import piper.mayapy.convert as convert
import piper.mayapy.attribute as attribute
from .rig import curve # must do relative import in python 2
def get(node_type, ignore=None, search=True):
"""
Gets the selected given node type or all the given node types in the scene if none selected.
Args:
node_type (string): Type of node to get.
ignore (string): If given and piper node is a child of given ignore type, do not return the piper node.
search (boolean): If True, and nothing is selected, will attempt to search the scene for all of the given type.
Returns:
(list) All nodes of the given node type.
"""
piper_nodes = []
selected = pm.selected()
if selected:
# get only the piper nodes from selection
piper_nodes = pm.ls(selected, type=node_type)
# traverse hierarchy for piper nodes
if not piper_nodes:
piper_nodes = set()
for node in selected:
first_type_parent = myu.getFirstTypeParent(node, node_type)
piper_nodes.add(first_type_parent) if first_type_parent else None
# search the whole scene for the piper node
elif search:
piper_nodes = pm.ls(type=node_type)
# don't include any nodes that are a child of the given ignore type
if ignore:
piper_nodes = [node for node in piper_nodes if not myu.getFirstTypeParent(node, ignore)]
return piper_nodes
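# Hedged usage sketch (added): get('piperRig') returns the selected piperRig nodes (walking
# up the hierarchy when a child is selected) or, with nothing selected, every piperRig in
# the scene; get('piperRig', ignore='piperAnimation') additionally drops rigs that already
# live under a piperAnimation node, which is how createAnimation() further below picks the
# rig to parent.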
def multiply(transform, main_term=None, weight=None, inputs=None):
"""
Creates the multiply node and hooks up all the given given inputs to the given transform's scale.
Args:
transform (pm.nodetypes.Transform): Node to hook multiply onto its scale.
main_term (pm.general.Attribute): Attribute to connect onto the multiply main_term.
weight (pm.general.Attribute): Attribute to connect onto the multiply weight.
inputs (list): Attributes to connect to the input plug of the multiply node.
Returns:
(pm.nodetypes.piperMultiply): Multiply node created.
"""
multiply_node = pm.createNode('piperMultiply', n=transform.name(stripNamespace=True) + '_scaleMultiply')
multiply_node.output >> transform.scale
if main_term:
main_term >> multiply_node.mainTerm
if weight:
weight >> multiply_node.weight
if not inputs:
return multiply_node
[attr >> multiply_node.input[i] for i, attr in enumerate(inputs)]
return multiply_node
def divide(dividend=1.0, divisor=1.0, result_input=None):
"""
Creates a node that divides the given dividend by the given divisor.
Args:
dividend (pm.general.Attribute or float): Number that will be divided.
divisor (pm.general.Attribute or float): Number that will perform the division.
result_input (pm.general.Attribute): Attribute to plug in division output into.
Returns:
(pm.nodetypes.piperSafeDivide): Division node created.
"""
divide_node = pm.createNode('piperSafeDivide')
if isinstance(dividend, pm.general.Attribute):
dividend_name = dividend.name().split(':')[-1].replace('.', '_')
dividend >> divide_node.input1
else:
dividend_name = str(dividend)
divide_node.input1.set(dividend)
if isinstance(divisor, pm.general.Attribute):
divisor_name = divisor.name().split(':')[-1].replace('.', '_')
divisor >> divide_node.input2
else:
divisor_name = str(divisor)
divide_node.input2.set(divisor)
if result_input:
divide_node.output >> result_input
divide_node.rename(dividend_name + '_DIV_' + divisor_name)
return divide_node
def inputOutput(node_type, source=None, output=None):
"""
Creates a node that has an input and output attribute based on given node type.
Args:
node_type (string): Type of node to create.
source (pm.general.Attribute): Attribute to plug into node's input.
output (pm.general.Attribute): Attribute to plug node's output into.
Returns:
(pm.nodetypes.DependNode): Node created.
"""
name = source.node().name(stripNamespace=True) + '_' if source else ''
suffix = node_type.split('piper')[-1]
node = pm.createNode(node_type, name=name + suffix)
if source:
source >> node.input
if output:
node.output >> output
return node
def oneMinus(source=None, output=None):
"""
Creates a one minus node that turns a 0 to 1 range into a 1 to 0 or vice versa.
Args:
source (pm.general.Attribute): Attribute to plug into one minus input.
output (pm.general.Attribute): Attribute to plug one minus' output into.
Returns:
(pm.nodetypes.piperOneMinus): One minus node created.
"""
return inputOutput('piperOneMinus', source=source, output=output)
def reciprocal(source=None, output=None):
"""
Creates a node that takes in the given source attribute and output its reciprocal. Reciprocal == 1/X
Args:
source (pm.general.Attribute): Attribute to plug into reciprocal's input.
output (pm.general.Attribute): Attribute to plug reciprocal's output into.
Returns:
(pm.nodetypes.piperReciprocal): Reciprocal node created.
"""
return inputOutput('piperReciprocal', source=source, output=output)
def create(node_type, color=None, name=None, parent=None):
"""
Creates the given node type with the given color and given name/parent.
Args:
node_type (string): Node type to create.
color (string): Name of color to turn outliner text to. Currently supporting:
cyan, pink.
name (string): Name of node.
parent (PyNode or string): Parent of new node.
Returns:
(PyNode): Node created.
"""
name = name if name else node_type
piper_node = pm.createNode(node_type, name=name, parent=parent, skipSelect=True)
rgb = convert.colorToRGB(color)
if rgb:
piper_node.useOutlinerColor.set(True)
piper_node.outlinerColor.set(rgb)
return piper_node
def createShaped(node_type, name=None, control_shape=curve.circle):
"""
Creates piper IK transform with given control shape curve
Args:
node_type (string): Name for the type of node to create.
name (string): Name to give the transform node.
control_shape (method): Method that generates nurbs curve the transform will use.
Returns:
(PyNode): Transform node created with control shape curves as child(ren).
"""
transform = create(node_type, name=name)
transform._.lock()
ctrl = control_shape()
curves = ctrl.getChildren(type='nurbsCurve')
pm.parent(curves, transform, shape=True, add=True)
pm.delete(ctrl)
return transform
def createFK(name=None, control_shape=curve.circle):
"""
Creates piper FK transform with given control shape curve
Args:
name (string): Name for the piper IK nodes.
control_shape (method): Method that generates nurbs curve that Piper FK transform will use.
Returns:
(pm.nodetypes.piperFK): Piper FK node created.
"""
return createShaped('piperFK', name, control_shape)
def createIK(name=None, control_shape=curve.circle):
"""
Creates piper IK transform with given control shape curve
Args:
name (string): Name for the piper IK nodes.
control_shape (method): Method that generates nurbs curve that Piper IK transform will use.
Returns:
(pm.nodetypes.piperIK): Piper IK node created.
"""
return createShaped('piperIK', name, control_shape)
def createOrientMatrix(position, orientation, name=None):
"""
Creates a piper orient matrix node that keeps given position matrix, but maintains given orientation matrix.
Args:
position (pm.general.Attribute or pm.dt.Matrix): position to plug into orient matrix position attribute.
orientation (pm.general.Attribute or pm.dt.Matrix): orientation to plug into orient matrix orient attribute.
name (string): Name to give piper orient matrix node.
Returns:
(pm.nodetypes.piperOrientMatrix): Piper Orient Matrix node created.
"""
if not name:
name = 'orientMatrix'
node = pm.createNode('piperOrientMatrix', name=name)
if isinstance(position, pm.general.Attribute):
position >> node.positionMatrix
elif isinstance(position, pm.dt.Matrix):
node.positionMatrix.set(position)
if isinstance(orientation, pm.general.Attribute):
orientation >> node.orientMatrix
elif isinstance(orientation, pm.dt.Matrix):
node.orientMatrix.set(orientation)
return node
def createSwingTwist(driver, target, axis='y', swing=0, twist=1):
"""
Creates the swing twist node with given axis, swing, and twist attributes.
Args:
driver (pm.nodetypes.Transform): Node that will drive given target. Must have BIND used as rest matrix.
target (pm.nodetypes.Transform): Node that will be driven with twist/swing through offsetParentMatrix.
axis (string): Axis in which node will output twist.
swing (float): Weight of swing rotation.
twist (float): Weight of twist rotation.
Returns:
(pm.nodetypes.swingTwist): Swing Twist node created.
"""
name = target.name(stripNamespace=True) + '_ST'
swing_twist = pm.createNode('swingTwist', n=name)
axis_index = convert.axisToIndex(axis)
swing_twist.twistAxis.set(axis_index)
swing_twist.swing.set(swing)
swing_twist.twist.set(twist)
driver_bind = convert.toBind(driver, fail_display=pm.error)
driver.matrix >> swing_twist.driverMatrix
driver_bind.matrix >> swing_twist.driverRestMatrix
offset_driver = swing_twist.outMatrix
node_plug = attribute.getSourcePlug(target.offsetParentMatrix)
if node_plug:
mult_matrix = pm.createNode('multMatrix', n=name + '_MM')
swing_twist.outMatrix >> mult_matrix.matrixIn[0]
node_plug >> mult_matrix.matrixIn[1]
offset_driver = mult_matrix.matrixSum
offset_driver >> target.offsetParentMatrix
return swing_twist
def createMesh():
"""
Creates a piper mesh group(s) based on whether user has selection, shift held, and scene saved.
Returns:
(pm.nt.piperMesh or list): Usually PyNode created. If Shift held, will return list or all piperMesh(es) created.
"""
selected = pm.selected()
scene_name = pm.sceneName().namebase
if selected:
# if shift held, create a a piper mesh for each selected object.
if myu.isShiftHeld():
piper_meshes = []
for node in selected:
parent = node.getParent()
name = pcfg.mesh_prefix + node.nodeName()
piper_mesh = create('piperMesh', 'cyan', name=name, parent=parent)
pm.parent(node, piper_mesh)
piper_meshes.append(piper_mesh)
return piper_meshes
else:
# If user selected stuff that is not a mesh, warn the user.
non_mesh_transforms = [node for node in selected if not node.getShapes()]
if non_mesh_transforms:
pm.warning('The following are not meshes! \n' + '\n'.join(non_mesh_transforms))
# Get the parent roots and parent them under the piper mesh node to not mess up any hierarchies.
name = pcfg.mesh_prefix
name += scene_name if scene_name else selected[-1].nodeName()
piper_mesh = create('piperMesh', 'cyan', name=name)
parents = myu.getRootParents(selected)
pm.parent(parents, piper_mesh)
return piper_mesh
name = '' if scene_name.startswith(pcfg.mesh_prefix) else pcfg.mesh_prefix
name += scene_name if scene_name else 'piperMesh'
piper_mesh = create('piperMesh', 'cyan', name=name)
meshes = pm.ls(type='mesh')
parents = myu.getRootParents(meshes)
pm.parent(parents, piper_mesh)
return piper_mesh
def createSkinnedMesh():
"""
Creates a skinned mesh node for each root joint found in the skin clusters
Returns:
(list): PyNodes of nodes created.
"""
selected = pm.selected()
scene_name = pm.sceneName().namebase
if selected:
skin_clusters = set()
skin_clusters.update(set(pm.listConnections(selected, type='skinCluster')))
skin_clusters.update(set(pm.listHistory(selected, type='skinCluster')))
else:
skin_clusters = pm.ls(type='skinCluster')
if not skin_clusters:
pm.warning('No skin clusters found!')
piper_skinned_mesh = create('piperSkinnedMesh', 'pink', name=pcfg.skinned_mesh_prefix + 'piperSkinnedMesh')
return [piper_skinned_mesh]
piper_skinned_meshes = []
skinned_meshes = myu.getSkinnedMeshes(skin_clusters)
for root_joint, geometry in skinned_meshes.items():
name = '' if scene_name.startswith(pcfg.skinned_mesh_prefix) else pcfg.skinned_mesh_prefix
name += scene_name if scene_name else next(iter(geometry)).nodeName()
piper_skinned_mesh = create('piperSkinnedMesh', 'pink', name=name)
piper_skinned_meshes.append(piper_skinned_mesh)
geometry_parents = myu.getRootParents(geometry)
pm.parent(root_joint, geometry_parents, piper_skinned_mesh)
return piper_skinned_meshes
def createRig(name=''):
"""
Creates the node that houses all rig nodes.
Args:
name (string): If given, will use the given name as the name for the rig node.
Returns:
(pm.nodetypes.piperRig): Rig node created.
"""
name = name if name else 'piperRig'
piper_rig = create('piperRig', 'burnt orange', name=name)
piper_rig.addAttr(pcfg.message_root_control, at='message')
piper_rig._.lock()
attribute.nonKeyable(piper_rig.highPolyVisibility)
attribute.lockAndHideCompound(piper_rig)
attribute.addSeparator(piper_rig)
return piper_rig
def createAnimation():
"""
Creates the node that houses a rig. Used to export animation.
Returns:
(pm.nodetypes.piperAnimation): Animation node created.
"""
scene_name = pm.sceneName().namebase
name = scene_name if scene_name else 'piperAnimation'
piper_animation = create('piperAnimation', 'dark green', name=pcfg.animation_prefix + name)
attribute.lockAndHideCompound(piper_animation)
rigs = get('piperRig', ignore='piperAnimation')
pm.parent(rigs[0], piper_animation) if len(rigs) == 1 else pm.warning('{} rigs found!'.format(str(len(rigs))))
return piper_animation
| StarcoderdataPython |
136750 | <gh_stars>10-100
import random as rand
class Qbit:
def __init__(self, index, prev_1q_gate):
self.index = index
self.prev_1q_gate = prev_1q_gate
self.gate_dict = {'T':('Y','X'), 'Y':('X','T'), 'X': ('T','Y')}
def h(self):
self.prev_1q_gate = 'H'
return self.index
def random_gate(self):
# After a CZ-gate, randomly select X_1_2 or Y_1_2
gate_choices = ['X','Y']
coin_flip = rand.randint(0,1)
self.prev_1q_gate = gate_choices[coin_flip]
return gate_choices[coin_flip]
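# Minimal usage sketch (not part of the original snippet): build a qubit, apply a
# Hadamard, then draw the random follow-up gate; the wire index 0 is arbitrary.
if __name__ == '__main__':
    q = Qbit(0, None)
    q.h()                    # records 'H' as the previous 1-qubit gate
    print(q.random_gate())   # randomly prints 'X' or 'Y'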
| StarcoderdataPython |
126099 | CLIENT_ID = "mxxgwertsps7ry9zsdkk7r3"
CLIENT_SECRET = "<KEY>" | StarcoderdataPython |
42810 | <filename>hawkbot/__main__.py
from . import bot
from configparser import ConfigParser
import sys
def get_config(filename):
config = ConfigParser()
config.read(filename)
return config
def main():
config = get_config(sys.argv[1])
bot.config = config
bot.run(config['login']['token'])
if __name__ == '__main__':
main() | StarcoderdataPython |
161022 | <gh_stars>0
# Standard libraries
import io
import os
import re
from setuptools import setup, find_packages
from typing import List
# Constants
PATH_ROOT = os.path.dirname(__file__)
def _load_requirements(path_dir: str, file_name: str = "requirements.txt", comment_char: str = "#") -> List[str]:
"""Load requirements from a file."""
with open(os.path.join(path_dir, file_name), "r") as file:
lines = [ln.strip() for ln in file.readlines()]
reqs = []
for ln in lines:
        # Filter out all comments
if comment_char in ln:
ln = ln[: ln.index(comment_char)].strip()
# Skip directly installed dependencies
if ln.startswith("http"):
continue
if ln: # if requirement is not empty
reqs.append(ln)
return reqs
with io.open('plums/__init__.py', 'rt', encoding='utf8') as f:
version = re.search(r'__version__ = \'(.*?)\'', f.read(), re.M).group(1)
setup(
name='plums',
version=str(version),
packages=find_packages(exclude=['tests', 'tests.*', 'docs', 'docs.*']),
author="Airbus DS GEO",
author_email="<EMAIL>",
description="Playground ML Unified Microlib Set: The Playground ML python toolbox package",
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
license="MIT",
include_package_data=True,
zip_safe=False,
python_requires=">=3.6",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
install_requires=_load_requirements(path_dir=os.path.join(PATH_ROOT), file_name="requirements.txt"),
extras_require={
"docs": _load_requirements(path_dir=os.path.join(PATH_ROOT, "requirements"), file_name="requirements-docs.txt"),
"lint": _load_requirements(path_dir=os.path.join(PATH_ROOT, "requirements"), file_name="requirements-lint.txt"),
"tests": _load_requirements(path_dir=os.path.join(PATH_ROOT, "requirements"), file_name="requirements-tests.txt"),
},
)
| StarcoderdataPython |
1661479 | <reponame>codepipe/netapp-ansible<gh_stars>10-100
#!/usr/bin/python
import sys
import json
from ansible.module_utils import ntap_util
try:
from NaServer import *
NASERVER_AVAILABLE = True
except ImportError:
NASERVER_AVAILABLE = False
if not NASERVER_AVAILABLE:
module.fail_json(msg="The NetApp Manageability SDK library is not installed")
DOCUMENTATION = '''
---
module: asup_modify
version_added: "1.0"
author: "<NAME> (@jeorryb)"
short_description: Modify autosupport
description:
- Ansible module to modify autosupport settings on NetApp CDOT arrays via the NetApp python SDK.
requirements:
- NetApp Manageability SDK
options:
cluster:
required: True
description:
- "The ip address or hostname of the cluster"
user_name:
required: True
description:
- "Administrator user for the cluster/node"
password:
required: True
description:
- "password for the admin user"
val_certs:
default: True
description:
- "Perform SSL certificate validation"
from_addr:
required: False
description:
- "sender of the autosupport message"
is_node_subject:
required: False
description:
- "Specifies whether the node name is included in the subject line"
mail_host:
required: False
description:
- "Name or IP of the smtp server to use"
node:
required: True
description:
- "Name of the node you are modifying"
partner:
required: False
description:
- "You can specify up to 5 partner vendor addresses"
to_addr:
required: False
description:
- "You can specify up to 5 recipient addresses"
transport:
required: False
description:
- "Name of transport protocol; smtp|http|https"
enabled:
required: False
description:
- "Specifies whether asup daemon is enabled"
'''
EXAMPLES = '''
# Modify ASUP
- name: Modify asup settings
asup_modify:
cluster: "192.168.0.1"
user_name: "admin"
password: "<PASSWORD>"
from_addr: "<EMAIL>"
is_node_subject: True
mail_host: "smtp.widget.com"
node: "atlcdot-01"
partner: "<EMAIL>"
to_addr: "<EMAIL>"
transport: "https"
enabled: True
'''
def asup_modify(module):
from_addr = module.params['from_addr']
is_node_subject = module.params['is_node_subject']
mail_host = module.params['mail_host']
node = module.params['node']
partner = module.params['partner']
to_addr = module.params['to_addr']
transport = module.params['transport']
enabled = module.params['enabled']
results = {}
results['changed'] = False
api = NaElement("autosupport-config-modify")
api.child_add_string("node-name", node)
if module.params['from_addr']:
api.child_add_string("from", from_addr)
if module.params['enabled']:
api.child_add_string("is-enabled", enabled)
if module.params['is_node_subject']:
api.child_add_string("is-node-in-subject", is_node_subject)
if module.params['mail_host']:
xi3 = NaElement("mail-hosts")
api.child_add(xi3)
for smtp in mail_host:
xi3.child_add_string("string", smtp)
if module.params['partner']:
xi1 = NaElement("partner-address")
api.child_add(xi1)
for addr in partner:
xi1.child_add_string("mail-address", addr)
if module.params['to_addr']:
xi2 = NaElement("to")
api.child_add(xi2)
for addr in to_addr:
xi2.child_add_string("mail-address", addr)
if module.params['transport']:
api.child_add_string("transport", transport)
connection = ntap_util.connect_to_api(module)
xo = connection.invoke_elem(api)
if(xo.results_errno() != 0):
r = xo.results_reason()
module.fail_json(msg=r)
results['changed'] = False
else:
results['changed'] = True
return results
def main():
argument_spec = ntap_util.ntap_argument_spec()
argument_spec.update(dict(
from_addr=dict(required=False),
is_node_subject=dict(required=False, type='bool'),
mail_host=dict(required=False, type='list'),
node=dict(required=True),
partner=dict(required=False, type='list'),
to_addr=dict(required=False, type='list'),
transport=dict(required=False, choices=['https', 'http', 'smtp']),
enabled=dict(required=False, type='bool'),))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
results = asup_modify(module)
module.exit_json(**results)
from ansible.module_utils.basic import *
main()
| StarcoderdataPython |
1635984 | <reponame>GabrielMMelo/turing-machine<filename>src/tm.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from .reader import Reader
class Tm():
"""Classe que representa uma máquina de Turing determinística para computação de funções numéricas."""
def __init__(self, filename):
"""
        :param filename: Name of the input file.
:type filename: str
"""
reader = Reader(filename)
self.Q, self.S, self.q0, self.tape = reader.read_file()
self.transitions = {}
self.make_transitions()
self.actual = self.q0
self.position = 0
def make_transitions(self):
"""Método que cria dicionário de dicionários para armazenamento das transições de cada estado.
A estrutura é dada no seguinte formato abaixo:
- { ``estado_i``: { ``simbolo_leitura_j``: (``proximo_estado_k``, ``simbolo_escrita_k``, ``direcao_k``), ...}, ...}
Por exemplo:
- {'q0': {'B': ('q1', '1', 'R'), ...}, 'q1': {'1', ...}, ... 'qn': { ...}}
"""
for state in self.Q:
self.transitions[state] = {}
for source, in_symbol, destination, out_symbol, direction, _ in self.S:
if source == state:
self.transitions[state][in_symbol] = [destination, out_symbol, direction]
def show_tape(self):
"""Método que imprime o estado atual e a configuração da fita."""
print("".join([self.tape[:self.position], "{", self.actual, "}", \
self.tape[self.position:]]))
def compute(self):
"""Método que executa uma computação na máquina de Turing,
alterando -*ou não*- o estado atual, escrevendo um símbolo e movendo
a cabeça de leitura para esquerda ou direita.
"""
self.show_tape()
destination, out_symbol, direction = self.get_transition()
self.write_actual(out_symbol)
self.actual = destination
if direction == 'R':
self.move_right()
else:
self.move_left()
def get_transition(self):
"""Método que retorna a transição que o estado atual irá realizar lendo o símbolo atual.
:return: (*List*) Lista de ``str`` contendo uma transição no formato lista['``qi``', '``ri``', '``D``'], onde:
- ``qi``: Estado destino;
- ``ri``: Símbolo a ser escrito;
- ``D``: Direção a mover a cabeça de leitura (*L* ou *R*)
"""
return self.transitions[self.actual][self.read_actual()]
def clean_right(self):
"""Método que remove símbolo branco (*'B'*) excedente à direita da fita da máquina de Turing."""
if self.tape[-1] == 'B' and self.tape[-2] == 'B':
self.tape = self.tape[:len(self.tape)-1]
def move_left(self):
"""Método que move a cabeça de leitura para esquerda."""
#self.clean_right()
try:
self.position -= 1
if self.position == -1:
raise Exception("Movimento inválido!")
except Exception as error:
sys.exit('Erro encontrado: ' + str(error))
def move_right(self):
"""Método que move a cabeça de leitura para direita."""
self.position += 1
if self.position == len(self.tape):
self.tape = "".join([self.tape, 'B'])
def write_actual(self, value):
"""Método que realiza a escrita na posição onde a cabeça de leitura se encontra.
:param value: Valor a ser escrito na posição atual
:type value: str
"""
input_list = list(self.tape)
input_list[self.position] = value
self.tape = "".join(input_list)
        # Case of writing at the "last" tape position with a symbol other than 'B'
if self.position == len(self.tape) - 1 and not self.tape[self.position] == 'B':
self.tape = "".join([self.tape, 'B'])
def read_actual(self):
"""Método que retorna o símbolo da posição onde está a cabeça de leitura.
:return: (*str*) Símbolo da fita na posição que a cabeça de leitura se encontra.
"""
return self.tape[self.position]
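# Illustrative usage sketch (not part of the original module): the default file name is
# hypothetical, and the input file must follow the format expected by Reader.read_file().
if __name__ == '__main__':
    machine = Tm(sys.argv[1] if len(sys.argv) > 1 else 'input.txt')
    for _ in range(100):  # run a bounded number of computation steps
        machine.compute()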
| StarcoderdataPython |
72089 | from functools import partial
from django.db import models
from model_utils.managers import InheritanceManager
from coberturas_medicas.models import Cobertura
from core.models import Persona, Profesional
from dj_utils.mixins import ShowInfoMixin
from dj_utils.models import BaseModel, uploadTenantFilename
class Paciente(BaseModel):
"""
    Person who receives care at the facility.
"""
persona = models.OneToOneField(Persona, verbose_name='persona', on_delete=models.CASCADE)
fecha_ingreso = models.DateField('fecha de ingreso')
observaciones = models.TextField('observaciones', blank=True)
# relaciones
cobertura_medica = models.ForeignKey(Cobertura, verbose_name='cobertura', null=True, on_delete=models.SET_NULL)
def __str__(self):
return "{}".format(self.persona)
class Meta:
ordering = ('persona', )
verbose_name = "paciente"
verbose_name_plural = "pacientes"
def tratamiento_activo(self, el_dia=None):
from tratamientos.models import Planificacion, MotivoConsulta
try:
if el_dia:
return self.motivos_de_consulta.filter(creado_el__lte=el_dia).latest('creado_el')
return self.motivos_de_consulta.filter(
planificaciones__estado__in=Planificacion.estados_activos()).latest('creado_el')
except MotivoConsulta.DoesNotExist:
return None
def ultimo_motivo_consulta(self):
from tratamientos.models import MotivoConsulta
try:
return self.motivos_de_consulta.latest('creado_el')
except MotivoConsulta.DoesNotExist:
return None
class RegistroBiometrico(BaseModel, ShowInfoMixin):
"""
    Record of biometric data. Since these values vary over time, the date is
    kept as well.
"""
paciente = models.ForeignKey(Paciente, related_name='registros_biometricos', on_delete=models.CASCADE)
peso = models.DecimalField('peso (kg)', max_digits=5, decimal_places=2, null=True)
altura = models.DecimalField('altura (mts)', max_digits=5, decimal_places=2, null=True)
    # other biomedical data.
profesional = models.ForeignKey(Profesional, on_delete=models.CASCADE)
    # files
def __str__(self):
return "Registro biométrico de {} ({})".format(self.paciente, self.creado_el)
class Meta:
verbose_name = 'registro biométrico'
verbose_name_plural = 'registros biométricos'
field_info = ('modificado_el', 'peso', 'altura', )
class Antecedente(BaseModel, ShowInfoMixin):
"""
    Represents the patient's medical history.
    Contains medical and other relevant data about the patient.
"""
paciente = models.OneToOneField(Paciente, on_delete=models.CASCADE)
patologicos = models.TextField('patológicos', blank=True)
quirurgicos = models.TextField('quirúrgicos', blank=True)
traumaticos = models.TextField('traumáticos', blank=True)
alergicos = models.TextField('alérgicos', blank=True)
heredo_familiar = models.TextField('heredo familiar', blank=True)
habitos_fisiologicos = models.TextField('hábitos fisiológicos', blank=True)
actividad_fisica= models.TextField('actividad física', blank=True)
habitos_patologicos = models.TextField('hábitos patológicos', blank=True)
medicaciones = models.TextField('medicaciones', blank=True)
estudios_complementarios = models.TextField('estudios complementarios', blank=True)
menarca = models.DateField('MENARCA', null=True)
fum = models.DateField('FUM', null=True)
tipo_partos = models.TextField('tipo de partos', blank=True)
observaciones = models.TextField('observaciones', blank=True)
    def __str__(self):
return "Antecedentes de {}".format(
self.paciente.persona.nombre)
class Meta:
verbose_name = "antecedente"
verbose_name_plural = "antecedentes"
field_info = ('patologicos', 'quirurgicos', 'traumaticos', 'alergicos', 'heredo_familiar',
'habitos_fisiologicos', 'actividad_fisica', 'habitos_patologicos', 'medicaciones',
'estudios_complementarios', 'menarca', 'fum', 'tipo_partos', 'observaciones')
class EntradaHistoriaClinica(BaseModel, ShowInfoMixin):
paciente = models.ForeignKey(Paciente, related_name="entradas_historiaclinica", on_delete=models.CASCADE)
profesional = models.ForeignKey(Profesional, on_delete=models.CASCADE)
objects = InheritanceManager()
class Meta:
verbose_name_plural = "Entradas de historia clínica"
verbose_name = "Entrada de historia clínica"
def __str__(self):
return "Entrada de {} por {}".format(self.paciente, self.profesional)
class ComentariosHistoriaClinica(EntradaHistoriaClinica):
"""
Representa una entrada en la historia clínica del paciente.
"""
comentarios = models.TextField(verbose_name="comentarios")
class Meta:
verbose_name_plural = "comentarios de historia clinica"
verbose_name = "comentario de historia clinica"
def __str__(self):
return "Comentario de {}".format(self.paciente)
field_info = ('comentarios', )
class ImagenesHistoriaClinica(EntradaHistoriaClinica):
"""
    Represents an image entered into the clinical history
"""
imagen = models.ImageField(
verbose_name="imágen", upload_to=partial(uploadTenantFilename, "historia_imagenes"))
comentarios = models.TextField(verbose_name="comentarios", null=True, blank=True)
class Meta:
verbose_name_plural = "imágenes de historia clínica"
verbose_name = "imagen de historia clínica"
def __str__(self):
return "Imágen de {}".format(self.paciente)
field_info = ('imagen', 'comentarios', )
| StarcoderdataPython |
3324992 | <gh_stars>0
import googlemaps
from datetime import datetime
import time
gmaps = googlemaps.Client(key='<KEY>')
arrTime = int(datetime(2019, 8, 5, 7, 0, 0).timestamp())
destination = 'Universidad De Los Andes, Bogota Colombia'
direction = 'Cr 50 # 106-06, Bogota Colombia'
start_time = time.time()
loc = gmaps.geocode(direction)
print(loc)
print(loc)
def timeToWork(direction):
return googlemaps.distance_matrix.distance_matrix(
client=gmaps, origins=direction, destinations=destination, departure_time=arrTime) | StarcoderdataPython |
1672736 | import numpy as np
def convolution2d_multichannel(image, kernel, bias):
_, y, x = image.shape
# kernel shape: (output channels, input channels, x, y)
chO, chI, _, _ = kernel.shape
new_image = np.empty([chO, y, x])
# for adding the images when num channel out < channel in
layer_image = np.empty([chI, y, x])
for i, kernel_arr in enumerate(kernel):
# i ... iteration no.
# kernel_arr shape: (input channels, x, y)
print("i: %d" % i)
padding = 9//2
if chO < chI: # Layers 2 and 3
padding = 5//2
for j, subkernel in enumerate(kernel_arr):
layer_image[j] = convolution2d(
image[0, ...], subkernel, bias[i], padding)
new_image[i] = np.sum(layer_image, axis=0) + bias[i]
else: # Layer 1
new_image[i] = convolution2d(
image[0, ...], kernel_arr[0, ...], bias[i], padding) + bias[i]
new_image = np.clip(new_image, 0.0, None)
return new_image
def convolution2d(image, kernel, bias, padding):
m, n = kernel.shape
if (m == n): # if kernel is quadratic
y, x = image.shape
new_image = np.zeros((y, x), dtype='float32') # create new temp array
image = np.pad(image, padding, 'edge')
for i in range(y):
for j in range(x):
new_image[i][j] = np.sum(image[i:i+m, j:j+m]*kernel) + bias
return new_image
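# Small self-contained check (not part of the original module): shapes follow the
# conventions above -- image is (channels, y, x), kernel is (out_ch, in_ch, k, k).
if __name__ == '__main__':
    image = np.random.rand(1, 16, 16)
    kernel = np.random.rand(4, 1, 9, 9)   # "layer 1" style: more output than input channels
    bias = np.zeros(4)
    out = convolution2d_multichannel(image, kernel, bias)
    print(out.shape)  # expected: (4, 16, 16)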
| StarcoderdataPython |
1674916 | from pyexcelerate import Workbook
data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] # data is a 2D array
wb = Workbook()
wb.new_sheet("sheet name", data=data)
wb.save("output.xlsx")
| StarcoderdataPython |
1618344 | '''
Matheus was chatting with his fiancée by text message when she sent him the following message:
1-4-3
He did not understand the message, so he asked what it meant, and she replied that it was 'I Love You', and he soon realized
that each number separated by a ' - ' is the number of characters of each of the words that make up the sentence. With that, he had the
idea of creating a program into which a given sentence is entered, and it computes the number of characters of each of the words and separates
the values with ' - '. He also decided that the program should receive several sentences, line by line, and at the end of the
program's execution the word with the largest number of letters should be displayed.
Input
The input consists of several test cases. The first line of a test case contains a string (1 ≤ length ≤ 100) with a single word or a
set of words that form a sentence. Test cases are processed until the number 0 is received. There can be no more than one space separating each word.
Output
For each test case, display the number of characters of each word that makes up the received sentence. Separate the character counts of
each word with ' - '. Also display the word with the largest number of characters among all the received sentences.
Note: If there are words with identical numbers of characters, consider the last one received.
conj_frases = []
while True:
frase = input()
if frase == '0':
break
conj_frases.append(frase.split())
maior_palavra = ''
max_letras = 0
for frase in conj_frases:
qt_letras = []
for palavra in frase:
qt_letras.append(str(len(palavra)))
if len(palavra) >= max_letras:
max_letras = len(palavra)
maior_palavra = palavra
print('-'.join(qt_letras))
print('\nThe biggest word: {}'.format(maior_palavra))
| StarcoderdataPython |
3328457 | class Idol:
"""Represents an Idol/Celebrity."""
def __init__(self, **kwargs):
self.id = kwargs.get('id')
self.full_name = kwargs.get('fullname')
self.stage_name = kwargs.get('stagename')
self.former_full_name = kwargs.get('formerfullname')
self.former_stage_name = kwargs.get('formerstagename')
self.birth_date = kwargs.get('birthdate')
self.birth_country = kwargs.get('birthcountry')
self.birth_city = kwargs.get('birthcity')
self.gender = kwargs.get('gender')
self.description = kwargs.get('description')
self.height = kwargs.get('height')
self.twitter = kwargs.get('twitter')
self.youtube = kwargs.get('youtube')
self.melon = kwargs.get('melon')
self.instagram = kwargs.get('instagram')
self.vlive = kwargs.get('vlive')
self.spotify = kwargs.get('spotify')
self.fancafe = kwargs.get('fancafe')
self.facebook = kwargs.get('facebook')
self.tiktok = kwargs.get('tiktok')
self.aliases = []
self.local_aliases = {} # server_id: [aliases]
self.groups = [] # group ids, not group objects.
self.zodiac = kwargs.get('zodiac')
self.thumbnail = kwargs.get('thumbnail')
self.banner = kwargs.get('banner')
self.blood_type = kwargs.get('bloodtype')
self.photo_count = 0
# amount of times the idol has been called.
self.called = 0
self.tags = kwargs.get('tags')
self.difficulty = kwargs.get('difficulty') or "medium" # easy = 1, medium = 2, hard = 3
if self.tags:
self.tags = self.tags.split(',')
| StarcoderdataPython |
1742359 | <reponame>dangervon/ironic<filename>ironic/tests/unit/drivers/modules/ibmc/test_management.py
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for iBMC Management interface."""
import itertools
from unittest import mock
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import boot_modes
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.ibmc import mappings
from ironic.drivers.modules.ibmc import utils
from ironic.tests.unit.drivers.modules.ibmc import base
constants = importutils.try_import('ibmc_client.constants')
ibmc_client = importutils.try_import('ibmc_client')
ibmc_error = importutils.try_import('ibmc_client.exceptions')
class IBMCManagementTestCase(base.IBMCTestCase):
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
properties = task.driver.get_properties()
for prop in utils.COMMON_PROPERTIES:
self.assertIn(prop, properties)
@mock.patch.object(utils, 'parse_driver_info', autospec=True)
def test_validate(self, mock_parse_driver_info):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_parse_driver_info.assert_called_once_with(task.node)
def test_get_supported_boot_devices(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
_supported_boot_devices = list(mappings.GET_BOOT_DEVICE_MAP)
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
supported_boot_devices=_supported_boot_devices
)
)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
supported_boot_devices = (
task.driver.management.get_supported_boot_devices(task))
connect_ibmc.assert_called_with(**self.ibmc)
expect = sorted(list(mappings.GET_BOOT_DEVICE_MAP.values()))
self.assertEqual(expect, sorted(supported_boot_devices))
def test_set_boot_device(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
conn.system.set_boot_source.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
device_mapping = [
(boot_devices.PXE, constants.BOOT_SOURCE_TARGET_PXE),
(boot_devices.DISK, constants.BOOT_SOURCE_TARGET_HDD),
(boot_devices.CDROM, constants.BOOT_SOURCE_TARGET_CD),
(boot_devices.BIOS,
constants.BOOT_SOURCE_TARGET_BIOS_SETUP),
('floppy', constants.BOOT_SOURCE_TARGET_FLOPPY),
]
persistent_mapping = [
(True, constants.BOOT_SOURCE_ENABLED_CONTINUOUS),
(False, constants.BOOT_SOURCE_ENABLED_ONCE)
]
data_source = list(itertools.product(device_mapping,
persistent_mapping))
for (device, persistent) in data_source:
task.driver.management.set_boot_device(
task, device[0], persistent=persistent[0])
connect_ibmc.assert_called_with(**self.ibmc)
conn.system.set_boot_source.assert_called_once_with(
device[1],
enabled=persistent[1])
# Reset mocks
connect_ibmc.reset_mock()
conn.system.set_boot_source.reset_mock()
def test_set_boot_device_fail(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
conn.system.set_boot_source.side_effect = (
ibmc_error.IBMCClientError
)
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaisesRegex(
exception.IBMCError, 'set iBMC boot device',
task.driver.management.set_boot_device, task,
boot_devices.PXE)
connect_ibmc.assert_called_with(**self.ibmc)
conn.system.set_boot_source.assert_called_once_with(
constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_ONCE)
def test_get_boot_device(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS
)
)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
result_boot_device = task.driver.management.get_boot_device(task)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_once_with(**self.ibmc)
expected = {'boot_device': boot_devices.PXE,
'persistent': True}
self.assertEqual(expected, result_boot_device)
def test_get_supported_boot_modes(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
supported_boot_modes = (
task.driver.management.get_supported_boot_modes(task))
self.assertEqual(list(mappings.SET_BOOT_MODE_MAP),
supported_boot_modes)
def test_set_boot_mode(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS
)
)
conn.system.set_boot_source.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected_values = [
(boot_modes.LEGACY_BIOS, constants.BOOT_SOURCE_MODE_BIOS),
(boot_modes.UEFI, constants.BOOT_SOURCE_MODE_UEFI)
]
for ironic_boot_mode, ibmc_boot_mode in expected_values:
task.driver.management.set_boot_mode(task,
mode=ironic_boot_mode)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_with(**self.ibmc)
conn.system.set_boot_source.assert_called_once_with(
constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS,
mode=ibmc_boot_mode)
# Reset
connect_ibmc.reset_mock()
conn.system.set_boot_source.reset_mock()
conn.system.get.reset_mock()
def test_set_boot_mode_fail(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS
)
)
conn.system.set_boot_source.side_effect = (
ibmc_error.IBMCClientError
)
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected_values = [
(boot_modes.LEGACY_BIOS, constants.BOOT_SOURCE_MODE_BIOS),
(boot_modes.UEFI, constants.BOOT_SOURCE_MODE_UEFI)
]
for ironic_boot_mode, ibmc_boot_mode in expected_values:
self.assertRaisesRegex(
exception.IBMCError, 'set iBMC boot mode',
task.driver.management.set_boot_mode, task,
ironic_boot_mode)
conn.system.set_boot_source.assert_called_once_with(
constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS,
mode=ibmc_boot_mode)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_with(**self.ibmc)
# Reset
connect_ibmc.reset_mock()
conn.system.set_boot_source.reset_mock()
conn.system.get.reset_mock()
def test_get_boot_mode(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS,
mode=constants.BOOT_SOURCE_MODE_BIOS,
)
)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
response = task.driver.management.get_boot_mode(task)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_with(**self.ibmc)
expected = boot_modes.LEGACY_BIOS
self.assertEqual(expected, response)
def test_get_sensors_data(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(NotImplementedError,
task.driver.management.get_sensors_data, task)
def test_inject_nmi(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.reset.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.inject_nmi(task)
connect_ibmc.assert_called_with(**self.ibmc)
conn.system.reset.assert_called_once_with(constants.RESET_NMI)
def test_inject_nmi_fail(self):
if not mock._is_instance_mock(ibmc_client):
mock.patch.object(ibmc_client, 'connect', autospec=True).start()
connect_ibmc = ibmc_client.connect
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.reset.side_effect = (
ibmc_error.IBMCClientError
)
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaisesRegex(
exception.IBMCError, 'inject iBMC NMI',
task.driver.management.inject_nmi, task)
connect_ibmc.assert_called_with(**self.ibmc)
conn.system.reset.assert_called_once_with(constants.RESET_NMI)
| StarcoderdataPython |
1789444 | <reponame>phzwart/qlty
import torch
import einops
def weed_sparse_classification_training_pairs_2D(tensor_in, tensor_out, missing_label, border_tensor):
"""
After tensors have been unstitched, we want want to be able to remove patches that have no data.
To this extent, we inspect every patch and remove any that do not contain any data. In additon, we remove
observations that lie in the border area. For this to work, a border_tensor must be supplied.
The selection is made on the basis of the supplied 'tensor_out' data field.
Parameters
----------
tensor_in: input tensor
tensor_out: output tensor
missing_label: missing label flag (typically -1)
border_tensor: the border tensor, obtained from the NCXYQuilt or NCZYXQuilt class
Returns
-------
A new set of tensors that has valid training data.
"""
tmp = torch.clone(tensor_out)
sel = (tmp!=missing_label).type(torch.int)
sel = sel*border_tensor
if len(border_tensor.shape)==2:
sel = einops.reduce( sel, "N Y X -> N", reduction='sum')
if len(border_tensor.shape)==3:
sel = einops.reduce( sel, "N C Y X -> N", reduction='sum')
sel = sel == 0
newin = tensor_in[~sel,...]
newout = tensor_out[~sel,...]
return newin, newout
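# Small self-contained check (not part of the original module): a batch of 4 patches in
# which only the first patch carries labels, so the other three should be weeded out.
if __name__ == '__main__':
    tensor_in = torch.rand(4, 1, 8, 8)
    tensor_out = torch.full((4, 8, 8), -1)
    tensor_out[0, 2:6, 2:6] = 1                  # only patch 0 has labelled pixels
    border = torch.ones(8, 8)                    # keep every pixel (no border masking)
    kept_in, kept_out = weed_sparse_classification_training_pairs_2D(
        tensor_in, tensor_out, missing_label=-1, border_tensor=border)
    print(kept_in.shape, kept_out.shape)         # expected: (1, 1, 8, 8) and (1, 8, 8)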
| StarcoderdataPython |
4839253 | <reponame>deeuu/supriya
import collections
from supriya import CalculationRate
from supriya.ugens.Filter import Filter
class BRF(Filter):
"""
A 2nd order Butterworth band-reject filter.
::
>>> source = supriya.ugens.In.ar(bus=0)
    >>> b_r_f = supriya.ugens.BRF.ar(source=source)
>>> b_r_f
BRF.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Filter UGens"
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 440.0), ("reciprocal_of_q", 1.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| StarcoderdataPython |
3270063 | <gh_stars>0
from plaster.tools.pipeline.pipeline import PipelineTask
from plaster.run.prep.prep_result import PrepResult
from plaster.run.sim.sim_result import SimResult
from plaster.run.survey_nn.survey_nn_params import SurveyNNParams
from plaster.run.survey_nn.survey_nn_worker import survey_nn
class SurveyNNTask(PipelineTask):
def start(self):
survey_nn_params = SurveyNNParams(**self.config.parameters)
prep_result = PrepResult.load_from_folder(self.inputs.prep)
sim_result = SimResult.load_from_folder(self.inputs.sim)
survey_nn_result = survey_nn(
survey_nn_params,
prep_result,
sim_result,
progress=self.progress,
pipeline=self,
)
survey_nn_result.save()
| StarcoderdataPython |
110013 | def checkio(f, g):
def call(function, *args, **kwargs):
try: return function(*args, **kwargs)
except Exception: return None
def h(*args, **kwargs):
value_f, value_g = call(f, *args, **kwargs), call(g, *args, **kwargs)
status = ""
if (value_f is None and value_g is None): status = "both_error"
elif (value_f is None): status = "f_error"
elif (value_g is None): status = "g_error"
elif (value_f == value_g): status = "same"
else: status = "different"
if (value_f is None and value_g is None): return (None, status)
elif (value_f is None): return (value_g, status)
else: return (value_f, status)
return h
if __name__ == "__main__":
#These "asserts" using only for self-checking and not necessary for auto-testing
# (x+y)(x-y)/(x-y)
assert checkio(lambda x,y:x+y,
lambda x,y:(x**2-y**2)/(x-y))\
(1,3)==(4,"same"), "Function: x+y, first"
assert checkio(lambda x,y:x+y,
lambda x,y:(x**2-y**2)/(x-y))\
(1,2)==(3,"same"), "Function: x+y, second"
assert checkio(lambda x,y:x+y,
lambda x,y:(x**2-y**2)/(x-y))\
(1,1.01)==(2.01,"different"), "x+y, third"
assert checkio(lambda x,y:x+y,
lambda x,y:(x**2-y**2)/(x-y))\
(1,1)==(2,"g_error"), "x+y, fourth"
# Remove odds from list
f = lambda nums:[x for x in nums if ~x%2]
def g(nums):
for i in range(len(nums)):
if nums[i]%2==1:
nums.pop(i)
return nums
assert checkio(f,g)([2,4,6,8]) == ([2,4,6,8],"same"), "evens, first"
assert checkio(f,g)([2,3,4,6,8]) == ([2,4,6,8],"g_error"), "evens, second"
# Fizz Buzz
assert checkio(lambda n:("Fizz "*(1-n%3) + "Buzz "*(1-n%5))[:-1] or str(n),
lambda n:("Fizz"*(n%3==0) + " " + "Buzz"*(n%5==0)).strip())\
(6)==("Fizz","same"), "fizz buzz, first"
assert checkio(lambda n:("Fizz "*(1-n%3) + "Buzz "*(1-n%5))[:-1] or str(n),
lambda n:("Fizz"*(n%3==0) + " " + "Buzz"*(n%5==0)).strip())\
(30)==("Fizz Buzz","same"), "fizz buzz, second"
assert checkio(lambda n:("Fizz "*(1-n%3) + "Buzz "*(1-n%5))[:-1] or str(n),
lambda n:("Fizz"*(n%3==0) + " " + "Buzz"*(n%5==0)).strip())\
(7)==("7","different"), "fizz buzz, third"
| StarcoderdataPython |
3344212 | <filename>services/spider/worker/__init__.py
# -*- coding: utf-8 -*-
import os
from celery import Celery
##################
# Celery configuration
from kombu import Queue
from webs import create_app
class CeleryConfig(object):
    # Serialize tasks as json; starting with Celery 4.0 the default serializer is json
task_serializer = 'json'
    # Serialize results as json
result_serializer = 'json'
    # Expiration time for task results
result_expires = 60 * 60 * 24
    # Disable worker event monitoring to prevent queue overflow
worker_send_task_events = False
    # Content types accepted for tasks
accept_content = ["json"]
    # Number of tasks prefetched per process; overridden by startup arguments, kept here only as a marker
worker_prefetch_multiplier = 4
    # Restart each worker after it executes 1 task; overridden by startup arguments, kept here only as a marker
worker_max_tasks_per_child = 1
    # Timezone settings
timezone = 'Asia/Shanghai'
enable_utc = True
##################
# Initialize the celery worker
def init_celery(app=None, celery_type='usual'):
app = app or create_app()
celery_app = Celery(__name__, broker=os.environ.get('CRAWL_CELERY_BROKER_URL'))
celery_app.config_from_object(CeleryConfig)
    # Import the relevant task modules
if celery_type == 'usual':
celery_app.conf.update(imports=['worker.fetch', 'worker.results'])
celery_app.conf.task_queues = (
Queue("priority_fetch", queue_arguments={'x-max-priority': 5}),
Queue("results"),
)
elif celery_type == 'beat':
pass
# celery_app.conf.update(
# imports=['project.api.tasks.cron', 'project.api.tasks.event_cron', 'project.api.tasks.visual_cron'])
# celery_app.conf.update(
# CELERYBEAT_SCHEDULE={
# }
# )
    # Execute within the Flask application context
class ContextTask(celery_app.Task):
"""Make celery tasks work with Flask app context"""
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery_app.Task = ContextTask
return celery_app
celery_app = init_celery()
# beat_app = init_celery(celery_type='beat')
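# Illustrative sketch only (not part of the original service): how a task could be
# registered on the app created above; the task name and body are hypothetical
# stand-ins for the real tasks imported from worker.fetch / worker.results.
@celery_app.task(name='worker.example_echo')
def example_echo(payload):
    """Echo the payload back; runs inside the Flask app context via ContextTask."""
    return payload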
| StarcoderdataPython |
3287833 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import TYPE_CHECKING
from cdm.enums import CdmObjectType
from cdm.enums.cdm_operation_type import OperationTypeConvertor, CdmOperationType
from cdm.persistence.cdmfolder.types import OperationExcludeAttributes
from cdm.utilities.logging import logger
from cdm.utilities.string_utils import StringUtils
if TYPE_CHECKING:
from cdm.objectmodel import CdmCorpusContext, CdmOperationExcludeAttributes
from cdm.utilities import ResolveOptions, CopyOptions
_TAG = 'OperationExcludeAttributesPersistence'
class OperationExcludeAttributesPersistence:
"""Operation ExcludeAttributes persistence"""
@staticmethod
def from_data(ctx: 'CdmCorpusContext', data: 'OperationExcludeAttributes') -> 'CdmOperationExcludeAttributes':
if not data:
return None
exclude_attributes_op = ctx.corpus.make_object(CdmObjectType.OPERATION_EXCLUDE_ATTRIBUTES_DEF)
if data.type and not StringUtils.equals_with_ignore_case(data.type, OperationTypeConvertor._operation_type_to_string(CdmOperationType.EXCLUDE_ATTRIBUTES)):
logger.error(_TAG, ctx, '$type {} is invalid for this operation.'.format(data.type))
else:
exclude_attributes_op.type = CdmOperationType.EXCLUDE_ATTRIBUTES
if data.explanation:
exclude_attributes_op.explanation = data.explanation
exclude_attributes_op.exclude_attributes = data.excludeAttributes
return exclude_attributes_op
@staticmethod
def to_data(instance: 'CdmOperationExcludeAttributes', res_opt: 'ResolveOptions', options: 'CopyOptions') -> 'OperationExcludeAttributes':
if not instance:
return None
obj = OperationExcludeAttributes()
obj.type = OperationTypeConvertor._operation_type_to_string(CdmOperationType.EXCLUDE_ATTRIBUTES)
obj.explanation = instance.explanation
obj.excludeAttributes = instance.exclude_attributes
return obj
| StarcoderdataPython |
123697 | #!/usr/bin/env python
"""
Copyright (c) 2020-End_Of_Life
See the file 'LICENSE' for copying permission
"""
# import standard library required
import argparse
import sys
# import tool required
from route.route import route
from route.execute import execute
from chemsynth.chemsynth import Chemsynth, ChemsynthException
from chemsynth.chempoint import ChemsynthPoint, ChemsynthPointException
# =========
# Interface
# =========
def prepare(to_do_list):
'''
this function will format to_do_list so it will be do_list. format means remove repeating same function
on same index sequentially and append the count of it to the to do_list
'''
# prepare
f_idx, idx = to_do_list[0]
x = 1
do_list = []
# formatting to_do_list
for i in range(1, len(to_do_list)):
f_idx2, idx2 = to_do_list[i]
if f_idx == f_idx2 and idx == idx2:
x += 1
continue
do_list.append([f_idx, idx, x])
f_idx = f_idx2
idx = idx2
x = 1
do_list.append([f_idx, idx, x])
# return do_list
return do_list
def percentage(dom, tar):
'''
return percentage of equality
'''
value = ChemsynthPoint._ChemsynthPoint__point1(dom, tar)
length = len(dom)
value = int((value / length) * 100)
return value
def print_step(dom, tar, do_list, advance=False):
# list of tool name sorted based on index of function in execute.py
func_name = ["\"Centrifuge\"", "\"Stirrer\"", "\"Catalyst\"", "\"Replicator\""]
# prepare
chem = Chemsynth(dom)
tar = tar.upper()
step = 1
# table header
print("{step:^5} {tool:^12} {block:^12} {times:^12} {complete:^12}".format(step='STEP', tool='TOOL', block='BLOCK', times='TIMES', complete='COMPLETE'))
for f_idx, idx, x in do_list:
# get old chemsynth tank
temp = str(chem)
# executing as many as x
for y in range(x):
execute(chem, [[f_idx, idx]])
# print the step and the content
print("{step:<5} {tool:^12} {block:^12} {times:^12} {complete:^12}".format(step='#'+str(step), tool=func_name[f_idx], block=idx+1, times=x, complete=str(percentage(chem.dom, tar))+'%'))
# if advance == True, print with tank state
if advance == True:
print(temp, "->", chem)
step += 1
def get_parser():
'''
preparing for argument parser
'''
parser = argparse.ArgumentParser(prog='Chemsynth Router', description='Chemsynth Router by whoami and mrx', add_help=False)
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--doc', action='store_const', const=1, default=0, dest='doc', help='documentation about Chemsynth Router')
group.add_argument('-h', '--help', action='store_const', const=1, default=0, dest='help', help='show this help message')
group.add_argument('-q', '--quit', action='store_const', const=1, default=0, dest='quit', help='quit from program')
group.add_argument('-r', '--route', action="extend", nargs=2, help='route based on DOMAIN and TARGET, [COLOR] can be R, Y, G, B, P', metavar='[COLOR]', dest='route')
parser.add_argument('-a', '--advance', action='store_const', const=1, default=0, dest='advance', help='show step with realtime Chemsynth Tank color state, optionally with -r/--route')
group.add_argument('-v', '--version', action='store_const', const=1, default=0, dest='version', help='show program version')
return parser
def arg_check(Namespace):
'''
    raise an error when more than one exclusive optional argument is specified in one line
'''
if Namespace.doc + Namespace.version + Namespace.quit + Namespace.help + Namespace.advance > 1:
raise ArgumentError
if Namespace.advance == 1 and not(any([Namespace.doc, Namespace.version, Namespace.quit, Namespace.help, Namespace.route])):
raise ArgumentError
def welcome():
print("Chemsynth Router [Version 2.0] by whoami and mrx\n"
"type -h or --help for more informations\n")
def doc():
print("Chemsynth Router v 2.0 is an open source program written in Python 3\n"
"created by whoami and mrx\n"
"source available at https://github.com/0xwhoami/Growtopia-Chemsynth-Router\n")
def help(parser):
parser.print_help()
def quit():
raise SystemExit2
def version():
print("Chemsynth Router 2.0")
# =====
# Error
# =====
class SystemExit2(Exception): pass
class ArgumentError(Exception): pass
# =====
# Start
# =====
welcome()
# get parser for argument
parser = get_parser()
while True:
try:
to_do_list = []
result = parser.parse_args(input(">>> ").split())
# checking argument
arg_check(result)
if result.doc:
doc()
elif result.help:
help(parser)
elif result.quit:
quit()
elif result.version:
version()
elif result.route:
# get domain and target
dom = result.route[0]
tar = result.route[1]
# routing
to_do_list = route(dom, tar)
# we can't route
if to_do_list == []:
print("sorry we can't route, it's the maximum we can do :(")
continue
do_list = prepare(to_do_list)
# print route step by step
print_step(dom, tar, do_list, result.advance)
except (ChemsynthException, ChemsynthPointException) as e:
print("error:", e)
except (ArgumentError, EOFError, KeyboardInterrupt):
help(parser)
except SystemExit2:
sys.exit(0)
except SystemExit:
pass
except:
# logging
log = open('log.txt', 'a')
print("error:", sys.exc_info()[:2], '\n',
"arg:", result,
file=log, end='\n\n')
log.close()
raise
| StarcoderdataPython |
113399 | #!/usr/bin/python3
import hid
import traceback
hid_max_pkt_size = 64
if __name__ == '__main__':
import argparse
import sys
import binascii
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--descriptor', help='Print Descriptor', action='store_true')
args = parser.parse_args()
d_path = ''
device = None
devices = hid.enumerate()
print(devices)
if not d_path: # no hid device specified
if not devices:
print('No devices to read.')
sys.exit()
elif d_path and d_path not in [d['path'] for d in devices]:
print('Requested device not found.')
sys.exit()
else:
print('Available devices:')
for d in devices:
print('\t%s' % d['path'].decode('utf-8'))
for k in sorted(d.keys()):
h = k.replace('_', ' ').capitalize()
v = d[k].decode('utf-8') if isinstance(d[k], bytes) else d[k]
print('\t\t%s: %s' % (h, v))
device = devices[0]
d_path = device['path'].decode('utf-8')
print('Reading: %s' % d_path)
d = hid.device()
d.open(device['vendor_id'], device['product_id'])
if args.descriptor:
pass # TODO
while True:
# TODO: set max packet size based on descriptor
try:
data = bytes(d.read(hid_max_pkt_size))
dout = binascii.hexlify(data).upper()
dout = b' '.join(dout[i:i+2] for i in range(0, len(dout), 2)).strip()
#dout = ' '.join("{:02x}".format(c) for c in dout)
print(dout.decode('utf-8'), end='\r')
except OSError as e:
print('%s: %s' % (type(e).__name__, e))
sys.exit()
except IOError as e:
print('%s: %s' % (type(e).__name__, e))
sys.exit()
except Exception as e:
# TODO: do something useful
print(traceback.format_exc())
sys.exit()
| StarcoderdataPython |
1692103 | from PoolThread import PoolThread
from Stage import Stage
from Task import Task
| StarcoderdataPython |
154787 | import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues()
self.description = "GUI to profilers such as Valgrind"
self.defaultTarget = 'master'
def setDependencies(self):
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["kde/frameworks/tier1/karchive"] = None
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None
self.runtimeDependencies["kde/frameworks/tier2/kdoctools"] = None
self.runtimeDependencies["kde/frameworks/tier1/kwidgetsaddons"] = None
self.runtimeDependencies["kde/frameworks/tier3/kxmlgui"] = None
self.runtimeDependencies["kde/frameworks/tier4/kdelibs4support"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
| StarcoderdataPython |