id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses 1 value)
---|---|---|
218666
|
<reponame>nam-pi/data_conversion
"""The module for the Namespace class.
Classes:
Namespace
"""
from rdflib import Namespace
from modules.cli_param import Env
def _base_ns(resource: str) -> Namespace:
return Namespace(
Env.data_namespace_prefix + resource + "/"
if Env.data_namespace_prefix.endswith("/")
else Env.data_namespace_prefix + "/" + resource + "/"
)
def _purl_ns(resource: str, hash_separator: bool = False) -> Namespace:
return Namespace(
"http://purl.org/nampi/{}{}".format(resource, "#" if hash_separator else "/")
)
class Nampi_ns:
"""A collection for namespaces to be used in the NAMPI input table conversion."""
act = _base_ns("acts")
aspect = _base_ns("aspects")
author = _base_ns("authors")
core = _purl_ns("owl/core", hash_separator=True)
event = _base_ns("events")
group = _base_ns("groups")
mona = _purl_ns("owl/monastic-life", hash_separator=True)
object = _base_ns("objects")
person = _base_ns("persons")
place = _base_ns("places")
source = _base_ns("sources")
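# Illustrative usage sketch (assumes Env.data_namespace_prefix is configured,
# e.g. as "https://example.org/data"; the identifiers below are made up):
#   Nampi_ns.person["person_1"]
#   -> rdflib.term.URIRef("https://example.org/data/persons/person_1")
#   Nampi_ns.core["Event"]
#   -> rdflib.term.URIRef("http://purl.org/nampi/owl/core#Event")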
|
StarcoderdataPython
|
5125551
|
#!/usr/bin/env python
from __future__ import print_function
import angles
import math
import os.path
import rospy
from geometry_msgs.msg import Point, Pose, PoseStamped, Quaternion
from nav_msgs.msg import Odometry, Path
from tf.transformations import euler_from_quaternion, quaternion_from_euler
POSE_FORMAT = """\
- pos_x: {pos_x:5f}
pos_y: {pos_y:5f}
yaw: {yaw:5f}
linear_x: {linear_x:5f}
angular_z: {angular_z:5f}
"""
class RecordTrajectory(object):
def __init__(self):
self.write_data = True
self.dir = rospy.get_param('~directory', os.path.join(os.path.expanduser('~'), 'arti', 'recording'))
self.file = rospy.get_param('~file', os.path.join(self.dir, 'trajectory_recorded.yaml'))
        self.distance_thresh = rospy.get_param('~distance_thresh', 0.3)  # in meters
        self.angular_thresh_rad = rospy.get_param('~angular_thresh', 5.0) * math.pi / 180.0  # param in degrees, stored in radians
self.prev_pose = None
rospy.loginfo("saving trajectory to: %s", self.file)
self.data_file = open(self.file, 'w')
self.cx = []
self.cy = []
self.cyaw = []
self.path_pub = rospy.Publisher('~path', Path, latch=True, queue_size=1)
rospy.Subscriber("/ukf_pose", Odometry, self.odom_callback, queue_size=1)
def odom_callback(self, msg):
"""
:type msg: Odometry
"""
orientation = msg.pose.pose.orientation
_roll, _pitch, yaw = euler_from_quaternion([orientation.x, orientation.y, orientation.z, orientation.w])
if not self.cx:
self.data_file.write("frame_id: {frame_id}\n".format(frame_id=msg.header.frame_id))
self.data_file.write("trajectory:\n")
self.add_pose(msg, yaw)
else:
prev_o = self.prev_pose.pose.pose.orientation
_prev_roll, _prev_pitch, prev_yaw = euler_from_quaternion([prev_o.x, prev_o.y, prev_o.z, prev_o.w])
angle_diff = angles.normalize_angle(yaw - prev_yaw)
dist = math.hypot((msg.pose.pose.position.x - self.prev_pose.pose.pose.position.x),
(msg.pose.pose.position.y - self.prev_pose.pose.pose.position.y))
            # record a new pose only if the traveled distance or heading change is large enough
if dist >= self.distance_thresh or abs(angle_diff) > self.angular_thresh_rad:
self.add_pose(msg, yaw)
def add_pose(self, msg, yaw):
self.cx.append(msg.pose.pose.position.x)
self.cy.append(msg.pose.pose.position.y)
self.cyaw.append(yaw)
self.data_file.write(POSE_FORMAT.format(pos_x=msg.pose.pose.position.x, pos_y=msg.pose.pose.position.y, yaw=yaw,
linear_x=msg.twist.twist.linear.x, angular_z=msg.twist.twist.angular.z))
self.prev_pose = msg
rospy.loginfo("saved %d poses to trajectory", len(self.cx))
self.publish_plan(msg.header)
def publish_plan(self, header):
"""
publish the global plan
"""
msg = Path(header=header)
for x, y, yaw in zip(self.cx, self.cy, self.cyaw):
orientation = Quaternion(*quaternion_from_euler(0, 0, yaw))
msg.poses.append(PoseStamped(header=header, pose=Pose(position=Point(x=x, y=y), orientation=orientation)))
self.path_pub.publish(msg)
if __name__ == '__main__':
rospy.init_node('record_trajectory')
try:
RecordTrajectory()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
StarcoderdataPython
|
364628
|
import string
import time
letters = string.ascii_letters
char_list = list(letters)
char_tuple = tuple(letters)
char_set = set(letters)
print(char_list)
def membership_test(n,container):
for i in range(n):
if 'z' in container:
pass
# test list
start = time.perf_counter()
membership_test(1000000,char_list) # one million lookups
end = time.perf_counter()
print('list:',end-start)
# test tuple
start = time.perf_counter()
membership_test(1000000,char_tuple)
end = time.perf_counter()
print('tuple:',end-start)
# test set
start = time.perf_counter()
membership_test(1000000,char_set)
end = time.perf_counter()
print('set:',end-start)
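# Rough expectation (reasoning, not measured output): set membership is an
# O(1) hash lookup per call, while list and tuple membership scan the sequence
# (O(n)), so the set timing printed above should be far smaller than the other two.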
|
StarcoderdataPython
|
3227212
|
<filename>v1/banks/urls.py
from django.urls import path
from .views.bank import BankView
urlpatterns = [
# Banks
path('banks', BankView.as_view()),
]
|
StarcoderdataPython
|
3382134
|
import rss_reader_kapitonov
def main():
    rss_reader_kapitonov.main()
if __name__ == "__main__":
    main()
|
StarcoderdataPython
|
331409
|
# Configuration file for the Sphinx documentation builder.
#
# Full list of options can be found in the Sphinx documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import re
import sys
from pathlib import Path
# Set package variables and add to path
package_name = "hijri_converter"
package_path = Path("../src").joinpath(package_name).resolve()
sys.path.append(str(package_path.parent))
def read_version():
content = package_path.joinpath("__init__.py").read_text()
pattern = re.compile(r"(?<=__version__\s=\s\").*(?=\")")
return pattern.search(content).group()
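# Example of the line the regex above matches (version number is illustrative):
#   __version__ = "2.2.4"   -> read_version() returns "2.2.4"
# Note the lookbehind expects exactly one space on each side of "=" and a
# double-quoted value.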
# Add custom extensions to path
sys.path.append(str(Path("_extensions").resolve()))
#
# -- Project information ---------------------------------------------------------------
#
project = "hijri-converter" # project name at PyPI and GitHub
author = "<NAME> (@mhalshehri)"
project_copyright = "2018 <NAME> (@mhalshehri) and contributors"
version = read_version()
#
# -- General configuration -------------------------------------------------------------
#
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"myst_parser",
"notfound.extension",
"custom_sitemap",
]
exclude_patterns = ["manpage.*"]
templates_path = ["_templates"]
source_suffix = {
".rst": "restructuredtext",
".md": "markdown",
}
pygments_style = "colorful"
add_module_names = False
#
# -- Options for autodoc ---------------------------------------------------------------
#
autodoc_default_options = {
"members": True,
"member-order": "bysource",
"undoc-members": False,
"show-inheritance": True,
}
autoclass_content = "both"
autodoc_typehints = "description"
#
# -- Options for intersphinx -----------------------------------------------------------
#
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
}
#
# -- Options for napoleon -----------------------------------------------------------
#
napoleon_google_docstring = True
napoleon_numpy_docstring = False
#
# -- Options for notfound --------------------------------------------------------------
#
notfound_urls_prefix = "/en/stable/"
#
# -- Options for sitemap ---------------------------------------------------------------
#
sitemap_excluded_pages = [
"contributing",
"license",
]
#
# -- Options for Markdown files --------------------------------------------------------
#
myst_enable_extensions = [
"smartquotes",
]
myst_heading_anchors = 2
#
# -- Options for HTML output -----------------------------------------------------------
#
html_baseurl = f"https://{project}.readthedocs.io/en/stable/"
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"collapse_navigation": False,
"display_version": True,
"navigation_depth": 1,
"includehidden": True,
"titles_only": True,
}
html_logo = None
html_favicon = None
html_css_files = ["custom.css"]
html_static_path = ["_static"]
html_extra_path = ["_extra"]
html_copy_source = False
html_show_sourcelink = False
html_show_sphinx = False
#
# -- Options for manual pages output ---------------------------------------------------
#
man_pages = [
("manpage", package_name, "convert Hijri-Gregorian dates", author, 7)
]
|
StarcoderdataPython
|
4822547
|
<reponame>YufeiCui/CSCA48
# Provided by Dr. <NAME> & <NAME>. Edited by <NAME>
class EmptyStackException(Exception):
pass
class Stack(object):
    ''' this class defines a LIFO/FILO stack of items and raises an exception when pop() or
    top() is requested on an empty Stack '''
def __init__(self):
'''(Stack) -> Nonetype
creates an empty stack'''
# Representation Invariant
# _stack is a list
# if _stack is not empty then
# _stack[0] refers to the top of the stack
# _stack[:] refers to the elements of the stack in the order of insertion
self._stack = []
def push(self, element):
''' (Stack, obj) -> NoneType
add element to the top of the stack'''
# The element goes to the top of the stack
self._stack.insert(0, element)
def pop(self):
'''(Stack) -> obj
        removes and returns the element at the top of the stack
raise an exception if _stack is empty'''
if self.is_empty():
raise EmptyStackException("This stack is empty")
# remove and return the item at the top
return self._stack.pop(0)
def is_empty(self):
''' (Stack) -> bool
returns true if _stack is empty'''
return len(self._stack) == 0
def size(self):
'''(Stack) -> int
returns the number of elements, which are in _stack'''
return len(self._stack)
def top(self):
'''(Stack) -> obj
        returns the element at the top of _stack
        It raises an exception if this stack is empty'''
if self.is_empty():
raise EmptyStackException("This Stack is Empty")
return self._stack[0]
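# Minimal usage sketch (illustrative, not part of the original module):
#   s = Stack()
#   s.push('a'); s.push('b')
#   s.top()    # -> 'b'
#   s.pop()    # -> 'b'
#   s.size()   # -> 1
# Note: list.insert(0, ...) and list.pop(0) are O(n); pushing to and popping
# from the end of the list would give O(1) stack operations.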
|
StarcoderdataPython
|
1796529
|
<gh_stars>1-10
xs_gen = """\
set title "[CHAR] {reactor} Cross Section Generator"
set acelib "{xsdata}"
% --- Material Definitions ---
% Initial Fuel Stream
mat fuel -{fuel_density}
{fuel}
% Cladding Stream
mat cladding -{clad_density}
{cladding}
% Coolant Stream
mat coolant -{cool_density} moder lwtr 1001
{coolant}
therm lwtr lwj3.20t
% --- Run Specification ---
% Periodic boundary conditions
set bc 3
% Fuel universe
set gcu 100
% 1/8 square symmetry
{sym_flag}set sym 8
% Group Structure
set egrid 5E-05 {group_lower_bound} {group_upper_bound}
set nfg {n_groups}
{group_inner_structure}
% Criticality calc
set pop {k_particles} {k_cycles} {k_cycles_skip}
% --- Geometry ---
pin 1
fill 100 {fuel_radius}
void {void_radius}
cladding {clad_radius}
coolant
pin 2
coolant
surf 100 inf
cell 110 100 fuel -100
lat 10 1 0.0 0.0 {lattice_xy} {lattice_xy} {cell_pitch}
{lattice}
surf 3000 sqc 0.0 0.0 {half_lattice_pitch}
cell 300 0 fill 10 -3000
cell 301 0 outside 3000
% --- Graphs ---
%plot 3 800 800
%mesh 3 800 800
% --- Group Constant Generation ---
% Energy group structure
ene energies 1
{group_structure}
% Total flux in {detector_mat}
det phi de energies dm {detector_mat}
% Group constant material
mat xsmat 1.0 {xsnuc} 1.0
% Set group transfer probability to this material
set gtpmat xsmat
% Specify the detectors
{xsdet}
"""
burnup = """\
set title "[CHAR] {reactor} Burnup Calculation"
set acelib "{xsdata}"
% --- Material Definitions ---
% Initial Fuel Stream
mat fuel -{fuel_density} burn {num_burn_regions}
{fuel}
% Cladding Stream
mat cladding -{clad_density}
{cladding}
% Coolant Stream
mat coolant -{cool_density} moder lwtr 1001
{coolant}
therm lwtr lwj3.20t
% --- Run Specification ---
% Periodic boundary conditions
set bc 3
% 1/8 square symmetry
{sym_flag}set sym 8
% Group Structure
set egrid 5E-05 {group_lower_bound} {group_upper_bound}
set nfg {n_groups}
{group_inner_structure}
% Criticality calc
set pop {k_particles} {k_cycles} {k_cycles_skip}
% --- Geometry ---
pin 1
fuel {fuel_radius}
void {void_radius}
cladding {clad_radius}
coolant
pin 2
coolant
lat 10 1 0.0 0.0 {lattice_xy} {lattice_xy} {cell_pitch}
{lattice}
surf 3000 sqc 0.0 0.0 {half_lattice_pitch}
cell 300 0 fill 10 -3000
cell 301 0 outside 3000
% --- Graphs ---
%plot 3 800 800
%mesh 3 800 800
% Decay and fission yield libraries
set declib "{decay_lib}"
set nfylib "{fission_yield_lib}"
% Burnup calculation options
set bumode 2 % CRAM method
set pcc 1 % Predictor-corrector calculation on
set xscalc 2 % Calc cross sections from spectrum (fast)
set powdens {fuel_specific_power} % Fuel specific power [W/g]
% Depletion cycle
dep daytot
{depletion_times}
% Nuclide inventory
set inventory
{transmute_inventory}
"""
|
StarcoderdataPython
|
6468259
|
print ("H<NAME>!")
a = int(input ("How many maytes are there?"))
if a < 2:
print("There are 2 maytes here!")
|
StarcoderdataPython
|
4832496
|
"""
This is a python file to control and get information about optical drives.
Eventually aim to handle CD, DVD , BluRay on Windows and Linux
"""
import subprocess
class ODMedia:
def __init__(self):
self.path_root = r"C:\Program Files (x86)\CDBurnerXP\cdbxpcmd.exe"
#Safety check for known situation
if self.number_of_drives == 1:
print('Found D drive all ok')
else:
raise Exception(f"Didn't find 1 drive, num = {media.number_of_drives}")
def version(self):
result = subprocess.run([self.path_root, "--version"], stdout=subprocess.PIPE)
return result.stdout.decode('utf-8')
def list_drives(self):
def parse_media(line):
a = line.split(':', maxsplit=1)
drive_number = int(a[0])
b = a[1].strip().split(' (', maxsplit=1)
drive_name = b[0].strip()
drive_letter = b[1][0]
return [drive_number, drive_name, drive_letter]
result = subprocess.run([self.path_root, "--list_drives"], stdout=subprocess.PIPE
)
lines = result.stdout.decode('utf-8').strip().split('\n') # split into lines
try:
trim_lines = [x.strip() for x in lines]
return [parse_media(x) for x in trim_lines]
except:
raise Exception(f'list drives failed trying to parse: {lines}')
@property
def number_of_drives(self):
return len(self.list_drives())
def disk_open(self):
result = subprocess.run([self.path_root, "--eject", "--drivename:0"], stdout=subprocess.PIPE)
return result.stdout.decode('utf-8')
def disk_close(self):
result = subprocess.run([self.path_root, "--load", "--drivename:0"], stdout=subprocess.PIPE)
return result.stdout.decode('utf-8')
def burn_disk(self, file_name):
""" eg """
result = subprocess.run([self.path_root, "--burn-iso", "-device:0",
f'-file:{file_name}'], stdout=subprocess.PIPE)
print(result)
return result.stdout.decode('utf-8')
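# parse_media() above assumes each line of "cdbxpcmd --list_drives" output
# looks roughly like "0: DRIVE NAME (D:\)" -- a drive number, a colon, the
# drive name, then the drive letter in parentheses. This format is inferred
# from the parsing code, not taken from CDBurnerXP documentation.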
|
StarcoderdataPython
|
1897954
|
"""
The input file contains text (you can read the data from sys.stdin by importing the sys module). A word is a
sequence of consecutive non-whitespace characters; words are separated by one or more spaces or by
end-of-line characters. Determine how many distinct words the text contains.
Input format
Text is given.
Output format
Print the answer to the problem.
"""
print(len(set(open("input.txt", "r", encoding="utf8").read().split())))
|
StarcoderdataPython
|
6578380
|
<reponame>vidakDK/colour<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .datasets import * # noqa
from . import datasets
from .prediction import (corresponding_chromaticities_prediction_CIE1994,
corresponding_chromaticities_prediction_CMCCAT2000,
corresponding_chromaticities_prediction_Fairchild1990,
corresponding_chromaticities_prediction_VonKries,
CORRESPONDING_CHROMATICITIES_PREDICTION_MODELS,
corresponding_chromaticities_prediction)
__all__ = []
__all__ += datasets.__all__
__all__ += [
'corresponding_chromaticities_prediction_CIE1994',
'corresponding_chromaticities_prediction_CMCCAT2000',
'corresponding_chromaticities_prediction_Fairchild1990',
'corresponding_chromaticities_prediction_VonKries',
'CORRESPONDING_CHROMATICITIES_PREDICTION_MODELS',
'corresponding_chromaticities_prediction'
]
|
StarcoderdataPython
|
3390559
|
<reponame>fyrestartr/Readers-Underground<gh_stars>1-10
from os import listdir, remove
from os.path import join, normpath
from utils.zip import unzip
class FolderItemsUnzipper:
def __init__(self, folder_path):
self.folder_path = folder_path
def run(self):
for file in listdir(self.folder_path):
if not file[-4:].lower() == '.zip':
continue
zip_file_path = normpath(join(self.folder_path, file))
print('Unzipping {}... '.format(file), end='')
unzip(zip_file_path, self.folder_path)
remove(zip_file_path)
print('Done.')
|
StarcoderdataPython
|
11254418
|
<reponame>qe-team/marmot
import sys
from subprocess import Popen, PIPE
from marmot.features.feature_extractor import FeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
from marmot.exceptions.no_resource_error import NoResourceError
class POSFeatureExtractor(FeatureExtractor):
"""
POS for source and target words, tagged with TreeTagger
"""
def __init__(self, tagger=None, par_file_src=None, par_file_tg=None):
self.tagger = tagger
self.par_src = par_file_src
self.par_tg = par_file_tg
# tag words if context_obj has no tagging
# returns tags for all words in sentence
def _call_tagger(self, tok_list, lang='tg'):
par_file = self.par_tg if lang == 'tg' else self.par_src
out = []
if self.tagger is None:
raise NoResourceError('tagger', 'POSFeatureExtractor')
if par_file is None:
raise NoResourceError('tagging parameters', 'POSFeatureExtractor')
p = Popen([self.tagger, '-quiet', par_file], stdin=PIPE, stdout=PIPE)
out = p.communicate(input='\n'.join([tok.encode('utf-8') for tok in tok_list]))[0].decode('utf-8').split('\n')
return out
def get_features(self, context_obj):
if 'target_pos' not in context_obj:
if 'target' in context_obj and context_obj['target'] is not None:
context_obj['target_pos'] = self._call_tagger(context_obj['target'])
else:
raise NoDataError('target_pos', context_obj, 'POSFeatureExtractor')
if 'source_pos' not in context_obj:
if 'source' in context_obj and context_obj['source'] is not None:
context_obj['source_pos'] = self._call_tagger(context_obj['source'], lang='src')
else:
raise NoDataError('source_pos', context_obj, 'POSFeatureExtractor')
# extract POS features:
# - target POS
# - source POS (may be more than 1)
# - something else?
tg_pos = context_obj['target_pos'][context_obj['index']] if context_obj['target_pos'] != [] else ''
src_pos = []
if 'source_pos' in context_obj and context_obj['source_pos'] != [] and 'alignments' in context_obj:
align_idx = context_obj['alignments'][context_obj['index']]
if align_idx is not None:
src_pos = context_obj['source_pos'][align_idx]
else:
src_pos = '__unaligned__'
return [tg_pos, src_pos]
def get_feature_names(self):
return ['target_pos', 'aligned_source_pos']
|
StarcoderdataPython
|
3351061
|
<reponame>htlcnn/ironpython-stubs<gh_stars>100-1000
# encoding: utf-8
# module Grasshopper.Kernel.Special.SketchElements calls itself SketchElements
# from Grasshopper,Version=1.0.0.20,Culture=neutral,PublicKeyToken=dda4f5ec2cd80803
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class GH_SketchBox(object):
""" GH_SketchBox() """
class GH_SketchCloud(object):
""" GH_SketchCloud() """
class GH_SketchElement(object,IGH_SketchElement,IGH_InstanceDescription,GH_ISerializable):
# no doc
def IsPickPoint(self,*__args):
"""
IsPickPoint(self: GH_SketchElement,box: RectangleF,bCrossingBox: GH_PickBox) -> bool
IsPickPoint(self: GH_SketchElement,pt: PointF) -> bool
"""
pass
def NewInstanceGuid(self,UUID=None):
""" NewInstanceGuid(self: GH_SketchElement,UUID: Guid)NewInstanceGuid(self: GH_SketchElement) """
pass
def Read(self,reader):
""" Read(self: GH_SketchElement,reader: GH_IReader) -> bool """
pass
def Write(self,writer):
""" Write(self: GH_SketchElement,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
BoundingBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: BoundingBox(self: GH_SketchElement) -> RectangleF
"""
Category=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Category(self: GH_SketchElement) -> str
Set: Category(self: GH_SketchElement)=value
"""
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Description(self: GH_SketchElement) -> str
Set: Description(self: GH_SketchElement)=value
"""
GraphicsPath=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphicsPath(self: GH_SketchElement) -> GraphicsPath
"""
HasCategory=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: HasCategory(self: GH_SketchElement) -> bool
"""
HasSubCategory=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: HasSubCategory(self: GH_SketchElement) -> bool
"""
Icon_24x24=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_24x24(self: GH_SketchElement) -> Image
"""
InstanceDescription=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: InstanceDescription(self: GH_SketchElement) -> str
"""
InstanceGuid=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: InstanceGuid(self: GH_SketchElement) -> Guid
"""
Keywords=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Keywords(self: GH_SketchElement) -> IEnumerable[str]
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Name(self: GH_SketchElement) -> str
Set: Name(self: GH_SketchElement)=value
"""
NickName=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: NickName(self: GH_SketchElement) -> str
Set: NickName(self: GH_SketchElement)=value
"""
SubCategory=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: SubCategory(self: GH_SketchElement) -> str
Set: SubCategory(self: GH_SketchElement)=value
"""
class GH_SketchEllipse(object):
""" GH_SketchEllipse() """
class GH_SketchFreehandStroke(object):
""" GH_SketchFreehandStroke() """
class GH_SketchLine(object):
""" GH_SketchLine() """
class IGH_SketchElement(IGH_InstanceDescription,GH_ISerializable):
# no doc
def IsPickPoint(self,*__args):
"""
IsPickPoint(self: IGH_SketchElement,box: RectangleF,bCrossingBox: GH_PickBox) -> bool
IsPickPoint(self: IGH_SketchElement,pt: PointF) -> bool
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
BoundingBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: BoundingBox(self: IGH_SketchElement) -> RectangleF
"""
GraphicsPath=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: GraphicsPath(self: IGH_SketchElement) -> GraphicsPath
"""
Icon_24x24=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Icon_24x24(self: IGH_SketchElement) -> Image
"""
|
StarcoderdataPython
|
12823676
|
import time
from PIL import Image
import hashlib
import numbers
from google.cloud import pubsub_v1
from typing import List
from fastapi import APIRouter, Depends, UploadFile, File, Form, HTTPException
from starlette.requests import Request
from func_timeout import func_set_timeout
from sqlalchemy.orm import Session
from google.cloud import storage
from image_upload import settings
from image_upload.utils import fallback
from image_upload.database import crud, models, schemas, get_db, engine
try:
models.Base.metadata.create_all(bind=engine, checkfirst=True)
except:
pass
router = APIRouter()
# Instantiates a client
storage_client = storage.Client()
bucket_name = "super_skrivni_bozickov_zaklad"
bucket = storage_client.bucket(bucket_name)
# Pub/sub.
publisher = pubsub_v1.PublisherClient()
subscriber = pubsub_v1.SubscriberClient()
topic_name = 'projects/{project_id}/topics/{topic}'.format(
project_id='forward-leaf-258910',
topic='image_to_process',
)
subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format(
project_id='forward-leaf-258910',
sub='image-upload',
)
def callback(message):
print(message)
tags = message.attributes["image_tags"]
image_id = message.attributes["image_id"]
crud.update_tags(db=next(get_db()), image_id=int(image_id), tags=tags)
message.ack()
future = subscriber.subscribe(subscription_name, callback)
@router.post('/images', response_model=schemas.Image)
def upload(*, user_id: int = Form(...), file: UploadFile = File(...), db: Session = Depends(get_db)):
try:
Image.open(file.file)
except:
raise HTTPException(status_code=400, detail='Uploaded file is not an image.')
if not isinstance(user_id, numbers.Number):
raise HTTPException(status_code=400, detail='user_id is not a number.')
    # Hash the file name (not the file contents) and keep the original extension.
file_hash = hashlib.sha1(file.filename.encode('utf-8')).hexdigest() + "." + file.filename.split(".")[-1]
# Save to DB.
new_image = crud.create_image(db=db, file_name=file.filename, file_hash=file_hash, user_id=user_id)
iid = new_image.id
# Upload to GC, append file ID to hash.
file.file.seek(0)
try:
blob = bucket.blob(str(iid) + file_hash)
blob.upload_from_file(file.file)
except:
crud.delete_image(db=db, image_id=iid)
raise HTTPException(status_code=400, detail='Upload to gCloud failed.')
# Send to image processor.
url_r = str(iid) + file_hash
url_l = "https://storage.googleapis.com/super_skrivni_bozickov_zaklad/"
publisher.publish(topic_name, b'', image_id=str(iid), image_url=url_l + url_r)
return new_image
@router.delete('/images/{image_id}', response_model=schemas.Image)
def delete_image(image_id: int, db: Session = Depends(get_db)):
db_image = crud.delete_image(db=db, image_id=image_id)
if db_image is None:
raise HTTPException(status_code=404, detail='Image not found')
return db_image
@router.get('/settings')
async def test_configs(request: Request):
return {"Config for X:": f"{settings.config_x}", "Config for Y:": f"{settings.config_y}"}
|
StarcoderdataPython
|
3578627
|
#!/usr/bin/env python
import pyspark
import sys
if len(sys.argv) != 3:
raise Exception("Exactly 2 arguments are required: <inputUri> <outputUri>")
inputUri=sys.argv[1]
outputUri=sys.argv[2]
sc = pyspark.SparkContext()
lines = sc.textFile(sys.argv[1])
words = lines.flatMap(lambda line: line.split())
wordCounts = words.map(lambda word: (word, 1)).reduceByKey(lambda count1, count2: count1 + count2)
wordCounts.saveAsTextFile(sys.argv[2])
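# Hypothetical invocation (script name and bucket paths are placeholders):
#   spark-submit wordcount.py gs://my-bucket/input.txt gs://my-bucket/output/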
|
StarcoderdataPython
|
9798346
|
# -*- coding: utf-8 -*-
from django import template
from djR.conf import DEFAULT_DB
register = template.Library()
@register.simple_tag
def get_default_db():
return DEFAULT_DB
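# Hypothetical template usage (the load name depends on this module's file
# name, shown here as "r_tags"):
#   {% load r_tags %}
#   {% get_default_db %}   renders the value of DEFAULT_DB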
|
StarcoderdataPython
|
3263496
|
<gh_stars>1-10
from myoperator import RowOperator
import math,sys
class TFIDF(RowOperator):
"""
Generate cleaned word vectors and respective count vector
and term idf vector.
term idf = log(total number of terms in the dataset /
        number of documents where the term appears)
# we assume any term occurs just once in any document
Creates data columns 'word','wordcount','termidf'
Inputs: data column 'cleandesc'
"""
def __init__(self, glob):
sys.stderr.write("# Init TFIDF\n")
self.glob = glob
# define nicknames for column indices
[self.cleandesc_col,self.word_col,self.wordcount_col,self.termidf_col,self.vectorlength_col]=self.glob.use_sheet("data").use_columns(['desc','word','wordcount','termidf','vector_length'])
# use online dictionary. Object handles in glob are hardcoded
self.glob.use_online_dictionaries(["WORDCOUNT"])
def process(self,row, verbose=False):
cleandesc=row[self.cleandesc_col]
# create word, wordcount, termidf vectors
tmp=cleandesc.upper().split(" ")
words=[]
counts=[]
termidf=[]
tmp.sort()
ssq=0.0
for word in tmp:
if not word in self.glob.wordcounts:
if verbose: sys.stderr.write("# Warning: unknown word %s\n%s\n" %(word,tmp))
continue
words.append(word)
cnt=self.glob.wordcounts[word]
if not cnt: cnt="1"
counts.append(str(cnt))
# PK's script uses nwordtotal instead of nprot
x=math.log(self.glob.nwordtotal/float(cnt))
ssq+=x*x
termidf.append(str(x))
row[self.word_col]=" ".join(words)
row[self.wordcount_col]=" ".join(counts)
row[self.termidf_col]=" ".join(termidf)
row[self.vectorlength_col]=str(math.sqrt(ssq))
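# Worked example of the idf formula from the class docstring (numbers are
# illustrative): with self.glob.nwordtotal = 1000 and a word whose stored
# count is 10, x = math.log(1000 / 10.0) ~= 4.605, the word contributes
# x*x ~= 21.2 to ssq, and str(x) is appended to the termidf vector.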
|
StarcoderdataPython
|
6559191
|
<reponame>MRossol/plotting
"""
Plotting of 3D arrays in 2D plots
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import numpy.ma as ma
import seaborn as sns
from plotting.base import plotting_base
def heatmap_plot(data, **kwargs):
"""
Heat map plot using seaborn heatmap
Parameters
----------
data : ndarray | pandas.DataFrame
ndarray of heatmap values or
pandas DataFrame of heat map values with tick labels as index
and column labels
kwargs : dict
kwargs for seaborn.heatmap and plotting_base
See Also
--------
seaborn.heatmap : plotting function
plotting.base.plotting_base : plotting base
"""
def plot_func(axis, data, **kwargs):
sns.heatmap(data, ax=axis, **kwargs)
plotting_base(plot_func, data, **kwargs)
def add_colorbar(axis, cf, ticks, size, padding,
location='right', label=None, lines=None, fontsize=14):
"""
Add a colorbar legend to given axis
Parameters
----------
axis : matplotlib.axis
Axis objet to add colorbar to
cf : matplotlib.cm.ScalarMappable
Contour set or colormap mappable to use for colors
ticks : list
list of tick values
size : tuple
colorbar size
padding : float
how much to pad around the colorbar
location : str, optional
Location of colorbar, by default 'right'
label : str, optional
Label for colorbar, by default None
lines : list, optional
list of lines to add, by default None
fontsize : int, optional
fontsize for label
tick size = fontsize -2
by default 14
"""
divider = make_axes_locatable(axis)
caxis = divider.append_axes(location, size=size,
pad=padding)
if location in ['top', 'bottom']:
orientation = 'horizontal'
else:
orientation = 'vertical'
cbar = plt.colorbar(cf, ticks=ticks, cax=caxis,
orientation=orientation,
ticklocation=location)
cbar.ax.tick_params(labelsize=fontsize - 2)
if label is not None:
cbar.set_label(label, size=fontsize)
if lines is not None:
cbar.add_lines(lines)
def contour_plot(data, **kwargs):
"""
Create a contoured colormap from data shape = (n, 3)
Parameters
----------
    data : tuple | list | ndarray
        length-3 sequence of arrays (x, y, c) to plot
figsize : tuple, optional
Figure size, by default (8, 6)
fontsize : int, optional
Labels font size, by default 14
zlim : float, optional
z / c limit, by default None
major_spacing : float, optional
space between major contours, by default None
minor_spacing : float, optional
space between minor contours, by default None
contour_width : int, optional
contour line width, by default 1
contour_color : str, optional
contour line color, by default 'k'
opacity : float, optional
opacity of colormap, by default 1.
colorbar : bool, optional
Display color bar, by default True
colorbar_location : str, optional
Location of colorbar, by default 'right'
colorbar_label : str, optional
Colorbar label, by default None
colorbar_lines : bool, optional
Plot lines on colorbar, by default True
colorbar_ticks : int, optional
Number of colorbar ticks, by default None
colormap : str, optional
colormap style, by default 'jet'
kwargs : dict
kwargs for plotting_base
See Also
--------
matplotlib.pyplot.contour : plotting function
matplotlib.pyplot.countourf : plotting function
plotting.base.plotting_base : plotting base
"""
def plot_func(axis, data, figsize=(8, 6), fontsize=14, zlim=None,
major_spacing=None, minor_spacing=None, contour_width=1,
contour_color='k', opacity=1., colorbar=True,
colorbar_location='right', colorbar_label=None,
colorbar_lines=True, colorbar_ticks=None, colormap='jet'):
assert len(data) == 3, 'Data must be of shape (x, y, c)'
x, y, z = data
z_m = ma.masked_invalid(z)
a_ratio = z.shape
a_ratio = a_ratio[1] / a_ratio[0]
if isinstance(figsize, (int, float)):
figsize = [figsize * a_ratio, figsize]
else:
figsize = max(figsize)
figsize = [figsize * a_ratio, figsize]
if zlim is None:
zmin, zmax = np.nanmin(z), np.nanmax(z)
else:
zmin, zmax = zlim
if major_spacing is None:
major_spacing = (zmax - zmin) / 10
if minor_spacing is None:
minor_spacing = major_spacing / 10
cl_levels = np.arange(zmin, zmax + major_spacing, major_spacing)
cf_levels = np.arange(zmin, zmax + minor_spacing, minor_spacing)
if colorbar_ticks is None:
l_levels = cl_levels[::2]
else:
l_levels = (zmax - zmin) / colorbar_ticks
l_levels = np.arange(zmin, zmax + l_levels, l_levels)
orientation = 'vertical'
if colorbar_location in ['top', 'bottom']:
orientation = 'horizontal'
cf = plt.contourf(x, y, z_m, alpha=opacity, levels=cf_levels,
extend='both', antialiased=True)
if contour_color is not None:
cl = plt.contour(cf, levels=cl_levels, colors=(contour_color,),
linewidths=(contour_width,))
if colormap is not None:
cf.set_cmap(colormap)
if colorbar:
cbar_padding = 0.1
if colorbar_location in ['top', 'bottom']:
figsize[1] += figsize[1] / 10
cbar_size = figsize[0] / 20
else:
figsize[0] += figsize[0] / 10
cbar_size = figsize[1] / 20
divider = make_axes_locatable(axis)
caxis = divider.append_axes(colorbar_location, size=cbar_size,
pad=cbar_padding)
cbar = plt.colorbar(cf, ticks=l_levels, cax=caxis,
orientation=orientation,
ticklocation=colorbar_location)
cbar.ax.tick_params(labelsize=fontsize - 2)
if colorbar_label is not None:
cbar.set_label(colorbar_label, size=fontsize)
if colorbar_lines is not None:
if contour_color is not None:
cbar.add_lines(cl)
plotting_base(plot_func, data, **kwargs)
def colorbar(zlim, ticks=None, lines=None, line_color='k', linewidth=1,
colormap='jet', extend='neither', ticklocation='right',
fontsize_other=18, label=None, fontsize_label=21, figsize=6,
dpi=100, showfig=True, filename=None):
"""
Create colorbar
Parameters
----------
zlim : tuple
List or tuple indicating zmin and zmax.
    ticks : int
Number of ticks to label.
lines : int
Number of lines to draw on colorbar.
line_color : str
Color of lines drawn on colorbar.
linewidth : int
Line width for each line drawn on colorbar.
colormap : str
Color scheme for colorbar.
extend : str
Direction to extend colors beyond zmin and zmax.
ticklocation : str
Orientation of colorbar and location of tick marks.
fontsize_other : int
Font size of tick numbers.
label : str
Label for colorbar
fontsize_label : int
Font size of label.
figsize : tuple
Width and height of figure
dpi : int
DPI resolution of figure.
showfig : bool
Whether to show figure.
filename : str
Name of file/path to save the figure to.
"""
a_ratio = 20
if isinstance(figsize, (list, tuple)):
figsize = max(figsize)
if ticklocation in ['right', 'left']:
figsize = (figsize / a_ratio, figsize)
orientation = 'vertical'
else:
figsize = (figsize, figsize / a_ratio)
orientation = 'horizontal'
if ticks is not None:
ticks = (zlim[1] - zlim[0]) / ticks
ticks = np.arange(zlim[0], zlim[1] + ticks, ticks)
fig = plt.figure(figsize=figsize, dpi=dpi)
axis = fig.add_axes([0.0, 0.0, 1.0, 1.0])
norm = mpl.colors.Normalize(vmin=zlim[0], vmax=zlim[1])
cb = mpl.colorbar.ColorbarBase(axis, cmap=colormap, norm=norm,
orientation=orientation, extend=extend,
ticks=ticks, ticklocation=ticklocation)
cb.ax.tick_params(labelsize=fontsize_other)
if label is not None:
cb.set_label(label, size=fontsize_label)
if lines is not None:
lines = (zlim[1] - zlim[0]) / lines
lines = np.arange(zlim[0], zlim[1] + lines, lines)
cb.add_lines(lines, colors=(line_color,) * len(lines),
linewidths=(linewidth,) * len(lines))
if filename is not None:
plt.savefig(filename, dpi=dpi, transparent=True,
bbox_inches='tight')
if showfig:
plt.show()
plt.close()
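# Minimal usage sketch of colorbar() above (illustrative values): a standalone
# vertical colorbar for data spanning 0-100 with tick labels every 20 units,
# saved to disk instead of shown.
#   colorbar((0, 100), ticks=5, label="Strain (%)", showfig=False,
#            filename="colorbar.png")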
|
StarcoderdataPython
|
1784122
|
<gh_stars>0
"""Trino integration tests.
These rely on having a Trino+Hadoop cluster set up.
They also require a tables created by make_test_tables.sh.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from pyhive import trino
from pyhive.tests.dbapi_test_case import with_cursor, with_complex_processing_cursor
from pyhive.tests.test_presto import TestPresto
import datetime
_HOST = 'localhost'
_PORT = '18080'
class TestTrino(TestPresto):
__test__ = True
def connect(self, process_complex_columns=False):
        return trino.connect(host=_HOST, port=_PORT, source=self.id(),
                             process_complex_columns=process_complex_columns)
def test_bad_protocol(self):
self.assertRaisesRegexp(ValueError, 'Protocol must be',
lambda: trino.connect('localhost', protocol='nonsense').cursor())
def test_escape_args(self):
escaper = trino.TrinoParamEscaper()
self.assertEqual(escaper.escape_args((datetime.date(2020, 4, 17),)),
("date '2020-04-17'",))
self.assertEqual(escaper.escape_args((datetime.datetime(2020, 4, 17, 12, 0, 0, 123456),)),
("timestamp '2020-04-17 12:00:00.123'",))
@with_cursor
def test_description(self, cursor):
cursor.execute('SELECT 1 AS foobar FROM one_row')
self.assertEqual(cursor.description, [('foobar', 'integer', None, None, None, None, True)])
self.assertIsNotNone(cursor.last_query_id)
@with_cursor
def test_complex(self, cursor):
cursor.execute('SELECT * FROM one_row_complex')
# TODO Trino drops the union field
tinyint_type = 'tinyint'
smallint_type = 'smallint'
float_type = 'real'
self.assertEqual(cursor.description, [
('boolean', 'boolean', None, None, None, None, True),
('tinyint', tinyint_type, None, None, None, None, True),
('smallint', smallint_type, None, None, None, None, True),
('int', 'integer', None, None, None, None, True),
('bigint', 'bigint', None, None, None, None, True),
('float', float_type, None, None, None, None, True),
('double', 'double', None, None, None, None, True),
('string', 'varchar', None, None, None, None, True),
('timestamp', 'timestamp', None, None, None, None, True),
('binary', 'varbinary', None, None, None, None, True),
('array', 'array(integer)', None, None, None, None, True),
('map', 'map(integer,integer)', None, None, None, None, True),
('struct', 'row(a integer,b integer)', None, None, None, None, True),
# ('union', 'varchar', None, None, None, None, True),
('decimal', 'decimal(10,1)', None, None, None, None, True),
])
rows = cursor.fetchall()
expected = [(
True,
127,
32767,
2147483647,
9223372036854775807,
0.5,
0.25,
'a string',
'1970-01-01 00:00:00.000',
b'123',
[1, 2],
{"1": 2, "3": 4}, # Trino converts all keys to strings so that they're valid JSON
[1, 2], # struct is returned as a list of elements
# '{0:1}',
'0.1',
)]
self.assertEqual(rows, expected)
# catch unicode/str
self.assertEqual(list(map(type, rows[0])), list(map(type, expected[0])))
@with_complex_processing_cursor
def test_complex_processing_cursor(self, cursor):
cursor.execute('SELECT * FROM one_row_deep_complex')
fetched_rows = cursor.fetchall()
expected_rows = [(
{
'inner_int1': 2,
'inner_int2': 3,
'inner_int_array': [4, 5],
'inner_row1': {
'inner_inner_varbinary': b'binarydata',
'inner_inner_string': 'some string'
}
},
{
'key1': {
'double_attribute': 2.2,
'integer_attribute': 60,
'map_attribute': {
602: ['string1', 'string2'],
21: ['other string', 'another string']
}
},
'key2': {
'double_attribute': 42.15,
'integer_attribute': 6060,
'map_attribute': {
14: ['11string1', 'somestring'],
22: ['other string', 'another string']
}
}
},
[
{
'int1': 42,
'double1': 24.5,
'string1': 'lalala'
},
{
'int1': 421,
'double1': 244.25,
'string1': 'bababa'
}
]
)]
self.assertEqual(expected_rows, fetched_rows)
|
StarcoderdataPython
|
5156804
|
<filename>python/tests/test_node_port.py<gh_stars>1-10
from ionpy import Node, Port, Type, TypeCode
def test_node_port():
t = Type(code_=TypeCode.Int, bits_=32, lanes_=1)
port_to_set = Port(key='iamkey', type=t, dim=3)
ports = [ port_to_set, ]
n = Node()
n.set_port(ports)
port_to_get = n.get_port('iamkey')
print(f'from node.get_port: {port_to_get}')
|
StarcoderdataPython
|
3466121
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped, Twist
import math
from twist_controller import Controller
from yaw_controller import YawController
from pid import PID
from lowpass import LowPassFilter
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, it's enabled all the time; in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
def __init__(self):
rospy.init_node('dbw_node')
vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
brake_deadband = rospy.get_param('~brake_deadband', .1)
decel_limit = rospy.get_param('~decel_limit', -5)
accel_limit = rospy.get_param('~accel_limit', 1.)
wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
wheel_base = rospy.get_param('~wheel_base', 2.8498)
steer_ratio = rospy.get_param('~steer_ratio', 14.8)
max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
# Minimum speed at which we steer the vehicle
min_speed = 0.0
# Throttle/Brake PID parameters
k_p = 0.5
k_i = 0.0
k_d = 0.1
tau = 0.2
ts = 0.1
self.dbw_enabled = False # subscribe from /vehicle/dbw_enabled
self.target_velocity = Twist() # subscribe from /twist_cmd
self.current_velocity = Twist() # subsribe from /current_velocity
self.previous_throttle = 0
self.previous_steering = 0
self.previous_brake = 0
self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1)
self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1)
self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1)
# TODO: Create `TwistController` object
yaw_controller = YawController(wheel_base, steer_ratio, min_speed, max_lat_accel, max_steer_angle)
pid_controller = PID(k_p, k_i, k_d, decel_limit, accel_limit)
low_pass_filter = LowPassFilter(tau, ts)
self.controller = Controller(yaw_controller, pid_controller, low_pass_filter, vehicle_mass, fuel_capacity, wheel_radius)
# TODO: Subscribe to all the topics you need to
rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cmd_cb)
rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb)
rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
self.loop()
def loop(self):
rate = rospy.Rate(50) # 50Hz
while not rospy.is_shutdown():
# TODO: Get predicted throttle, brake, and steering using `twist_controller`
# You should only publish the control commands if dbw is enabled
throttle, brake, steering = self.controller.control(self.target_velocity.linear,
self.target_velocity.angular,
self.current_velocity.linear,
self.dbw_enabled) #,
# <any other argument you need>)
if self.dbw_enabled:
self.publish(throttle, brake, steering)
rate.sleep()
def publish(self, throttle, brake, steer):
tcmd = ThrottleCmd()
tcmd.enable = (abs(throttle - self.previous_throttle) > 0.05)
if tcmd.enable :
self.previous_throttle = throttle
tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
tcmd.pedal_cmd = throttle
self.throttle_pub.publish(tcmd)
scmd = SteeringCmd()
        scmd.enable = (abs(steer - self.previous_steering) > 0.05)
        if scmd.enable :
            self.previous_steering = steer
scmd.steering_wheel_angle_cmd = steer
self.steer_pub.publish(scmd)
bcmd = BrakeCmd()
bcmd.enable = (abs(brake - self.previous_brake) > 0.05)
if bcmd.enable :
self.previous_brake = brake
bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
bcmd.pedal_cmd = brake
self.brake_pub.publish(bcmd)
def twist_cmd_cb(self, msg):
# TODO: Implement
self.target_velocity = msg.twist
pass
def current_velocity_cb(self, msg):
# TODO: Implement
self.current_velocity = msg.twist
pass
def dbw_enabled_cb(self, msg):
self.dbw_enabled = msg.data
pass
if __name__ == '__main__':
try:
DBWNode()
except rospy.ROSInterruptException:
rospy.logerr('Error: Not start dbw_node!')
|
StarcoderdataPython
|
8085879
|
<filename>order.py
#!/usr/bin/env python
import argparse, os, sys, signal
sourcedir=os.path.dirname(os.path.abspath(__file__))
cwdir=os.getcwd()
sys.path.append(sourcedir)
from pythonmods import runsubprocess
def default_sigpipe():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def positiveint(x):
x = int(x)
if x < 0:
raise argparse.ArgumentTypeError("%s is an invalid positive int value" %x)
return x
def batchsizeint(x):
x = int(x)
if x < 2:
raise argparse.ArgumentTypeError("%s is too small; batch size must be greater than 1" %x)
if x > 500:
raise argparse.ArgumentTypeError("%s is too large; batch size must not exceed 500" %x)
return x
parser = argparse.ArgumentParser(description='bacterialBercow: bringing order to bacterial sequences',add_help=False)
#Help options
help_group = parser.add_argument_group('Help')
help_group.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')
#General options
general_group = parser.add_argument_group('General options')
general_group.add_argument('-o','--out', help='Output directory (required)', required=True, type=str)
general_group.add_argument('-t','--threads', help='Number of threads to use (default: 1)', default=1, type=positiveint)
#NCBI query and retrieval options
ncbi_group = parser.add_argument_group('NCBI query and retrieval options')
ncbi_group.add_argument('-e','--emailaddress', help="User's email address which will be provided as an argument to edirect econtact -email (required if retrieving data from NCBI)", required=False, type=str)
ncbi_group.add_argument('--taxonomyquery', help='Taxonomy search query term to be supplied to the edirect esearch -query argument (default: bacteria[porgn:__txid2])', default="bacteria[porgn:__txid2]", type=str)
ncbi_group.add_argument('--datequery', help='Date search query term to be supplied to the edirect esearch -query argument (e.g. "2017/01/01"[PDAT] : "3000"[PDAT] would retrieve records since 2017) (not required)', required=False, type=str)
ncbi_group.add_argument('-s','--dbsource', help='Database source; refseq or refseq_genbank (default: refseq_genbank)', default="refseq_genbank", choices=["refseq","refseq_genbank"],type=str)
ncbi_group.add_argument('--deduplicationmethod', help='Specify how identical sequences should be deduplicated; either "all" duplicates are removed; otherwise, duplicates are removed if they share biosample accession id + "submitter" metadata; or "bioproject" accession id; or "both" submitter metadata and bioproject accession id (default: "both")', default="both", choices=["both","submitter","bioproject","all"],type=str)
ncbi_group.add_argument('-b','--batchsize', help='Number of accession nucleotide records to retrieve per edirect query (default: 200; min: 2; max: 500)', default=200, type=batchsizeint)
#NCBI pipeline step customisation (specifying starting and stopping points)
steps_group = parser.add_argument_group('Customising NCBI pipeline steps (specifying starting / stopping points)')
steps_group.add_argument('--accessions', help='A text file containing NCBI plasmid accessions in the first column; if provided, these accessions will be retrieved, rather than retrieving plasmid accessions using a query term (default: retrieve accessions using a query term)',required=False)
steps_group.add_argument('--retrieveaccessionsonly', action='store_true',help='If flag is provided, stop after retrieving and filtering NCBI accessions (default: do not stop)',required=False)
steps_group.add_argument('--retrievesequencesonly', action='store_true',help='If flag is provided, stop after retrieving deduplicated sequences from NCBI filtered accessions (default: do not stop)',required=False)
steps_group.add_argument('--restartwithsequences', action='store_true',help='If flag is provided, re-start the pipeline using sequences retrieved from NCBI',required=False)
#In-house contig options
contig_group = parser.add_argument_group('Customising in-house contig pipeline steps')
contig_group.add_argument('--inhousesequences', help='A fasta file containing uncharacterised bacterial contig nucleotide sequences; if provided, these contigs will be typed using rmlst and replicon loci to determine whether they are likely to be plasmids or chromosomal (default: retrieve sequences from NCBI)',required=False)
contig_group.add_argument('--typing', help='Specifies what sequence typing to perform (only applicable if in-house sequences are provided using --inhousesequences flag); either "replicon", "rmlst" typing or "both" (default: both)',default="both",choices=["both","replicon","rmlst"],required=False)
contig_group.add_argument('--contigsamples', help='A tsv file containing contig names in the first column and associated sample names in the second column',required=False)
contig_group.add_argument('--contigcompleteness', help='A tsv file containing contig names in the first column and contig completeness information in the second column (accepted contig completeness descriptions: circular,complete,complete_linear,linear,incomplete,unknown)',required=False)
#contig_group.add_argument('--sampleoutput', action='store_true',help='If flag is provided, output a file with typing information at the sample-level (--contigsamples must be provided)',required=False)
contig_group.add_argument('--typedcontigsonly', action='store_true',help='If flag is provided, only include contigs that have a detected rMLST/replicon type in the contig output file',required=False)
args = parser.parse_args()
outputpath=os.path.relpath(args.out, cwdir)
#check databases downloaded
rmlstdbexists=os.path.exists('%s/databases/rmlstalleles/blastdbs'%sourcedir)
plasmidfinderdbexists=os.path.exists('%s/databases/plasmidfinder_db/blastdbs'%sourcedir)
if rmlstdbexists==False or plasmidfinderdbexists==False:
if rmlstdbexists==False and plasmidfinderdbexists==False:
sys.exit('Error: the rMLST database and the PlasmidFinder database must be installed first (see README)')
elif rmlstdbexists==False:
sys.exit('Error: the rMLST database must be installed first (see README)')
else:
sys.exit('Error: the PlasmidFinder database must be installed first (see README)')
#check --sampleoutput flag used correctly if provided
#if args.sampleoutput==True and args.contigsamples==None:
# sys.exit('Error: --sampleoutput is only possible if the --contigsamples flag is provided, to specify sample groupings')
if args.contigsamples!=None:
    args.sampleoutput=True #always produce sample-level output if args.contigsamples is provided
else:
    args.sampleoutput=False #attribute is referenced later when calling finalfilter.py
cmdArgs=['mkdir -p %s'%outputpath]
runsubprocess(cmdArgs,shell=True)
###retrieve accessions and sequences from NCBI
if args.inhousesequences==None and args.restartwithsequences==False:
if args.accessions==None:
if args.datequery==None:
datepresent="absent"
else:
            datepresent="present"
runsubprocess(['bash','%s/downloadaccessions.sh'%sourcedir,datepresent,str(args.taxonomyquery),str(args.datequery),str(args.dbsource),outputpath])
print('Retrieved accessions from NCBI')
runsubprocess(['python','%s/filteraccessions.py'%sourcedir,outputpath])
print('Finished initial filtering of accessions based on accession title text')
else:
runsubprocess(['bash','%s/downloaduseraccessions.sh'%sourcedir,str(args.accessions),outputpath])
print('Retrieved accessions from NCBI')
runsubprocess(['python','%s/filteraccessions.py'%sourcedir,outputpath])
print('Finished initial filtering of accessions based on accession title text')
###retrieve sequences if args.retrieveaccessionsonly is false
if args.retrieveaccessionsonly==True:
sys.exit()
else:
runsubprocess(['bash','%s/downloadsequences.sh'%sourcedir,str(args.batchsize),str(args.emailaddress),outputpath])
print('Downloaded sequences from NCBI')
runsubprocess(['python','%s/deduplicateseqs.py'%sourcedir,str(args.deduplicationmethod),outputpath])
print('Deduplicated sequences using deduplication method: %s'%str(args.deduplicationmethod))
if args.retrieveaccessionsonly==True:
sys.exit()
if args.retrievesequencesonly==True:
sys.exit()
###characterise sequences to identify plasmids
cmdArgs=['mkdir -p %s/plasmidfinder'%outputpath]
runsubprocess(cmdArgs,shell=True)
cmdArgs=['mkdir -p %s/rmlst'%outputpath]
runsubprocess(cmdArgs,shell=True)
enterobacteriaceaedbpath='%s/databases/plasmidfinder_db/blastdbs/enterobacteriaceaedb'%sourcedir
gram_positivedbpath='%s/databases/plasmidfinder_db/blastdbs/gram_positivedb'%sourcedir
rmlstdbpath='%s/databases/rmlstalleles/blastdbs'%sourcedir
rmlstprofilepath='%s/databases/rmlstalleles'%sourcedir
if args.inhousesequences==None:
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'enterobacteriaceae',enterobacteriaceaedbpath,str(args.threads),outputpath,'ncbi',sourcedir])
print('Finished BLAST searching Enterobacteriaceae PlasmidFinder database')
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'gram_positive',gram_positivedbpath,str(args.threads),outputpath,'ncbi',sourcedir])
print('Finished BLAST searching Gram-positive PlasmidFinder database')
runsubprocess(['python', '%s/rmlst.py'%sourcedir,rmlstdbpath,str(args.threads),outputpath,'ncbi',sourcedir])
print('Finished BLAST searching rMLST database')
runsubprocess(['python', '%s/finalfilter.py'%sourcedir, rmlstprofilepath,outputpath, 'ncbi','enterobacteriaceae', 'gram_positive'])
else:
cmdArgs=["cat %s | bioawk -c fastx '{print $name,length($seq)}' > %s/seqlengths.tsv"%(str(args.inhousesequences),outputpath)]
runsubprocess(cmdArgs,shell=True)
if args.typing=='replicon' or args.typing=='both':
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'enterobacteriaceae',enterobacteriaceaedbpath,str(args.threads),outputpath,'user',sourcedir,str(args.inhousesequences)])
print('Finished BLAST searching Enterobacteriaceae PlasmidFinder database')
runsubprocess(['python', '%s/plasmidfinder.py'%sourcedir,'gram_positive',gram_positivedbpath,str(args.threads),outputpath,'user',sourcedir,str(args.inhousesequences)])
print('Finished BLAST searching Gram-positive PlasmidFinder database')
if args.typing=='rmlst' or args.typing=='both':
runsubprocess(['python', '%s/rmlst.py'%sourcedir,rmlstdbpath,str(args.threads),outputpath,'user',sourcedir,str(args.inhousesequences)])
print('Finished BLAST searching rMLST database')
runsubprocess(['python', '%s/finalfilter.py'%sourcedir, rmlstprofilepath,outputpath,'user',str(args.typing),'enterobacteriaceae', 'gram_positive',str(args.contigcompleteness),str(args.contigsamples),str(args.sampleoutput),str(args.typedcontigsonly)])
cmdArgs=["rm %s/seqlengths.tsv"%outputpath]
runsubprocess(cmdArgs,shell=True)
print('Finished running bacterialBercow!')
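# Hypothetical invocations (output directory, email address and file names are
# placeholders):
#   python order.py -o results -e user@example.org --retrieveaccessionsonly
#   python order.py -o results --inhousesequences contigs.fasta --typing both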
###OLD CODE
##Replicon and rMLST typing options
#typing_group = parser.add_argument_group('Replicon and rMLST typing options')
#typing_group.add_argument('--typing', help='Specifies what sequence typing to perform (only applicable if in-house sequences are provided using --inhousesequences flag); either "replicon", "rmlst" typing or "both" (default: both)',default="both",choices=["both","replicon","rmlst"],required=False)
#typing_group.add_argument('--enterobacdbpath', help='Path to the "enterobacteriaceae" plasmidfinder BLAST database (default: databases/plasmidfinder/enterobacteriaceae/enterobacteriaceaedb)',required=False)
#typing_group.add_argument('--gramposdbpath', help='Path to the "gram_positive" plasmidfinder BLAST database (default: databases/plasmidfinder/gram_positive/gram_positivedb)',required=False)
#typing_group.add_argument('--rmlstdbpath', help='Path to the directory used to store the rmlst blast database files (default: databases/rmlstalleles/blastdbs)',required=False)
#typing_group.add_argument('--rmlstprofilepath', help='Path to the directory used to store the rmlst profile file (default: databases/rmlstalleles)',required=False)
# if args.enterobacdbpath==None:
# enterobacteriaceaedbpath='%s/databases/plasmidfinder/enterobacteriaceae/enterobacteriaceaedb'%sourcedir
# else:
# enterobacteriaceaedbpath=str(args.enterobacdbpath)
# if args.gramposdbpath==None:
# gram_positivedbpath='%s/databases/plasmidfinder/gram_positive/gram_positivedb'%sourcedir
# else:
# gram_positivedbpath=str(args.gramposdbpath)
# if args.rmlstdbpath==None:
# rmlstdbpath='%s/databases/rmlstalleles/blastdbs'%sourcedir
# else:
# rmlstdbpath=str(args.rmlstdbpath)
# if args.rmlstprofilepath==None:
# rmlstprofilepath='%s/databases/rmlstalleles'%sourcedir
# else:
# rmlstprofilepath=str(args.rmlstprofilepath)
|
StarcoderdataPython
|
3233562
|
"""create video table3
Revision ID: c74dc70ede84
Revises: <PASSWORD>
Create Date: 2021-09-23 20:58:02.017347
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c74dc70ede84'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
pass
|
StarcoderdataPython
|
8012913
|
<gh_stars>10-100
import json
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
def read_pvuv_data():
"""
read pv uv data
:return:list,ele = (date,pv,uv)
"""
data = []
with open('./data/pvuv.txt') as fo:
linenum = 0
for row in fo:
if linenum == 0:
linenum += 1
continue
date, pv, uv = row.strip().split("\t")
data.append((date, pv, uv))
return data
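# Illustrative note (not from the original snippet): read_pvuv_data() skips the
# header line and splits each row on tabs, so ./data/pvuv.txt is assumed to look
# roughly like this (hypothetical values):
#
#   date<TAB>pv<TAB>uv
#   2019-09-10<TAB>139<TAB>92
#   2019-09-11<TAB>130<TAB>85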
@app.route('/getjson')
def getjson():
# read file
data = read_pvuv_data()
# return json
return json.dumps(data)
if __name__ == '__main__':
app.run()
|
StarcoderdataPython
|
9648782
|
from .summary import Summary
__all__ = ["Summary"]
|
StarcoderdataPython
|
3421432
|
#!/usr/bin/env python
import yaml
import json
my_list = list(range(5))  # list() so the append() calls below also work on Python 3
my_list.append('Python Programming')
my_list.append('Is Fun')
my_list.append({})
my_list[-1]['IP_ADDR'] = '10.10.10.239'
my_list[-1]['HOSTNAME'] = 'testbox'
my_list[-1]['DOMAIN_NAME'] = 'someplace.net'
with open("Lesson1Number6_create_first_yaml.yml", "w") as filehandle1:
filehandle1.write(yaml.dump(my_list, default_flow_style=False))
filehandle1.close()
with open("Lesson1Number6_create_first_json.json", "w") as filehandle2:
json.dump(my_list, filehandle2)
filehandle2.close()
|
StarcoderdataPython
|
4800065
|
<reponame>theanshulcode/Automatic-License-Number-Plate-Recognition-System
import numpy as np
import cv2
from PIL import Image
import pytesseract as tess
def clean2_plate(plate):
gray_img = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray_img, 110, 255, cv2.THRESH_BINARY)
if cv2.waitKey(0) & 0xff == ord('q'):
pass
num_contours,hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
if num_contours:
contour_area = [cv2.contourArea(c) for c in num_contours]
max_cntr_index = np.argmax(contour_area)
max_cnt = num_contours[max_cntr_index]
max_cntArea = contour_area[max_cntr_index]
x,y,w,h = cv2.boundingRect(max_cnt)
if not ratioCheck(max_cntArea,w,h):
return plate,None
final_img = thresh[y:y+h, x:x+w]
return final_img,[x,y,w,h]
else:
return plate,None
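# ratioCheck() below is a heuristic filter: a candidate region is kept only if its
# area and its width/height ratio (normalised so the ratio is >= 1) fall inside
# bounds that roughly match a licence plate (aspect ratio between 3 and 6).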
def ratioCheck(area, width, height):
ratio = float(width) / float(height)
if ratio < 1:
ratio = 1 / ratio
if (area < 1063.62 or area > 73862.5) or (ratio < 3 or ratio > 6):
return False
return True
def isMaxWhite(plate):
avg = np.mean(plate)
if(avg>=115):
return True
else:
return False
def ratio_and_rotation(rect):
(x, y), (width, height), rect_angle = rect
if(width>height):
angle = -rect_angle
else:
angle = 90 + rect_angle
if angle>15:
return False
if height == 0 or width == 0:
return False
area = height*width
if not ratioCheck(area,width,height):
return False
else:
return True
img = cv2.imread("testData/sample15.jpg")
print("Number input image...",)
cv2.imshow("input",img)
if cv2.waitKey(0) & 0xff == ord('q'):
pass
img2 = cv2.GaussianBlur(img, (3,3), 0)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
img2 = cv2.Sobel(img2,cv2.CV_8U,1,0,ksize=3)
_,img2 = cv2.threshold(img2,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))
morph_img_threshold = img2.copy()
cv2.morphologyEx(src=img2, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img_threshold)
num_contours, hierarchy= cv2.findContours(morph_img_threshold,mode=cv2.RETR_EXTERNAL,method=cv2.CHAIN_APPROX_NONE)
cv2.drawContours(img2, num_contours, -1, (0,255,0), 1)
for i,cnt in enumerate(num_contours):
min_rect = cv2.minAreaRect(cnt)
if ratio_and_rotation(min_rect):
x,y,w,h = cv2.boundingRect(cnt)
plate_img = img[y:y+h,x:x+w]
print("Number identified number plate...")
cv2.imshow("num plate image",plate_img)
if cv2.waitKey(0) & 0xff == ord('q'):
pass
if(isMaxWhite(plate_img)):
clean_plate, rect = clean2_plate(plate_img)
if rect:
fg=0
x1,y1,w1,h1 = rect
x,y,w,h = x+x1,y+y1,w1,h1
plate_im = Image.fromarray(clean_plate)
text = tess.image_to_string(plate_im, lang='eng')
print("Number Detected Plate Text : ",text)
|
StarcoderdataPython
|
6408647
|
from django.contrib.admin.sites import AdminSite
from django.core import mail
from django.test import TestCase
from django.utils import timezone
from django_q.models import Schedule
import recurrence
from qatrack.notifications.models import (
RecipientGroup,
ServiceEventReviewNotice,
UnitGroup,
)
from qatrack.notifications.service_log_review import admin, tasks
from qatrack.qa import models
import qatrack.qa.tests.utils as qa_utils
import qatrack.service_log.tests.utils as utils
class TestServiceEventReviewAdmin(TestCase):
def setUp(self):
self.admin = admin.ServiceEventReviewAdmin(model=ServiceEventReviewNotice, admin_site=AdminSite())
def test_get_notification_type_unreviewed(self):
rg = RecipientGroup.objects.create(name="RG")
n = ServiceEventReviewNotice.objects.create(
notification_type=ServiceEventReviewNotice.UNREVIEWED,
time="0:00",
recipients=rg,
)
assert "Notify about Service Events awaiting review" in self.admin.get_notification_type(n)
def test_get_units(self):
u = qa_utils.create_unit(name="Test Unit")
ug = UnitGroup.objects.create(name="UG")
ug.units.add(u)
rg = RecipientGroup.objects.create(name="RG")
n = ServiceEventReviewNotice.objects.create(
notification_type=ServiceEventReviewNotice.UNREVIEWED,
units=ug,
recipients=rg,
time="0:00",
)
assert ug.name in self.admin.get_units(n)
def test_get_recipients(self):
rg = RecipientGroup.objects.create(name="RG")
n = ServiceEventReviewNotice.objects.create(
notification_type=ServiceEventReviewNotice.UNREVIEWED,
recipients=rg,
time="0:00",
)
assert rg.name in self.admin.get_recipients(n)
class TestServiceEventReviewModel(TestCase):
def setUp(self):
self.unit1 = qa_utils.create_unit(name="unit1", number=1)
self.unit2 = qa_utils.create_unit(name="unit2", number=2)
self.usa1 = utils.create_unit_service_area(unit=self.unit1)
self.usa2 = utils.create_unit_service_area(unit=self.unit2)
self.se1 = utils.create_service_event(unit_service_area=self.usa1, is_review_required=True)
self.se2 = utils.create_service_event(unit_service_area=self.usa2, is_review_required=False)
self.unit_group = UnitGroup.objects.create(name="test group")
self.unit_group.units.add(self.usa1.unit)
self.group = qa_utils.create_group()
user = models.User.objects.latest('pk')
user.is_active = True
user.groups.add(self.group)
user.email = "<EMAIL>"
user.save()
self.recipients = RecipientGroup.objects.create(name="test group")
self.recipients.groups.add(self.group)
self.inactive_user = models.User.objects.create_user('inactive', '<EMAIL>', 'password')
self.inactive_user.groups.add(self.group)
self.inactive_user.is_active = False
self.inactive_user.save()
# delete defaults schedules to make counting easier
Schedule.objects.all().delete()
def test_unreviewed_both_unreviewed_no_groups(self):
self.se1.is_review_required = True
self.se1.save()
self.se2.is_review_required = True
self.se2.save()
notice = ServiceEventReviewNotice.objects.create(
recipients=self.recipients,
notification_type=ServiceEventReviewNotice.UNREVIEWED,
time="0:00",
)
expected = [
{
'unit_service_area__unit__name': self.usa1.unit.name,
'unit_service_area__service_area__name': self.usa1.service_area.name,
'unit_service_area__unit__name__count': 1,
'unit_service_area__service_area__name__count': 1,
},
{
'unit_service_area__unit__name': self.usa2.unit.name,
'unit_service_area__service_area__name': self.usa2.service_area.name,
'unit_service_area__unit__name__count': 1,
'unit_service_area__service_area__name__count': 1,
},
]
assert list(notice.ses_by_unit_usa()) == expected
def test_upcoming_both_unreviewed_unit_group(self):
self.se1.is_review_required = True
self.se1.save()
self.se2.is_review_required = False
self.se2.save()
notice = ServiceEventReviewNotice.objects.create(
recipients=self.recipients,
units=self.unit_group,
notification_type=ServiceEventReviewNotice.UNREVIEWED,
time="0:00",
)
expected = [
{
'unit_service_area__unit__name': self.usa1.unit.name,
'unit_service_area__service_area__name': self.usa1.service_area.name,
'unit_service_area__unit__name__count': 1,
'unit_service_area__service_area__name__count': 1,
},
]
assert list(notice.ses_by_unit_usa()) == expected
def test_is_props(self):
assert ServiceEventReviewNotice(notification_type=ServiceEventReviewNotice.UNREVIEWED).is_unreviewed
class TestServiceEventReviewEmails(TestCase):
def setUp(self):
self.unit1 = qa_utils.create_unit(name="unit1", number=1)
self.unit2 = qa_utils.create_unit(name="unit2", number=2)
self.usa1 = utils.create_unit_service_area(unit=self.unit1)
self.usa2 = utils.create_unit_service_area(unit=self.unit2)
self.unit_group = UnitGroup.objects.create(name="test group")
self.unit_group.units.add(self.usa1.unit)
self.group = qa_utils.create_group()
user = models.User.objects.latest('pk')
user.groups.add(self.group)
user.is_active = True
user.email = "<EMAIL>"
user.save()
self.recipients = RecipientGroup.objects.create(name="test group")
self.recipients.groups.add(self.group)
self.inactive_user = models.User.objects.create_user('inactive', '<EMAIL>', 'password')
self.inactive_user.groups.add(self.group)
self.inactive_user.is_active = False
self.inactive_user.save()
self.notice = ServiceEventReviewNotice.objects.create(
recipients=self.recipients,
notification_type=ServiceEventReviewNotice.UNREVIEWED,
time="0:00",
)
# delete defaults schedules to make counting easier
Schedule.objects.all().delete()
def test_send_notice(self):
self.se1 = utils.create_service_event(unit_service_area=self.usa1)
self.se1.is_review_required = True
self.se1.save()
now = timezone.now()
tasks.send_serviceeventreview_notice(self.notice.pk)
self.notice.refresh_from_db()
assert self.notice.last_sent >= now
assert "QATrack+ Unreviewed Service Event Notice:" in mail.outbox[0].subject
def test_send_notice_empty(self):
self.notice.send_empty = True
self.notice.save()
now = timezone.now()
tasks.send_serviceeventreview_notice(self.notice.pk)
self.notice.refresh_from_db()
assert self.notice.last_sent >= now
assert "QATrack+ Unreviewed Service Event Notice:" in mail.outbox[0].subject
def test_send_notice_not_empty(self):
tasks.send_serviceeventreview_notice(self.notice.pk)
self.notice.refresh_from_db()
assert len(mail.outbox) == 0
def test_send_notice_non_existent(self):
tasks.send_serviceeventreview_notice(self.notice.pk + 1)
self.notice.refresh_from_db()
assert self.notice.last_sent is None
assert len(mail.outbox) == 0
def test_send_notice_no_recipients(self):
utils.create_service_event(is_review_required=True)
self.recipients.groups.clear()
tasks.send_serviceeventreview_notice(self.notice.pk)
self.notice.refresh_from_db()
assert self.notice.last_sent is None
assert len(mail.outbox) == 0
def test_review_notice(self):
next_run = timezone.now() + timezone.timedelta(hours=1)
tasks.schedule_serviceeventreview_notice(self.notice, next_run)
assert Schedule.objects.count() == 1
def test_run_review_notices(self):
self.notice.recurrences = recurrence.Recurrence(rrules=[recurrence.Rule(recurrence.DAILY)])
self.notice.time = (timezone.localtime(timezone.now()) + timezone.timedelta(minutes=1)).time()
self.notice.save()
tasks.run_service_event_review_notices()
assert Schedule.objects.count() == 1
|
StarcoderdataPython
|
9681214
|
<gh_stars>10-100
n=int(input("enter a number: "))
i=1
while i<=n:
j=i
c=0
k=j
while j>0:
print(k," ",sep='',end="")
k=k+n-1-c
j-=1
c+=1
i+=1
print()
'''
output:
enter a number: 9
1
2 10
3 11 18
4 12 19 25
5 13 20 26 31
6 14 21 27 32 36
7 15 22 28 33 37 40
8 16 23 29 34 38 41 43
9 17 24 30 35 39 42 44 45
'''
|
StarcoderdataPython
|
6563341
|
# -*- coding: utf-8 -*-
from typing import List
import pandas as pd
from zvt.api.kdata import get_kdata_schema
from zvt.contract.api import decode_entity_id
from zvt.contract.drawer import Drawer, ChartType
from zvt.utils import to_pd_timestamp
def compare(entity_ids, schema_map_columns: dict = None, chart_type: ChartType = ChartType.line):
entity_type_map_ids = _group_entity_ids(entity_ids=entity_ids)
dfs = []
for entity_type in entity_type_map_ids:
if schema_map_columns:
for schema in schema_map_columns:
columns = ["entity_id", "timestamp"] + schema_map_columns.get(schema)
df = schema.query_data(entity_ids=entity_type_map_ids.get(entity_type), columns=columns)
dfs.append(df)
else:
schema = get_kdata_schema(entity_type=entity_type)
df = schema.query_data(entity_ids=entity_type_map_ids.get(entity_type))
dfs.append(df)
all_df = pd.concat(dfs)
if schema_map_columns:
drawer = Drawer(main_df=all_df)
drawer.draw(main_chart=chart_type, show=True)
else:
drawer = Drawer(main_df=all_df, sub_df_list=[all_df[["entity_id", "timestamp", "turnover"]].copy()])
drawer.draw_kline(show=True)
def distribute(entity_ids, data_schema, columns, histnorm="percent", nbinsx=20, filters=None):
columns = ["entity_id", "timestamp"] + columns
df = data_schema.query_data(entity_ids=entity_ids, columns=columns, filters=filters)
if not entity_ids:
df["entity_id"] = "entity_x_distribute"
drawer = Drawer(main_df=df)
drawer.draw_histogram(show=True, histnorm=histnorm, nbinsx=nbinsx)
def composite(entity_id, data_schema, columns, filters=None):
columns = ["entity_id", "timestamp"] + columns
df = data_schema.query_data(entity_id=entity_id, columns=columns, filters=filters)
drawer = Drawer(main_df=df)
drawer.draw_pie(show=True)
def composite_all(data_schema, column, timestamp, entity_ids=None, filters=None):
if type(column) is not str:
column = column.name
if filters:
filters.append([data_schema.timestamp == to_pd_timestamp(timestamp)])
else:
filters = [data_schema.timestamp == to_pd_timestamp(timestamp)]
df = data_schema.query_data(
entity_ids=entity_ids, columns=["entity_id", "timestamp", column], filters=filters, index="entity_id"
)
entity_type, exchange, _ = decode_entity_id(df["entity_id"].iloc[0])
pie_df = pd.DataFrame(columns=df.index, data=[df[column].tolist()])
pie_df["entity_id"] = f"{entity_type}_{exchange}_{column}"
pie_df["timestamp"] = timestamp
drawer = Drawer(main_df=pie_df)
drawer.draw_pie(show=True)
def _group_entity_ids(entity_ids):
entity_type_map_ids = {}
for entity_id in entity_ids:
entity_type, _, _ = decode_entity_id(entity_id)
ids: List = entity_type_map_ids.setdefault(entity_type, [])
ids.append(entity_id)
return entity_type_map_ids
if __name__ == "__main__":
from zvt.domain import CashFlowStatement
composite(
entity_id="stock_sz_000338",
data_schema=CashFlowStatement,
columns=[
CashFlowStatement.net_op_cash_flows,
CashFlowStatement.net_investing_cash_flows,
CashFlowStatement.net_financing_cash_flows,
],
filters=[
CashFlowStatement.report_period == "year",
CashFlowStatement.report_date == to_pd_timestamp("2015-12-31"),
],
)
# the __all__ is generated
__all__ = ["compare", "distribute", "composite", "composite_all"]
|
StarcoderdataPython
|
6473798
|
import sys
import csv
def main():
# check to make sure the number of arguments passed in are correct
if len(sys.argv) != 3:
print("Usage: dna.py [csv file] [dna text file]")
sys.exit(1)
# AGATC,TTTTTTCT,AATG,TCTAG,GATA,TATC,GAAA,TCTG
# DnaSTR = ["AGATC", "AATG", "TATC"]
names = []
# read the csv file into memory
with open(sys.argv[1], "r") as csvFile:
reader = csv.DictReader(csvFile)
for line in reader:
for key in line:
if key != "name":
line[key] = int(line[key])
names.append(line)
# read the dna seq text file
with open(sys.argv[2], "r") as seqFile:
for line in seqFile:
dna_seq = str(line.rstrip())
STRcountDict = {}
keys = []
STR_index = 1
for STR_key in names[0]:
if STR_key != "name":
max_count = 0
# Call the maxSTR function to get the max for each STR to find a match
max_count = maxSTR(dna_seq, STR_key)
STRcountDict[STR_key] = max_count
keys.append(STR_key)
result = "No match"
# find match using the names[]
for person in names:
allMatch = True
for i in range(len(keys)):
if person[keys[i]] != STRcountDict[keys[i]]:
allMatch = False
if allMatch == True:
result = person["name"]
break
print(result)
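# maxSTR() below returns the longest run of back-to-back repeats of dna_str in
# dna_seq; for example, a hypothetical call maxSTR("AGGTAATGAATGAATGC", "AATG")
# returns 3, the same kind of count that is compared against the CSV profile above.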
def maxSTR(dna_seq, dna_str):
# loop through seq to look for each STR AGATC,AATG,TATC and put it into a list of STR dict and count
n = len(dna_str)
consec_itr = 0
save_count = 0
max_count = 0
for i in range(len(dna_seq)):
if dna_seq[i:i + n] == dna_str:
if consec_itr > 0 and max_count > save_count:
save_count = max_count
max_count = count_consecutive(i, n, dna_seq, dna_str)
consec_itr += 1
if save_count > max_count:
max_count = save_count
return max_count
def count_consecutive(start, increment, dna_seq, dna_str):
max_count = 0
count = 0
for i in range(start, len(dna_seq), increment):
end_seq = i + increment
dna_seqToCheck = dna_seq[i:end_seq]
if dna_seqToCheck == dna_str:
count += 1
else:
# found a STR that does not match
if count > max_count:
max_count = count
# start counting over for next group of consecutive matches
count = 0
return max_count
main()
|
StarcoderdataPython
|
240083
|
import pyjion
import pyjion.dis
import pytest
@pytest.mark.optimization(level=1)
def test_import(capsys):
def _f():
print("foo foo")
return 2
assert _f() == 2
info = pyjion.info(_f)
assert info['compiled']
pyjion.dis.dis(_f)
captured = capsys.readouterr()
assert "ldarg.1" in captured.out
assert "METHOD_LOADGLOBAL_HASH" in captured.out
|
StarcoderdataPython
|
8162430
|
<reponame>fragro/Open-Assembly
from django.contrib import admin
from models import DashboardPanel
admin.site.register(DashboardPanel)
|
StarcoderdataPython
|
3498993
|
# -*- coding: utf-8 -*-
import os
import pytest
import logging
from phk_logger import __version__
from phk_logger import PHKLogger as Logger
@pytest.fixture(scope='session')
def log_file(request):
# Will be executed before the first test
f = open(request.param, 'wt')
f.close()
f = open(request.param, 'rt')
yield f
# Will be executed after the last test
f.close()
# Remove file
os.remove(request.param)
class TestIt:
def test_version(self):
assert __version__ == '0.1.4'
def test_init(self, caplog):
logger = Logger()
assert True
caplog.set_level(logging.WARNING)
logger.debug('Check DEBUG')
assert caplog.record_tuples == []
logger.info('Check INFOS')
assert caplog.record_tuples == []
logger.warning('Check WARNING')
assert caplog.record_tuples == [("phk_logger.phkLogger", logging.WARNING, "Check WARNING")]
caplog.clear()
logger.error('Check ERROR')
assert caplog.record_tuples == [("phk_logger.phkLogger", logging.ERROR, "Check ERROR")]
caplog.clear()
logger.critical('Check CRITICAL')
assert caplog.record_tuples == [("phk_logger.phkLogger", logging.CRITICAL, "Check CRITICAL")]
caplog.clear()
logger.write('Check DEBUG', level='debug')
assert caplog.record_tuples == []
caplog.clear()
logger.write('Check INFO', level='info')
assert caplog.record_tuples == []
caplog.clear()
logger.write('Check INFOS', level='infos')
assert caplog.record_tuples == []
caplog.clear()
logger.write('Check WARNING', level='warning')
assert caplog.record_tuples == [("phk_logger.phkLogger", logging.WARNING, "Check WARNING")]
caplog.clear()
logger.write('Check ERROR', level='error')
assert caplog.record_tuples == [("phk_logger.phkLogger", logging.ERROR, "Check ERROR")]
caplog.clear()
logger.write('Check CRITICAL', level='critical')
assert caplog.record_tuples == [("phk_logger.phkLogger", logging.CRITICAL, "Check CRITICAL")]
logger = None
def test_named(self, caplog):
logger = Logger(name='mytest')
assert True
caplog.set_level(logging.WARNING)
logger.debug('Check DEBUG')
assert caplog.record_tuples == []
logger.info('Check INFOS')
assert caplog.record_tuples == []
logger.warning('Check WARNING')
assert caplog.record_tuples == [("mytest", logging.WARNING, "Check WARNING")]
logger = None
@pytest.mark.parametrize('log_file', ['mytest.log','./.mytest.log','/tmp/mytest.log'], indirect=True)
def test_filename(self, log_file):
logger = Logger(filename=log_file.name)
assert os.path.exists(log_file.name)
logger.info('Check INFO')
assert log_file.read() == ''
logger.warning('Check WARNING')
# Get last line in file
for line in log_file:
pass
assert line.startswith("phk_logger.phkLogger ")
assert line.endswith(" WARNING Check WARNING\n")
logger = None
def test_cli(self, capsys):
logger = Logger(cli=True)
assert True
logger.debug('Check DEBUG')
captured = capsys.readouterr()
assert captured.out == ''
logger.info('Check INFO')
captured = capsys.readouterr()
assert captured.out == ''
logger.warning('Check WARNING')
captured = capsys.readouterr()
assert captured.out == "\x1b[0;33m[-] Check WARNING\x1b[0m\n"
logger.error('Check ERROR')
captured = capsys.readouterr()
assert captured.out == "\x1b[0;31m[!] Check ERROR\x1b[0m\n"
logger.critical('Check CRITICAL')
captured = capsys.readouterr()
assert captured.out == "\x1b[1;31m[!] Check CRITICAL\x1b[0m\n"
logger = None
def test_debug(self, capsys):
logger = Logger(name='mytest', cli=True, level='debug')
assert True
# logger.debug('Check DEBUG')
# captured = capsys.readouterr()
# assert captured.out == "\x1b[1;34m[*] Check DEBUG\x1b[0m\n"
logger.info('Check INFO')
captured = capsys.readouterr()
assert captured.out == "\x1b[0;32m[+] Check INFO\x1b[0m\n"
logger.warning('Check WARNING')
captured = capsys.readouterr()
assert captured.out == "\x1b[0;33m[-] Check WARNING\x1b[0m\n"
logger.error('Check ERROR')
captured = capsys.readouterr()
assert captured.out == "\x1b[0;31m[!] Check ERROR\x1b[0m\n"
logger.critical('Check CRITICAL')
captured = capsys.readouterr()
assert captured.out == "\x1b[1;31m[!] Check CRITICAL\x1b[0m\n"
logger = None
def test_pattern(self, capsys):
logger = Logger(name='my_test', pattern='%(name)s %(message)s')
assert True
logger = None
|
StarcoderdataPython
|
4987806
|
<reponame>elyase/polyaxon
from django.contrib import admin
class JobStatusAdmin(admin.ModelAdmin):
readonly_fields = ('created_at',)
|
StarcoderdataPython
|
11267824
|
#!/usr/bin/env python3
from pv.data import PVData, PVWR
import requests
import datetime
import pytz
import sys
local = pytz.timezone("Europe/Berlin")
class PVRestApi:
pvdata = PVData()
host = "http://127.0.0.1"
url = "/rawdata.html"
def __init__(self, host="http://127.0.0.1", url="/rawdata.html"):
self.host = host
self.url = url
def LocalToUTC(self, naive):
try:
pst_now = local.localize(naive, is_dst=None)
except (pytz.NonExistentTimeError):
pst_now = local.localize(naive, is_dst=True)
except (pytz.AmbiguousTimeError):
pst_now = local.localize(naive, is_dst=False)
utc_now = pst_now.astimezone(pytz.utc)
return utc_now
def GetPVDataRestApi(self):
IP = self.host + self.url # "http://192.168.15.160/rawdata.html"
if(len(self.pvdata.wr) < 2):
self.pvdata.wr.clear()
self.pvdata.wr.append(PVWR())
self.pvdata.wr.append(PVWR())
try:
x = requests.get(IP)
# print(x.text)
if(x.status_code == 200):
# print("received data")
kvp = {}
for line in x.iter_lines(decode_unicode=True):
if(line.find(':') != -1):
# print(str(line))
line = line.replace(" ", "").replace("<br>", "")
key = line.split(':', 1)[0].replace(" ", "_")
value = line.split(':', 1)[1].replace(",", ".")
# print("Key: "+str(key)+" Value: "+str(value) )
kvp[key] = value
self.pvdata.PTotal = float(kvp["Gesamtleistung_AC"])
self.pvdata.PDayTotal = float(kvp["Tagesenerie_AC"])
self.pvdata.Time = self.LocalToUTC(datetime.datetime.strptime(kvp["Messzeit"], ' %d.%m.%Y %H:%M:%S')).timestamp()
self.pvdata.wr[0].IAC = float(kvp["Strom_AC_WR_1"])
self.pvdata.wr[0].UAC = float(kvp["Spannung_AC_WR_1"])
self.pvdata.wr[0].FAC = float(kvp["Freq._AC_WR_1"])
self.pvdata.wr[0].IDC = float(kvp["Strom_DC_WR_1"])
self.pvdata.wr[0].UDC = float(kvp["Spannung_DC_WR_1"])
self.pvdata.wr[0].PDay = float(kvp["Tagesenerie_WR_1"])
self.pvdata.wr[0].PNow = float(kvp["Leistung_WR_1"])
self.pvdata.wr[0].EFF = float(kvp["Wirkungsgrad_WR_1"])
self.pvdata.wr[1].IAC = float(kvp["Strom_AC_WR_2"])
self.pvdata.wr[1].UAC = float(kvp["Spannung_AC_WR_2"])
self.pvdata.wr[1].FAC = float(kvp["Freq._AC_WR_2"])
self.pvdata.wr[1].IDC = float(kvp["Strom_DC_WR_2"])
self.pvdata.wr[1].UDC = float(kvp["Spannung_DC_WR_2"])
self.pvdata.wr[1].PDay = float(kvp["Tagesenerie_WR_2"])
self.pvdata.wr[1].PNow = float(kvp["Leistung_WR_2"])
self.pvdata.wr[1].EFF = float(kvp["Wirkungsgrad_WR_2"])
self.pvdata.Error = "OK"
else:
self.pvdata.Error = "Http Error: " + str(x.status_code)
print(self.pvdata.Error, file=sys.stderr)
except Exception as e:
self.pvdata.Error = "Error: " + str(e)
print(self.pvdata.Error, file=sys.stderr)
return self.pvdata
|
StarcoderdataPython
|
3265321
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#File from
#https://github.com/dthierry/k_aug/blob/ma57/test/pyomo_/sens_kaug_dcdp.py
from __future__ import division
from __future__ import print_function
from pyomo.environ import *
from pyomo.opt import SolverFactory, ProblemFormat
from shutil import copyfile
"""Example taken from the sipopt manual
please check
https://github.com/coin-or/Ipopt/blob/master/Ipopt/contrib/sIPOPT/examples/
This illustrates how to use k_aug with the dsdp_mode for sensitivity"""
__author__ = '<NAME>' #: @2018
#: Declare Model
m = ConcreteModel()
m.i = Set(initialize=[1, 2, 3])
init_vals = {1:25E+07, 2:0.0, 3:0.0}
#: Variables
m.x = Var(m.i, initialize=init_vals)
#: Objective
m.oF = Objective(expr=m.x[1]**2 +m.x[2]**2 + m.x[3]**2,
sense=minimize)
#: Dummy variables
m.p1 = Var(initialize=5.0)
m.p2 = Var(initialize=1.0)
#: Parameters variables
m.p1_0 = Param(initialize=5.0)
m.p2_0 = Param(initialize=1.0)
#: Constraints
m.c1 = Constraint(expr=6.0 * m.x[1] + 3.0 * m.x[2] + 2.0 * m.x[3] - m.p1 == 0.0)
m.c2 = Constraint(expr=m.p2 * m.x[1] + m.x[2] - m.x[3] - 1.0 == 0.0)
#: Dummy Constraint REQUIRED!
m.c1p = Constraint(expr=m.p1 - m.p1_0 == 0.0)
m.c2p = Constraint(expr=m.p2 - m.p2_0 == 0.0)
#: Ipopt suffixes REQUIRED FOR K_AUG!
m.dual = Suffix(direction=Suffix.IMPORT_EXPORT)
m.ipopt_zL_out = Suffix(direction=Suffix.IMPORT)
m.ipopt_zU_out = Suffix(direction=Suffix.IMPORT)
m.ipopt_zL_in = Suffix(direction=Suffix.EXPORT)
m.ipopt_zU_in = Suffix(direction=Suffix.EXPORT)
ipopt = SolverFactory('ipopt')
kaug = SolverFactory('k_aug', executable='C:\\cygwin64\\home\\greg6\\k_aug\\bin\\k_aug.exe')
#: K_AUG SUFFIXES
m.dcdp = Suffix(direction=Suffix.EXPORT) #: the dummy constraints
m.var_order = Suffix(direction=Suffix.EXPORT) #: Important variables (primal)
m.c1p.set_suffix_value(m.dcdp, 1)
m.c2p.set_suffix_value(m.dcdp, 2)
#: make sure the order is consistent i.e. 1, 2 and 3. E.g. not 1, 1 and 2 (wrong!)
m.x[1].set_suffix_value(m.var_order, 1)
m.x[2].set_suffix_value(m.var_order, 2)
m.x[3].set_suffix_value(m.var_order, 3) #: we could have all, a subset or none at all
#: please check the dsdp_in_.in file that gets generated!
# dsdp_in_.in gives you the sensitivity matrix, but its ordering is awkward; you should reshape it into a matrix yourself.
#: please check the dxdp_.dat file generated if var_order was set!
# dxdp_.dat is recommended instead: it is already in matrix form, so you only need to multiply it by the perturbed parameter matrix.
# ***please check the guess_meaning.py to interpret dsdp and dxdp
#: Clear this file
with open('ipopt.opt', 'w') as f:
f.close()
ipopt.solve(m, tee=True)
m.ipopt_zL_in.update(m.ipopt_zL_out) #: important!
m.ipopt_zU_in.update(m.ipopt_zU_out) #: important!
#: k_aug
kaug.options['dsdp_mode'] = "" #: sensitivity mode!
kaug.solve(m, tee=True)
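# Illustrative follow-up (not part of the original example): assuming dxdp_.dat is
# written as a plain whitespace-delimited numeric matrix (one row per var_order
# entry, one column per dcdp parameter), a minimal post-processing sketch could be:
#
#   import numpy as np
#   dxdp = np.loadtxt('dxdp_.dat')   # sensitivities dx/dp produced by k_aug
#   dp = np.array([0.1, -0.1])       # hypothetical perturbation of p1 and p2
#   dx = dxdp.dot(dp)                # first-order change in x[1], x[2], x[3]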
|
StarcoderdataPython
|
9663421
|
<reponame>aragilar/NewsBlur
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from vendor.paypal.standard.forms import PayPalStandardBaseForm
from vendor.paypal.standard.pdt.models import PayPalPDT
class PayPalPDTForm(PayPalStandardBaseForm):
class Meta:
model = PayPalPDT
|
StarcoderdataPython
|
6657804
|
run = True
while run:
    input_value = input("Enter student's score or type \"Exit\" to quit: ")
    # Check for the exit keyword before converting the input to a number
    if input_value.capitalize() == "Exit":
        break
    score = float(input_value)
    if score >= 90 and score <= 100:
        print("student wins laptop")
    elif score >= 60 and score <= 89:
        print("student wins tablet")
    elif score >= 0 and score <= 59:
        print("student wins nothing")
    else:
        print("score out of range!")
|
StarcoderdataPython
|
1765812
|
<gh_stars>0
# Returns the (first, last) indices of the target value in the array (both inclusive)
# 0 1 2 3 4 5 6 7 8 9 10
arr = [5, 5, 7, 7, 8, 8, 8, 8, 10, 10, 12]
target = 8
# Returns leftmost index of the target
def get_pos(arr, target):
n = len(arr)
lower = 0
upper = n - 1
pos = n
while lower <= upper:
mid = lower + (upper - lower) // 2
if arr[mid] >= target:
pos = mid
upper = mid - 1
else:
lower = mid + 1
return pos
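# first_and_last() below finds the last occurrence by locating the leftmost index
# of target + 1 and stepping back one position; if first > last the target is not
# present and (-1, -1) is returned.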
def first_and_last(arr, target):
first = get_pos(arr, target)
last = get_pos(arr, target + 1) - 1
if first <= last:
return (first, last)
return (-1, -1)
print(first_and_last(arr, target))
|
StarcoderdataPython
|
27705
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import enum
import json
import os
import plistlib
import subprocess
import time
import tools
import requests
python_script_debug_enable = False  # whether to enable debug mode (used for testing the script)
pwd = os.getcwd()  # current working directory
ios_project_path = os.path.abspath(os.path.dirname(
    pwd) + os.path.sep + ".")  # iOS project path; defaults to the parent of the current directory, use an absolute project path if you change it
system_home_dir = os.path.expanduser('~')  # home directory
build_directory = os.path.join(pwd, 'build')  # output folder for the build
auth_key_dir_name = 'private_keys'
auth_key_copy_dir = os.path.join(pwd, auth_key_dir_name)
auth_key_destination = '~/private_keys/'
pgy_upload_url = 'https://www.pgyer.com/apiv2/app/upload'
testflights_url = 'https://appstoreconnect.apple.com/apps'
qr_code_img_path = os.path.join(build_directory, 'qrCode.jpg')
log_directory = os.path.join(pwd, 'log')  # log folder
packaging_log_path = os.path.join(log_directory, 'packaging.log')
@enum.unique
class DistributionMethodType(enum.Enum):
Development = 'development'
AppStoreConnect = 'app-store'
AdHoc = 'ad-hoc'
class Config(object):
project_name: str
project_scheme_list: list
project_scheme_index: int
apple_account_team_id: str
development_provisioning_profiles: dict
distribution_provisioning_profiles: dict
adhoc_provisioning_profiles: dict
distribution_method: DistributionMethodType
upload_pgy_enable: bool
pgy_api_key: str
upload_app_sotre_enable: bool
    upload_app_store_account_type: int  # 1 = use an Apple account, 2 = use an API key
apple_account_user: str
apple_account_password: str
auth_key_file_name: str
apple_account_apiKey: str
apple_account_apiIssuer: str
send_email_enable: bool
email_host: str
email_port: int
email_sender_user: str
email_sender_psw: str
email_receivers: list
add_build_number_enable: bool
log_enable: bool
app_update_message = ''
github_access_token: str
github_repo_url: str
testflight_external_group_name: str
xcodeproj_path = None
xcworkspace_path = None
is_workspace_project = True
def get_product_scheme():
return Config.project_scheme_list[Config.project_scheme_index]
def get_export_options_plist_path():
plist_path = os.path.join(
build_directory, Config.distribution_method.value+'_ExportOptions.plist')
return plist_path
def get_signing_certificate():
if Config.distribution_method == DistributionMethodType.Development:
return 'Apple Development'
elif Config.distribution_method == DistributionMethodType.AppStoreConnect:
return 'Apple Distribution'
elif Config.distribution_method == DistributionMethodType.AdHoc:
return 'Apple Distribution'
def get_provisioning_profile():
if Config.distribution_method == DistributionMethodType.Development:
return Config.development_provisioning_profiles
elif Config.distribution_method == DistributionMethodType.AppStoreConnect:
return Config.distribution_provisioning_profiles
elif Config.distribution_method == DistributionMethodType.AdHoc:
return Config.adhoc_provisioning_profiles
def get_export_path():
export_path = os.path.join(
build_directory, Config.distribution_method.value)
if export_path in os.listdir(build_directory):
print("%s exists" % (export_path))
else:
print("create dir %s" % (export_path))
subprocess.call('mkdir %s' % (export_path), shell=True)
time.sleep(1)
return export_path
def get_xcode_workspace_path():
if Config.xcworkspace_path is None:
path = search_project_file(
ios_project_path, '%s.xcworkspace' % (Config.project_name))
Config.xcworkspace_path = path
return os.path.join(path)
else:
return os.path.join(Config.xcworkspace_path)
def get_xcode_project_path():
if Config.xcodeproj_path is None:
path = search_project_file(
ios_project_path, '%s.xcodeproj' % (Config.project_name))
Config.xcodeproj_path = path
return os.path.join(path)
else:
return os.path.join(Config.xcodeproj_path)
def get_xcode_project_pbxproj_path():
return os.path.join(get_xcode_project_path(), 'project.pbxproj')
def search_project_file(path, target):
target_path = ''
for root, dirs, fs in os.walk(path):
for d in dirs:
if d == target:
target_path = os.path.join(root, d)
return target_path
for f in fs:
if f == target:
target_path = os.path.join(root, f)
return target_path
if target_path == '':
        tools.fail_print('Could not find file %s' % (target))
return target_path
def get_target_name():
    return Config.project_name  # by default the target name matches the project name
def get_exported_ipa_path():
ipa_path = os.path.join(
build_directory, '%s/%s.ipa' % (Config.distribution_method.value, Config.project_name))
return ipa_path
def prepare_config():
config_path = os.path.join(pwd, 'config.json')
with open(config_path, 'r') as config_file:
config_json_dic = json.load(config_file)
Config.project_name = config_json_dic['project_name']
Config.project_scheme_list = config_json_dic['project_scheme_list']
Config.project_scheme_index = config_json_dic['project_scheme_index']
Config.apple_account_team_id = config_json_dic['apple_account_team_id']
Config.development_provisioning_profiles = config_json_dic[
'development_provisioning_profiles']
Config.distribution_provisioning_profiles = config_json_dic[
'distribution_provisioning_profiles']
Config.adhoc_provisioning_profiles = config_json_dic['adhoc_provisioning_profiles']
Config.distribution_method = DistributionMethodType(
config_json_dic['distribution_method'])
Config.upload_pgy_enable = config_json_dic['upload_pgy_enable']
Config.pgy_api_key = config_json_dic['pgy_api_key']
Config.upload_app_sotre_enable = config_json_dic['upload_app_sotre_enable']
Config.upload_app_store_account_type = config_json_dic['upload_app_store_account_type']
Config.apple_account_user = config_json_dic['apple_account_user']
Config.apple_account_password = config_json_dic['apple_account_password']
Config.auth_key_file_name = config_json_dic['auth_key_file_name']
Config.apple_account_apiKey = config_json_dic['apple_account_apiKey']
Config.apple_account_apiIssuer = config_json_dic['apple_account_apiIssuer']
Config.send_email_enable = config_json_dic['send_email_enable']
Config.email_host = config_json_dic['email_host']
Config.email_port = config_json_dic['email_port']
Config.email_sender_user = config_json_dic['email_sender_user']
Config.email_sender_psw = config_json_dic['email_sender_psw']
Config.email_receivers = config_json_dic['email_receivers']
Config.add_build_number_enable = config_json_dic['add_build_number_enable']
Config.log_enable = config_json_dic['log_enable']
Config.github_access_token = config_json_dic['github_access_token']
Config.github_repo_url = config_json_dic['github_repo_url']
Config.testflight_external_group_name = config_json_dic['testflight_external_group_name']
if get_xcode_workspace_path() != '':
Config.is_workspace_project = True
else:
Config.is_workspace_project = False
    if get_xcode_project_path() == '':
        tools.fail_print('Could not find the %s.xcodeproj file; please place the script inside the project directory' % (Config.project_name))
# check project_scheme_list
if len(Config.project_scheme_list) == 0:
        tools.warn_print("project_scheme_list is not configured; fetching the project's schemes...")
list_project_command_run = subprocess.Popen(
'xcodebuild -list -project %s -json' % (get_xcode_project_path()), shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = list_project_command_run.communicate()
project_info = stdout.decode('utf-8')
project_dict = json.loads(project_info)
print('projec info:\n %s' % (project_dict))
Config.project_scheme_list = project_dict['project']['schemes']
print('project_scheme_lis:\n%s' % (Config.project_scheme_list))
list_project_command_run.wait()
save_packaging_config()
def save_packaging_config():
dic = {
"project_name": Config.project_name,
"project_scheme_list": Config.project_scheme_list,
"project_scheme_index": Config.project_scheme_index,
"apple_account_team_id": Config.apple_account_team_id,
"development_provisioning_profiles": Config.development_provisioning_profiles,
"distribution_provisioning_profiles": Config.distribution_provisioning_profiles,
"distribution_method": Config.distribution_method.value,
"upload_pgy_enable": Config.upload_pgy_enable,
"pgy_api_key": Config.pgy_api_key,
"upload_app_sotre_enable": Config.upload_app_sotre_enable,
"upload_app_store_account_type": Config.upload_app_store_account_type,
"apple_account_user": Config.apple_account_user,
"apple_account_password": Config.apple_account_password,
"auth_key_file_name": Config.auth_key_file_name,
"apple_account_apiKey": Config.apple_account_apiKey,
"apple_account_apiIssuer": Config.apple_account_apiIssuer,
"send_email_enable": Config.send_email_enable,
"email_host": Config.email_host,
"email_port": Config.email_port,
"email_sender_user": Config.email_sender_user,
"email_sender_psw": Config.email_sender_psw,
"email_receivers": Config.email_receivers,
"add_build_number_enable": Config.add_build_number_enable,
"log_enable": Config.log_enable,
"github_access_token": Config.github_access_token,
"github_repo_url": Config.github_repo_url,
"testflight_external_group_name": Config.testflight_external_group_name
}
tools.warn_print('back up configs')
    json_str = json.dumps(dic, ensure_ascii=False, indent=4)  # indent by 4 spaces
config_path = os.path.join(pwd, 'config.json')
with open(config_path, 'w+') as config_file:
config_file.truncate(0)
config_file.write(json_str)
config_file.close()
def create_export_options_plist_file():
plist_value = {
'method': Config.distribution_method.value,
'destination': 'export',
'teamID': Config.apple_account_team_id,
'stripSwiftSymbols': True,
'compileBitcode': True,
'thinning': '<none>',
'signingCertificate': get_signing_certificate(),
'signingStyle': 'manual',
'provisioningProfiles': get_provisioning_profile(),
}
plist_path = get_export_options_plist_path()
print('ExportOptions.plist:\n'+plist_path+'\n')
print(plist_value)
with open(plist_path, 'wb') as fp:
plistlib.dump(plist_value, fp)
return plist_path
def prepare_packaging_dir():
tools.notice_print('prepare build dir: ' + build_directory)
subprocess.call(['rm', '-rf', '%s' % (build_directory)])
time.sleep(1)
subprocess.call(['mkdir', '-p', '%s' % (build_directory)])
time.sleep(1)
def prepare_log_dir():
tools.notice_print('prepare log dir: ' + log_directory)
subprocess.call(['rm', '-rf', '%s' % (log_directory)])
time.sleep(1)
subprocess.call(['mkdir', '-p', '%s' % (log_directory)])
time.sleep(1)
def prepare_app_store_upload():
if Config.upload_app_store_account_type == 1:
if len(Config.apple_account_user) == 0 or len(Config.apple_account_password) == 0:
tools.warn_print(
                'Uploading to App Store Connect requires an account/password or an apiKey/apiIssuer; upload_app_store_account_type must be 1 or 2. Please fill in the relevant fields in config.json')
tools.end_program(2)
elif Config.upload_app_store_account_type == 2:
if len(Config.apple_account_apiKey) == 0 or len(Config.apple_account_apiIssuer) == 0:
tools.warn_print(
                'Uploading to App Store Connect requires an account/password or an apiKey/apiIssuer; upload_app_store_account_type must be 1 or 2. Please fill in the relevant fields in config.json')
tools.end_program(2)
prepare_authkey_dir()
else:
tools.warn_print(
            'Uploading to App Store Connect requires an account/password or an apiKey/apiIssuer; upload_app_store_account_type must be 1 or 2. Please fill in the relevant fields in config.json')
tools.end_program(2)
def prepare_authkey_dir():
if Config.auth_key_file_name is None or Config.auth_key_file_name not in os.listdir(auth_key_copy_dir):
tools.warn_print(
            'Uploading to App Store Connect with an apiKey/apiIssuer requires a *.p8 file; copy the *.p8 file into the private_keys directory first. For details see: https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api')
tools.end_program(2)
if auth_key_dir_name in os.listdir(system_home_dir):
print("%s exists" % (auth_key_destination))
else:
print("create dir: %s" % (auth_key_destination))
subprocess.call('cd ~ && mkdir %s' %
(auth_key_destination), shell=True)
time.sleep(1)
key_dir = os.path.expanduser(auth_key_destination)
if Config.auth_key_file_name in os.listdir(key_dir):
print("%s/%s file exists" %
(auth_key_destination, Config.auth_key_file_name))
else:
print("copy file: %s/%s" %
(auth_key_destination, Config.auth_key_file_name))
subprocess.call('cp -r %s %s' %
(auth_key_copy_dir, auth_key_destination), shell=True)
time.sleep(1)
def save_qr_code(qr_code_url):
r = requests.get(qr_code_url)
with open(qr_code_img_path, 'wb') as f:
f.write(r.content)
return qr_code_img_path
def save_packaging_log(start_time='', end_time='', error_message=''):
if Config.log_enable:
prepare_log_dir()
version = tools.get_xcode_project_info(
project_pbxproj_path=get_xcode_project_pbxproj_path(), target_name=get_target_name())
log = {
"strat_time": start_time,
"end_time": end_time,
"erro_message": error_message,
"app_name": Config.project_name,
"scheme": get_product_scheme(),
"update_message": Config.app_update_message,
"version": version[0]+'('+version[1]+')',
"upload_to_pgy": Config.upload_pgy_enable,
"upload_to_app_store": Config.upload_app_sotre_enable,
"auto_add_build": Config.add_build_number_enable,
'signingCertificate': get_signing_certificate(),
"distribution_method": Config.distribution_method.value,
"ipa_path": get_exported_ipa_path(),
"xcodeproj_path": get_xcode_project_path(),
"xcworkspace_path": get_xcode_workspace_path()
}
json_str = json.dumps(log, ensure_ascii=False, indent=4)
with open(packaging_log_path, "w+") as log_file:
log_file.truncate(0)
log_file.write(json_str)
log_file.close()
return json_str
|
StarcoderdataPython
|
4988474
|
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def flatten(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
if root:
self.flattenHelper(root)
def flattenHelper(self, node):
tail = node
right = node.right
if node.left:
node.right, tail = self.flattenHelper(node.left)
node.left = None
if right:
tail.right, tail = self.flattenHelper(right)
return node, tail
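# Minimal usage sketch (added for illustration, not part of the original snippet):
# flatten the tree 1(2(3, 4), 5(None, 6)) and walk the resulting right-pointer list.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(5)
    root.left.left = TreeNode(3)
    root.left.right = TreeNode(4)
    root.right.right = TreeNode(6)
    Solution().flatten(root)
    values, node = [], root
    while node:
        values.append(node.val)
        node = node.right
    print(values)  # expected: [1, 2, 3, 4, 5, 6]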
|
StarcoderdataPython
|
368411
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _Generic
class _Ansible(_Generic):
_type = "ansible"
_icon_dir = "resources/generic/ansible"
class Ansible(_Ansible):
_icon = "Ansible.png"
class Collection(_Ansible):
_icon = "Collection.png"
class Module(_Ansible):
_icon = "Module.png"
class Output(_Ansible):
_icon = "Output.png"
class Playbook(_Ansible):
_icon = "Playbook.png"
class Required(_Ansible):
_icon = "Required.png"
class Role(_Ansible):
_icon = "Role.png"
class Variable(_Ansible):
_icon = "Variable.png"
# Aliases
|
StarcoderdataPython
|
9636075
|
<reponame>SimmonsRitchie/topojoin
import pytest
from topojoin.helper import get_topo_features
from topojoin.topojoin import TopoJoin
from pathlib import Path
def test_topojson_init(topo_path, csv_path):
""" Test that TopoJoin instance is initialized and attribs are properly set """
topojoin_obj = TopoJoin(topo_path, csv_path, topo_key="GEOID", csv_key="fips",)
assert isinstance(topojoin_obj.csv_path, Path)
assert isinstance(topojoin_obj.topo_path, Path)
assert topojoin_obj.csv_key == "fips"
assert topojoin_obj.topo_key == "GEOID"
print(topojoin_obj.topo_key)
def test_topojson_with_relative_paths(topo_path, csv_path, tmp_path):
output_path = tmp_path / "test_joined.json"
relative_topo_path = Path(topo_path).relative_to(Path.cwd())
relative_csv_path = Path(csv_path).relative_to(Path.cwd())
topojoin_obj = TopoJoin(
relative_topo_path, relative_csv_path, topo_key="GEOID", csv_key="fips",
)
topojoin_obj.join(output_path)
file_list = tmp_path.glob("**/*")
file_list = [x for x in file_list if x.is_file()]
assert len(file_list) == 1
assert topojoin_obj.topo_path.absolute() == Path(topo_path).absolute()
assert topojoin_obj.csv_path.absolute() == Path(csv_path).absolute()
def test_error_when_csv_key_not_present(topo_path, csv_path):
""" Test failure when topo_key is not among keys in CSV file """
with pytest.raises(Exception):
TopoJoin(topo_path, csv_path, topo_key="GEOID", csv_key="ducks")
def test_error_when_csv_key_wrong_case(topo_path, csv_path):
""" Test failure when case doesn't match """
with pytest.raises(Exception):
TopoJoin(topo_path, csv_path, topo_key="GEOID", csv_key="FIPS")
def test_error_when_topo_key_not_present(topo_path, csv_path):
""" Test failure when topo_key is not among keys in topojson file """
with pytest.raises(Exception):
TopoJoin(topo_path, csv_path, topo_key="ducks", csv_key="fips")
def test_error_when_topo_key_changed_to_invalid(topo_path, csv_path):
with pytest.raises(Exception):
topojoin_obj = TopoJoin(topo_path, csv_path, topo_key="GEOID", csv_key="fips")
topojoin_obj.csv_key = "ducks"
def test_file_created_after_join(topo_path, csv_path, tmp_path):
output_path = tmp_path / "test_joined.json"
topojoin_obj = TopoJoin(topo_path, csv_path, topo_key="GEOID", csv_key="fips")
topojoin_obj.join(output_path)
file_list = tmp_path.glob("**/*")
file_list = [x for x in file_list if x.is_file()]
assert len(file_list) == 1
def test_null_fields_after_join(topo_path, csv_path_non_matching):
""" Test that Allegheny county feature in joined data has several null properties because there was no Allegheny
County row in provided CSV data."""
topojoin_obj = TopoJoin(
topo_path, csv_path_non_matching, topo_key="GEOID", csv_key="fips",
)
topo_data = topojoin_obj.join()
features = get_topo_features(topo_data)
allegheny_county_props = [
feature["properties"]
for feature in features
if feature["properties"]["NAME"].lower() == "allegheny"
][0]
assert allegheny_county_props["population"] is None
def test_filter_csv_props_when_joining(topo_path, csv_path):
topojoin_obj = TopoJoin(
topo_path, csv_path, topo_key="GEOID", csv_key="fips",
)
topo_data = topojoin_obj.join(csv_props=["population"])
first_feature = get_topo_features(topo_data)[0]
assert not first_feature["properties"].get("name")
def test_failure_when_invalid_filter_csv_prop_provided(topo_path, csv_path):
with pytest.raises(Exception):
topojoin_obj = TopoJoin(
topo_path, csv_path, topo_key="GEOID", csv_key="fips",
)
topojoin_obj.join(csv_props=["population", "duck"])
|
StarcoderdataPython
|
6463473
|
import utils
import logging
import json
import re
import urllib
import random
import counters
import specialcases
from SteamAPI.Users import *
from datastore.models import *
from datetime import datetime, timedelta
from google.appengine.api import mail
from config import *
from bs3.BeautifulSoup import BeautifulSoup
from google.appengine.api import memcache
# TODO: We should be mapping the return codes to variable names...it's nasty to return stuff like "4"
steam_re = r'[\d]{17}'
def get_user(steam_id, stats=None):
"""
Return codes for get_user():
    1 - New user successfully added
    2 - User update succeeded
    3 - New user was private, not added
    4 - Current user, no need for update, successfully returned
5 - Update succeeded, but private profile
6 - Update failed - too soon since last update
7 - Bad Steam ID
"""
# If the user inputs a URL that doesn't have the 64 bit id, then we have to retrieve that
# with a call to Steam to see if it's valid. Since we don't always want to do that,
# we store the "orig_id" (the full url) in memcache, mapped to the actual id if needed.
# Cuts down on the amount of Steam API calls needed per user lookup.
if stats is None:
stats = utils.retrieve_stats()
orig_id = steam_id
id_retrieved = False
cached_id = memcache.get(orig_id)
if cached_id is not None:
id_retrieved = True
steam_id = cached_id
if id_retrieved is False:
steam_match = re.match(steam_re, steam_id)
if steam_match:
steam_id = steam_match.string
else:
if re.match(r'https?://steamcommunity.com/.*', steam_id):
try:
profile = urllib2.urlopen(steam_id)
except ValueError:
return None, 7
soup = BeautifulSoup(profile)
scripts = soup.findAll('script')
found = False
for script in scripts:
text = script.text.strip()
if text[:15] == 'g_rgProfileData':
json_info = json.loads(text[18:-1])
steam_id = json_info['steamid']
found = True
if found is False:
return None, 7
else:
try:
profile = urllib2.urlopen("http://steamcommunity.com/id/%s" % steam_id)
except ValueError:
return None, 7
soup = BeautifulSoup(profile)
scripts = soup.findAll('script')
found = False
for script in scripts:
text = script.text.strip()
if text[:15] == 'g_rgProfileData':
json_info = json.loads(text[18:-1])
steam_id = json_info['steamid']
found = True
if found is False:
return None, 7
memcache.add(orig_id, steam_id)
user = SteamIds.get_by_id(steam_id)
counters.pingpong_incr(queries_counter)
# User already exists, decide what to do
if user:
# If this is true, there have been updates to the db. Update the user, if possible.
if stats.games_last_updated > user.last_updated or stats.hltb_last_updated > user.last_updated:
info_to_update = SteamUsers(steam_id, api_key)
# User profile is invisible. Still update what we have on record, but warn the user
# to update w/public profile.
if info_to_update.visibility is False:
user, rc = _update_user(user, info_to_update, stats)
return user, rc
            # User's profile was visible, fully successful update.
else:
user, rc = _update_user(user, info_to_update, stats)
return user, rc
# Current user, no need for update, just return for display.
else:
return user, 4
else:
user_info = SteamUsers(steam_id, api_key)
# This is not a Steam ID
if user_info.good_id is False:
return None, 7
# This is not a public profile. Can't view.
elif user_info.visibility is False:
return None, 3
        # New user was successfully added. FTW!
else:
user = add_user_to_ndb(user_info, stats)
#increment_steamids()
counters.pingpong_incr(steamids_counter)
return user, 1
def add_user_to_ndb(user_info,stats):
games = user_info.get_games()
total_hours, hours_without_mp, hours = calc_hours(games)
hours_needed_main, hours_needed_complete, game_objs, mp_main, mp_complete = calc_needed(games,stats)
price = utils.calc_value(game_objs)
new_steam_id = SteamIds(visibility=user_info.visibility,steam_id=user_info.get_steam(),
username=user_info.get_username(),profileurl=user_info.get_profileurl(),avatar=user_info.get_avatar(),
games=games.keys(),last_updated=datetime.now(),steam_account_worth=price,
hours_played=total_hours,hours_needed_main=hours_needed_main,
hours_needed_completion=hours_needed_complete,hours=hours,id=user_info.steam_id,hours_without_mp=hours_without_mp,
needed_main_nmp=mp_main,needed_complete_nmp=mp_complete)
new_steam_id.put()
return new_steam_id
def _update_user(user, info_to_update, stats):
if info_to_update.visibility is False:
user.key.delete()
return user, 5
else:
games = info_to_update.get_games()
total_hours, hours_without_mp, hours = calc_hours(games)
hours_needed_main, hours_needed_complete, game_objs, mp_main, mp_complete = calc_needed(info_to_update.get_games(),stats)
price = utils.calc_value(game_objs)
user.games = games.keys()
user.steam_account_worth = price
user.hours_played = total_hours
user.hours_needed_main = hours_needed_main
user.hours_needed_complete = hours_needed_complete
user.needed_main_nmp=mp_main
user.needed_complete_nmp=mp_complete
user.hours = hours
user.hours_without_mp = hours_without_mp
user.last_updated = datetime.now()
user.put()
return user, 2
def update_user(steam_id):
"""
Update users return codes:
    2 - Full, successful update
5 - Private profile, user removed
6 - Update failed. Too soon since last update.
8 - Huh? That user doesn't exist.
"""
stats = utils.retrieve_stats()
user = SteamIds.get_by_id(steam_id)
if user:
if user.last_updated > datetime.now() - timedelta(minutes=1):
return user, 6
else:
info_to_update = SteamUsers(steam_id, api_key)
user, rc = _update_user(user, info_to_update, stats)
return user, rc
else:
return None, 8
def calc_hours(games):
total_hours = 0.0
hours_without_mp = 0.0
hours = []
for game in games:
total_hours += games[game]['hours']
if game not in specialcases.mp_games:
hours_without_mp += games[game]['hours']
hours.append(float(games[game]['hours']))
return total_hours, hours_without_mp, hours
# Refactor the following two methods into one
def calc_needed_update(games,stats):
total_need_main = 0.0
total_need_complete = 0.0
mp_main = 0.0
mp_complete = 0.0
all_games = get_game_objects(games)
for game_info in all_games:
if game_info is not None:
if game_info.appid in specialcases.mp_games:
try:
mp_main += game_info.main
except:
mp_main += stats.average_main
try:
mp_complete += game_info.complete
except:
mp_complete += stats.average_completion
if game_info.main is not None:
total_need_main += game_info.main
else:
                total_need_main += stats.average_main
if game_info.completion is not None:
total_need_complete += game_info.completion
else:
total_need_complete += stats.average_completion
return total_need_main, total_need_complete, all_games, mp_main, mp_complete
def calc_needed(games, stats):
total_need_main = 0.0
total_need_complete = 0.0
mp_main = 0.0
mp_complete = 0.0
not_found = []
all_games = get_game_objects(games.keys())
for game_info in all_games:
if game_info is not None:
            if game_info.appid in specialcases.mp_games:
try:
mp_main += game_info.main
except:
mp_main += stats.average_main
try:
mp_complete += game_info.complete
except:
mp_complete += stats.average_completion
if game_info.main is not None:
total_need_main += game_info.main
else:
total_need_main += stats.average_main
if game_info.completion is not None:
total_need_complete += game_info.completion
else:
total_need_complete += stats.average_completion
return total_need_main, total_need_complete, all_games, mp_main, mp_complete
def get_game_objects(games):
all_keys = []
for game in games:
all_keys.append(ndb.Key('Games_DB', str(game)))
return ndb.get_multi(all_keys)
|
StarcoderdataPython
|
308981
|
<reponame>foreignbill/eoj3
from rest_framework.views import APIView
from rest_framework.response import Response
from account.models import User
from problem.models import Problem
from account.permissions import is_admin_or_root
from django.db.models import Q
from django.urls import reverse
from functools import reduce
from operator import or_
def query_user(kw):
results = list()
if kw and len(kw) >= 3:
for user in User.objects.filter(username__icontains=kw, is_active=True).all().only('username')[:5]:
results.append(dict(title=user.username, url=reverse('generic', kwargs=dict(pk=user.pk))))
return dict(name='User', results=results)
def get_problem_q_object(kw, all=False, managing=None):
if kw:
q_list = list()
if len(kw) >= 2:
q_list.append(Q(title__icontains=kw))
if kw.isdigit():
q_list.append(Q(pk__exact=kw))
if q_list:
q = reduce(or_, q_list)
if not all:
q &= Q(visible=True)
if managing:
q |= Q(managers=managing)
return q
return None
def query_problem(kw, all=False):
results = list()
q = get_problem_q_object(kw, all)
if q:
for problem in Problem.objects.filter(q).distinct().all()[:5]:
results.append(dict(title=str(problem),
url=reverse('problem:detail', kwargs=dict(pk=problem.pk))))
return dict(name='Problem', results=results)
class SearchAPI(APIView):
def get(self, request):
kw = request.GET.get('kw')
results = dict()
if kw:
results['user'] = query_user(kw)
results['problem'] = query_problem(kw, all=is_admin_or_root(request.user))
return Response(dict(results=results))
class SearchUserAPI(APIView):
def get(self, request):
kw = request.GET.get('kw')
results = list()
if kw:
for user in User.objects.filter(username__icontains=kw, is_active=True).all().only('username', 'pk')[:5]:
results.append(dict(name=user.username, value=user.pk))
return Response(dict(success=True, results=results))
class SearchProblemAPI(APIView):
def get(self, request):
kw = request.GET.get('kw')
managing = request.user if request.GET.get('managing') else None
results = list()
q = get_problem_q_object(kw, is_admin_or_root(request.user), managing)
if q:
for problem in Problem.objects.filter(q).distinct().all()[:5]:
results.append(dict(name=str(problem), value=problem.pk))
return Response(dict(success=True, results=results))
|
StarcoderdataPython
|
3549394
|
import urllib2
import logging
from lxml import html
# Default logger
logger = logging.getLogger('spotify2piratebay')
def fetch_url(url):
""" Fetches a URL and returns contents - use opener to support HTTPS. """
# Fetch and parse
logger.debug(u'Fetching %s', url)
# Use urllib2 directly for enabled SSL support (LXML doesn't by default)
try:
timeout = 30
opener = urllib2.urlopen(url, None, timeout)
        # Fetch HTTP data in one batch, as handing the 'file-like' object to
# lxml results in thread-locking behaviour.
htmldata = opener.read()
except urllib2.URLError:
# Probably a timeout, try again
htmldata = fetch_url(url)
return htmldata
def parse_url(url):
"""
Return lxml-parsed HTML for given URL or None when HTTP request failed.
Uses urllib2 directly and fetches as string before parsing as to prevent
thread locking issues.
"""
htmldata = fetch_url(url)
# No data, return None
if not htmldata:
return None
# Parse
logger.debug(u'Parsing HTML for %s', url)
parsed = html.fromstring(htmldata, base_url=url)
# Make all links in the result absolute
parsed.make_links_absolute(url)
return parsed
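# Hedged usage sketch: fetch a page and pull out its links with an XPath query.
# The URL below is illustrative only; parse_url returns an lxml element tree whose
# href values have already been made absolute by make_links_absolute().
def example_extract_links(url='https://example.com/'):
    tree = parse_url(url)
    if tree is None:
        return []
    return tree.xpath('//a/@href')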
|
StarcoderdataPython
|
3497452
|
<reponame>SubTheSandwich/YouTube-to-Mp3
#hi!
import os
import subprocess
from pydub import AudioSegment
url = input("Please enter your url: ")
ask = str(input("File name?"))
def main():
try:
if os.path.exists('file.m4a'):
os.remove('file.m4a')
subprocess.call(['youtube-dl', '-f', '140', url, '-o', 'file.m4a'])
sound = AudioSegment.from_file('file.m4a')
sound.export("./files/" + ask + '.mp3', format="mp3", bitrate="128k")
print("Your video has been successfully downloaded and converted to an mp3.")
if os.path.exists('file.m4a'):
os.remove('file.m4a')
except Exception as error:
print("An unknown error occurred while performing this process: {}".format(error))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8163053
|
from multiprocessing import Process, Pipe
from random import random
import matplotlib.pyplot as plot
from control import TransferFunction, feedback, step_response, series, step_info
default_generations = 150
default_population = 50
default_crossover = 0.6
default_mutation = 0.25
F = TransferFunction(1, [1, 6, 11, 6, 0])
T = []
count = 0
while count < 100:
T.append(count)
count += 0.01
# Close the loop around the plant F with a PID controller (gains Kp, Ti, Td) and
# return the integral of squared error plus rise time, settling time and overshoot.
def compute_ise(Kp, Ti, Td):
g = Kp * TransferFunction([Ti * Td, Ti, 1], [Ti, 0])
sys = feedback(series(g, F), 1)
sys_info = step_info(sys)
_, y = step_response(sys, T)
ise = sum((y - 1) ** 2)
t_r = sys_info['RiseTime']
t_s = sys_info['SettlingTime']
m_p = sys_info['Overshoot']
return ise, t_r, t_s, m_p
def genetic_algorithm(pipe, generation_count, population_size, crossover_prob, mutate_prob):
parents = [[8.0, 4.0, 0.5], [12.0, 6.0, 1.5]]
best_factor, best_tr, best_ts, best_mp = 0.0, 0.0, 0.0, 0.0
best_factor_list = []
i = 0
while i < generation_count:
children = []
j = 0
while j < population_size / 2 - 1:
child_1 = parents[0].copy()
child_2 = parents[1].copy()
crossover(crossover_prob, child_1, child_2)
mutate(mutate_prob, child_1)
mutate(mutate_prob, child_2)
children.append(child_1)
children.append(child_2)
j += 1
parents.clear()
factor_total = 0.0
factor_list = []
for child in children:
try:
ise, t_r, t_s, m_p = compute_ise(child[0], child[1], child[2])
factor = 1 / ise
except IndexError:
factor, t_r, t_s, m_p = 0.0, 0.0, 0.0, 0.0
factor_list.append(factor)
factor_total += factor
if factor > best_factor:
best_factor = factor
best_tr = t_r
best_ts = t_s
best_mp = m_p
k = 0
while len(parents) < 2:
if factor_list[k] > random() * factor_total:
parents.append(children[k])
k = (k + 1) % len(children)
best_factor_list.append(best_factor)
i += 1
pipe.send(best_factor_list)
pipe.close()
print("For gen={}, pop={}, cross={}, mut={} -> factor={}, t_r={}, t_s={}, m_p={}"
.format(generation_count, population_size, crossover_prob, mutate_prob,
best_factor, best_tr, best_ts, best_mp))
def crossover(probability, child_1, child_2):
coin_toss = 0.5
if random() < probability:
if random() < coin_toss:
child_1[0], child_2[0] = child_2[0], child_1[0]
if random() < coin_toss:
child_1[1], child_2[1] = child_2[1], child_1[1]
if random() < coin_toss:
child_1[2], child_2[2] = child_2[2], child_1[2]
def mutate(probability, child):
if random() < probability:
child[0] = round(random() * (18.00 - 2.01) + 2.01, 2)
if random() < probability:
child[1] = round(random() * (9.42 - 1.06) + 1.06, 2)
if random() < probability:
child[2] = round(random() * (2.37 - 0.27) + 0.27, 2)
def graph(name, values):
proc, parent, legend = [], [], []
for v in values:
p, c = Pipe()
parent.append(p)
switch = {
"main": (c, default_generations, default_population, default_crossover, default_mutation),
"generation": (c, v, default_population, default_crossover, default_mutation),
"population": (c, default_generations, v, default_crossover, default_mutation),
"crossover": (c, default_generations, default_population, v, default_mutation),
"mutation": (c, default_generations, default_population, default_crossover, v)
}
proc.append(Process(target=genetic_algorithm, args=switch.get(name)))
legend.append("{} = {}".format(name, v))
for p in proc:
p.start()
for p in proc:
p.join()
for p in parent:
plot.plot(p.recv())
if values[0] is not None:
plot.legend(legend, loc='lower right')
plot.ylabel("Fitness")
plot.xlabel("Generation")
plot.savefig("{}.png".format(name), bbox_inches='tight')
plot.close()
if __name__ == '__main__':
graph("main", [None])
graph("generation", [10, 25, 50, 100, 150])
graph("population", [10, 20, 30, 40, 50])
graph("crossover", [0.2, 0.4, 0.6, 0.8])
graph("mutation", [0.1, 0.25, 0.4, 0.65, 0.9])
|
StarcoderdataPython
|
11341048
|
<filename>brainfrick/__init__.py
from .__main__ import __version__
from .interpreter import Interpreter
def main(argv):
file = " ".join(argv)
bf = Interpreter(file=file)
bf.run()
|
StarcoderdataPython
|
3398483
|
from sklearn.metrics import confusion_matrix, roc_auc_score, roc_curve, auc
def get_metrics(predicted_label, labels, predicted_score=None, is_binary_task=True):
if is_binary_task:
tp, fp, fn, tn = get_confusion_matrix(predicted_label, labels)
roc_auc = roc_auc_score(y_true=labels, y_score=predicted_label)
if predicted_score is not None:
fpr, tpr, thresholds = roc_curve(labels, predicted_score, pos_label=1)
pr_auc = auc(fpr, tpr)
else:
pr_auc = 0.0
else:
tp, fp, fn, tn = 0, 0, 0, 0
rets = confusion_matrix(y_true=labels, y_pred=predicted_label)
for i, ret in enumerate(rets):
for j, item in enumerate(ret):
if i == j:
tp += item
else:
fn += item
fp += item
roc_auc = 0.0
pr_auc = 0.0
accuracy = 0.0 if (tp + fp + fn + tn) == 0 else (tp + tn) / (tp + fp + fn + tn)
precision = 0.0 if (tp + fp) == 0 else tp / (tp + fp)
recall = 0.0 if (tp + fn) == 0 else tp / (tp + fn)
f1 = 0.0 if (precision + recall) == 0 else 2 * precision * recall / (precision + recall)
return {'tp': tp, 'fp': fp, 'fn': fn, 'tn': tn, 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'pr_auc': pr_auc, 'roc_auc': roc_auc}
def get_confusion_matrix(y_pred, y_true):
tp, fp, fn, tn = 0, 0, 0, 0
for yt, yp in zip(y_true, y_pred):
if yt == 1 and yp == 1:
tp += 1
elif yt == 1 and yp == 0:
fn += 1
elif yt == 0 and yp == 1:
fp += 1
elif yt == 0 and yp == 0:
tn += 1
return tp, fp, fn, tn
def get_print_keys():
return ['f1', 'precision', 'recall', 'accuracy', 'pr_auc', 'roc_auc', 'tp', 'fp', 'fn', 'tn']
if __name__ == '__main__':
# tp: 2, tn: 1, fp:4 fn: 3
y_true = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = [0, 1, 1, 1, 1, 0, 0, 0, 1, 1]
y_pred_scores = [0.1, 0.6, 0.65, 0.9, 0.78, 0.21, 0.13, 0.45, 0.95, 0.7]
rets = get_metrics(y_pred, y_true, y_pred_scores)
print(rets)
assert rets['tp'] == 2
assert rets['tn'] == 1
assert rets['fp'] == 4
assert rets['fn'] == 3
|
StarcoderdataPython
|
6591544
|
# Generated by Django 3.2.6 on 2021-09-13 09:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('board', '0006_alter_store_user'),
]
operations = [
migrations.RemoveField(
model_name='store',
name='picture',
),
migrations.AlterField(
model_name='menu',
name='store',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='menu', to='board.store'),
),
migrations.AlterField(
model_name='menuimg',
name='menu',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='menu_img', to='board.menu'),
),
migrations.AlterField(
model_name='review',
name='store',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='store', to='board.store'),
),
migrations.AlterField(
model_name='review',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='reviewimg',
name='review',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_img', to='board.review'),
),
migrations.AlterField(
model_name='store',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='store', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='storeimg',
name='store',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='store_img', to='board.store'),
),
migrations.AlterField(
model_name='tag',
name='review',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tag', to='board.review'),
),
]
|
StarcoderdataPython
|
11380640
|
<filename>Command Line Programs/mapIt.py
#! python3
# mapIt.py
# Launches a map in the browser using an address
# from the command line or clipboard
import webbrowser, sys, logging, pyperclip
if len(sys.argv) > 1:
# From command line
address = ' '.join(sys.argv[1:])
else:
# From clipboard
address = pyperclip.paste()
# Open browser
webbrowser.open('https://www.google.com/maps/place/' + address)
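# Hedged usage sketch (shell invocations shown as comments; the street address is
# illustrative only):
#   python mapIt.py 870 Valencia St, San Francisco, CA 94110
#   python mapIt.py            # with no arguments, the clipboard contents are used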
|
StarcoderdataPython
|
5169837
|
<reponame>jaswinder9051998/Resources<gh_stars>100-1000
#Count files with a .py extension in root1 directory and its subdirectories
#This solution works for the previous exercise as well with one file in a directory
import glob
file_list = glob.glob("subdirs/**/*.py", recursive=True)
print(len(file_list))
|
StarcoderdataPython
|
11269382
|
# Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import traceback
from lcm.pub.database.models import FPInstModel, VNFFGInstModel
from lcm.pub.exceptions import NSLCMException
from lcm.pub.msapi import extsys, resmgr, sdncdriver
logger = logging.getLogger(__name__)
class DeleteSfcs(object):
def __init__(self, sfc_inst_id):
self.sfc_inst_id = sfc_inst_id
self.ns_inst_id = ""
def do(self):
try:
sfc_inst_info = FPInstModel.objects.filter(fpinstid=self.sfc_inst_id)
if not sfc_inst_info:
logger.warn("sfc inst id(%s) is not exist or has been already deleted" % self.sfc_inst_id)
return {"result": 0, "detail": "sfc is not exist or has been already deleted"}
self.ns_inst_id = sfc_inst_info[0].nsinstid
self.delete_sfc_from_driver(sfc_inst_info[0])
self.delete_sfc_from_resmgr()
self.delete_sfc_from_db(sfc_inst_info)
return {"result": 0, "detail": "delete sfc success"}
except NSLCMException as e:
return self.exception_handle(e)
except Exception as e:
logger.error(traceback.format_exc())
return self.exception_handle(e)
def exception_handle(self, e):
detail = 'sfc delete failed, detail message: %s' % e.args[0]
logger.error(detail)
return {"result": 1, "detail": detail}
def delete_sfc_from_driver(self, sfc_inst_info):
sdn_controller_id = sfc_inst_info.sdncontrollerid
sdn_controller_url = extsys.get_sdn_controller_by_id(sdn_controller_id)["url"]
sfc_id = sfc_inst_info.sfcid
flow_classifiers = sfc_inst_info.flowclassifiers
port_pair_groups = sfc_inst_info.portpairgroups
if sfc_id:
req_param = {"sdnControllerId": sdn_controller_id, "url": sdn_controller_url, "id": sfc_id}
sdncdriver.delete_port_chain(req_param)
if flow_classifiers:
for flow_id in flow_classifiers.split(","):
req_param = {"sdnControllerId": sdn_controller_id, "url": sdn_controller_url, "id": flow_id}
sdncdriver.delete_flow_classifier(req_param)
if port_pair_groups:
for group in json.JSONDecoder().decode(port_pair_groups):
group_id = group["groupid"]
req_param = {"sdnControllerId": sdn_controller_id, "url": sdn_controller_url, "id": group_id}
sdncdriver.delete_port_pair_group(req_param)
port_pair = group["portpair"]
for port_pair_id in port_pair:
req_param = {"sdnControllerId": sdn_controller_id, "url": sdn_controller_url, "id": port_pair_id}
sdncdriver.delete_port_pair(req_param)
def delete_sfc_from_db(self, sfc_inst_info):
# do_biz_with_share_lock("delete-sfclist-in-vnffg-%s" % self.ns_inst_id, self.delete_sfc_inst_id_in_vnffg)
self.delete_sfc_inst_id_in_vnffg()
sfc_inst_info.delete()
def delete_sfc_from_resmgr(self):
resmgr.delete_sfc(self.sfc_inst_id)
def delete_sfc_inst_id_in_vnffg(self):
for vnffg_info in VNFFGInstModel.objects.filter(nsinstid=self.ns_inst_id):
new_sfc_id_list = ""
for old_sfc_id in vnffg_info.fplist.split(","):
if old_sfc_id != self.sfc_inst_id:
new_sfc_id_list += old_sfc_id + ","
new_sfc_id_list = new_sfc_id_list[:-1]
VNFFGInstModel.objects.filter(vnffginstid=vnffg_info.vnffginstid).update(fplist=new_sfc_id_list)
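# Hedged usage sketch: the class is intended to be driven with a single call, for
# example from a view or task handler. The instance id below is illustrative only.
def example_delete_sfc(sfc_inst_id='sfc-instance-0001'):
    result = DeleteSfcs(sfc_inst_id).do()
    # 'result' is a dict such as {"result": 0, "detail": "delete sfc success"}.
    return result['result'] == 0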
|
StarcoderdataPython
|
8182875
|
<filename>yolo3/det_visualizer.py
import os
import cv2
import uuid
import numpy as np
import shutil
from tensorflow.keras.callbacks import Callback
from .models import yolo_anchors, yolo_anchor_masks, yolo_tiny_anchors, yolo_tiny_anchor_masks
from .convert import make_eval_model_from_trained_model
from .utils import draw_outputs, cv2_letterbox_resize
class DetVisualizer(Callback):
def __init__(self, dataset, result_dir='dets', num_batches=64, tiny=True):
self.result_dir = result_dir
self.dataset = dataset
self.num_batches = num_batches
self.tiny = tiny
super(DetVisualizer, self).__init__()
def on_train_begin(self, logs=None):
# Start every training run with an empty results directory.
if os.path.exists(self.result_dir):
shutil.rmtree(self.result_dir, ignore_errors=True)
os.makedirs(self.result_dir)
def on_epoch_end(self, epoch, logs=None):
if self.tiny:
anchors = yolo_tiny_anchors
masks = yolo_tiny_anchor_masks
else:
anchors = yolo_anchors
masks = yolo_anchor_masks
model = make_eval_model_from_trained_model(self.model, anchors, masks)
epoch_dir = os.path.join(self.result_dir, str(epoch))
os.makedirs(epoch_dir)
for batch, (images, labels) in enumerate(self.dataset):
images = images.numpy()
for i in range(images.shape[0]):
boxes, scores, classes = model.predict(images[i:i + 1, ...])
img_for_this = (images[i, ...] * 255).astype(np.uint8)
boxes_for_this, scores_for_this, classes_for_this = boxes[0, ...], scores[0, ...], classes[0, ...]
img_for_this = draw_outputs(img_for_this, (boxes_for_this, scores_for_this, classes_for_this))
cv2.imwrite(os.path.join(epoch_dir, '{0}.jpg'.format(uuid.uuid4())), img_for_this)
if batch == self.num_batches:
break
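# Hedged usage sketch: DetVisualizer plugs into a normal Keras training loop as a
# callback. 'yolo_model', 'train_dataset' and 'val_dataset' are assumed to be built
# elsewhere with this repository's helpers.
def example_training(yolo_model, train_dataset, val_dataset):
    visualizer = DetVisualizer(val_dataset, result_dir='dets', num_batches=8, tiny=True)
    yolo_model.fit(train_dataset, epochs=10, callbacks=[visualizer])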
|
StarcoderdataPython
|
1783649
|
<gh_stars>1-10
import abc
from .. import exceptions
from .. import contextManagers
## @todo Should we just assume all validation of supplied entity references,
## etc is taken care of in the Manager/Entity abstraction, to avoid doubling
## the work, if a host already tests whether or not something is a ref before
## acting upon it.
__all__ = [ 'ManagerInterfaceBase', ]
class ManagerInterfaceBase(object):
"""
@brief This Interface binds a @ref asset_management_system into the
Asset API. It is not called directly by a @ref host, but by the
middle-ware that presents a more object-oriented model of it to the @ref
host - namely, the @ref python.Manager.Manager and @ref
python.Entity.Entity classes.
It is structured around the following principals:
@li The currency of the API is either data, or an @ref entity_reference.
Objects should not be used to represent an Entity or its properties.
@li All calls should be atomic, to facilitate concurrency (see the @ref
threading section for more details).
@li The implementation of this class should have no UI dependencies, so
that it can be used in command-line only hosts/batch process etc...
Logging and Error Handling
--------------------------
The @ref python::logging module provides basic logging functionality.
It is guaranteed that this will be well mapped to the most appropriate
message handler for the current application/environment.
This module should be used for *any* logging that the user may want to see.
You may use other mechanisms for any internal logging, that is not meaningful
to the user of the system.
@see python::logging
Exceptions should be thrown to handle any in-flight errors that occur.
The error should be mapped to a derived class of
python.exceptions.BaseException, and thrown. All exceptions of this kind,
will be correctly passed across the plug-in C boundary, and re-thrown. Other
exceptions should not be used.
@see python::exceptions
Threading
---------
Any implementation of the ManagerInterfaceBase should be thread safe. The one
exception being Manager::initialize, this will never be called
concurrently.
When a @ref python.Context.Context object is constructed by @ref
python.Session.Session.createContext, the createState method will be called,
and the resulting state object stored in the context.
This context will then be re-used across related API calls to the
ManagerInterfaceBase implementation. You can use this to determine which
calls may be part of a specific 'action' in the Host, or to store any
transaction-persistent data.
One exception to the threading rule is that the transaction managing functions
won't be called from multiple threads with the same transaction object.
There should be no persistent state in the implementation, concepts such
as getError(), etc.. for example should not be used.
Context
-------
The Context object is passed to many methods of this class. Though in the
majority of situations this will be well defined, it should not cause an
error if this is ever None.
Initialization
--------------
The constructor makes a new instance, but at this point it is not ready for use.
The implementation shouldn't do any work in the constructor, so that it
is cheap to create one. This means that only the informational
methods need to be available until initialize() has been called.
@todo Finish/Document settings mechanism.
@see initialize()
"""
__metaclass__ = abc.ABCMeta
# Test harness methods.
#
# It is difficult to derive generic tests for the API, as they need sample
# entity references to use in the calls.
#
# As such, we introduce optional methods here, that are required to support
# the various tests in test/TestCases/Core. If these are not implemented, then
# you will not be able to test the implementation using these tests.
# def _test_getReference(self, specification):
# """
#
# Returns an entityReference that can be used for testing purposes, the
# specification will be a TestEntitySpecification. The current test can be
# queried using specification.testName(). Some tests require a reference to
# an existing entity, so specification.shouldExist() should be respected.
# Additionally specification.embeddable() can be queried to determine if the
# ref will be used in isolation, or may be embedded in a more complex
# string.
#
# """
# def _test_cleanup(self, references)
# """
#
# Called by the test harness to clean up any references that it requested.
# This is called after any test that has requested a reference completes.
# You could use this to remove any temporary database entries, etc... that
# were necessary to satisfy a request for a reference to an 'existing'
# asset.
#
# """
##
# @name Asset Management System Information
#
# These functions provide general information about the @ref asset_management_system itself.
#
# @{
@abc.abstractmethod
def getIdentifier(self):
"""
Returns an identifier to uniquely identify a specific asset manager.
This may be used by a Host to persist the user's preferred manager via a
preferences mechanism, or when spawning child processes, etc...
It should match the name used to register the plug-in with the plug-in host.
The identifier should use only alpha-numeric characters and '.', '_' or '-'.
For example:
"uk.co.foundry.asset.testAssetManager"
@return str
"""
raise NotImplementedError
@abc.abstractmethod
def getDisplayName(self):
"""
Returns a human readable name to be used to reference this specific
asset manager.
One instance of its use may be in a Host's Preferences UI or logging.
For example:
"The Foundry Test Asset Manager"
"""
raise NotImplementedError
def getInfo(self):
"""
Returns other information that may be useful about this @ref
asset_management_system. This can contain arbitrary key/value pairs. For
example:
{ 'version' : '1.1v3',
'server' : 'am.thefoundry.co.uk' }
There are certain optional keys that may be used by a host or the API:
@li FnAssetAPI.constants.kField_SmallIcon (up to 32x32)
@li FnAssetAPI.constants.kField_Icon (any size)
Because it can often be expensive to bridge between languages, info can
also contain one of two additional fields - a prefix, or perl regex
compatible string to identify a valid entity reference. Only one should be
set at once. If supplied, this may be used by the API to optimise calls to
isEntityReference when bridging between C/Python etc... can be slow. If
neither of these fields are set, then isEntityReference will always be used
to determine if a string is an entityReference or not. Note, not all hosts
support this optimisation, so @ref isEntityReference should be implemented
regardless. @ref containsEntityReferences will always be called regardless.
@li FnAssetAPI.constants.kField_EntityReferencesMatchPrefix
@li FnAssetAPI.constants.kField_EntityReferencesMatchRegex
@note Keys should always be UTF-8 stings, and values must be
plain-old-data types (ie: str, int, float, bool).
"""
return {}
def localizeStrings(self, stringDict):
"""
This call gives the Host a chance to customise certain strings used in its
UI/messages. @see python.constants for known keys. The values in stringDict
can be freely updated to match the terminology of the asset management
system you are representing.
For example, you may want a Host's "Publish Clip" menu item to read "Release
Clip", so you would set the kLocalizationKey_Publish value to "Release".
@return None
@see @ref python.constants
@see @ref python.Session.Session.__terminology
"""
pass
## @}
##
# @name Initialization
#
## @{
def getSettings(self):
return {}
def setSettings(self, settings):
pass
@abc.abstractmethod
def initialize(self):
"""
Prepares for interaction with a Host.
This is a good opportunity to initialize connections to a back end
implementation, as @ref setSettings will have already been called (if
applicable). This may result in this call blocking for a period of time.
If an exception is raised by this call, it is safe to assume that a fatal
error occurred, and this @ref asset_management_system is not available, and
should be retried later.
If no exception is raised, it can be assumed that the @ref asset_management_system is
ready. It is the implementation's responsibility to deal with transient
connection errors (if applicable) once initialized.
The behaviour of calling initialize() on an already initialized
Manager should be a no-op, but if an error was raised previously, then
initialization should be re-attempted.
@note This will always be called prior to any Entity-related calls.
An exception should be raised if this is not the case. The following
functions may, however, be called prior to initialization:
@li @ref getIdentifier()
@li @ref getDisplayName()
@li @ref getInfo()
@li @ref localizeStrings()
@li @ref getSettings()
@li @ref setSettings()
@todo We need a 'teardown' method too, called before a manager is de-activated
in a host, to allow any event registrations etc... to be removed.
"""
raise NotImplementedError
def prefetch(self, entityRefs, context):
"""
Called by a Host to express interest in the supplied @ref
entity_reference list. This usually means that the Host is about to make
multiple queries to the same references. This can be left unimplemented,
but it is advisable to batch request the data for resolveEntityReference,
getEntityMetadata here if possible to minimize server load.
The implementation should ignore any unrecognised strings, or any entities
to which no action is applicable (maybe as they don't exist yet).
@warning Because the majority of the resolution API itself is designated
thread safe, it is important to implement any pre-fetch mechanism with
suitable locks/etc... if required.
@param context python.contexts.Context, may be None, but if present, you
may wish to make use of the managerInterfaceState object (if you supplied
one on construction of the context), to simplify scoping any caching of
data. Otherwise, it's up to you how to manage the lifetime of the data to
avoid inconsistencies, but the @ref flushCaches method should clear any
otherwise stored data for this call.
@return None
"""
pass
def flushCaches(self):
"""
Clears any internal caches. Only applicable if the implementation makes
use of any caching, otherwise it is a no-op. In caching interfaces, this
should cause any retained data to be discarded to ensure future queries are
fresh. This should have no effect on any open @ref transaction.
"""
pass
## @}
##
# @name Entity Reference inspection
#
# Because of the nature of an @ref entity_reference, it is often
# necessary to determine if some working string is actually an @ref
# entityReference or not, to ensure it is handled correctly.
#
# @{
@abc.abstractmethod
def isEntityReference(self, token, context):
"""
Determines if a supplied token (in its entirety) matches the pattern of
an @ref entity_reference.
It does not verify that it points to a valid entity in the system,
simply that the pattern of the token is recognised by this implementation.
If this returns True, the token is an @ref entity_reference and should
be considered as a managed entity. Consequently, it should be resolved
before use. It also confirms that it can be passed to any other
method that requires an @ref entity_reference.
If false, this manager should no longer be involved in actions relating
to the token.
@warning The result of this call should not depend on the context Locale,
as these results may be cached by access pattern.
@param token The string to be inspected.
@param context, The calling context, this may be None.
@return bool, True if the supplied token should be considered as an @ref
entityReference, False if the pattern is not recognised.
@note This call does not verify the entity exists, just that the format of
the string is recognised.
@see entityExists()
@see resolveEntityReference()
"""
raise NotImplementedError
@abc.abstractmethod
def containsEntityReference(self, string, context):
"""
Determines if the string contains a @ref entity_reference.
There may be occasion to operate on a more complex input string, that
combines one or more @ref entity_reference and free-form text.
For example, the following strings should cause isEntityReference()
to return false, but this function to return true:
@li `{fnasset://job?t=scriptsDir}/fileInDirectory.nk`
@li `source fnasset://myScript?v=1 fnasset://myOtherScript?v=2`
Positive matches here inform the Host that the string may need to
be resolved using resolveEntityReferences() prior to use.
@param string The input to parse for @ref entity_reference occurrences
@return bool, True if one or more @ref entity_reference is found within the
string, otherwise False.
@param context, The calling context, this may be None.
@warning The result of this call should not depend on the context Locale,
as these results may be cached by access pattern.
@note This call does not verify that any of the referenced entities exit,
just that the string contains one or more @ref entity_reference.
@see resolveEntityReferences()
"""
raise NotImplementedError
@abc.abstractmethod
def entityExists(self, entityRef, context):
"""
Called to determine if the supplied @ref entity_reference points to an
Entity that exists in the @ref asset_management_system, and that it can be
resolved into a meaningful string.
By 'Exist' we mean 'is ready to be read'. For example, entityExists may be
called before attempting to read from a reference that is believed to point
to an image sequence, so that alternatives can be found.
In the future, this may need to be extended to cover a more complex
definition of 'existence' (for example, known to the system, but not yet
finalized). For now however, it should be assumed to simply mean, 'ready to
be consumed', and if only a placeholder or un-finalized asset is available,
False should be returned.
The supplied context's locale may contain information pertinent to
disambiguating this subtle definition of 'exists' in some cases too, as it
better explains the use-case of the call.
@return bool, True if it points to an existing entity, False if the Entity
is not known or ready yet.
@exception python.exceptions.InvalidEntityReference If the input string is
not a valid entity reference.
"""
raise NotImplementedError
## @}
##
# @name Entity Reference Resolution
#
# The concept of resolution is turning an @ref entity_reference into a
# 'finalized' string. This, ultimately, is anything meaningful to the
# situation. It could be a colour space, a directory, a script or image
# sequence. A rule of thumb is that a resolved @ref entity_reference
# should be the string that the application would have anyway, in a
# unmanaged environment. For some kind of Entity - such as a 'Shot', for
# example, there may not be a meaningful string, though often some sensible
# value can be returned.
#
# @{
@abc.abstractmethod
def resolveEntityReference(self, entityRef, context):
"""
Returns the 'finalized' string represented by the @ref entity_reference.
When the @ref entity_reference points to a sequence of files, the frame
token should be preserved, and in the sprintf compatible syntax.
This function should attempt to take into account the current Host/Context
to ensure that any other substitution tokens are presented in a suitable
form. The Context should also be carefully considered to ensure that the
access does not violate any rules of the system - for example, resolving an
existing entity reference for write.
The caller should have first called isEntityReference() on the supplied
string.
@note You may need to call getFinalizedEntityVersion() within this function
to ensure any @ref meta_versions are resolved prior to resolution.
@return str, The UTF-8 ASCII compatible string that that is represented by
the reference.
@exception python.exceptions.InvalidEntityReference If the supplied @ref
entity_reference is not known by the asset management system.
@exception python.exceptions.EntityResolutionError If the supplied @ref
entity_reference does not have a meaningful string representation, or it is
a valid reference format, that doesn't exist.
@exception python.exceptions.InvalidEntityReference if the supplied
entity_reference should not be resolved for that context, for example, if
the context access is kWrite and the entity is an existing version -
raising will ensure that a Host will not attempt to write to that location.
@see entityExists()
@see isEntityReference()
@see resolveEntityReferences()
"""
raise NotImplementedError
def resolveEntityReferences(self, references, context):
"""
Batch-resolves a list of entityReferences, following the same pattern as
@ref resolveEntityReference.
@return list, A list of strings, corresponding to the source reference
with the same index.
This will be called by hosts when they wish to batch-resolve many
references with an eye to performance, or server hits, and so should be
re-implemented to minimise the number of queries over a standard 'for'
loop.
The base class implementation simply calls resolveEntityReference
repeatedly for each supplied reference.
"""
resolved = []
for r in references:
resolved.append(self.resolveEntityReference(r, context))
return resolved
@abc.abstractmethod
def resolveInlineEntityReferences(self, string, context):
"""
Returns a copy of the input string with all references resolved in-place.
The same rules of resolution apply for each @ref entity_reference in the
input string as noted in resolveEntityReference().
If no entity references are present, the input string should be returned.
@return str
@exception python.exceptions.InvalidEntityReference If any supplied
@ref entity_reference is not recognised by the asset management system.
@exception python.exceptions.EntityResolutionError If any supplied @ref
entity_reference does not have a meaningful string representation, or any
supplied reference points to a non-existent entity.
@see resolveEntityReference()
@see containsEntityReference()
"""
raise NotImplementedError
def getDefaultEntityReference(self, specification, context):
"""
Returns an @ref entity_reference considered to be a sensible default for
the given Specification and Context. This is often used in a host to ensure
dialogs, prompts or publish locations default to some sensible value,
avoiding the need for a user to re-enter such information when a Host is
being run in some known environment.
For example, a Host may request the default ref for
'ShotSpecification/kWriteMultiple'. If the Manager has some concept of the
'current sequence' it may wish to return this so that a 'Create Shots'
action starts somewhere meaningful.
@return str, A valid entity reference, or empty string.
"""
return ''
## @}
##
# @name Entity information
#
# There are several common requests for basic, generic information about
# an entity that is assumed to be valid for all entity types.
#
# This suite of methods query information for a supplied @ref
# entity_reference.
#
# @see @ref metadata
#
# @{
@abc.abstractmethod
def getEntityName(self, entityRef, context):
"""
Returns the name of the entity itself, not including any hierarchy or
classification.
For example:
@li `"1"` - for a version of an asset
@li `"seq003"` - for a sequence in a hierarchy
@return str, An UTF-8 ASCII string containing any valid characters for the
manager's implementation.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
"""
raise NotImplementedError
@abc.abstractmethod
def getEntityDisplayName(self, entityRef, context):
"""
Returns an unambiguous, humanised display name for the entity.
The display name may consider the Host, and any other relevant Context
information to form a display name for an entity that can uniquely
identify the entity in that context.
For example:
@li `"dive / build / cuttlefish / model / v1"` - for a version of an
asset in an 'open recent' menu.
@li `"Sequence 003 [ Dive / Episode 1 ]"` - for a sequence in
a hierarchy as a window title.
@return str, an ASCII string containing any valid characters for the
@ref asset_management_system's implementation.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
"""
raise NotImplementedError
@abc.abstractmethod
def getEntityMetadata(self, entityRef, context):
"""
Retrieve @ref metadata for an entity.
It may be required here to bridge between certain perhaps 'first-class'
properties of the asset management system in question, and keys in the
metadata dictionary. For example, if the asset system represents a 'Shot'
with 'cutIn' and 'cutOut' properties or accessors, these should be remapped to the
@ref python.kField_FrameIn/Out metadata keys as appropriate.
@warning See @ref setEntityMetadata for important notes on metadata and its
role in the system.
@return dict, with the entities meta-data. Values must be P.O.D types, keys
must be UTF-8 ASCII strings.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
"""
raise NotImplementedError
@abc.abstractmethod
def setEntityMetadata(self, entityRef, data, context, merge=True):
"""
Sets an entities metadata.
@param merge, bool If true, then the entity's existing metadata will be
merged with the new data (the new data taking precedence). If false,
its metadata will be entirely replaced by the new data.
@note It is vital that the implementation faithfully stores and recalls
metadata. It is the underlying binding to any stronger Entity types within
this API, that simply wrap the metadata dictionary to allow hosts a more
sophisticated interaction. Specific key names and value types should be
maintained. To ensure compatibility with entities created by other
facilities of the asset system, it may also be necessary to bridge data
between its native representation in the system, and well-known keys here,
based on the Entity's type.
If any value is 'None' it should be assumed that that key should be un-set
on the object.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
@exception ValueError if any of the metadata values are of an un-storable
type. Presently it is only required to store str, float, int, bool
@exception KeyError if any of the metadata keys are non-strings.
"""
raise NotImplementedError
def getEntityMetadataEntry(self, entityRef, key, context, defaultValue=None):
"""
Returns the value for the specified metadata key.
@param key str, The key to look up
@param defaultValue p.o.d If not None, this value will be returned in the
case of the specified key not being set for the entity.
@return p.o.d, The value for the specific key.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
@exception KeyError If no defaultValue is supplied, and the entity has no
metadata for the specified key.
"""
value = defaultValue
try:
value = self.getEntityMetadata(entityRef, context)[key]
except KeyError:
if defaultValue is None:
raise
return value
def setEntityMetadataEntry(self, entityRef, key, value, context):
self.setEntityMetadata(entityRef, {key : value}, context, merge=True)
## @}
##
# @name Versioning
#
# Most asset_management_systems allow multiple revisions of certain
# entities to be tracked simultaneously. This API exposes this as
# a generalised concept, and its necessary for the caller to make sure
# only @ref entity_references that are meaningfully versioned are
# queried.
#
# @{
def getEntityVersionName(self, entityRef, context):
"""
Retrieves the name of the version pointed to by the supplied @ref
entity_reference.
@return str, A UTF-8 ASCII string representing the version or an empty
string if the entity was not versioned.
@note It is not necessarily a requirement that the entity exists, if, for
example, the version name can be determined from the reference itself (in
systems that implement a human-readable url, for example)
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
@see getEntityVersions()
@see getFinalizedEntityVersion()
"""
return ""
def getEntityVersions(self, entityRef, context, includeMetaVersions=False, maxResults=-1):
"""
Retrieves all available versions of the supplied @ref entity_reference
(including the supplied ref, if it points to a specific version).
@param includeMetaVersions bool, if true, @ref meta_versions such as
'latest', etc... should be included, otherwise, only concrete versions
will be retrieved.
@param maxResults int, Limits the number of results collected, if more
results are available than the limit, then the newest versions will be
returned. If a value of -1 is used, then all results will be returned.
@return dict, Where the keys are ASCII string versions, and the values are
an @ref entity_reference that points to its entity. Additionally the
python.constants.kVersionDict_OrderKey can be set to a list of the
version names (ie: dict keys) in their natural ascending order, that may be
used by UI elements, etc...
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
@see getEntityVersionName()
@see getFinalizedEntityVersion()
"""
return {}
def getFinalizedEntityVersion(self, entityRef, context, overrideVersionName=None):
"""
Retrieves a @ref entity_reference that points to the concrete version
of a @ref meta-version @ref entity_reference.
If the supplied entity reference is not versioned, or already has a
concrete version, the input reference is passed-through.
If versioning is unsupported for the given @ref entity_reference, then the
input reference is returned.
@param overrideVersionName str If supplied, then the call should return the
entity reference for the version of the referenced asset that matches the
name specified here, ignoring any version inferred by the input reference.
@return str
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
@exception python.exceptions.EntityResolutionError should be thrown if the
entityReference is ambiguously versioned (for example if the version is
missing from a reference to a versioned entity, and that behaviour is
undefined in the system manager's model. It may be that it makes sense in
the specific asset manager to fall back on 'latest' in this case...)
@exception python.exception.EntityResolutionError if the supplied
overrideVersionName does not exist for that entity.
@see getEntityVersionName()
@see getEntityVersions()
"""
return entityRef
## @}
##
# @name Related Entities
#
# A 'related' entity could take many forms. For example:
#
# @li In 3D CGI, Multiple @ref aovs may be related to a 'beauty' render.
# @li In Compositing, an image sequence may be related to the script
# that created it.
# @li An asset may be related to a task that specifies work to be done.
# @li Parent/child relationships are also (semantically) covered by
# these relationships.
#
# In this API, these relationships are represented by a generic
# Specification, this may just be a 'type', but can additionally have
# arbitrary attributes to further define the relationship. For example in
# the case of @ref aovs, the type might be 'alternate output' and the
# attributes may be that the 'channel' is 'diffuse'.
#
# Related references form a vital part in the abstraction of the internal
# structure of the asset management system from the Host application in its
# attempts to provide the user with meaningful functionality. A good example
# of this is in an editorial example, where it may need to query whether a
# 'shot' exists in a certain part of the asset system. One approach would be
# to use a 'getChildren' call, on this part of the system. This has the
# drawback that is assumes that shots are always something that can be
# described as 'immediate children' of the location in question. This lay not
# always be the case (say, for example there is some kind of 'task' structure
# in place too). Instead we use a request that asks for any 'shots' that
# relate to the chosen location. It is then up to the implementation of the
# ManagerInterfaceBase to determine how that maps to its own data model.
# Hopefully this allows Hosts of this API to work with a broader range of
# asset managers, without imposing any requirements on their structure or
# data model.
#
# @{
@abc.abstractmethod
def getRelatedReferences(self, entityRefs, relationshipSpecs, context,
resultSpec=None):
"""
Returns related entity references, based on a relationship specification.
This is an essential function in this API - as it is widely used to query
organisational hierarchy, etc...
There are three possible conventions for calling this function, to allow
for batch optimisations in the implementation and prevent excessive query
times with high-latency services.
a) A single entity reference, a list of specifications.
b) A list of entity references and a single specification.
c) Equal length lists of references and specifications.
In all cases, the return value is a list of lists, for example:
a) getRelatedReferences( [ r1 ], [ s1, s2, s3 ] )
> [ [ r1-s1-matches, ... ], [ r1-s2-matches, ... ], [ r1-s3-matches, ... ] ]
b) getRelatedReferences( [ r1, r2, r3 ], [ s1 ] )
> [ [ r1-s1-matches, ... ], [ r2-s1-matches, ... ], [ r3-s1-matches, ... ] ]
c) getRelatedReferences( [ r1, r2, r3 ], [ s1, s2, s3 ] )
> [ [ r1-s1-matches, ... ], [ r2-s2-matches, ... ], [ r3-s3-matches, ... ] ]
@note The order of entities in the inner lists of matching references will
not be considered meaningful, but the outer list should match the input
order.
In summary, if only a single entityRef is provided, it should be assumed
that all specs should be considered for that one entity. If only a single
relationshipSpec is provided, then it should be considered for all supplied
entity references. If lists of both are supplied, then they must be the
same length, and it should be assumed that it is a 1:1 mapping of spec per
entity. If this is not the case, ValueErrors should be thrown.
If any specification is unknown, then an empty list should be returned for
that specification, and no errors should be raised.
@param entityRefs str list
@param relationshipSpecs python.specifications.Specification or
python.specifications.RelationshipSpecification list
@param resultSpec python.specifications.EntitySpecification or None, a hint
as to what kind of entity the caller is expecting to be returned. May be
None.
@return list of str lists, this MUST be the correct length, returning an
empty outer list is NOT valid. (ie: max(len(refs), len(specs)))
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not known by the @ref asset_management_system. However, no exception should be
thrown if it is a recognised reference, but has no applicable relations.
@exception ValueError If more than one reference and specification is
provided, but the lists are not equal in length, ie: not a 1:1 mapping of
entities to specs. The abstraction of this interface into the Manager
class does cursory validation that this is the case before calling this
function.
@see python.specifications
@see setRelatedReferences()
"""
raise NotImplementedError
def setRelatedReferences(self, entityRef, relationshipSpec, relatedRefs,
context, append=True):
"""
Creates a new relationship between the referenced entities.
@param append bool, When True (default) new relationships will be added to
any existing ones. If False, then any existing relationships with the
supplied specification will first be removed.
Though getRelatedReferences is an essential call, there is some asymmetry
here, as it is not necessarily required to be able to setRelatedReferences
directly. For example, in the case of a 'shot' (as illustrated in the docs
for getRelatedReferences) - any new shots would be created by registering a
new @ref python.specifications.ShotSpecification under the parent, rather
than using this call. The best way to think of it is that this call is
reserved for defining relationships between existing assets (Such as
connecting multiple image sequences published under the same shot, as being
part of the same render.) and 'register' as defining the relationship
between a new asset and some existing one.
In systems that don't support post-creation adjustment of relationships,
this can simply be a no-op.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not recognised by the asset management system.
@return None
@see @ref getRelatedReferences()
@see @ref register()
"""
if not self.entityExists(entityRef, context):
raise exceptions.InvalidEntityReference(entityReference=entityRef)
for r in relatedRefs:
if not self.entityExists(r, context):
raise exceptions.InvalidEntityReference(entityReference=r)
## @}
##
# @name Publishing
#
# The publishing functions allow a Host to create entities within the
# @ref asset_management_system represented by this implementation. The API
# is designed to accommodate the broad variety of roles that
# different asset managers embody. Some are 'librarians' that simply
# catalog the locations of existing media. Others take an active role
# in both the temporary and long-term paths to items they manage.
#
# There are two key components to publishing within this API.
#
# *1 - The Entity Reference*
#
# As with the other entry points in this API, it is assumed that an @ref
# entity_reference is known ahead of time. How this reference is determined
# is beyond the scope of this layer of the API, and functions exists in
# higher levels that combine browsing and publishing etc... Here, we simply
# assert that there must be a meaningful reference given the @ref
# Specification of the entity that is being created or published.
#
# @note 'Meaningful' is best defined by the asset manager itself. For
# example, in a system that versions each 'asset' by creating children of the
# asset for each version, when talking about where to publish an image
# sequence of a render to, it may make sense to reference to the Asset
# itself, so that the system can determine the 'next' version number at the
# time of publish. It may also make sense to reference a specific version of
# this asset to implicitly state which version it will be written to. Other
# entity types may not have this flexibility.
#
# *2 - The Specification*
#
# The Specification allows ancillary information to be provided to help the
# implementation better interpret what type of entity may be best suited in
# any given situation. For example, a path to an image will generally be
# accompanied by a spec, that details the file type, colour space,
# resolution etc...
#
# @note The Specification should *not* be confused with @ref metadata. The
# implementation must not directly store any information contained within the
# Specification, though it may be used to better define the type of entity.
# Hosts that wish to persist other properties of the published entity, will
# call @ref setEntityMetadata() directly instead, and as described in the
# metadata section, it is assumed that this is the channel for information
# that needs to persist.
#
# For more on the relationship between Entities, Specifications and
# Meta-data, please see @ref entities_specifications_and_metadata
# "this" page.
#
# The action of 'publishing' itself, is split into two parts, depending on
# the nature of the item to be published.
#
# @li **Preflight** When a Host is about to create some new media/asset.
# @li **Registration** When a Host is ready to publish media that exists.
#
# For examples of how to correctly call these parts of the
# API within a host, see the @ref examples page.
#
# @note The term '@ref publish' is somewhat loaded. It generally means
# something different depending on who you are talking to. See the @ref
# publish "Glossary entry" for more on this, but to help avoid confusion,
# this API provides the @ref localizeStrings call, in order to allow the
# implementation to standardise some of the language and terminology used in a
# Hosts presentation of the asset management system with other integrations
# of the system.
#
# @{
@abc.abstractmethod
def managementPolicy(self, specification, context, entityRef=None):
"""
Determines if the asset manager is interested in participating in
interactions with the specified type of @ref Entity.
For example, a Host may call this in order to see if it would
like to manage the path of a scene file whilst choosing a destination to
save to.
This information is then used to determine which options should be
presented to the user. For example, if kIgnored was returned for a query as
to the management of scene files, a Host will hide or disable menu items
that relate to publish or loading of assetised scene files.
Calls with an accompanying @ref entity_reference may be used to prevent
users from attempting to perform an asset-action that is not supported by
the asset management system.
@note One very important attribute returned as part of this policy is the
@ref python.constants.kWillManagePath bit. If set, this instructs the Host
that the asset management system will manage the path used for the creation
of any new assets. When set, @ref preflight will be called before any file
creation to allow the asset management system to determine and prepare the
work path. If this bit is off, then only @ref register will ever be called,
and the user will be tasked with determining where new files should be
located. In many cases, this greatly reduces the sophistication of the
integration as registering the asset becomes a partially manual task,
rather than one that can be fully automated for new assets.
Additionally, the @ref python.constants.kSupportsBatchOperations bit is
important if you want Hosts to call the *Multiple variants of the
@ref preflight and @ref register methods.
@param entityRef str, If supplied, then the call should be interpreted as a
query as to the applicability of the given specification if registered to
the supplied entity. For example, attempts to register an ImageSpecification
to an entity reference that refers to the top level project may be
meaningless, so in this case kIgnored should be returned.
@return int, a bitfield, see @ref python.constants
"""
raise NotImplementedError
def thumbnailSpecification(self, specification, context, options):
"""
This will be called prior to registration of an asset to determine if the
asset system would like a thumbnail preparing. Presently, only JPEG
thumbnails will be generated. The arguments to this call are the same as
those that will be passed to the register call.
If a thumbnail is requested, its path will be set in the specification
property 'thumbnailPath' passed to a register call at a later date if it
was possible to create one.
@param options dict, The thumbnail process can be customised, by setting
the following keys in the options dict.
@li kField_PixelWidth ('width') : The pixel width of the thumbnail
@li kField_PixelHeight ('height') : The pixel height of the thumbnail
The keys may be set to the default set by the Host. It will try to best
match the requested specifications, but it should not be assumed that all
requested properties are honoured.
@return bool, If True, a Thumbnail is desired by the Manager, if False, the
host should not waste time making one.
"""
return False
def preflight(self, targetEntityRef, entitySpec, context):
"""
Prepares for some work to be done, to the referenced entity.
The entity referenced may not yet exist (@ref entity_reference). This
call is designed to allow sanity checking, placeholder creation or
any other sundry preparatory actions to be carried out.
Generally, this will be called before register() in any Host that
creates media, where the return to @ref managementPolicy has the @ref
python.constants.kWillManagePath bit set.
@param targetEntityRef str, a @ref entity_reference that it is desired to
publish the forthcoming media to. See the notes in the API documentation for
the specifics of this.
@note it is important for the implementation to pay attention to
python.contexts.Context.retention, as not all Hosts will support the
reference changing at this point.
@return str, An @ref entity_reference, that the host should resolve to
determine the path to write media to. This may or may not be the same as
the input reference. A Host should resolve this reference to get the
working filepath before writing any files.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not suitable for the supplied specification.
@exception python.exceptions.PreflightError if some fatal exception happens
during preflight, indicating the process should be aborted.
@exception python.exceptions.RetryableError If any non-fatal error occurs
that means the host should re-try from the beginning of any given process.
@see register()
"""
return targetEntityRef
def preflightMultiple(self, targetEntityRefs, entitySpecs, context):
"""
A batch version of @ref preflight, where most arguments are replaced by
arrays of equal length. Exception behaviour, etc... is the same as per
preflight, and should be thrown midway through preflight if necessary.
This will be used in preference to calling preflight many times in
succession to allow the implementation to optimise communication with the
back end asset management system.
@param context Context, is not replaced with an array in order to
simplify implementation. Otherwise, transactional handling has the
potential to be extremely complex if different contexts are allowed.
@return list str, A list of working entity references.
"""
result = []
numSteps = len(targetEntityRefs)
with contextManagers.ScopedProgressManager(numSteps) as progress:
for t,s in zip(targetEntityRefs, entitySpecs):
with progress.step():
result.append(self.preflight(t, s, context))
return result
@abc.abstractmethod
def register(self, stringData, targetEntityRef, entitySpec, context):
"""
Publish an entity to the @ref asset_management_system
Instructs the implementation to ensure a valid entity exists for the given
reference and spec. This will be called either in isolation or after
calling preflight, depending on the nature of the data being published and
the kWillManagePath bit of the returned @ref managementPolicy.
@param stringData str, The string that the entity should resolve to if
passed to a call to resolveEntityReference(). This may be left blank, if
there is no meaningful string representation of that entity (eg: a
'sequence' in a hierarchy). This must be stored by the Manager.
        @param targetEntityRef str, The @ref entity_reference to publish to. It is
up to the Manager to ensure that this is meaningful, as it is most
likely implementation specific. For example, if a 'Shot' specification
is requested to be published to a reference that points to a 'Sequence'
it makes sense to interpret this as a 'add a shot of this spec to the
sequence'. For other types of entity, there may be different
constraints on what makes sense.
@param entitySpec python.specifications.EntitySpecification A description
of the Entity (or 'asset') that is being published. It is *not* required
for the implementation to store any information contained in the specification,
though it may choose to use it if it is meaningful. A Host will separately
call setEntityMetadata() if it wishes to persist any other information in
the entity.
        @return str, An @ref entity_reference to the 'final' entity created by the
        publish action. It may or may not be the same as targetEntityRef.
@note it is important for the implementation to pay attention to
python.contexts.Context.retention, as not all Hosts will support the
reference changing at this point.
@exception python.exceptions.InvalidEntityReference If any supplied
reference is not suitable for the supplied specification.
@exception python.exceptions.RegistrationError if some fatal exception happens
during publishing, indicating the process should be aborted.
@exception python.exceptions.RetryableError If any non-fatal error occurs
that means the host should re-try from the beginning of any given process.
@see preflight()
@see resolveEntityReference()
"""
raise NotImplementedError
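    # Minimal illustrative sketch of what an implementation of register() might do,
    # assuming some hypothetical back-end handle `self._db`; this is not the
    # authoritative way to implement the method, only the general shape of it.
    #
    #   def register(self, stringData, targetEntityRef, entitySpec, context):
    #       newRef = self._db.create_version(targetEntityRef, data=stringData)  # assumed back end
    #       return newRef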
def registerMultiple(self, strings, targetEntityRefs, entitySpecs, context):
"""
        A batch version of @ref register, where most arguments are replaced by
        arrays of equal length. Exception behaviour, etc. is the same as per
        register, and exceptions should be thrown midway through registration if necessary.
This will be used in preference to calling register many times in
succession to allow the implementation to optimise communication with the
back end asset management system.
@param context Context, is not replaced with an array in order to
simplify implementation. Otherwise, transactional handling has the
potential to be extremely complex if different contexts are allowed.
@return list str, A list of finalized entity references.
"""
result = []
numSteps = len(targetEntityRefs)
with contextManagers.ScopedProgressManager(numSteps) as progress:
for d,t,s in zip(strings, targetEntityRefs, entitySpecs):
with progress.step():
result.append(self.register(d, t, s, context))
return result
## @}
##
# @name Commands
#
# The commands mechanism provides a means for Hosts and asset managers to
# extend functionality of the API, without requiring any new methods.
#
# The API represents commands via a @ref
# python.specifications.CommandSpecification, which maps to a 'name' and some
# 'arguments'.
#
# @todo Reference any core module commands
#
# @{
def commandSupported(self, commandSpec, context):
"""
Determines if a specified command is supported by the system.
@return bool, True if the system implements the command, else False.
        @see commandAvailable()
@see runCommand()
"""
return False
def commandAvailable(self, commandSpec, context):
"""
        Determines if the specified command is permitted or should succeed in the
current context. This call can be used to test whether a command can
be carried out, generally to provide some meaningful feedback to a user
so that they don't perform an action that would consequently error.
For example, the 'checkout' command for an asset may return false here
if that asset is already checked out by another user, or the current
user is not allowed to check the asset out.
        @exception python.exceptions.InvalidCommand If an unsupported command is
        passed.
        @return (bool, str), True if the command should complete successfully if
        called, False if it is known to fail or is not permitted. The second part
of the tuple will contain any meaningful information from the system to
qualify the status.
        @see commandSupported()
@see runCommand()
"""
msg = "The command '%s' is not supported by the Asset Manager." % commandSpec
return False, msg
def runCommand(self, commandSpec, context):
"""
Instructs the asset system to perform the specified command.
@exception python.exceptions.InvalidCommand If the command is not
implemented by the system.
@exception python.exceptions.CommandError if any other run-time error
occurs during execution of the command
@return Any result of the command.
@see commandSupported()
@see commandAvailable()
"""
msg = "The command '%s' is not supported by this Asset Manager." % commandSpec
raise exceptions.InvalidCommand(msg)
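    # Illustrative host-side use of the commands mechanism above. `manager`,
    # `commandSpec`, `ctx` and `show_to_user` are assumed names supplied by the host.
    #
    #   if manager.commandSupported(commandSpec, ctx):
    #       ok, msg = manager.commandAvailable(commandSpec, ctx)
    #       if ok:
    #           result = manager.runCommand(commandSpec, ctx)
    #       else:
    #           show_to_user(msg)              # hypothetical UI feedback helper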
## @}
##
# @name Manager State
#
    # A single 'task' in a Host may require more than one interaction with
    # the asset management system.
    #
    # Because the @ref ManagerInterfaceBase is largely stateless, to simplify error
    # handling and to allow an implementation to know which interactions are
    # related, this API supports the concept of a @ref manager_state
    # object. This is contained in every @ref Context and passed to relevant
    # calls.
#
# This mechanism may be used for a variety of purposes. For example, it
# could ensure that queries are made from a coherent time stamp during a
# render, or to undo the publishing of multiple assets. It can also be used
# to define 'transactions' - groups of related actions that may be cancelled
# together/rolled back.
#
    # @note Not all implementations may support transactions; there is no
    # requirement for any of the functions in this group to be implemented. The
    # defaults are effectively no-ops.
#
# @{
def createState(self, parentState=None):
"""
Create a new object to represent the state of the interface and return it
(or some handle that can be persisted within the context). You are free to
implement this however you like, as long as it can be uniquely represented
by the object returned from this function.
A new state object is created whenever a @ref Context is made by a @ref
python.Session.Session.
This object is then stored in the newly created Context, and is
consequently available to all the API calls in the ManagerInterfaceBase that
take a Context instance. The implementation of a ManagerInterfaceBase can
then use this internally to control its behaviour.
This object is also extracted from the context and passed directly to any
of the 'transactional' calls in this API. For more on the transactional
model in this API, and how these will be called, see the @ref transactions
Page.
@param parentState obj, If present, it is to be assumed that the new state
is considered a 'child' of the supplied state. This may be used when
creating a child Context for persistence somewhere in a UI, etc... when
further processing may change the access/retention of the Context. It is
expected that the Manager will migrate any applicable state components to
        this child context, for example a timestamp used for 'vlatest'. However,
        it is *not* expected to link the new state with any transaction that is
        open in the parent state, so the returned state should not have any open
        transactions.
@return object, Some object that represents self-contained state of the
ManagerInterfaceBase. This will be passed to future calls and to the
transactional methods. Presently this can be any hashable object.
        @exception python.exceptions.StateError If for some reason creation
fails.
@see startTransaction()
@see finishTransaction()
@see cancelTransaction()
@see freezeState()
@see thawState()
@see The @ref transactions page.
"""
return None
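    # Illustrative only: the state can be any hashable object the implementation
    # finds useful. A sketch of one possible shape, assuming a hypothetical
    # `_ManagerState` value object and a snapshot time used for 'vlatest' queries:
    #
    #   def createState(self, parentState=None):
    #       snapshot = parentState.snapshot if parentState else time.time()  # assumed field
    #       return _ManagerState(snapshot=snapshot, transactions=[])         # hypothetical class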
def startTransaction(self, state):
"""
        Called to indicate the start of a series of connected actions. The aim of a
        transaction is to allow undo/cancellation of all related actions in one
        step, largely to avoid inconsistent state in the back end. It is important,
        though, that any queries made against data that has been created or set
        within the transaction return the updated or new data.
For more on the transactional model in this API, and how these functions are
called, see the @ref transactions Page.
This will never be called with the same state from multiple
threads, but may be called with different state objects.
This method **must** store any persistent state in the supplied
state object to ensure the API is stateless. It should not store any
state relating to the transaction within the ManagerInterfaceBase instance
itself.
@return None
@exception python.exceptions.StateError If for some reason the
action fails.
@see createState()
@see finishTransaction()
@see cancelTransaction()
@see The @ref transactions page.
"""
pass
def finishTransaction(self, state):
"""
Called at the end of a group of actions to inform the ManagerInterfaceBase
that any pending internal management should be finalized.
For more on the transactional model in this API, and how these functions
will be called, see the @ref transactions Page.
This will never be called with the same state from multiple
threads, but may be called with different state objects.
This method **must** only use or store any persistent state from the
supplied state object to ensure the API is stateless.
@return None
@exception python.exceptions.StateError If for some reason the
action fails, or finish is called before start.
@see createState()
@see startTransaction()
@see cancelTransaction()
@see The @ref transactions page.
"""
pass
def cancelTransaction(self, state):
"""
Can be called at any point after @ref startTransaction to undo actions and
reset the transaction state.
Generally called in response to some fatal error, in order to request
the implementation to unroll, or revert any changes made since @ref
startTransaction
The state should also be re-configured so that it is then safe to
call @ref startTransaction
@return Bool True if roll-back was successful, False in all other cases.
@see createState()
@see startTransaction()
@see finishTransaction()
@see The @ref transactions page.
"""
return False
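    # Hedged sketch of how a host/session might drive the transaction lifecycle
    # above, with `manager` and the publish calls assumed; `state` is whatever
    # createState() returned and is carried inside the Context in practice.
    #
    #   state = manager.createState()
    #   manager.startTransaction(state)
    #   try:
    #       ...preflight/register calls using a Context that holds `state`...
    #       manager.finishTransaction(state)
    #   except Exception:
    #       manager.cancelTransaction(state)   # roll back the related actions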
##
# @name State Persistence
#
# A Host may wish to distribute work. Often the workers may be in a
# different execution space. As such, it becomes necessary to pass a
# reference to the current transaction stack along with the work, so that
# actions can be correctly grouped.
#
    # The freezeState() call can be made at any point, and
    # the ManagerInterfaceBase should return a string that, when
# passed to thawState() in another process, will restore the
# state of the context so that future actions will be associated with the
# same state as before freezing.
#
# Included in this is the requirement that if a transaction has been started,
# this should also be persisted, so that actions on a thawed state are also
# associated with that transaction.
#
# This string could be a serialized representation of some transaction
# object, or a simple uuid or handle.
#
# If an implementation does not support freezing the state, then it should
# ensure that any outstanding internal tasks pending on @ref
# finishTransaction are completed prior to thawing, but the 'open' state of
# the transaction should be persisted to the thawed state - as @ref
# finishTransaction will most likely still be called.
def freezeState(self, state):
"""
        Returns a string that encapsulates the current state of the
        ManagerInterfaceBase represented by the supplied state object, so that it
        can be restored later, or in another process.
After calling this, the state should be considered frozen, and any further
cancel/finish calls should throw a @ref python.exceptions.StateError if
made without first thawing the stack.
@return An ASCII compatible string that can be used to restore the
stack.
@see thawState()
@see The @ref transactions page.
"""
return ""
def thawState(self, token):
"""
        Restores a state object from a token previously returned by freezeState().
@return object A state object, as per createState(), except restored to the
previous state encapsulated in the token, which is the same string as
returned by freezeState.
        @exception python.exceptions.StateError If the supplied token is not
        meaningful, or if a state has already been thawed.
"""
return None
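    # Hedged sketch of the freeze/thaw round-trip used when distributing work;
    # `submit_to_farm` is an assumed transport, only freezeState/thawState are API calls.
    #
    #   token = manager.freezeState(state)       # ASCII string, safe to serialise
    #   submit_to_farm(job, state_token=token)   # hypothetical queue submission
    #   # ...later, in the worker process...
    #   state = manager.thawState(token)         # restores the same logical state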
## @}
|
StarcoderdataPython
|
6410716
|
<reponame>djtorch26/MorningAssistant
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 02:12:33 2020
@author: Dawson
"""
from gtts import gTTS
from . import FileManager as fmanager
#import playsound
import shutil
import pygame
import os
nuggetSpeech = fmanager.readNuggetFile()
def speak(text):
tts = gTTS(text=text, lang = 'en')
#changes directory of file
try:
#creates and saves file from the daily .txt file
saveFile = 'MorningNuggie'+ fmanager.timeNow() + '.mp3'
tts.save(saveFile)
#soundfile = open(save)
#plays sound object for the daily .txt file that is generated.
#playsound.playsound(saveFile)
# pygame.mixer.init()
# soundtoplay = pygame.mixer.music.load(os.path.join('/home/pi/Documents/MorningAssistant/', saveFile))
# soundtoplay.play()
# soundfile.close()
#while pygame.mixer.music.get_busy() == True:
#continue
pathfrom = ('/home/pi/Documents/MorningAssistant/' + saveFile)
pathto = '/home/pi/Documents/MorningAssistant/MorningNuggets/'
shutil.move(pathfrom, pathto)
systemInput = ('omxplayer ' + pathto + saveFile)
os.system(systemInput)
except Exception as e:
print(e)
#speak(nuggetSpeech)
|
StarcoderdataPython
|
87999
|
<reponame>Ahlyab/udemy-course-grabber
from pack import functions
from pack import banner
no = 1
def write_coupons(list_of_coupons_and_title):
    global no
    for coupon_and_title in list_of_coupons_and_title:
        title, link = coupon_and_title.split('||')
        coupons_file.write("\n" + str(no) + ". " + title + "\n" + link + "\n")
        no += 1
def discudemy():
list_of_coupons_and_title = functions.discudemy(1)
write_coupons(list_of_coupons_and_title)
def learnviral():
list_of_coupons_and_title = functions.learnviral(1)
write_coupons(list_of_coupons_and_title)
def real_disc():
list_of_coupons_and_title = functions.real_disc(1)
write_coupons(list_of_coupons_and_title)
def udemy_freebies():
list_of_coupons_and_title = functions.udemy_freebies(1)
write_coupons(list_of_coupons_and_title)
def udemy_coupons_me():
list_of_coupons_and_title = functions.udemy_coupons_me(1)
write_coupons(list_of_coupons_and_title)
def main():
discudemy()
learnviral()
real_disc()
udemy_freebies()
udemy_coupons_me()
if __name__ == '__main__' :
banner.banner()
banner.info()
banner.msg()
coupons_file = open("./coupons.txt", "w", encoding="utf-8")
main()
banner.end_msg()
coupons_file.close()
|
StarcoderdataPython
|
1678841
|
<reponame>wangcj05/sciann
""" Utilities to process functionals.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sciann
def is_functional(f):
""" Checks whether `f` is a functional object.
# Arguments
f: an object to be tested.
# Returns
True if functional.
# Raises
ValueError: if the object cannot be tested with `isinstance`.
"""
if isinstance(f, (sciann.Functional, sciann.functionals.RNNFunctional)):
return True
else:
return False
def validate_functional(f):
""" if `f` is not a functional object, raises value error.
# Arguments
f: an object to be tested.
# Returns
True if functional, False otherwise.
# Raises
ValueError: if the object is not a Functional object.
"""
if isinstance(f, (sciann.Functional, sciann.functionals.rnn_functional.RNNFunctional)):
return True
else:
raise ValueError(
'These operations can only be applied to the `functional` object. '
'Use `Keras` or `TensorFlow` functions when applying to tensors.'
)
def is_constraint(f):
""" Checks whether `f` is a `Constraint` object.
# Arguments
f: an object to be tested.
# Returns
True if Constraint.
# Raises
ValueError: if the object cannot be tested with `isinstance`.
"""
if isinstance(f, sciann.Constraint):
return True
else:
return False
def validate_constraint(f):
""" if `f` is not a Constraint object, raises value error.
# Arguments
f: an object to be tested.
# Returns
True if Constraint, False otherwise.
# Raises
ValueError: if the object is not a Constraint object.
"""
if isinstance(f, sciann.Constraint):
return True
else:
raise ValueError(
'These operations can only be applied to the `Constraint` object. '
'Use `Keras` or `TensorFlow` functions when applying to tensors '
'or layers. '
)
def is_parameter(f):
""" Checks whether `f` is a parameter object.
# Arguments
f: an object to be tested.
# Returns
True if a parameter.
# Raises
ValueError: if the object cannot be tested with `isinstance`.
"""
if isinstance(f, sciann.Parameter):
return True
else:
return False
def validate_parameter(f):
""" if `f` is not a parameter object, raises value error.
# Arguments
f: an object to be tested.
# Returns
True if parameter, False otherwise.
# Raises
ValueError: if the object is not a Parameter object.
"""
if isinstance(f, sciann.Parameter):
return True
else:
raise ValueError(
'These operations can only be applied to the `parameter` object. '
'Use `Keras` or `TensorFlow` functions when applying to tensors.'
)
def is_field(f):
""" Checks whether `f` is a `Field` object.
# Arguments
f: an object to be tested.
# Returns
True if Field.
# Raises
ValueError: if the object cannot be tested with `isinstance`.
"""
if isinstance(f, (sciann.Field, sciann.functionals.RNNField)):
return True
else:
return False
def validate_field(f):
""" if `f` is not a Field object, raises value error.
# Arguments
f: an object to be tested.
# Returns
True if Field, False otherwise.
# Raises
ValueError: if the object is not a Field object.
"""
if isinstance(f, (sciann.Field, sciann.functionals.RNNField)):
return True
else:
raise ValueError(
'These operations can only be applied to the `Field` object. '
'Use `Keras` or `TensorFlow` functions when applying to tensors '
'or layers. '
)
def is_variable(f):
""" Checks whether `f` is a `Variable` object.
# Arguments
f: an object to be tested.
# Returns
True if Variable.
# Raises
ValueError: if the object cannot be tested with `isinstance`.
"""
if isinstance(f, (sciann.Variable, sciann.functionals.RadialBasis, sciann.functionals.RNNVariable)):
return True
else:
return False
def validate_variable(f):
""" if `f` is not a Variable object, raises value error.
# Arguments
f: an object to be tested.
# Returns
True if Variable, False otherwise.
# Raises
ValueError: if the object is not a Variable object.
"""
if isinstance(f, (sciann.Variable, sciann.functionals.RadialBasis, sciann.functionals.RNNVariable)):
return True
else:
raise ValueError(
'These operations can only be applied to the `Variable` object. '
'Use `Keras` or `TensorFlow` functions when applying to tensors '
'or layers. '
)
def is_scimodel(f):
""" Checks whether `f` is a `SciModel` object.
# Arguments
f: an object to be tested.
# Returns
True if SciModel.
# Raises
ValueError: if the object cannot be tested with `isinstance`.
"""
if isinstance(f, sciann.SciModel):
return True
else:
return False
def validate_scimodel(f):
""" if `f` is not a SciModel object, raises value error.
# Arguments
f: an object to be tested.
# Returns
True if SciModel, False otherwise.
# Raises
ValueError: if the object is not a SciModel object.
"""
if isinstance(f, sciann.SciModel):
return True
else:
raise ValueError(
'These operations can only be applied to the `SciModel` object. '
'Use `Keras` or `TensorFlow` functions when applying to tensors '
'or layers. '
)
|
StarcoderdataPython
|
3588707
|
class Array:
def __init__(self):
self._row_len = 0
self._data = []
@property
def rows(self):
return len(self._data)
def add_row(self, row: str):
if self._row_len == 0:
self._row_len = len(row)
elif self._row_len != len(row):
raise ValueError(f"invalid row length: {len(row)}, expected {self._row_len}")
self._data.append(row)
# (0,0) is top left, x grows horizontally to the right, y grows vertically down
# for example in array below (2, 1) is A:
# OOO
# OOA
    # if x is larger than row_len, values are "repeated" (array grows infinitely to the right)
def at(self, x: int, y: int):
if x < 0 or y < 0 or y >= len(self._data):
raise ValueError(f"invalid coordinates ({x}, {y}), allowed are x >= 0, 0 <= y < {len(self._data)}")
return self._data[y][x % self._row_len]
def __str__(self) -> str:
return "\n".join(self._data)
def count_trees(self, step_x: int, step_y: int) -> int:
x, y = 0, 0
trees_count = 0
while y < self.rows:
if self.at(x, y) == "#":
trees_count += 1
elif self.at(x, y) != ".":
raise ValueError(f"incorrect input data at position ({x}, {y}): {self.at(x, y)}")
x += step_x
y += step_y
return trees_count
def main():
a = Array()
with open("data.txt") as f:
for line in f:
line = line.strip()
if len(line) > 0:
a.add_row(line)
steps = [
(1, 1),
(3, 1),
(5, 1),
(7, 1),
(1, 2),
]
product = 1
for s in steps:
trees_count = a.count_trees(s[0], s[1])
product *= trees_count
print(f"trees encountered {s}: {trees_count}")
print(f"total product: {product}")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
196912
|
<reponame>tomviner/dojo-tcp-generator
import socket
import random
if __name__ == '__main__':
HOST = '127.0.0.1'
PORT = 8080
def process_data(data):
answers = [
b"No You " + data,
b"Tell that to your sister and/or brother!",
b"You didn't!",
b"That didn't even hurt my feelings",
]
return random.choice(answers) + b'\n'
while 1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
with conn:
print('Connected by', addr)
while 1:
data = conn.recv(1024)
if not data:
break
response = process_data(data)
conn.sendall(response)
|
StarcoderdataPython
|
3345620
|
<reponame>ttrummel/pandapipes
# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
from pandapower.control import run_control as run_control_pandapower, prepare_run_ctrl as prepare_run_control_pandapower
import pandapipes as ppipe
from pandapipes.pipeflow import PipeflowNotConverged
def run_control(net, ctrl_variables=None, max_iter=30, continue_on_lf_divergence=False,
**kwargs):
"""
Function to run a control of the pandapipes network.
:param net: The pandapipes network
:type net: pandapipesNet
:param ctrl_variables: Used control variables. If None, default control variables are used.
:type ctrl_variables: dict, default None
:param max_iter: Maximal amount of iterations
:type max_iter: int, default 30
:param continue_on_lf_divergence: ?
:type continue_on_lf_divergence: bool, default False
:param kwargs: Additional keyword arguments
:type kwargs: dict
:return: No output
"""
if ctrl_variables is None:
ctrl_variables = prepare_run_ctrl(net, None)
run_control_pandapower(net, ctrl_variables=ctrl_variables, max_iter=max_iter,
continue_on_lf_divergence=continue_on_lf_divergence, **kwargs)
def prepare_run_ctrl(net, ctrl_variables):
"""
Function that defines default control variables.
:param net: The pandapipes network
:type net: pandapipesNet
:return: ctrl_variables
:rtype: dict
"""
if ctrl_variables is None:
ctrl_variables = prepare_run_control_pandapower(net, None)
ctrl_variables["run"] = ppipe.pipeflow
ctrl_variables["errors"] = (PipeflowNotConverged)
return ctrl_variables
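# A hedged usage sketch (the network, fluid and controller setup are assumed and
# not shown here; only run_control() above comes from this module):
#   import pandapipes as pp
#   net = pp.create_empty_network(fluid="lgas")
#   ...create junctions, pipes, sinks and controllers on `net`...
#   run_control(net, max_iter=30)   # runs the registered controllers via ppipe.pipeflow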
|
StarcoderdataPython
|
1858244
|
"""
mbed OS
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
import select
import threading
from sys import stdout
from SocketServer import BaseRequestHandler, UDPServer, _eintr_retry
from mbed_host_tests import BaseHostTest
# The watchdog is used to terminate the udp helper thread in the
# event of an error condition. This graceful cleanup is required in
# particular for the test automation environment where failure to
# terminate the thread would leave a python process behind and prevent the
# automation system from reusing the server port.
# The value of the timeout is set to equal the minimum
# MBED_HOSTTEST_TIMEOUT value of the target tests using this script
# (currently udp_echo_client.cpp).
SAL_UDPSERVER_WATCHDOG_TIMOUT = 60.0
SalUdpServerDebug=False
class SalUdpServer(UDPServer):
""" UDP Server derived class with a custom serve_forever() implementation
for implementing detection of shutdown command allowing graceful
termination of the host test script"""
address_family = socket.AF_INET
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass):
UDPServer.__init__(self, server_address, RequestHandlerClass)
self._shutdown_request = False
# watchdog guards against the failure mode when the remote target fails
# to send any packets. If the watchdog reaches the high water mark, the
# server is terminated so as not to leave the server thread unterminated
self.watchdog = 0.0
def serve_forever(self, poll_interval=0.5):
"""Provide an override that can be shutdown from a request handler.
The threading code in the BaseSocketServer class prevented this from working
even for a non-threaded blocking server.
"""
try:
while not self._shutdown_request:
r, w, e = _eintr_retry(select.select, [self], [], [], poll_interval)
if self in r:
# reset watchdog
self.watchdog = 0.0
self._handle_request_noblock()
else:
self.watchdog += poll_interval
if self.watchdog > SAL_UDPSERVER_WATCHDOG_TIMOUT:
self._shutdown_request = True
finally:
self._shutdown_request = False
class SalUdpServerEchoCallback(BaseRequestHandler):
"""UDP Server callback handler for processing rx-ed data. Received data is
echoed back to the sender """
def handle(self):
""" One handle per connection
"""
try:
data, socket = self.request
print("HOST: Received %d bytes of data" % len(data))
if SalUdpServerDebug == True:
print "HOST: sending the data back to transmitter at:"
print self.client_address
print "HOST: data:"
print data
print "HOST: %d bytes sendto() client %s" % (len, str(self.client_address))
if 'shutdown' in data:
self.server._shutdown_request = True
else:
tx_bytes = socket.sendto(data, self.client_address)
print("HOST: Sent %d bytes of data" % tx_bytes)
except Exception as e:
print("HOST: detected unexpected exception: %s" % str(e))
class SalUdpServerTest(BaseHostTest):
"""
mbed greentea framework host test script for udp_echo_client server
side functionality. The test does the following:
- creates a UDP Server for echo back to sender any packets received.
- communicates the udp server {ipaddr, port} to the DUT via serial
so the DUT can send packets to the udp server.
- The DUT will send udp packets of various lengths and the UDP server
will echo them back again.
- When finished, the DUT will send a shutdown command to the UDP
server causing the udp server thread to terminate, and this
function to return.
"""
name = 'sal_udpserver'
def send_server_ip_port(self, selftest, ip_address, port_no):
"""send the udp server {ipaddr, port} to target via serial console."""
self.watchdog = 0.0
# Read 3 lines which are sent from client
print "HOST: About to read 3 lines from target before sending UDP Server {ipaddr, port} tuple."
for i in range(0, 3):
c = selftest.mbed.serial_readline()
if c is None:
selftest.print_result(self.RESULT_IO_SERIAL)
return
print "MBED: " + c.strip()
print "HOST: Sending server IP Address to target..."
connection_str = ip_address + ":" + str(port_no) + "\n"
selftest.mbed.serial_write(connection_str)
        # mbed greentea framework supports the following for outputting the serial console emitted
        # from the target, but it doesn't work reliably for me.
# selftest.dump_serial()
# selftest.dump_serial_end()
# until its fixed, the following emits the serial console trace
while True:
c = selftest.mbed.serial_readline()
if c is None:
selftest.print_result(self.RESULT_IO_SERIAL)
return
print "MBED: " + c.strip()
# look for the end tag in serial output denoting the test has
# finished, and this can return.
if c.strip() == "{{end}}":
print "HOST: Terminating Test"
break
# null lines are periodically generated, which can be used to trigger the watchdog
elif c.strip() == "":
self.watchdog += 1.0
if self.watchdog > SAL_UDPSERVER_WATCHDOG_TIMOUT:
break
else:
# reset watchdog
self.watchdog = 0.0
return selftest.RESULT_SUCCESS
def test(self, selftest):
""" Method invoked by test framework to implement udp_echo_client
server side functionality."""
# socket functions used are selected to promote portability across
# windows, linux and mac.
srv_ipaddr = socket.gethostbyname(socket.gethostname())
self.udpserver = SalUdpServer((srv_ipaddr, 0), SalUdpServerEchoCallback)
srv_port = self.udpserver.socket.getsockname()[1]
print "HOST: Listening for UDP connections on %s:%d." %(srv_ipaddr, srv_port)
udp_thread = threading.Thread(target=self.udpserver.serve_forever)
udp_thread.start()
self.send_server_ip_port(selftest, srv_ipaddr, srv_port)
|
StarcoderdataPython
|
3360936
|
<filename>tensor2tensor/models/research/autoencoders.py
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autoencoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_layers
from tensor2tensor.models import basic
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_model
class BasicDiscreteAutoencoder(basic.BasicAutoencoder):
def bottleneck(self, x, res_size):
hparams = self._hparams
x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_size, name="bottleneck"))
d = x + tf.stop_gradient(2 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
y = tf.nn.dropout(x, keep_prob=1.0 - hparams.dropout)
x = common_layers.mix(d, y, hparams.discretize_warmup_steps,
hparams.mode == tf.estimator.ModeKeys.TRAIN)
x = tf.layers.dense(x, res_size, name="unbottleneck")
return x
@registry.register_hparams
def basic_discrete_autoencoder():
"""Basic autoencoder model."""
hparams = basic.basic_autoencoder()
hparams.hidden_size = 128
hparams.bottleneck_size = 512
hparams.bottleneck_warmup_steps = 3000
hparams.add_hparam("discretize_warmup_steps", 5000)
return hparams
|
StarcoderdataPython
|
4870098
|
<filename>tests/test_create_project.py
import os
import subprocess
from click.testing import CliRunner
def test_create_app(project):
os.environ['MASHINA_SETTINGS_MODULE'] = '%s.config.settings' % project
from mashina.commands import createapp
runner = CliRunner()
result = runner.invoke(createapp, ['animal', 'animals'])
assert result.exit_code == 0
def test_run_server(wsgi_app):
try:
wsgi_app.communicate(timeout=2)
except subprocess.TimeoutExpired:
pass
finally:
assert wsgi_app.returncode is None
|
StarcoderdataPython
|
299030
|
from typing import Optional
class Node:
def __init__(self, key: int, val: int) -> None:
self.key = key
self.val = val
self.prev = None
self.next = None
class LRUCache:
def __init__(self, capacity: int) -> None:
self.capacity = capacity
self.dic = dict()
self.head = Node(-1, -1)
self.tail = Node(-1, -1)
self.head.next, self.tail.prev = self.tail, self.head
def get(self, key: int) -> int:
if key in self.dic:
cur_node = self.dic[key]
self._remove(cur_node)
self._add(cur_node)
return cur_node.val
return -1
def put(self, key: int, val: int) -> None:
if key in self.dic:
self._remove(self.dic[key])
node = Node(key, val)
self._add(node)
self.dic[key] = node
if len(self.dic) > self.capacity:
cur_node = self.head.next
self._remove(cur_node)
del self.dic[cur_node.key]
def _remove(self, node: Optional[Node]) -> None:
prev, nxt = node.prev, node.next
prev.next, nxt.prev = nxt, prev
def _add(self, node: Optional[Node]) -> None:
cur_node = self.tail.prev
cur_node.next, node.prev = node, cur_node
self.tail.prev, node.next = node, self.tail
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
|
StarcoderdataPython
|
1744869
|
<filename>Darlington/phase2/Data Structure/day 58 solution/qtn10.py
#program to group a sequence of key-value pairs into a dictionary of lists.
from collections import defaultdict
class_roll = [('v', 1), ('vi', 2), ('v', 3), ('vi', 4), ('vii', 1)]
d = defaultdict(list)
for k, v in class_roll:
d[k].append(v)
print(sorted(d.items()))
|
StarcoderdataPython
|
8142012
|
import numpy as np
def from_data_file(data_dir):
""" This function reads the data that we use in this demo."""
data=dict()
import scipy.io as sio
data_file = sio.loadmat(data_dir+'/data_train.mat')
data['train']=dict()
data['train']['inputs'] = data_file['inputs']
data['train']['target'] = data_file['target']
data_file = sio.loadmat(data_dir+'/data_validation.mat')
data['val']=dict()
data['val']['inputs'] = data_file['inputs']
data['val']['target'] = data_file['target']
data_file = sio.loadmat(data_dir+'/data_test.mat')
data['test']=dict()
data['test']['inputs'] = data_file['inputs']
data['test']['target'] = data_file['target']
return data
def theta_to_model(theta):
""" This function takes a model (or gradient) in the form of one long vector (maybe produced
by model_to_theta), and restores it to the structure format, i.e. with fields
.input_to_hid and .hid_to_class, both matrices. """
n_hid = np.int(theta.shape[0] / (256+10.))
ret=dict()
ret['input_to_hid'] = np.reshape(np.ravel(theta)[0:256 * n_hid], (n_hid, 256), order='F')
ret['hid_to_class'] = np.reshape(np.ravel(theta)[256 * n_hid:], (10, n_hid), order='F')
return ret
def initial_model(n_hid):
""" This function initialises model parameters. """
n_params = (256 + 10) * n_hid
as_row_vector = np.cos(np.arange(n_params))
return theta_to_model(as_row_vector[:,np.newaxis] * 0.1) # We don't use random initialization, for
# this assignment. This way, everybody will get the same result
def model_to_theta(model):
# This function takes a model (or gradient in model form),
# and turns it into one long vector. See also theta_to_model.
input_to_hid = np.ravel(model['input_to_hid'], order='F')[:,np.newaxis]
hid_to_class = np.ravel(model['hid_to_class'], order='F')[:,np.newaxis]
return np.vstack((input_to_hid, hid_to_class))
def logistic(input):
ret = 1 / (1 + np.exp(-input))
return ret
def log_sum_exp_over_rows(a):
# This computes log(sum(exp(a), 1)) in a numerically stable way
maxs_small = np.max(a, 0)
maxs_big = np.tile(maxs_small[np.newaxis,:], (a.shape[0], 1))
#print('maxs_small = {}'.format(maxs_small.shape))
#print('maxs_big = {}'.format(maxs_big.shape))
#print('a = {}'.format(a.shape))
ret = np.log(np.sum(np.exp(a - maxs_big), 0)) + maxs_small
return ret
def classification_performance(model, data):
# This returns the fraction of data cases that is incorrectly classified by the model.
hid_input = np.dot(model['input_to_hid'], data['inputs']) # input to the hidden units, i.e. before the logistic. size: <number of hidden units> by <number of data cases>
hid_output = logistic(hid_input) # output of the hidden units, i.e. after the logistic. size: <number of hidden units> by <number of data cases>
class_input = np.dot(model['hid_to_class'], hid_output) # input to the components of the softmax. size: <number of classes, i.e. 10> by <number of data cases>
choices = np.argmax(class_input,0) # choices is integer: the chosen class, plus 1.
targets = np.argmax(data['target'],0) # targets is integer: the target class, plus 1.
ret = np.mean((choices != targets).astype(np.float))
return ret
|
StarcoderdataPython
|
6476215
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from application import bootstrap
from cherrypy.process.plugins import Daemonizer
bootstrap()
# debugging purpose, e.g. run with PyDev debugger
if __name__ == '__main__':
    import sys
    import cherrypy
    if '--daemon' in sys.argv:
        Daemonizer(cherrypy.engine).subscribe()
    else:
        cherrypy.engine.signals.subscribe()
cherrypy.engine.start()
cherrypy.engine.block()
|
StarcoderdataPython
|
9729055
|
<reponame>Testing4AI/DeepJudge
import numpy as np
import scipy.stats
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
DIGISTS = 4
def Rob(model, advx, advy):
""" Robustness (empirical)
args:
model: suspect model
advx: black-box test cases (adversarial examples)
advy: ground-truth labels
return:
Rob value
"""
return round(np.sum(np.argmax(model.predict(advx), axis=1)==np.argmax(advy, axis=1))/advy.shape[0], DIGISTS)
def JSD(model1, model2, advx):
""" Jensen-Shanon Distance
args:
model1 & model2: victim model and suspect model
advx: black-box test cases
return:
JSD value
"""
vectors1 = model1.predict(advx)
vectors2 = model2.predict(advx)
mid = (vectors1 + vectors2)/2
distances = (scipy.stats.entropy(vectors1, mid, axis=1) + scipy.stats.entropy(vectors2, mid, axis=1))/2
return round(np.average(distances), DIGISTS)
def LOD(model1, model2, tests, order=2):
""" Layer Outputs Distance
args:
model1 & model2: victim model and suspect model
tests: white-box test cases
order: distance norm
return:
LOD value
"""
lods = []
for loc in tests.keys():
layer_index, idx = loc[0], loc[1]
submodel1 = Model(inputs = model1.input, outputs = model1.layers[layer_index].output)
submodel2 = Model(inputs = model2.input, outputs = model2.layers[layer_index].output)
outputs1 = submodel1(tests[loc])
outputs1 = K.mean(K.reshape(outputs1, (outputs1.shape[0], -1, outputs1.shape[-1])), axis = 1)
outputs2 = submodel2(tests[loc])
outputs2 = K.mean(K.reshape(outputs2, (outputs2.shape[0], -1, outputs2.shape[-1])), axis = 1)
lods.append(np.linalg.norm(outputs1 - outputs2, axis=1, ord=order))
return round(np.average(np.array(lods)), DIGISTS)
def LAD(model1, model2, tests, theta=0.5):
""" Layer Activation Distance
args:
model1 & model2: victim model and suspect model
tests: white-box test cases
theta: activation threshold
return:
LAD value
"""
def normalize(vs):
return [(v-np.min(v))/(np.max(v)-np.min(v)+1e-6) for v in vs]
lads = []
for loc in tests.keys():
layer_index, idx = loc[0], loc[1]
submodel1 = Model(inputs = model1.input, outputs = model1.layers[layer_index].output)
submodel2 = Model(inputs = model2.input, outputs = model2.layers[layer_index].output)
outputs1 = submodel1(tests[loc])
outputs1 = K.mean(K.reshape(outputs1, (outputs1.shape[0], -1, outputs1.shape[-1])), axis = 1)
outputs2 = submodel2(tests[loc])
outputs2 = K.mean(K.reshape(outputs2, (outputs2.shape[0], -1, outputs2.shape[-1])), axis = 1)
outputs1_normlized = normalize(outputs1)
outputs2_normlized = normalize(outputs2)
activations1 = np.array([np.where(i>theta, 1, 0) for i in outputs1_normlized])
activations2 = np.array([np.where(i>theta, 1, 0) for i in outputs2_normlized])
lads.append(np.linalg.norm(activations1 - activations2, axis=1, ord=1))
return round(np.average(np.array(lads)), DIGISTS)
def NOD(model1, model2, tests):
""" Neuron Output Distance
args:
model1 & model2: victim model and suspect model
tests: white-box test cases
return:
NOD value
"""
nods = []
for loc in tests.keys():
layer_index, idx = loc[0], loc[1]
submodel1 = Model(inputs = model1.input, outputs = model1.layers[layer_index].output)
submodel2 = Model(inputs = model2.input, outputs = model2.layers[layer_index].output)
outputs1 = submodel1(tests[loc])
outputs1 = K.mean(K.reshape(outputs1, (outputs1.shape[0], -1, outputs1.shape[-1])), axis = 1)
outputs2 = submodel2(tests[loc])
outputs2 = K.mean(K.reshape(outputs2, (outputs2.shape[0], -1, outputs2.shape[-1])), axis = 1)
nods.append(np.abs(outputs1[:,idx] - outputs2[:,idx]))
return round(np.average(np.array(nods)), DIGISTS)
def NAD(model1, model2, tests, theta=0.5):
""" Neuron Activation Distance
args:
model1 & model2: victim model and suspect model
tests: white-box test cases
theta: activation threshold
return:
NAD value
"""
def normalize(vs):
return [(v-np.min(v))/(np.max(v)-np.min(v)+1e-6) for v in vs]
nads = []
for loc in tests.keys():
layer_index, idx = loc[0], loc[1]
submodel1 = Model(inputs = model1.input, outputs = model1.layers[layer_index].output)
submodel2 = Model(inputs = model2.input, outputs = model2.layers[layer_index].output)
outputs1 = submodel1(tests[loc])
outputs1 = K.mean(K.reshape(outputs1, (outputs1.shape[0], -1, outputs1.shape[-1])), axis = 1)
outputs2 = submodel2(tests[loc])
outputs2 = K.mean(K.reshape(outputs2, (outputs2.shape[0], -1, outputs2.shape[-1])), axis = 1)
outputs1_normlized = normalize(outputs1)
outputs2_normlized = normalize(outputs2)
activations1 = np.array([np.where(i>theta, 1, 0) for i in outputs1_normlized])
activations2 = np.array([np.where(i>theta, 1, 0) for i in outputs2_normlized])
nads.append(np.abs(activations1[:,idx] - activations2[:,idx]))
return round(np.average(np.array(nads))*len(tests), DIGISTS)
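# Hypothetical usage sketch (models and test data are assumed, not provided here):
#   from tensorflow.keras.models import load_model
#   victim, suspect = load_model('victim.h5'), load_model('suspect.h5')
#   print('Rob:', Rob(suspect, advx, advy))       # robustness on the adversarial set
#   print('JSD:', JSD(victim, suspect, advx))     # black-box output-distribution distance
#   print('LOD:', LOD(victim, suspect, tests))    # white-box layer-output distance
# where `tests` maps (layer_index, neuron_index) -> input batch, as the functions above expect.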
|
StarcoderdataPython
|
4875928
|
import os
import sys
from configparser import ConfigParser
import platform
import logging as log
from func import Func
import argparse
try:
if getattr(sys, 'frozen', False):
script_dir = os.path.dirname(sys.executable)
else:
script_dir = os.path.dirname(os.path.realpath(__file__))
config = ConfigParser()
config.read(script_dir + os.sep + 'config.ini')
except Exception as err:
raise SystemExit(f'Config parse: {err}')
ZONE = config.get('GENERAL', 'ZONE').split(',')
ADMIN_EMAIL = config.get('GENERAL', 'ADMIN_EMAIL')
CONFIG_DIR = config.get('GENERAL', 'LE_CONFIG_DIR')
CERTBOT = config.get('GENERAL', 'CERTBOT')
LELOG = config.get('GENERAL', 'LE_LOG')
WEBSERVERENABLED = config.getboolean('WEBSERVER', 'ENABLED')
TESTCONFIG = config.get('WEBSERVER', 'TEST_CONFIG')
RELOADCONFIG = config.get('WEBSERVER', 'RELOAD_CONFIG')
SMTPENABLED = config.getboolean('SMTP', 'ENABLED')
SMTPSERVER = config.get('SMTP', 'SERVER')
SMTPPORT = int(config.get('SMTP', 'PORT'))
SMTPUSER = config.get('SMTP', 'USERNAME')
SMTPPASS = config.get('SMTP', 'PASSWORD')
SENDER = config.get('SMTP', 'FROM')
RECIPIENT = config.get('SMTP', 'TO').split(',')
SLACKENABLED = config.getboolean('SLACK', 'ENABLED')
SLACKWEBHOOK = config.get('SLACK', 'WEBHOOK')
TELEGRAMENABLED = config.getboolean('TELEGRAM', 'ENABLED')
TELEGRAMTOKEN = config.get('TELEGRAM', 'TOKEN')
TELEGRAMCHATID = config.get('TELEGRAM', 'CHAT_ID')
POSTHOOKENABLED = config.getboolean('POSTHOOK', 'ENABLED')
POSTHOOKSCRIPT = config.get('POSTHOOK', 'SCRIPT')
HOSTNAME = platform.node()
LOG_FILE = config.get('LOG', 'LOG_FILE')
if platform.system() == "Windows":
AUTH_HOOK = f'{script_dir}{os.sep}auth.exe'
CLEAN_HOOK = f'{script_dir}{os.sep}clean.exe'
else:
AUTH_HOOK = f'{script_dir}{os.sep}auth'
CLEAN_HOOK = f'{script_dir}{os.sep}clean'
ENC_KEY = 'XXX'
ENC_DAT = f'{script_dir}{os.sep}enc.dat'
log.basicConfig(format = '%(levelname)-8s [%(asctime)s] %(filename)s %(lineno)d: %(message)s', level = log.INFO, filename = f'{script_dir}{os.sep}{LOG_FILE}', filemode='w')
def notify(subject, msg, test=False):
if SMTPENABLED:
try:
Func.sendEmail(SENDER, RECIPIENT, subject, msg, SMTPSERVER, SMTPPORT, SMTPUSER, SMTPPASS)
except Exception as err:
log.error(err)
sys.exit(err)
if SLACKENABLED:
try:
Func.slackSend(SLACKWEBHOOK, f'{subject} {msg}')
except Exception as err:
log.error(err)
sys.exit(err)
if TELEGRAMENABLED:
try:
Func.telegramSend(TELEGRAMTOKEN, TELEGRAMCHATID, f'{subject} {msg}')
except Exception as err:
log.error(err)
sys.exit(err)
def main():
parser = argparse.ArgumentParser(description='LetsEncrypt NIC')
parser.add_argument('-v', dest='verbose', help='verbose output', action='store_true', required=False)
parser.add_argument('-t', dest='test', help='test (not actual run)', action='store_true', required=False)
parser.add_argument('-n', dest='new_cert', help='obtain new certificate', action='store_true', required=False)
parser.add_argument('-a', dest='add_creds', help='add credentials', action='store_true', required=False)
args = parser.parse_args()
try:
# save credentials
if args.add_creds:
nicuser, nicpass, nic_id, nic_sec = Func.NIC_inputCreds()
Func.encrypt(ENC_KEY, ENC_DAT, nicuser, nicpass, nic_id, nic_sec)
print('Credentials encrypted and saved! Exit...')
sys.exit(0)
# decrypt
if args.verbose:
os.environ['VERBOSE'] = "true"
print('-= LetsEncrypt NIC =-')
log.info('-= LetsEncrypt NIC =-')
try:
USER, PASS, CLIENT_ID, CLIENT_SECRET = Func.decrypt(ENC_KEY, ENC_DAT)
except Exception as err:
log.error(err)
notify(f'[ {HOSTNAME} ] LetsEncrypt', str(err))
raise SystemExit(err)
# export credentials
Func.exportCredentials(USER, PASS, CLIENT_ID, CLIENT_SECRET)
# make domains list
if args.verbose:
print('Preparing domain list...')
log.info('Preparing domain list...')
try:
maindomain = Func.makeMainDomain(ZONE)
domains = Func.makeList(ZONE)
except Exception as err:
log.error(err)
notify(f'[ {HOSTNAME} ] LetsEncrypt', str(err))
raise SystemExit(err)
# certbot dry run
if args.test:
if args.verbose:
print('[+] ACME Test: [ START ]')
log.info('[+] ACME Test: [ START ]')
try:
code, out, err = Func.acmeRun(maindomain, domains, CERTBOT, ADMIN_EMAIL, CONFIG_DIR, AUTH_HOOK, CLEAN_HOOK, test=True, new=args.new_cert, verbose=args.verbose)
if code != 0:
log.error(err)
sys.exit(err)
except Exception as err:
log.error(err)
notify(f'[ {HOSTNAME} ] LetsEncrypt', str(err), test=True)
raise SystemExit(err)
if args.verbose:
print('[+] ACME Test: [ DONE ]')
log.info('[+] ACME Test: [ DONE ]')
log.info('-= Program completed! =-')
if args.verbose:
print('-= Program completed! =-')
sys.exit()
# certbot run
if args.verbose:
print('[+] ACME Run: [ START ]')
log.info('[+] ACME Run: [ START ]')
try:
code, out, err = Func.acmeRun(maindomain, domains, CERTBOT, ADMIN_EMAIL, CONFIG_DIR, AUTH_HOOK, CLEAN_HOOK, new=args.new_cert, verbose=args.verbose)
if code != 0:
log.error(err)
sys.exit(err)
except Exception as err:
log.error(err)
notify(f'[ {HOSTNAME} ] LetsEncrypt', str(err))
raise SystemExit(err)
if args.verbose:
print('[+] ACME Run: [ DONE ]')
log.info('[+] ACME Run: [ DONE ]')
# reload webserver
if WEBSERVERENABLED:
if args.verbose:
print('[+] SERVER Reload: [ START ]')
log.info('[+] SERVER Reload: [ START ]')
try:
Func.reloadServer(TESTCONFIG, RELOADCONFIG)
except Exception as err:
log.error(err)
notify(f'[ {HOSTNAME} ] LetsEncrypt', str(err))
raise SystemExit(err)
if args.verbose:
print('[+] SERVER Reload: [ DONE ]')
log.info('[+] SERVER Reload: [ DONE ]')
# destroy credentials
Func.destroyCredentials()
# posthook run
if POSTHOOKENABLED:
if args.verbose:
print('[+] POST HOOK Run: [ START]')
log.info('[+] POST HOOK Run: [ START]')
try:
code, out, err = Func.call(POSTHOOKSCRIPT)
except Exception as err:
log.error(err)
notify(f'[ {HOSTNAME} ] LetsEncrypt', str(err))
raise SystemExit(err)
if args.verbose:
print('[+] POST HOOK Run: [ DONE ]')
log.info('[+] POST HOOK Run: [ DONE ]')
# complete
if args.verbose:
print('-= Program completed! =-')
log.info('-= Program completed! =-')
sys.exit(0)
except KeyboardInterrupt:
raise SystemExit('\n-= Program terminated... =-')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1922928
|
'''Tests for Bruker format conversion.
Copyright <NAME>, University of Oxford 2021
Subject to the BSD 3-Clause License.
'''
import subprocess
from pathlib import Path
import json
import numpy as np
from .io_for_tests import read_nifti_mrs
# Data paths
bruker_path = Path(__file__).parent / 'spec2nii_test_data' / 'bruker'
data_path = bruker_path / '20201208_105201_lego_rod_1_3'
def test_fid(tmp_path):
subprocess.check_call(['spec2nii', 'bruker',
'-f', 'fid',
'-m', 'FID',
'-d',
'-o', tmp_path,
'-j',
str(data_path)])
# Img 5 - csi
img = read_nifti_mrs(tmp_path / 'fid_FID_5.nii.gz')
assert img.shape == (16, 16, 1, 1980)
assert np.iscomplexobj(img.dataobj)
assert 1 / img.header['pixdim'][4] == 4000.0
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '5' / 'fid')
assert 'method' in hdr_ext
assert 'acqp' in hdr_ext
# Img 6 - csi
img = read_nifti_mrs(tmp_path / 'fid_FID_6.nii.gz')
assert img.shape == (16, 16, 1, 1980)
assert np.iscomplexobj(img.dataobj)
assert 1 / img.header['pixdim'][4] == 4000.0
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '6' / 'fid')
assert 'method' in hdr_ext
assert 'acqp' in hdr_ext
# Img 9 - svs
img = read_nifti_mrs(tmp_path / 'fid_FID_9.nii.gz')
assert img.shape == (1, 1, 1, 1980, 1)
assert np.iscomplexobj(img.dataobj)
assert np.isclose(1 / img.header['pixdim'][4], 4401.41)
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert hdr_ext['dim_5'] == 'DIM_DYN'
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '9' / 'fid')
assert 'method' in hdr_ext
assert 'acqp' in hdr_ext
# Img 10 - svs
img = read_nifti_mrs(tmp_path / 'fid_FID_10.nii.gz')
assert img.shape == (1, 1, 1, 1980, 1)
assert np.iscomplexobj(img.dataobj)
assert np.isclose(1 / img.header['pixdim'][4], 4401.41)
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert hdr_ext['dim_5'] == 'DIM_DYN'
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '10' / 'fid')
assert 'method' in hdr_ext
assert 'acqp' in hdr_ext
def test_2dseq(tmp_path):
subprocess.check_call(['spec2nii', 'bruker',
'-f', '2dseq',
'-m', '2DSEQ',
'-d',
'-o', tmp_path,
'-j',
str(data_path)])
# Img 5 - csi
img = read_nifti_mrs(tmp_path / '2dseq_2DSEQ_5_2_lego_rod_3.nii.gz')
assert img.shape == (16, 16, 1, 2048)
assert np.iscomplexobj(img.dataobj)
assert 1 / img.header['pixdim'][4] == 4000.0
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '5' / 'pdata' / '2' / '2dseq')
assert 'method' in hdr_ext
assert 'visu_pars' in hdr_ext
# Img 6 - csi
img = read_nifti_mrs(tmp_path / '2dseq_2DSEQ_6_2_lego_rod_3.nii.gz')
assert img.shape == (16, 16, 1, 2048)
assert np.iscomplexobj(img.dataobj)
assert 1 / img.header['pixdim'][4] == 4000.0
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '6' / 'pdata' / '2' / '2dseq')
assert 'method' in hdr_ext
assert 'visu_pars' in hdr_ext
# Img 9 - svs
img = read_nifti_mrs(tmp_path / '2dseq_2DSEQ_9_2_lego_rod_3.nii.gz')
assert img.shape == (1, 1, 1, 2048)
assert np.iscomplexobj(img.dataobj)
assert np.isclose(1 / img.header['pixdim'][4], 4401.41)
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '9' / 'pdata' / '2' / '2dseq')
assert 'method' in hdr_ext
assert 'visu_pars' in hdr_ext
# Img 10 - svs
img = read_nifti_mrs(tmp_path / '2dseq_2DSEQ_10_2_lego_rod_3.nii.gz')
assert img.shape == (1, 1, 1, 2048)
assert np.iscomplexobj(img.dataobj)
assert np.isclose(1 / img.header['pixdim'][4], 4401.41)
hdr_ext_codes = img.header.extensions.get_codes()
hdr_ext = json.loads(img.header.extensions[hdr_ext_codes.index(44)].get_content())
assert np.isclose(hdr_ext['SpectrometerFrequency'][0], 400.32251)
assert hdr_ext['ResonantNucleus'][0] == '1H'
assert hdr_ext['OriginalFile'][0] == str(data_path.absolute() / '10' / 'pdata' / '2' / '2dseq')
assert 'method' in hdr_ext
assert 'visu_pars' in hdr_ext
|
StarcoderdataPython
|
9631206
|
<gh_stars>0
# Generated by Django 3.2.6 on 2021-08-14 22:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('collect', '0006_alter_courseinfo_name'),
]
operations = [
migrations.RemoveField(
model_name='raceinfo',
name='course',
),
migrations.RemoveField(
model_name='raceinfo',
name='date',
),
migrations.RemoveField(
model_name='raceinfo',
name='round',
),
]
|
StarcoderdataPython
|
285763
|
<gh_stars>0
patches = [
{
"op": "remove",
"path": "/PropertyTypes/AWS::S3::StorageLens.S3BucketDestination/Properties/Encryption/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::S3::StorageLens.S3BucketDestination/Properties/Encryption/PrimitiveType",
"value": "Json",
},
# Rename AWS::S3::StorageLens.DataExport to AWS::S3::StorageLens.StorageLensDataExport due to conflict with AWS::S3::Bucket.DataExport
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::StorageLens.DataExport",
"path": "/PropertyTypes/AWS::S3::StorageLens.StorageLensDataExport",
},
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::StorageLens.StorageLensConfiguration/Properties/DataExport/Type",
"value": "StorageLensDataExport",
},
    # Rename AWS::S3::Bucket.Rule to AWS::S3::Bucket.LifecycleRule - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.Rule",
"path": "/PropertyTypes/AWS::S3::Bucket.LifecycleRule",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.LifecycleConfiguration/Properties/Rules/ItemType",
"value": "LifecycleRule",
},
# Rename AWS::S3::Bucket.Transition to AWS::S3::Bucket.LifecycleRuleTransition - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.Transition",
"path": "/PropertyTypes/AWS::S3::Bucket.LifecycleRuleTransition",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.LifecycleRule/Properties/Transition/Type",
"value": "LifecycleRuleTransition",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.LifecycleRule/Properties/Transitions/ItemType",
"value": "LifecycleRuleTransition",
},
    # Rename AWS::S3::Bucket.CorsRule to AWS::S3::Bucket.CorsRules - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.CorsRule",
"path": "/PropertyTypes/AWS::S3::Bucket.CorsRules",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.CorsConfiguration/Properties/CorsRules/ItemType",
"value": "CorsRules",
},
# Rename AWS::S3::Bucket.FilterRule to AWS::S3::Bucket.Rules - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.FilterRule",
"path": "/PropertyTypes/AWS::S3::Bucket.Rules",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.S3KeyFilter/Properties/Rules/ItemType",
"value": "Rules",
},
# Rename AWS::S3::Bucket.S3KeyFilter to AWS::S3::Bucket.S3Key - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.S3KeyFilter",
"path": "/PropertyTypes/AWS::S3::Bucket.S3Key",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.NotificationFilter/Properties/S3Key/Type",
"value": "S3Key",
},
# Rename AWS::S3::Bucket.NotificationFilter to AWS::S3::Bucket.Filter - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.NotificationFilter",
"path": "/PropertyTypes/AWS::S3::Bucket.Filter",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.LambdaConfiguration/Properties/Filter/Type",
"value": "Filter",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.QueueConfiguration/Properties/Filter/Type",
"value": "Filter",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.TopicConfiguration/Properties/Filter/Type",
"value": "Filter",
},
# Rename AWS::S3::Bucket.LambdaConfiguration to AWS::S3::Bucket.LambdaConfigurations - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.LambdaConfiguration",
"path": "/PropertyTypes/AWS::S3::Bucket.LambdaConfigurations",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.NotificationConfiguration/Properties/LambdaConfigurations/ItemType",
"value": "LambdaConfigurations",
},
    # Rename AWS::S3::Bucket.QueueConfiguration to AWS::S3::Bucket.QueueConfigurations - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.QueueConfiguration",
"path": "/PropertyTypes/AWS::S3::Bucket.QueueConfigurations",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.NotificationConfiguration/Properties/QueueConfigurations/ItemType",
"value": "QueueConfigurations",
},
# Rename AWS::S3::Bucket.TopicConfiguration to AWS::S3::Bucket.TopicConfigurations - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.TopicConfiguration",
"path": "/PropertyTypes/AWS::S3::Bucket.TopicConfigurations",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.NotificationConfiguration/Properties/TopicConfigurations/ItemType",
"value": "TopicConfigurations",
},
# Rename AWS::S3::Bucket.ReplicationDestination to AWS::S3::Bucket.ReplicationConfigurationRulesDestination - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.ReplicationDestination",
"path": "/PropertyTypes/AWS::S3::Bucket.ReplicationConfigurationRulesDestination",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.ReplicationRule/Properties/Destination/Type",
"value": "ReplicationConfigurationRulesDestination",
},
# Rename AWS::S3::Bucket.ReplicationRule to AWS::S3::Bucket.ReplicationConfigurationRules - backward compatibility
{
"op": "move",
"from": "/PropertyTypes/AWS::S3::Bucket.ReplicationRule",
"path": "/PropertyTypes/AWS::S3::Bucket.ReplicationConfigurationRules",
},
# backward compatibility
{
"op": "replace",
"path": "/PropertyTypes/AWS::S3::Bucket.ReplicationConfiguration/Properties/Rules/ItemType",
"value": "ReplicationConfigurationRules",
},
]
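# Illustrative sketch (not part of the original file): each entry above is a
# standard RFC 6902 JSON-Patch operation, so a resource specification dict can
# be patched with the third-party `jsonpatch` package. The tiny spec and patch
# below are made-up stand-ins, only to show the mechanics of a "move".
def _demo_apply_patch():
    import jsonpatch
    demo_spec = {"PropertyTypes": {"AWS::S3::Bucket.Rule": {"Properties": {}}}}
    demo_patch = [
        {
            "op": "move",
            "from": "/PropertyTypes/AWS::S3::Bucket.Rule",
            "path": "/PropertyTypes/AWS::S3::Bucket.LifecycleRule",
        },
    ]
    return jsonpatch.JsonPatch(demo_patch).apply(demo_spec)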
|
StarcoderdataPython
|
5137465
|
<reponame>heminsatya/aurora<gh_stars>1-10
################
# Dependencies #
################
import importlib
from aurora.security import request, redirect, check_cookie, get_cookie, check_session, get_session, set_session
from aurora.helpers import app_exists
from flask.views import View
####################
# Controller Class #
####################
##
# @desc Controller class to control views based on the requested method
##
class Controller(View):
##
# @desc Constructor method -- Generates Pluggable Views
##
def __init__(self) -> None:
# Required modules
config = importlib.import_module('config')
# Required attributes
self.default_lang = getattr(config, 'DEFAULT_LANG')
self.multi_lang = getattr(config, "MULTI_LANG")
self.languages = getattr(config, 'LANGUAGES')
# Public properties
self.active_lang = self.default_lang
self.LANGUAGE = ''
# Check the language
if self.multi_lang:
# Fetch the lang
path = request.path
lang = path.split('/')[1]
# The root path and apps path
if path == '/' or app_exists(lang)['result']:
# active_lang cookie exists
if check_cookie('active_lang'):
self.active_lang = get_cookie('active_lang')
set_session('active_lang', get_cookie('active_lang'))
# active_lang session exists
elif check_session('active_lang'):
self.active_lang = get_session('active_lang')
                # Neither the active_lang cookie nor the active_lang session exists
else:
self.active_lang = self.default_lang
set_session('active_lang', self.default_lang)
# Languages path
elif lang in self.languages:
self.active_lang = lang
set_session('active_lang', lang)
# Other paths
else:
self.active_lang = self.default_lang
set_session('active_lang', self.default_lang)
# Set active language URL
self.LANGUAGE = '/' + self.active_lang
##
# @desc Flask dispatch_request method -- Generates Pluggable Views
##
def dispatch_request(self, *class_args, **class_kwargs):
# Check the requested methods then return the related view function
# The 'POST' request
if request.method == 'POST':
return self.post(*class_args, **class_kwargs)
# The 'GET' request
elif request.method == 'GET':
# Check the language
if self.multi_lang:
# Fetch the path
path = request.path
# The root path
if path == '/' or app_exists(path.split('/')[1])['result']:
if check_cookie('active_lang'):
return redirect('/' + get_cookie('active_lang') + path)
elif check_session('active_lang'):
return redirect('/' + get_session('active_lang') + path)
else:
return redirect('/' + self.default_lang + path)
return self.get(*class_args, **class_kwargs)
# The 'PUT' request
elif request.method == 'PUT':
return self.put(*class_args, **class_kwargs)
# The 'DELETE' request
elif request.method == 'DELETE':
return self.delete(*class_args, **class_kwargs)
##
    # @desc post method placeholder -- To handle the 'POST' requests
#
# @return any
##
def post(self):
return 'POST Method'
##
# @desc get method placeholder -- To handle the 'GET' requests
#
# @return any
##
def get(self):
return 'GET Method'
##
    # @desc put method placeholder -- To handle the 'PUT' requests
#
# @return any
##
def put(self):
return 'PUT Method'
##
    # @desc delete method placeholder -- To handle the 'DELETE' requests
#
# @return any
##
def delete(self):
return 'DELETE Method'
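# Illustrative sketch (not part of the original file): a concrete controller
# would subclass Controller and override the verb handlers it needs; Flask then
# routes requests through dispatch_request above. The class name and return
# values are made-up examples, and registration would happen elsewhere, e.g.
# app.add_url_rule('/', view_func=HomeController.as_view('home')).
class HomeController(Controller):
    def get(self):
        # self.active_lang and self.LANGUAGE are prepared in Controller.__init__
        return 'Home page ({})'.format(self.active_lang)
    def post(self):
        return 'Form submitted'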
|
StarcoderdataPython
|
9697537
|
<gh_stars>0
"""
Tests for the implementation of the GroupwiseStratifiedKFold
"""
import math
from groupwise_stratified_kfold import (
GroupwiseStratifiedKFold,
RepeatedGroupwiseStratifiedKFold,
)
from groupwise_stratified_kfold.kfold import (
absolute_class_counts,
diff_distribution,
join_distributions,
relative_class_counts,
)
example_data_1 = {
"g01": "BBDE",
"g02": "CBEDF",
"g03": "BBAA",
"g04": "ABCD",
"g05": "ABBDF",
"g06": "ABC",
"g07": "ABBAA",
"g08": "ACBD",
"g09": "DEBBA",
"g10": "AABC",
"g11": "AAAAF",
"g12": "CCDACD",
"g13": "CCADB",
"g14": "CBAF",
"g15": "ABCD",
"g16": "CDBD",
}
example_data_2 = {
"g01": "BBDE",
"g02": "CBEDF",
"g03": "BBAA",
"g04": "ABCD",
"g05": "ABBDF",
"g06": "ABC",
"g07": "ABBAA",
"g08": "ACBD",
"g09": "DEBBA",
"g10": "AABC",
"g11": "AAAAF",
"g12": "CCDACD",
"g13": "CCADB",
"g14": "CBAF",
"g15": "ABCD",
"g16": "CDBD",
"g17": "BBDE",
"g18": "CBEDF",
"g19": "BBAA",
"g20": "ABCD",
"g21": "ABBDF",
"g22": "ABC",
"g23": "ABBAA",
"g24": "ACBD",
"g25": "DEBBA",
"g26": "AABC",
"g27": "AAAAF",
"g28": "CCDACD",
"g29": "CCADB",
"g30": "CBAF",
"g31": "ABCD",
"g32": "CDBD",
}
def test_groupwise_stratified_kfold():
folds = list(GroupwiseStratifiedKFold(4, example_data_1))
# Correct length of folds.
assert len(folds) == 4
# No overlap between train and tests folds.
assert all(set(train) & set(test) == set() for train, test in folds)
# Each train/test split covers the whole data.
    assert all(set(train) | set(test) == set(example_data_1) for train, test in folds)
# All test sets of the folding cover the whole data.
assert {group for tr, test in folds for group in test} == set(
example_data_1
)
# Folding is stratified, i.e. label distributions are similar.
assert [
"".join(
sorted(label for group in train for label in example_data_1[group])
)
for train, _ in folds
] == [
"AAAAAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCCCCDDDDDDDDDDEEFFF",
"AAAAAAAAAAAAAABBBBBBBBBBBBBBBCCCCCCCCCCCDDDDDDDDDDEEFFF",
"AAAAAAAAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCCCCDDDDDDDDEEFFF",
"AAAAAAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCCDDDDDDDDEEEFFF",
]
def test_groupwise_stratified_kfold_groups_not_a_multiple_of_k():
# Add one extra 17th group to the data.
data = dict(example_data_1, g17="ABCDEF")
folds = list(GroupwiseStratifiedKFold(4, data))
# Correct length of folds.
assert len(folds) == 4
# No overlap between train and tests folds.
assert all(set(train) & set(test) == set() for train, test in folds)
# Each train/test split covers the whole data.
    assert all(set(train) | set(test) == set(data) for train, test in folds)
# All test sets of the folding cover the whole data.
assert {group for tr, test in folds for group in test} == set(data)
def test_groupwise_stratified_kfold_less_than_k_samples_for_class():
# Add one extra 17th group to the data with a unique X label.
data = dict(example_data_1, g17="ABCDEFX")
folds = list(GroupwiseStratifiedKFold(4, data))
# Correct length of folds.
assert len(folds) == 4
# No overlap between train and tests folds.
assert all(set(train) & set(test) == set() for train, test in folds)
# Each train/test split covers the whole data.
    assert all(set(train) | set(test) == set(data) for train, test in folds)
# All test sets of the folding cover the whole data.
assert {group for tr, test in folds for group in test} == set(data)
def test_repeated_groupwise_stratified_kfold():
folds = list(
RepeatedGroupwiseStratifiedKFold(
4, example_data_2, shuffle=True, repeats=2
)
)
# Correct length of folds.
assert len(folds) == 2 * 4
    # Every fold is different. Note this only works when shuffle=True and the
# dataset is large enough.
assert len({tuple(sorted(test)) for _, test, _ in folds}) == 2 * 4
def test_absolute_class_counts():
data = [0, 2, 2, 1, 0, 1, 2, 2, 0]
expected = {0: 3, 1: 2, 2: 4}
assert absolute_class_counts(data) == expected
# Counts sum up to the length of the input data.
assert sum(expected.values()) == len(data)
# For every unique item in the data there is one key value.
assert set(data) == set(expected.keys())
def test_absolute_class_counts_with_expected_classes():
expected_classes = [0, 1, 2, 9]
data = [0, 2, 2, 1, 0, 1, 2, 2, 0]
expected = {0: 3, 1: 2, 2: 4, 9: 0}
assert (
absolute_class_counts(data, expected_classes=expected_classes)
== expected
)
# Counts sum up to the length of the input data.
assert sum(expected.values()) == len(data)
# For every unique item in the data there is one key value.
assert set(data).issubset(set(expected.keys()))
def test_relative_class_counts():
data = {0: 0, 1: 1, 2: 2, 7: 7}
expected = {0: 0.0, 1: 0.1, 2: 0.2, 7: 0.7}
assert relative_class_counts(data) == expected
# All keys appear in the relative counts
assert data.keys() == expected.keys()
# Values sum up to 1.
assert sum(expected.values()) == 1.0
def test_diff_distribution():
# Equal distributions yield zero diff.
assert math.isclose(
diff_distribution({0: 0.1, 9: 0.9}, {0: 0.1, 9: 0.9}), 0.0
)
# Similar distributions yield small diff.
assert math.isclose(
diff_distribution({0: 0.1, 9: 0.9}, {0: 0.2, 9: 0.8}), 0.2
)
# Different distributions yield a larger diff.
assert math.isclose(
diff_distribution({0: 0.1, 9: 0.9}, {0: 0.5, 9: 0.5}), 0.8
)
    # Weights can be defined per class to control how much a diff for that
    # class contributes to the total diff.
assert math.isclose(
diff_distribution(
{0: 0.1, 9: 0.9}, {0: 0.5, 9: 0.5}, weights={0: 0.99, 9: 0.01}
),
0.4,
)
def test_join_distributions():
assert join_distributions({0: 0.1, 9: 0.9}, {0: 0.1, 9: 0.9}) == {
0: 0.2,
9: 1.8,
}
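# Illustrative sketch (not part of the original tests): typical use of the
# iterator under test. Each iteration yields a (train_groups, test_groups) pair
# of group keys that callers map back onto their own samples.
def demo_groupwise_kfold_usage():
    for train_groups, test_groups in GroupwiseStratifiedKFold(4, example_data_1):
        train_labels = "".join(example_data_1[g] for g in train_groups)
        print(len(train_groups), len(test_groups), "".join(sorted(train_labels)))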
|
StarcoderdataPython
|
12835853
|
class Solution:
def isValid(self, s: str) -> bool:
stack = []
d = {"]": "[", "}": "{", ")": "("}
for char in s:
# Opening
if char in d.values():
stack.append(char)
elif char in d.keys():
if stack == [] or d[char] != stack.pop():
return False
else:
return False
return stack == []
s = Solution()
print(s.isValid("()[]{}"))
|
StarcoderdataPython
|
6525966
|
import os
import random
import shutil
import tarfile
import cv2
import numpy as np
from keras.utils import Sequence
from utilities import download_file, download_image_cv2_urllib
class DataGen(Sequence):
"""
This generator downloads one tar file at each epoch. Extracts and selects the valid images from it to
form batches. And after the epoch is complete, deletes the files to free up space.
"""
def __init__(self, valid_ids_dict, num_classes, start=10, batch_size=128, steps=10, verbose=1):
self.valid_ids_dict = valid_ids_dict # dict of image ids to landmarks {image_id: landmark_id}
self.NUM_CLASSES = num_classes # number of valid classes to consider
self.batch_size = batch_size
self.steps = steps # should be equal to the number of epochs
self.images = []
self.landmarks = []
self.tar_idx = start
self.epoch_init()
def epoch_init(self):
self.all_images = []
self.all_landmarks = []
if self.tar_idx < 10:
tarfilestr = "00" + str(self.tar_idx)
elif self.tar_idx < 100:
tarfilestr = "0" + str(self.tar_idx)
else:
tarfilestr = str(self.tar_idx)
download_file("https://s3.amazonaws.com/google-landmark/train/images_{}.tar".format(tarfilestr), "images.tar",
bar=False)
#print(os.listdir())
tar = tarfile.open('images.tar')
tar.extractall("imagesfolder")
tar.close()
self.total = self.pickfiles("imagesfolder")
self.tar_idx += 1
print("tar", self.tar_idx - 1, "total:", self.total)
def pickfiles(self, dirr):
count = 0
for f in os.listdir(dirr):
if os.path.isfile(dirr + "/" + f):
if f[:-4] in self.valid_ids_dict:
self.all_images.append(dirr + "/" + f)
self.all_landmarks.append(self.valid_ids_dict[f[:-4]])
count += 1
else:
count += self.pickfiles(dirr + "/" + f)
return count
def normalize(self, data):
return data / 255 - 0.5
def __getitem__(self, index):
        image_path_list = self.all_images[index * self.batch_size:min(self.total, (index + 1) * self.batch_size)]
        class_list = self.all_landmarks[index * self.batch_size:min(self.total, (index + 1) * self.batch_size)]
if len(image_path_list) == 0:
image_path_list = self.all_images[:self.batch_size]
class_list = self.all_landmarks[:self.batch_size]
images = []
y_list = []
for ix in range(len(image_path_list)):
try:
image_path = image_path_list[ix]
im = cv2.imread(image_path)
im = cv2.resize(im, (192, 192), interpolation=cv2.INTER_AREA)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if im.shape == (192, 192, 3):
images.append(im)
y_list.append(class_list[ix])
except:
continue
x = np.array(images)
y = np.zeros((len(y_list), self.NUM_CLASSES))
for i in range(len(y_list)):
y[i, y_list[i]] = 1.
return x, y
def on_epoch_end(self):
self.steps -= 1
os.unlink("images.tar")
shutil.rmtree("imagesfolder")
if self.steps > 0:
self.epoch_init()
def __len__(self):
return self.total // self.batch_size + int(self.total % self.batch_size > 0)
class DataGenURLVersion(Sequence):
"""
This generator uses the image urls from the train dataset to form batches
and downloads each image individually. It will be approx 10 times slower than above version.
"""
def __init__(self, valid_urls_dict, num_classes, data, batch_size=24, verbose=1):
self.batch_size = batch_size
self.data_urls = data
self.NUM_CLASSES = num_classes # number of classes
self.valid_urls_dict = valid_urls_dict # dict of url and corresponding landmark {image_url: landmark}
def normalize(self, data):
return data
def __getitem__(self, index):
batch_urls = random.sample(self.data_urls, self.batch_size)
output = []
y_classes = []
for url in batch_urls:
im = download_image_cv2_urllib(url)
if im.size != 0:
output.append(im)
y_classes.append(self.valid_urls_dict[url.split("/")[-1]])
x = np.array(output)
y = np.zeros((len(output), self.NUM_CLASSES))
for i in range(len(y_classes)):
y[i, y_classes[i]] = 1.
return x, y
def on_epoch_end(self):
return
def __len__(self):
# return len(valid_urls_list) // self.batch_size
return 10
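# Illustrative sketch (not part of the original file): rough wiring of DataGen
# into a Keras training loop. `model`, `valid_ids_dict` and `num_classes` are
# assumed to be provided by the caller and are placeholders here.
def demo_fit(model, valid_ids_dict, num_classes):
    gen = DataGen(valid_ids_dict, num_classes, start=10, batch_size=128, steps=10)
    model.fit_generator(gen, epochs=10, workers=1, use_multiprocessing=False)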
|
StarcoderdataPython
|
3504882
|
# Write a program to fill the screen horizontally and vertically with your name. [Hint: add the
# option end= '' into the print function to fill the screen horizontally.]
for i in range(100):
for j in range(100):
print('AhmadAbdulrahman', end='')
print('') # to start a new line
|
StarcoderdataPython
|
5163423
|
# -*- coding: utf-8 -*-
"""
makeplot.py
make a figure from the data selected
"""
import sys
import os
from glob import glob
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class MakePlot(QMainWindow):
def __init__(self, parent=None, basedir=None, file=None, *args, **kwargs):
super(MakePlot, self).__init__(*args, **kwargs)
self.parent = parent
self.file = file
self.basedir = basedir
self.setWindowTitle('plot gui')
#app.aboutToQuit.connect(self.exit_dialog)
self.layout = QGridLayout()
w = QWidget()
w.setLayout(self.layout)
self.setCentralWidget(w)
if file is None:
self.get_file()
self.read_file()
if self.dataframe is not None:
self.setup_window()
x=int(self.parent.dt_height*0.02)
y=int(self.parent.dt_width*0.02)
w=int(self.parent.dt_width*0.02)
h=int(self.parent.dt_height*0.05)
self.setGeometry(x, y, w, h)
self.show()
def get_file(self):
tx = self.basedir
if tx == 0:
tx = None
file, spec = QFileDialog.getOpenFileName(self, "open file", tx, "files (*.xlsx)")
self.file = file
def setup_window(self):
top_layout_2 = QGridLayout()
status_box = QGroupBox('')
combox = QComboBox()
self.list_variables(combox)
combox.currentIndexChanged.connect(self.change_variable)
combox.name = 'xvar'
top_layout_2.addWidget(combox, 0, 1)
top_layout_2.addWidget(QLabel('x variable'), 0, 2)
comboy = QComboBox()
self.list_variables(comboy)
comboy.currentIndexChanged.connect(self.change_variable)
comboy.name = 'yvar'
top_layout_2.addWidget(comboy, 0, 3)
top_layout_2.addWidget(QLabel('y variable'), 0, 4)
# plot button
self.buttons = {}
self.buttons['plot'] = QPushButton('plot', clicked=self.make_plot)
top_layout_2.addWidget(self.buttons['plot'], 0, 5)
self.buttons['save'] = QPushButton('save', clicked=self.save_plot)
top_layout_2.addWidget(self.buttons['save'], 0, 6)
self.buttons['exit'] = QPushButton('exit', clicked=self.exit_dialog)
top_layout_2.addWidget(self.buttons['exit'], 0, 7)
# save button
status_box.setLayout(top_layout_2)
self.layout.addWidget(status_box, 0, 0)
def list_variables(self, combo):
combo.addItems(self.dataframe.keys())
def change_variable(self):
s = self.sender()
if s.name == 'xvar':
self.xvar = s.currentText()
if s.name == 'yvar':
self.yvar = s.currentText()
def read_file(self):
self.dataframe = pd.read_excel(self.file)
def make_plot(self):
f, ax = plt.subplots(1,1, figsize=(3.5, 2.2), dpi=250)
xdata = self.dataframe[self.xvar]
ydata = self.dataframe[self.yvar]
ax.plot(xdata, ydata, marker='o', linestyle='None')
ax.set_xlabel(self.xvar)
ax.set_ylabel(self.yvar)
plt.tight_layout()
self.f = f
self.ax = ax
plt.show()
def save_plot(self):
""" write to the output"""
dirout = os.path.join(self.basedir, 'data', 'plots')
if not os.path.isdir(dirout):
os.mkdir(dirout)
files = glob(os.path.join(dirout, '*.png'))
N = len(files) + 1
sname = 'plot_%d.png' % N
fname = os.path.join(dirout, sname)
self.f.savefig(fname, dpi=900)
def exit_dialog(self, event=None):
buttonReply = QMessageBox.question(self, 'PyQt5 message', "Close plot dialog?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if buttonReply == QMessageBox.Yes:
self.destroy()
if self.parent == None:
sys.exit(0)
def main(dirout=None):
""" run stand-alone for testing """
global app
app = QApplication([])
w = MakePlot(basedir=dirout)
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
print('setup a small gui for plot control')
dirout='C:/Users/Ryan/Desktop/Data/pro/2019-12-06_00.53.51'
main(dirout=dirout)
|
StarcoderdataPython
|
3359091
|
#
# Checks whether a number is prime or not
# Returns True if it is prime, otherwise returns False
def primeNumber(n):
    # By definition of a prime number
if(n == 0 or n == 1 or n < 0):
return False
elif(n == 2):
return True
else:
        # To check primality it is enough to verify:
        # 1) that the number is not even
        # 2) that it is not divisible by any of the smaller odd numbers, since being divisible by an even number means being divisible by 2 by definition
return not(isEven(n)) and checkPrime(n, n - 2)
# Helper method that checks whether a number is divisible by m or by any of the smaller odd numbers
def checkPrime(n, m):
if(m <= 2):
return True
elif(n % m == 0):
return False
else:
return checkPrime(n,m-2)
# Checks whether a number is even or not
# Returns True if it is even, otherwise False
def isEven(n):
return (n % 2) == 0
numberToTest = int(input("Decime el numero?"))
print("Es primo: " + str( primeNumber(numberToTest) ) )
|
StarcoderdataPython
|
3237574
|
import logging
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
from prometheus_fastapi_instrumentator import Instrumentator
from api import metadata
from core.config import (
CORS_ORIGINS,
CURRENT_API_VERSION,
DOCS_URL,
OPENAPI_URL,
SERVICE_DESCRIPTION,
SERVICE_ID,
SERVICE_NAME,
)
from scripts import LoadMeta, load_instrument_catalog
logger = logging.getLogger(f"{SERVICE_ID}-app")
app = FastAPI(
title=SERVICE_NAME,
openapi_url=OPENAPI_URL,
docs_url=DOCS_URL,
redoc_url=None,
version=CURRENT_API_VERSION,
description=SERVICE_DESCRIPTION,
)
app.add_middleware(
CORSMiddleware,
allow_origins=CORS_ORIGINS,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/", include_in_schema=False)
def home():
return RedirectResponse(url=f"/{SERVICE_ID}")
@app.on_event("startup")
def startup_event():
LoadMeta()
load_instrument_catalog()
app.include_router(
metadata.router, prefix=f"/{SERVICE_ID}", tags=[f"{SERVICE_ID}"]
)
# Prometheus instrumentation
Instrumentator().instrument(app).expose(
app, endpoint="/metadata/metrics", include_in_schema=False
)
|
StarcoderdataPython
|
3316860
|
<filename>setup.py
from distutils.core import setup
setup(
name = 'uwuizer',
packages = ['uwuizer'],
version = '1.0.1',
license='MIT',
description = 'uwu text generator ٩(◕‿◕。)۶',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/Philinphiladelphia/uwu',
download_url = 'https://github.com/Philinphiladelphia/uwu/archive/refs/tags/v_1.0.0.tar.gz',
keywords = ['uwu', "( ‾́ ◡ ‾́ )"],
install_requires=[
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
StarcoderdataPython
|
1784180
|
import json
from pathlib import Path
import numpy as np
from podm.podm import get_pascal_voc_metrics, MetricPerClass
from tests.utils import load_data, assert_results, load_data_coco
def test_sample2():
dir = Path('tests/sample_2')
gt_BoundingBoxes = load_data(dir / 'groundtruths.json')
pd_BoundingBoxes = load_data(dir / 'detections.json')
RESULT0_5 = json.load(open(dir / 'expected0_5.json'))
results = get_pascal_voc_metrics(gt_BoundingBoxes, pd_BoundingBoxes, .5)
assert_results(results, RESULT0_5, 'ap')
assert_results(results, RESULT0_5, 'precision')
assert_results(results, RESULT0_5, 'recall')
assert_results(results, RESULT0_5, 'tp')
assert_results(results, RESULT0_5, 'fp')
assert_results(results, RESULT0_5, 'num_groundtruth')
assert_results(results, RESULT0_5, 'num_detection')
mAP = MetricPerClass.mAP(results)
assert np.isclose(RESULT0_5['mAP'], mAP, 1e-3), mAP
def test_sample2_coco():
dir = Path('tests/sample_2')
gt_BoundingBoxes, pd_BoundingBoxes = load_data_coco(dir / 'groundtruths_coco.json',
dir / 'detections_coco.json')
RESULT0_5 = json.load(open(dir / 'expected0_5.json'))
results = get_pascal_voc_metrics(gt_BoundingBoxes, pd_BoundingBoxes, .5)
assert_results(results, RESULT0_5, 'ap')
assert_results(results, RESULT0_5, 'precision')
assert_results(results, RESULT0_5, 'recall')
assert_results(results, RESULT0_5, 'tp')
assert_results(results, RESULT0_5, 'fp')
assert_results(results, RESULT0_5, 'num_groundtruth')
assert_results(results, RESULT0_5, 'num_detection')
mAP = MetricPerClass.mAP(results)
assert np.isclose(RESULT0_5['mAP'], mAP, 1e-3), mAP
if __name__ == '__main__':
test_sample2_coco()
|
StarcoderdataPython
|
1706153
|
<gh_stars>0
'''
print( )
print('DESAFIO 1')
nome = input ('Qual seu nome?')
print('Ola ' +nome+ ' Seja bem vindx!')
print( )
print('------- DESAFIO 02 -------')
dia = input ('Qual o dia que você nasceu?')
mes = input ('Qual o mês que você nasceu?')
ano = input ('Qual o ano que você nasceu?')
print('Certo. Você nasceu no dia ',dia,' de ', mes, ' de ',ano, 'Correto?')
print( )
print('------- DESAFIO 03 -------')
pnum = int(input('Qual o primeiro número?'))
snum = int(input('Qual o segundo número?'))
sum = pnum+snum
print('A soma de {} com {} é igual a {}'.format(pnum, snum, sum))
print( )
print('------- DESAFIO 04 -------')
var = input('Digite algo: ')
print('Verificação se o que foi digitado é Alfabético:')
print(var.isalpha())
print('Verificação se o que foi digitado é Numerico:')
print(var.isnumeric())
print('Verificação se o que foi digitado é Alfanumérico:')
print(var.isalnum())
print(type(var))
print( )
print('------- DESAFIO 05 -------')
num = int(input('Digite um número: '))
ant = num-1
sus = num+1
print('Seu sucessor é {}'.format(sus))
print('Seu antecessor é {}'.format(ant))
print( )
print('------- DESAFIO 06 -------')
n = int(input('Digite um número: '))
d = n*2
t = n*3
r = n**(1/2)
print('Seu dobro é {}, seu triplo é {} e sua raiz quadrada é {}'.format(d, t, r))
print( )
print('------- DESAFIO 07 -------')
n1 = int(input('Digite a primeira nota: '))
n2 = int(input('Digite a segunda nota: '))
m = (n1+n2)/2
print('A média da sua nota é {}'.format(m))
print( )
print('------- DESAFIO 08 -------')
mt = int(input('Digite um valor: '))
cm = mt*100
mm = mt*1000
print('O valor digitado foi {} metros, equivalente a {} centimetros e {} milímetros'.format(mt,cm,mm))
print( )
print('------- DESAFIO 09 -------')
nt = int(input('Digite um número: '))
t0 = nt*0
t1 = nt*1
t2 = nt*2
t3 = nt*3
t4 = nt*4
t5 = nt*5
t6 = nt*6
t7 = nt*7
t8 = nt*8
t9 = nt*9
t10 = nt*10
print('Você digitou o número {} e sua tabuada é'.format(nt))
print('{}*0={}'.format(nt, t0))
print('{}*1={}'.format(nt, t1))
print('{}*2={}'.format(nt, t2))
print('{}*3={}'.format(nt, t3))
print('{}*4={}'.format(nt, t4))
print('{}*5={}'.format(nt, t5))
print('{}*6={}'.format(nt, t6))
print('{}*7={}'.format(nt, t7))
print('{}*8={}'.format(nt, t8))
print('{}*9={}'.format(nt, t9))
print('{}*10={}'.format(nt, t10))
print( )
print('------- DESAFIO 10 -------')
rs = int(input('Digite um valor {acima de 4}: '))
us = rs/3.27
print('Com R${},00 você pode comprar U${}'.format(rs, us))
print( )
print('------- DESAFIO 11 -------')
b = int(input('Digite a largura: '))
h = int(input('Digite a altura: '))
a = b*h
t = a/2
print('Com largura de {} e altura de {} tem-se uma área de {}. São necessários {} litros de tinta para pintar tudo!'.format(b, h, a, t))
print( )
print('------- DESAFIO 12 -------')
p = float(input('Digite o preço a pagar: '))
desc = p-p*5/100
print('O preço digitado foi de R${} e com 5% de desconto sairá por R${}'.format(p, desc))
print( )
print('------- DESAFIO 13 -------')
sal = float(input('Digite o salário: '))
aum = sal+sal*15/100
print('Seu salário é de R${} e com 15% de aumento será R${}'.format(sal, aum))
print( )
print('------- DESAFIO 14 -------')
t = float(input('Digite a temperatura: '))
tf = ((9* t)/5)+32
print('A temperatura digitada foi de {}ºC ou {}ºF'.format(t, tf))
print( )
print('------- DESAFIO 15 -------')
km = float(input('Quantos km foram percorridos? '))
d = float(input('Por quantos dias? '))
p = 60*d+0.15*km
print('Um carro que rodou {}km por {} dias, terá o valor de R${:.2f} para aluguel.'.format(km, d, p))
print( )
from math import trunc
print('------- DESAFIO 16 -------')
n = float(input('Digite um numero real: '))
nint = trunc(n)
print('O número digitado foi {} e sua parte inteira é {}'.format(n, nint))
print( )
from math import pow
print('------- DESAFIO 17 -------')
co = float(input('Digite o valor do Cateto Oposto: '))
ca = float(input('Digite o valor do Cateto Adjacente: '))
hip = ((pow(co, 2)) + (pow(ca, 2))) **(1/2)
print('Com CO equivalendo {} e o CA a {} O comprimento da hipotenuza é {}'.format(co, ca, hip))
print( )
import math
print('------- DESAFIO 18 -------')
a = float(input('Digite o valor de um angulo qualquer: '))
s = math.sin(math.radians(a))
c = math.cos(math.radians(a))
t = math.tan(math.radians(a))
print('Sobre o Ângulo de {} o seno = {:.2f} O cosseno = {:.2f} e a tangente = {:.2f}'.format(a, s, c, t))
print( )
|
StarcoderdataPython
|
9667759
|
<filename>libs/voting.py<gh_stars>1-10
'''
Simplistic voting which keeps track of voters in a poll and can tally the results
This is used by the advancedvote command & reaction package
@author: NGnius
'''
class Poll():
'''Base class for voting systems that everything below extends
This class should never be used in a concrete implementation'''
def __init__(self, options = ["Y", "N"], allowed_voters = None, voted=None, votes=None):
if voted is None:
voted = list()
if votes is None:
votes = dict()
self.options = options
self.allowed_voters = allowed_voters
self.voted = voted
self.votes = votes
def addVote(self, voter, vote):
'''(Poll, anything, str) -> bool
        If vote is an option, the vote will be added to self.votes.
Otherwise, this will return False'''
if vote in self.options and (self.allowed_voters == None or voter in self.allowed_voters) and voter not in self.voted:
self.votes[voter] = vote
self.voted.append(voter)
return True
return False
addChoice = addVote
def tallyVotes(self):
'''(Poll) -> list
this should return a list sorted from winner to loser in form [option, option's votes]'''
pass
def dumpVotes(self):
'''(Poll) -> list
this should return an unsorted list of votes, anonymised
in a list of [anonymous voter, their vote(s)]'''
pass
class FPTP(Poll):
'''Implementation of First Past The Post voting system'''
def tallyVotes(self):
'''(FPTP) -> list
returns a list of [option, total votes], sorted by total votes'''
# print(self.dumpVotes())
votes_by_option = dict(zip(self.options, [0]*len(self.options)))
for voter in self.votes:
votes_by_option[self.votes[voter]]+=1
results = [[votes_by_option[x],x] for x in votes_by_option] #turn into list of [votes, option]
results.sort()
results = results[::-1] #reverse order so highest number is first; not last
return [[x[1],x[0]] for x in results] #swap option with votes
def dumpVotes(self, anonymised=True):
'''(FPTP) -> list
returns a list of [option, voter], unsorted'''
if not anonymised:
return [[x, self.votes[x]] for x in self.votes] #turn self.votes dict into list of [voter, vote]
else:
result = list()
count=0
for voter in self.votes:
result.append([count, self.votes[voter]])
count += 1
return result
class STV(Poll):
'''Implementation of Single Transferable Vote voting system'''
def __init__(self, options = ["A", "B", "C"], allowed_voters = None, transferables=None, **kwargs):
'''(STV [, list, list, , int, dict]) -> None
transferables is how many ranked votes one person can make
ie transferables=2 means a voter can have a first choice and a second choice
transferables=5 means a voter can have a first choice up to a fifth choice'''
super().__init__(options=options, allowed_voters=allowed_voters, **kwargs)
if transferables is not None and isinstance(transferables, int):
self.transferables = transferables
else:
self.transferables = len(self.options)
self.votes = dict()
def addVote(self, voter, vote):
'''(STV, str, list)-> None
vote should be list of options from highest priority choice (1st choice) to lowest choice
If the length of the vote list isn't the same length as transferables or the voter isn't allowed to vote,
this won't do anything
Any other invalid input (ie invalid option, repeated option, etc.) will raise as error (most likely a ValueError)'''
if len(vote) == self.transferables and (self.allowed_voters == None or voter in self.allowed_voters ) and voter not in self.voted:
for i in vote:
if i not in self.options:
raise ValueError("Invalid option: "+i)
if vote.count(i)>1:
raise ValueError("Option "+i+" used more than once")
self.votes[str(voter)]=list(vote)
self.voted.append(str(voter))
else:
raise ValueError("Invalid vote or voter")
def addChoice(self, voter, vote):
'''(STV, str, str)-> bool
adds votes chronologically (1st addChoice is 1st choice, 2nd addChoice is 2nd choice, etc.)'''
if voter not in self.votes and (self.allowed_voters == None or voter in self.allowed_voters):
self.votes[str(voter)]=[None]*self.transferables
self.voted.append(str(voter))
if voter in self.votes and None in self.votes[voter] and vote in self.options and vote not in self.votes[voter]:
self.votes[str(voter)][self.votes[str(voter)].index(None)] = str(vote)
return True
return False
def tallyVotes(self):
'''Recursion: kill me now...'''
# print(self.dumpVotes())
self.setModifiedBordaCounts()
return self.recursiveTallySort(self.votes, self.options)
def dumpVotes(self, anonymised=True):
'''(STV) -> list
returns everyone's votes'''
if not anonymised:
return [[x, self.votes[x]] for x in self.votes]
else:
result = list()
count = 0
for x in self.votes:
result.append([int(count), list(self.votes[x])])
count += 1
return result
def setModifiedBordaCounts(self):
self.MBC = dict()
for option in self.options:
self.MBC[option] = 0
for voter in self.votes:
self.MBC[option] += self._bordaCountFromSingleBallot(self.votes[voter], option)
def _bordaCountFromSingleBallot(self, ballot, option):
if option not in ballot:
return 0
if None in ballot:
return ballot.index(None) - ballot.index(option)
return len(ballot) - ballot.index(option)
# Unused:
# def countFirsts(self, votes, options):
# optionsCount = dict(zip(options, [0]*len(options)))
# for voter in votes:
# if votes[voter]!= []:
# optionsCount[votes[voter][0]]+=1
# output = [[optionsCount[x], x] for x in optionsCount]
# output.sort()
# return output
def countVotes(self, votes, options):
counts = list() # [[0]*self.transferables]*len(options) but copies, not pointers
# Damn it Python why you make me do dis!? ^^^ is so much nicer and easier...
for i in range(len(options)):
counts.append(list())
for j in range(self.transferables):
counts[i].append(0)
optionsCount = dict(zip(options, counts))
for voter in votes:
for i in range(len(votes[voter])):
optionsCount[votes[voter][i]][i]+=1
output = [[optionsCount[x], x] for x in optionsCount]
output.sort()
return output
def deleteNones(self, votes):
for voter in votes:
while None in votes[voter]:
del(votes[voter][votes[voter].index(None)])
def recursiveTallySort(self, votestemp, optionstemp):
'''(dict, list) -> list
If this works, returns a list of options sorted by highest (winner) to lowest (least voted for)'''
votes = dict()
for voter in votestemp: # I give up with hoping Python mem shit will work
votes[str(voter)]=list(votestemp[voter])
options = list(optionstemp)
self.deleteNones(votes)
voteCount = self.countVotes(votes, options)
if len(options)>1:
possible_ties = [[self.MBC[voteCount[0][1]], voteCount[0]]]
for i in range(1, len(options)):
if (voteCount[i][0][0] == voteCount[0][0][0]):
possible_ties.append([self.MBC[voteCount[i][1]], voteCount[i]])
else:
break
possible_ties.sort() # lowest MBC first
lowest_voted = possible_ties[0][1] # lowest_voted is list in form [votes, option]
for voter in votes:
if lowest_voted[1] in votes[voter]:
del(votes[voter][votes[voter].index(lowest_voted[1])])
del(options[options.index(lowest_voted[1])])
return self.recursiveTallySort(votes, options) + [[lowest_voted[1],lowest_voted[0][0]]]
elif len(options)==1:
return [[voteCount[0][1], voteCount[0][0][0]]]
else: # len(options) == 0
return ["No votes recorded"]
|
StarcoderdataPython
|
4893526
|
#!/usr/bin/python
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import re
FLAGS = None
SKIP_EXTS = ('jpeg', 'jpg', 'pgm', 'png',
'log', 'serverlog',
'preprocessed', 'jmx', 'gz',
'caffemodel')
SKIP_PATHS = ('deploy/single_server/.helmignore',
'docs/examples/model_repository',
'qa/custom_models/custom_float32_float32_float32/output0_labels.txt',
'qa/custom_models/custom_nobatch_float32_float32_float32/output0_labels.txt',
'qa/custom_models/custom_int32_int32_int32/output0_labels.txt',
'qa/custom_models/custom_nobatch_int32_int32_int32/output0_labels.txt',
'qa/ensemble_models/mix_platform_float32_float32_float32/output0_labels.txt',
'qa/ensemble_models/mix_type_int32_float32_float32/output0_labels.txt',
'qa/ensemble_models/mix_ensemble_int32_float32_float32/output0_labels.txt',
'serving',
'src/servables/caffe2/testdata',
'src/servables/tensorflow/testdata',
'src/servables/tensorrt/testdata',
'src/servables/ensemble/testdata',
'src/test/testdata',
'tools/patch',
'VERSION')
COPYRIGHT_YEAR_RE0 = 'Copyright \\(c\\) (20[0-9][0-9]), NVIDIA CORPORATION. All rights reserved.'
COPYRIGHT_YEAR_RE1 = 'Copyright \\(c\\) (20[0-9][0-9])-(20[0-9][0-9]), NVIDIA CORPORATION. All rights reserved.'
COPYRIGHT ='''
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of NVIDIA CORPORATION nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
single_re = re.compile(COPYRIGHT_YEAR_RE0)
range_re = re.compile(COPYRIGHT_YEAR_RE1)
def visit(path):
if FLAGS.verbose:
print("visiting " + path)
for skip in SKIP_EXTS:
if path.endswith('.' + skip):
if FLAGS.verbose:
print("skipping due to extension: " + path)
return True
for skip in SKIP_PATHS:
if path.startswith(skip):
if FLAGS.verbose:
print("skipping due to path prefix: " + path)
return True
with open(path, 'r') as f:
first_line = True
line = None
try:
for fline in f:
line = fline
# Skip any '#!', '..', '<!--', or '{{/*' lines at the
# start of the file
if first_line:
first_line = False
if (fline.startswith("#!") or fline.startswith("..") or
fline.startswith("<!--") or fline.startswith("{{/*")):
continue
# Skip empty lines...
if len(fline.strip()) != 0:
break
except UnicodeDecodeError as ex:
# If we get this exception on the first line then assume a
# non-text file.
if not first_line:
raise ex
if FLAGS.verbose:
print("skipping binary file: " + path)
return True
if line is None:
if FLAGS.verbose:
print("skipping empty file: " + path)
return True
line = line.strip()
# The next line must be the copyright line with a single year
# or a year range. It must start with either '#' or '//'
prefix = None
if line.startswith('#'):
prefix = '#'
elif line.startswith('//'):
prefix = '//'
else:
print("incorrect prefix for copyright line, expecting '#' or '//', for " +
path + ": " + line)
return False
start_year = 0
end_year = 0
m = single_re.match(line[(len(prefix) + 1):])
if m and len(m.groups()) == 1:
start_year = end_year = int(m.group(1))
else:
m = range_re.match(line[(len(prefix) + 1):])
if m and len(m.groups()) == 2:
start_year = int(m.group(1))
end_year = int(m.group(2))
else:
print("copyright year is not recognized for " + path + ": " + line)
return False
if start_year > FLAGS.year:
print("copyright start year greater than current year for " + path + ": " + line)
return False
if end_year > FLAGS.year:
print("copyright end year greater than current year for " + path + ": " + line)
return False
if end_year < start_year:
print("copyright start year greater than end year for " + path + ": " + line)
return False
# Subsequent lines must match the copyright body.
copyright_body = [l.rstrip() for i, l in enumerate(COPYRIGHT.splitlines()) if i > 0]
copyright_idx = 0
for line in f:
if copyright_idx >= len(copyright_body):
break
line = line.strip()
if len(copyright_body[copyright_idx]) == 0:
expected = prefix
else:
expected = (prefix + " " + copyright_body[copyright_idx])
if line != expected:
print("incorrect copyright body for " + path)
print(" expected: '" + expected + "'")
print(" got: '" + line + "'")
return False
copyright_idx += 1
if copyright_idx != len(copyright_body):
print("missing " + str(len(copyright_body) - copyright_idx) +
" lines of the copyright body")
return False
if FLAGS.verbose:
print("copyright correct for " + path)
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-y', '--year', type=int, required=True,
help='Copyright year')
parser.add_argument('paths', type=str, nargs='*', default=None,
help='Directories or files to check')
FLAGS = parser.parse_args()
if FLAGS.paths is None or len(FLAGS.paths) == 0:
parser.print_help()
exit(1)
ret = True
for path in FLAGS.paths:
if not os.path.isdir(path):
if not visit(path):
ret = False
else:
for root, dirs, files in os.walk(path):
for name in files:
if not visit(os.path.join(root, name)):
ret = False
exit(0 if ret else 1)
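# Illustrative usage (not part of the original file); the script name and paths
# below are made up for the example:
#   python check_copyright.py --year 2019 src/ tools/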
|
StarcoderdataPython
|
6411745
|
import random
from src.player.IBot import *
from src.action.IAction import *
class RandomBot(IBot):
def moveRandomly(self, board) -> IAction:
validPawnMoves = board.storedValidPawnMoves[self.pawn.coord]
return random.choice(validPawnMoves)
def placeFenceRandomly(self, board) -> IAction:
randomFencePlacing = random.choice(board.storedValidFencePlacings)
attempts = 5
while board.isFencePlacingBlocking(randomFencePlacing) and attempts > 0:
randomFencePlacing = random.choice(board.storedValidFencePlacings)
attempts -= 1
if attempts == 0:
            return self.moveRandomly(board)
return randomFencePlacing
def play(self, board) -> IAction:
if random.randint(0, 2) == 0 and self.remainingFences() > 0 and len(board.storedValidFencePlacings) > 0:
return self.placeFenceRandomly(board)
else:
return self.moveRandomly(board)
|
StarcoderdataPython
|
1933729
|
import inspect
def af_set_var(name,val, layer = 2):
stack = inspect.stack()
inspect.getargvalues(stack[layer].frame).locals[name]=val
def af_get_var(name, layer = 2):
stack = inspect.stack()
return inspect.getargvalues(stack[layer].frame).locals[name]
def func(*args):
def _():
for func_ in args:
func_()
return _
|
StarcoderdataPython
|
206970
|
# This is the sales.
|
StarcoderdataPython
|
5184986
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
#Getting the data
def get_data(filename ='../data/raw/data.csv' ):
data = pd.read_csv(filename).values
#Shuffling the data
np.random.shuffle(data)
labels = data[:, -1]
images = data[:,:-1]
#Converting the ints to floats for preprocessing the data
images = images.astype('float').reshape(images.shape[0], 32,32)
return images, labels
#preprocessing the data
def pre_processing(char_data, char_labels):
char_data = char_data/255
classes = np.unique(char_labels)
for i in range(46):
classes[i] = classes[i].split('_')[-1]
encoder = LabelEncoder()
char_labels = encoder.fit_transform(char_labels)
return char_data, char_labels, classes
|
StarcoderdataPython
|
366876
|
with open('input.txt') as file:
data = file.read()
data = data.splitlines()
depart_timestamp = int(data[0])
ids = []
for v in data[1].split(","):
if v.isdigit():
v = int(v)
ids.append(v)
time_to_wait = 9999999999999999999
best_id = 0
for id in ids:
if id == 'x':
continue
# print(f"ID:{id}, {depart_timestamp % id}, time to wait: {id - (depart_timestamp % id)}")
temp = id - depart_timestamp % id
if temp < time_to_wait:
time_to_wait = temp
best_id = id
# part1
print(time_to_wait, time_to_wait * best_id)
from functools import reduce
def chinese_remainder(n, a):
sum = 0
prod = reduce(lambda a, b: a*b, n)
for n_i, a_i in zip(n, a):
p = prod // n_i
sum += a_i * mul_inv(p, n_i) * p
return sum % prod
def mul_inv(a, b):
b0 = b
x0, x1 = 0, 1
if b == 1: return 1
while a > 1:
q = a // b
a, b = b, a%b
x0, x1 = x1 - q * x0, x0
if x1 < 0: x1 += b0
return x1
a = [id for id in ids if id != 'x']
n = [i for i in range(len(ids)) if ids[i] != 'x']
cr = chinese_remainder(a, n)
N = reduce(lambda x, y: x * y, a)
print(N % cr)
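# Illustrative check (not part of the original file): chinese_remainder(n, a)
# solves x = a_i (mod n_i) for all i; e.g. chinese_remainder([3, 5, 7], [2, 3, 2])
# returns 23, since 23 % 3 == 2, 23 % 5 == 3 and 23 % 7 == 2.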
|
StarcoderdataPython
|
6617641
|
class BiblioAD:
def capturar(this,datos):
# 1. Abrir el archivo
archivo = open("Libros.txt","a")
# 2. Escribir, guardar o almacenar los datos en el archivo
archivo.write(datos+"\n")
# 3. Cerrar el archivo
archivo.close()
return "Datos a capturar: "+datos
def consultaGeneral(this):
datos=""
libro=""
try:
            # 1. Open the file
archivo = open("Libros.txt","r")
            # 2. Process the data in the file
libro = archivo.readline()
while(libro != ""):
datos = datos+libro
libro = archivo.readline()
            # 3. Close the file
archivo.close()
datos = "CONSULTA GENERAL:\n"+datos
except:
datos="ERROR"
return datos
def consultarEditorial(this, edit):
datos=""
libro=""
encontrado=False
try:
            # 1. Open the file
archivo = open("Libros.txt","r")
            # 2. Process the data in the file
edit = edit+"\n"
libro = archivo.readline()
while(libro != ""):
st = libro.split(".")
second = st[2]
#edit = edit+"\n"
if(edit == st[2]):
datos = datos + libro
encontrado = True
libro = archivo.readline()
            # 3. Close the file
archivo.close()
if(not encontrado):
datos = "ERROR"
except:
datos="ERROR ABRIENDO EL ARCHIVO"
return datos
def consultarTitulo(this, tit):
datos=""
libro=""
encontrado=False
try:
            # 1. Open the file
archivo = open("Libros.txt","r")
            # 2. Process the data in the file
libro = archivo.readline()
while(libro != ""):
st = libro.split("_")
if(tit == st[0]):
datos = datos + libro
encontrado = True
libro = archivo.readline()
            # 3. Close the file
archivo.close()
if(not encontrado):
datos = "ERROR"
except:
datos="ERROR ABRIENDO EL ARCHIVO"
return datos
def consultarAutor(this, aut):
datos=""
libro=""
encontrado=False
try:
            # 1. Open the file
archivo = open("Libros.txt","r")
            # 2. Process the data in the file
libro = archivo.readline()
while(libro != ""):
st = libro.split("_")
if(aut == st[1]):
datos = datos + libro
encontrado = True
libro = archivo.readline()
            # 3. Close the file
archivo.close()
if(not encontrado):
datos = "ERROR"
except:
datos="ERROR ABRIENDO EL ARCHIVO"
return datos
|
StarcoderdataPython
|
1887761
|
<filename>IOatmos.py
import time
from datetime import datetime, timedelta
import os, sys, string
from netCDF4 import Dataset
import numpy as np
"""
Created by <NAME>
https://github.com/trondkr/model2roms
"""
def help ():
"""
    This function generates the initial netcdf atmospheric forcing file for the U and V wind components
    for ROMS. Methods:
    def createNetCDFFileUV(grdROMS, outfilename, myformat, mytype):
"""
def createNetCDFFileUV(grdROMS, outfilename, myformat, mytype):
if (myformat=='NETCDF4'):
myzlib = True
else:
myzlib = False
if os.path.exists(outfilename):
os.remove(outfilename)
f1 = Dataset(outfilename, mode='w', format=myformat)
f1.title = "Atmospheric forcing file"
f1.grdFile = "%s"%(grdROMS.grdfilename)
f1.history = 'Created ' + time.ctime(time.time())
f1.source = "{} ({})".format(confM2R.authorname, confM2R.authoremail)
f1.Conventions = "CF-1.0"
""" Define dimensions """
f1.createDimension('xi_rho', grdROMS.xi_rho)
f1.createDimension('eta_rho', grdROMS.eta_rho)
f1.createDimension('wind_time', None)
vnc = f1.createVariable('lon_rho', 'd', ('eta_rho','xi_rho',),zlib=myzlib, fill_value=grdROMS.fill_value)
vnc.long_name = 'Longitude of RHO-points'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:,:] = grdROMS.lon_rho
vnc = f1.createVariable('lat_rho', 'd', ('eta_rho','xi_rho',),zlib=myzlib, fill_value=grdROMS.fill_value)
vnc.long_name = 'Latitude of RHO-points'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:,:] = grdROMS.lat_rho
v_time = f1.createVariable('wind_time', 'd', ('wind_time',),zlib=myzlib, fill_value=grdROMS.fill_value)
v_time.long_name = 'Days since 1948-01-01 00:00:00'
v_time.units = 'Days since 1948-01-01 00:00:00'
v_time.field = 'time, scalar, series'
v_time.calendar = 'standard'
v_temp_west = f1.createVariable('Vwind', 'f', ('wind_time', 'eta_rho', 'xi_rho',),zlib=myzlib, fill_value=grdROMS.fill_value)
v_temp_west.long_name = "Eta-component of wind"
v_temp_west.units = "meter second-1"
v_temp_west.field = "Vwind, scalar, series"
v_temp_west.missing_value = grdROMS.fill_value
v_temp_west.time = "wind_time"
v_temp_west = f1.createVariable('Uwind', 'f', ('wind_time', 'eta_rho', 'xi_rho',),zlib=myzlib, fill_value=grdROMS.fill_value)
v_temp_west.long_name = "Xi-component of wind"
v_temp_west.units = "meter second-1"
v_temp_west.field = "Uwind, scalar, series"
v_temp_west.missing_value = grdROMS.fill_value
v_temp_west.time = "wind_time"
f1.close()
|
StarcoderdataPython
|
1902365
|
<gh_stars>0
from requests_html import HTMLSession, AsyncHTMLSession
def checkAmazonPrice(url):
found = False
while not found:
try:
session = HTMLSession()
r = session.get(url)
price_html = r.html.find('#priceblock_ourprice', first=True)
price = price_html.text
found = True
return price
except AttributeError:
pass
|
StarcoderdataPython
|
6475160
|
<gh_stars>0
from typing import List
class Solution:
"""
第一次用时:63min
总用时:88min
时间复杂度:O(n)
空间复杂度:O(n)
思路:将子序列和分别与max、0比较,小于0当做0对待。
"""
def XXX(self, nums: List[int]) -> int:
my_max=nums[0]
subArraySum=nums[0]
if subArraySum<0:
subArraySum=0
for i in range(1,len(nums)):
subArraySum+=nums[i]
if(subArraySum>my_max):
my_max=subArraySum
if(subArraySum<0):
subArraySum=0
return my_max
|
StarcoderdataPython
|
1795728
|
<filename>microsim/utilities.py
# Contains some useful utility functionality
import os
from urllib.request import urlopen
import requests
import tarfile
import pandas as pd
from typing import List
from tqdm import tqdm
from microsim.column_names import ColumnNames
class Optimise:
"""
Functions to optimise the memory use of pandas dataframes.
From https://medium.com/bigdatarepublic/advanced-pandas-optimize-speed-and-memory-a654b53be6c2
"""
@staticmethod
def optimize(df: pd.DataFrame, datetime_features: List[str] = []):
return Optimise._optimize_floats(Optimise._optimize_ints(Optimise._optimize_objects(df, datetime_features)))
@staticmethod
def _optimize_floats(df: pd.DataFrame) -> pd.DataFrame:
floats = df.select_dtypes(include=['float64']).columns.tolist()
df[floats] = df[floats].apply(pd.to_numeric, downcast='float')
return df
@staticmethod
def _optimize_ints(df: pd.DataFrame) -> pd.DataFrame:
ints = df.select_dtypes(include=['int64']).columns.tolist()
df[ints] = df[ints].apply(pd.to_numeric, downcast='integer')
return df
@staticmethod
def _optimize_objects(df: pd.DataFrame, datetime_features: List[str]) -> pd.DataFrame:
for col in df.select_dtypes(include=['object']):
if col not in datetime_features:
num_unique_values = len(df[col].unique())
num_total_values = len(df[col])
if float(num_unique_values) / num_total_values < 0.5:
df[col] = df[col].astype('category')
else:
df[col] = pd.to_datetime(df[col])
return df
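# Illustrative sketch (not part of the original file): Optimise is used as a
# namespace of static methods. The DataFrame below is a made-up example; note
# that object columns not listed in datetime_features whose values are mostly
# unique are parsed as datetimes by _optimize_objects.
def _demo_optimise():
    df = pd.DataFrame({
        "count": [1, 2, 3, 4, 5, 6],
        "ratio": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
        "area": ["E01", "E01", "E02", "E02", "E01", "E02"],
    })
    slim = Optimise.optimize(df)
    print(slim.dtypes)  # ints and floats downcast, low-cardinality 'area' -> category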
def check_durations_sum_to_1(individuals, activities):
total_duration = [0.0] * len(individuals) # Add up all the different activity durations
for activity in activities:
total_duration = total_duration + individuals.loc[:, f"{activity}{ColumnNames.ACTIVITY_DURATION}"]
if not (total_duration.apply(lambda x: round(x, 5)) == 1.0).all():
print("Some activity durations don't sum to 1", flush=True)
print(total_duration[total_duration != 1.0], flush=True)
raise Exception("Some activity durations don't sum to 1")
# data fetching functions
def download_data(url : str):
"""Download data utility function
Args:
url (str, optional): A url to an archive file. Defaults to "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".
"""
response = requests.get(url, stream=True)
# specify target_path as name of tarfile downloaded by splitting url
# and retrieving last item
target_path = os.path.join(url.split('/')[-1])
# Create a progress bar
file_size = int(urlopen(url).info().get('Content-Length', -1))
pbar = tqdm(total=file_size, initial=0, unit='B', unit_scale=True, desc=url.split('/')[-1])
if response.status_code == 200:
with open(target_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return target_path
def unpack_data(archive : str):
"""unpack tar data archive
Args:
archive (str): A string directory path to archive file using
"""
tar_file = tarfile.open(archive)
tar_file.extractall(".")
def data_setup(url : str = "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz"):
"""A wrapper function for downloading and unpacking Azure stored devon_data
Args:
archive (str): A string directory path to archive file using
url (str, optional): A url to an archive file. Defaults to "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".
"""
archive_file = download_data(url = url)
unpack_data(archive = archive_file)
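# Illustrative usage (not part of the original file): a typical one-off call is
# data_setup() to fetch and unpack the default Devon archive into the current
# working directory; a custom archive could be passed via the url argument,
# e.g. data_setup(url="https://example.com/my_data.tar.gz") (URL made up).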
|
StarcoderdataPython
|