/path/path_server.py | emonson/SamVis | no_license | UTF-8 | Python

import cherrypy
import json
from path_obj import PathObj
import os
import glob


class ResourceIndex(object):

    def __init__(self, server_url, data_names):
        self.server_url = server_url
        self.data_names = data_names

    @cherrypy.expose
    def index(self):
        return self.to_html()

    @cherrypy.expose
    def datasets(self):
        return json.dumps(self.data_names)

    def to_html(self):
        html_item = lambda (name): '<div><a href="' + self.server_url + '?data={name}">{name}</a></div>'.format(**vars())
        items = map(html_item, self.data_names)
        items = ''.join(items)
        return '<html>{items}</html>'.format(**vars())


class PathServer:
    # _cp_config = {'tools.gzip.on': True}

    def __init__(self, path):
        print 'STARTING UP', path
        self.path = PathObj(path)

    @cherrypy.expose
    def index(self):
        return self.path.path_data_dir

    # ------------
    # Paths

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def districtcoords(self, district_id=None, depth=1, previous_id=None, rold="1.0, 0.0, 0.0, 1.0"):
        if district_id is not None:
            dist_id = int(district_id)
            d = int(depth)
            if previous_id is not None:
                prev_id = int(previous_id)
            else:
                prev_id = dist_id
            R_old = self.parse_rold(rold)
            return self.path.GetDistrictDeepPathLocalRotatedCoordInfo_JSON(dist_id, prev_id, d, R_old)

    # ------------
    # Ellipses

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def districtellipses(self, district_id=None, type='space', previous_id=None, rold="1.0, 0.0, 0.0, 1.0"):
        if district_id is not None:
            dist_id = int(district_id)
            if previous_id is not None:
                prev_id = int(previous_id)
            else:
                prev_id = dist_id
            R_old = self.parse_rold(rold)
            if type == 'diffusion':
                return self.path.GetDistrictDiffusionRotatedEllipses_JSON(dist_id, prev_id, R_old)
            else:
                return self.path.GetDistrictLocalRotatedEllipses_JSON(dist_id, prev_id, R_old)

    # ------------
    # Query

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def pathtimedistrict(self, time=None):
        if time is not None:
            t = int(time)
            # Get district ID for path at a specified time
            return self.path.GetDistrictFromPathTime_JSON(t)

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def netpoints(self):
        # 2D coordinates of overview of district centers
        return self.path.GetNetPoints_JSON()

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def datainfo(self):
        # {datatype:('image', 'gene',...), shape:[n_rows, n_cols], alldata_bounds:[min, max]}
        return self.path.GetDataInfo_JSON()

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def transitiongraph(self):
        # nodes (with ids and occupation times) and edges (with transition sums)
        return self.path.GetTransitionGraph_JSON()

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def timesfromdistrict(self, district_id=None):
        if district_id is not None:
            dist_id = int(district_id)
            # Average 1st passage times to other districts from this one
            return self.path.GetTimesFromDistrict_JSON(dist_id)

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def districtcenterdata(self, district_id=None):
        if district_id is not None:
            dist_id = int(district_id)
            # TODO: Make this more general. For now it's just an image for the district center.
            # TODO: Need to figure out a way to detect early on what type of data is associated
            # with each district, tailor the JS visualizations accordingly, and here
            # just grab data without knowing what it is.
            return self.path.GetDistrictCenterData_JSON(dist_id)

    # ------------
    # Utility

    def parse_rold(self, rold):
        # Parse a comma-separated list of four floats encoded as a string
        try:
            a00, a01, a10, a11 = (float(r) for r in rold.split(','))
            R_old = [[a00, a01], [a10, a11]]
        except:
            R_old = [[1.0, 0.0], [0.0, 1.0]]
        return R_old


# ------------

class Root(object):

    def __init__(self, names_list):
        self.data_names = names_list

    @cherrypy.expose
    @cherrypy.tools.gzip()
    def index(self):
        return json.dumps(self.data_names)


# Server name and port are stored in a json file for easy config
server_filename = 'server_conf.json'
server_opts = json.loads(open(server_filename).read())

# Go through the data directory and add methods to root for each data set
data_dir = server_opts['path_data_dir']
vis_page = 'district_path.html'
data_paths = [xx for xx in glob.glob(os.path.join(data_dir, '*')) if os.path.isdir(xx)]
data_dirnames = [os.path.basename(xx) for xx in data_paths]

# Store the dataset names in the root so they can easily be passed to the html pages
root = Root(data_dirnames)

# This adds the methods for each data directory
for ii, name in enumerate(data_dirnames):
    print name, data_paths[ii]
    setattr(root, name, PathServer(data_paths[ii]))

# Add the resource index, which will list links to the data sets
base_url = 'http://' + server_opts['server_name'] + '/~' + server_opts['account'] + '/' + server_opts['path_web_path'] + '/' + vis_page
root.resource_index = ResourceIndex(server_url=base_url, data_names=data_dirnames)

# Start up the server
cherrypy.config.update({
    # 'tools.gzip.on': True,
    'server.socket_port': server_opts['path_port'],
    # 'server.socket_host': '127.0.0.1'
    'server.socket_host': server_opts['server_name']
})
cherrypy.quickstart(root)
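
The keys this script reads from server_conf.json imply a config file shaped roughly like the sketch below; the key names come from the code above, but every value is hypothetical.

# server_conf.json (illustrative values only; key names taken from the reads above)
# {
#     "server_name": "localhost",
#     "account": "myuser",
#     "path_web_path": "SamVis",
#     "path_data_dir": "/home/myuser/path_data",
#     "path_port": 8080
# }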
/res_bw/scripts/common/lib/email/mime/nonmultipart.py | webiumsk/WOT-0.9.14-CT | no_license | WINDOWS-1250 | Python

# 2016.02.14 12:47:55 Central Europe (Standard Time)
# Embedded file name: scripts/common/Lib/email/mime/nonmultipart.py
"""Base class for MIME type messages that are not multipart."""
__all__ = ['MIMENonMultipart']
from email import errors
from email.mime.base import MIMEBase


class MIMENonMultipart(MIMEBase):
    """Base class for MIME multipart/* type messages."""

    def attach(self, payload):
        raise errors.MultipartConversionError('Cannot attach additional subparts to non-multipart/*')
# okay decompiling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\email\mime\nonmultipart.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:47:55 Central Europe (Standard Time)
/paddlecv/ppcv/ops/connector/base.py | PaddlePaddle/models | Apache-2.0 | UTF-8 | Python

# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cv2
import numpy as np

from ppcv.ops.base import BaseOp


class ConnectorBaseOp(BaseOp):
    def __init__(self, model_cfg, env_cfg=None):
        super(ConnectorBaseOp, self).__init__(model_cfg, env_cfg)
        self.name = model_cfg["name"]
        keys = self.get_output_keys()
        self.output_keys = [self.name + '.' + key for key in keys]

    @classmethod
    def type(self):
        return 'CONNECTOR'
/test/tests/databases/pdbdatabase.py | cchriste/visit | no_license | UTF-8 | Python

# ----------------------------------------------------------------------------
#  CLASSES: nightly
#
#  Test Case:  pdbdatabase.py
#
#  Tests:      mesh      - 2D,3D curvilinear, single domain
#              plots     - Pseudocolor, Subset, Vector
#              operators - Clip
#
#  Programmer: Brad Whitlock
#  Date:       Thu Sep 25 09:31:28 PDT 2003
#
#  Modifications:
#    Brad Whitlock, Wed Mar 31 09:11:08 PDT 2004
#    I added code to clear the engine cache to reduce memory usage.
#
#    Brad Whitlock, Fri Apr 9 16:54:15 PST 2004
#    I added TestSection to divide up the tests a little.
#
#    Brad Whitlock, Thu Sep 2 12:08:59 PDT 2004
#    I replaced some deprecated calls with their new equivalents.
#
#    Brad Whitlock, Tue Dec 7 17:52:33 PST 2004
#    I added a test for mixvars in Flash files.
#
#    Mark C. Miller, Sat Feb 3 00:42:05 PST 2007
#    Added tests for array variables
# ----------------------------------------------------------------------------

##
## This creates a name for a test.
##
def CreateTestName(testName, testIndex):
    name = "%s_%02d" % (testName, testIndex)
    return name

def sv3():
    v3 = View3DAttributes()
    v3.viewNormal = (0.516282, 0.582114, 0.628169)
    v3.focus = (0, 0, 0)
    v3.viewUp = (-0.488576, 0.80261, -0.342213)
    v3.viewAngle = 30
    v3.parallelScale = 43.589
    v3.nearPlane = -87.178
    v3.farPlane = 87.178
    v3.imagePan = (0, 0)
    v3.imageZoom = 1.41577
    v3.perspective = 1
    SetView3D(v3)

##
## This function performs the test using the specified database.
##
def TestWithDatabase(db, testName):
    TestSection("Testing with %s" % db)

    # Open the test database
    OpenDatabase(db)

    ##
    ## Do the 2D tests.
    ##

    # Add the plots.
    AddPlot("Subset", "material(mesh)")
    DrawPlots()

    # Do the first test in the series
    Test(CreateTestName(testName, 0))
    SetTimeSliderState(6)
    Test(CreateTestName(testName, 1))
    SetTimeSliderState(15)
    Test(CreateTestName(testName, 2))

    # Do a test on the last frame in the animation.
    SetTimeSliderState(22)
    Test(CreateTestName(testName, 3))

    AddPlot("Mesh", "mesh")
    DrawPlots()
    v = View2DAttributes()
    v.windowCoords = (-6.07862, -0.374491, 4.48986, 10.8545)
    v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    SetView2D(v)
    Test(CreateTestName(testName, 4))

    # Try turning off material 2
    SetActivePlots((0, 1))
    TurnMaterialsOff("2")
    Test(CreateTestName(testName, 5))
    TurnMaterialsOn()
    ResetView()
    DeleteAllPlots()

    AddPlot("Pseudocolor", "mesh/a")
    DrawPlots()
    Test(CreateTestName(testName, 6))

    # Define an expression. I'm testing this because of the strange
    # <mesh/var> syntax that my plugin has.
    DefineVectorExpression("testexp1", "3.0 * {<mesh/lt>, <mesh/a>/399.0}")
    AddPlot("Vector", "testexp1")
    DrawPlots()
    vec = VectorAttributes()
    vec.nVectors = 1200
    vec.colorByMag = 0
    SetPlotOptions(vec)
    v.windowCoords = (-9.51217, -0.289482, 0.983025, 10.6717)
    v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    SetView2D(v)
    Test(CreateTestName(testName, 7))

    # Set the time back to frame 0
    SetTimeSliderState(0)
    ResetView()
    DeleteAllPlots()

    ##
    ## Do the 3D tests.
    ##
    AddPlot("Subset", "material2(revolved_mesh)")
    AddOperator("Clip")
    c = ClipAttributes()
    c.funcType = c.Plane
    c.plane1Status = 0
    c.plane2Status = 1
    c.plane3Status = 1
    SetOperatorOptions(c)
    DrawPlots()

    # Set the view
    sv3()
    Test(CreateTestName(testName, 8))
    SetTimeSliderState(6)
    sv3()
    Test(CreateTestName(testName, 9))
    SetTimeSliderState(15)
    sv3()
    Test(CreateTestName(testName, 10))

    # Do a test on the last frame in the animation.
    SetTimeSliderState(22)
    sv3()
    Test(CreateTestName(testName, 11))

    # Turn off some materials
    TurnMaterialsOff(("1", "3", "4"))
    sv3()
    Test(CreateTestName(testName, 12))
    TurnMaterialsOn()

    # Set the time back to frame 2
    SetTimeSliderState(2)
    ResetView()
    DeleteAllPlots()

    #
    # Test array variables
    #
    AddPlot("Pseudocolor", "logical_mesh/marray_comps/comp_002")
    DrawPlots()
    Test(CreateTestName(testName, 13))
    DeleteAllPlots()
    ResetView()

    AddPlot("Pseudocolor", "revolved_mesh/marray_comps/comp_002")
    DrawPlots()
    Test(CreateTestName(testName, 14))
    DeleteAllPlots()
    ResetView()

    AddPlot("Label", "logical_mesh/marray")
    DrawPlots()
    Test(CreateTestName(testName, 15))

    # Set the time back to frame 0
    SetTimeSliderState(0)
    ResetView()
    DeleteAllPlots()

    CloseDatabase(db)
    ClearCache("localhost")

#
# Test mixvars.
#
def TestMixvars(db):
    TestSection("Testing mixvars in Flash files")
    DeleteAllPlots()
    OpenDatabase(db)
    AddPlot("Pseudocolor", "mesh/mixvar")
    DrawPlots()
    ResetView()
    v = View2DAttributes()
    v.windowCoords = (-9.51866, 3.29394, 13.9258, 26.4126)
    v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
    v.fullFrameActivationMode = v.Off
    SetView2D(v)
    Test("pdb_nomix")

    # Do the same plot but with forced MIR so the mixvar gets reconstructed.
    ClearWindow()
    m = GetMaterialAttributes()
    m.forceMIR = 1
    SetMaterialAttributes(m)
    DrawPlots()
    Test("pdb_mix")
    DeleteAllPlots()

#
# Run the test a few times with different versions of the database. We do this
# because we have the same database split up three different ways and all the
# ways a database can be split up must work.
#
# multi{00,01,02}.pdb - Contains multiple time states in each file but
#                       we group them all into "multi*.pdb database".
#
# family??.pdb        - Contains a single time state in each file but
#                       we group them all into "family*.pdb database".
#
# allinone00.pdb      - Contains all of the time states in one file.
#
databases = (data_path("pdb_test_data/multi*.pdb database"),
             data_path("pdb_test_data/family*.pdb database"),
             data_path("pdb_test_data/allinone00.pdb"))
testNames = ("pdb_multi", "pdb_family", "pdb_allinone")
for i in range(len(databases)):
    TestWithDatabase(databases[i], testNames[i])

# Do the mixvar test.
TestMixvars(databases[2])

Exit()
/docs/source/conf.py | AustralianSynchrotron/tiled | BSD-3-Clause | UTF-8 | Python

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tiled documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 28 12:35:56 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.githubpages',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinxcontrib.openapi',
    'IPython.sphinxext.ipython_directive',
    'IPython.sphinxext.ipython_console_highlighting',
    'matplotlib.sphinxext.plot_directive',
    'numpydoc',
    'sphinx_click',
    'sphinx_copybutton',
    'myst_parser',
]

# Configuration options for plot_directive. See:
# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
plot_html_show_source_link = False
plot_html_show_formats = False

# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'tiled'
copyright = '2021, Bluesky Collaboration'
author = 'Bluesky Collaboration'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import tiled

# The short X.Y version.
version = tiled.__version__
# The full version, including alpha/beta/rc tags.
release = tiled.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext trees.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'tiled'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'tiled.tex', 'tiled Documentation',
     'Contributors', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'tiled', 'tiled Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'tiled', 'tiled Documentation',
     author, 'tiled', 'Tile-based access to SciPy/PyData structures over the web in many formats',
     'Miscellaneous'),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
    'matplotlib': ('https://matplotlib.org', None),
}

import yaml

def generate_schema_documentation(header, schema, target):
    # header
    with open(header, "r") as f:
        header_md = f.readlines()
    header_md = header_md[1:]
    header_md = [ln.strip("\n") for ln in header_md]

    # schema
    with open(schema, "r") as f:
        data = yaml.safe_load(f)

    def parse_schema(d, md=[], depth=0, pre=""):
        """
        Generate markdown headers from a passed python dictionary created by
        parsing a schema.yaml file.
        """
        if "then" in d:
            d = d["then"]

        if "properties" in d:
            depth += 1
            # Create markdown headers for each schema level
            for key, val in d["properties"].items():
                md.append("(schema_%s)=" % (pre + key))
                md.append("#" * (depth + 1) + " " + pre + key)
                md.append("")
                if "description" in val:
                    for ln in val["description"].split("\n"):
                        md.append(ln)
                    md.append("")
                parse_schema(val, md, depth, pre + "{}.".format(key))
            depth -= 1

        if "items" in d:
            depth += 1
            # Create markdown headers for each schema level
            if "properties" in d["items"]:
                for key, val in d["items"]["properties"].items():
                    md.append("(schema_%s)=" % (pre + key))
                    md.append("#" * (depth + 1) + " " + pre[:-1] + "[item]." + key)
                    md.append("")
                    if "description" in val:
                        for ln in val["description"].split("\n"):
                            md.append(ln)
                        md.append("")
                    parse_schema(val, md, depth, pre + "{}.".format(key))
            depth -= 1

        return md

    schema_md = parse_schema(data)

    # reference = header + schema
    reference_md = header_md + schema_md
    with open(target, "w") as f:
        f.write("\n".join(reference_md))

generate_schema_documentation(
    "reference/service-configuration-header.txt",
    "../../tiled/schemas/service_configuration.yml",
    "reference/service-configuration.md",
)
generate_schema_documentation(
    "reference/client-profiles-header.txt",
    "../../tiled/schemas/client_profiles.yml",
    "reference/client-profiles.md",
)

from tiled.trees.in_memory import Tree
from tiled.authenticators import DummyAuthenticator
from tiled.server.app import serve_tree

app = serve_tree(Tree({}), authentication={"authenticator": DummyAuthenticator()})
api = app.openapi()
with open("reference/api.yml", "w") as file:
    yaml.dump(api, file)
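
As a quick check on what parse_schema above emits, consider a toy call; the schema dict and output below are hypothetical, constructed only to show the shape (md=[] is passed explicitly to avoid reusing the function's mutable default argument).

# parse_schema({"properties": {"uri": {"description": "Database URI."}}}, md=[])
# returns: ['(schema_uri)=', '## uri', '', 'Database URI.', '']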
/backup/user_069/ch31_2019_09_28_01_31_28_102445.py | gabriellaec/desoft-analise-exercicios | no_license | UTF-8 | Python

valor = float(input('What is the value of the house? '))
salario = float(input('What is your salary? '))
tempo = int(input('Over how many years do you want to pay? '))
prestacao = valor / tempo * 12
if salario >= 0.3 * prestacao:
    print('Loan approved')
else:
    print('Loan not approved')
/python3/2. 01背包问题.py (0/1 knapsack problem) | menghuu/YALeetcode | no_license | UTF-8 | Python

#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 m <[email protected]>
#
# Distributed under terms of the MIT license.

"""

"""

import sys

# N items, knapsack capacity V
N, V = map(int, sys.stdin.readline().strip().split())

# dps[j]: best total worth achievable with capacity j
# (the 1D rolling form of the 2D table dps[i][j])
dps = [0 for _ in range(V + 1)]
for _ in range(N):
    v, w = map(int, sys.stdin.readline().strip().split())
    # Iterate capacity downward so each item is used at most once
    for j in range(V, v - 1, -1):
        dps[j] = max(dps[j], dps[j - v] + w)
print(dps[-1])
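
A quick hand-check of the recurrence, on hypothetical input rather than anything from the original repo: with V = 5 and items (v, w) = (2, 3) and (3, 4), the downward loop fills the table so each item is taken at most once.

# Sketch only; input values are made up for illustration.
dps = [0] * (5 + 1)
for v, w in [(2, 3), (3, 4)]:
    for j in range(5, v - 1, -1):
        dps[j] = max(dps[j], dps[j - v] + w)
assert dps == [0, 0, 3, 4, 4, 7]  # dps[5] == 7: both items fit (2 + 3 <= 5)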
/Data Set/bug-fixing-5/004621ef8e2f9da82e0ed2be016e874230d93a0d-<profiles>-fix.py | wsgan001/PyFPattern | no_license | UTF-8 | Python

@property
def profiles(self):
    """Returns a list of profiles from the API

    The profiles are formatted so that they are usable in this module and
    are able to be compared by the Difference engine.

    Returns:
        list (:obj:`list` of :obj:`dict`): List of profiles.

        Each dictionary in the list contains the following three (3) keys.

        * name
        * context
        * fullPath

    Raises:
        F5ModuleError: If the specified context is a value other than
            ``all``, ``server-side``, or ``client-side``.
    """
    if 'items' not in self._values['profiles']:
        return None
    result = []
    for item in self._values['profiles']['items']:
        context = item['context']
        if context == 'serverside':
            context = 'server-side'
        elif context == 'clientside':
            context = 'client-side'
        name = item['name']
        if context in ['all', 'server-side', 'client-side']:
            result.append(dict(name=name, context=context, full_path=item['fullPath']))
        else:
            raise F5ModuleError("Unknown profile context found: '{0}'".format(context))
    return result
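
For reference, the list the property above builds has this shape; the names and paths below are hypothetical, not taken from a real device.

# [{'name': 'tcp', 'context': 'all', 'full_path': '/Common/tcp'},
#  {'name': 'http', 'context': 'client-side', 'full_path': '/Common/http'}]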
/api/__init__.py | 444thLiao/WES_pipelines | no_license | UTF-8 | Python

import sys
from os.path import dirname

sys.path.insert(0, dirname(dirname(__file__)))

from luigi_pipelines.share_luigi_tasks import PrintReads, Annovar1, Annovar2
/starter.mmo/genesis/spell/spellmain.py | keaysma/solinia_depreciated | no_license | UTF-8 | Python

from genesis.dbdict import DBSpellProto
from mud.world.defines import *
import invulnerability
"[email protected]"
] | |
/svtplay-dl | gusseleet/svtplay-dl | MIT | UTF-8 | Python

#!/usr/bin/env python
print("This file is no longer updated.")
print("If you still want to use it, go to https://svtplay-dl.se/archive and download the latest one.")
/Abp/Cp17/c17_7_1_resizeAndAddLogo2.py | masa-k0101/Self-Study_python | no_license | UTF-8 | Python

#! python3
# -*- coding: utf-8 -*-
# Modified for exercise project 17.7.1
# resizeAndAddLogo2.py - Resizes all images in the current directory to fit
# within 300x300 and adds catlogo.png to the bottom-right corner.

import os
from PIL import Image

SQUARE_FIT_SIZE = 300
LOGO_FILENAME = 'catlogo.png'

logo_im = Image.open(LOGO_FILENAME)
logo_width, logo_height = logo_im.size

os.makedirs('withLogo', exist_ok=True)

# Loop over all images in the current directory
for filename in os.listdir('.'):
    # Make the extension check case-insensitive (match against lowercase)
    lfname = filename.lower()
    # Skip anything that is not a PNG, JPG, GIF, or BMP file
    if not (lfname.endswith('.png') or lfname.endswith('.jpg')
            or lfname.endswith('.gif') or lfname.endswith('.bmp')) \
            or lfname == LOGO_FILENAME:
        continue  # skip non-images and the logo image itself

    im = Image.open(filename)

    # Resize the image
    im.thumbnail((SQUARE_FIT_SIZE, SQUARE_FIT_SIZE))
    width, height = im.size

    # Skip images smaller than twice the logo size
    if width < logo_width * 2 or height < logo_height * 2:
        continue

    # Add the logo
    print('Adding logo to {}...'.format(filename))
    im.paste(logo_im, (width - logo_width, height - logo_height), logo_im)

    # Save the changes
    im.save(os.path.join('withLogo', filename))
/ThirdParty/protobuf-registry/python/protobufs/services/profile/actions/get_profile_stats_pb2.py | getcircle/luno-ios | MIT | UTF-8 | Python

# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: protobufs/services/profile/actions/get_profile_stats.proto

from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

from protobufs.services.profile import containers_pb2 as protobufs_dot_services_dot_profile_dot_containers__pb2

DESCRIPTOR = _descriptor.FileDescriptor(
  name='protobufs/services/profile/actions/get_profile_stats.proto',
  package='services.profile.actions.get_profile_stats',
  syntax='proto3',
  serialized_pb=b'\n:protobufs/services/profile/actions/get_profile_stats.proto\x12*services.profile.actions.get_profile_stats\x1a+protobufs/services/profile/containers.proto\"H\n\tRequestV1\x12\x13\n\x0b\x61\x64\x64ress_ids\x18\x01 \x03(\t\x12\x14\n\x0clocation_ids\x18\x02 \x03(\t\x12\x10\n\x08team_ids\x18\x03 \x03(\t\"@\n\nResponseV1\x12\x32\n\x05stats\x18\x01 \x03(\x0b\x32#.services.profile.containers.StatV1b\x06proto3'
  ,
  dependencies=[protobufs_dot_services_dot_profile_dot_containers__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


_REQUESTV1 = _descriptor.Descriptor(
  name='RequestV1',
  full_name='services.profile.actions.get_profile_stats.RequestV1',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='address_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.address_ids', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='location_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.location_ids', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='team_ids', full_name='services.profile.actions.get_profile_stats.RequestV1.team_ids', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=151,
  serialized_end=223,
)


_RESPONSEV1 = _descriptor.Descriptor(
  name='ResponseV1',
  full_name='services.profile.actions.get_profile_stats.ResponseV1',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='stats', full_name='services.profile.actions.get_profile_stats.ResponseV1.stats', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=225,
  serialized_end=289,
)

_RESPONSEV1.fields_by_name['stats'].message_type = protobufs_dot_services_dot_profile_dot_containers__pb2._STATV1
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1

RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
  DESCRIPTOR = _REQUESTV1,
  __module__ = 'protobufs.services.profile.actions.get_profile_stats_pb2'
  # @@protoc_insertion_point(class_scope:services.profile.actions.get_profile_stats.RequestV1)
  ))
_sym_db.RegisterMessage(RequestV1)

ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
  DESCRIPTOR = _RESPONSEV1,
  __module__ = 'protobufs.services.profile.actions.get_profile_stats_pb2'
  # @@protoc_insertion_point(class_scope:services.profile.actions.get_profile_stats.ResponseV1)
  ))
_sym_db.RegisterMessage(ResponseV1)


# @@protoc_insertion_point(module_scope)
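
A minimal usage sketch for the generated messages above; the field values are made up, and the round-trip relies on the standard SerializeToString/FromString generated-message API.

# req = RequestV1(address_ids=['a1'], team_ids=['t1'])
# data = req.SerializeToString()
# assert RequestV1.FromString(data) == req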
/ana/magic/histo.py | brettviren/cowbells | no_license | UTF-8 | Python

#!/usr/bin/env python
'''
Histogram store
'''

from UserDict import DictMixin
import ROOT

class Histo(DictMixin):
    '''
    Provide a dictionary interface to a TDirectory (TFile) for
    managing ROOT Histogram objects (any TNamed object, really).

    The TDirectory must be associated with a TFile opened with the
    "UPDATE" option if items are to be set on objects of this class.

    Note, that this allows items to be set using a key name that may
    differ from the histogram name.  Getting an item by histogram name
    will still work but will create a duplicate object in memory.  If
    you do not wish to save these do not do an explicit TFile::Write()
    on the file holding the TDirectory given to Histo.
    '''

    def __init__(self, tdir = None):
        '''
        A dictionary-like collection of histograms (any TObjects,
        really) tied to a file (TDirectory).  <tdir> is some ROOT
        TDirectory-like thing where the histograms are to be kept.  It
        needs to be writable in order to store histograms.
        '''
        self.tdir = tdir
        self.bag = dict()

    def __getitem__(self, name):
        hist = self.bag.get(name)
        if hist: return hist
        if self.tdir:
            hist = self.tdir.Get(name)
        if not hist:
            raise KeyError, 'No histogram "%s"' % name
        self[name] = hist
        return hist

    def __setitem__(self, name, obj):
        obj.SetDirectory(0)
        if name != obj.GetName():
            obj.SetName(name)
        self.bag[name] = obj
        return

    def add(self, obj):
        self[obj.GetName()] = obj

    def keys(self):
        kl = set()
        if self.tdir:
            kl = set([k.GetName() for k in self.tdir.GetListOfKeys()])
        map(kl.add, self.bag.keys())
        return list(kl)

    def flush(self, tdir = None):
        '''
        Write all hists to directory
        '''
        tdir = tdir or self.tdir
        if not tdir:
            raise ValueError, 'No TDirectory to flush to'
        for obj in self.bag.values():
            tdir.WriteTObject(obj)

def test():
    fd = ROOT.TFile.Open('test_histo.root', 'recreate')
    h = Histo(fd)
    h['h1key'] = ROOT.TH1F('h1name', 'hist1', 10, -1, 1)
    assert h['h1key']
    h['h1key'].FillRandom('gaus')
    entries = h['h1key'].GetEntries()
    assert entries
    print 'Original entries:', entries
    h.flush()
    fd.Close()
    del(h)

    print 'Opening file read-only'
    fd2 = ROOT.TFile.Open('test_histo.root', 'readonly')
    h2 = Histo(fd2)
    print 'keys', h2.keys()
    assert 'h1key' in h2.keys()
    print 'h1key', h2.get('h1key')
    assert h2.get('h1key')
    print 'h1name', h2.get('h1name')
    assert not h2.get('h1name')
    assert entries == h2['h1key'].GetEntries()

if __name__ == '__main__':
    test()
/tests/network/nano_node/data/http/empty_watching.py | Matoking/siliqua | CC0-1.0, LicenseRef-scancode-warranty-disclaimer | UTF-8 | Python

from tests.network.nano_node.conftest import HTTPReplay

DATA = [
    HTTPReplay(
        {
            "action": "account_history",
            "account": "xrb_15n1wthxc5ndjnoufdfe8m4z5j973o6trzwbfys4cu4gtju5mh4xc918fout",
            "count": 500,
            "raw": True,
            "reverse": True
        },
        {
            "error": "Account not found"
        }
    )
]
/backend/users/migrations/0003_auto_20210115_1652.py | crowdbotics-apps/insta-23855 | no_license | UTF-8 | Python

# Generated by Django 2.2.17 on 2021-01-15 16:52

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0002_auto_20210115_0235'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='last_updated',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name='user',
            name='timestamp_created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
/restaurants/table/migrations/0001_initial.py | Hamza-abughazaleh/Restaurant | no_license | UTF-8 | Python

# Generated by Django 3.2.5 on 2021-07-14 19:25

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import table.validation


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('table_number', models.IntegerField(error_messages={'unique': 'A Table number already exists.'}, unique=True, verbose_name='Employee number')),
                ('seats_number', models.IntegerField(validators=[table.validation.validate_table_seats], verbose_name='Employee number')),
                ('userid', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
7c5d0a108ce1a97c84572b1c05a6cc3f1bb972a3 | 5a4ae6581fa70025a3c6cd4a8d8b0e179f10a7dc | /tests/past_api09_temptable.py | 4fe41861e916ce3471721c4d49d36b41444d3818 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | Dev4Data/datatest | b250a465f4c313ebe8d59a1d273e4e1ce4f86619 | bf136eab23c2b6ea36c201e1446fca9243c3fba6 | refs/heads/master | 2023-08-03T00:36:34.362741 | 2021-12-05T17:44:33 | 2021-12-05T17:44:33 | 136,925,059 | 0 | 0 | null | 2018-06-11T12:44:50 | 2018-06-11T12:44:50 | null | UTF-8 | Python | false | false | 18,160 | py | # -*- coding: utf-8 -*-
import itertools
import sqlite3
import unittest
import datatest._vendor.temptable as temptable
from datatest._compatibility import collections
from datatest._vendor.temptable import (
table_exists,
new_table_name,
normalize_names,
normalize_default,
create_table,
get_columns,
insert_records,
alter_table,
drop_table,
savepoint,
load_data,
)
class TestTableExists(unittest.TestCase):
def setUp(self):
connection = sqlite3.connect(':memory:')
self.cursor = connection.cursor()
def test_empty_database(self):
self.assertFalse(table_exists(self.cursor, 'table_a'))
def test_persistent_table(self):
self.cursor.execute('CREATE TABLE table_b (col1, col2)')
self.assertTrue(table_exists(self.cursor, 'table_b'))
def test_temporary_table(self):
self.cursor.execute('CREATE TEMPORARY TABLE table_c (col1, col2)')
self.assertTrue(table_exists(self.cursor, 'table_c'))
class TestNewTableName(unittest.TestCase):
def setUp(self):
# Rebuild internal generator.
temptable._table_names = ('tbl{0}'.format(x) for x in itertools.count())
connection = sqlite3.connect(':memory:')
self.cursor = connection.cursor()
def test_empty_database(self):
table_name = new_table_name(self.cursor)
self.assertEqual(table_name, 'tbl0')
def test_existing_temptable(self):
self.cursor.execute('CREATE TEMPORARY TABLE tbl0 (col1, col2)')
table_name = new_table_name(self.cursor)
self.assertEqual(table_name, 'tbl1')
def test_existing_table_and_temptable(self):
self.cursor.execute('CREATE TABLE tbl0 (col1, col2)')
self.cursor.execute('CREATE TEMPORARY TABLE tbl1 (col1, col2)')
table_name = new_table_name(self.cursor)
self.assertEqual(table_name, 'tbl2')
class TestNormalizeNames(unittest.TestCase):
def test_single_value(self):
normalized = normalize_names('A')
self.assertEqual(normalized, '"A"')
def test_list_of_values(self):
normalized = normalize_names(['A', 'B'])
expected = ['"A"', '"B"']
self.assertEqual(normalized, expected)
def test_non_strings(self):
normalized = normalize_names(2.5)
self.assertEqual(normalized, '"2.5"')
def test_whitespace(self):
normalized = normalize_names(' A ')
self.assertEqual(normalized, '"A"')
normalized = normalize_names(' ')
self.assertEqual(normalized, '""')
def test_quote_escaping(self):
normalized = normalize_names('Steve "The Woz" Wozniak')
self.assertEqual(normalized, '"Steve ""The Woz"" Wozniak"')
class TestNormalizeDefault(unittest.TestCase):
def test_none(self):
normalized = normalize_default(None)
self.assertEqual(normalized, 'NULL')
def test_expression(self):
expression = "(datetime('now'))"
normalized = normalize_default(expression)
self.assertEqual(normalized, expression)
def test_number_or_literal(self):
normalized = normalize_default(7)
self.assertEqual(normalized, '7')
normalized = normalize_default('foo')
self.assertEqual(normalized, "'foo'")
normalized = normalize_default('')
self.assertEqual(normalized, "''")
class TestCreateTable(unittest.TestCase):
def setUp(self):
connection = sqlite3.connect(':memory:')
self.cursor = connection.cursor()
def count_tables(self): # <- Heper function.
self.cursor.execute('''
SELECT COUNT(*)
FROM sqlite_temp_master
WHERE type='table'
''')
return self.cursor.fetchone()[0]
def test_basic_creation(self):
self.assertEqual(self.count_tables(), 0, msg='starting with zero tables')
create_table(self.cursor, 'test_table1', ['A', 'B']) # <- Create table!
self.assertEqual(self.count_tables(), 1, msg='one table')
create_table(self.cursor, 'test_table2', ['A', 'B']) # <- Create table!
self.assertEqual(self.count_tables(), 2, msg='two tables')
def test_default_value(self):
# When unspecified, default is empty string.
create_table(self.cursor, 'test_table1', ['A', 'B'])
self.cursor.execute("INSERT INTO test_table1 (A) VALUES ('foo')")
self.cursor.execute("INSERT INTO test_table1 (B) VALUES ('bar')")
self.cursor.execute('SELECT * FROM test_table1')
expected = [
('foo', ''), # <- Default in column B
('', 'bar'), # <- Default in column A
]
self.assertEqual(self.cursor.fetchall(), expected)
# Setting default to None.
create_table(self.cursor, 'test_table2', ['A', 'B'], default=None)
self.cursor.execute("INSERT INTO test_table2 (A) VALUES ('foo')")
self.cursor.execute("INSERT INTO test_table2 (B) VALUES ('bar')")
self.cursor.execute('SELECT * FROM test_table2')
expected = [
('foo', None), # <- Default in column B
(None, 'bar'), # <- Default in column A
]
self.assertEqual(self.cursor.fetchall(), expected)
def test_sqlite3_errors(self):
"""Sqlite errors should not be caught."""
# Table already exists.
create_table(self.cursor, 'test_table1', ['A', 'B'])
with self.assertRaises(sqlite3.OperationalError):
create_table(self.cursor, 'test_table1', ['A', 'B'])
# Duplicate column name.
with self.assertRaises(sqlite3.OperationalError):
create_table(self.cursor, 'test_table2', ['A', 'B', 'A'])
# Duplicate column name (after normalization).
with self.assertRaises(sqlite3.OperationalError):
create_table(self.cursor, 'test_table3', ['A', 'B', ' A '])
# Duplicate empty/all-whitespace string columns (uses modified message).
with self.assertRaises(sqlite3.OperationalError) as cm:
create_table(self.cursor, 'test_table4', ['', 'B', ' '])
class TestGetColumns(unittest.TestCase):
def setUp(self):
connection = sqlite3.connect(':memory:')
self.cursor = connection.cursor()
def test_get_columns(self):
self.cursor.execute('CREATE TABLE test1 ("A", "B")')
columns = get_columns(self.cursor, 'test1')
self.assertEqual(columns, ['A', 'B'])
self.cursor.execute('CREATE TEMPORARY TABLE test2 ("C", "D")')
columns = get_columns(self.cursor, 'test2')
self.assertEqual(columns, ['C', 'D'])
def test_missing_table(self):
with self.assertRaises(sqlite3.ProgrammingError):
columns = get_columns(self.cursor, 'missing_table')
class TestInsertRecords(unittest.TestCase):
def setUp(self):
connection = sqlite3.connect(':memory:')
self.cursor = connection.cursor()
def test_basic_insert(self):
cursor = self.cursor
cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
records = [
('x', 1),
('y', 2),
]
insert_records(cursor, 'test_table', ['A', 'B'], records)
cursor.execute('SELECT * FROM test_table')
results = cursor.fetchall()
self.assertEqual(results, records)
def test_reordered_columns(self):
cursor = self.cursor
cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
records = [
(1, 'x'),
(2, 'y'),
]
columns = ['B', 'A'] # <- Column order doesn't match how table was created.
insert_records(cursor, 'test_table', columns, records)
cursor.execute('SELECT * FROM test_table')
results = cursor.fetchall()
expected = [
('x', 1),
('y', 2),
]
self.assertEqual(results, expected)
def test_wrong_number_of_values(self):
self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
too_few = [('x',), ('y',)]
with self.assertRaises(sqlite3.ProgrammingError):
insert_records(self.cursor, 'test_table', ['A', 'B'], too_few)
too_many = [('x', 1, 'foo'), ('y', 2, 'bar')]
with self.assertRaises(sqlite3.ProgrammingError):
insert_records(self.cursor, 'test_table', ['A', 'B'], too_many)
def test_no_records(self):
cursor = self.cursor
cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
records = iter([]) # <- Empty, no data.
insert_records(cursor, 'test_table', ['A', 'B'], records)
cursor.execute('SELECT * FROM test_table')
results = cursor.fetchall()
self.assertEqual(results, [])
def test_sqlite3_errors(self):
"""Sqlite errors should not be caught."""
# No such table.
with self.assertRaises(sqlite3.OperationalError):
records = [('x', 1), ('y', 2)]
insert_records(self.cursor, 'missing_table', ['A', 'B'], records)
# No column named X.
with self.assertRaises(sqlite3.OperationalError):
self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
records = [('a', 1), ('b', 2)]
insert_records(self.cursor, 'test_table', ['X', 'B'], records)
class TestAlterTable(unittest.TestCase):
def setUp(self):
connection = sqlite3.connect(':memory:')
self.cursor = connection.cursor()
def test_new_columns(self):
self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
alter_table(self.cursor, 'test_table', ['C', 'D', 'E'])
columns = get_columns(self.cursor, 'test_table')
self.assertEqual(columns, ['A', 'B', 'C', 'D', 'E'])
def test_existing_columns(self):
self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
alter_table(self.cursor, 'test_table', ['A', 'B', 'C', 'D'])
columns = get_columns(self.cursor, 'test_table')
self.assertEqual(columns, ['A', 'B', 'C', 'D'])
def test_ordering_behavior(self):
self.cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
alter_table(self.cursor, 'test_table', ['B', 'C', 'A', 'D'])
# Columns A and B already exist in a specified order and
# the new columns ('C' and 'D') are added in the order in
# which they are encountered.
columns = get_columns(self.cursor, 'test_table')
self.assertEqual(columns, ['A', 'B', 'C', 'D'])
class TestDropTable(unittest.TestCase):
def test_drop_table(self):
connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute('CREATE TEMPORARY TABLE test_table ("A", "B")')
self.assertTrue(table_exists(cursor, 'test_table'))
drop_table(cursor, 'test_table') # <- Drop table!
self.assertFalse(table_exists(cursor, 'test_table'))
class TestSavepoint(unittest.TestCase):
def setUp(self):
connection = sqlite3.connect(':memory:')
connection.isolation_level = None
self.cursor = connection.cursor()
def test_transaction_status(self):
connection = self.cursor.connection
if not hasattr(connection, 'in_transaction'): # New in 3.2.
return
self.assertFalse(connection.in_transaction)
with savepoint(self.cursor):
self.assertTrue(connection.in_transaction)
self.assertFalse(connection.in_transaction)
def test_release(self):
cursor = self.cursor
with savepoint(cursor):
cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
cursor.execute("INSERT INTO test_table VALUES ('one')")
cursor.execute("INSERT INTO test_table VALUES ('two')")
cursor.execute("INSERT INTO test_table VALUES ('three')")
cursor.execute('SELECT * FROM test_table')
self.assertEqual(cursor.fetchall(), [('one',), ('two',), ('three',)])
def test_nested_releases(self):
cursor = self.cursor
with savepoint(cursor):
cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
cursor.execute("INSERT INTO test_table VALUES ('one')")
with savepoint(cursor): # <- Nested!
cursor.execute("INSERT INTO test_table VALUES ('two')")
cursor.execute("INSERT INTO test_table VALUES ('three')")
cursor.execute('SELECT * FROM test_table')
self.assertEqual(cursor.fetchall(), [('one',), ('two',), ('three',)])
def test_rollback(self):
cursor = self.cursor
with savepoint(cursor): # <- Released.
cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
try:
with savepoint(cursor): # <- Rolled back!
cursor.execute("INSERT INTO test_table VALUES ('one')")
cursor.execute("INSERT INTO test_table VALUES ('two')")
cursor.execute("INSERT INTO missing_table VALUES ('three')") # <- Bad table.
except sqlite3.OperationalError:
pass
cursor.execute('SELECT * FROM test_table')
self.assertEqual(cursor.fetchall(), [], 'Table should exist but contain no records.')
def test_nested_rollback(self):
cursor = self.cursor
with savepoint(cursor): # <- Released.
cursor.execute('CREATE TEMPORARY TABLE test_table ("A")')
cursor.execute("INSERT INTO test_table VALUES ('one')")
try:
with savepoint(cursor): # <- Nested rollback!
cursor.execute("INSERT INTO test_table VALUES ('two')")
raise Exception()
except Exception:
pass
cursor.execute("INSERT INTO test_table VALUES ('three')")
cursor.execute('SELECT * FROM test_table')
self.assertEqual(cursor.fetchall(), [('one',), ('three',)])
def test_bad_isolation_level(self):
connection = sqlite3.connect(':memory:')
connection.isolation_level = 'DEFERRED' # <- Expects None/autocommit!
cursor = connection.cursor()
with self.assertRaises(ValueError):
with savepoint(cursor):
pass
class TestLoadData(unittest.TestCase):
def setUp(self):
connection = sqlite3.connect(':memory:')
connection.isolation_level = None
self.cursor = connection.cursor()
try:
self.dict_constructor = collections.OrderedDict # New in 2.7
except AttributeError:
self.dict_constructor = dict
def test_four_args(self):
columns = ['A', 'B']
records = [
('x', 1),
('y', 2),
]
load_data(self.cursor, 'testtable', columns, records) # <- Four args.
self.cursor.execute('SELECT A, B FROM testtable')
self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])
def test_four_args_mappings(self):
columns = ['A', 'B']
records = [
self.dict_constructor([('A', 'x'), ('B', 1)]),
self.dict_constructor([('B', 2), ('A', 'y')]), # <- Different key order.
]
load_data(self.cursor, 'testtable', columns, records) # <- Four args.
self.cursor.execute('SELECT A, B FROM testtable')
self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])
def test_three_args(self):
records = [
['A', 'B'], # <- Used as header row.
('x', 1),
('y', 2),
]
load_data(self.cursor, 'testtable', records) # <- Three args.
self.cursor.execute('SELECT A, B FROM testtable')
self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])
def test_three_args_mappings(self):
records = [
self.dict_constructor([('A', 'x'), ('B', 1)]),
self.dict_constructor([('B', 2), ('A', 'y')]), # <- Different key order.
]
load_data(self.cursor, 'testtable', records) # <- Three args.
self.cursor.execute('SELECT A, B FROM testtable')
self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])
def test_three_args_namedtuples(self):
ntup = collections.namedtuple('ntup', ['A', 'B'])
records = [
ntup('x', 1),
ntup('y', 2),
]
load_data(self.cursor, 'testtable', records) # <- Three args.
self.cursor.execute('SELECT A, B FROM testtable')
self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', 2)])
def test_column_default(self):
load_data(self.cursor, 'testtable1', ['A', 'B'], [('x', 1)])
load_data(self.cursor, 'testtable1', ['A'], [('y',)])
load_data(self.cursor, 'testtable1', ['B'], [(3,)])
self.cursor.execute('SELECT A, B FROM testtable1')
self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', ''), ('', 3)])
load_data(self.cursor, 'testtable2', ['A', 'B'], [('x', 1)], default=None)
load_data(self.cursor, 'testtable2', ['A'], [('y',)])
load_data(self.cursor, 'testtable2', ['B'], [(3,)])
self.cursor.execute('SELECT A, B FROM testtable2')
self.assertEqual(self.cursor.fetchall(), [('x', 1), ('y', None), (None, 3)])
def test_empty_records(self):
records = []
load_data(self.cursor, 'testtable1', ['A', 'B'], records) # <- Using four args.
self.assertTrue(table_exists(self.cursor, 'testtable1'), 'should create table')
self.cursor.execute('SELECT A, B FROM testtable1')
self.assertEqual(self.cursor.fetchall(), [], 'should have zero records')
load_data(self.cursor, 'testtable2', records) # <- Using three args.
self.assertFalse(table_exists(self.cursor, 'testtable2'), 'should not create table')
def test_bad_columns_object(self):
records = [('x', 1), ('y', 2)]
columns = 'bad columns object' # <- Expects iterable of names, not this str.
with self.assertRaises(TypeError):
load_data(self.cursor, 'testtable', columns, records)
if __name__ == '__main__':
unittest.main()
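
# The tests above pin down the behavior the `savepoint` context manager must
# have: it rejects non-autocommit connections (isolation_level other than
# None), releases the savepoint on a clean exit, and rolls back on error.
# A minimal sketch of such a manager follows; this is an illustrative
# reconstruction, not the actual implementation under test.
import uuid

class _SavepointSketch(object):
    def __init__(self, cursor):
        if cursor.connection.isolation_level is not None:
            raise ValueError('connection must be in autocommit mode')
        self.name = 'svpnt' + uuid.uuid4().hex  # unique name allows nesting
        self.cursor = cursor

    def __enter__(self):
        self.cursor.execute('SAVEPOINT %s' % self.name)

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.cursor.execute('RELEASE %s' % self.name)
        else:
            self.cursor.execute('ROLLBACK TO %s' % self.name)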
| [
"[email protected]"
] | |
34f85ad410331a5914a2517ee3343c14572b7b59 | 7a2bfe09f7526c36fce304999fa47466b89fdec2 | /profiles/models.py | 7cbf380d6bb77aeabe96546b9fe12b082a1ed6fc | [] | no_license | Brachamul/fichier-jdem | 179344ba64b830c3f6e352907e470a1db8d42a9b | f9b40657aea54db83b3abd3e7b38fec9260d34e9 | refs/heads/master | 2021-05-01T00:37:50.021517 | 2019-02-07T15:02:06 | 2019-02-07T15:02:06 | 58,691,054 | 0 | 0 | null | 2017-07-04T21:13:01 | 2016-05-13T02:01:05 | Python | UTF-8 | Python | false | false | 2,247 | py | from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save
from django.dispatch import receiver

from fichiers_adherents.models import FichierAdherents, Adherent, Cnil, adherents_actuels


class Member(models.Model):

    id = models.IntegerField(primary_key=True)
    phoneless = models.BooleanField(default=False)

    def historique_adherent(self):
        return Adherent.objects.filter(num_adherent=self.id)

    def derniere_occurence_fichier(self):
        adherents = Adherent.objects.filter(num_adherent=self.id)
        fichier = FichierAdherents.objects.filter(adherent__in=adherents)
        return Adherent.objects.get(num_adherent=self.id, fichier=fichier.latest())

    def notes(self):
        return Note.objects.filter(member=self)

    def __str__(self):
        return str(self.derniere_occurence_fichier())

    @staticmethod
    def initiate(fichier=False):
        ''' Generate, for all fichiers or a single one, a Member for each adherent.
        This is used when rebuilding the DB. '''
        if fichier:
            adherents = Adherent.objects.filter(fichier=fichier)
        else:
            adherents = Adherent.objects.all()
        for adherent in adherents:
            new_member, created = Member.objects.get_or_create(id=adherent.num_adherent)

    def check_if_phoneless(self):
        ''' Sets 'phoneless' to True if the adherent has no phone number. '''
        self.phoneless = self.derniere_occurence_fichier().phoneless()
        self.save()


@receiver(post_save, sender=Adherent)
def initiate_member(sender, instance, created, **kwargs):
    new_member, created = Member.objects.get_or_create(id=instance.num_adherent)
    new_member.check_if_phoneless()


class Note(models.Model):
    member = models.ForeignKey(Member, on_delete=models.CASCADE)
    author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    text = models.CharField(max_length=1024)
    date = models.DateTimeField(auto_now_add=True)

    def __str__(self): return self.text

# https://codepen.io/codyhouse/pen/FdkEf


class WrongNumber(models.Model):
    member = models.ForeignKey(Member, on_delete=models.CASCADE)
    reported_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    date = models.DateTimeField(auto_now_add=True)

    def __str__(self): return str(self.member) | [
"[email protected]"
] | |
da60dde1e796db0872b0c257e878c1ebb4826cda | ffff723a6c8527b45299a7e6aec3044c9b00e923 | /PS/BOJ/1238/1238.py | 599cad18df66ed2c7caf926d2eb19296b2ffb8d7 | [] | no_license | JSYoo5B/TIL | 8e3395a106656e090eeb0260fa0b0dba985d3beb | 3f9ce4c65451512cfa2279625e44a844d476b68f | refs/heads/master | 2022-03-14T09:15:59.828223 | 2022-02-26T01:30:41 | 2022-02-26T01:30:41 | 231,383,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | #!/usr/bin/env python3
import heapq

INF = 10 ** 9
input = __import__('sys').stdin.readline
heappush = heapq.heappush
heappop = heapq.heappop


def get_dist_to_others(edges, src):
    # Plain Dijkstra from a single source over the adjacency list `edges`.
    nodes_cnt = len(edges)
    dists = [INF for _ in range(nodes_cnt)]
    heap = [[0, src]]
    while len(heap) > 0:
        [dist, node] = heappop(heap)
        dists[node] = min(dists[node], dist)
        for n, d in edges[node]:
            if dist + d < dists[n]:
                dists[n] = dist + d
                heappush(heap, [dists[n], n])
    return dists


if __name__ == '__main__':
    nodes_cnt, edges_cnt, tgt_id = map(int, input().split())
    tgt_id -= 1  # convert into zero offset

    edges = [[] for _ in range(nodes_cnt)]
    for _ in range(edges_cnt):
        src, dst, dist = map(int, input().split())
        edges[src-1].append([dst-1, dist])

    single_dists = []
    for n in range(nodes_cnt):
        dist = get_dist_to_others(edges, n)
        single_dists.append(dist)

    # Round trip for node n is dist(n -> target) + dist(target -> n).
    return_dists = []
    for n in range(nodes_cnt):
        dist = single_dists[n][tgt_id] + single_dists[tgt_id][n]
        return_dists.append(dist)

    answer = max(return_dists)
    print(answer)
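
# Note (illustrative sketch, not part of the original submission): the loop
# above runs Dijkstra once per node, O(V * E log V) overall. The usual trick
# for this problem needs only two runs: dist(target -> n) on the graph
# itself, and dist(n -> target) for every n via one run on the reversed graph.
def get_dist_from_others(edges, dst):
    # Build the reversed adjacency list, then reuse the Dijkstra above.
    reversed_edges = [[] for _ in range(len(edges))]
    for src, adj in enumerate(edges):
        for node, d in adj:
            reversed_edges[node].append([src, d])
    return get_dist_to_others(reversed_edges, dst)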
| [
"[email protected]"
] | |
f6345fee883766347e8a49dfa0c93038f32995b2 | 48a7b266737b62da330170ca4fe4ac4bf1d8b663 | /molsysmt/_private/digestion/argument/chi3.py | ef60e618382a4208ce40cd84eadebbd653dad6de | [
"MIT"
] | permissive | uibcdf/MolSysMT | ddab5a89b8ec2377f383884c5169d147cab01322 | c3d713ba63db24eb8a2426115cf8d9cb3665d225 | refs/heads/main | 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 | MIT | 2023-06-04T20:27:06 | 2018-06-19T19:38:44 | Python | UTF-8 | Python | false | false | 407 | py | from ...exceptions import ArgumentError
methods_bool_input = ["molsysmt.topology.get_dihedral_quartets.get_dihedral_quartets",
"molsysmt.structure.get_dihedral_angles.get_dihedral_angles"]
def digest_chi3(chi3, caller=None):
if caller in methods_bool_input:
if isinstance(chi3, bool):
return chi3
raise ArgumentError('chi3', value=chi3, caller=caller, message=None)
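
# Usage sketch (illustrative, not part of the library): a bool passes
# through when the caller is one of the whitelisted methods, and anything
# else raises ArgumentError.
if __name__ == '__main__':
    print(digest_chi3(True, caller=methods_bool_input[0]))  # -> True
    try:
        digest_chi3('yes', caller=None)
    except ArgumentError as error:
        print('rejected:', error)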
| [
"[email protected]"
] | |
6764d6567a70fd6c2f2886bcd6dfc1234234f72f | edf31957838a65e989d5eb5e8118254ac2413fc8 | /parakeet/analysis/collect_vars.py | 66543535c72ccdc08e79053098b7cefbdccc4db0 | [
"BSD-3-Clause"
] | permissive | iskandr/parakeet | e35814f9030b9e8508a7049b62f94eee5b8c5296 | d9089f999cc4a417d121970b2a447d5e524a3d3b | refs/heads/master | 2021-07-18T19:03:05.666898 | 2019-03-13T17:20:20 | 2019-03-13T17:20:20 | 5,889,813 | 69 | 7 | NOASSERTION | 2021-07-17T21:43:03 | 2012-09-20T16:54:18 | Python | UTF-8 | Python | false | false | 1,523 | py | from .. syntax import Var, Tuple
from syntax_visitor import SyntaxVisitor


class SetCollector(SyntaxVisitor):
    def __init__(self):
        SyntaxVisitor.__init__(self)
        self.var_names = set([])

    def visit_Var(self, expr):
        self.var_names.add(expr.name)


def collect_var_names(expr):
    collector = SetCollector()
    collector.visit_expr(expr)
    return collector.var_names


def collect_var_names_from_exprs(exprs):
    collector = SetCollector()
    collector.visit_expr_list(exprs)
    return collector.var_names


class ListCollector(SyntaxVisitor):
    def __init__(self):
        SyntaxVisitor.__init__(self)
        self.var_names = []

    def visit_Var(self, expr):
        self.var_names.append(expr.name)


def collect_var_names_list(expr):
    collector = ListCollector()
    collector.visit_expr(expr)
    return collector.var_names


def collect_binding_names(lhs):
    lhs_class = lhs.__class__
    if lhs_class is Var:
        return [lhs.name]
    elif lhs.__class__ is Tuple:
        combined = []
        for elt in lhs.elts:
            combined.extend(collect_binding_names(elt))
        return combined
    else:
        return []


class CollectBindings(SyntaxVisitor):
    def __init__(self):
        SyntaxVisitor.__init__(self)
        self.bindings = {}

    def bind(self, lhs, rhs):
        if lhs.__class__ is Var:
            self.bindings[lhs.name] = rhs
        elif lhs.__class__ is Tuple:
            for elt in lhs.elts:
                self.bind(elt, rhs)

    def visit_Assign(self, stmt):
        self.bind(stmt.lhs, stmt.rhs)


def collect_bindings(fn):
    return CollectBindings().visit_fn(fn)
| [
"[email protected]"
] | |
546664dc944f734fde1b16887bc05cfe6763ff9b | 65662b604fa40bdc6e8648e39ed201b0dd8ad6fd | /Python Specialization/Course 4/code/party4.py | 257a2d0f8d47dc1b565fc7854b62718b830ad3d4 | [
"MIT"
] | permissive | rubysubash/Coursera-Specializations | 973f9dbc01774dae84d90b6b97870a6dfde674bc | 88acc792bbee20e8d9b8d34ff6f7c3072236d6f3 | refs/heads/master | 2020-08-10T02:43:08.277860 | 2020-06-02T09:48:25 | 2020-06-02T09:48:25 | 214,237,214 | 0 | 0 | MIT | 2019-10-10T16:52:27 | 2019-10-10T16:52:27 | null | UTF-8 | Python | false | false | 295 | py | class PartyAnimal:
    x = 0
    name = ""

    def __init__(self, nam):
        self.name = nam
        print self.name, "constructed"

    def party(self):
        self.x = self.x + 1
        print self.name, "party count", self.x


s = PartyAnimal("Sally")
s.party()

j = PartyAnimal("Jim")
j.party()
s.party()
| [
"[email protected]"
] | |
f496808570d534acea82cfe877a130b206da08d4 | a973f336765a31550cc9661be57e0384c317fc38 | /ejemplo3/proyectoUno/administrativo/urls.py | 8ef4aee1be71fedb011dd6c3682a4c4b57228cee | [] | no_license | PlataformasWeb-P-AA2021/clase03-2bim-ricardoifc | 0a40d61f351525ab87cb2ce1f0982804cb50df37 | 35c42f8e5c3420bfa66103dcb45a75c5b27d5a5a | refs/heads/main | 2023-06-19T17:46:12.663825 | 2021-07-16T17:47:59 | 2021-07-16T17:47:59 | 377,869,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | """
URL handling for the 'administrativo' application.
"""
from django.urls import path

# Import the application's views.
from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('estudiante/<int:id>', views.obtener_estudiante,
         name='obtener_estudiante'),
]
| [
"66690702+github-classroom[bot]@users.noreply.github.com"
] | 66690702+github-classroom[bot]@users.noreply.github.com |
9bdf0b23533683e150f463573dbbc503fff15af3 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/auto_rig_pro/auto_rig_datas_export.py | 0ad9f6059d3985af9e8018a74ef2027c38c661bb | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | {'eyelid_top.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'eyelid_bot.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'eyelid_top.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'eyelid_bot.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, 0.0, 1.5)), 'c_foot_bank_01.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_bank_02.r': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_heel.r': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'c_toes_end.r': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'c_foot_bank_01.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_bank_02.l': ('Transformation', (0.0, 0.0, 0.0, 0.0, -0.25, 0.25)), 'c_foot_heel.l': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'c_toes_end.l': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'toes_end_ref.l': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0)), 'toes_end_ref.r': ('Transformation', (-0.5, 0.5, 0.0, 0.0, 0.0, 0.0))} | [
"[email protected]"
] | |
5dab7e3bfdea2a2c594b3dad9518850e875f603f | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/B/buttub/basic_twitter_scraper_179.py | 43a79636f9de0b40da964a9dc909525b46726714 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,294 | py | ###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:BarackObama'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 10
for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)

    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            print data['from_user'], data['text']
            scraperwiki.sqlite.save(["id"], data)
    except:
        print 'Oh dear, failed to scrape %s' % base_url
| [
"[email protected]"
] | |
11eaad49e2f332332ac43910e59112ef2d27a95d | c0340c511cff5b40b4681c4d3238d807624c0323 | /models/revision/branching_entropy/branching_direction_entropy.py | 88c5d3c8f2bdf70871641d209a2d1963a11af595 | [] | no_license | m-hahn/grammar-optim | 5fa7ade47d2ad91f517c887ee2c65af24059069d | 07a1a80692a504bcafc8120a21c4dc9066b495ee | refs/heads/master | 2022-08-30T06:54:42.749264 | 2022-08-05T12:09:28 | 2022-08-05T12:09:28 | 156,456,167 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | #/u/nlp/bin/stake.py -g 11.5g -s run-stats-pretrain2.json "python readDataDistEnglishGPUFree.py"
import random
import sys

from math import log, exp
from random import random, shuffle

from corpusIterator_FuncHead import CorpusIteratorFuncHead

languages = ["Hindi", "Swedish", "German", "Urdu", "English", "Spanish", "Chinese", "Slovenian", "Estonian", "Norwegian", "Serbian", "Croatian", "Finnish", "Portuguese", "Catalan", "Russian", "Arabic", "Czech", "Japanese", "French", "Latvian", "Basque", "Danish", "Dutch", "Ukrainian", "Gothic", "Hebrew", "Hungarian", "Latin", "Persian", "Bulgarian", "Romanian", "Indonesian", "Greek", "Turkish", "Slovak", "Belarusian", "Galician", "Italian", "Lithuanian", "Polish", "Vietnamese", "Korean", "Tamil", "Irish", "Marathi", "Afrikaans", "Telugu", "Coptic", "Ancient_Greek", "Old_Church_Slavonic"]

with open("branching_entropy.tsv", "w") as outFile:
    print >> outFile, "Language\tBranchingEntropy"
    for language in languages:
        posUni = set()  #[ "ADJ", "ADP", "ADV", "AUX", "CONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"]
        posFine = set()  #[ "``", ",", ":", ".", "''", "$", "ADD", "AFX", "CC", "CD", "DT", "EX", "FW", "GW", "HYPH", "IN", "JJ", "JJR", "JJS", "-LRB-", "LS", "MD", "NFP", "NN", "NNP", "NNPS", "NNS", "PDT", "POS", "PRP", "PRP$", "RB", "RBR", "RBS", "RP", "-RRB-", "SYM", "TO", "UH", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB", "XX" ]
        deps = ["acl", "acl:relcl", "advcl", "advmod", "amod", "appos", "aux", "auxpass", "case", "cc", "ccomp", "compound", "compound:prt", "conj", "conj:preconj", "cop", "csubj", "csubjpass", "dep", "det", "det:predet", "discourse", "dobj", "expl", "foreign", "goeswith", "iobj", "list", "mark", "mwe", "neg", "nmod", "nmod:npmod", "nmod:poss", "nmod:tmod", "nsubj", "nsubjpass", "nummod", "parataxis", "punct", "remnant", "reparandum", "root", "vocative", "xcomp"]
        #deps = ["acl", " advcl", " advmod", " amod", " appos", " aux", " case cc", " ccompclf", " compound", " conj", " cop", " csubjdep", " det", " discourse", " dislocated", " expl", " fixed", " flat", " goeswith", " iobj", " list", " mark", " nmod", " nsubj", " nummod", " obj", " obl", " orphan", " parataxis", " punct", " reparandum", " root", " vocative", " xcomp"]
        header = ["index", "word", "lemma", "posUni", "posFine", "morph", "head", "dep", "_", "_"]

        originalDistanceWeights = {}
        orderTable = {}
        keys = set()
        vocab = {}
        distanceSum = {}
        distanceCounts = {}
        depsVocab = set()
        totalCount = 0
        for partition in ["train", "dev"]:
            for sentence in CorpusIteratorFuncHead(language, partition, storeMorph=True).iterator():
                for line in sentence:
                    vocab[line["word"]] = vocab.get(line["word"], 0) + 1
                    depsVocab.add(line["dep"])
                    posFine.add(line["posFine"])
                    posUni.add(line["posUni"])
                    if line["dep"] == "root":
                        continue
                    posHere = line["posUni"]
                    posHead = sentence[line["head"]-1]["posUni"]
                    dep = line["dep"]
                    direction = "HD" if line["head"] < line["index"] else "DH"
                    key = (posHead, dep, posHere)
                    keyWithDir = (posHead, dep, posHere, direction)
                    orderTable[keyWithDir] = orderTable.get(keyWithDir, 0) + 1
                    keys.add(key)
                    distanceCounts[key] = distanceCounts.get(key, 0.0) + 1.0
                    distanceSum[key] = distanceSum.get(key, 0.0) + abs(line["index"] - line["head"])
                    totalCount += 1
        #print orderTable
        entropyTotal = 0
        dhLogits = {}
        for key in keys:
            hd = orderTable.get((key[0], key[1], key[2], "HD"), 0) + 0.00000001
            dh = orderTable.get((key[0], key[1], key[2], "DH"), 0) + 0.00000001
            p_hd = hd/(hd+dh)
            entropyHere = p_hd * log(p_hd) + (1-p_hd) * log(1-p_hd)
            entropyTotal -= (hd+dh)/totalCount * entropyHere
        print >> outFile, ("\t".join(map(str, [language, entropyTotal])))
| [
"[email protected]"
] | |
a8c5f8fe733b1263b9e715e46f656c1827f702d7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2843/60723/275315.py | 68bd3c530b53af09ba7366a8b979d4690d44f3fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | num=int(input())
a = input().split()
for i in range(num):
    a[i] = int(a[i])

# b[i] is a[i] + a[i+1]; the final element keeps a[num-1] unchanged.
b = []
for i in range(num-1):
    b.append(str(a[i]+a[i+1]))
b.append(str(a[num-1]))
print(' '.join(b)) | [
"[email protected]"
] | |
aeadd558d70fd182c63b90eaf2fd71d11de90b1a | a7821394b1f9817d2d8a33f7638ced65a9f60336 | /p11.py | 835f5a8bfaf8afdff081a43dc4bd102ebdcbd6f4 | [] | no_license | Azizz007/jenkins_python | 0a2f7c34ab6d9a58bcedf0b4ae3d611c1ca7c095 | ffc33bc0fcc980d05f52f12c1842e0ae0949e1fd | refs/heads/master | 2023-01-03T09:15:46.023053 | 2020-10-29T14:18:51 | 2020-10-29T14:18:51 | 308,250,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | print ("hey, 2")
| [
"[email protected]"
] | |
add0836df218250b62407c0f08330ba06dab4197 | 8962f8a8c69d2fc5e31de6e976ef4823bc9f956f | /TRPLib/pgmconstrict.py | 628b778345cdbd8fc70948cf7d9062b04e8cd729 | [] | no_license | btownshend/pyTecan | 28dd23a6b4a51afdf0dff811c53f4b2fe1b7c124 | 47087c41d7d01598eab6d45696f8c3beedd2310f | refs/heads/master | 2021-08-09T23:18:06.137339 | 2021-03-31T19:12:17 | 2021-03-31T19:12:17 | 9,189,758 | 8 | 3 | null | 2017-09-06T19:46:33 | 2013-04-03T08:05:45 | Python | UTF-8 | Python | false | false | 11,277 | py | import math
from ..Experiment import reagents, clock, worklist
from ..Experiment.concentration import Concentration
from ..Experiment.sample import Sample
from .QSetup import QSetup
from .TRP import TRP
from . import trplayout

reagents.add("BT5310", well="D1", conc=Concentration(20, 20, "pM"))
reagents.add("MKapa", well='A1', conc=Concentration(2.5, 1, 'x'), extraVol=30,
             ingredients={'glycerol': 1, 'Water': 39})
reagents.add("MConstrict", well='A6', conc=Concentration(100.0 / 98.0, 1, 'x'), extraVol=30,
             ingredients={'glycerol': 1, 'Water': 97})
reagents.add("P-End", well="C1", conc=4)


class Constrict(TRP):
    # Mix constriction inputs, constrict, PCR, remove barcodes
    pcreff = 1.98

    def __init__(self, inputs, nmolecules, nconstrict, vol):
        super(Constrict, self).__init__()
        self.inputs = inputs
        self.nmolecules = nmolecules
        self.nconstrict = nconstrict
        self.qconc = 20e-12  # Target qPCR concentration
        self.qprimers = ["End"]
        self.mix_conc = 100e-9  # Concentration of mixdown
        self.con_dilvol = 100  # Volume to use for constriction dilutions
        self.con_maxdilperstage = 100 / 3.0  # Maximum dilution/stage
        self.con_pcr1vol = 100
        self.con_pcr1inputvol = 2
        self.con_pcr1tgtconc = self.qconc * 4  # Enough to take qPCR without dilution
        self.con_pcr2dil = 4
        self.con_pcr2vol = 50
        self.con_pcr2tgtconc = 10e-9
        self.regen_predilvol = 100
        self.regen_predil = 25
        self.regen_dil = 25
        self.regen_vol = 100
        self.regen_cycles = 10
        self.rsrc = [reagents.add("%s-%s-%s" % (inputs[i]['name'], inputs[i]['left'], inputs[i]['right']),
                                  trplayout.SAMPLEPLATE,
                                  well=inputs[i]['well'] if 'well' in inputs[i] else None,
                                  conc=Concentration(stock=inputs[i]['bconc'], units="nM"),
                                  initVol=vol, extraVol=0)
                     for i in range(len(inputs))]
        self.q = None  # Defined in pgm()

    def pgm(self):
        self.q = QSetup(self, maxdil=16, debug=False, mindilvol=60)
        # Don't start idler (to minimize tip cross-contamination); last PCR allows plenty of time for doing dilutions without any effect on run time
        # Will start after first constriction PCR is running
        #self.q.debug = True
        # self.e.addIdleProgram(self.q.idler)
        self.q.addReferences(dstep=10, primers=self.qprimers, ref=reagents.getsample("BT5310"), nreplicates=2)

        samps = [r.getsample() for r in self.rsrc]
        for s in samps:
            self.q.addSamples([s], needDil=max(10, s.conc.stock*1e-9/self.qconc), primers=self.qprimers)

        print("### Mixdown #### (%.0f min)" % (clock.elapsed() / 60.0))
        if len(samps) > 1:
            mixdown = self.mix(samps, [x['weight'] for x in self.inputs])
        else:
            mixdown = samps[0]
        self.q.addSamples(mixdown, needDil=max(1.0, mixdown.conc.stock * 1e-9 / self.qconc), primers=self.qprimers)
        print("Mixdown final concentration = %.0f pM" % (mixdown.conc.stock * 1000))

        print("### Constriction #### (%.1f min)" % (clock.elapsed() / 60.0))
        constricted = self.constrict(mixdown, mixdown.conc.stock * 1e-9)

        print("### Regeneration #### (%.0f min)" % (clock.elapsed() / 60.0))
        prefixes = set([x['left'][0] for x in self.inputs])
        self.regenerate(constricted * len(prefixes), [p for p in prefixes for _ in constricted])

        print("### qPCR #### (%.0f min)" % (clock.elapsed() / 60.0))
        self.q.run(confirm=False, enzName='EvaGreen', waitForPTC=True)
        print("### qPCR Done #### (%.0f min)" % (clock.elapsed() / 60.0))
        worklist.userprompt("qPCR done -- only need to complete final PCR", 300)
        self.e.waitpgm()
        print("### Final PCR Done #### (%.0f min)" % (clock.elapsed() / 60.0))

    def mix(self, inp, weights, mixvol=100, tgtconc=None, maxinpvol=20):
        """Mix given inputs according to weights (by moles -- use conc.stock of each input)"""
        vol = [weights[i] * 1.0 / inp[i].conc.stock for i in range(len(inp))]
        scale = mixvol / sum(vol)
        conc = sum([inp[i].conc.stock * scale * vol[i] for i in range(len(inp))]) / mixvol
        if tgtconc is not None and conc > tgtconc:
            scale *= tgtconc * 1.0 / conc
        if max(vol) * scale < 4.0:
            scale = 4.1 / max(vol)  # At least one input with 4ul input
        vol = [x * scale for x in vol]  # Mix to make planned total without water
        for i in range(len(vol)):
            # Check if this would require more than available of any input
            newscale = min(maxinpvol, inp[i].volume - inp[i].plate.unusableVolume() - 2) / vol[i]
            if newscale < 1:
                vol = [x * 1.0 * newscale for x in vol]
                if tgtconc is not None:
                    mixvol *= newscale  # Maintain same target concentration by reducing total volume
        if min(vol) < 4.0:
            # Some components are too small; split mixing
            lowvol = [i for i in range(len(inp)) if vol[i] < 4.0]
            highvol = [i for i in range(len(inp)) if i not in lowvol]
            assert len(highvol) > 0
            assert len(lowvol) > 0
            lowtgtconc = sum([inp[i].conc.stock * 1.0 / weights[i] for i in highvol]) / len(highvol) * sum([weights[i] for i in lowvol])
            print("Running premix of samples " + ",".join(["%d" % ind for ind in lowvol]) + " with target concentration of %.4f" % lowtgtconc)
            mix1 = self.mix([inp[i] for i in lowvol], [weights[i] for i in lowvol], tgtconc=lowtgtconc, mixvol=mixvol, maxinpvol=maxinpvol)
            wt1 = sum([weights[i] for i in lowvol])
            mix2 = self.mix([inp[i] for i in highvol] + [mix1], [weights[i] for i in highvol] + [wt1], tgtconc=tgtconc, mixvol=mixvol, maxinpvol=maxinpvol)
            return mix2

        print("Mixing into %.0ful with tgtconc of %s, dil=%.2f" % (mixvol, "None" if tgtconc is None else "%.4f" % tgtconc, mixvol/sum(vol)))
        for i in range(len(inp)):
            print("%-30.30s %6.3fnM wt=%5.2f v=%5.2ful" % (inp[i].name, inp[i].conc.stock, weights[i], vol[i]))
        watervol = mixvol - sum(vol)
        #print "Mixdown: vols=[", ",".join(["%.2f " % v for v in vol]), "], water=", watervol, ", total=", mixvol, " ul"
        mixdown = Sample('mixdown', plate=trplayout.SAMPLEPLATE)

        if watervol < -0.1:
            print("Total mixdown is %.1f ul, more than planned %.0f ul" % (sum(vol), mixvol))
            assert False
        elif watervol >= 4.0:  # Omit if too small
            self.e.transfer(watervol, trplayout.WATER, mixdown, (False, False))
        else:
            pass
        ordering = sorted(list(range(len(inp))), key=lambda i: vol[i], reverse=True)
        for i in ordering:
            inp[i].conc.final = inp[i].conc.stock * vol[i] / mixvol  # Avoid warnings about concentrations not adding up
            self.e.transfer(vol[i], inp[i], mixdown, (False, False))
        self.e.shakeSamples([mixdown])
        if not mixdown.wellMixed:
            self.e.mix(mixdown)
        mixdown.conc = Concentration(stock=sum([inp[i].conc.stock * vol[i] for i in range(len(inp))]) / mixvol,
                                     final=None, units='nM')
        print("Mix product, %s, is in well %s with %.1ful @ %.2f nM" % (mixdown.name, mixdown.plate.wellname(mixdown.well), mixdown.volume, mixdown.conc.stock))
        print("----------")
        return mixdown

    def constrict(self, constrictin, conc):
        """Constrict sample with concentration given by conc (in M)"""
        # noinspection PyPep8Naming
        AN = 6.022e23
        dil = conc * (self.con_pcr1inputvol * 1e-6) * AN / self.nmolecules
        nstages = int(math.ceil(math.log(dil) / math.log(self.con_maxdilperstage)))
        dilperstage = math.pow(dil, 1.0 / nstages)
        print("Diluting by %.0fx in %.0f stages of %.1f" % (dil, nstages, dilperstage))

        s = [trplayout.WATER] + [constrictin] * self.nconstrict + [trplayout.SSDDIL]
        self.e.sanitize(3, 50)  # Heavy sanitize
        for j in range(nstages):
            print("Stage ", j, ", conc=", conc)
            if conc <= self.qconc * 1e-9:
                self.q.addSamples(s, needDil=1.0, primers=self.qprimers, save=False)
            s = self.runQPCRDIL(s, self.con_dilvol, dilperstage, dilPlate=True)
            conc /= dilperstage

        cycles = int(
            math.log(self.con_pcr1tgtconc / conc * self.con_pcr1vol / self.con_pcr1inputvol) / math.log(self.pcreff) + 0.5)
        pcr1finalconc = conc * self.con_pcr1inputvol / self.con_pcr1vol * self.pcreff ** cycles
        print("Running %d cycle PCR1 -> %.1f pM" % (cycles, pcr1finalconc * 1e12))
        s = s + [trplayout.WATER]  # Extra control of just water added to PCR mix
        pcr = self.runPCR(primers=None, src=s, vol=self.con_pcr1vol,
                          srcdil=self.con_pcr1vol * 1.0 / self.con_pcr1inputvol,
                          ncycles=cycles, master="MConstrict", kapa=True)
        for p in pcr:
            p.conc = Concentration(stock=pcr1finalconc * 1e9, final=pcr1finalconc / self.con_pcr2dil, units='nM')
        self.e.addIdleProgram(self.q.idler)  # Now that constriction is done, can start on qPCR setup

        needDil = max(4, pcr1finalconc / self.qconc)
        print("Running qPCR of PCR1 products using %.1fx dilution" % needDil)
        self.q.addSamples(pcr, needDil=needDil, primers=self.qprimers, save=True)
        pcr = pcr[1:-2]  # Remove negative controls

        cycles2 = int(math.log(self.con_pcr2tgtconc / pcr1finalconc * self.con_pcr2dil) / math.log(self.pcreff) + 0.5)
        pcr2finalconc = pcr1finalconc / self.con_pcr2dil * self.pcreff ** cycles2
        if cycles2 > 0:
            print("Running %d cycle PCR2 -> %.1f nM" % (cycles2, pcr2finalconc * 1e9))
            pcr2 = self.runPCR(primers="End", src=pcr, vol=self.con_pcr2vol, srcdil=self.con_pcr2dil,
                               ncycles=cycles2, master="MKapa", kapa=True)
            self.q.addSamples(pcr2, needDil=pcr2finalconc / self.qconc, primers=self.qprimers, save=True)
            for p in pcr2:
                p.conc = Concentration(stock=pcr2finalconc * 1e9, units='nM')
            self.e.waitpgm()
            return pcr2
        else:
            return pcr

    def regenerate(self, inp, prefix):
        """Regenerate T7 templates without barcodes with each of the given prefixes"""
        print("Regen Predilute: %.1f nM by %.1fx to %.2f nM" % (
            inp[0].conc.stock, self.regen_predil, inp[0].conc.stock / self.regen_predil))
        d1 = self.runQPCRDIL(inp, self.regen_predilvol, self.regen_predil, dilPlate=True)
        inconc = inp[0].conc.stock / self.regen_predil / self.regen_dil
        print("Regen PCR: %.3f nM with %d cycles -> %.1f nM" % (
            inconc, self.regen_cycles, inconc * self.pcreff ** self.regen_cycles))
        res = self.runPCR(src=d1, srcdil=self.regen_dil, vol=self.regen_vol,
                          ncycles=self.regen_cycles,
                          primers=["T7%sX" % p for p in prefix], fastCycling=False, master="MKapa", kapa=True)
        return res
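
# Worked example of the dilution target computed in constrict() above
# (illustrative numbers, not from an actual run): with a 100 nM input,
# 2 ul carried into PCR1, and a target of 1e6 molecules,
#   dil = conc * vol * N_A / nmolecules
#       = (100e-9 mol/L) * (2e-6 L) * 6.022e23 / 1e6 ~= 1.2e5,
# and with con_maxdilperstage = 100/3 this splits into
# ceil(log(dil)/log(100/3)) = 4 stages of dil**(1/4) ~= 18.6x each.
if __name__ == "__main__":
    AN = 6.022e23
    conc, vol_l, nmolecules, maxdil = 100e-9, 2e-6, 1e6, 100 / 3.0
    dil = conc * vol_l * AN / nmolecules
    nstages = int(math.ceil(math.log(dil) / math.log(maxdil)))
    print(dil, nstages, dil ** (1.0 / nstages))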
| [
"[email protected]"
] | |
3e781df8255b5996389edf5779bd5da42cd892e9 | 000c243b4c30bd089867f73ca1bcfede1c3ef801 | /catkin_ws/devel/lib/python2.7/dist-packages/mapviz/srv/_AddMapvizDisplay.py | 51a8c74fc59d389bcea1f3394153276017a0544d | [] | no_license | dangkhoa1210/SLAM-AND-NAVIGATION-FOR-MOBILE-ROBOT-OUTDOOR-INDOOR- | b4d9bf2757d839d9766d512c2272731300320925 | 7273ea9e966353440d3993dcba112bc0a2262b98 | refs/heads/master | 2023-07-15T14:07:17.123812 | 2021-09-02T10:12:30 | 2021-09-02T10:12:30 | 402,361,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,142 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mapviz/AddMapvizDisplayRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import marti_common_msgs.msg
class AddMapvizDisplayRequest(genpy.Message):
_md5sum = "d99db34575927545707e7081858716f3"
_type = "mapviz/AddMapvizDisplayRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# Add or updates a mapviz display.
string name # The name of the display.
string type # The plugin type.
int32 draw_order # The display order. 1 corresponds
# to the first displayed, 2 to the
# second, -1 to last, and -2 to the
# second to last, etc. 0 will keep
# the current display order of an
# existing display and give a new
# display the last display order.
bool visible # If the display should be visible.
marti_common_msgs/KeyValue[] properties # Configuration properties.
================================================================================
MSG: marti_common_msgs/KeyValue
# An arbitrary key-value pair
string key
string value
"""
__slots__ = ['name','type','draw_order','visible','properties']
_slot_types = ['string','string','int32','bool','marti_common_msgs/KeyValue[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
name,type,draw_order,visible,properties
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddMapvizDisplayRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.name is None:
self.name = ''
if self.type is None:
self.type = ''
if self.draw_order is None:
self.draw_order = 0
if self.visible is None:
self.visible = False
if self.properties is None:
self.properties = []
else:
self.name = ''
self.type = ''
self.draw_order = 0
self.visible = False
self.properties = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.type
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_iB().pack(_x.draw_order, _x.visible))
length = len(self.properties)
buff.write(_struct_I.pack(length))
for val1 in self.properties:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.properties is None:
self.properties = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.type = str[start:end].decode('utf-8', 'rosmsg')
else:
self.type = str[start:end]
_x = self
start = end
end += 5
(_x.draw_order, _x.visible,) = _get_struct_iB().unpack(str[start:end])
self.visible = bool(self.visible)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.properties = []
for i in range(0, length):
val1 = marti_common_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.properties.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.type
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_iB().pack(_x.draw_order, _x.visible))
length = len(self.properties)
buff.write(_struct_I.pack(length))
for val1 in self.properties:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.properties is None:
self.properties = None
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.type = str[start:end].decode('utf-8', 'rosmsg')
else:
self.type = str[start:end]
_x = self
start = end
end += 5
(_x.draw_order, _x.visible,) = _get_struct_iB().unpack(str[start:end])
self.visible = bool(self.visible)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.properties = []
for i in range(0, length):
val1 = marti_common_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.properties.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_iB = None
def _get_struct_iB():
global _struct_iB
if _struct_iB is None:
_struct_iB = struct.Struct("<iB")
return _struct_iB
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mapviz/AddMapvizDisplayResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class AddMapvizDisplayResponse(genpy.Message):
_md5sum = "937c9679a518e3a18d831e57125ea522"
_type = "mapviz/AddMapvizDisplayResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """
bool success # indicate successful run of triggered service
string message # informational, e.g. for error messages
"""
__slots__ = ['success','message']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddMapvizDisplayResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.message is None:
self.message = ''
else:
self.success = False
self.message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class AddMapvizDisplay(object):
_type = 'mapviz/AddMapvizDisplay'
_md5sum = '7bd51d7a9d8469fae51039cf79b96d10'
_request_class = AddMapvizDisplayRequest
_response_class = AddMapvizDisplayResponse
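
# Minimal round-trip sketch (illustrative addition, not generated code):
# build a request, serialize it into a buffer, then deserialize it back.
# The plugin type string used here is just a placeholder value.
if __name__ == '__main__':
  from io import BytesIO
  req = AddMapvizDisplayRequest(name='grid', type='mapviz_plugins/grid',
                                draw_order=1, visible=True, properties=[])
  buff = BytesIO()
  req.serialize(buff)
  decoded = AddMapvizDisplayRequest().deserialize(buff.getvalue())
  print(decoded.name, decoded.visible)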
| [
"[email protected]"
] | |
5cff6d1e75311f6a39ff6edc9ee7a41307b16b8f | cc1b87f9368e96e9b3ecfd5e0822d0037e60ac69 | /dashboard/dashboard/api/sheriffs.py | 6a66dcc1d8196a45035d18f819709bca37d2f30c | [
"BSD-3-Clause"
] | permissive | CTJyeh/catapult | bd710fb413b9058a7eae6073fe97a502546bbefe | c98b1ee7e410b2fb2f7dc9e2eb01804cf7c94fcb | refs/heads/master | 2020-08-19T21:57:40.981513 | 2019-10-17T09:51:09 | 2019-10-17T18:30:16 | 215,957,813 | 1 | 0 | BSD-3-Clause | 2019-10-18T06:41:19 | 2019-10-18T06:41:17 | null | UTF-8 | Python | false | false | 591 | py | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

from dashboard.api import api_request_handler
from dashboard.models import sheriff


class SheriffsHandler(api_request_handler.ApiRequestHandler):

  def _CheckUser(self):
    pass

  def Post(self):
    sheriff_keys = sheriff.Sheriff.query().fetch(keys_only=True)
    return [key.string_id() for key in sheriff_keys]
| [
"[email protected]"
] | |
12c1e9f39cad94697ac642a2b342136937d4f0fe | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/prb_control/entities/base/pre_queue/actions_validator.py | 9751c236c69e44dbb07d504e8b417ae5707659af | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,737 | py | # 2017.08.29 21:45:24 Central Europe (Daylight Saving Time)
# Embedded file name: scripts/client/gui/prb_control/entities/base/pre_queue/actions_validator.py
from gui.prb_control.entities.base.actions_validator import BaseActionsValidator, ActionsValidatorComposite, CurrentVehicleActionsValidator
from gui.prb_control.items import ValidationResult


class InQueueValidator(BaseActionsValidator):
    """
    Is player in queue validator.
    """

    def _validate(self):
        if self._entity.isInQueue():
            return ValidationResult(False)
        return super(InQueueValidator, self)._validate()


class PreQueueActionsValidator(ActionsValidatorComposite):
    """
    Pre queue actions validator base class. It has several parts:
     - state validation
     - vehicle validation
    """

    def __init__(self, entity):
        self._stateValidator = self._createStateValidator(entity)
        self._vehiclesValidator = self._createVehiclesValidator(entity)
        validators = [self._stateValidator, self._vehiclesValidator]
        super(PreQueueActionsValidator, self).__init__(entity, validators)

    def _createStateValidator(self, entity):
        """
        Part of template method to build state validation part
        """
        return InQueueValidator(entity)

    def _createVehiclesValidator(self, entity):
        """
        Part of template method to build vehicles validation part
        """
        return CurrentVehicleActionsValidator(entity)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\base\pre_queue\actions_validator.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:45:25 Central Europe (Daylight Saving Time)
| [
"[email protected]"
] | |
febfd5b2eafe78bde4218ddc057d9e3594551aba | 4fb5eb0a9a24fa5c112a4ebc854ee2604b04adda | /python/oanda/models/client_configure_reject_transaction.py | 3288226fc72e0f163b859744219be769cb51631b | [
"MIT"
] | permissive | KoenBal/OANDA_V20_Client | ed4c182076db62ecf7a216c3e3246ae682300e94 | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | refs/heads/master | 2020-03-27T20:42:25.777471 | 2019-12-02T15:44:06 | 2019-12-02T15:44:06 | 147,088,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,328 | py | # coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ClientConfigureRejectTransaction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'time': 'str',
'user_id': 'int',
'account_id': 'str',
'batch_id': 'str',
'request_id': 'str',
'type': 'str',
'alias': 'str',
'margin_rate': 'str',
'reject_reason': 'str'
}
attribute_map = {
'id': 'id',
'time': 'time',
'user_id': 'userID',
'account_id': 'AccountID',
'batch_id': 'batchID',
'request_id': 'requestID',
'type': 'type',
'alias': 'alias',
'margin_rate': 'marginRate',
'reject_reason': 'rejectReason'
}
def __init__(self, id=None, time=None, user_id=None, account_id=None, batch_id=None, request_id=None, type=None, alias=None, margin_rate=None, reject_reason=None): # noqa: E501
"""ClientConfigureRejectTransaction - a model defined in Swagger""" # noqa: E501
self._id = None
self._time = None
self._user_id = None
self._account_id = None
self._batch_id = None
self._request_id = None
self._type = None
self._alias = None
self._margin_rate = None
self._reject_reason = None
self.discriminator = None
if id is not None:
self.id = id
if time is not None:
self.time = time
if user_id is not None:
self.user_id = user_id
if account_id is not None:
self.account_id = account_id
if batch_id is not None:
self.batch_id = batch_id
if request_id is not None:
self.request_id = request_id
if type is not None:
self.type = type
if alias is not None:
self.alias = alias
if margin_rate is not None:
self.margin_rate = margin_rate
if reject_reason is not None:
self.reject_reason = reject_reason
@property
def id(self):
"""Gets the id of this ClientConfigureRejectTransaction. # noqa: E501
The Transaction's Identifier. # noqa: E501
:return: The id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ClientConfigureRejectTransaction.
The Transaction's Identifier. # noqa: E501
:param id: The id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._id = id
@property
def time(self):
"""Gets the time of this ClientConfigureRejectTransaction. # noqa: E501
The date/time when the Transaction was created. # noqa: E501
:return: The time of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this ClientConfigureRejectTransaction.
The date/time when the Transaction was created. # noqa: E501
:param time: The time of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._time = time
@property
def user_id(self):
"""Gets the user_id of this ClientConfigureRejectTransaction. # noqa: E501
The ID of the user that initiated the creation of the Transaction. # noqa: E501
:return: The user_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this ClientConfigureRejectTransaction.
The ID of the user that initiated the creation of the Transaction. # noqa: E501
:param user_id: The user_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: int
"""
self._user_id = user_id
@property
def account_id(self):
"""Gets the account_id of this ClientConfigureRejectTransaction. # noqa: E501
The ID of the Account the Transaction was created for. # noqa: E501
:return: The account_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this ClientConfigureRejectTransaction.
The ID of the Account the Transaction was created for. # noqa: E501
:param account_id: The account_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def batch_id(self):
"""Gets the batch_id of this ClientConfigureRejectTransaction. # noqa: E501
The ID of the \"batch\" that the Transaction belongs to. Transactions in the same batch are applied to the Account simultaneously. # noqa: E501
:return: The batch_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._batch_id
@batch_id.setter
def batch_id(self, batch_id):
"""Sets the batch_id of this ClientConfigureRejectTransaction.
The ID of the \"batch\" that the Transaction belongs to. Transactions in the same batch are applied to the Account simultaneously. # noqa: E501
:param batch_id: The batch_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._batch_id = batch_id
@property
def request_id(self):
"""Gets the request_id of this ClientConfigureRejectTransaction. # noqa: E501
The Request ID of the request which generated the transaction. # noqa: E501
:return: The request_id of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ClientConfigureRejectTransaction.
The Request ID of the request which generated the transaction. # noqa: E501
:param request_id: The request_id of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def type(self):
"""Gets the type of this ClientConfigureRejectTransaction. # noqa: E501
The Type of the Transaction. Always set to \"CLIENT_CONFIGURE_REJECT\" in a ClientConfigureRejectTransaction. # noqa: E501
:return: The type of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ClientConfigureRejectTransaction.
The Type of the Transaction. Always set to \"CLIENT_CONFIGURE_REJECT\" in a ClientConfigureRejectTransaction. # noqa: E501
:param type: The type of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
allowed_values = ["CREATE", "CLOSE", "REOPEN", "CLIENT_CONFIGURE", "CLIENT_CONFIGURE_REJECT", "TRANSFER_FUNDS", "TRANSFER_FUNDS_REJECT", "MARKET_ORDER", "MARKET_ORDER_REJECT", "FIXED_PRICE_ORDER", "LIMIT_ORDER", "LIMIT_ORDER_REJECT", "STOP_ORDER", "STOP_ORDER_REJECT", "MARKET_IF_TOUCHED_ORDER", "MARKET_IF_TOUCHED_ORDER_REJECT", "TAKE_PROFIT_ORDER", "TAKE_PROFIT_ORDER_REJECT", "STOP_LOSS_ORDER", "STOP_LOSS_ORDER_REJECT", "TRAILING_STOP_LOSS_ORDER", "TRAILING_STOP_LOSS_ORDER_REJECT", "ORDER_FILL", "ORDER_CANCEL", "ORDER_CANCEL_REJECT", "ORDER_CLIENT_EXTENSIONS_MODIFY", "ORDER_CLIENT_EXTENSIONS_MODIFY_REJECT", "TRADE_CLIENT_EXTENSIONS_MODIFY", "TRADE_CLIENT_EXTENSIONS_MODIFY_REJECT", "MARGIN_CALL_ENTER", "MARGIN_CALL_EXTEND", "MARGIN_CALL_EXIT", "DELAYED_TRADE_CLOSURE", "DAILY_FINANCING", "RESET_RESETTABLE_PL"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def alias(self):
"""Gets the alias of this ClientConfigureRejectTransaction. # noqa: E501
The client-provided alias for the Account. # noqa: E501
:return: The alias of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._alias
@alias.setter
def alias(self, alias):
"""Sets the alias of this ClientConfigureRejectTransaction.
The client-provided alias for the Account. # noqa: E501
:param alias: The alias of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._alias = alias
@property
def margin_rate(self):
"""Gets the margin_rate of this ClientConfigureRejectTransaction. # noqa: E501
The margin rate override for the Account. # noqa: E501
:return: The margin_rate of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._margin_rate
@margin_rate.setter
def margin_rate(self, margin_rate):
"""Sets the margin_rate of this ClientConfigureRejectTransaction.
The margin rate override for the Account. # noqa: E501
:param margin_rate: The margin_rate of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
self._margin_rate = margin_rate
@property
def reject_reason(self):
"""Gets the reject_reason of this ClientConfigureRejectTransaction. # noqa: E501
The reason that the Reject Transaction was created # noqa: E501
:return: The reject_reason of this ClientConfigureRejectTransaction. # noqa: E501
:rtype: str
"""
return self._reject_reason
@reject_reason.setter
def reject_reason(self, reject_reason):
"""Sets the reject_reason of this ClientConfigureRejectTransaction.
The reason that the Reject Transaction was created # noqa: E501
:param reject_reason: The reject_reason of this ClientConfigureRejectTransaction. # noqa: E501
:type: str
"""
allowed_values = ["INTERNAL_SERVER_ERROR", "INSTRUMENT_PRICE_UNKNOWN", "ACCOUNT_NOT_ACTIVE", "ACCOUNT_LOCKED", "ACCOUNT_ORDER_CREATION_LOCKED", "ACCOUNT_CONFIGURATION_LOCKED", "ACCOUNT_DEPOSIT_LOCKED", "ACCOUNT_WITHDRAWAL_LOCKED", "ACCOUNT_ORDER_CANCEL_LOCKED", "INSTRUMENT_NOT_TRADEABLE", "PENDING_ORDERS_ALLOWED_EXCEEDED", "ORDER_ID_UNSPECIFIED", "ORDER_DOESNT_EXIST", "ORDER_IDENTIFIER_INCONSISTENCY", "TRADE_ID_UNSPECIFIED", "TRADE_DOESNT_EXIST", "TRADE_IDENTIFIER_INCONSISTENCY", "INSUFFICIENT_MARGIN", "INSTRUMENT_MISSING", "INSTRUMENT_UNKNOWN", "UNITS_MISSING", "UNITS_INVALID", "UNITS_PRECISION_EXCEEDED", "UNITS_LIMIT_EXCEEDED", "UNITS_MIMIMUM_NOT_MET", "PRICE_MISSING", "PRICE_INVALID", "PRICE_PRECISION_EXCEEDED", "PRICE_DISTANCE_MISSING", "PRICE_DISTANCE_INVALID", "PRICE_DISTANCE_PRECISION_EXCEEDED", "PRICE_DISTANCE_MAXIMUM_EXCEEDED", "PRICE_DISTANCE_MINIMUM_NOT_MET", "TIME_IN_FORCE_MISSING", "TIME_IN_FORCE_INVALID", "TIME_IN_FORCE_GTD_TIMESTAMP_MISSING", "TIME_IN_FORCE_GTD_TIMESTAMP_IN_PAST", "PRICE_BOUND_INVALID", "PRICE_BOUND_PRECISION_EXCEEDED", "ORDERS_ON_FILL_DUPLICATE_CLIENT_ORDER_IDS", "TRADE_ON_FILL_CLIENT_EXTENSIONS_NOT_SUPPORTED", "CLIENT_ORDER_ID_INVALID", "CLIENT_ORDER_ID_ALREADY_EXISTS", "CLIENT_ORDER_TAG_INVALID", "CLIENT_ORDER_COMMENT_INVALID", "CLIENT_TRADE_ID_INVALID", "CLIENT_TRADE_ID_ALREADY_EXISTS", "CLIENT_TRADE_TAG_INVALID", "CLIENT_TRADE_COMMENT_INVALID", "ORDER_FILL_POSITION_ACTION_MISSING", "ORDER_FILL_POSITION_ACTION_INVALID", "TRIGGER_CONDITION_MISSING", "TRIGGER_CONDITION_INVALID", "ORDER_PARTIAL_FILL_OPTION_MISSING", "ORDER_PARTIAL_FILL_OPTION_INVALID", "INVALID_REISSUE_IMMEDIATE_PARTIAL_FILL", "TAKE_PROFIT_ORDER_ALREADY_EXISTS", "TAKE_PROFIT_ON_FILL_PRICE_MISSING", "TAKE_PROFIT_ON_FILL_PRICE_INVALID", "TAKE_PROFIT_ON_FILL_PRICE_PRECISION_EXCEEDED", "TAKE_PROFIT_ON_FILL_TIME_IN_FORCE_MISSING", "TAKE_PROFIT_ON_FILL_TIME_IN_FORCE_INVALID", "TAKE_PROFIT_ON_FILL_GTD_TIMESTAMP_MISSING", "TAKE_PROFIT_ON_FILL_GTD_TIMESTAMP_IN_PAST", "TAKE_PROFIT_ON_FILL_CLIENT_ORDER_ID_INVALID", "TAKE_PROFIT_ON_FILL_CLIENT_ORDER_TAG_INVALID", "TAKE_PROFIT_ON_FILL_CLIENT_ORDER_COMMENT_INVALID", "TAKE_PROFIT_ON_FILL_TRIGGER_CONDITION_MISSING", "TAKE_PROFIT_ON_FILL_TRIGGER_CONDITION_INVALID", "STOP_LOSS_ORDER_ALREADY_EXISTS", "STOP_LOSS_ORDER_GUARANTEED_REQUIRED", "STOP_LOSS_ORDER_GUARANTEED_PRICE_WITHIN_SPREAD", "STOP_LOSS_ORDER_GUARANTEED_NOT_ALLOWED", "STOP_LOSS_ORDER_GUARANTEED_HALTED_CREATE_VIOLATION", "STOP_LOSS_ORDER_GUARANTEED_HALTED_TIGHTEN_VIOLATION", "STOP_LOSS_ORDER_GUARANTEED_HEDGING_NOT_ALLOWED", "STOP_LOSS_ORDER_GUARANTEED_MINIMUM_DISTANCE_NOT_MET", "STOP_LOSS_ORDER_NOT_CANCELABLE", "STOP_LOSS_ORDER_NOT_REPLACEABLE", "STOP_LOSS_ORDER_GUARANTEED_LEVEL_RESTRICTION_EXCEEDED", "STOP_LOSS_ORDER_PRICE_AND_DISTANCE_BOTH_SPECIFIED", "STOP_LOSS_ORDER_PRICE_AND_DISTANCE_BOTH_MISSING", "STOP_LOSS_ON_FILL_REQUIRED_FOR_PENDING_ORDER", "STOP_LOSS_ON_FILL_GUARANTEED_NOT_ALLOWED", "STOP_LOSS_ON_FILL_GUARANTEED_REQUIRED", "STOP_LOSS_ON_FILL_PRICE_MISSING", "STOP_LOSS_ON_FILL_PRICE_INVALID", "STOP_LOSS_ON_FILL_PRICE_PRECISION_EXCEEDED", "STOP_LOSS_ON_FILL_GUARANTEED_MINIMUM_DISTANCE_NOT_MET", "STOP_LOSS_ON_FILL_GUARANTEED_LEVEL_RESTRICTION_EXCEEDED", "STOP_LOSS_ON_FILL_DISTANCE_INVALID", "STOP_LOSS_ON_FILL_PRICE_DISTANCE_MAXIMUM_EXCEEDED", "STOP_LOSS_ON_FILL_DISTANCE_PRECISION_EXCEEDED", "STOP_LOSS_ON_FILL_PRICE_AND_DISTANCE_BOTH_SPECIFIED", "STOP_LOSS_ON_FILL_PRICE_AND_DISTANCE_BOTH_MISSING", "STOP_LOSS_ON_FILL_TIME_IN_FORCE_MISSING", "STOP_LOSS_ON_FILL_TIME_IN_FORCE_INVALID", 
"STOP_LOSS_ON_FILL_GTD_TIMESTAMP_MISSING", "STOP_LOSS_ON_FILL_GTD_TIMESTAMP_IN_PAST", "STOP_LOSS_ON_FILL_CLIENT_ORDER_ID_INVALID", "STOP_LOSS_ON_FILL_CLIENT_ORDER_TAG_INVALID", "STOP_LOSS_ON_FILL_CLIENT_ORDER_COMMENT_INVALID", "STOP_LOSS_ON_FILL_TRIGGER_CONDITION_MISSING", "STOP_LOSS_ON_FILL_TRIGGER_CONDITION_INVALID", "TRAILING_STOP_LOSS_ORDER_ALREADY_EXISTS", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MISSING", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_INVALID", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_PRECISION_EXCEEDED", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MAXIMUM_EXCEEDED", "TRAILING_STOP_LOSS_ON_FILL_PRICE_DISTANCE_MINIMUM_NOT_MET", "TRAILING_STOP_LOSS_ON_FILL_TIME_IN_FORCE_MISSING", "TRAILING_STOP_LOSS_ON_FILL_TIME_IN_FORCE_INVALID", "TRAILING_STOP_LOSS_ON_FILL_GTD_TIMESTAMP_MISSING", "TRAILING_STOP_LOSS_ON_FILL_GTD_TIMESTAMP_IN_PAST", "TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_ID_INVALID", "TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_TAG_INVALID", "TRAILING_STOP_LOSS_ON_FILL_CLIENT_ORDER_COMMENT_INVALID", "TRAILING_STOP_LOSS_ORDERS_NOT_SUPPORTED", "TRAILING_STOP_LOSS_ON_FILL_TRIGGER_CONDITION_MISSING", "TRAILING_STOP_LOSS_ON_FILL_TRIGGER_CONDITION_INVALID", "CLOSE_TRADE_TYPE_MISSING", "CLOSE_TRADE_PARTIAL_UNITS_MISSING", "CLOSE_TRADE_UNITS_EXCEED_TRADE_SIZE", "CLOSEOUT_POSITION_DOESNT_EXIST", "CLOSEOUT_POSITION_INCOMPLETE_SPECIFICATION", "CLOSEOUT_POSITION_UNITS_EXCEED_POSITION_SIZE", "CLOSEOUT_POSITION_REJECT", "CLOSEOUT_POSITION_PARTIAL_UNITS_MISSING", "MARKUP_GROUP_ID_INVALID", "POSITION_AGGREGATION_MODE_INVALID", "ADMIN_CONFIGURE_DATA_MISSING", "MARGIN_RATE_INVALID", "MARGIN_RATE_WOULD_TRIGGER_CLOSEOUT", "ALIAS_INVALID", "CLIENT_CONFIGURE_DATA_MISSING", "MARGIN_RATE_WOULD_TRIGGER_MARGIN_CALL", "AMOUNT_INVALID", "INSUFFICIENT_FUNDS", "AMOUNT_MISSING", "FUNDING_REASON_MISSING", "CLIENT_EXTENSIONS_DATA_MISSING", "REPLACING_ORDER_INVALID", "REPLACING_TRADE_ID_INVALID"] # noqa: E501
if reject_reason not in allowed_values:
raise ValueError(
"Invalid value for `reject_reason` ({0}), must be one of {1}" # noqa: E501
.format(reject_reason, allowed_values)
)
self._reject_reason = reject_reason
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClientConfigureRejectTransaction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
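# A minimal usage sketch (not part of the generated client), assuming the
# generated __init__ accepts no required arguments, as is typical for
# swagger-codegen models: the reject_reason setter above validates against
# allowed_values, so assigning an unknown code raises ValueError.
if __name__ == "__main__":
    txn = ClientConfigureRejectTransaction()
    txn.reject_reason = "ALIAS_INVALID" # an allowed value, accepted
    try:
        txn.reject_reason = "NOT_A_REAL_REASON" # rejected by the setter
    except ValueError as err:
        print(err)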
| [
"[email protected]"
] | |
d5d199e83ae7039dce538234c4fd52c1271f01f4 | 4364fb1fec2ebda2cd240ddc19ef89243812c122 | /tensorflow_datasets/image/diabetic_retinopathy_detection_test.py | c6729f05bccf57a228449fa8db506e268ffc95fc | [
"Apache-2.0"
] | permissive | undeadinu/datasets | 67ebbe6c20462ed6f58713ccd8dc1d67db89f4d9 | a6f1bce86404d534b7343fb90f0ebfd6d098c346 | refs/heads/master | 2020-04-16T03:31:37.564934 | 2019-01-11T10:12:42 | 2019-01-11T10:13:12 | 165,234,637 | 0 | 0 | Apache-2.0 | 2019-01-11T11:44:44 | 2019-01-11T11:41:26 | Python | UTF-8 | Python | false | false | 1,329 | py | # coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for diabetic_retinopathy_detection dataset module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets.image import diabetic_retinopathy_detection
from tensorflow_datasets.testing import dataset_builder_testing
class DiabeticRetinopathyDetectionTest(dataset_builder_testing.TestCase):
DATASET_CLASS = diabetic_retinopathy_detection.DiabeticRetinopathyDetection
SPLITS = { # Expected number of examples on each split.
"sample": 4,
"train": 12,
"test": 12,
}
  OVERLAPPING_SPLITS = ["sample"] # contains examples from other splits
if __name__ == "__main__":
dataset_builder_testing.main()
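# dataset_builder_testing.TestCase drives the checks: it builds the dataset
# from fake example data and asserts the example counts declared in SPLITS
# above, so this module can be run directly or through the usual test runner.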
| [
"[email protected]"
] | |
c52bc5cd19d2140298c65216a92c5de0be6e6b88 | 05d3038180001200fb2dffc403a84f31eda89abd | /aok/apps/utils/appdf/buildmaster.py | 4c80631d22f08fe0db40fed9efc7418f2c7cfc0b | [] | no_license | 1d20/DITBOG | 20d005b018e16238bd72431abd81940508677903 | 5a8b2f0e5acf45c3a2e81e74f5df5d3690ee540d | refs/heads/master | 2021-01-18T21:14:22.276501 | 2017-08-01T14:16:34 | 2017-08-01T14:16:34 | 87,014,061 | 0 | 0 | null | 2017-08-01T14:16:35 | 2017-04-02T20:29:31 | JavaScript | UTF-8 | Python | false | false | 11,550 | py | #-*- coding:utf-8 -*-
from enum import Enum
import glob
import os
import shutil
import zipfile

# Assumptions for this port: Pillow stands in for System.Drawing, tkinter for
# the WinForms dialogs, and zipfile for DotNetZip, all of which the original
# C# version of this class used.
from PIL import Image
from tkinter import filedialog, messagebox

# DataMaster and Description_localization are defined elsewhere in this
# project; the module name below is an assumption.
from datamaster import DataMaster, Description_localization


class ImgType(Enum):
    Icon = "icon"
    Promo = "promo"
    Screenshot = "screenshot"


# Target dimensions per image type. The original ResizeImage helper was not
# included in this file, so these AppDF-style sizes are assumptions.
IMG_SIZES = {
    ImgType.Icon: (512, 512),
    ImgType.Promo: (1024, 500),
    ImgType.Screenshot: (480, 800),
}


class BuilderMaster:
    tmp_dir = "tmp\\"

    # Assumption: the original "Split('')" lost its delimiter character in
    # extraction; table.csv is assumed to be semicolon-separated.
    TABLE_DELIMITER = ";"

    def __init__(self, dir, package):
        self.dataMaster = DataMaster()
        self.package = package
        print(package)  # was Debug.WriteLine in the original
        self.name = package.split('.')[2]
        self.work_dir = dir + "\\"
        self.res_dir = dir + "\\res_appdf\\" + self.name + "\\"
        self.cancelbuild = False
        self.table_values = None
        self.table_Dictionary = None
        if not os.path.isdir(self.res_dir):
            # Let the user locate the resource folder and move it into place.
            messagebox.showinfo("BuilderMaster", self.res_dir + " not found")
            picked = filedialog.askdirectory(initialdir=dir + "\\res_appdf\\")
            if picked:
                shutil.move(picked, self.res_dir)
        self.apk_dir = self.work_dir + "apk\\"
        self.d_eng = self.work_dir + "full_description_eng.txt"
        self.d_rus = self.work_dir + "full_description_rus.txt"
        self.data_table = self.work_dir + "table.csv"
        self.default_data_file = self.work_dir + "appdf.txt"
        self.appdf_dir = self.work_dir + "appdf\\"

    def ResizeImage(self, path, img_type):
        # Minimal stand-in for the helper missing from the original file:
        # resize the image in place to the assumed target dimensions.
        with Image.open(path) as img:
            img.resize(IMG_SIZES[img_type]).save(path, "PNG")

    def _copy_as_png(self, src, png_name):
        # Copy src into tmp_dir and re-encode it as PNG when needed.
        dest = self.tmp_dir + os.path.basename(src)
        shutil.copy(src, dest)
        if not dest.lower().endswith(".png"):
            png_path = self.tmp_dir + png_name
            with Image.open(dest) as img:
                img.save(png_path, "PNG")
            dest = png_path
        return dest

    def PrepareData(self):
        os.makedirs(self.tmp_dir, exist_ok=True)
        for name in os.listdir(self.tmp_dir):
            os.remove(os.path.join(self.tmp_dir, name))
        images = self.dataMaster.description.images
        # icon
        images.icon = self._copy_as_png(images.icon, "icon.png")
        self.ResizeImage(images.icon, ImgType.Icon)
        # promo
        images.promo = self._copy_as_png(images.promo, "promo.png")
        self.ResizeImage(images.promo, ImgType.Promo)
        # screenshots
        screenshots = images.screenshots
        for i in range(len(screenshots)):
            try:
                screenshots[i] = self._copy_as_png(screenshots[i], "screen_" + str(i + 1) + ".png")
            except OSError:
                self.cancelbuild = True
                return
            self.ResizeImage(screenshots[i], ImgType.Screenshot)
        if len(screenshots) == 0:
            self.cancelbuild = True
        else:
            # Pad to four screenshots by repeating the first one, as the
            # original loop effectively did.
            i = len(screenshots)
            while len(screenshots) < 4:
                copy_path = self.tmp_dir + "screen_" + str(i + 1) + ".png"
                shutil.copy(screenshots[i - len(screenshots)], copy_path)
                screenshots.append(copy_path)
                i += 1
        images.screenshots = screenshots

    def FindInAppdfTXT(self, key):
        # Return the value of the first "key:value" line in appdf.txt.
        value = ""
        with open(self.default_data_file) as fh:
            for line in fh:
                line = line.rstrip("\n")
                if line.startswith(key):
                    value = line.split(':', 1)[1]
                    break
        return value

    def FindInTableCSV(self, j):
        # Cache the row of table.csv that belongs to this app, plus a
        # header -> value lookup used by DescriptionReplacer(). The C#
        # original could dereference a null table_keys here; building the
        # lookup together with the values avoids that bug.
        if self.table_values is None:
            with open(self.data_table) as fh:
                table_keys = fh.readline().rstrip("\n").split(self.TABLE_DELIMITER)
                for line in fh:
                    if self.name in line:
                        self.table_values = line.rstrip("\n").split(self.TABLE_DELIMITER)
                        break
            self.table_Dictionary = dict(zip(table_keys, self.table_values))
        return self.table_values[j]

    def DescriptionReplacer(self, text):
        # Longest keys first, so e.g. "$title_ru" is not clobbered by "$title".
        for key in sorted(self.table_Dictionary, key=len, reverse=True):
            text = text.replace("$" + key, self.table_Dictionary[key])
        return text

    def CollectData(self):
        dm = self.dataMaster
        dm.version = "1"
        dm.platform = "android"
        dm.package = self.package
        dm.categorization.type = self.FindInAppdfTXT("type")
        dm.categorization.category = self.FindInAppdfTXT("category")
        dm.categorization.subcategory = self.FindInAppdfTXT("subcategory")
        if dm.categorization.subcategory == "-":
            dm.categorization.subcategory = ""
        texts = dm.description.texts
        texts.title = self.FindInTableCSV(0)
        texts.keywords = (self.FindInAppdfTXT("keywords").replace('"', '') + "," + self.FindInTableCSV(3)).replace(",", ", ")
        with open(self.d_eng, encoding="utf-8") as fh:
            texts.full_description = self.DescriptionReplacer(fh.read())
        texts.short_description = texts.full_description[:77] + "..."
        texts.features.extend(["-", "-", "-"])
        images = dm.description.images
        try:
            # the "tamplate" spelling matches the keys used in appdf.txt
            images.icon = glob.glob(self.res_dir + self.FindInAppdfTXT("icon_name_tamplate") + ".*")[0]
            images.promo = glob.glob(self.res_dir + self.FindInAppdfTXT("big_image_template") + ".*")[0]
        except IndexError:
            self.cancelbuild = True
            return
        for path in glob.glob(self.res_dir + self.FindInAppdfTXT("screenshots_name_tamplate") + "*"):
            images.screenshots.append(path)
        # Russian localization
        localization = Description_localization()
        localization.texts.title = self.FindInTableCSV(1)
        localization.texts.keywords = texts.keywords
        with open(self.d_rus, encoding="utf-8") as fh:
            localization.texts.full_description = self.DescriptionReplacer(fh.read())
        localization.texts.short_description = localization.texts.full_description[:77] + "..."
        localization.texts.features.extend(["-", "-", "-"])
        dm.description_localizations.append(localization)
        dm.apk_files.apk_file = self.apk_dir + texts.title.replace("Memory:", "Memoria") + ".apk"
        dm.customer_support.phone = self.FindInAppdfTXT("phone")
        dm.customer_support.email = self.FindInAppdfTXT("email")
        dm.customer_support.website = self.FindInAppdfTXT("website")

    def BuildDescriptionXML(self):
        description = self.tmp_dir + "description.xml"
        with open(description, "w", encoding="utf-8") as fh:
            fh.write(self.dataMaster.ToXML())

    def PackFile(self, sourceFile, destFile):
        try:
            # Check that the source file exists
            if not os.path.exists(sourceFile):
                return False
            # Append to the archive with maximum deflate compression
            # (the original used DotNetZip with CompressionLevel.Level9)
            with zipfile.ZipFile(destFile, "a", zipfile.ZIP_DEFLATED, compresslevel=9) as zf:
                # Add the file at the root of the archive
                zf.write(sourceFile, os.path.basename(sourceFile))
            return True
        except OSError:
            return False

    def BuildAppDF(self):
        self.CollectData()
        if not self.cancelbuild:
            self.PrepareData()
        if not self.cancelbuild:
            self.BuildDescriptionXML()
        if self.cancelbuild:
            return
        print("BuildAppDF")
        appdf_file = self.tmp_dir + self.package + ".appdf"
        # The original kept a disabled sanity check here that verified
        # description.xml, the icon, the promo image and the apk all exist
        # before packing, and skipped the build otherwise.
        self.PackFile(self.tmp_dir + "description.xml", appdf_file)
        self.PackFile(self.dataMaster.description.images.icon, appdf_file)
        self.PackFile(self.dataMaster.description.images.promo, appdf_file)
        self.PackFile(self.dataMaster.apk_files.apk_file, appdf_file)
        for screen in self.dataMaster.description.images.screenshots:
            self.PackFile(screen, appdf_file)
        shutil.copy(appdf_file, appdf_file.replace(self.tmp_dir, self.appdf_dir))
"[email protected]"
] | |
cde3346e90bf0b24b91ea9df9de7d3821dc8a338 | d850f5f7cc09a8379c04d38f5c26c2e6b73f3484 | /kimai_python/models/project_rate.py | 76b7342f2bc64b749686f63d73e8f0362a61bf71 | [
"MIT"
] | permissive | MPW1412/kimai-python | 8d78e3df3036ab11573e800dce96011552aa6946 | 7c89b0866b85fbc4b1092b30eca21f1be48db533 | refs/heads/master | 2022-10-12T17:24:50.522103 | 2020-04-24T06:21:57 | 2020-04-24T06:21:57 | 264,545,139 | 0 | 0 | MIT | 2020-05-16T23:14:13 | 2020-05-16T23:14:12 | null | UTF-8 | Python | false | false | 5,828 | py | # coding: utf-8
"""
Kimai 2 - API Docs
JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen. # noqa: E501
OpenAPI spec version: 0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProjectRate(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'rate': 'float',
'internal_rate': 'float',
'is_fixed': 'bool',
'user': 'User'
}
attribute_map = {
'id': 'id',
'rate': 'rate',
'internal_rate': 'internalRate',
'is_fixed': 'isFixed',
'user': 'user'
}
def __init__(self, id=None, rate=None, internal_rate=None, is_fixed=None, user=None): # noqa: E501
"""ProjectRate - a model defined in Swagger""" # noqa: E501
self._id = None
self._rate = None
self._internal_rate = None
self._is_fixed = None
self._user = None
self.discriminator = None
if id is not None:
self.id = id
if rate is not None:
self.rate = rate
if internal_rate is not None:
self.internal_rate = internal_rate
self.is_fixed = is_fixed
if user is not None:
self.user = user
@property
def id(self):
"""Gets the id of this ProjectRate. # noqa: E501
:return: The id of this ProjectRate. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ProjectRate.
:param id: The id of this ProjectRate. # noqa: E501
:type: int
"""
self._id = id
@property
def rate(self):
"""Gets the rate of this ProjectRate. # noqa: E501
:return: The rate of this ProjectRate. # noqa: E501
:rtype: float
"""
return self._rate
@rate.setter
def rate(self, rate):
"""Sets the rate of this ProjectRate.
:param rate: The rate of this ProjectRate. # noqa: E501
:type: float
"""
self._rate = rate
@property
def internal_rate(self):
"""Gets the internal_rate of this ProjectRate. # noqa: E501
:return: The internal_rate of this ProjectRate. # noqa: E501
:rtype: float
"""
return self._internal_rate
@internal_rate.setter
def internal_rate(self, internal_rate):
"""Sets the internal_rate of this ProjectRate.
:param internal_rate: The internal_rate of this ProjectRate. # noqa: E501
:type: float
"""
self._internal_rate = internal_rate
@property
def is_fixed(self):
"""Gets the is_fixed of this ProjectRate. # noqa: E501
:return: The is_fixed of this ProjectRate. # noqa: E501
:rtype: bool
"""
return self._is_fixed
@is_fixed.setter
def is_fixed(self, is_fixed):
"""Sets the is_fixed of this ProjectRate.
:param is_fixed: The is_fixed of this ProjectRate. # noqa: E501
:type: bool
"""
if is_fixed is None:
raise ValueError("Invalid value for `is_fixed`, must not be `None`") # noqa: E501
self._is_fixed = is_fixed
@property
def user(self):
"""Gets the user of this ProjectRate. # noqa: E501
:return: The user of this ProjectRate. # noqa: E501
:rtype: User
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this ProjectRate.
:param user: The user of this ProjectRate. # noqa: E501
:type: User
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProjectRate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProjectRate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
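# A minimal usage sketch (not part of the generated client): is_fixed is the
# only required field, since its setter rejects None; everything else
# defaults to None and to_dict() serializes whatever is set.
if __name__ == "__main__":
    project_rate = ProjectRate(id=1, rate=85.0, internal_rate=60.0, is_fixed=False)
    print(project_rate.to_dict())
    # {'id': 1, 'rate': 85.0, 'internal_rate': 60.0, 'is_fixed': False, 'user': None}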
| [
"[email protected]"
] | |
c618026c9962936fdc9c07d9881c1e5b4d611e77 | 99351753f51b2a585f3a0bb1dc11b8c6eebd76df | /setup.py | f547ea175656df3ebba7efc860cec92119a0174e | [] | no_license | FND/tiddlywebplugins.imaker | 6ef680e76145f9f954a66ba2d1cabd15cc0b4637 | bcaeca5a4f2b44d9e48414f48cfa5cae468f6c4c | refs/heads/master | 2021-01-15T18:30:52.466042 | 2013-07-13T10:51:54 | 2013-07-13T10:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | AUTHOR = 'Chris Dent'
AUTHOR_EMAIL = '[email protected]'
NAME = 'tiddlywebplugins.imaker'
DESCRIPTION = 'Make TiddlyWeb instances'
VERSION = '0.1.3'
import os
from setuptools import setup, find_packages
# You should carefully review the below (install_requires especially).
setup(
namespace_packages = ['tiddlywebplugins'],
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = 'http://pypi.python.org/pypi/%s' % NAME,
platforms = 'Posix; MacOS X; Windows',
packages = find_packages(exclude=['test', 'testpackage']),
install_requires = ['tiddlyweb',
'tiddlywebplugins.utils',
'tiddlywebplugins.pkgstore',
],
zip_safe = False
)
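# Typical usage, via the standard setuptools workflow (nothing here is
# specific to this package):
#   python setup.py sdist       # build a source distribution for PyPI
#   pip install -e .            # editable install while developing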
| [
"[email protected]"
] | |
def2f9542d47fd2055869a485e738ece66c185bf | 859fc6793e1c2e019e0ec119a367ea43beeefcdc | /python/ccxt/bitget.py | 127d3af30c18385dab501a01211f45a8f3eafdb4 | [
"MIT"
] | permissive | siwenHT/ccxt | 638b8cee929c2638e7b742eae9c7ac13fdb026ec | d6da2a081f8d66f82509bb720e3f23b124016a5b | refs/heads/master | 2023-09-04T07:54:44.408565 | 2021-11-06T14:17:06 | 2021-11-06T14:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139,831 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitget(Exchange):
def describe(self):
return self.deep_extend(super(bitget, self).describe(), {
'id': 'bitget',
'name': 'Bitget',
'countries': ['SG'],
'version': 'v3',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'has': {
'cancelOrder': True,
'cancelOrders': True,
'CORS': None,
'createOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchWithdrawals': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1d',
'1w': '1w',
},
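            # The {hostname} placeholder in the URL templates below is filled
            # in with self.hostname when request URLs are built, so the API
            # domain can be overridden without editing these entries.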
'hostname': 'bitget.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/88317935-a8a21c80-cd22-11ea-8e2b-4b9fac5975eb.jpg',
'api': {
'data': 'https://api.{hostname}',
'api': 'https://api.{hostname}',
'capi': 'https://capi.{hostname}',
'swap': 'https://capi.{hostname}',
},
'www': 'https://www.bitget.com',
'doc': [
'https://bitgetlimited.github.io/apidoc/en/swap',
'https://bitgetlimited.github.io/apidoc/en/spot',
],
'fees': 'https://www.bitget.cc/zh-CN/rate?tab=1',
'test': {
'rest': 'https://testnet.bitget.com',
},
'referral': 'https://www.bitget.com/expressly?languageType=0&channelCode=ccxt&vipCode=tg9j',
},
'api': {
'data': {
'get': [
'market/history/kline', # Kline data
'market/detail/merged', # Get aggregated ticker
'market/tickers', # Get all trading tickers
'market/allticker', # Get all trading market method 2
'market/depth', # Get Market Depth Data
'market/trade', # Get Trade Detail Data
'market/history/trade', # Get record of trading
'market/detail', # Get Market Detail 24h Volume
'common/symbols', # Query all trading pairs and accuracy supported in the station
'common/currencys', # Query all currencies supported in the station
'common/timestamp', # Query system current time
],
},
'api': {
'get': [
'account/accounts', # Get all accounts of current user(即account_id)。
'accounts/{account_id}/balance', # Get the balance of the specified account
'order/orders', # Query order, deprecated
'order/orders/openOrders',
'order/orders/history',
'order/deposit_withdraw', # Query assets history
],
'post': [
'order/orders/place', # Place order
'order/orders/{order_id}/submitcancel', # Request to cancel an order request
'order/orders/batchcancel', # Bulk order cancellation
'order/orders/{order_id}', # Query an order details
'order/orders/{order_id}/matchresults', # Query the transaction details of an order
'order/matchresults', # Query current order, order history
],
},
'capi': {
'get': [
'market/time',
'market/contracts',
'market/depth',
'market/tickers',
'market/ticker',
'market/trades',
'market/candles',
'market/index',
'market/open_count',
'market/open_interest',
'market/price_limit',
'market/funding_time',
'market/mark_price',
'market/open_count',
'market/historyFundRate',
],
},
'swap': {
'get': [
'account/accounts',
'account/account',
'account/settings',
'position/allPosition',
'position/singlePosition',
'position/holds',
'order/detail',
'order/orders',
'order/fills',
'order/current',
'order/currentPlan', # conditional
'order/history',
'order/historyPlan', # conditional
'trace/closeTrack',
'trace/currentTrack',
'trace/historyTrack',
'trace/summary',
'trace/profitSettleTokenIdGroup',
'trace/profitDateGroupList',
'trace/profitDateList',
'trace/waitProfitDateList',
],
'post': [
'account/leverage',
'account/adjustMargin',
'account/modifyAutoAppendMargin',
'order/placeOrder',
'order/batchOrders',
'order/cancel_order',
'order/cancel_batch_orders',
'order/plan_order',
'order/cancel_plan',
'position/changeHoldModel',
'trace/closeTrackOrder',
],
},
},
'fees': {
'spot': {
'taker': self.parse_number('0.002'),
'maker': self.parse_number('0.002'),
},
'swap': {
'taker': self.parse_number('0.0006'),
'maker': self.parse_number('0.0004'),
},
},
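            # Worked example: at the spot taker rate of 0.002, a 1000 USDT
            # market buy pays 1000 * 0.002 = 2 USDT in fees; the same notional
            # as a swap maker order at 0.0004 would cost 0.40 USDT.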
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
                    '30016': ExchangeError,  # {"code": 30016, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeError, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
                    '30035': ExchangeError,  # {"code": 30035, "message": "trading is not supported in this website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
                    '32022': ExchangeError,  # {"code": 32022, "message": "this function is not supported in your country or region according to the regulations"}
                    '32023': ExchangeError,  # {"code": 32023, "message": "this account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': AccountSuspended, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
'32040': ExchangeError, # User have open contract orders or position
                    '32044': ExchangeError,  # {"code": 32044, "message": "The margin ratio after submitting this order is lower than the minimum requirement({0}) for your tier."}
                    '32045': ExchangeError,  # Amount of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
'32050': InvalidOrder, # Order strategy rang error
'32051': InvalidOrder, # Order strategy ice depth error
                    '32052': ExchangeError,  # Amount of commission over 100 thousand
                    '32053': ExchangeError,  # Each user can hold up to 6 ice plans at the same time
                    '32057': ExchangeError,  # The order price is zero. Market-close-all function cannot be executed
                    '32054': ExchangeError,  # Trade not allowed
'32055': InvalidOrder, # cancel order error
'32056': ExchangeError, # iceberg per order average should between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
                    '32064': ExchangeError,  # Time interval of orders should be set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
                    '32067': ExchangeError,  # Account equity < required margin in this setting. Please adjust your leverage level again.
                    '32068': ExchangeError,  # The margin for this position will fall short of the required margin in this setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
                    '32071': ExchangeError,  # Your current liquidation mode does not support this action.
                    '32072': ExchangeError,  # The highest available margin for your order’s tier is {0}. Please edit your margin and place a new order.
                    '32073': ExchangeError,  # The action does not apply to the token
                    '32074': ExchangeError,  # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of this asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
                    '32077': ExchangeError,  # Your margin for this asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during this period of time. Your account will be restored after the liquidation is complete.)
                    '32078': ExchangeError,  # Please cancel all open orders before switching the liquidation mode
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
                    '33001': PermissionDenied,  # {"code": 33001, "message": "margin account for this pair is not enabled yet"}
                    '33002': AccountSuspended,  # {"code": 33002, "message": "margin account for this pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
                    '33016': ExchangeError,  # {"code": 33016, "message": "margin trading is not open for this token"}
                    '33017': InsufficientFunds,  # {"code": 33017, "message": "insufficient balance"}
                    '33018': ExchangeError,  # {"code": 33018, "message": "this parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
                    '33035': ExchangeError,  # This type of order cannot be canceled
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # cancel order number not higher limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
                    '33062': ExchangeError,  # The leverage ratio is too high. The borrowed position has exceeded the maximum position of this leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
# account
                    '21009': ExchangeError,  # Funds cannot be transferred out within 30 minutes after swap settlement
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
                    '34003': ExchangeError,  # {"code": 34003, "message": "sorry, this token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
                    '34026': ExchangeError,  # transfer too frequently
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': ExchangeError, # {"code": 35022, "message": "Contract status error"}
'35024': ExchangeError, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': ExchangeError, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': ExchangeError, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancelation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': AccountSuspended, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': AccountSuspended, # Funds cannot be transferred in or out, as account is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, as equity of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
'36212': InvalidOrder, # Exceeding max batch size for oder cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
# --------------------------------------------------------
# swap
'400': BadRequest, # Bad Request
'401': AuthenticationError, # Unauthorized access
'403': PermissionDenied, # Access prohibited
'404': BadRequest, # Request address does not exist
'405': BadRequest, # The HTTP Method is not supported
'415': BadRequest, # The current media type is not supported
'429': DDoSProtection, # Too many requests
'500': ExchangeNotAvailable, # System busy
'1001': RateLimitExceeded, # The request is too frequent and has been throttled
'1002': ExchangeError, # {0} verifications within 24 hours
'1003': ExchangeError, # You failed more than {0} times today, the current operation is locked, please try again in 24 hours
# '00000': ExchangeError, # success
'40001': AuthenticationError, # ACCESS_KEY cannot be empty
'40002': AuthenticationError, # SECRET_KEY cannot be empty
'40003': AuthenticationError, # Signature cannot be empty
'40004': InvalidNonce, # Request timestamp expired
'40005': InvalidNonce, # Invalid ACCESS_TIMESTAMP
'40006': AuthenticationError, # Invalid ACCESS_KEY
'40007': BadRequest, # Invalid Content_Type
'40008': InvalidNonce, # Request timestamp expired
'40009': AuthenticationError, # sign signature error
'40010': AuthenticationError, # sign signature error
'40011': AuthenticationError, # ACCESS_PASSPHRASE cannot be empty
'40012': AuthenticationError, # apikey/password is incorrect
'40013': ExchangeError, # User status is abnormal
'40014': PermissionDenied, # Incorrect permissions
'40015': ExchangeError, # System is abnormal, please try again later
'40016': PermissionDenied, # The user must bind the phone or Google
'40017': ExchangeError, # Parameter verification failed
'40018': PermissionDenied, # Invalid IP
'40102': BadRequest, # Contract configuration does not exist, please check the parameters
'40103': BadRequest, # Request method cannot be empty
'40104': ExchangeError, # Lever adjustment failure
'40105': ExchangeError, # Abnormal access to current price limit data
'40106': ExchangeError, # Abnormal get next settlement time
'40107': ExchangeError, # Abnormal access to index price data
'40108': InvalidOrder, # Wrong order quantity
'40109': OrderNotFound, # The data of the order cannot be found, please confirm the order number
'40200': OnMaintenance, # Server upgrade, please try again later
'40201': InvalidOrder, # Order number cannot be empty
'40202': ExchangeError, # User information cannot be empty
'40203': BadRequest, # The amount of adjustment margin cannot be empty or negative
'40204': BadRequest, # Adjustment margin type cannot be empty
'40205': BadRequest, # Adjusted margin type data is wrong
'40206': BadRequest, # The direction of the adjustment margin cannot be empty
'40207': BadRequest, # The adjustment margin data is wrong
'40208': BadRequest, # The accuracy of the adjustment margin amount is incorrect
'40209': BadRequest, # The current page number is wrong, please confirm
'40300': ExchangeError, # User does not exist
'40301': PermissionDenied, # Permission has not been obtained yet. If you need to use it, please contact customer service
'40302': BadRequest, # Parameter abnormality
'40303': BadRequest, # Can only query up to 20,000 data
'40304': BadRequest, # Parameter type is abnormal
'40305': BadRequest, # Client_oid length is not greater than 50, and cannot be Martian characters
'40306': ExchangeError, # Batch processing orders can only process up to 20
'40308': OnMaintenance, # The contract is being temporarily maintained
'40309': BadSymbol, # The contract has been removed
'40400': ExchangeError, # Status check abnormal
'40401': ExchangeError, # The operation cannot be performed
'40402': BadRequest, # The opening direction cannot be empty
'40403': BadRequest, # Wrong opening direction format
'40404': BadRequest, # Whether to enable automatic margin call parameters cannot be empty
'40405': BadRequest, # Whether to enable the automatic margin call parameter type is wrong
'40406': BadRequest, # Whether to enable automatic margin call parameters is of unknown type
'40407': ExchangeError, # The query direction is not the direction entrusted by the plan
'40408': ExchangeError, # Wrong time range
'40409': ExchangeError, # Time format error
'40500': InvalidOrder, # Client_oid check error
'40501': ExchangeError, # Channel name error
'40502': ExchangeError, # If it is a copy user, you must pass the copy to whom
'40503': ExchangeError, # With the single type
'40504': ExchangeError, # Platform code must pass
'40505': ExchangeError, # Not the same as single type
'40506': AuthenticationError, # Platform signature error
'40507': AuthenticationError, # Api signature error
'40508': ExchangeError, # KOL is not authorized
'40509': ExchangeError, # Abnormal copy end
'40600': ExchangeError, # Copy function suspended
'40601': ExchangeError, # Followers cannot be KOL
'40602': ExchangeError, # The number of copies has reached the limit and cannot process the request
'40603': ExchangeError, # Abnormal copy end
'40604': ExchangeNotAvailable, # Server is busy, please try again later
'40605': ExchangeError, # Copy type, the copy number must be passed
'40606': ExchangeError, # The type of document number is wrong
'40607': ExchangeError, # Document number must be passed
'40608': ExchangeError, # No documented products currently supported
'40609': ExchangeError, # The contract product does not support copying
'40700': BadRequest, # Cursor parameters are incorrect
'40701': ExchangeError, # KOL is not authorized
'40702': ExchangeError, # Unauthorized copying user
'40703': ExchangeError, # Bill inquiry start and end time cannot be empty
'40704': ExchangeError, # Can only check the data of the last three months
'40705': BadRequest, # The start and end time cannot exceed 90 days
'40706': InvalidOrder, # Wrong order price
'40707': BadRequest, # Start time is greater than end time
'40708': BadRequest, # Parameter verification is abnormal
'40709': ExchangeError, # There is no position in self position, and no automatic margin call can be set
'40710': ExchangeError, # Abnormal account status
'40711': InsufficientFunds, # Insufficient contract account balance
'40712': InsufficientFunds, # Insufficient margin
'40713': ExchangeError, # Cannot exceed the maximum transferable margin amount
'40714': ExchangeError, # No direct margin call is allowed
# spot
'invalid sign': AuthenticationError,
'invalid currency': BadSymbol, # invalid trading pair
'invalid symbol': BadSymbol,
'invalid period': BadRequest, # invalid Kline type
'invalid user': ExchangeError,
'invalid amount': InvalidOrder,
'invalid type': InvalidOrder, # {"status":"error","ts":1595700344504,"err_code":"invalid-parameter","err_msg":"invalid type"}
'invalid orderId': InvalidOrder,
'invalid record': ExchangeError,
'invalid accountId': BadRequest,
'invalid address': BadRequest,
                    'accesskey not null': AuthenticationError,  # {"status":"error","ts":1595704360508,"err_code":"invalid-parameter","err_msg":"accesskey not null"}
'illegal accesskey': AuthenticationError,
'sign not null': AuthenticationError,
'req_time is too much difference from server time': InvalidNonce,
'permissions not right': PermissionDenied, # {"status":"error","ts":1595704490084,"err_code":"invalid-parameter","err_msg":"permissions not right"}
'illegal sign invalid': AuthenticationError, # {"status":"error","ts":1595684716042,"err_code":"invalid-parameter","err_msg":"illegal sign invalid"}
'user locked': AccountSuspended,
'Request Frequency Is Too High': RateLimitExceeded,
'more than a daily rate of cash': BadRequest,
'more than the maximum daily withdrawal amount': BadRequest,
'need to bind email or mobile': ExchangeError,
'user forbid': PermissionDenied,
'User Prohibited Cash Withdrawal': PermissionDenied,
'Cash Withdrawal Is Less Than The Minimum Value': BadRequest,
'Cash Withdrawal Is More Than The Maximum Value': BadRequest,
'the account with in 24 hours ban coin': PermissionDenied,
'order cancel fail': BadRequest, # {"status":"error","ts":1595703343035,"err_code":"bad-request","err_msg":"order cancel fail"}
'base symbol error': BadSymbol,
'base date error': ExchangeError,
'api signature not valid': AuthenticationError,
'gateway internal error': ExchangeError,
'audit failed': ExchangeError,
'order queryorder invalid': BadRequest,
'market no need price': InvalidOrder,
'limit need price': InvalidOrder,
'userid not equal to account_id': ExchangeError,
                    'your balance is low': InsufficientFunds,
'address invalid cointype': ExchangeError,
'system exception': ExchangeError, # {"status":"error","ts":1595711862763,"err_code":"system exception","err_msg":"system exception"}
'50003': ExchangeError, # No record
'50004': BadSymbol, # The transaction pair is currently not supported or has been suspended
'50006': PermissionDenied, # The account is forbidden to withdraw. If you have any questions, please contact customer service.
'50007': PermissionDenied, # The account is forbidden to withdraw within 24 hours. If you have any questions, please contact customer service.
'50008': RequestTimeout, # network timeout
'50009': RateLimitExceeded, # The operation is too frequent, please try again later
'50010': ExchangeError, # The account is abnormally frozen. If you have any questions, please contact customer service.
'50014': InvalidOrder, # The transaction amount under minimum limits
'50015': InvalidOrder, # The transaction amount exceed maximum limits
'50016': InvalidOrder, # The price can't be higher than the current price
'50017': InvalidOrder, # Price under minimum limits
'50018': InvalidOrder, # The price exceed maximum limits
'50019': InvalidOrder, # The amount under minimum limits
'50020': InsufficientFunds, # Insufficient balance
'50021': InvalidOrder, # Price is under minimum limits
'50026': InvalidOrder, # Market price parameter error
'invalid order query time': ExchangeError, # start time is greater than end time; or the time interval between start time and end time is greater than 48 hours
'invalid start time': BadRequest, # start time is a date 30 days ago; or start time is a date in the future
'invalid end time': BadRequest, # end time is a date 30 days ago; or end time is a date in the future
'20003': ExchangeError, # operation failed, {"status":"error","ts":1595730308979,"err_code":"bad-request","err_msg":"20003"}
                    '01001': ExchangeError,  # order failed, {"status":"fail","err_code":"01001","err_msg":"系统异常,请稍后重试"}(the err_msg translates to "system error, please try again later")
},
'broad': {
'invalid size, valid range': ExchangeError,
},
},
'precisionMode': TICK_SIZE,
'options': {
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': [
'spot',
'swap',
],
'parseOHLCV': {
'volume': {
'spot': 'amount',
'swap': 5,
},
},
'defaultType': 'spot', # 'spot', 'swap'
'accountId': None, # '1012838157',
'timeframes': {
'spot': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'2h': '120min',
'4h': '240min',
'6h': '360min',
'12h': '720min',
'1d': '1day',
'1w': '1week',
},
'swap': {
'1m': '60',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
},
},
},
})
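    # a minimal usage sketch(the 'bitget' exchange id is an assumption inferred
    # from the comments elsewhere in this file; adjust it to the actual class):
    #
    #     exchange = ccxt.bitget({'apiKey': '...', 'secret': '...', 'password': '...'})
    #     exchange.options['defaultType'] = 'swap'  # route unified calls to the swap endpoints
    #     print(exchange.fetch_ticker('BTC/USD'))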
def fetch_time(self, params={}):
response = self.dataGetCommonTimestamp(params)
#
# {
# "status":"ok",
# "data":"1595525139400"
# }
#
return self.safe_integer(response, 'data')
def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
        if not types:
            types = [
                self.options['defaultType'],
            ]
result = []
for i in range(0, len(types)):
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
#
# spot
#
# {
# "base_currency":"btc",
# "quote_currency":"usdt",
# "symbol":"btc_usdt",
# "tick_size":"2",
# "size_increment":"4",
# "status":"1",
# "base_asset_precision":"8"
# }
#
#
# swap
#
# {
# "symbol":"btcusd",
# "underlying_index":"BTC",
# "quote_currency":"USD",
# "coin":"BTC",
# "contract_val":"1",
# "listing":null,
# "delivery":["07:00:00","15:00:00","23:00:00"],
# "size_increment":"0",
# "tick_size":"1",
# "forwardContractFlag":false,
# "priceEndStep":5
# }
#
id = self.safe_string(market, 'symbol')
marketType = 'spot'
spot = True
swap = False
baseId = self.safe_string_2(market, 'base_currency', 'coin')
quoteId = self.safe_string(market, 'quote_currency')
contractVal = self.safe_number(market, 'contract_val')
if contractVal is not None:
marketType = 'swap'
spot = False
swap = True
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = id.upper()
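        # e.g. a swap id like 'btcusd' keeps the uppercased id 'BTCUSD' as its unified
        # symbol, while a spot id like 'btc_usdt' is rebuilt as 'BTC/USDT' below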
if spot:
symbol = base + '/' + quote
tickSize = self.safe_string(market, 'tick_size')
sizeIncrement = self.safe_string(market, 'size_increment')
precision = {
'amount': self.parse_number(self.parse_precision(sizeIncrement)),
'price': self.parse_number(self.parse_precision(tickSize)),
}
minAmount = self.safe_number_2(market, 'min_size', 'base_min_size')
status = self.safe_string(market, 'status')
active = None
if status is not None:
active = (status == '1')
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': marketType,
'spot': spot,
'swap': swap,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': precision['price'],
'max': None,
},
'cost': {
'min': precision['price'],
'max': None,
},
},
})
def fetch_markets_by_type(self, type, params={}):
if type == 'spot':
response = self.dataGetCommonSymbols(params)
#
# {
# "status":"ok",
# "ts":1595526622408,
# "data":[
# {
# "base_currency":"btc",
# "quote_currency":"usdt",
# "symbol":"btc_usdt",
# "tick_size":"2",
# "size_increment":"4",
# "status":"1",
# "base_asset_precision":"8"
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_markets(data)
elif type == 'swap':
response = self.capiGetMarketContracts(params)
#
# {
# "data":{
# "contractApis":[
# {
# "instrument_id":"btcusd",
# "underlying_index":"BTC",
# "quote_currency":"USD",
# "coin":"BTC",
# "contract_val":"1",
# "delivery":["07:00:00","15:00:00","23:00:00"],
# "size_increment":"0",
# "tick_size":"1",
# "forwardContractFlag":false,
# "priceEndStep":"5"
# },
# ]
# },
# "status":"ok",
# "err_code":"00000"
# }
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
def fetch_currencies(self, params={}):
response = self.dataGetCommonCurrencys(params)
#
# {
# "status":"ok",
# "ts":1595537740466,
# "data":[
# "btc",
# "bft",
# "usdt",
# "usdt-omni",
# "usdt-erc20"
# ]
# }
#
result = {}
data = self.safe_value(response, 'data', [])
for i in range(0, len(data)):
id = data[i]
code = self.safe_currency_code(id)
result[code] = {
'id': id,
'code': code,
'info': id,
'type': None,
'name': None,
'active': None,
'fee': None,
'precision': None,
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {'min': None, 'max': None},
},
}
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = None
if market['spot']:
method = 'dataGetMarketDepth'
request['type'] = 'step0' # step0, step1, step2, step3, step4, step5, do not merge depth if step0
elif market['swap']:
method = 'capiGetMarketDepth'
request['limit'] = 100 if (limit is None) else limit # max 100
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "ch":"market.btc_usdt.depth.step0",
# "ts":1595607628197,
# "data":{
# "id":"1595607628197",
# "ts":"1595607628197",
# "bids":[
# ["9534.99","15.36160000000000000000"],
# ["9534.85","0.14580000000000000000"],
# ["9534.73","0.02100000000000000000"],
# ],
# "asks":[
# ["9535.02","7.37160000000000000000"],
# ["9535.03","0.09040000000000000000"],
# ["9535.05","0.02180000000000000000"],
# ]
# }
# }
#
# swap
#
# {
# "asks":[
# ["9579.0","119865",1],
# ["9579.5","90069",1],
# ["9580.0","256673",1],
# ],
# "bids":[
# ["9578.5","2417",1],
# ["9577.5","3024",1],
# ["9577.0","21548",1],
# ],
# "timestamp":"1595664767349"
# }
#
data = self.safe_value(response, 'data', response)
timestamp = self.safe_integer_2(data, 'timestamp', 'ts')
nonce = self.safe_integer(data, 'id')
orderbook = self.parse_order_book(data, symbol, timestamp)
orderbook['nonce'] = nonce
return orderbook
def parse_ticker(self, ticker, market=None):
#
# spot
#
# fetchTicker
#
# {
# "id":"1595538241113",
# "bid":["0.028474000000","1.139400000000"],
# "ask":["0.028482000000","0.353100000000"],
# "amount":"2850.6649",
# "count":"818",
# "open":"0.02821",
# "close":"0.028474",
# "low":"0.02821",
# "high":"0.029091",
# "vol":"79.4548693404"
# }
#
# fetchTickers
#
# {
# "amount":"30086.8095",
# "count":"22450",
# "open":"9525.11",
# "close":"9591.81",
# "low":"9510.68",
# "high":"9659.7",
# "vol":"286239092.250461",
# "symbol":"btc_usdt"
# }
#
# swap
#
# {
# "instrument_id":"btcusd",
# "last":"9574.5",
# "best_ask":"9575.0",
# "best_bid":"9574.0",
# "high_24h":"9672",
# "low_24h":"9512",
# "volume_24h":"567697050",
# "timestamp":"1595538450096"
# }
#
timestamp = self.safe_integer_2(ticker, 'timestamp', 'id')
symbol = None
marketId = self.safe_string_2(ticker, 'instrument_id', 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('_')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_number_2(ticker, 'last', 'close')
open = self.safe_number(ticker, 'open')
bidVolume = None
askVolume = None
bid = self.safe_value(ticker, 'bid')
if bid is None:
bid = self.safe_number(ticker, 'best_bid')
else:
bidVolume = self.safe_number(bid, 1)
bid = self.safe_number(bid, 0)
ask = self.safe_value(ticker, 'ask')
if ask is None:
ask = self.safe_number(ticker, 'best_ask')
else:
askVolume = self.safe_number(ask, 1)
ask = self.safe_number(ask, 0)
baseVolume = self.safe_number_2(ticker, 'amount', 'volume_24h')
quoteVolume = self.safe_number(ticker, 'vol')
vwap = self.vwap(baseVolume, quoteVolume)
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number_2(ticker, 'high', 'high_24h'),
'low': self.safe_number_2(ticker, 'low', 'low_24h'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = None
if market['spot']:
method = 'dataGetMarketDetailMerged'
elif market['swap']:
method = 'capiGetMarketTicker'
request = {
'symbol': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "ch":"market.eth_btc.detail.merged",
# "ts":1595538241474,
# "data":{
# "id":"1595538241113",
# "bid":["0.028474000000","1.139400000000"],
# "ask":["0.028482000000","0.353100000000"],
# "amount":"2850.6649",
# "count":"818",
# "open":"0.02821",
# "close":"0.028474",
# "low":"0.02821",
# "high":"0.029091",
# "vol":"79.4548693404"
# }
# }
#
# swap
#
# {
# "symbol":"btcusd",
# "last":"9575.5",
# "best_ask":"9576.0",
# "best_bid":"9575.0",
# "high_24h":"9646",
# "low_24h":"9516",
# "volume_24h":"516656839",
# "timestamp":"1595664217405"
# }
#
data = self.safe_value(response, 'data', response)
return self.parse_ticker(data, market)
def fetch_tickers_by_type(self, type, symbols=None, params={}):
self.load_markets()
method = None
if type == 'spot':
method = 'dataGetMarketTickers'
elif type == 'swap':
method = 'capiGetMarketTickers'
response = getattr(self, method)(params)
#
# spot
#
# {
# "status":"ok",
# "ts":1595542893250,
# "data":[
# {
# "amount":"30086.8095",
# "count":"22450",
# "open":"9525.11",
# "close":"9591.81",
# "low":"9510.68",
# "high":"9659.7",
# "vol":"286239092.250461",
# "symbol":"btc_usdt"
# }
# ]
# }
#
# swap
#
# [
# {
# "symbol":"btcusd",
# "last":"9572",
# "best_ask":"9571.5",
# "best_bid":"9570.5",
# "high_24h":"9646",
# "low_24h":"9516",
# "volume_24h":"515401635",
# "timestamp":"1595664479952"
# }
# ]
#
data = self.safe_value(response, 'data', response)
timestamp = None
if not isinstance(response, list):
timestamp = self.safe_integer(response, 'ts')
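        # spot tickers share a single top-level 'ts' timestamp, so it is injected into each row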
result = {}
for i in range(0, len(data)):
ticker = self.parse_ticker(self.extend({
'timestamp': timestamp,
}, data[i]))
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot
#
# {
# "id":"1",
# "price":"9533.81",
# "amount":"0.7326",
# "direction":"sell",
# "ts":"1595604964000"
# }
#
# swap
#
# {
# "trade_id":"670581881367954915",
# "price":"9553.00",
# "size":"20",
# "side":"sell",
# "timestamp":"1595605100004",
# "symbol":"btcusd"
# }
#
# spot fetchMyTrades(private)
#
# {
# "id": 29555,
# "order_id": 59378,
# "match_id": 59335,
# "symbol": "eth_usdt",
# "type": "buy-limit",
# "source": "api",
# "price": "100.1000000000",
# "filled_amount": "0.9845000000",
# "filled_fees": "0.0019690000",
# "created_at": 1494901400487
# }
#
# fetchOrderTrades(private)
#
# spot
#
# {
# "id":"614164775",
# "created_at":"1596298860602",
# "filled_amount":"0.0417000000000000",
# "filled_fees":"0.0000834000000000",
# "match_id":"673491702661292033",
# "order_id":"673491720340279296",
# "price":"359.240000000000",
# "source":"接口",
# "symbol":"eth_usdt",
# "type":"buy-market"
# }
#
# swap
#
# {
# "trade_id":"6667390",
# "symbol":"cmt_btcusdt",
# "order_id":"525946425993854915",
# "price":"9839.00",
# "order_qty":"3466",
# "fee":"-0.0000528407360000",
# "timestamp":"1561121514442",
# "exec_type":"M",
# "side":"3"
# }
#
symbol = None
marketId = self.safe_string(trade, 'symbol')
base = None
quote = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
elif marketId is not None:
parts = marketId.split('_')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId.upper()
if (symbol is None) and (market is not None):
symbol = market['symbol']
base = market['base']
quote = market['quote']
timestamp = self.safe_integer(trade, 'created_at')
timestamp = self.safe_integer_2(trade, 'timestamp', 'ts', timestamp)
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'filled_amount', 'order_qty')
amountString = self.safe_string_2(trade, 'size', 'amount', amountString)
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
        orderType = self.safe_string(trade, 'type')
        side = None
        type = None
        if orderType is not None:
            type = self.parse_order_type(orderType)
            side = self.parse_order_side(orderType)
        else:
            side = self.safe_string_2(trade, 'side', 'direction')
            type = self.parse_order_type(side)
            side = self.parse_order_side(side)
feeCostString = self.safe_string(trade, 'fee')
if feeCostString is None:
feeCostString = self.safe_string(trade, 'filled_fees')
else:
feeCostString = Precise.string_neg(feeCostString)
feeCost = self.parse_number(feeCostString)
fee = None
if feeCost is not None:
feeCurrency = base if (side == 'buy') else quote
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
'cost': feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
id = self.safe_string_2(trade, 'trade_id', 'id')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, limit=None, since=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = None
if market['spot']:
method = 'dataGetMarketHistoryTrade'
elif market['swap']:
method = 'capiGetMarketTrades'
if market['spot']:
if limit is not None:
request['size'] = limit # default 1, max 2000
elif market['swap']:
if limit is None:
limit = 100 # default 20, max 100
request['limit'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "ch":"market.btc_usdt.trade.detail",
# "ts":1595604968430,
# "data":{
# "ts":"1595604964000",
# "data":[
# {"id":"1","price":"9533.81","amount":"0.7326","direction":"sell","ts":"1595604964000"},
# {"id":"2","price":"9533.67","amount":"1.1591","direction":"buy","ts":"1595604961000"},
# {"id":"3","price":"9533.67","amount":"1.5022","direction":"sell","ts":"1595604959000"},
# ]
# }
# }
#
# swap
#
# [
# {"trade_id":"670833198971748613","price":"9578.50","size":"5412","side":"sell","timestamp":"1595665018790","symbol":"btcusd"},
# {"trade_id":"670833194240574915","price":"9579.00","size":"3972","side":"buy","timestamp":"1595665017662","symbol":"btcusd"},
# {"trade_id":"670833194240573915","price":"9579.00","size":"1227","side":"buy","timestamp":"1595665017662","symbol":"btcusd"},
# ]
#
trades = None
if isinstance(response, list):
trades = response
else:
data = self.safe_value(response, 'data', {})
            trades = self.safe_value(data, 'data', [])
return self.parse_trades(trades, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m'):
#
# spot
#
# {
# "id":"1594694700000",
# "amount":"283.6811",
# "count":"234",
# "open":"9230.00",
# "close":"9227.15",
# "low":"9206.66",
# "high":"9232.33",
# "vol":"2618015.032504000000"
# }
#
# swap
#
# [
# "1594693800000",
# "9240",
# "9241",
# "9222",
# "9228.5",
# "3913370",
# "424.003616350563"
# ]
#
options = self.safe_value(self.options, 'parseOHLCV', {})
volume = self.safe_value(options, 'volume', {})
if isinstance(ohlcv, list):
            # the volume index is an integer position within the candle array(see self.options['parseOHLCV']['volume'])
            volumeIndex = self.safe_integer(volume, market['type'], 6)
return [
self.safe_integer(ohlcv, 0), # timestamp
self.safe_number(ohlcv, 1), # Open
self.safe_number(ohlcv, 2), # High
self.safe_number(ohlcv, 3), # Low
self.safe_number(ohlcv, 4), # Close
# self.safe_number(ohlcv, 5), # Quote Volume
# self.safe_number(ohlcv, 6), # Base Volume
self.safe_number(ohlcv, volumeIndex), # Volume, bitget will return base volume in the 7th element for future markets
]
else:
volumeIndex = self.safe_value(volume, market['type'], 6)
return [
self.safe_integer(ohlcv, 'id'),
self.safe_number(ohlcv, 'open'), # Open
self.safe_number(ohlcv, 'high'), # High
self.safe_number(ohlcv, 'low'), # Low
self.safe_number(ohlcv, 'close'), # Close
self.safe_number(ohlcv, volumeIndex), # Base Volume
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = None
type = market['type']
options = self.safe_value(self.options, 'timeframes', {})
intervals = self.safe_value(options, type, {})
interval = self.safe_value(intervals, self.timeframes[timeframe])
if market['spot']:
method = 'dataGetMarketHistoryKline'
request['period'] = interval
if limit is not None:
request['size'] = limit # default 150, max 1000
elif market['swap']:
duration = self.parse_timeframe(timeframe)
method = 'capiGetMarketCandles'
request['granularity'] = interval
now = self.milliseconds()
if since is None:
if limit is None:
limit = 1000
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
else:
request['start'] = self.iso8601(since)
if limit is None:
request['end'] = self.iso8601(now)
else:
request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
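        # e.g. timeframe='5m'(duration 300 seconds) with limit=1000 and no since requests the
        # window [now - 1000 * 300 * 1000 ms, now], formatted as iso8601 start/end strings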
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "ch":"market.btc_usdt.kline.15min",
# "ts":1595594183874,
# "data":[
# {"id":"1594694700000","amount":"283.6811","count":"234","open":"9230.00","close":"9227.15","low":"9206.66","high":"9232.33","vol":"2618015.032504000000"},
# {"id":"1594695600000","amount":"457.2904","count":"238","open":"9227.15","close":"9229.46","low":"9223.80","high":"9235.14","vol":"4220734.684570000000"},
# {"id":"1594696500000","amount":"501.2353","count":"255","open":"9229.46","close":"9227.78","low":"9222.69","high":"9230.74","vol":"4625779.185006000000"},
# ]
# }
#
# swap
#
# [
# ["1594764900000","9255.5","9261","9251","9255.5","3958946","427.742307964305"],
# ["1594765800000","9255.5","9264","9252","9258","3609496","389.832756058107"],
# ["1594766700000","9258","9260","9244.5","9250.5","3738600","403.97870345085"],
# ]
#
candles = response
if not isinstance(response, list):
candles = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(candles, market, timeframe, since, limit)
def parse_spot_balance(self, response):
#
# {
# "status":"ok",
# "ts":1595681450932,
# "data":{
# "list":[
# {"balance":"0.0000000000000000","currency":"BTC","type":"trade"},
# {"balance":"0.0000000000000000","currency":"BTC","type":"frozen"},
# {"balance":"0.0000000000000000","currency":"BTC","type":"lock"},
# ],
# "id":"7420922606",
# "type":"spot",
# "state":"working"
# }
# }
#
result = {'info': response}
data = self.safe_value(response, 'data')
balances = self.safe_value(data, 'list')
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
if not (code in result):
account = self.account()
result[code] = account
type = self.safe_value(balance, 'type')
if type == 'trade':
result[code]['free'] = self.safe_string(balance, 'balance')
elif (type == 'frozen') or (type == 'lock'):
used = self.safe_string(result[code], 'used')
result[code]['used'] = Precise.string_add(used, self.safe_string(balance, 'balance'))
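        # e.g. per-currency rows with trade=1.0, frozen=0.2 and lock=0.3 parse into
        # free=1.0 and used=0.5 for that currency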
return self.parse_balance(result)
def parse_swap_balance(self, response):
#
# swap
#
# [
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"bchusd","margin_frozen":"0","timestamp":"1595673431547","margin_mode":"fixed","forwardContractFlag":false},
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"ethusd","margin_frozen":"0","timestamp":"1595673431573","margin_mode":"fixed","forwardContractFlag":false},
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"cmt_btcsusdt","margin_frozen":"0","timestamp":"1595673431577","margin_mode":"fixed","forwardContractFlag":true},
# ]
#
#
result = {}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'symbol')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_string(balance, 'equity')
account['free'] = self.safe_string(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
def fetch_accounts(self, params={}):
request = {
'method': 'accounts',
}
response = self.apiGetAccountAccounts(self.extend(request, params))
#
# {
# "status":"ok",
# "ts":1595679591824,
# "data":[
# {"id":"7420922606","type":"spot","state":"working"}
# ]
# }
#
data = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(data)):
account = data[i]
accountId = self.safe_string(account, 'id')
type = self.safe_string_lower(account, 'type')
result.append({
'id': accountId,
'type': type,
'currency': None,
'info': account,
})
return result
def find_account_by_type(self, type):
self.load_markets()
self.load_accounts()
accountsByType = self.group_by(self.accounts, 'type')
accounts = self.safe_value(accountsByType, type)
if accounts is None:
raise ExchangeError(self.id + " findAccountByType() could not find an accountId with type '" + type + "', specify the 'accountId' parameter instead") # eslint-disable-line quotes
numAccounts = len(accounts)
if numAccounts > 1:
raise ExchangeError(self.id + " findAccountByType() found more than one accountId with type '" + type + "', specify the 'accountId' parameter instead") # eslint-disable-line quotes
return accounts[0]
def get_account_id(self, params):
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_string(self.options, 'accountId')
accountId = self.safe_string(params, 'accountId', defaultAccountId)
if accountId is not None:
return accountId
defaultType = self.safe_string(self.options, 'defaultType', 'margin')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
if type is None:
raise ArgumentsRequired(self.id + " getAccountId() requires an 'accountId' parameter")
account = self.find_account_by_type(type)
return account['id']
def fetch_balance(self, params={}):
self.load_markets()
self.load_accounts()
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance() requires a 'type' parameter, one of 'spot', 'swap'")
method = None
query = self.omit(params, 'type')
if type == 'spot':
accountId = self.get_account_id(params)
method = 'apiGetAccountsAccountIdBalance'
query['account_id'] = accountId
query['method'] = 'balance'
elif type == 'swap':
method = 'swapGetAccountAccounts'
response = getattr(self, method)(query)
#
# spot
#
# {
# "status":"ok",
# "ts":1595681450932,
# "data":{
# "list":[
# {"balance":"0.0000000000000000","currency":"BTC","type":"trade"},
# {"balance":"0.0000000000000000","currency":"BTC","type":"frozen"},
# {"balance":"0.0000000000000000","currency":"BTC","type":"lock"},
# ],
# "id":"7420922606",
# "type":"spot",
# "state":"working"
# }
# }
#
# swap
#
# [
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"bchusd","margin_frozen":"0","timestamp":"1595673431547","margin_mode":"fixed","forwardContractFlag":false},
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"ethusd","margin_frozen":"0","timestamp":"1595673431573","margin_mode":"fixed","forwardContractFlag":false},
# {"equity":"0","fixed_balance":"0","total_avail_balance":"0","margin":"0","realized_pnl":"0","unrealized_pnl":"0","symbol":"cmt_btcsusdt","margin_frozen":"0","timestamp":"1595673431577","margin_mode":"fixed","forwardContractFlag":true},
# ]
#
return self.parse_balance_by_type(type, response)
def parse_balance_by_type(self, type, response):
if type == 'spot':
return self.parse_spot_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
def parse_order_status(self, status):
statuses = {
'submitted': 'open',
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'buy-market': 'buy',
'sell-market': 'sell',
'buy-limit': 'buy',
'sell-limit': 'sell',
'1': 'long', # open long
'2': 'short', # open short
'3': 'long', # close long
'4': 'short', # close short
}
return self.safe_string(sides, side, side)
def parse_order_type(self, type):
types = {
'buy-market': 'market',
'sell-market': 'market',
'buy-limit': 'limit',
'sell-limit': 'limit',
'1': 'open', # open long
'2': 'open', # open short
'3': 'close', # close long
'4': 'close', # close short
}
return self.safe_string(types, type, type)
def parse_order(self, order, market=None):
#
# createOrder
#
# spot
#
# {
# "status":"ok",
# "ts":1595792596056,
# "data":671368296142774272
# }
#
# swap
#
# {
# "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
# "order_id":"671757564085534713"
# }
#
# cancelOrder
#
# spot
#
# {
# "status": "ok",
# "ts": 1595818631279,
# "data": 671368296142774272
# }
#
# swap
#
# {
# "order_id":"671757564085534713",
# "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
# "symbol":"cmt_ethusdt",
# "result":true,
# "err_code":null,
# "err_msg":null
# }
#
# fetchOpenOrders, fetchClosedOrders, fetchOrder
#
# spot
#
# {
# "account_id":"7420922606",
# "amount":"0.1000000000000000",
# "canceled_at":"1595872129618",
# "created_at":"1595872089525",
# "filled_amount":"0.000000000000",
# "filled_cash_amount":"0.000000000000",
# "filled_fees":"0.000000000000",
# "finished_at":"1595872129618",
# "id":"671701716584665088",
# "price":"150.000000000000",
# "source":"接口",
# "state":"canceled",
# "symbol":"eth_usdt",
# "type":"buy-limit"
# }
#
# swap
#
# {
# "symbol":"cmt_ethusdt",
# "size":"1",
# "timestamp":"1595885546770",
# "client_oid":"f3aa81d6-9a4c-4eab-bebe-ebc19da21cf2",
# "createTime":"1595885521200",
# "filled_qty":"0",
# "fee":"0.00000000",
# "order_id":"671758053112020913",
# "price":"150.00",
# "price_avg":"0.00",
# "status":"0",
# "type":"1",
# "order_type":"0",
# "totalProfits":null
# }
#
id = self.safe_string(order, 'order_id')
id = self.safe_string_2(order, 'id', 'data', id)
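        # createOrder(spot) returns the id under 'data', the spot order endpoints return 'id',
        # and the swap endpoints return 'order_id', hence the chained lookups above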
timestamp = self.safe_integer_2(order, 'created_at', 'createTime')
type = self.safe_string(order, 'type')
side = self.parse_order_side(type)
type = self.parse_order_type(type)
        # if (side != 'buy') and (side != 'sell'):
        #     side = self.parse_order_side(type)
        # if (type != 'limit') and (type != 'market'):
        #     if 'pnl' in order:
        #         type = 'futures'
        #     else:
        #         type = 'swap'
symbol = None
marketId = self.safe_string(order, 'symbol')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
symbol = marketId.upper()
if (symbol is None) and (market is not None):
symbol = market['symbol']
amount = self.safe_string_2(order, 'amount', 'size')
filled = self.safe_string_2(order, 'filled_amount', 'filled_qty')
cost = self.safe_string(order, 'filled_cash_amount')
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'price_avg')
status = self.parse_order_status(self.safe_string_2(order, 'state', 'status'))
feeCost = self.safe_number_2(order, 'filled_fees', 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
return self.safe_order2({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
})
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
self.load_accounts()
market = self.market(symbol)
#
# spot
#
        # account_id  True   string  Account ID, obtained using the accounts method. Currency transactions use the account id of the 'spot' account; for loan asset transactions, please use the account id of the 'margin' account
        # amount      True   string  For a limit order, the quantity to buy or sell; for a market buy order, how much quote currency to spend; for a market sell order, how much base currency to sell
        # price       False  string  Order price, market orders do not pass this parameter
        # source      False  string  Order source api
        # symbol      True   string  Trading pair btc_usdt, eth_btc ...
        # type        True   string  Order type buy-market: buy at market price, sell-market: sell at market price, buy-limit: buy at limit price, sell-limit: sell at limit price
#
# swap
#
        # symbol       String  Yes  Contract ID
        # client_oid   String  Yes  Customized order ID to identify your orders(fewer than 50 characters, no special characters)
        # size         String  Yes  Quantity to buy or sell(value not equal to 0 or negative)
        # type         String  Yes  1: open long, 2: open short, 3: close long, 4: close short
        # order_type   String  Yes  0: normal order(None and 0 imply a normal limit order), 1: post only, 2: fill or kill, 3: immediate or cancel
        # match_price  String  Yes  0: limit price, 1: market price
        # price        String  No   Price of each contract
#
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId', self.uuid())
params = self.omit(params, ['client_oid', 'clientOrderId'])
method = None
if market['spot']:
accountId = self.get_account_id({
'type': market['type'],
})
method = 'apiPostOrderOrdersPlace'
request['account_id'] = accountId
request['method'] = 'place'
request['type'] = side + '-' + type
if type == 'limit':
request['amount'] = self.amount_to_precision(symbol, amount)
request['price'] = self.price_to_precision(symbol, price)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
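                # e.g. createOrder(symbol, 'market', 'buy', 0.5, 150) sends
                # amount = cost_to_precision(0.5 * 150) = 75 in quote currency; set
                # self.options['createMarketBuyOrderRequiresPrice'] = False to pass
                # the total cost directly in the amount argument instead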
if side == 'buy':
cost = self.safe_number(params, 'amount')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if cost is None:
cost = amount * price
elif cost is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'amount' extra parameter(the exchange-specific behaviour)")
else:
cost = amount if (cost is None) else cost
request['amount'] = self.cost_to_precision(symbol, cost)
elif side == 'sell':
request['amount'] = self.amount_to_precision(symbol, amount)
# ...
elif market['swap']:
request['order_type'] = '0' # '0' = Normal order, None and 0 imply a normal limit order, '1' = Post only, '2' = Fill or Kill, '3' = Immediate Or Cancel
request['client_oid'] = clientOrderId
orderType = self.safe_string(params, 'type')
if orderType is None:
raise ArgumentsRequired(self.id + " createOrder() requires a type parameter, '1' = open long, '2' = open short, '3' = close long, '4' = close short for " + market['type'] + ' orders')
request['size'] = self.amount_to_precision(symbol, amount)
request['type'] = orderType
# if match_price is set to '1', the price parameter will be ignored for market orders
if type == 'limit':
request['match_price'] = '0'
request['price'] = self.price_to_precision(symbol, price)
elif type == 'market':
request['match_price'] = '1'
method = 'swapPostOrderPlaceOrder'
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "ts":1595792596056,
# "data":"671368296142774272"
# }
#
# swap
#
# {
# "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
# "order_id":"671757564085534713"
# }
#
return self.parse_order(response, market)
    def cancel_order(self, id, symbol=None, params={}):
        # both the spot and the swap cancelation endpoints need a market id,
        # so a symbol argument is mandatory for either market type
        if symbol is None:
            raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
        self.load_markets()
        market = self.market(symbol)
        type = self.safe_string(params, 'type', market['type'])
        query = self.omit(params, 'type')
method = None
request = {}
if type == 'spot':
method = 'apiPostOrderOrdersOrderIdSubmitcancel'
request['order_id'] = id
request['method'] = 'submitcancel'
elif type == 'swap':
method = 'swapPostOrderCancelOrder'
request['orderId'] = id
request['symbol'] = market['id']
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {"status": "ok", "ts": 1595818631279, "data": 671368296142774272}
#
# swap
#
# {
# "order_id":"671757564085534713",
# "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
# "symbol":"cmt_ethusdt",
# "result":true,
# "err_code":null,
# "err_msg":null
# }
#
return self.parse_order(response, market)
def cancel_orders(self, ids, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
if type is None:
raise ArgumentsRequired(self.id + " cancelOrders() requires a type parameter(one of 'spot', 'swap').")
params = self.omit(params, 'type')
request = {}
method = None
if type == 'spot':
method = 'apiPostOrderOrdersBatchcancel'
request['method'] = 'batchcancel'
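            # the endpoint apparently expects a JSON array of unquoted numeric ids,
            # so the double quotes are stripped from the json-encoded list below,
            # e.g. '["258414711","478585558"]' becomes '[258414711,478585558]'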
jsonIds = self.json(ids)
parts = jsonIds.split('"')
request['order_ids'] = ''.join(parts)
elif type == 'swap':
method = 'swapPostOrderCancelBatchOrders'
request['symbol'] = market['id']
request['ids'] = ids
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "data": {
# "success": [
# "673451224205135872",
# ],
# "failed": [
# {
# "err-msg": "invalid record",
# "order-id": "673451224205135873",
# "err-code": "base record invalid"
# }
# ]
# }
# }
#
# swap
#
# {
# "result":true,
# "symbol":"cmt_btcusdt",
# "order_ids":[
# "258414711",
# "478585558"
# ],
# "fail_infos":[
# {
# "order_id":"258414711",
# "err_code":"401",
# "err_msg":""
# }
# ]
# }
#
return response
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'swap').")
method = None
request = {}
if type == 'spot':
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method = 'apiPostOrderOrdersClientOid'
request['client_oid'] = clientOid
else:
method = 'apiPostOrderOrdersOrderId'
request['order_id'] = id
request['method'] = 'getOrder'
elif type == 'swap':
method = 'swapGetOrderDetail'
request['symbol'] = market['id']
request['orderId'] = id
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# "status":"ok",
# "ts":1595897886717,
# "data":{
# "account_id":"7420922606",
# "amount":"0.1000000000000000",
# "canceled_at":"1595818631541",
# "created_at":"1595792595897",
# "filled_amount":"0.000000000000",
# "filled_cash_amount":"0.000000000000",
# "filled_fees":"0.000000000000",
# "finished_at":"1595818631541",
# "id":"671368296142774272",
# "price":"150.000000000000",
# "source":"接口",
# "state":"canceled",
# "symbol":"eth_usdt",
# "type":"buy-limit"
# }
# }
#
#
# swap
#
# {
# "symbol":"cmt_ethusdt",
# "size":"1",
# "timestamp":"1595896459890",
# "client_oid":"58775e54-0592-491c-97e8-e2369025f2d1",
# "createTime":"1595885404607",
# "filled_qty":"0",
# "fee":"0",
# "order_id":"671757564085534713",
# "price":"150",
# "price_avg":"0",
# "status":"-1",
# "type":"1",
# "order_type":"0",
# "totalProfits":"0"
# }
#
data = self.safe_value(response, 'data', response)
return self.parse_order(data, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
request = {
'symbol': market['id'],
}
method = None
if type == 'spot':
method = 'apiGetOrderOrdersOpenOrders'
# request['from'] = self.safe_string(params, 'from') # order id
# request['direct'] = 'next' # or 'prev'
request['method'] = 'openOrders'
            if limit is not None:
                request['size'] = limit  # default 100, max 1000
elif type == 'swap':
method = 'swapGetOrderOrders'
request['status'] = '3' # 0 Failed, 1 Partially Filled, 2 Fully Filled 3 = Open + Partially Filled, 4 Canceling
request['from'] = '1'
request['to'] = '1'
            if limit is None:
                limit = 100  # default 100, max 100
            request['limit'] = limit
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
#
# {
# "status":"ok",
# "ts":1595875165865,
# "data":[
# {
# "account_id":"7420922606",
# "amount":"0.1000000000000000",
# "canceled_at":"1595872129618",
# "created_at":"1595872089525",
# "filled_amount":"0.000000000000",
# "filled_cash_amount":"0.000000000000",
# "filled_fees":"0.000000000000",
# "finished_at":"1595872129618",
# "id":"671701716584665088",
# "price":"150.000000000000",
# "source":"接口",
# "state":"canceled",
# "symbol":"eth_usdt",
# "type":"buy-limit"
# }
# ]
# }
#
# swap
#
# [
# {
# "symbol":"cmt_ethusdt",
# "size":"1",
# "timestamp":"1595885546770",
# "client_oid":"f3aa81d6-9a4c-4eab-bebe-ebc19da21cf2",
# "createTime":"1595885521200",
# "filled_qty":"0",
# "fee":"0.00000000",
# "order_id":"671758053112020913",
# "price":"150.00",
# "price_avg":"0.00",
# "status":"0",
# "type":"1",
# "order_type":"0",
# "totalProfits":null
# }
# ]
#
data = response
if not isinstance(response, list):
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, None, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchClosedOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
request = {
'symbol': market['id'],
}
method = None
if type == 'spot':
method = 'apiGetOrderOrdersHistory'
            # Value range [(end_time - 48h), end_time]
# the query window is 48 hours at most
# the window shift range is the last 30 days
if since is not None:
request['start_time'] = since
# request['end_time'] = self.safe_integer(params, 'end_time')
# request['from'] = self.safe_string(params, 'from') # order id
# request['direct'] = 'next' # or 'prev'
request['method'] = 'openOrders'
            if limit is not None:
                request['size'] = limit  # default 100, max 1000
elif type == 'swap':
method = 'swapGetOrderOrders'
request['status'] = '2' # 0 Failed, 1 Partially Filled, 2 Fully Filled 3 = Open + Partially Filled, 4 Canceling
request['from'] = '1'
request['to'] = '1'
            if limit is None:
                limit = 100  # default 100, max 100
            request['limit'] = limit
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
#
# {
# "status":"ok",
# "ts":1595875165865,
# "data":[
# {
# "account_id":"7420922606",
# "amount":"0.1000000000000000",
# "canceled_at":"1595872129618",
# "created_at":"1595872089525",
# "filled_amount":"0.000000000000",
# "filled_cash_amount":"0.000000000000",
# "filled_fees":"0.000000000000",
# "finished_at":"1595872129618",
# "id":"671701716584665088",
# "price":"150.000000000000",
# "source":"接口",
# "state":"canceled",
# "symbol":"eth_usdt",
# "type":"buy-limit"
# }
# ]
# }
#
# swap
#
# [
# {
# "symbol":"cmt_ethusdt",
# "size":"1",
# "timestamp":"1595885546770",
# "client_oid":"f3aa81d6-9a4c-4eab-bebe-ebc19da21cf2",
# "createTime":"1595885521200",
# "filled_qty":"0",
# "fee":"0.00000000",
# "order_id":"671758053112020913",
# "price":"150.00",
# "price_avg":"0.00",
# "status":"0",
# "type":"1",
# "order_type":"0",
# "totalProfits":null
# }
# ]
#
data = response
if not isinstance(response, list):
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, None, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits() requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'method': 'deposit_withdraw',
'type': 'deposit',
'size': 12,
}
response = self.apiGetOrderDepositWithdraw(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "id": 1171,
# "type": "deposit",
# "currency": "usdt",
# "tx_hash": "ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b",
# "amount": 7.457467,
# "address": "rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm",
# "address_tag": "100040",
# "fee": 0,
# "state": "safe",
# "created_at": 1510912472199,
# "updated_at": 1511145876575
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals() requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'method': 'deposit_withdraw',
'type': 'withdraw',
'size': 12,
}
response = self.apiGetOrderDepositWithdraw(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "id": 1171,
# "type": "withdraw",
# "currency": "usdt",
# "tx_hash": "ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b",
# "amount": 7.457467,
# "address": "rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm",
# "address_tag": "100040",
# "fee": 0,
# "state": "safe",
# "created_at": 1510912472199,
# "updated_at": 1511145876575
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
            # withdrawals
            'WaitForOperation': 'pending',  # awaiting withdrawal
            'OperationLock': 'pending',  # initial-review lock succeeded
            'OperationSuccess': 'ok',  # withdrawal succeeded
            'Cancel': 'canceled',  # canceled by the user
            'Sure': 'ok',  # second-review lock succeeded
            'Fail': 'failed',  # withdrawal error
            'WaitForChainSure': 'ok',  # awaiting on-chain confirmation
            # deposits
            'WAIT_0': 'pending',  # awaiting confirmation
            'WAIT_1': 'pending',  # awaiting confirmation
            'DATA_CHANGE': 'pending',  # awaiting confirmation, data changed
            'SUCCESS': 'ok',  # deposit succeeded
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits, fetchWithdrawals
#
# {
# "id": 1171,
# "type": "withdraw",
# "currency": "usdt",
# "tx_hash": "ed03094b84eafbe4bc16e7ef766ee959885ee5bcb265872baaa9c64e1cf86c2b",
# "amount": 7.457467,
# "address": "rae93V8d2mdoUQHwBDBdM4NHCMehRJAsbm",
# "address_tag": "100040",
# "fee": 0,
# "state": "safe",
# "created_at": 1510912472199,
# "updated_at": 1511145876575
# }
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'address_tag')
tagFrom = None
tagTo = tag
addressFrom = None
addressTo = address
        type = self.safe_string(transaction, 'type')
        if type == 'withdraw':
            type = 'withdrawal'  # 'deposit' is already the unified value
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
txid = self.safe_string(transaction, 'tx_hash')
timestamp = self.safe_integer(transaction, 'created_at')
updated = self.safe_integer(transaction, 'updated_at')
feeCost = self.safe_number(transaction, 'fee')
fee = None
if feeCost is not None:
fee = {
'currency': code,
'cost': feeCost,
}
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': tagFrom,
'tagTo': tagTo,
'tag': tag,
'status': status,
'type': type,
'updated': updated,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
query = self.omit(params, 'type')
if type == 'swap':
            raise NotSupported(self.id + ' fetchMyTrades() is not supported for ' + type + ' markets')
#
# spot
#
        # POST /api/v1/order/matchresults Query current orders, order history
        # symbol      True   string  trading pair btc_usdt, eth_btc ...
        # types       False  string  Query order type combination buy-market, sell-market, buy-limit, sell-limit
        # start_date  False  string  Query start date, date format yyyy-mm-dd, at most 61 days back [-61day, end-date]
        # end_date    False  string  Query end date, date format yyyy-mm-dd, defaults to now [start-date, now]
        # from        False  string  Query start ID order record id
        # direct      False  string  Query direction, 'next' is default(the transaction record ID is sorted from large to small), prev / next
        # size        False  string  Query record size, default 100, <=100
#
request = {
'symbol': market['id'],
'method': 'matchresults',
# 'types': 'buy-market,sell-market,buy-limit,sell-limit',
# 'start_date': self.ymd(since),
# 'end_date': self.ymd(self.milliseconds()),
# 'size': 100,
# 'direct': 'next',
}
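        # the endpoint limits each query to a 48 hour window(hence the 2 day end_date below),
        # e.g. since at 2020-07-24 12:00 UTC yields start_date='2020-07-24' and end_date='2020-07-26'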
if since is not None:
request['start_date'] = self.ymd(since)
end = self.sum(since, 2 * 24 * 60 * 60 * 1000)
request['end_date'] = self.ymd(end)
if limit is not None:
request['size'] = limit # default 100, max 100
response = self.apiPostOrderMatchresults(self.extend(request, query))
#
# {
# "status": "ok",
# "data": [
# {
# "id": 29555,
# "order_id": 59378,
# "match_id": 59335,
# "symbol": "eth_usdt",
# "type": "buy-limit",
# "source": "api",
# "price": "100.1000000000",
# "filled_amount": "0.9845000000",
# "filled_fees": "0.0019690000",
# "created_at": 1494901400487
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = self.safe_string(params, 'type', market['type'])
params = self.omit(params, 'type')
method = None
request = {}
if type == 'spot':
request['order_id'] = id
request['method'] = 'matchresults'
method = 'apiPostOrderOrdersOrderIdMatchresults'
elif type == 'swap':
request['orderId'] = id
request['symbol'] = market['id']
method = 'swapGetOrderFills'
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "ts":1596298917277,
# "data":[
# {
# "id":"614164775",
# "created_at":"1596298860602",
# "filled_amount":"0.0417000000000000",
# "filled_fees":"0.0000834000000000",
# "match_id":"673491702661292033",
# "order_id":"673491720340279296",
# "price":"359.240000000000",
# "source":"接口",
# "symbol":"eth_usdt",
# "type":"buy-market"
# }
# ]
# }
#
# swap
#
#
# [
# {
# "trade_id":"6667390",
# "symbol":"cmt_btcusdt",
# "order_id":"525946425993854915",
# "price":"9839.00",
# "order_qty":"3466",
# "fee":"-0.0000528407360000",
# "timestamp":"1561121514442",
# "exec_type":"M",
# "side":"3"
# }
# ]
#
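        # The spot endpoint wraps fills in a 'data' field, while the swap
        # endpoint returns a bare list, so normalize before parsing.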
data = response
if not isinstance(data, list):
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_position(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.swapGetPositionSinglePosition(self.extend(request, params))
#
# {
# "margin_mode":"fixed", # Margin mode: crossed / fixed
# "holding":[
# {
# "symbol":"cmt_btcusdt", # Contract name
# "liquidation_price":"0.00", # Estimated liquidation price
# "position":"0", # Position Margin, the margin for holding current positions
# "avail_position":"0", # Available position
# "avg_cost":"0.00", # Transaction average price
# "leverage":"2", # Leverage
# "realized_pnl":"0.00000000", # Realized Profit and loss
# "keepMarginRate":"0.005", # Maintenance margin rate
# "side":"1", # Position Direction Long or short, Mark obsolete
# "holdSide":"1", # Position Direction Long or short
# "timestamp":"1557571623963", # System timestamp
# "margin":"0.0000000000000000", # Used margin
# "unrealized_pnl":"0.00000000", # Unrealized profit and loss
# }
# ]
# }
return response
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
response = self.swapGetPositionAllPosition(params)
#
# [
# {
# "margin_mode":"fixed",
# "holding":[
# {
# "liquidation_price":"0.00",
# "position":"0",
# "avail_position":"0",
# "avg_cost":"0.00",
# "symbol":"btcusd",
# "leverage":"20",
# "keepMarginRate":"0.005",
# "realized_pnl":"0.00000000",
# "unrealized_pnl":"0",
# "side":"long",
# "holdSide":"1",
# "timestamp":"1595698564915",
# "margin":"0.0000000000000000"
# },
# ]
# },
# ]
#
# todo unify parsePosition/parsePositions
return response
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
if (api == 'capi') or (api == 'swap'):
request = '/api/swap/' + self.version + request
else:
request = '/' + api + '/v1' + request
query = self.omit(params, self.extract_params(path))
url = self.implode_hostname(self.urls['api'][api]) + request
if (api == 'data') or (api == 'capi'):
if query:
url += '?' + self.urlencode(query)
elif api == 'swap':
self.check_required_credentials()
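            # Swap endpoints sign the prehash 'timestamp + METHOD + path'
            # (plus '?query' for GET or the JSON body for POST) with
            # HMAC-SHA256 over the secret, base64-encoded below.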
timestamp = str(self.milliseconds())
auth = timestamp + method + request
if method == 'POST':
body = self.json(params)
auth += body
else:
if params:
query = self.urlencode(self.keysort(params))
url += '?' + query
auth += '?' + query
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers = {
'ACCESS-KEY': self.apiKey,
'ACCESS-SIGN': signature,
'ACCESS-TIMESTAMP': timestamp,
'ACCESS-PASSPHRASE': self.password,
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
elif api == 'api':
            timestamp = str(self.milliseconds())
            query = self.keysort(query)
            auth = self.rawencode(query)
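            # Legacy spot signing: the HMAC-MD5 key is the SHA1 hex digest of the
            # secret; sign/req_time/accesskey are then appended as parameters.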
hash = self.hash(self.encode(self.secret), 'sha1')
signed = auth
signature = self.hmac(self.encode(auth), self.encode(hash), hashlib.md5)
if len(auth) > 0:
signed += '&'
signed += 'sign=' + signature + '&req_time=' + timestamp + '&accesskey=' + self.apiKey
if method == 'GET':
if query:
url += '?' + signed
elif method == 'POST':
url += '?sign=' + signature + '&req_time=' + timestamp + '&accesskey=' + self.apiKey
body = auth
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
#
# spot
#
# {"status":"fail","err_code":"01001","err_msg":"系统异常,请稍后重试"}
# {"status":"error","ts":1595594160149,"err_code":"invalid-parameter","err_msg":"invalid size, valid range: [1,2000]"}
# {"status":"error","ts":1595684716042,"err_code":"invalid-parameter","err_msg":"illegal sign invalid"}
# {"status":"error","ts":1595700216275,"err_code":"bad-request","err_msg":"your balance is low!"}
# {"status":"error","ts":1595700344504,"err_code":"invalid-parameter","err_msg":"invalid type"}
# {"status":"error","ts":1595703343035,"err_code":"bad-request","err_msg":"order cancel fail"}
# {"status":"error","ts":1595704360508,"err_code":"invalid-parameter","err_msg":"accesskey not null"}
# {"status":"error","ts":1595704490084,"err_code":"invalid-parameter","err_msg":"permissions not right"}
# {"status":"error","ts":1595711862763,"err_code":"system exception","err_msg":"system exception"}
# {"status":"error","ts":1595730308979,"err_code":"bad-request","err_msg":"20003"}
#
# swap
#
# {"code":"40015","msg":"","requestTime":1595698564931,"data":null}
# {"code":"40017","msg":"Order id must not be blank","requestTime":1595702477835,"data":null}
# {"code":"40017","msg":"Order Type must not be blank","requestTime":1595698516162,"data":null}
# {"code":"40301","msg":"","requestTime":1595667662503,"data":null}
# {"code":"40017","msg":"Contract code must not be blank","requestTime":1595703151651,"data":null}
# {"code":"40108","msg":"","requestTime":1595885064600,"data":null}
# {"order_id":"513468410013679613","client_oid":null,"symbol":"ethusd","result":false,"err_code":"order_no_exist_error","err_msg":"订单不存在!"}
#
message = self.safe_string(response, 'err_msg')
errorCode = self.safe_string_2(response, 'code', 'err_code')
feedback = self.id + ' ' + body
nonEmptyMessage = ((message is not None) and (message != ''))
if nonEmptyMessage:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
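        # '00000' is the swap API's success code; anything else signals an error.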
nonZeroErrorCode = (errorCode is not None) and (errorCode != '00000')
if nonZeroErrorCode:
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback) # unknown message
| [
"[email protected]"
] | |
9d5aafb14738f910a84f9fa615fc45a6cd8f3cc2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /es6qJTs5zYf8nEBkG_10.py | 6084216bb61e092527df14f88ae710259e20fc5d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
import re
def is_rectangle(lst):
    # Expects four coordinate strings; pulls every (possibly negative)
    # integer out of them and accepts only when exactly four distinct
    # values appear, i.e. two distinct x-values and two distinct y-values.
    if len(lst) != 4:
        return False
    nums = []
    for item in lst:
        nums.extend(int(d) for d in re.findall(r'-?\d+', item))
    return len(set(nums)) == 4
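# Illustrative calls (inputs are hypothetical; the original challenge
# statement is not reproduced here):
#   is_rectangle(["(1, 2)", "(1, 3)", "(4, 2)", "(4, 3)"])  # True
#   is_rectangle(["(1, 2)", "(1, 3)", "(4, 2)"])            # False (only 3 points)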
| [
"[email protected]"
] | |
8745eab3a8a025abd42708022865113cd6d9859f | fd326562890d4f1987c384fc7c60374938231222 | /OOP/DefinningClasses/Spoopify/project/album.py | 55660f2a89fde3334a12f48e6c5ecfbc8cdc378d | [] | no_license | miro-lp/SoftUni | cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4 | 283d9328537919de49f7f6a301e58593bae9ca2a | refs/heads/main | 2023-08-23T21:22:07.856226 | 2021-08-25T15:10:18 | 2021-08-25T15:10:18 | 318,134,101 | 2 | 1 | null | 2021-08-10T12:51:54 | 2020-12-03T09:03:08 | Python | UTF-8 | Python | false | false | 1,675 | py | from .song import Song
class Album:
def __init__(self, name, *songs):
self.name = name
self.songs = list(songs)
self.published = False
def add_song(self, song: Song):
if self.published:
return "Cannot add songs. Album is published."
else:
if song.single:
return f"Cannot add {song.name}. It's a single"
else:
if song.name not in [i.name for i in self.songs]:
self.songs.append(song)
return f"Song {song.name} has been added to the album {self.name}."
else:
return "Song is already in the album."
def remove_song(self, song: str):
if self.published:
return "Cannot remove songs. Album is published."
else:
if song in [i.name for i in self.songs]:
for s in self.songs:
if s.name == song:
self.songs.remove(s)
break
return f"Removed song {song} from album {self.name}."
else:
return "Song is not in the album."
def publish(self):
if self.published:
return f"Album {self.name} is already published."
else:
self.published = True
return f"Album {self.name} has been published."
def details(self):
name_info = f"Album {self.name}"
album_info = "\n".join([f"== {s.get_info()}" for s in self.songs])
if len(self.songs) > 0:
return name_info + "\n" + album_info + "\n"
else:
return name_info + "\n"
| [
"[email protected]"
] | |
930c2c52d19f93eb89a1d6d1cd65fddba65c9851 | df126574e5fae32aa6ba8ae927942208107897b5 | /pyconll/load.py | 2cfdd19dd6edcbb04a5310f3f37bfb3799be6585 | [
"MIT"
] | permissive | ZmeiGorynych/pyconll | 865781a9ac2b5c0b9fe2a26d7d14fce60d4454a7 | 6784295db5fde769754e2b1ac46d6100484e45cc | refs/heads/master | 2020-04-14T11:38:14.167823 | 2018-12-28T22:12:38 | 2018-12-28T22:12:38 | 163,819,354 | 0 | 0 | null | 2019-01-02T09:15:40 | 2019-01-02T09:15:40 | null | UTF-8 | Python | false | false | 3,400 | py | """
A wrapper around the Conll class that allow for easy loading of treebanks from
multiple formats. This module also contains logic for iterating over treebank
data without storing Conll objects in memory.
"""
import requests
from pyconll._parser import iter_sentences
from pyconll.unit import Conll
def load_from_string(source):
"""
Load CoNLL-U source in a string into a Conll object.
Args:
source: The CoNLL-U formatted string.
Returns:
A Conll object equivalent to the provided source.
Raises:
ParseError: If there is an error parsing the input into a Conll object.
"""
lines = source.splitlines()
c = Conll(lines)
return c
def load_from_file(filename):
"""
Load a CoNLL-U file given the filename where it resides.
Args:
filename: The location of the file.
Returns:
A Conll object equivalent to the provided file.
Raises:
IOError: If there is an error opening the given filename.
ParseError: If there is an error parsing the input into a Conll object.
"""
with open(filename) as f:
c = Conll(f)
return c
def load_from_url(url):
"""
Load a CoNLL-U file that is pointed to by a given URL.
Args:
url: The URL that points to the CoNLL-U file.
Returns:
A Conll object equivalent to the provided file.
Raises:
requests.exceptions.RequestException: If the url was unable to be properly
retrieved and status was 4xx or 5xx.
ParseError: If there is an error parsing the input into a Conll object.
"""
resp = requests.get(url)
resp.raise_for_status()
resp.encoding = 'utf-8'
lines = resp.text.splitlines()
c = Conll(lines)
return c
def iter_from_string(source):
"""
Iterate over a CoNLL-U string's sentences.
Use this method if you only need to iterate over the CoNLL-U file once and
do not need to create or store the Conll object.
Args:
source: The CoNLL-U string.
Yields:
The sentences that make up the CoNLL-U file.
Raises:
ParseError: If there is an error parsing the input into a Conll object.
"""
lines = source.splitlines()
for sentence in iter_sentences(lines):
yield sentence
def iter_from_file(filename):
"""
Iterate over a CoNLL-U file's sentences.
Args:
filename: The name of the file whose sentences should be iterated over.
Yields:
The sentences that make up the CoNLL-U file.
Raises:
        IOError: If there is an error opening the file.
ParseError: If there is an error parsing the input into a Conll object.
"""
with open(filename) as f:
for sentence in iter_sentences(f):
yield sentence
def iter_from_url(url):
"""
Iterate over a CoNLL-U file that is pointed to by a given URL.
Args:
url: The URL that points to the CoNLL-U file.
Yields:
The sentences that make up the CoNLL-U file.
Raises:
requests.exceptions.RequestException: If the url was unable to be properly
retrieved.
ParseError: If there is an error parsing the input into a Conll object.
"""
resp = requests.get(url)
    resp.raise_for_status()
    resp.encoding = 'utf-8'  # match load_from_url; CoNLL-U files are UTF-8
    lines = resp.text.splitlines()
for sentence in iter_sentences(lines):
yield sentence
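if __name__ == '__main__':
    # Minimal smoke test; the one-token sentence below is illustrative
    # CoNLL-U (ten tab-separated columns).
    sample = '1\thello\thello\tINTJ\t_\t_\t0\troot\t_\t_\n'
    for sentence in iter_from_string(sample):
        print('parsed sentence with', len(sentence), 'token(s)')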
| [
"[email protected]"
] | |
42e066146f1fa97f71238d54a52fa96707339fed | 0274f2c465f110598456624581f569331221068b | /impl/set_mode.py | 4e67d0d1e69ae5d21f0e2a6144f1fe0e173dbafa | [] | no_license | bluecube/thesis | 63e745076c86a3122e9c3d7ff42ff22e32921860 | 588db206e64de9b681372fea9a70d3fa2aa598df | refs/heads/master | 2016-09-06T00:01:03.840006 | 2013-05-27T09:36:51 | 2013-05-27T09:36:51 | 1,376,241 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | #!/usr/bin/python
"""
set_mode.py
Set the GPS to SiRF or NMEA mode.
"""
from __future__ import division, print_function, unicode_literals
import gps
import logging
import sys
import argparse
from gps.sirf_messages import *
def setup_logging():
logging.basicConfig(
format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level = logging.INFO
)
setup_logging()
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
arg_parser = argparse.ArgumentParser(description="Set the GPS to SiRF or NMEA mode.")
arg_parser.add_argument('gps',
help="Port with a GPS receiver.")
arg_parser.add_argument('--protocol',
help="To which mode to switch the receiver, protocol is either 'NMEA' or 'SIRF'",
default="SIRF")
arguments = arg_parser.parse_args()
x = gps.gps.Gps(arguments.gps)
x.set_protocol(arguments.protocol)
| [
"[email protected]"
] | |
9bc5ca18a12c96f4e0ca190e8213607b463a5682 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/fsa.py | f6b272fc35d04a6d7b15c3cc189b3879a96524a4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 140 | py | ii = [('ShawHDE.py', 2), ('PettTHE.py', 71), ('ClarGE2.py', 2), ('GellWPT2.py', 1), ('CrokTPS.py', 2), ('BuckWGM.py', 1), ('RoscTTI.py', 1)] | [
"[email protected]"
] | |
230b8d139a3fe1b4a2b0befd673aebccdac45332 | c5959b7e4fc5b752b54a6352449c1bb0d28d9115 | /bab/bab-12/mysql_fetchmany.py | 1efde507ffa28d2168fa13356bdb0bff188622af | [] | no_license | romanbatavi/kickstarter-python | f5592a371740b28c045ef99dd510d1c6a92ff8d1 | ed3eb692e09a3f44fd3e0b16ab7b042ee2658db6 | refs/heads/master | 2023-03-29T11:34:23.774873 | 2021-04-04T09:11:28 | 2021-04-04T09:11:28 | 354,500,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | ######################################################
# Nama file: mysql_fetchmany.py
######################################################
import mysql.connector
import sys
def main():
try:
conn = mysql.connector.connect(
user="raharjo",
password="123456789",
host="127.0.0.1",
database="PythonDB"
)
sql = """
SELECT KODE, JUDUL, PENULIS
FROM BUKU
"""
try:
cur = conn.cursor(buffered=True)
cur.execute(sql)
# menangkap satu baris data dalam cursor
rows = cur.fetchmany(2)
print("Menggunakan fetchmany(2):")
# menampilkan data yang telah ditangkap
for (kode,judul,penulis) in rows:
print(kode, '\t',
judul, '\t',
penulis)
except:
print("Pengambilan data gagal")
sys.exit(1)
else:
cur.close()
except mysql.connector.Error as e:
print("ERROR ", e)
else:
conn.close()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
941eed0f81560dccbcd378a4fa258db160bfd547 | 07bab8cd09c27e93c6eb0e0c47b6f472b4a89d45 | /web/home/urls.py | f6c1eb52d83ff48da122cf768e4c2cc1af36d91e | [] | no_license | arunchaganty/webbed-feet | 0b0de344a64fe6a1d5619982d603a785d4ee02cb | d7b3d96900935d43bea97d175cb5552a1aba02d5 | refs/heads/master | 2021-01-01T06:44:59.555676 | 2011-11-28T17:17:52 | 2011-11-28T17:17:52 | 887,725 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from django.conf.urls.defaults import *
urlpatterns = patterns('web.home.views',
(r'^logout/$', 'logout'),
(r'^login/$', 'login'),
(r'^ping/$', 'ping'),
(r'^help/$', 'help'),
(r'', 'home'),
)
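# Note: patterns are tried in order, so the empty pattern above acts as a
# catch-all and must stay last (old-style Django 1.x urlconf).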
| [
"[email protected]"
] | |
2b519425fc80b6a980b77f8685872dc03c6b8b2c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03547/s895509734.py | 521f80f68c31034aaefe1ca9b2e831263c85a6bb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | #ABC078A
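# Single uppercase letters compare lexicographically, which matches their
# hexadecimal order for 'A'-'F', so string comparison answers ABC078 A directly.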
x, y = input().split()
print("<" if x<y else "=" if x==y else ">") | [
"[email protected]"
] | |
573f4c351e671916ffa5970d3e4f0805bfefe12d | 8c4a366c5dc9762e3c922b991e64c691a154ea88 | /36.py | 141e9ea9b51adc24496e1eb27e73e0aabb26772a | [] | no_license | VINITHAKO/pro | bd7fec9b46d7975c46ba0cb42d353bc10965dbdb | c1c3cd943606324f5252f46dd33edf7e180bbb48 | refs/heads/master | 2020-06-24T13:36:15.327211 | 2019-08-13T06:10:52 | 2019-08-13T06:10:52 | 198,976,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | v = int(input())
n = [int(x) for x in input().split()]
v = len(n)  # use the actual count of values read (the first input line gave v)
s = 0
# Count strictly decreasing triples n[i] > n[j] > n[k] with i < j < k: O(v^3).
for i in range(0, v - 2):
    for j in range(i + 1, v - 1):
        for k in range(j + 1, v):
            if n[i] > n[j] > n[k]:
                s += 1
print(s)
| [
"[email protected]"
] | |
a90cf650bf1fb64fc1e55e83bc390af95dac2afa | f14bfd79d8bdcd012f21895084598d4bfe9fb9f2 | /0x03-python-data_structures/7-add_tuple.py | 4112e9aab615eb65a9654a62ed3705ceb696cbbd | [] | no_license | ch-canaza/holbertonschool-higher_level_programming | 1c62ae2e7798d79e619d8a133c3929720f317196 | 1d7402c90de37b920e163a04f196491a99d516c0 | refs/heads/master | 2023-01-18T23:06:57.738005 | 2020-11-12T21:57:53 | 2020-11-12T21:57:53 | 259,409,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | #!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
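    """Add two tuples element-wise, padding each to length 2 with zeros."""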
len_a = len(tuple_a)
len_b = len(tuple_b)
if len_a < 2:
for i in range(2 - len_a):
tuple_a += (0,)
if len_b < 2:
for i in range(2 - len_b):
tuple_b += (0,)
a = tuple_a[0] + tuple_b[0]
b = tuple_a[1] + tuple_b[1]
return (a, b)
| [
"[email protected]"
] | |
3f6185d27af2a0cec9d7825cae72b4a476206fe0 | 466607c14d76c8d798e08f05dde2d79a07f6e069 | /tests/databases/constructed_molecule/mongo_db/utilities.py | 48ae44f71584e4bcfa63e090dc020faf4557262b | [
"MIT"
] | permissive | andrewtarzia/stk | 7c77006bacd4d3d45838ffb3b3b4c590f1bce336 | 9242c29dd4b9eb6927c202611d1326c19d73caea | refs/heads/main | 2023-08-03T12:29:21.096641 | 2023-07-27T09:45:25 | 2023-07-27T09:45:25 | 191,198,174 | 0 | 1 | MIT | 2023-09-04T16:53:05 | 2019-06-10T15:49:25 | Python | UTF-8 | Python | false | false | 850 | py | from collections import Counter
from ...utilities import DatabaseState, get_entry
def get_database_state(database):
"""
Get the state of a :class:`.ValueMongoDb`.
Parameters
----------
database : :class:`.ValueMongoDb`
The database whose state is wanted.
Returns
-------
:class:`.DatabaseState`
The current state of `database`.
"""
entries = Counter()
entries.update(map(get_entry, database._molecules.find({})))
entries.update(map(get_entry, database._position_matrices.find({})))
entries.update(
map(
get_entry,
database._constructed_molecules.find({}),
)
)
entries.update(
map(
get_entry,
database._building_block_position_matrices.find({}),
)
)
return DatabaseState(entries)
| [
"[email protected]"
] | |
872a9c9f67e0dda9dbb5ded25dcc5a53ba331d4f | c0f7cc71eb5732d3b90da4f1e40c3f806f63bb29 | /python/fibermeas/exceptions.py | e4375607ef2fc9760f3570e749c674ad21f10151 | [
"BSD-3-Clause"
] | permissive | sdss/fibermeas | 6bb696ca2e46ded83baf3bc09a7b0e2024884789 | 4d29ff58a14b025cf6320ab1caef5f4bcbba394b | refs/heads/master | 2023-06-24T12:09:38.449736 | 2021-05-07T08:14:52 | 2021-05-07T08:14:52 | 329,701,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-12-05 12:01:21
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-12-05 12:19:32
from __future__ import print_function, division, absolute_import
class FibermeasError(Exception):
"""A custom core Fibermeas exception"""
def __init__(self, message=None):
message = 'There has been an error' \
if not message else message
super(FibermeasError, self).__init__(message)
class FibermeasNotImplemented(FibermeasError):
"""A custom exception for not yet implemented features."""
def __init__(self, message=None):
message = 'This feature is not implemented yet.' \
if not message else message
super(FibermeasNotImplemented, self).__init__(message)
class FibermeasAPIError(FibermeasError):
"""A custom exception for API errors"""
def __init__(self, message=None):
if not message:
message = 'Error with Http Response from Fibermeas API'
else:
message = 'Http response error from Fibermeas API. {0}'.format(message)
super(FibermeasAPIError, self).__init__(message)
class FibermeasApiAuthError(FibermeasAPIError):
"""A custom exception for API authentication errors"""
pass
class FibermeasMissingDependency(FibermeasError):
"""A custom exception for missing dependencies."""
pass
class FibermeasWarning(Warning):
"""Base warning for Fibermeas."""
class FibermeasUserWarning(UserWarning, FibermeasWarning):
"""The primary warning class."""
pass
class FibermeasSkippedTestWarning(FibermeasUserWarning):
"""A warning for when a test is skipped."""
pass
class FibermeasDeprecationWarning(FibermeasUserWarning):
"""A warning for deprecated features."""
pass
| [
"[email protected]"
] | |
e1fb0821c00054365b7452d5b9ee05d208da67f2 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractUnknowntranslationsCom.py | 896536aca1cc20dfe50f401116dd029e1050a31a | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 559 | py |
def extractUnknowntranslationsCom(item):
'''
Parser for 'unknowntranslations.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
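	# Map feed tags to series names; tl_type marks translated vs original-English (OEL) releases.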
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
] | |
2f78bd1f6bc462d026579f398379118dc3bdc186 | c6759b857e55991fea3ef0b465dbcee53fa38714 | /tools/nntool/nntool/importer/tflite2/tflite_schema_head/WhileOptions.py | c1ecce2b777e295124727c75a226511d952f2ee9 | [
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] | permissive | GreenWaves-Technologies/gap_sdk | 1b343bba97b7a5ce62a24162bd72eef5cc67e269 | 3fea306d52ee33f923f2423c5a75d9eb1c07e904 | refs/heads/master | 2023-09-01T14:38:34.270427 | 2023-08-10T09:04:44 | 2023-08-10T09:04:44 | 133,324,605 | 145 | 96 | Apache-2.0 | 2023-08-27T19:03:52 | 2018-05-14T07:50:29 | C | UTF-8 | Python | false | false | 2,169 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite_schema_head
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class WhileOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = WhileOptions()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsWhileOptions(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# WhileOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# WhileOptions
def CondSubgraphIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# WhileOptions
def BodySubgraphIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
def WhileOptionsStart(builder): builder.StartObject(2)
def Start(builder):
return WhileOptionsStart(builder)
def WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex): builder.PrependInt32Slot(0, condSubgraphIndex, 0)
def AddCondSubgraphIndex(builder, condSubgraphIndex):
return WhileOptionsAddCondSubgraphIndex(builder, condSubgraphIndex)
def WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex): builder.PrependInt32Slot(1, bodySubgraphIndex, 0)
def AddBodySubgraphIndex(builder, bodySubgraphIndex):
return WhileOptionsAddBodySubgraphIndex(builder, bodySubgraphIndex)
def WhileOptionsEnd(builder): return builder.EndObject()
def End(builder):
return WhileOptionsEnd(builder) | [
"[email protected]"
] | |
5fdaf863e782058cb85f3d022793642b1156ffb6 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/statistics/trait_statistic_tracker.py | 3413320ad579da85723ab46f98b68565cc9fbddb | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,846 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\statistics\trait_statistic_tracker.py
# Compiled at: 2020-10-02 01:43:01
# Size of source mod 2**32: 15625 bytes
from collections import defaultdict
import alarms, random, services
from date_and_time import create_time_span, TimeSpan
from distributor.rollback import ProtocolBufferRollback
from event_testing.resolver import SingleSimResolver
from sims.sim_info_lod import SimInfoLODLevel
from sims4.resources import Types
from sims4.tuning.tunable import TunableSimMinute
from statistics.continuous_statistic_tracker import ContinuousStatisticTracker
from statistics.trait_statistic import TraitStatistic, TraitStatisticStates, TraitStatisticGroup
from tunable_time import TunableTimeSpan, TunableTimeOfDay
class TraitStatisticTracker(ContinuousStatisticTracker):
PERIODIC_TEST_TIMER = TunableTimeSpan(description='\n A repeating time span of how often we will run the periodic\n tests on Trait Statistics.\n ')
PERIODIC_TEST_TIMER_RANDOMIZER = TunableSimMinute(description='\n A random amount of time between 0 and this will be added to each Sim when setting up the initial\n alarm such that all of of the timers will not be triggered at once leading to a potential spike.\n ',
default=30,
minimum=0)
END_OF_DAY_TIME = TunableTimeOfDay(description='\n The time of day in which we will consider the end of day for the trait statistic end of day\n behaviors: daily cap, neglect, etc.\n ')
periodic_trait_statistics = None
@classmethod
def get_periodic_tested_trait_statistics(cls):
if cls.periodic_trait_statistics is None:
cls.periodic_trait_statistics = []
statistics_manager = services.get_instance_manager(Types.STATISTIC)
for statistic in statistics_manager.types.values():
if not issubclass(statistic, TraitStatistic):
continue
if statistic.periodic_tests.modifiers:
cls.periodic_trait_statistics.append(statistic)
return cls.periodic_trait_statistics
__slots__ = ('_trait_statistic_periodic_test_alarm_handle', '_end_of_day_alarm_handle',
'load_in_progress', '_time_to_next_periodic_test', '_trait_statistic_groups',
'__weakref__')
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self._trait_statistic_periodic_test_alarm_handle = None
self._time_to_next_periodic_test = None
self._end_of_day_alarm_handle = None
self.load_in_progress = False
self._trait_statistic_groups = None
def should_suppress_calculations(self):
return self.load_in_progress
def _cancel_alarms(self):
if self._trait_statistic_periodic_test_alarm_handle is not None:
alarms.cancel_alarm(self._trait_statistic_periodic_test_alarm_handle)
self._trait_statistic_periodic_test_alarm_handle = None
if self._end_of_day_alarm_handle is not None:
alarms.cancel_alarm(self._end_of_day_alarm_handle)
self._end_of_day_alarm_handle = None
def destroy(self):
self._cancel_alarms()
super().destroy()
def _periodic_tests_callback(self, _):
resolver = SingleSimResolver(self.owner)
statistics_to_test = self.get_periodic_tested_trait_statistics()
for statistic in statistics_to_test:
values = statistic.periodic_tests.get_modified_value(resolver)
if values != 0:
self.add_value(statistic, values)
def _load_delayed_active_statistics(self):
statistic_manager = services.get_instance_manager(Types.STATISTIC)
for trait_statistic_data in self._delayed_active_lod_statistics:
statistic_type = statistic_manager.get(trait_statistic_data.trait_statistic_id)
stat = self.add_statistic(statistic_type, from_load=True)
if stat is None:
continue
stat.load(trait_statistic_data)
self._delayed_active_lod_statistics = None
def _get_stat_data_for_active_lod(self, statistic):
return statistic.get_save_message(self)
def on_lod_update(self, old_lod, new_lod):
super().on_lod_update(old_lod, new_lod)
if new_lod >= SimInfoLODLevel.ACTIVE:
if self._trait_statistic_periodic_test_alarm_handle is not None:
return
duration = TraitStatisticTracker.PERIODIC_TEST_TIMER()
if self._time_to_next_periodic_test is None:
initial_duration = duration + create_time_span(minutes=(random.randint(0, self.PERIODIC_TEST_TIMER_RANDOMIZER)))
else:
initial_duration = TimeSpan(self._time_to_next_periodic_test)
self._time_to_next_periodic_test = None
self._trait_statistic_periodic_test_alarm_handle = alarms.add_alarm(self, initial_duration,
(self._periodic_tests_callback),
repeating=True,
repeating_time_span=duration,
cross_zone=True)
now = services.time_service().sim_now
time_till_end_of_day = now.time_till_next_day_time(TraitStatisticTracker.END_OF_DAY_TIME)
self._end_of_day_alarm_handle = alarms.add_alarm(self, time_till_end_of_day,
(self.on_day_end),
repeating=True,
repeating_time_span=create_time_span(days=1),
cross_zone=True)
else:
self._cancel_alarms()
def get_trait_state(self, trait):
trait_statistic = self.get_statistic(trait.trait_statistic)
if trait_statistic is None:
return TraitStatisticStates.LOCKED
if trait_statistic.trait_data.trait is trait:
return trait_statistic.state
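        # For the opposing trait, mirror the state across the enum: fully
        # unlocked on one side reads as fully locked on the other.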
if trait_statistic.opposing_trait_data.trait is trait:
return len(TraitStatisticStates) - trait_statistic.state - 1
def on_day_end(self, *args, **kwargs):
if self._statistics is None:
return
for statistic in self._statistics.values():
statistic.perform_end_of_day_actions()
def reset_daily_caps(self):
if self._statistics is None:
return
for statistic in self._statistics.values():
statistic.reset_daily_caps()
def add_statistic(self, stat_type, from_load=False, **kwargs):
        stat = super().add_statistic(stat_type, **kwargs)
if stat is None:
return
if not from_load:
stat.startup_statistic()
if stat.group != TraitStatisticGroup.NO_GROUP:
if self._trait_statistic_groups is not None:
if stat.group in self._trait_statistic_groups:
if len(self._trait_statistic_groups[stat.group]) >= TraitStatistic.GROUPS[stat.group]:
stat.add_group_limiter()
return stat
def _on_statistic_state_changed(self, changed_statistic):
group_being_changed = changed_statistic.group
if group_being_changed == TraitStatisticGroup.NO_GROUP:
return
if changed_statistic.trait_unlocked:
if self._trait_statistic_groups is None:
self._trait_statistic_groups = defaultdict(set)
else:
if type(changed_statistic) in self._trait_statistic_groups[group_being_changed]:
return
self._trait_statistic_groups[group_being_changed].add(type(changed_statistic))
if len(self._trait_statistic_groups[group_being_changed]) < TraitStatistic.GROUPS[group_being_changed]:
return
for statistic in self._statistics.values():
if statistic.group != group_being_changed:
continue
if statistic.trait_unlocked:
continue
statistic.add_group_limiter()
else:
if self._trait_statistic_groups is None:
return
if group_being_changed not in self._trait_statistic_groups:
return
if type(changed_statistic) not in self._trait_statistic_groups[group_being_changed]:
return
else:
currently_locked = len(self._trait_statistic_groups[group_being_changed]) >= TraitStatistic.GROUPS[group_being_changed]
self._trait_statistic_groups[group_being_changed].remove(type(changed_statistic))
if not self._trait_statistic_groups[group_being_changed]:
del self._trait_statistic_groups[group_being_changed]
if not self._trait_statistic_groups:
self._trait_statistic_groups = None
if currently_locked:
for statistic in self._statistics.values():
if statistic.group != group_being_changed:
continue
statistic.remove_group_limiter()
def remove_all_statistics_by_group(self, trait_statistic_group):
if self._statistics is None:
return
for stat_type in tuple(self._statistics.keys()):
if stat_type.group != trait_statistic_group:
continue
self.remove_statistic(stat_type)
def reset_all_statistics_by_group(self, trait_statistic_group):
if self._statistics is None:
return
for statistic in self._statistics.values():
if statistic.group != trait_statistic_group:
continue
            statistic.set_value(statistic.default_value, ignore_caps=True)
statistic.reset_daily_caps()
def save(self, msg):
saved_data = False
if self._statistics is not None:
for statistic in self._statistics.values():
with ProtocolBufferRollback(msg.trait_statistics) as (statistic_msg):
statistic.save(statistic_msg)
saved_data = True
else:
if self._delayed_active_lod_statistics is not None:
msg.trait_statistics.extend(self._delayed_active_lod_statistics)
if self._trait_statistic_periodic_test_alarm_handle is not None:
msg.time_to_next_periodic_test = self._trait_statistic_periodic_test_alarm_handle.get_remaining_time().in_ticks()
saved_data = True
else:
if self._time_to_next_periodic_test is not None:
msg.time_to_next_periodic_test = self._time_to_next_periodic_test
return saved_data
def load(self, msg):
statistic_manager = services.get_instance_manager(Types.STATISTIC)
for trait_statistic_data in msg.trait_statistics:
statistic_type = statistic_manager.get(trait_statistic_data.trait_statistic_id)
if statistic_type is None:
continue
else:
if self.owner.lod >= statistic_type.min_lod_value:
stat = self.add_statistic(statistic_type, from_load=True)
if stat is None:
continue
stat.load(trait_statistic_data)
if statistic_type.min_lod_value == SimInfoLODLevel.ACTIVE:
if self._delayed_active_lod_statistics is None:
self._delayed_active_lod_statistics = list()
self._delayed_active_lod_statistics.append(trait_statistic_data)
if msg.HasField('time_to_next_periodic_test'):
self._time_to_next_periodic_test = msg.time_to_next_periodic_test | [
"[email protected]"
] | |
faf1f3684f84eac1eeec3b33cdfae00169f4277a | c38b292e7bfaa95ac9a5fbf56d247403941139f4 | /ticket_details/views.py | b9fc42bf585a470a67fc1c6f572b4ab7f393d0dc | [] | no_license | poojapauskar/vzcards-api | f6a79938ee032c1da1d04b86a266d939d73b8797 | ed0185f36e274d46f978a8f670a4189571280e8b | refs/heads/master | 2020-12-29T02:38:14.288750 | 2017-08-08T12:30:51 | 2017-08-08T12:30:51 | 44,667,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | from ticket_create.models import Ticket_create
from ticket_details.serializers import Ticket_detailsSerializer
from rest_framework import generics
# from ticket.permissions import IsOwnerOrReadOnly
# from rest_framework import permissions
from django.shortcuts import get_object_or_404
class Ticket_detailsDetail(generics.ListAPIView):
serializer_class = Ticket_detailsSerializer
def get_queryset(self):
ticket_id = self.kwargs['ticket_id']
        objects = Ticket_create.objects.filter(ticket_id=ticket_id)
return objects
from django.contrib.auth.models import User
from ticket_details.serializers import UserSerializer
from rest_framework import permissions
class UserList(generics.ListAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer
    # NOTE: perform_create is never invoked on a read-only RetrieveAPIView;
    # it only takes effect on create views (e.g. generics.CreateAPIView).
    def perform_create(self, serializer):
serializer.save(owner=self.request.user)
| [
"[email protected]"
] | |
d1353c31503e6d8d403d93c3e6896c4c42bb991d | 16b81ffcb40b429bde1e9bc10e5eeddd9d2ec51f | /leetcode/largest-number.py | 0fe00afd4ba572d7756d887bcf9932bddb740397 | [] | no_license | suminb/coding-exercise | 564424b7b98ea768c57a5b7f106fd7844e8e2843 | b8b9377e7a76b498a9e6fb325743b16cbc943932 | refs/heads/master | 2023-06-16T03:01:59.009635 | 2020-05-09T11:09:25 | 2020-05-09T11:09:25 | 171,614,062 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | # 179. Largest Number
# difficulty: medium
# https://leetcode.com/problems/largest-number/
from typing import List
import pytest
class CustomString(str):
def __gt__(self, other):
return self + other > other + self
def __lt__(self, other):
return self + other < other + self
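    # Ordering trick: digit-string a should precede b in the final number
    # iff a + b > b + a, so concatenation order, not plain string order,
    # drives the sort.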
class Solution:
def largestNumber(self, nums: List[int]) -> str:
number = ''.join(sorted([CustomString(x) for x in nums], reverse=True))
while len(number) > 1 and number[0] == '0':
number = number[1:]
return number
def test_compare():
assert CustomString('12') > CustomString('121')
assert CustomString('12') > CustomString('120')
assert CustomString('122') > CustomString('121')
assert CustomString('121') > CustomString('120')
@pytest.mark.parametrize('nums, expected', [
([], ''),
([5], '5'),
([0, 0], '0'),
([0, 0, 0], '0'),
([10, 2], '210'),
([10, 0, 0], '1000'),
([121, 12], '12121'),
([[3, 30, 34, 5, 9], '9534330']),
([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], '9876543210'),
([93, 5, 3, 1, 3, 412, 45, 6151823579123, 3752], '9361518235791235454123752331'),
([1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6], '9876655443322110'),
])
def test(nums, expected):
s = Solution()
assert expected == s.largestNumber(nums)
if __name__ == '__main__':
pytest.main(['-v', __file__])
| [
"[email protected]"
] | |
f743a0d111f8b3360e91aaf494b4103c4ca98714 | bef35774ebe121d9657a3d513a21073b8086b331 | /advanced-algorithms/greedy-algorithm/min_operations.py | 542e345b8a1ad9a2798e2bfebe31f3c693f476b0 | [] | no_license | greatertomi/Python-Algorithms-DataStructure | dab0097ec8a450dfddc47e3153359e6fcd873225 | a0ad6aa78086ba39e6f100ac551ddadb9433f724 | refs/heads/master | 2022-04-14T10:52:32.986333 | 2020-04-17T16:04:22 | 2020-04-17T16:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | def min_operations(target):
steps = 0
while target != 0:
while target/2 == target//2:
target = target // 2
steps += 1
target -= 1
steps += 1
return steps
print(min_operations(18))
| [
"[email protected]"
] | |
949bdd33912dec9468f0c4e0d06933263a1fd57f | 27789165f2d33a6cdc75387589c685bb4a5108d0 | /browse/motif_report.py | fba179d699171580442df54077dfddff6db5e1e6 | [] | no_license | poneill/collecTF | 61006e7616ea6593677a14f1ee3b57ff4b337c6b | 435f3da67cbb78b7ee0499e80bd52793b231a563 | refs/heads/master | 2021-01-18T05:17:10.901697 | 2014-05-29T22:56:45 | 2014-05-29T22:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,003 | py | """
This file contains the class implementation for reports. A MotifReport object
is constructed from a collection of curation-site-instance objects that share
the same TF and species. Some uses of the MotifReport class require all
curation-site-instance objects to have exactly the same TF-instances objects;
in other cases, sharing the same TF is enough (there can be paralog TFs in the
same organism).
"""
import base
from base import metasite
from functools import reduce  # built-in on Python 2; explicit import keeps Python 3 working
import sys
class MotifReport:
def __init__(self, m_cur_site_insts, nm_cur_site_insts=[]):
"""Given a collection of motif-associated and non-motif-associated
curation site instances, create the report object. The constructor also
checks if all curation_site_instance objects have the same TF and
species"""
# make sure that the list is not empty
assert m_cur_site_insts
self.m_cur_site_insts = m_cur_site_insts
self.nm_cur_site_insts = nm_cur_site_insts
# make sure all curation-site-instance objects have the same TF
#accessions and genome accession
#self.TF_accession_check()
#self.genome_accession_check()
def set_non_motif_curation_site_instances(self,non_motif_curation_site_insts):
"""Add some non-motif-associated curation-site-instances into the motif
report object."""
self.nm_cur_site_insts = non_motif_curation_site_insts
def TF_check(self):
"""Check if all curation_site_instance objects have the same TF"""
head = self.m_cur_site_insts[0]
assert all(head.curation.TF == csi.curation.TF
for csi in self.m_cur_site_insts)
assert all(head.curation.TF == csi.curation.TF
for csi in self.nm_cur_site_insts)
def species_check(self):
"""Check if all curation_site_instance objects have the same species"""
head = self.m_cur_site_insts[0]
assert all(head.site_instance.genome.taxonomy == csi.site_instance.genome.taxonomy
for csi in self.m_cur_site_insts)
assert all(head.site_instance.genome.taxonomy == csi.site_instance.genome.taxonomy
for csi in self.nm_cur_site_insts)
def TF_accession_check(self):
"""Check if all curation-site-instances have the same TF-instance.
Although all MotifReport objects have to satisfy the same TF and same
species criteria, all TFs don't have to be exactly the same protein, or
all species don't have to be same genomes. In other words, most of the
cases, it is fine to run this function and get a False result."""
head = self.m_cur_site_insts[0]
return (all(head.curation.TF_instances == csi.curation.TF_instances
for csi in self.m_cur_site_insts) and
all(head.curation.TF_instances == csi.curation.TF_instances
for csi in self.nm_cur_site_insts))
def genome_accession_check(self):
"""Check if all curation-site-instances have the same
genome-accession. Like TF_accession_check function, all
curation-site-instance objects do not have to have the same genome."""
head = self.m_cur_site_insts[0]
return (all(head.site_instance.genome == csi.site_instance.genome
for csi in self.m_cur_site_insts) and
all(head.site_instance.genome == csi.site_instance.genome
for csi in self.nm_cur_site_insts))
@property
def TF_name(self):
"""Return the name of the TF"""
#self.TF_accession_check()
return self.m_cur_site_insts[0].curation.TF.name
@property
def TF_accession(self):
"""Return the TF accession number.
This function should be called only if all curation-site-instance
objects have the same TF accession numbers."""
get_TF = lambda csi: csi.curation.TF_instances.all()[0]
assert all(get_TF(self.m_cur_site_insts[0]) == get_TF(x)
for x in self.m_cur_site_insts)
return str(get_TF(self.m_cur_site_insts[0]).protein_accession)
@property
def species_name(self):
"""Return the name of the species"""
return self.m_cur_site_insts[0].site_instance.genome.taxonomy.name
@property
def genome_accession(self):
"""Return the genome accession number.
This function should be called only when all curation-site-instance
objects have the same genome accession number."""
get_genome = lambda csi: csi.site_instance.genome
assert self.genome_accession_check()
return str(get_genome(self.m_cur_site_insts[0]).genome_accession)
def align_sites(self):
"""Align all binding sites using Lasagna"""
# make sure meta-sites are computed beforehand.
self.get_meta_sites()
sys.stdout.write('aligning sites...')
sys.stdout.flush()
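        # LASAGNA aligns the representative (delegate) site of each meta-site.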
r = base.bioutils.run_lasagna([x.delegate_site_instance for x in self.meta_sites])
sys.stdout.write('\t [done]\n')
return r
def get_meta_sites(self):
"""Create meta-sites from curation-site-instances."""
if not hasattr(self, 'meta_sites'):
# Compute them here.
sys.stdout.write('creating meta sites...')
sys.stdout.flush()
self.meta_sites = metasite.create_meta_sites(self.m_cur_site_insts,
self.nm_cur_site_insts)
sys.stdout.write('\t [done]\n')
return self.meta_sites
def set_meta_sites(self, meta_sites):
"""Instead of computing meta-sites, assign them directly. This method is
used if the Report object is an ensemble reports, from multiple
species/TFs. In this case, since meta-sites should be specific for
TF-species combination, computing meta-sites would give exactly the same
collection: union of meta-sites from individual reports."""
self.meta_sites = meta_sites
@property
def num_motif_cur_site_insts(self):
"""Return the number of motif-associated curation-site-instances."""
return len(self.m_cur_site_insts)
def get_all_motif_cur_site_insts(self):
"""Return all motif-associated curation-site-instances (ids only by
default; otherwise, return all objects)."""
return self.m_cur_site_insts.all()
def get_all_motif_cur_site_insts_ids(self):
return [x.pk for x in self.m_cur_site_insts]
def get_all_non_motif_cur_site_insts(self):
"""Return all non-motif-associated curation-site-instances (ids only by
default; otherwise, return all objects)."""
return self.nm_cur_site_insts.all()
def get_all_non_motif_cur_site_insts_ids(self):
return [x.pk for x in self.nm_cur_site_insts]
def get_all_cur_site_insts(self):
"""Return all curation-site-instance objects (both motif associated and
non-motif associated)."""
return (self.get_all_motif_cur_site_insts() |
self.get_all_non_motif_cur_site_insts())
def get_all_cur_site_insts_ids(self):
return [x.pk for x in self.get_all_cur_site_insts()]
def generate_browse_result_dict(self):
"""Generate a dictionary of values to add to the template context which
will be rendered in browse-result page. The view will use these values
just before rendering the template."""
return {
'TF_name': self.TF_name,
'species_name': self.species_name,
'cur_site_insts': self.get_all_cur_site_insts_ids(),
}
def generate_view_reports_dict(self):
"""Generate a dictionary of values to add to the template context which
will be rendered in view_reports page."""
return {
'TF_name': self.TF_name,
'species_name': self.species_name,
'meta_sites': self.get_meta_sites(),
'aligned_sites': self.align_sites(),
'cur_site_insts': self.get_all_cur_site_insts_ids(),
}
def make_reports(cur_site_insts):
"""Given a collection of motif-associated and non-motif-associated
curation-site-instance objects, group them by TF and species and create
    MotifReport objects. If there are no motif-associated curation-site-instances for
a given TF and species, a report object is not created, even if there are
some non-motif-associated curation-site-instances. In other words, report
objects are created for (TF,species) that have at least one motif-associated
curation-site-instance."""
# find all tuples of (TF,species)
tf_species = cur_site_insts.values_list("curation__TF", "site_instance__genome__taxonomy")\
.distinct()\
.order_by("curation__TF__name", "site_instance__genome__taxonomy__name")
# group all curation-site-instances by TF and species
reports = []
for (TF,species) in tf_species:
# filter motif-assoc cur-site-insts and non-motif-assoc ones by TF and species
m_csis = cur_site_insts.filter(curation__TF=TF,
site_instance__genome__taxonomy=species,
site_type="motif_associated")
nm_csis = cur_site_insts.filter(curation__TF=TF,
site_instance__genome__taxonomy=species,
site_type="non_motif_associated")
# generate a report only if there is at least one
if m_csis: reports.append(MotifReport(m_csis, nm_csis))
return reports
def make_ensemble_report(cur_site_insts):
"""Given a collection of curation-site-instance objects, align all of them
and generate a report."""
motif_associated = cur_site_insts.filter(site_type="motif_associated")
non_motif_associated = cur_site_insts.filter(site_type="non_motif_associated")
return MotifReport(motif_associated, non_motif_associated)
def merge_reports(reports):
"""Merge a collection of reports, without recomputing meta-sites. This
    method deprecates the <make_ensemble_report> function."""
# Get all motif associated and non-motif-associated sites from all
# reports. Each report has a collection of binding sites for a specific
# TF/species.
assert reports
    # Merge the curation-site-instance querysets of all individual reports.
all_csi = reduce(lambda x,y: x|y,
[r.get_all_cur_site_insts() for r in reports[1:]],
reports[0].get_all_cur_site_insts())
all_motif_csi = all_csi.filter(site_type='motif_associated')
all_non_motif_csi = all_csi.filter(site_type='non_motif_associated')
ensemble = MotifReport(all_motif_csi, all_non_motif_csi)
# instead of computing meta-sites again, set them using existing meta-site
# collection from individual reports
ensemble.set_meta_sites([ms for r in reports for ms in r.get_meta_sites()])
return ensemble
| [
"[email protected]"
] | |
8e8be09d2a84c32daac3f5af9233b2981859a259 | 3b9bf497cd29cea9c24462e0411fa8adbfa6ba60 | /placement-test/pt-test-2/Kangaroo.py | 9915efe82926333e63f3e269d4b28590f33cbaa9 | [] | no_license | niteesh2268/coding-prepation | 918823cb7f4965bec096ec476c639a06a9dd9692 | 19be0766f6b9c298fb32754f66416f79567843c1 | refs/heads/master | 2023-01-02T05:30:59.662890 | 2020-10-17T13:12:34 | 2020-10-17T13:12:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the kangaroo function below.
def kangaroo(x1, v1, x2, v2):
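    # Kangaroo 1 starts at or behind kangaroo 2 (the constraints give x1 < x2).
    # They land on the same spot iff kangaroo 1 jumps farther (v1 > v2) and the
    # head start x2 - x1 is an exact multiple of the per-jump gain v1 - v2.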
if v2 >= v1:
return 'NO'
if (x2-x1)%(v1-v2) == 0:
return 'YES'
return 'NO'
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
x1V1X2V2 = input().split()
x1 = int(x1V1X2V2[0])
v1 = int(x1V1X2V2[1])
x2 = int(x1V1X2V2[2])
v2 = int(x1V1X2V2[3])
result = kangaroo(x1, v1, x2, v2)
fptr.write(result + '\n')
fptr.close()
| [
"akualajayaprakash@gmailcom"
] | akualajayaprakash@gmailcom |
6fe75c8f5b323d1035afb99283468de94ec63bc8 | 4a81f78adf1a1adfc7bb1d19e42171cc856b2ccc | /bin/sqlformat | aebd25c96ed652302ce3c726c885c87b9c66a703 | [] | no_license | Prones94/Music_Site | af910c18c442b802971bc0a624891860266d8f85 | 924bffbc2a935217e84292e682c06fe1e83bb018 | refs/heads/master | 2022-04-21T02:33:41.006384 | 2020-04-21T22:38:15 | 2020-04-21T22:38:15 | 256,361,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | #!/Users/admin/Desktop/MAKE/BEW/BEW-1.2/Music/music_site/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
3a71509d0c18293aa9c9a0b17eaa0b048bc7c70a | 19a974c502ba48eb6e47a3dceeef90afacc1c9d0 | /pi/.pycharm_helpers/python_stubs/-1965838824/_lsprof.py | 0705bbfcb10220a688091160aa3aba261e28f944 | [] | no_license | DiderickM/TheoDisplay | 357541c9a35fa15c8c7360f3f10aa67499bc6872 | d126237e2fb6054c8055d934136731328e0eda48 | refs/heads/main | 2022-12-26T19:21:35.766225 | 2020-10-06T20:49:19 | 2020-10-06T20:49:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,496 | py | # encoding: utf-8
# module _lsprof
# from /usr/lib/python2.7/lib-dynload/_lsprof.arm-linux-gnueabihf.so
# by generator 1.145
""" Fast profiler """
# no imports
# no functions
# classes
class Profiler(object):
"""
Profiler(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
Builds a profiler object using the specified timer function.
The default timer is a fast built-in one based on real time.
For custom timer functions returning integers, time_unit can
be a float specifying a scale (i.e. how long each integer unit
is, in seconds).
"""
def clear(self): # real signature unknown; restored from __doc__
"""
clear()
Clear all profiling information collected so far.
"""
pass
def disable(self): # real signature unknown; restored from __doc__
"""
disable()
Stop collecting profiling information.
"""
pass
def enable(self, subcalls=True, builtins=True): # real signature unknown; restored from __doc__
"""
enable(subcalls=True, builtins=True)
Start collecting profiling information.
If 'subcalls' is True, also records for each function
statistics separated according to its current caller.
If 'builtins' is True, records the time spent in
built-in functions separately from their caller.
"""
pass
def getstats(self): # real signature unknown; restored from __doc__
"""
getstats() -> list of profiler_entry objects
Return all information collected by the profiler.
Each profiler_entry is a tuple-like object with the
following attributes:
code code object
callcount how many times this was called
reccallcount how many times called recursively
totaltime total time in this entry
inlinetime inline time in this entry (not in subcalls)
calls details of the calls
The calls attribute is either None or a list of
profiler_subentry objects:
code called code object
callcount how many times this is called
reccallcount how many times this is called recursively
totaltime total time spent in this call
inlinetime inline time (not in further subcalls)
"""
return []
def __init__(self, custom_timer=None, time_unit=None, subcalls=True, builtins=True): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
class profiler_entry(object):
# no doc
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
callcount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how many times this was called"""
calls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""details of the calls"""
code = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""code object or built-in function name"""
inlinetime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""inline time in this entry (not in subcalls)"""
reccallcount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how many times called recursively"""
totaltime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""total time in this entry"""
n_fields = 6
n_sequence_fields = 6
n_unnamed_fields = 0
class profiler_subentry(object):
# no doc
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
callcount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how many times this is called"""
code = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""called code object or built-in function name"""
inlinetime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""inline time (not in further subcalls)"""
reccallcount = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""how many times this is called recursively"""
totaltime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""total time spent in this call"""
n_fields = 5
n_sequence_fields = 5
n_unnamed_fields = 0
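# Note (editorial): the classes above are IDE-generated skeleton stubs for the
# C-level `_lsprof` profiler types ("real signature unknown; restored from
# __doc__"); the `pass` bodies are placeholders, not real implementations.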
| [
"[email protected]"
] | |
abf65c1f1b69d082fe55861689026c4a2695eae2 | 5a514c289084b358c658590f2e2599aea4e4e0a8 | /lib/engine/engine.py | ae0a01f5e1e06f6ca173a7444aeed05615dae022 | [] | no_license | gladiopeace/Tentacle | 602d51015d25f71e331a27ccf07270567d4b4a0e | 7b32937496415b77aec35a10551a4ff33863a829 | refs/heads/master | 2023-08-22T13:45:34.700076 | 2021-10-21T08:39:24 | 2021-10-21T08:39:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,293 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author = 'orleven'
import os
import time
import asyncio
import traceback
import async_timeout
from typing import AsyncIterable
from typing import Iterable
from typing import Union
from lib.core.data import logger
from lib.core.data import paths
from lib.core.async_pool import PoolCollector
from lib.core.pocmanage import POCManager
from lib.core.database import TaskDataDB
from script import Script
class Engine(object):
def __init__(self, name:str, targets: AsyncIterable, pm: POCManager, engine_name='Engine'):
self.spend_time = 0
self.name = name
self.pm = pm
self.engine_name = engine_name
self.targets = targets
self._total_task_count = 0
self._error_task_count = 0
self._find_task_count = 0
self.interval_time = 60
self.start_time = time.time()
self.is_continue = True
self.hashdb = TaskDataDB(os.path.join(paths.DATA_PATH, name))
self.hashdb.connect()
self.hashdb.init()
def print_progress(self,manager: PoolCollector):
found_count = self._find_task_count
error_count = self._error_task_count
remaining_count = manager.remain_task_count
scanning_count = manager.scanning_task_count
scanned_count = self._total_task_count - manager.remain_task_count
total_count = self._total_task_count
self.spend_time = time.time() - self.start_time
msg = '[%s] %s found | %s error | %s remaining | %s scanning | %s scanned in %.2f seconds.(total %s)' % (
self.name, found_count, error_count, remaining_count, scanning_count, scanned_count, self.spend_time,
total_count)
logger.sysinfo(msg)
async def _progress_daemon(self, manager: PoolCollector):
while True:
await asyncio.sleep(self.interval_time)
self.print_progress(manager)
async def submit_task(self, manager: PoolCollector):
"""subclass should override this function for _submit_task"""
    async def do_scan(self, module: Script, target: dict) -> Iterable[dict]:
"""subclass should override this function for do_scan"""
async def enum(self):
"""subclass should override this function for enum"""
| [
"[email protected]"
] | |
d4209a045d8c2874fb783624e9d57859a3f90d01 | febb7a4b889c2f40637e2b688eb770cf0809226f | /fython/test/instruction/iruc_test.py | 4bd165906959e2adc6c9080adb1c9ba8161931ea | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nicolasessisbreton/fython | 68253552c626640b5efc2a7cea9384c8e0425c08 | 988f5a94cee8b16b0000501a22239195c73424a1 | refs/heads/master | 2021-01-10T07:10:06.793158 | 2017-08-25T17:27:05 | 2017-08-25T17:27:05 | 50,076,320 | 48 | 3 | null | 2016-08-21T17:16:12 | 2016-01-21T02:30:31 | Python | UTF-8 | Python | false | false | 225 | py | s = r"""
.a.fy
int dimension(10) target x
int dimension(:) pointer y
y => x[3:5]
"""
from fython.test import *
writer(s)
w = load('.a', force=1, release=1, verbose=0)
# print(open(w.module.url.fortran_path, 'r').read())
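# The snippet above declares a target array `x` and a pointer `y`, then
# associates the pointer with the slice x[3:5] (Fortran pointer assignment `=>`).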
| [
"[email protected]"
] | |
f957ccaad9c4850b8a815994ef49492ba8f5b299 | 54f826d6103263e3983d1861ff902c62deb59916 | /cart/admin.py | 17023013cabe0fe5be3f2b25a5ad0d8d4e551005 | [] | no_license | arumjin/gt-qlik | e37582ae7d3f0ee1d4da0319459f92b588b1beb4 | 9cf859a669b2302f5430972528275fecdce70926 | refs/heads/master | 2022-11-08T18:57:35.386732 | 2020-06-30T00:55:15 | 2020-06-30T00:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from django.contrib import admin
from .models import CartItem,Cart
# Register your models here.
class CartItemAdmin(admin.ModelAdmin):
list_display = (
'chart',
'cart'
)
admin.site.register(CartItem,CartItemAdmin)
class CartAdmin(admin.ModelAdmin):
list_display = (
'user',
)
admin.site.register(Cart,CartAdmin)
| [
"[email protected]"
] | |
97a7a1b661d847598f5ef105388d61266babb110 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/441/usersdata/311/109834/submittedfiles/lista1.py | 9c83a6d93621cba2175ab70f2088ceea4fd25712 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # -*- coding: utf-8 -*-
n=int(input('Digite a quantidade de numeros: '))
a=[]
for i in range(n):
a.append(int(input('Digite o numero%d:' %(i+1))))
| [
"[email protected]"
] | |
362467cd5e32cd4dcb90e29eaca44d0b17706341 | 3b030444b2d1d9d57197ccba41387b447114b210 | /config.py | fb716512cf2763712c551c3b4015a4743de47d8e | [] | no_license | popfido/PairCNN-Ranking | ec85e45ef54f05a6b1778297cd316b2fa8a23a90 | b29bbe774888e154a8bad5dafa67ec24aba33256 | refs/heads/master | 2020-03-09T10:21:58.509310 | 2018-04-09T09:27:02 | 2018-04-09T09:27:02 | 128,735,443 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | # coding:utf8
import warnings
class DefaultConfig(object):
    env = 'default' # visdom environment
    model = 'PairCNN' # model to use; the name must match one in models/__init__.py
    train_dir = './'
    train_data_root = './data/train/' # path to the training set
    validate_data_root = './data/validate' # path to the validation set
    test_data_root = './data/test/' # path to the test set
    load_model_path = None # path of a pre-trained model to load; None means no loading
dev_ratio = 0.1 # Ratio of dev/validation data picked from training set
batch_size = 128 # batch size
use_gpu = False # user GPU or not
num_workers = 4 # how many workers for loading data
print_freq = 20 # print info every N batch
eval_freq = 100 # Evaluate model on dev set after this many steps (default: 100)
checkpoint_freq = 100 # Save model after this many steps (default: 100)
debug_file = '/tmp/debug' # if os.path.exists(debug_file): enter ipdb
result_file = 'result.csv'
seed = 233 # Random seed (default: 233)
max_epoch = 20
lr = 0.1 # initial learning rate
    lr_decay = 0.95 # when val_loss increases, lr = lr * lr_decay
embedding_dim = 64 # Dimensionality of character embedding (default: 64)
filter_sizes = "2,3" # Comma-separated filter sizes (default: '2,3')
num_filters = 64 # Number of filters per filter size (default: 64)
num_hidden = 100 # Number of hidden layer units (default: 100)
dropout_keep_prob = 0.5 # Dropout keep probability (default: 0.5)
max_len_left = 10 # max document length of left input
max_len_right = 10 # max document length of right input
weight_decay = 1e-4 # l2_regularization
vocab_size = 300000 # Most number of words in vocab (default: 300000)
def parse(self, kwargs):
"""
        Update config attributes from the kwargs dict.
"""
for k, v in kwargs.items():
if not hasattr(self, k):
warnings.warn("Warning: opt has not attribut %s" % k)
setattr(self, k, v)
print('user config:')
for k, v in self.__class__.__dict__.items():
if not k.startswith('__'):
print(k, getattr(self, k))
DefaultConfig.parse = parse
opt = DefaultConfig()
# opt.parse = parse
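# Illustrative usage (editorial addition): command-line overrides can be
# applied with, e.g., opt.parse({'lr': 0.01, 'use_gpu': True}); unknown keys
# only trigger a warning before being set on the instance.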
| [
"[email protected]"
] | |
f4c3288cf1c1417cd9ed9515fb2741abe00f3bb9 | 07b4dd9a88f3404c4851ea7cbb57c67035bc9a54 | /eric.py | b4e8a8f25539f10b194515115cc8fd428448ebe5 | [] | no_license | surajgholap/python-Misc | 9c9d02c42bb37b7378d7336343f8bef7cd802edf | 4a8ce4bfa5a959692d98663b7b5c0b67a165835f | refs/heads/master | 2021-06-17T19:19:25.021038 | 2021-01-27T20:54:03 | 2021-01-27T20:54:03 | 142,781,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | from collections import Counter
import requests
def top_five(corpus):
    # The original loop counted single characters into count_map and never
    # printed a result; count whole words instead and report the five most
    # frequent ones.
    counter = Counter(corpus.split())
    for word, count in counter.most_common(5):
        print(word, count)
def clean_func(corpus, stop):
new = []
for i in corpus.split(" "):
i = i.lower()
if i.isalpha() and i not in stop:
new.append(i)
top_five(" ".join(new))
response = requests.get(
"https://ocw.mit.edu/ans7870/6/6.006/s08/lecturenotes/files/t8.shakespeare.txt")
stop_words = requests.get(
"https://gist.githubusercontent.com/sebleier/554280/raw/7e0e4a1ce04c2bb7bd41089c9821dbcf6d0c786c/NLTK's%2520list%2520of%2520english%2520stopwords")
stop_list = stop_words.text.splitlines()
# print(stop_list)
content = response.text.splitlines()
content = " ".join(content[245:])
# print(content)
clean_func(content, stop_list)
| [
"[email protected]"
] | |
f56acf97b9abbc3b137bf4f924ed3ee07b7c5424 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/vitchyr/disentanglement/n_object_pnp/exp_5_on_random_object_init.py | c4a5e28964dd857af355d261e18481bdc3671f3c | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,317 | py | import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.experiments.disentanglement.contextual_encoder_distance_launcher import (
encoder_goal_conditioned_sac_experiment
)
from rlkit.launchers.launcher_util import run_experiment
if __name__ == "__main__":
variant = dict(
env_id='OneObjectPickAndPlace2DEnv-v0',
qf_state_encoder_is_goal_encoder=True,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_using_encoder_settings=dict(
encode_state=False,
encode_goal=False,
detach_encoder_via_goal=False,
detach_encoder_via_state=False,
),
sac_trainer_kwargs=dict(
reward_scale=1,
discount=0.99,
soft_target_tau=1e-3,
target_update_period=1,
single_loss_weight=0.5,
use_automatic_entropy_tuning=True,
),
num_presampled_goals=5000,
max_path_length=100,
algo_kwargs=dict(
batch_size=256,
# num_epochs=500,
# num_eval_steps_per_epoch=400,
# num_expl_steps_per_train_loop=1000,
# num_trains_per_train_loop=1000,
# min_num_steps_before_training=1000,
num_epochs=3,
num_eval_steps_per_epoch=100,
num_expl_steps_per_train_loop=100,
num_trains_per_train_loop=100,
min_num_steps_before_training=100,
),
replay_buffer_kwargs=dict(
fraction_future_context=0.5,
fraction_distribution_context=0.5,
max_size=int(1e6),
),
save_debug_video=False,
visualize_representation=False,
debug_visualization_kwargs=dict(
save_period=20,
initial_save_period=2,
),
save_video=True,
save_video_kwargs=dict(
save_video_period=20,
rows=3,
columns=3,
subpad_length=1,
subpad_color=127,
pad_length=1,
pad_color=0,
num_columns_per_rollout=5,
),
evaluation_goal_sampling_mode='random',
exploration_goal_sampling_mode='random',
exploration_policy_kwargs=dict(
exploration_version='occasionally_repeat',
repeat_prob=0.5,
),
encoder_cnn_kwargs=dict(
kernel_sizes=[3, 3, 3],
n_channels=[8, 16, 32],
strides=[1, 1, 1],
paddings=[0, 0, 0],
pool_type='none',
hidden_activation='relu',
),
use_image_observations=True,
env_renderer_kwargs=dict(
width=12,
height=12,
output_image_format='CHW',
),
video_renderer_kwargs=dict(
width=48,
height=48,
output_image_format='CHW',
),
debug_renderer_kwargs=dict(
width=48,
height=48,
output_image_format='CHW',
),
use_separate_encoder_for_policy=True,
encoder_kwargs=dict(
hidden_sizes=[],
),
distance_scatterplot_save_period=50,
distance_scatterplot_initial_save_period=10,
)
search_space = {
'reward_type': [
'state_distance',
# 'encoder_distance',
],
'use_image_observations': [
True,
# False,
],
'latent_dim': [
8,
# 16,
],
'max_path_length': [
40,
],
'encoder_kwargs.hidden_sizes': [
[],
],
'env_id': [
'TwoObject-PickAndPlace-OnRandomObjectInit-2D-v1',
],
'replay_buffer_kwargs.fraction_future_context': [
0.5,
],
'disentangled_qf_kwargs.architecture': [
# 'single_head_match_many_heads',
'many_heads',
],
'sac_trainer_kwargs.single_loss_weight': [
# 1.0,
# 0.9,
# 0.5,
# 0.1,
0.0,
]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
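    # Editorial note: the sweeper presumably enumerates the cartesian product
    # of the option lists in search_space, overriding the matching keys of
    # `variant` for each generated configuration (not verified against rlkit).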
n_seeds = 1
mode = 'local'
exp_name = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
# n_seeds = 2
# mode = 'sss'
# exp_name = 'n-object-pnp--exp-5--on-random-object-init'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for seed in range(n_seeds):
variant['exp_id'] = exp_id
# variant['seed'] = seed
run_experiment(
encoder_goal_conditioned_sac_experiment,
exp_name=exp_name,
mode=mode,
variant=variant,
use_gpu=False,
num_exps_per_instance=2,
# slurm_config_name='cpu_co',
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
),
time_in_mins=int(2.5*24*60),
)
| [
"[email protected]"
] | |
2c797dce206728933f0809dc69e3834e9b33077d | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/span/ldestination.py | 9a6afadcebc410e9ab61d574a95437569b884b82 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,545 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LDestination(Mo):
"""
    The local SPAN destination, which is where network traffic is sent for analysis by a network analyzer. When you create a traffic monitoring session, you must select a local SPAN source and destination. The type of session (Tenant, Access, or Fabric) determines the allowed types of local SPAN sources and destinations. The destination can be either a port or endpoint group. If the destination is a port, it should not be one that has been configured for other purposes.
"""
meta = ClassMeta("cobra.model.span.LDestination")
meta.moClassName = "spanLDestination"
meta.rnFormat = "ldst"
meta.category = MoCategory.REGULAR
meta.label = "Local Span Destination"
meta.writeAccessMask = 0x20001
meta.readAccessMask = 0x20001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.span.Session")
meta.superClasses.add("cobra.model.span.Destination")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.rnPrefixes = [
('ldst', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14403, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "mtu", "mtu", 2039, PropCategory.REGULAR)
prop.label = "MTU"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(64, 9216)]
prop.defaultValue = 1518
prop.defaultValueStr = "1518"
meta.props.add("mtu", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "operSt", "operSt", 2040, PropCategory.REGULAR)
prop.label = "Operational State"
prop.isOper = True
prop.defaultValue = 2
prop.defaultValueStr = "down"
prop._addConstant("down", "down", 2)
prop._addConstant("failed", "failed", 3)
prop._addConstant("partial", "partial", 4)
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("up", "up", 1)
meta.props.add("operSt", prop)
prop = PropMeta("str", "operStQual", "operStQual", 2041, PropCategory.REGULAR)
prop.label = "Operational State Qualifier"
prop.isOper = True
prop.defaultValue = 3
prop.defaultValueStr = "no-oper-src-dst"
prop._addConstant("Dst-PC-Member-Not-Supported-err", "a-pc-member-port-cannot-be-a-local-span-destination", 19)
prop._addConstant("active", "the-session-is-up", 1)
prop._addConstant("dummy-src-err", "span-src-is-partially/fully-impacted,-not-programmed-due-to-hw-res-exhaustion", 20)
prop._addConstant("error", "generic-error", 4)
prop._addConstant("hw-err", "hardware-error", 15)
prop._addConstant("invalid-dst-mode", "dst-in-wrong-mode", 9)
prop._addConstant("invalid-ip", "no-valid-ip-address", 12)
prop._addConstant("invalid-src-mode", "src-in-wrong-mode", 10)
prop._addConstant("invalid-vrf", "no-valid-vrf", 11)
prop._addConstant("no-dst", "no-dest-configured", 6)
prop._addConstant("no-eg-intf", "egress-interface-not-resolved", 14)
prop._addConstant("no-hw-res", "no-hardware-resource", 2)
prop._addConstant("no-oper-src-dst", "no-operational-src/dst", 3)
prop._addConstant("no-route", "no-route-to-destination-ip-address", 13)
prop._addConstant("no-src", "no-sources-configured", 5)
prop._addConstant("no-src-dst", "no-src/dst-configured", 7)
prop._addConstant("not-supported-err", "configuration-not-supported-on-this-tor", 17)
prop._addConstant("pc-with-lacp-err", "pc-destination-with-lacp-not-supported", 18)
prop._addConstant("shut", "session-admin-shut", 8)
prop._addConstant("ver-error", "erspan-version-not-supported", 16)
meta.props.add("operStQual", prop)
prop = PropMeta("str", "port", "port", 2051, PropCategory.REGULAR)
prop.label = "Port"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("port", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "spanCfgFailedBmp", "spanCfgFailedBmp", 14942, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
prop._addConstant("span:LDestinationdescr_failed_flag", None, 4)
prop._addConstant("span:LDestinationlcOwn_failed_flag", None, -9223372036854775808)
prop._addConstant("span:LDestinationmodTs_failed_flag", None, 2305843009213693952)
prop._addConstant("span:LDestinationmonPolDn_failed_flag", None, 64)
prop._addConstant("span:LDestinationmtu_failed_flag", None, 8)
prop._addConstant("span:LDestinationnameAlias_failed_flag", None, 2)
prop._addConstant("span:LDestinationname_failed_flag", None, 1)
prop._addConstant("span:LDestinationport_failed_flag", None, 128)
meta.props.add("spanCfgFailedBmp", prop)
prop = PropMeta("str", "spanCfgFailedTs", "spanCfgFailedTs", 14944, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
meta.props.add("spanCfgFailedTs", prop)
prop = PropMeta("str", "spanCfgState", "spanCfgState", 14943, PropCategory.REGULAR)
prop.label = "None"
prop.isOper = True
meta.props.add("spanCfgState", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
e871199a0c8a30d8eeef55c5dcd37eb21f00c5ad | 404378e736e3b9c8b22cedda872af2da7562b58d | /Class Project/ClassProject_SenWang 2/FiPy-3.1.3/fipy/variables/distanceVariable.py | 99aec464772849bedb2044fc2604c1479e4df37f | [
"LicenseRef-scancode-public-domain"
] | permissive | wangsen992/bayesian-surrogate-modeling-coursework | d6d13b8fb457bc685d9fe51ef30c14c9cd3539b9 | 7abe9e6c5761b27ac99960fb2e4b98f4dda746eb | refs/heads/master | 2020-09-09T23:15:20.088030 | 2019-11-14T02:29:07 | 2019-11-14T02:29:07 | 221,593,138 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,920 | py | #!/usr/bin/env python
## -*-Pyth-*-
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "distanceVariable.py"
#
# Author: Jonathan Guyer <[email protected]>
# Author: Daniel Wheeler <[email protected]>
# Author: James Warren <[email protected]>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
#
# ###################################################################
##
__docformat__ = 'restructuredtext'
from fipy.tools import numerix
from fipy.tools.numerix import MA
from fipy.tools.decorators import getsetDeprecated
from fipy.variables.cellVariable import CellVariable
from fipy.tests.doctestPlus import register_skipper
import sys
import os
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def _checkForLSMLIB():
return module_exists('pylsmlib')
def _checkForSKFMM():
return module_exists('skfmm')
def _parseLSMSolver():
args = [s.lower() for s in sys.argv[1:]]
# any command-line specified solver takes precedence over environment variables
if '--lsmlib' in args:
if _checkForLSMLIB():
return "lsmlib"
else:
return None
elif '--skfmm' in args:
if _checkForSKFMM():
return "skfmm"
else:
return None
elif 'FIPY_LSM' in os.environ:
return os.environ['FIPY_LSM'].lower()
elif _checkForLSMLIB():
return 'lsmlib'
elif _checkForSKFMM():
return 'skfmm'
else:
return None
LSM_SOLVER = _parseLSMSolver()
register_skipper(flag="LSM",
test=lambda : LSM_SOLVER is not None,
why="neither `lsmlib` nor `skfmm` can be found on the $PATH")
register_skipper(flag="LSMLIB",
test=lambda : LSM_SOLVER == 'lsmlib',
why="`lsmlib` must be used to run some tests")
register_skipper(flag="SKFMM",
test=lambda : LSM_SOLVER == 'skfmm',
why="`skfmm` must be used to run some tests")
__all__ = ["DistanceVariable"]
class DistanceVariable(CellVariable):
r"""
A `DistanceVariable` object calculates :math:`\phi` so it satisfies,
.. math::
\abs{\nabla \phi} = 1
using the fast marching method with an initial condition defined
by the zero level set. The solution can either be first or second
order.
Here we will define a few test cases. Firstly a 1D test case
>>> from fipy.meshes import Grid1D
>>> from fipy.tools import serialComm
>>> mesh = Grid1D(dx = .5, nx = 8, communicator=serialComm)
>>> from distanceVariable import DistanceVariable
>>> var = DistanceVariable(mesh = mesh, value = (-1., -1., -1., -1., 1., 1., 1., 1.))
>>> var.calcDistanceFunction() #doctest: +LSM
>>> answer = (-1.75, -1.25, -.75, -0.25, 0.25, 0.75, 1.25, 1.75)
>>> print var.allclose(answer) #doctest: +LSM
1
A 1D test case with very small dimensions.
>>> dx = 1e-10
>>> mesh = Grid1D(dx = dx, nx = 8, communicator=serialComm)
>>> var = DistanceVariable(mesh = mesh, value = (-1., -1., -1., -1., 1., 1., 1., 1.))
>>> var.calcDistanceFunction() #doctest: +LSM
>>> answer = numerix.arange(8) * dx - 3.5 * dx
>>> print var.allclose(answer) #doctest: +LSM
1
A 2D test case to test `_calcTrialValue` for a pathological case.
>>> dx = 1.
>>> dy = 2.
>>> from fipy.meshes import Grid2D
>>> mesh = Grid2D(dx = dx, dy = dy, nx = 2, ny = 3)
>>> var = DistanceVariable(mesh = mesh, value = (-1., 1., 1., 1., -1., 1.))
>>> var.calcDistanceFunction() #doctest: +LSM
>>> vbl = -dx * dy / numerix.sqrt(dx**2 + dy**2) / 2.
>>> vbr = dx / 2
>>> vml = dy / 2.
>>> crossProd = dx * dy
>>> dsq = dx**2 + dy**2
>>> top = vbr * dx**2 + vml * dy**2
>>> sqrt = crossProd**2 *(dsq - (vbr - vml)**2)
>>> sqrt = numerix.sqrt(max(sqrt, 0))
>>> vmr = (top + sqrt) / dsq
>>> answer = (vbl, vbr, vml, vmr, vbl, vbr)
>>> print var.allclose(answer) #doctest: +LSM
1
The `extendVariable` method solves the following equation for a given
extensionVariable.
.. math::
\nabla u \cdot \nabla \phi = 0
using the fast marching method with an initial condition defined at
the zero level set.
>>> from fipy.variables.cellVariable import CellVariable
>>> mesh = Grid2D(dx = 1., dy = 1., nx = 2, ny = 2, communicator=serialComm)
>>> var = DistanceVariable(mesh = mesh, value = (-1., 1., 1., 1.))
>>> var.calcDistanceFunction() #doctest: +LSM
>>> extensionVar = CellVariable(mesh = mesh, value = (-1, .5, 2, -1))
>>> tmp = 1 / numerix.sqrt(2)
>>> print var.allclose((-tmp / 2, 0.5, 0.5, 0.5 + tmp)) #doctest: +LSM
1
>>> var.extendVariable(extensionVar, order=1) #doctest: +LSM
>>> print extensionVar.allclose((1.25, .5, 2, 1.25)) #doctest: +LSM
1
>>> mesh = Grid2D(dx = 1., dy = 1., nx = 3, ny = 3, communicator=serialComm)
>>> var = DistanceVariable(mesh = mesh, value = (-1., 1., 1.,
... 1., 1., 1.,
... 1., 1., 1.))
>>> var.calcDistanceFunction(order=1) #doctest: +LSM
>>> extensionVar = CellVariable(mesh = mesh, value = (-1., .5, -1.,
... 2., -1., -1.,
... -1., -1., -1.))
>>> v1 = 0.5 + tmp
>>> v2 = 1.5
>>> tmp1 = (v1 + v2) / 2 + numerix.sqrt(2. - (v1 - v2)**2) / 2
>>> tmp2 = tmp1 + 1 / numerix.sqrt(2)
>>> print var.allclose((-tmp / 2, 0.5, 1.5, 0.5, 0.5 + tmp,
... tmp1, 1.5, tmp1, tmp2)) #doctest: +LSM
1
>>> answer = (1.25, .5, .5, 2, 1.25, 0.9544, 2, 1.5456, 1.25)
>>> var.extendVariable(extensionVar, order=1) #doctest: +LSM
>>> print extensionVar.allclose(answer, rtol = 1e-4) #doctest: +LSM
1
Test case for a bug that occurs when initializing the distance
variable at the interface. Currently it is assumed that adjacent cells
that are opposite sign neighbors have perpendicular normal vectors. In
fact the two closest cells could have opposite normals.
>>> mesh = Grid1D(dx = 1., nx = 3)
>>> var = DistanceVariable(mesh = mesh, value = (-1., 1., -1.))
>>> var.calcDistanceFunction() #doctest: +LSM
>>> print var.allclose((-0.5, 0.5, -0.5)) #doctest: +LSM
1
Testing second order. This example failed with Scikit-fmm.
>>> mesh = Grid2D(dx = 1., dy = 1., nx = 4, ny = 4, communicator=serialComm)
>>> var = DistanceVariable(mesh = mesh, value = (-1., -1., 1., 1.,
... -1., -1., 1., 1.,
... 1., 1., 1., 1.,
... 1, 1, 1, 1))
>>> var.calcDistanceFunction(order=2) #doctest: +LSM
>>> answer = [-1.30473785, -0.5, 0.5, 1.49923009,
... -0.5, -0.35355339, 0.5, 1.45118446,
... 0.5, 0.5, 0.97140452, 1.76215286,
... 1.49923009, 1.45118446, 1.76215286, 2.33721352]
>>> print numerix.allclose(var, answer, rtol=1e-9) #doctest: +LSM
True
** A test for a bug in both LSMLIB and Scikit-fmm **
The following test gives different result depending on whether
LSMLIB or Scikit-fmm is used. There is a deeper problem that is
related to this issue. When a value becomes "known" after
previously being a "trial" value it updates its neighbors'
values. In a second order scheme the neighbors one step away also
need to be updated (if the in between cell is "known" and the far
cell is a "trial" cell), but are not in either package. By luck
(due to trial values having the same value), the values calculated
in Scikit-fmm for the following example are correct although an
example that didn't work for Scikit-fmm could also be constructed.
>>> mesh = Grid2D(dx = 1., dy = 1., nx = 4, ny = 4, communicator=serialComm)
>>> var = DistanceVariable(mesh = mesh, value = (-1., -1., -1., -1.,
... 1., 1., -1., -1.,
... 1., 1., -1., -1.,
... 1., 1., -1., -1.))
>>> var.calcDistanceFunction(order=2) #doctest: +LSM
>>> var.calcDistanceFunction(order=2) #doctest: +LSM
>>> answer = [-0.5, -0.58578644, -1.08578644, -1.85136395,
... 0.5, 0.29289322, -0.58578644, -1.54389939,
... 1.30473785, 0.5, -0.5, -1.5,
... 1.49547948, 0.5, -0.5, -1.5]
The 3rd and 7th element are different for LSMLIB. This is because
the 15th element is not "known" when the "trial" value for the 7th
element is calculated. Scikit-fmm calculates the values in a
slightly different order so gets a seemingly better answer, but
this is just chance.
>>> print numerix.allclose(var, answer, rtol=1e-9) #doctest: +SKFMM
True
"""
def __init__(self, mesh, name = '', value = 0., unit = None, hasOld = 0):
"""
Creates a `distanceVariable` object.
:Parameters:
- `mesh`: The mesh that defines the geometry of this variable.
- `name`: The name of the variable.
- `value`: The initial value.
- `unit`: the physical units of the variable
- `hasOld`: Whether the variable maintains an old value.
"""
CellVariable.__init__(self, mesh, name = name, value = value, unit = unit, hasOld = hasOld)
self._markStale()
def _calcValue(self):
return self._value
def extendVariable(self, extensionVariable, order=2):
"""
Calculates the extension of `extensionVariable` from the zero
level set.
:Parameters:
- `extensionVariable`: The variable to extend from the zero
level set.
"""
dx, shape = self.getLSMshape()
extensionValue = numerix.reshape(extensionVariable, shape)
phi = numerix.reshape(self._value, shape)
if LSM_SOLVER == 'lsmlib':
from pylsmlib import computeExtensionFields as extension_velocities
elif LSM_SOLVER == 'skfmm':
from skfmm import extension_velocities
else:
raise Exception("Neither `lsmlib` nor `skfmm` can be found on the $PATH")
tmp, extensionValue = extension_velocities(phi, extensionValue, ext_mask=phi < 0., dx=dx, order=order)
extensionVariable[:] = extensionValue.flatten()
def getLSMshape(self):
mesh = self.mesh
if hasattr(mesh, 'nz'):
raise Exception("3D meshes not yet implemented")
elif hasattr(mesh, 'ny'):
dx = (mesh.dy, mesh.dx)
shape = (mesh.ny, mesh.nx)
elif hasattr(mesh, 'nx'):
dx = (mesh.dx,)
shape = mesh.shape
else:
raise Exception("Non grid meshes can not be used for solving the FMM.")
return dx, shape
def calcDistanceFunction(self, order=2):
"""
Calculates the `distanceVariable` as a distance function.
:Parameters:
          - `order`: The order of accuracy for the distance function
calculation, either 1 or 2.
"""
dx, shape = self.getLSMshape()
if LSM_SOLVER == 'lsmlib':
from pylsmlib import distance
elif LSM_SOLVER == 'skfmm':
from skfmm import distance
else:
raise Exception("Neither `lsmlib` nor `skfmm` can be found on the $PATH")
self._value = distance(numerix.reshape(self._value, shape), dx=dx, order=order).flatten()
self._markFresh()
@getsetDeprecated
def getCellInterfaceAreas(self):
return self.cellInterfaceAreas
@property
def cellInterfaceAreas(self):
"""
Returns the length of the interface that crosses the cell
A simple 1D test:
>>> from fipy.meshes import Grid1D
>>> mesh = Grid1D(dx = 1., nx = 4)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-1.5, -0.5, 0.5, 1.5))
>>> answer = CellVariable(mesh=mesh, value=(0, 0., 1., 0))
>>> print numerix.allclose(distanceVariable.cellInterfaceAreas,
... answer)
True
A 2D test case:
>>> from fipy.meshes import Grid2D
>>> from fipy.variables.cellVariable import CellVariable
>>> mesh = Grid2D(dx = 1., dy = 1., nx = 3, ny = 3)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (1.5, 0.5, 1.5,
... 0.5,-0.5, 0.5,
... 1.5, 0.5, 1.5))
>>> answer = CellVariable(mesh=mesh,
... value=(0, 1, 0, 1, 0, 1, 0, 1, 0))
>>> print numerix.allclose(distanceVariable.cellInterfaceAreas, answer)
True
Another 2D test case:
>>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
>>> from fipy.variables.cellVariable import CellVariable
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-0.5, 0.5, 0.5, 1.5))
>>> answer = CellVariable(mesh=mesh,
... value=(0, numerix.sqrt(2) / 4, numerix.sqrt(2) / 4, 0))
>>> print numerix.allclose(distanceVariable.cellInterfaceAreas,
... answer)
True
        Test to check that the circumference of a circle is, in fact,
:math:`2\pi r`.
>>> mesh = Grid2D(dx = 0.05, dy = 0.05, nx = 20, ny = 20)
>>> r = 0.25
>>> x, y = mesh.cellCenters
>>> rad = numerix.sqrt((x - .5)**2 + (y - .5)**2) - r
>>> distanceVariable = DistanceVariable(mesh = mesh, value = rad)
>>> print numerix.allclose(distanceVariable.cellInterfaceAreas.sum(), 1.57984690073)
1
"""
from fipy.variables.interfaceAreaVariable import _InterfaceAreaVariable
return _InterfaceAreaVariable(self)
@getsetDeprecated
def _getCellInterfaceNormals(self):
return self._cellInterfaceNormals
@property
def _cellInterfaceNormals(self):
"""
Returns the interface normals over the cells.
>>> from fipy.meshes import Grid2D
>>> from fipy.variables.cellVariable import CellVariable
>>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-0.5, 0.5, 0.5, 1.5))
>>> v = 1 / numerix.sqrt(2)
>>> answer = CellVariable(mesh=mesh,
... value=(((0, 0, v, 0),
... (0, 0, 0, 0),
... (0, 0, 0, 0),
... (0, v, 0, 0)),
... ((0, 0, v, 0),
... (0, 0, 0, 0),
... (0, 0, 0, 0),
... (0, v, 0, 0))))
>>> print numerix.allclose(distanceVariable._cellInterfaceNormals, answer)
True
"""
dim = self.mesh.dim
valueOverFaces = numerix.repeat(self._cellValueOverFaces[numerix.newaxis, ...], dim, axis=0)
cellFaceIDs = self.mesh.cellFaceIDs
if cellFaceIDs.shape[-1] > 0:
interfaceNormals = self._interfaceNormals[...,cellFaceIDs]
else:
interfaceNormals = 0
return MA.where(valueOverFaces < 0, 0, interfaceNormals)
@getsetDeprecated
def _getInterfaceNormals(self):
return self._interfaceNormals
@property
def _interfaceNormals(self):
"""
Returns the normals on the boundary faces only, the other are set to zero.
>>> from fipy.meshes import Grid2D
>>> from fipy.variables.faceVariable import FaceVariable
>>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-0.5, 0.5, 0.5, 1.5))
>>> v = 1 / numerix.sqrt(2)
>>> answer = FaceVariable(mesh=mesh,
... value=((0, 0, v, 0, 0, 0, 0, v, 0, 0, 0, 0),
... (0, 0, v, 0, 0, 0, 0, v, 0, 0, 0, 0)))
>>> print numerix.allclose(distanceVariable._interfaceNormals, answer)
True
"""
M = self.mesh.dim
interfaceFlag = numerix.repeat(self._interfaceFlag[numerix.newaxis, ...], M, axis=0)
return numerix.where(interfaceFlag, self._levelSetNormals, 0)
@getsetDeprecated
def _getInterfaceFlag(self):
return self._interfaceFlag
@property
def _interfaceFlag(self):
"""
Returns 1 for faces on boundary and 0 otherwise.
>>> from fipy.meshes import Grid2D
>>> from fipy.variables.faceVariable import FaceVariable
>>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-0.5, 0.5, 0.5, 1.5))
>>> answer = FaceVariable(mesh=mesh,
... value=(0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0))
>>> print numerix.allclose(distanceVariable._interfaceFlag, answer)
True
"""
adjacentCellIDs = self.mesh._adjacentCellIDs
val0 = numerix.take(numerix.array(self._value), adjacentCellIDs[0])
val1 = numerix.take(numerix.array(self._value), adjacentCellIDs[1])
return numerix.where(val1 * val0 < 0, 1, 0)
@getsetDeprecated
def _getCellInterfaceFlag(self):
return self._cellInterfaceFlag
@property
def _cellInterfaceFlag(self):
"""
Returns 1 for those cells on the interface:
>>> from fipy.meshes import Grid2D
>>> from fipy.variables.cellVariable import CellVariable
>>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-0.5, 0.5, 0.5, 1.5))
>>> answer = CellVariable(mesh=mesh, value=(0, 1, 1, 0))
>>> print numerix.allclose(distanceVariable._cellInterfaceFlag, answer)
True
"""
from fipy.variables.interfaceFlagVariable import _InterfaceFlagVariable
return _InterfaceFlagVariable(self)
@getsetDeprecated
def _getCellValueOverFaces(self):
return self._cellValueOverFaces
@property
def _cellValueOverFaces(self):
"""
Returns the cells values at the faces.
>>> from fipy.meshes import Grid2D
>>> from fipy.variables.cellVariable import CellVariable
>>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-0.5, 0.5, 0.5, 1.5))
>>> answer = CellVariable(mesh=mesh,
... value=((-.5, .5, .5, 1.5),
... (-.5, .5, .5, 1.5),
... (-.5, .5, .5, 1.5),
... (-.5, .5, .5, 1.5)))
>>> print numerix.allclose(distanceVariable._cellValueOverFaces, answer)
True
"""
M = self.mesh._maxFacesPerCell
N = self.mesh.numberOfCells
return numerix.reshape(numerix.repeat(numerix.array(self._value)[numerix.newaxis, ...], M, axis=0), (M, N))
@getsetDeprecated
def _getLevelSetNormals(self):
return self._levelSetNormals
@property
def _levelSetNormals(self):
"""
Return the face level set normals.
>>> from fipy.meshes import Grid2D
>>> from fipy.variables.faceVariable import FaceVariable
>>> mesh = Grid2D(dx = .5, dy = .5, nx = 2, ny = 2)
>>> distanceVariable = DistanceVariable(mesh = mesh,
... value = (-0.5, 0.5, 0.5, 1.5))
>>> v = 1 / numerix.sqrt(2)
>>> answer = FaceVariable(mesh=mesh,
... value=((0, 0, v, v, 0, 0, 0, v, 0, 0, v, 0),
... (0, 0, v, v, 0, 0, 0, v, 0, 0, v, 0)))
>>> print numerix.allclose(distanceVariable._levelSetNormals, answer)
True
"""
faceGrad = self.grad.arithmeticFaceValue
faceGradMag = numerix.array(faceGrad.mag)
faceGradMag = numerix.where(faceGradMag > 1e-10,
faceGradMag,
1e-10)
faceGrad = numerix.array(faceGrad)
## set faceGrad zero on exteriorFaces
exteriorFaces = self.mesh.exteriorFaces
if len(exteriorFaces.value) > 0:
faceGrad[..., exteriorFaces.value] = 0.
return faceGrad / faceGradMag
def _test():
import fipy.tests.doctestPlus
return fipy.tests.doctestPlus.testmod()
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
b44857c46d895f4857faeac4e3deb4dff8e60872 | c7a1406b2230acaf412542124ef744c83171fa9a | /perdiem/campaign/apps.py | d14c5e8fe339d634cbe6cded1f0c73ad9cefa0c4 | [] | no_license | GilbertRoy/perdiem-django | de2f1351088597fb2b5e739388f28ff346e5e824 | 3d1f00b21a28f71cb89e49986d07b893e5abe1d9 | refs/heads/master | 2020-03-14T03:30:21.445845 | 2018-04-21T21:44:20 | 2018-04-21T21:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.apps import AppConfig
class CampaignConfig(AppConfig):
name = 'campaign'
def ready(self):
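        # Importing the signals module registers its receivers as an import
        # side effect (the standard Django pattern for wiring up signals).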
import campaign.signals
| [
"[email protected]"
] | |
e4e396bdb9c9ff1453ea79cb8ca39725235f75db | 8e1f493ce9fc34b42637bc7d69560aab20c384a3 | /simple_filter/scripts/simple_kalman.py | 7f10fadc9c8eb8b01238b66e7502fb534f4f3abd | [] | no_license | AmyPhung/comprobo20 | 6f980a82174b3938527fb5939cdd539420aaff42 | 2eff4918275542d2d28828df97c8100d2391cfb0 | refs/heads/master | 2023-04-28T21:49:52.085491 | 2021-05-17T22:27:14 | 2021-05-17T22:27:14 | 290,074,022 | 0 | 0 | null | 2020-08-25T00:47:21 | 2020-08-25T00:47:21 | null | UTF-8 | Python | false | false | 4,471 | py | #!/usr/bin/env python3
"""
This script implements a Kalman filter for the system:
x_0 ~ N(0, sigma_sq)
x_t = x_{t-1} + w_t, w_t ~ N(0, sigma_m_sq)
z_t = x_t + v_t, v_t ~ N(0, sigma_z_sq)
"""
import matplotlib.pyplot as plt
import rospy
from numpy import arange
from numpy.random import randn
from math import e, sqrt, pi
from dynamic_reconfigure.server import Server
from simple_filter.cfg import SimpleKalmanConfig
class SimpleWorld(object):
""" A simple system with dynamics:
x_0 ~ N(0, sigma_sq)
x_t = x_{t-1} + w_t, w_t ~ N(0, sigma_m_sq)
z_t = x_t + v_t, v_t ~ N(0, sigma_z_sq)
"""
def __init__(self, mu_0, sigma_0, sigma_m_sq, sigma_z_sq):
""" the initial state is sampled from N(mu_0, sigma_0).
the movement noise is sigma_m_sq and the measurement noise is sigma_z_sq
"""
self.x_true = mu_0 + sqrt(sigma_0)*randn()
self.sigma_m_sq = sigma_m_sq
self.sigma_z_sq = sigma_z_sq
def get_z_t(self):
""" Sample an observation centered at x_true plus Gaussian noise
with variance sigma_sq_z and mean 0 """
return self.x_true + sqrt(self.sigma_z_sq)*randn()
def get_x_t(self):
""" Sample next system state as the current system state plus Gaussian
noise with variance sigma_sq_m and mean 0 """
self.x_true = self.x_true + sqrt(self.sigma_m_sq)*randn()
return self.x_true
class SimpleKalmanFilter(object):
""" A Kalman filter node that estimates a single state x_t using noisy position measurements """
def __init__(self):
""" Sets up the world model and loads initial parameters """
rospy.init_node('simple_kalman')
plt.ion()
# initial beliefs
self.mu = 0
self.sigma_sq = 1
# motor noise
sigma_m_sq = rospy.get_param('~sigma_m_sq', 0.01)
# observation noise
sigma_z_sq = rospy.get_param('~sigma_z_sq', .1)
# time to pause between plots
self.pause_time = rospy.get_param('~pause_time', 0.5)
self.graphs = None
self.world = SimpleWorld(self.mu, self.sigma_sq, sigma_m_sq, sigma_z_sq)
srv = Server(SimpleKalmanConfig, self.config_callback)
def config_callback(self, config, level):
""" Get the pause_time, movement noise, and measurement noise """
self.pause_time = config['pause_time']
self.world.sigma_m_sq = config['sigma_m_sq']
self.world.sigma_z_sq = config['sigma_z_sq']
return config
def run(self):
while not rospy.is_shutdown():
# Graph new observation from the system
z_t = self.world.get_z_t()
self.graphs = self.plot_pdf(z_t)
# Do Kalman updates
K_t = (self.sigma_sq + self.world.sigma_m_sq)/(self.sigma_sq + self.world.sigma_m_sq + self.world.sigma_z_sq)
self.mu = self.mu + K_t*(z_t - self.mu)
self.sigma_sq = (1-K_t)*(self.sigma_sq+self.world.sigma_m_sq)
plt.pause(self.pause_time)
self.graphs = self.plot_pdf(z_t)
# sample next state
self.world.get_x_t()
plt.pause(self.pause_time)
def plot_pdf(self, z):
""" Plot the Gaussian PDF with the specified mean (mu) and variance (sigma_sq)
x_true is the true system state which will be plotted in blue
z is the current observation which will be plotted in red """
xs = arange(min(-5,z-2,self.world.x_true-2), max(5,z+2,self.world.x_true+2), .005)
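        # Evaluate the belief density N(mu, sigma_sq) pointwise over xs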
p_of_x = [1./sqrt(2*pi*self.sigma_sq)*e**(-(x - self.mu)**2/(2*self.sigma_sq)) for x in xs]
plt.xlim([min(xs), max(xs)])
if self.graphs:
self.graphs[0].set_xdata(xs)
self.graphs[0].set_ydata(p_of_x)
self.graphs[1].set_xdata(self.world.x_true)
self.graphs[2].set_xdata(z)
else:
self.graphs = []
self.graphs.append(plt.plot(xs, p_of_x)[0])
self.graphs.append(plt.plot(self.world.x_true, 0,'b.')[0])
self.graphs.append(plt.plot(z, 0,'r.')[0])
self.graphs[1].set_markersize(20)
self.graphs[2].set_markersize(20)
plt.ylim([0, 5])
plt.legend(('probability density','true position','measured position'))
plt.show(block=False)
return self.graphs
if __name__ == '__main__':
node = SimpleKalmanFilter()
node.run()
| [
"[email protected]"
] | |
77d284e2b345dc9df82af95355126cbf386ca2fd | a74a0317d8b8e1cf5135cbd0821617f70c8879ca | /old/python_resume/file.py | a0b117dbafe07691e133591f387574ae6e1beeb9 | [] | no_license | chuck1/python-resume | cbd3c0eb2fe3d0894b3809a2ac1526d171d6afc2 | 5b83fa831525faba17f72173cfff9c2155bd21fc | refs/heads/master | 2021-01-10T19:14:08.036676 | 2017-01-04T01:23:03 | 2017-01-04T01:23:03 | 42,127,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | import os
import json
class Manager(object):
def __init__(self, root):
self.root = root
def get_path(self, filename):
return os.path.join(self.root, filename)
def read_text(self, filename):
path = self.get_path(filename)
with open(path, 'r') as f:
text = f.read()
return text
def write_text(self, filename, text):
path = self.get_path(filename)
#try:
#os.makedirs(os.path.dirname(path))
#except:
# pass
#fd = os.open(path, os.O_WRONLY, 0666)
#os.fchmod(fd,0666)
#os.close(fd)
with open(path, 'w') as f:
f.write(text)
def read_json(self, filename):
try:
text = self.read_text(filename)
        except (IOError, OSError):
text = "{}"
j = json.loads(text)
return j
def write_json(self, filename, j):
text = json.dumps(j)
self.write_text(filename, text)
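# Illustrative usage (editorial addition; assumes the root directory already
# exists, since write_text's makedirs call is commented out):
#   m = Manager('/tmp/resume-data')
#   m.write_json('profile.json', {'name': 'Ada'})
#   profile = m.read_json('profile.json')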
| [
"[email protected]"
] | |
ce72b7bb239177efb435d6cc7d06c93e1377518a | 3fa8eead6e001c4d5a6dc5b1fd4c7b01d7693292 | /ros _navigation_in_5_days/src/initialize_particles/scripts/init_particles_caller.py | e82c4f46632c1080b34ac406afdbf5a7b7ed4ca5 | [] | no_license | MarzanShuvo/Ros_from_the_construct | 09261902841cdd832672658947790ec5fbba4cd3 | 4798234284d9d0bab3751e9d8ac2df95ae34a5bf | refs/heads/master | 2023-08-24T17:28:09.182113 | 2021-10-23T07:57:02 | 2021-10-23T07:57:02 | 339,105,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | #! /usr/bin/env python
import rospy
from std_srvs.srv import Empty, EmptyRequest
import sys
rospy.init_node('service_client')
rospy.wait_for_service('/global_localization')
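# AMCL's /global_localization service scatters the particle cloud uniformly
# over the map's free space, re-initializing the localization estimate.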
disperse_particles_service = rospy.ServiceProxy('/global_localization', Empty)
msg = EmptyRequest()
result = disperse_particles_service(msg)
print(result) | [
"[email protected]"
] | |
a50e4c86ed9764db44777c7fcb47ec51f6780d04 | 6b2dcf691bc7f019d86270ec0588f5232fc3e2b0 | /inflearn_practice/section7/최대점수 구하기.py | e8be936e34aa64f6cf45a818cae04129b1c64022 | [] | no_license | limgeonho/Algorithm | 02c55fbf5b09b718dbc2aee83a887143d121ddaf | 3d4d1ccd6ee3c52dc36ac3cf5f681690fcfdb6ab | refs/heads/master | 2023-06-01T21:05:00.100998 | 2021-06-21T15:04:26 | 2021-06-21T15:04:26 | 371,552,176 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #최대점수 구하기
# Find the maximum score: for each problem, choose to solve it (O) or skip it (X) within the time limit
def DFS(L, time, sum):
global res
if time > m:
return
if L == n:
if sum > res:
res = sum
else:
DFS(L+1, time + t[L], sum + v[L])
DFS(L+1, time, sum)
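# DFS explores both branches at depth L: solve problem L (gain score v[L],
# spend time t[L]) or skip it; res tracks the best total within time limit m.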
n, m = map(int, input().split())
v= list()
t = list()
for _ in range(n):
a, b = map(int, input().split())
v.append(a)
t.append(b)
res = -2147000000
DFS(0, 0, 0)
print(res) | [
"[email protected]"
] | |
a2ba4afc7c10c24b24bd646ab7250dcd81777313 | 0d9dd4ac458ac954e453e6f7810ca5e1c759f82d | /list | fb6c972de61fce49941283ab222a8e272a50cc63 | [
"MIT"
] | permissive | ovkulkarni/create-repo | 9335307481686c8109baae7d88cd819dd7ca0cb6 | 0073cd761106e0c5453429204e8da56ba249eb1d | refs/heads/master | 2021-01-10T05:20:54.898788 | 2016-03-30T14:14:03 | 2016-03-30T14:15:06 | 53,800,960 | 0 | 1 | null | 2016-03-14T02:44:39 | 2016-03-13T18:35:40 | Python | UTF-8 | Python | false | false | 3,590 | #!/usr/bin/env python3
######################################################################################
# #
#The MIT License (MIT) #
# #
#Copyright (c) 2016 Omkar Kulkarni #
# #
#Permission is hereby granted, free of charge, to any person obtaining a copy #
#of this software and associated documentation files (the "Software"), to deal #
#in the Software without restriction, including without limitation the rights #
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
#copies of the Software, and to permit persons to whom the Software is #
#furnished to do so, subject to the following conditions: #
# #
#The above copyright notice and this permission notice shall be included in all #
#copies or substantial portions of the Software. #
# #
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
#SOFTWARE. #
# #
######################################################################################
import requests
import yaml
import sys
import json
import getpass
import os
from termcolor import colored
try:
current_dir = os.path.dirname(os.path.realpath(__file__))
with open(current_dir + '/config.yml', 'r') as f:
config = yaml.load(f.read())
password = getpass.getpass("Enter your Github Password: ")
session = requests.Session()
session.auth = (config["username"], password)
url = 'https://api.github.com/user/repos'
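    # Note (editorial): this endpoint is paginated; GitHub returns only the
    # first 30 repositories by default unless per_page/page parameters are set.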
print('[=] Sending request to Github...')
r = session.get(url)
if r.status_code == 200:
returned = json.loads(r.text)
for item in returned:
if item["private"]:
print(colored("[PRIVATE] {} - {}".format(item["full_name"], item["html_url"]), "red"))
else:
print("{} - {}".format(item["full_name"], item["html_url"]))
else:
print("[-] Unable to access repositories. Github returned an error of {}".format(r.status_code))
print("[-] Here is the full content Github returned: {}".format(json.loads(r.text)["message"]))
except KeyboardInterrupt as e:
print("\nExiting...")
sys.exit()
except requests.ConnectionError as e:
print("\n[-] Not Connected To Internet!")
print("Exiting...")
sys.exit()
except BaseException as e:
print("\nReceived an error of {}".format(str(e)))
print("Exiting...")
sys.exit()
| [
"[email protected]"
] | ||
2e3a3c24699f253c7671d55206bcd6aa7151e478 | 5522054c40e9a35b68351bfa546c2e9fffd01340 | /mobileoperators/settings.py | 9fc9e98b471b6627856ba177fb3dccefadbf3c3f | [] | no_license | thepylot/Mobile-Networks-App | 6ee36243c4861063da8b1c086fc0db882a27cb09 | 4893b810b697e399564e1fb1bb6f738b61950b76 | refs/heads/master | 2021-11-27T14:22:13.843167 | 2019-02-08T10:37:25 | 2019-02-08T10:37:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | """
Django settings for mobileoperators project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qhe1goa#897=s5hq^ci--vyby&2ty8wp_2t4dq!85u1iq%3kgb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'208.68.36.230',
'127.0.0.1',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mobile',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mobileoperators.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mobileoperators.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# ======================================================================
# File: tapiriik/urls.py
# (repo: patricksan/tapiriik, Apache-2.0 / BSD-3-Clause)
# ======================================================================
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'tapiriik.web.views.dashboard', name='dashboard'),
url(r'^auth/redirect/(?P<service>[^/]+)$', 'tapiriik.web.views.oauth.authredirect', {}, name='oauth_redirect', ),
url(r'^auth/redirect/(?P<service>[^/]+)/(?P<level>.+)$', 'tapiriik.web.views.oauth.authredirect', {}, name='oauth_redirect', ),
url(r'^auth/return/(?P<service>[^/]+)$', 'tapiriik.web.views.oauth.authreturn', {}, name='oauth_return', ),
url(r'^auth/return/(?P<service>[^/]+)/(?P<level>.+)$', 'tapiriik.web.views.oauth.authreturn', {}, name='oauth_return', ), # django's URL magic couldn't handle the equivalent regex
url(r'^auth/login/(?P<service>.+)$', 'tapiriik.web.views.auth_login', {}, name='auth_simple', ),
url(r'^auth/login-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_login_ajax', {}, name='auth_simple_ajax', ),
url(r'^auth/persist-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_persist_extended_auth_ajax', {}, name='auth_persist_extended_auth_ajax', ),
url(r'^auth/disconnect/(?P<service>.+)$', 'tapiriik.web.views.auth_disconnect', {}, name='auth_disconnect', ),
url(r'^auth/disconnect-ajax/(?P<service>.+)$', 'tapiriik.web.views.auth_disconnect_ajax', {}, name='auth_disconnect_ajax', ),
url(r'^auth/logout$', 'tapiriik.web.views.auth_logout', {}, name='auth_logout', ),
url(r'^account/setemail$', 'tapiriik.web.views.account_setemail', {}, name='account_set_email', ),
url(r'^account/settz$', 'tapiriik.web.views.account_settimezone', {}, name='account_set_timezone', ),
url(r'^account/configure$', 'tapiriik.web.views.account_setconfig', {}, name='account_set_config', ),
url(r'^account/rollback/?$', 'tapiriik.web.views.account_rollback_initiate', {}, name='account_rollback_initiate', ),
url(r'^account/rollback/(?P<task_id>.+)$', 'tapiriik.web.views.account_rollback_status', {}, name='account_rollback_status', ),
url(r'^rollback$', 'tapiriik.web.views.rollback_dashboard', {}, name='rollback_dashboard', ),
url(r'^configure/save/(?P<service>.+)?$', 'tapiriik.web.views.config.config_save', {}, name='config_save', ),
url(r'^configure/dropbox$', 'tapiriik.web.views.config.dropbox', {}, name='dropbox_config', ),
url(r'^configure/flow/save/(?P<service>.+)?$', 'tapiriik.web.views.config.config_flow_save', {}, name='config_flow_save', ),
url(r'^settings/?$', 'tapiriik.web.views.settings.settings', {}, name='settings_panel', ),
url(r'^dropbox/browse-ajax/?$', 'tapiriik.web.views.dropbox.browse', {}, name='dropbox_browse_ajax', ),
url(r'^dropbox/browse-ajax/(?P<path>.+)?$', 'tapiriik.web.views.dropbox.browse', {}, name='dropbox_browse_ajax', ),
url(r'^sync/status$', 'tapiriik.web.views.sync_status', {}, name='sync_status'),
url(r'^sync/activity$', 'tapiriik.web.views.sync_recent_activity', {}, name='sync_recent_activity'),
url(r'^sync/schedule/now$', 'tapiriik.web.views.sync_schedule_immediate', {}, name='sync_schedule_immediate'),
url(r'^sync/errors/(?P<service>[^/]+)/clear/(?P<group>.+)$', 'tapiriik.web.views.sync_clear_errorgroup', {}, name='sync_clear_errorgroup'),
url(r'^activities$', 'tapiriik.web.views.activities_dashboard', {}, name='activities_dashboard'),
url(r'^activities/fetch$', 'tapiriik.web.views.activities_fetch_json', {}, name='activities_fetch_json'),
url(r'^sync/remote_callback/trigger_partial_sync/(?P<service>.+)$', 'tapiriik.web.views.sync_trigger_partial_sync_callback', {}, name='sync_trigger_partial_sync_callback'),
url(r'^diagnostics/$', 'tapiriik.web.views.diag_dashboard', {}, name='diagnostics_dashboard'),
url(r'^diagnostics/queue$', 'tapiriik.web.views.diag_queue_dashboard', {}, name='diagnostics_queue_dashboard'),
url(r'^diagnostics/errors$', 'tapiriik.web.views.diag_errors', {}, name='diagnostics_errors'),
url(r'^diagnostics/error/(?P<error>.+)$', 'tapiriik.web.views.diag_error', {}, name='diagnostics_error'),
url(r'^diagnostics/graphs$', 'tapiriik.web.views.diag_graphs', {}, name='diagnostics_graphs'),
url(r'^diagnostics/user/unsu$', 'tapiriik.web.views.diag_unsu', {}, name='diagnostics_unsu'),
url(r'^diagnostics/user/(?P<user>.+)$', 'tapiriik.web.views.diag_user', {}, name='diagnostics_user'),
url(r'^diagnostics/payments/$', 'tapiriik.web.views.diag_payments', {}, name='diagnostics_payments'),
url(r'^diagnostics/ip$', 'tapiriik.web.views.diag_ip', {}, name='diagnostics_ip'),
url(r'^diagnostics/login$', 'tapiriik.web.views.diag_login', {}, name='diagnostics_login'),
url(r'^supported-activities$', 'tapiriik.web.views.supported_activities', {}, name='supported_activities'),
# url(r'^supported-services-poll$', 'tapiriik.web.views.supported_services_poll', {}, name='supported_services_poll'),
url(r'^payments/claim$', 'tapiriik.web.views.payments_claim', {}, name='payments_claim'),
url(r'^payments/claim-ajax$', 'tapiriik.web.views.payments_claim_ajax', {}, name='payments_claim_ajax'),
url(r'^payments/promo-claim-ajax$', 'tapiriik.web.views.payments_promo_claim_ajax', {}, name='payments_promo_claim_ajax'),
url(r'^payments/claim-wait-ajax$', 'tapiriik.web.views.payments_claim_wait_ajax', {}, name='payments_claim_wait_ajax'),
url(r'^payments/claim/(?P<code>[a-f0-9]+)$', 'tapiriik.web.views.payments_claim_return', {}, name='payments_claim_return'),
url(r'^payments/return$', 'tapiriik.web.views.payments_return', {}, name='payments_return'),
url(r'^payments/confirmed$', 'tapiriik.web.views.payments_confirmed', {}, name='payments_confirmed'),
url(r'^payments/ipn$', 'tapiriik.web.views.payments_ipn', {}, name='payments_ipn'),
url(r'^payments/external/(?P<provider>[^/]+)/refresh$', 'tapiriik.web.views.payments_external_refresh', {}, name='payments_external_refresh'),
url(r'^ab/begin/(?P<key>[^/]+)$', 'tapiriik.web.views.ab_web_experiment_begin', {}, name='ab_web_experiment_begin'),
url(r'^privacy$', 'tapiriik.web.views.privacy.privacy', name='privacy'),
url(r'^garmin_connect_users$', TemplateView.as_view(template_name='static/garmin_connect_users.html'), name='garmin_connect_users'),
url(r'^faq$', TemplateView.as_view(template_name='static/faq.html'), name='faq'),
url(r'^credits$', TemplateView.as_view(template_name='static/credits.html'), name='credits'),
url(r'^contact$', TemplateView.as_view(template_name='static/contact.html'), name='contact'),
# Examples:
# url(r'^$', 'tapiriik.views.home', name='home'),
# url(r'^tapiriik/', include('tapiriik.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += staticfiles_urlpatterns()
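
# Example of name-based reversal against the patterns above ('strava' is an
# illustrative service name, not one defined in this file):
#   reverse('oauth_return', kwargs={'service': 'strava'}) -> '/auth/return/strava'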
# ======================================================================
# File: CMGTools/ZJetsTutorial/python/samples/run2012/diboson.py
# (repo: perrozzi/cmg-cmssw, no license)
# ======================================================================
import CMGTools.RootTools.fwlite.Config as cfg
# exclusive madgraph samples
# -- -- -- -- -- -- -- --
WWJetsTo2L2Nu = cfg.MCComponent(
name = 'WWJetsTo2L2Nu',
files = [],
xSection = 5.824, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZJetsTo2L2Q = cfg.MCComponent(
name = 'WZJetsTo2L2Q',
files = [],
xSection = 2.207, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZJetsTo3LNu = cfg.MCComponent(
name = 'WZJetsTo3LNu',
files = [],
xSection = 1.058, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo2L2Nu = cfg.MCComponent(
name = 'ZZJetsTo2L2Nu',
files = [],
xSection = 0.716, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo2L2Q = cfg.MCComponent(
name = 'ZZJetsTo2L2Q',
files = [],
xSection = 2.502, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZJetsTo4L = cfg.MCComponent(
name = 'ZZJetsTo4L',
files = [],
xSection = 0.181, #PG from twiki: https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorkingSummer2012#MC_samples_and_cross_sections
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
mc_diboson_xcl = [
WWJetsTo2L2Nu,
WZJetsTo2L2Q,
WZJetsTo3LNu,
ZZJetsTo2L2Nu,
ZZJetsTo2L2Q,
ZZJetsTo4L
]
# inclusive pythia samples
# -- -- -- -- -- -- -- --
WW = cfg.MCComponent(
name = 'WW',
files = [],
# xSection = 57.1097, # correction factor from Valentina
xSection = 54.838, #PG numbers from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
WZ = cfg.MCComponent(
name = 'WZ',
files = [],
# xSection = 32.3161,
# xSection = 32.3161 * 0.97, #PG scale factor wrt exclusive samples XS
xSection = 33.21, #PG number from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
ZZ = cfg.MCComponent(
name = 'ZZ',
files = [],
# xSection = 8.25561, # correction factor from Valentina
# xSection = 8.3 * 2.13, #PG scale factor wrt exclusive samples XS
xSection = 17.654, #PG number from Andrew
nGenEvents = 1,
triggers = [],
effCorrFactor = 1 )
# inclusive pythia samples
mc_diboson_inc = [
WW,
WZ,
ZZ
]
# exclusive madgraph samples
mc_diboson = mc_diboson_xcl
# ======================================================================
# File: xcp2k/new-cp2k.py
# (repo: superstar54/xcp2k, no license)
# ======================================================================
# -*- coding: utf-8 -*-
from __future__ import print_function
"""This module defines an ASE interface to CP2K.
Developed on the basis of
1) pycp2k by Singroup
https://github.com/SINGROUP/pycp2k
2) ase.calculator.cp2k by Ole Schuett
https://gitlab.com/ase/ase/blob/master/ase/calculators/cp2k.py
3) jasp by John Kitchin
https://github.com/jkitchin/jasp
Before running, two environment variables should be set:
1) $CP2K_DATA_DIR, path of the directory containing the basis set files (basis set, pseudo_potential, ...)
2) $ASE_CP2K_COMMAND, pointing to the command launching cp2k e.g. 'cp2k.sopt' or 'mpirun -n 4 cp2k.ssmp'.
For more information about cp2k, please visit:
http://www.cp2k.org
Author: Xing Wang <[email protected]>
"""
import sys
from subprocess import Popen, PIPE
import os
from os.path import join, isfile, split, islink
import numpy as np
import ase.io
from ase import Atoms, Atom
from ase.calculators.calculator import FileIOCalculator, all_changes, Parameters
from ase.units import Rydberg
from xcp2k.cp2k_tools import *
from xcp2k.cp2krc import *
from scipy.constants import physical_constants, c, h, hbar, e
from xcp2k.classes._CP2K_INPUT1 import _CP2K_INPUT1
from xcp2k.inputparser import CP2KInputParser
import logging
import traceback
logger = logging.getLogger('CP2K')
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# logger.setLevel(logging.DEBUG)
class CP2K(FileIOCalculator):
"""ASE-Calculator for CP2K.
CP2K is a program to perform atomistic and molecular simulations of solid
state, liquid, molecular, and biological systems. It provides a general
framework for different methods such as e.g., density functional theory
(DFT) using a mixed Gaussian and plane waves approach (GPW) and classical
pair and many-body potentials.
CP2K is freely available under the GPL license.
It is written in Fortran 2003 and can be run efficiently in parallel.
Check http://www.cp2k.org about how to obtain and install CP2K.
Make sure that you also have the CP2K-shell available, since it is required
by the CP2K-calulator.
Arguments:
debug: bool
Flag to enable debug mode. Default is ``False``.
nodes: int
        Number of nodes used for the calculation. Default is ``1``.
env: str
System of the Cluster.
Default is ``SLURM``.
"""
name = 'cp2k'
implemented_properties = ['energy', 'energies', 'forces', 'stress', 'charges', 'frequencies']
def __init__(self, restart=None, mode = 0, label = 'cp2k', ignore_bad_restart_file=False,
queue = None,
atoms=None, command=None,
debug=False, **kwargs):
"""Construct CP2K-calculator object."""
# {'nodes': None, 'ntasks-per-node': None, partition': None, 'account': None, 'time': '01:00:00'},
FileIOCalculator.__init__(self, restart, ignore_bad_restart_file,
label, atoms, **kwargs)
self.prefix = label.split('/')[-1]
self.directory = './' + label[0:-len(self.prefix)]
self.set_queue(queue)
if debug:
logger.setLevel(logging.DEBUG)
self.CP2K_INPUT = _CP2K_INPUT1()
self._debug = debug
self.out = None
self.inp = None
self.symmetry = None
self.results = {}
self.parameters = {} # calculational parameters
self.atoms = None
self.positions = None
if atoms is not None:
atoms.calc = self
self.atoms = atoms
self.natoms = len(atoms)
def set_queue(self, queue = None):
command = os.environ.get('ASE_CP2K_COMMAND')
if queue:
# Write the file
if not os.path.exists(self.directory):
os.makedirs(self.directory)
with open('%s.job_file' % self.directory, 'w') as fh:
fh.writelines("#!/bin/bash\n")
fh.writelines("#SBATCH --job-name=%s \n" % self.prefix)
fh.writelines("#SBATCH --output=%s.out\n" % self.prefix)
fh.writelines("#SBATCH --error=%s.err\n" % self.prefix)
fh.writelines("#SBATCH --wait\n")
for key, value in queue.items():
if value:
fh.writelines("#SBATCH --%s=%s\n" %(key, value))
fh.writelines('''export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK \n
module load CP2K/6.1-intel-2018a \n
ulimit -s unlimited\n
export ASE_CP2K_COMMAND="mpirun cp2k.popt -i cp2k.inp -o cp2k.out"\n
export CP2K_DATA_DIR=/home/ubelix/dcb/xw20n572/apps/cp2k-7.1.0/data\n''')
fh.writelines("%s \n" % command)
self.command = "sbatch {0}".format('.job_file')
else:
self.command = command
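
    # Example (SLURM fields are illustrative; the keys mirror the queue dict
    # hinted at in __init__: nodes/ntasks-per-node/partition/account/time):
    #   calc.set_queue({'nodes': 1, 'ntasks-per-node': 16, 'time': '01:00:00'})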
def update(self, atoms):
if self.calculation_required(atoms, ['energy']):
if (self.atoms is None or
self.atoms.positions.shape != atoms.positions.shape):
# Completely new calculation just reusing the same
                # calculator, so delete any old CP2K files found.
self.clean()
self.calculate(atoms)
def write(self, label):
'Write atoms, parameters and calculated results into restart files.'
logger.debug("Writting restart to: ", label)
self.atoms.write(label + '_restart.traj')
f = open(label + '_params.ase', 'a')
for key, val in self.parameters.items():
f.write('{0} = {1} \n'.format(key, val))
f.close()
open(label + '_results.ase', 'w').write(repr(self.results))
def read(self, label):
'Read atoms, parameters and calculated results from restart files.'
self.atoms = ase.io.read(label + '_restart.traj')
#self.parameters = Parameters.read(label + '_params.ase')
results_txt = open(label + '_results.ase').read()
self.results = eval(results_txt, {'array': np.array})
def read_inp(self, ):
#
if self.inp is None:
self.inp = join(self.directory, 'cp2k.inp')
inputparser = CP2KInputParser()
inpcalc = inputparser.parse(self, self.inp)
# print(inpcalc.CP2K_INPUT)
self.prefix = inpcalc.CP2K_INPUT.GLOBAL.Project_name
# print(inpcalc.CP2K_INPUT.FORCE_EVAL_list[0].SUBSYS.COORD.Default_keyword)
self.natoms = len(inpcalc.CP2K_INPUT.FORCE_EVAL_list[0].SUBSYS.COORD.Default_keyword)
self.inpcalc = inpcalc
# print(inputparser)
# print(calc.CP2K_INPUT)
def update_atoms(self, atoms):
"""read new geometry when ."""
# Updata atoms positions and cell
if self.CP2K_INPUT.GLOBAL.Run_type.upper() == 'GEO_OPT':
xyzfile = join(self.directory, self.prefix+'-pos-1.xyz')
atoms_sorted = ase.io.read(xyzfile)
atoms.positions = atoms_sorted.positions
self.atoms = atoms
if self.CP2K_INPUT.GLOBAL.Run_type.upper() == 'CELL_OPT':
xyzfile = join(self.directory, self.prefix+'-pos-1.xyz')
atoms_sorted = ase.io.read(xyzfile)
atoms.positions = atoms_sorted.positions
atoms.cell = self.read_cell()
self.atoms = atoms
#
def read_cell(self,):
#
cell = np.zeros([3, 3])
n = len(self.outlines)
for i in range(n):
if 'CELL| Volume' in self.outlines[i]:
for j in range(3):
data = self.outlines[i + 1 + j].split()
for icell in range(3):
cell[j, icell] = float(data[4 + icell])
return cell
#
def read_results(self, out = None):
# self.read_inp()
if not out:
self.out = join(self.directory, 'cp2k.out')
# print(self.out)
with open(self.out, 'r') as f:
self.outlines = f.readlines()
self.read_info()
converged = self.read_convergence()
if not converged:
os.system('tail -20 ' + self.out)
# raise RuntimeError('CP2K did not converge!\n' +
# 'The last lines of output are printed above ' +
# 'and should give an indication why.')
self.read_energy()
self.read_geometry()
# self.read_forces()
# self.read_time()
#self.read_stress()
#
def read_info(self):
#
energies = []
for line in self.outlines:
if line.rfind('GLOBAL| Project name') > -1:
self.prefix = line.split()[-1]
if line.rfind('NUMBER OF NEB REPLICA') > -1:
self.nimages = int(line.split()[-1])
if line.rfind('BAND TOTAL ENERGY [au]') > -1:
e = float(line.split()[-1])
energies.append(e)
self.band_total_energies = energies
def set_results(self, atoms):
#self.read(atoms)
        self.old_params = self.parameters.copy()
        self.old_directory = self.directory
        self.atoms = atoms.copy()
        self.positions = atoms.positions
        self.name = 'cp2k'
def read_convergence(self):
converged = False
for n, line in enumerate(self.outlines[-100:-1]):
if line.rfind('PROGRAM ENDED AT') > -1:
converged = True
if line.rfind('The number of warnings') > -1:
data = int(line.split()[9])
if data>0:
print(line)
return converged
def read_energy(self):
energies = []
free_energies = []
cone = physical_constants['Hartree energy in eV'][0]
#
for line in self.outlines:
if line.rfind('ENERGY|') > -1:
E0 = float(line.split()[8])*cone
energies.append(E0)
self.results['energy'] = E0
elif line.rfind('Total energy uncorrected') > -1:
F = float(line.split()[5])
free_energies.append(F)
self.results['free_energy'] = F
self.results['energies'] = energies
self.results['free_energies'] = free_energies
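
    # Sanity check for the unit conversion in read_energy above:
    # physical_constants['Hartree energy in eV'][0] is ~27.2114, so a CP2K
    # total energy of -1.0 Ha is stored as ~-27.2114 eV.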
def read_forces(self):
"""Method that reads forces from the output file.
If 'all' is switched on, the forces for all ionic steps
in the output file will be returned, in other case only the
forces for the last ionic configuration are returned."""
conf = physical_constants['atomic unit of force'][0]/physical_constants['electron volt'][0]*10**(-10)
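        # conf converts the atomic force unit (Hartree/Bohr) to eV/Angstrom:
        # 8.2387e-8 N / 1.6022e-19 (J per eV) * 1e-10 (m per Angstrom) ~= 51.422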
forces = np.zeros([self.natoms, 3])
for n, line in enumerate(self.outlines):
if line.rfind('# Atom Kind Element') > -1:
try :
for iatom in range(self.natoms):
data = self.outlines[n + iatom + 1].split()
for iforce in range(3):
forces[iatom, iforce] = float(data[3 + iforce])*conf
                except Exception:
                    print('read forces error, cp2k run may have been interrupted')
self.results['forces'] = forces
def read_bader_charge(self, filename = None, atoms = None):
if filename is None:
filename = 'ACF.dat'
# if 'ACF.dat' is None:
# os.system('bader *.cube')
if atoms is None:
atoms = self.atoms
natoms = len(atoms)
bader_charge = np.zeros([natoms])
with open(filename, 'r') as f:
lines = f.readlines()
for iatom in range(natoms):
data = lines[iatom + 2].split()
bader_charge[iatom] = float(data[4])
self.results['bader_charge'] = bader_charge
def read_charges_moments(self):
"""Method that reads charges from the output file.
"""
self.get_number_of_spins()
index = 4
if self.spin == 2:
index = 5
for n, line in enumerate(self.outlines):
if line.rfind('Mulliken Population Analysis') > -1:
charges = []
moments = []
for iatom in range(self.natoms):
data = self.outlines[n + iatom + 3].split()
charges.append([iatom, data[1], float(data[index])])
if self.spin == 2:
moments.append([iatom, data[1], float(data[index + 1])])
self.results['charges-M'] = charges
self.results['moments-M'] = moments
#
for n, line in enumerate(self.outlines):
if line.rfind('Hirshfeld Charges') > -1:
charges = []
moments = []
for iatom in range(self.natoms):
data = self.outlines[n + iatom + 3].split()
charges.append([iatom, data[1], float(data[index + 1])])
if self.spin == 2:
moments.append([iatom, data[1], float(data[index])])
self.results['charges-H'] = charges
self.results['moments-H'] = moments
def read_stress(self):
stress = None
for n, line in enumerate(self.outlines):
if (line.rfind('STRESS TENSOR [GPa]') > -1):
stress = []
for i in [n + 3, n + 4, n + 5]:
data = self.outlines[i].split()
stress += [float(data[1]), float(data[2]), float(data[3])]
# rearrange in 6-component form and return
self.results['stress'] = np.array([stress[0], stress[4], stress[8],
stress[5], stress[2], stress[1]])
def read_time(self):
for n, line in enumerate(self.outlines):
if (line.rfind('TOTAL TIME') > -1):
time = float(self.outlines[n + 2].split()[6])
self.results['time'] = time
#
def read_frequency(self):
frequencies = []
#
# print(self.out)
for line in self.outlines:
if line.rfind('VIB|Frequency') > -1:
for f in line.split()[2:]:
frequencies.append(float(f))
self.results['frequencies'] = frequencies
def clean(self):
"""Method which cleans up after a calculation.
The default files generated by cp2k will be deleted IF this
method is called.
"""
files = ['cp2k.out']
for f in files:
try:
os.remove(f)
except OSError:
pass
def calculation_required(self, atoms, quantities):
        # Recalculate when the atoms, the parameters or the work directory
        # changed, or when no converged result is available yet.
        if (self.positions is None or
                (self.atoms != atoms) or
                (self.directory != getattr(self, 'old_directory', None)) or
                (self.parameters != getattr(self, 'old_params', None)) or
                not getattr(self, 'converged', False)):
            return True
        return False
def set_atoms(self, atoms):
if (atoms != self.atoms):
self.converged = None
self.atoms = atoms.copy()
def get_atoms(self):
atoms = self.atoms.copy()
atoms.set_calculator(self)
return atoms
def read_version(self):
version = None
for line in self.outlines:
            if line.find('CP2K| version string') != -1:  # find the first occurrence
                version = "CP2K version " + line.split()[-1]
break
return version
def get_time(self):
return self.results['time']
def get_forces(self, atoms):
self.update(atoms)
return self.results['forces']
def get_charges(self, atoms):
self.update(atoms)
return self.results['charges']
def get_stress(self, atoms):
self.update(atoms)
if self.stress is None:
raise NotImplementedError
return self.stress
def create_cell(self, CELL, atoms):
"""Creates the cell for a SUBSYS from an ASE Atoms object.
Creates the cell unit vectors and replicates the periodic boundary
conditions. Notice that this doesn't affect the PBCs used for
electrostatics! (use create_poisson())
args:
subsys: pycp2k.parsedclasses._subsys1
The SUBSYS for which the cell is created.
atoms: ASE Atoms
The ASE Atoms object from which the cell is extracted.
"""
cell = atoms.get_cell()
A = cell[0, :]
B = cell[1, :]
C = cell[2, :]
CELL.A = A.tolist()
CELL.B = B.tolist()
CELL.C = C.tolist()
pbc = atoms.get_pbc()
periodicity = []
if pbc[0]:
periodicity.append("X")
if pbc[1]:
periodicity.append("Y")
if pbc[2]:
periodicity.append("Z")
if len(periodicity) == 0:
CELL.Periodic = "NONE"
else:
CELL.Periodic = "".join(periodicity)
#
if hasattr(atoms, 'symmetry'):
CELL.Symmetry = atoms.symmetry
    def create_coord(self, COORD, atoms, molnames=None, symbol=True):
"""Creates the atomic coordinates for a SUBSYS from an ASE Atoms object.
args:
subsys: pycp2k.parsedclasses._subsys1
The SUBSYS for which the coordinates are created.
atoms: ASE Atoms
Atoms from which the coordinates are extracted.
molnames: list of strings
The MOLNAME for each atom in correct order
"""
atom_list = []
for i_atom, atom in enumerate(atoms):
if symbol:
if hasattr(atoms, 'kinds'):
new_atom = [atoms.kinds[i_atom], atom.position[0], atom.position[1], atom.position[2]]
else:
new_atom = [atom.symbol, atom.position[0], atom.position[1], atom.position[2]]
else:
new_atom = [atom.position[0], atom.position[1], atom.position[2]]
if molnames is not None:
new_atom.append(molnames[i_atom])
atom_list.append(new_atom)
COORD.Default_keyword = atom_list
def create_constraint(self, constraint, atoms, molnames=None):
"""Creates the atomic coordinates for a SUBSYS from an ASE Atoms object.
args:
subsys: pycp2k.parsedclasses._subsys1
The SUBSYS for which the coordinates are created.
atoms: ASE Atoms
Atoms from which the coordinates are extracted.
molnames: list of strings
The MOLNAME for each atom in correct order
"""
#write constraint
from ase.constraints import FixAtoms, FixScaled
self.natoms = len(atoms)
sflags = np.zeros((self.natoms, 3), dtype=bool)
sflags_all = []
if self.atoms.constraints:
for constr in self.atoms.constraints:
if isinstance(constr, FixScaled):
sflags[constr.a] = constr.mask
elif isinstance(constr, FixAtoms):
sflags_all = sflags_all + constr.index.tolist()
# this is the same like "kind" module
for iatom, atom in enumerate(self.atoms):
fixed = ''.join([a for a, b in zip('XYZ', sflags[iatom]) if b])
if len(fixed) != 0:
fixed_atoms = constraint.FIXED_ATOMS_add()
fixed_atoms.Components_to_fix = fixed
fixed_atoms.List = iatom + 1
fixed_lists = ''.join(' ' + str(x + 1) for x in sflags_all)
#print(sflags_all)
if len(sflags_all) != 0:
fixed_atoms = constraint.FIXED_ATOMS_add()
fixed_atoms.List = fixed_lists
def create_poisson(self, poisson, atoms):
"""Creates the periodicity for a POISSON section and tries to guess a
good solver.
args:
poisson: pycp2k.parsedclasses._poisson1
The poisson section from DFT or MM for which the periodicity is
created.
atoms: ASE Atoms
The atoms from which the periodicity is extracted.
"""
# Define periodicity
pbc = atoms.get_pbc()
if sum(pbc) == 0:
poisson.Periodic = "NONE"
else:
poisson.Periodic = pbc[0]*"X" + pbc[1]*"Y" + pbc[2]*"Z"
def write_input(self, atoms, properties=None, system_changes=None):
"""Creates an input file for CP2K executable from the object tree
defined in CP2K_INPUT.
"""
#self.old_input = self.new_input
#print("write_input_file")
self.pre_write_input_file()
SUBSYS = self.CP2K_INPUT.FORCE_EVAL_list[0].SUBSYS
CONSTRAINT = self.CP2K_INPUT.MOTION.CONSTRAINT
# write atoms
self.create_cell(SUBSYS.CELL, self.atoms)
self.create_coord(SUBSYS.COORD, self.atoms)
self.create_constraint(CONSTRAINT, self.atoms)
# write Kind
#kinds = dict([(s.Section_parameters, s) for s in SUBSYS.KIND_list])
#print(kinds)
#for elem in set(self.atoms.get_chemical_symbols()):
# if elem not in kinds.keys():
# KIND = SUBSYS.KIND_add(elem) # Section_parameters can be provided as argument.
# KIND.Basis_set = "DZVP-MOLOPT-SR-GTH"
# KIND.Potential = "GTH-PBE"
input_contents = self.CP2K_INPUT._print_input(-1)
# Write the file
if len(self.prefix) > 0 and not os.path.exists(self.directory):
os.makedirs(self.directory) # cp2k expects dirs to exist
with open(join(self.directory, 'cp2k.inp'), 'w') as input_file:
input_file.write(input_contents)
def pre_write_input_file(self):
"""Creates an input file for CP2K executable from the object tree
defined in CP2K_INPUT.
"""
#self.old_input = self.new_input
#print("write_input_file")
GLOBAL = self.CP2K_INPUT.GLOBAL
FORCE_EVAL = self.CP2K_INPUT.FORCE_EVAL_list[0]
DFT = FORCE_EVAL.DFT
SCF = DFT.SCF
# project name
GLOBAL.Project_name = self.prefix
if GLOBAL.Run_type is None:
            GLOBAL.Run_type = self.parameters['global']['RUN_TYPE']
#
if not FORCE_EVAL.Method:
FORCE_EVAL.Method = "Quickstep"
# xc functional
#if self.params['xc']['XC'] is not None:
# DFT.XC.XC_FUNCTIONAL.Section_parameters = self.params['xc']['XC']
# forces
#calc_forces = ['ENERGY_FORCE', 'GEO_OPT', 'CELL_OPT', 'MD']
#if GLOBAL.Run_type.upper() in calc_forces:
# self.CP2K_INPUT.FORCE_EVAL_list[0].PRINT.FORCES.Section_parameters = "ON"
# ***todo
#self.CP2K_INPUT.FORCE_EVAL_list[0].PRINT.FORCES.Filename = "forces"
# basis_set
#if not DFT.Basis_set_file_name:
# DFT.Basis_set_file_name = "BASIS_MOLOPT"
#if not DFT.Potential_file_name:
# DFT.Potential_file_name = "POTENTIAL"
def get_atomic_kinds(self):
"""Returns number of atomic kind.
['O', 'C']
"""
kinds = {}
# print(self.out)
nk = 0
for line in self.outlines:
if line.rfind('Atomic kind:') > -1:
nk += 1
kind = line.split()[3]
na = int(line.split()[-1])
flag=True
for k, e in kinds.items():
# print(k, e)
if e[0]==kind:
flag=False
kinds[k][1] = na
if flag:
kinds[nk] = [kind, na]
print(kinds)
self.kinds = kinds
def get_fermi_level(self):
"""Return the Fermi level."""
energies = []
free_energies = []
cone = physical_constants['Hartree energy in eV'][0]
#
# print(self.out)
for line in self.outlines:
if line.rfind('Fermi Energy') > -1 and line.rfind('eV') > -1:
Ef = float(line.split()[-1])
if line.rfind('Fermi Energy') > -1 and line.rfind('eV') == -1:
Ef = float(line.split()[-1])*cone
self.Ef = Ef
#
def read_bandgap(self,):
"""Return the Fermi level."""
#
# print(self.out)
bandgap = 10000000
for line in self.outlines:
if line.rfind('HOMO - LUMO gap') > -1:
tempe = float(line.split()[-1])
if tempe < bandgap:
bandgap = tempe
self.bandgap = bandgap
def get_number_of_spins(self):
"""Returns number of spins.
1 if not spin-polarized
2 if spin-polarized
"""
# print(self.out)
for line in self.outlines:
if line.rfind('DFT| Spin') > -1:
method = line.split()[-1]
break
if method=='UKS':
spin=2
else:
spin=1
self.spin = spin
def read_geometry(self, prefix = None):
atoms = None
if prefix:
self.prefix = prefix
# print(self.prefix)
filename = self.directory + '/{0}.in'.format(self.prefix)
filename1 = self.directory + '/{0}-pos-1.xyz'.format(self.prefix)
# print(filename)
if os.path.isfile(filename):
atoms = ase.io.read(filename)
atoms.wrap()
elif os.path.isfile(filename1):
atoms = ase.io.read(filename1)
atoms.wrap()
atoms.cell = self.read_cell()
self.results['geometry'] = atoms
        return atoms

# ======================================================================
# File: web_flask/6-number_odd_or_even.py
# (repo: ledbagholberton/AirBnB_clone_v2, no license)
# ======================================================================
#!/usr/bin/python3
""" start a Flask Web application
"""
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_hbnb():
""" Print Hello HBNB """
return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def only_hbnb():
""" Print HBNB """
return 'HBNB'
@app.route('/c/<text>', strict_slashes=False)
def cissome(text):
""" Print C + <name> without underscore """
return("C {}".format(text.replace("_", " ")))
@app.route('/python/', strict_slashes=False)
@app.route('/python', strict_slashes=False)
def pythonalone():
""" Print Python is cool ...by default """
return("Python is cool")
@app.route('/python/<text>', strict_slashes=False)
def pythonissome(text):
""" Print Python + <name> without underscore """
return("Python {}".format(text.replace("_", " ")))
@app.route('/number/<nummer>', strict_slashes=False)
def numberisint(nummer):
""" Print number if it s a number """
if nummer.isdigit():
return("{} is a number".format(nummer))
@app.route('/number_template/<nummer>', strict_slashes=False)
def number_template(nummer):
""" Print a template with a variable """
if nummer.isdigit():
return render_template('5-number.html', name=nummer)
else:
return render_template('no_found.html'), 404
@app.route('/number_odd_or_even/<nummer>', strict_slashes=False)
def number_even(nummer):
""" Print a template witheven or odd """
if nummer.isdigit():
if (int(nummer) % 2) == 0:
return render_template('6-number_odd_or_even.html',
name=nummer, kind="even")
else:
return render_template('6-number_odd_or_even.html',
name=nummer, kind="odd")
else:
return render_template('no_found.html'), 404
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
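
# Example requests once the server runs on 0.0.0.0:5000 (illustrative):
#   GET /number_odd_or_even/89 -> renders 6-number_odd_or_even.html ("odd")
#   GET /number_odd_or_even/32 -> renders 6-number_odd_or_even.html ("even")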
# ======================================================================
# File: package/awesome_panel/database/settings.py
# (repo: jlstevens/awesome-panel, Apache-2.0 / BSD-3-Clause)
# ======================================================================
"""In this module we provide a list of settings"""
GITHUB_URL = "https://github.com/MarcSkovMadsen/awesome-panel/"
GITHUB_BLOB_MASTER_URL = "https://github.com/MarcSkovMadsen/awesome-panel/blob/master/"
GITHUB_RAW_URL = "https://raw.githubusercontent.com/MarcSkovMadsen/awesome-panel/master/"
GITHUB_THUMBNAILS_URL = (
"https://github.com/MarcSkovMadsen/awesome-panel/blob/master/assets/images/thumbnails/"
)
THUMBNAILS_ROOT = "assets/images/thumbnails/"
# ======================================================================
# File: src/main/python/systemds/operator/algorithm/builtin/l2svm.py
# (repo: clarapueyoballarin/systemds, Apache-2.0)
# ======================================================================
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/l2svm.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def l2svm(X: OperationNode, Y: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
"""
:param X: matrix X of feature vectors
    :param Y: matrix Y of class labels; has to be a single column
:param intercept: No Intercept ( If set to TRUE then a constant bias column is added to X)
:param epsilon: Procedure terminates early if the reduction in objective function value is less than epsilon (tolerance) times the initial objective function value.
:param lambda: Regularization parameter (lambda) for L2 regularization
:param maxIterations: Maximum number of conjugate gradient iterations
:param maxii: -
:param verbose: Set to true if one wants print statements updating on loss.
    :param columnId: The column id used if one wants to add an id to the print statement, specifically useful when L2SVM is used in MSVM.
:return: 'OperationNode' containing model matrix
"""
X._check_matrix_op()
Y._check_matrix_op()
params_dict = {'X':X, 'Y':Y}
params_dict.update(kwargs)
return Matrix(X.sds_context, 'l2svm', named_input_nodes=params_dict)
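
# Minimal usage sketch (synthetic data; SystemDSContext/from_numpy are the
# standard SystemDS Python context API and are assumptions outside this file):
#
# import numpy as np
# from systemds.context import SystemDSContext
# with SystemDSContext() as sds:
#     X = sds.from_numpy(np.random.rand(100, 10))
#     Y = sds.from_numpy(np.random.choice([-1.0, 1.0], size=(100, 1)))
#     model = l2svm(X, Y, maxIterations=20).compute()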
# ======================================================================
# File: build/car_szenario/cmake/car_szenario-genmsg-context.py
# (repo: jbenzhhn/carla_hhn, no license)
# ======================================================================
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/automotive/catkin_ws/src/car_szenario/msg/RoadInfo.msg"
services_str = ""
pkg_name = "car_szenario"
dependencies_str = "std_msgs;geometry_msgs;nav_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "car_szenario;/home/automotive/catkin_ws/src/car_szenario/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg;nav_msgs;/opt/ros/melodic/share/nav_msgs/cmake/../msg;actionlib_msgs;/opt/ros/melodic/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
# ======================================================================
# File: nba_api/stats/endpoints/boxscoremiscv2.py
# (repo: Fragadule/nba_api, MIT)
# ======================================================================
from nba_api.stats.endpoints._base import Endpoint
from nba_api.stats.library.http import NBAStatsHTTP
from nba_api.stats.library.parameters import EndPeriod, EndRange, RangeType, StartPeriod, StartRange
class BoxScoreMiscV2(Endpoint):
endpoint = 'boxscoremiscv2'
expected_data = {'sqlPlayersMisc': ['GAME_ID', 'TEAM_ID', 'TEAM_ABBREVIATION', 'TEAM_CITY', 'PLAYER_ID', 'PLAYER_NAME', 'START_POSITION', 'COMMENT', 'MIN', 'PTS_OFF_TOV', 'PTS_2ND_CHANCE', 'PTS_FB', 'PTS_PAINT', 'OPP_PTS_OFF_TOV', 'OPP_PTS_2ND_CHANCE', 'OPP_PTS_FB', 'OPP_PTS_PAINT', 'BLK', 'BLKA', 'PF', 'PFD'], 'sqlTeamsMisc': ['GAME_ID', 'TEAM_ID', 'TEAM_NAME', 'TEAM_ABBREVIATION', 'TEAM_CITY', 'MIN', 'PTS_OFF_TOV', 'PTS_2ND_CHANCE', 'PTS_FB', 'PTS_PAINT', 'OPP_PTS_OFF_TOV', 'OPP_PTS_2ND_CHANCE', 'OPP_PTS_FB', 'OPP_PTS_PAINT', 'BLK', 'BLKA', 'PF', 'PFD']}
def __init__(self,
game_id,
end_period=EndPeriod.default,
end_range=EndRange.default,
range_type=RangeType.default,
start_period=StartPeriod.default,
start_range=StartRange.default):
self.nba_response = NBAStatsHTTP().send_api_request(
endpoint=self.endpoint,
parameters={
'GameID': game_id,
'EndPeriod': end_period,
'EndRange': end_range,
'RangeType': range_type,
'StartPeriod': start_period,
'StartRange': start_range
},
)
data_sets = self.nba_response.get_data_sets()
self.data_sets = [Endpoint.DataSet(data=data_set) for data_set_name, data_set in data_sets.items()]
self.sql_players_misc = Endpoint.DataSet(data=data_sets['sqlPlayersMisc'])
self.sql_teams_misc = Endpoint.DataSet(data=data_sets['sqlTeamsMisc'])
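
# Example usage sketch ('0021800854' is an illustrative game id; constructing
# the object performs a live request against stats.nba.com):
# box = BoxScoreMiscV2(game_id='0021800854')
# teams = box.sql_teams_misc.get_data_frame()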
# ======================================================================
# File: ribosome/components/internal/mapping.py
# (repo: tek/ribosome-py, MIT)
# ======================================================================
from typing import Callable
from amino import do, curried, Do, __, _, Either
from amino.lenses.lens import lens
from amino.logging import module_log
from ribosome.nvim.io.state import NS
from ribosome.data.plugin_state import PluginState
from ribosome.nvim.io.compute import NvimIO
from ribosome.compute.program import Program
from ribosome.config.component import Components
from ribosome.nvim.api.command import nvim_command
from ribosome.data.mapping import Mapping, MapMode
log = module_log()
def mapping_handler(mapping: Mapping) -> Callable[[Components], Either[str, Program]]:
def mapping_handler(components: Components) -> Either[str, Program]:
return components.all.find_map(__.mappings.lift(mapping)).to_either(f'no handler for {mapping}')
return mapping_handler
def mapping_cmd(plugin: str, mapping: Mapping, mode: MapMode) -> NvimIO[None]:
buf = '<buffer>' if mapping.buffer else ''
keys = mapping.keys.replace('<', '<lt>')
rhs = f''':call {plugin}Map('{mapping.ident}', '{keys}')<cr>'''
return nvim_command(
f'{mode.mnemonic}map',
buf,
'<silent>',
mapping.keys,
rhs,
)
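
# For plugin name 'Myo' and a normal-mode mapping of '<space>x' the command
# built above expands to, roughly (names illustrative; '<buffer>' is added
# only when mapping.buffer is set):
#   nmap  <silent> <space>x :call MyoMap('<ident>', '<lt>space>x')<cr>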
@do(NS[PluginState, None])
def activate_mapping(mapping: Mapping) -> Do:
handler = yield NS.inspect_either(mapping_handler(mapping)).zoom(lens.components)
yield NS.modify(__.append.active_mappings((mapping.ident, handler)))
plugin = yield NS.inspect(_.camelcase_name)
yield NS.lift(mapping.modes.traverse(curried(mapping_cmd)(plugin, mapping), NvimIO))
__all__ = ('activate_mapping',)
# ======================================================================
# File: bookmanager/book/models.py
# (repo: songaiwen/Django2, MIT)
# ======================================================================
from django.db import models
"""
create your models here
定义模型类
模型迁移
操作数据库
"""
#1.定义模型需要集成model.Model
#准备书籍列表信息的模型类
class BookInfo(models.Model):
#创建字段,字段类型,自动创建主键并自动增长
name = models.CharField(max_length=20)
def __str__(self):
#将模型类以字符串的方式输出
return self.name
#准备人物列表信息的模型类
class PeopleInfo(models.Model):
name = models.CharField(max_length=20)
gender = models.BooleanField()
#外键约束,人物属于哪本书
book = models.ForeignKey(BookInfo)
def __str__(self):
return self.name
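
# Example ORM usage sketch (run makemigrations/migrate first; data illustrative):
# book = BookInfo.objects.create(name='Journey to the West')
# PeopleInfo.objects.create(name='Sun Wukong', gender=True, book=book)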
# ======================================================================
# File: aliyun-python-sdk-chatbot/aliyunsdkchatbot/request/v20171011/CreateCategoryRequest.py
# (repo: P79N6A/dysms_python, Apache-2.0)
# ======================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateCategoryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Chatbot', '2017-10-11', 'CreateCategory','beebot')
def get_ParentCategoryId(self):
return self.get_query_params().get('ParentCategoryId')
def set_ParentCategoryId(self,ParentCategoryId):
self.add_query_param('ParentCategoryId',ParentCategoryId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
        self.add_query_param('Name', Name)
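
# Example usage sketch (credentials and region below are placeholders):
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = CreateCategoryRequest()
# request.set_Name('FAQ')
# request.set_ParentCategoryId(0)
# response = client.do_action_with_exception(request)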
"[email protected]"
] | |
45bf5ef9b36c9ae30464888f0e2bd5c5942bd3b6 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py | 78d7c44ef28e450af4715332642d6a448fb9e068 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 28,300 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.SequenceQueueingStateSaver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.training.python.training import sequence_queueing_state_saver as sqss
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SequenceQueueingStateSaverTest(test.TestCase):
def testSequenceInputWrapper(self):
with self.cached_session():
length = 3
key = "key"
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
input_wrapper = sqss._SequenceInputWrapper(length, key, sequences,
context)
self.assertTrue(isinstance(input_wrapper.length, ops.Tensor))
self.assertTrue(isinstance(input_wrapper.key, ops.Tensor))
self.assertTrue(isinstance(input_wrapper.sequences["seq1"], ops.Tensor))
self.assertTrue(isinstance(input_wrapper.sequences["seq2"], ops.Tensor))
self.assertTrue(isinstance(input_wrapper.context["context1"], ops.Tensor))
def testStateSaverWithTwoSimpleSteps(self):
with self.cached_session() as sess:
batch_size_value = 2
batch_size = constant_op.constant(batch_size_value)
num_unroll = 2
length = 3
key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()),
dtypes.int32))
])
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
capacity=100)
initial_key_value_0, _ = sess.run((key, state_saver.prefetch_op))
initial_key_value_1, _ = sess.run((key, state_saver.prefetch_op))
initial_key_value_0 = initial_key_value_0.decode("ascii")
initial_key_value_1 = initial_key_value_1.decode("ascii")
# Step 1
next_batch = state_saver.next_batch
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.context["context1"],
next_batch.state("state1"), next_batch.state("state2"),
next_batch.length,
next_batch.save_state("state1", next_batch.state("state1") + 1),
next_batch.save_state("state2", next_batch.state("state2") - 1)))
expected_first_keys = set(
("00000_of_00002:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
expected_second_keys = set(
("00001_of_00002:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
expected_final_keys = set(
("STOP:%s" % x).encode("ascii")
for x in (initial_key_value_0, initial_key_value_1))
self.assertEqual(set(key_value), expected_first_keys)
self.assertEqual(set(next_key_value), expected_second_keys)
self.assertAllEqual(context1_value,
np.tile(context["context1"], (batch_size_value, 1)))
self.assertAllEqual(seq1_value,
np.tile(sequences["seq1"][np.newaxis, 0:2, :],
(batch_size_value, 1, 1)))
self.assertAllEqual(seq2_value,
np.tile(sequences["seq2"][np.newaxis, 0:2, :, :],
(batch_size_value, 1, 1, 1)))
self.assertAllEqual(state1_value,
np.tile(initial_states["state1"],
(batch_size_value, 1, 1)))
self.assertAllEqual(state2_value,
np.tile(initial_states["state2"],
(batch_size_value, 1)))
self.assertAllEqual(length_value, [2, 2])
# Step 2
(key_value, next_key_value, seq1_value, seq2_value, context1_value,
state1_value, state2_value, length_value, _, _) = sess.run(
(next_batch.key, next_batch.next_key, next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.context["context1"],
next_batch.state("state1"), next_batch.state("state2"),
next_batch.length,
next_batch.save_state("state1", next_batch.state("state1") + 1),
next_batch.save_state("state2", next_batch.state("state2") - 1)))
self.assertEqual(set(key_value), expected_second_keys)
self.assertEqual(set(next_key_value), expected_final_keys)
self.assertAllEqual(context1_value,
np.tile(context["context1"], (batch_size_value, 1)))
self.assertAllEqual(seq1_value,
np.tile(sequences["seq1"][np.newaxis, 2:4, :],
(batch_size_value, 1, 1)))
self.assertAllEqual(seq2_value,
np.tile(sequences["seq2"][np.newaxis, 2:4, :, :],
(batch_size_value, 1, 1, 1)))
self.assertAllEqual(state1_value, 1 + np.tile(initial_states["state1"],
(batch_size_value, 1, 1)))
self.assertAllEqual(state2_value, -1 + np.tile(initial_states["state2"],
(batch_size_value, 1)))
self.assertAllEqual(length_value, [1, 1])
# Finished. Let's make sure there's nothing left in the barrier.
self.assertEqual(0, state_saver.barrier.ready_size().eval())
def testStateSaverFailsIfPaddedLengthIsNotMultipleOfNumUnroll(self):
with self.cached_session() as sess:
batch_size = constant_op.constant(32)
num_unroll = 17
bad_padded_length = 3
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
with self.assertRaisesOpError(
"should be a multiple of: 17, but saw value: %d" % bad_padded_length):
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(bad_padded_length, 5),
initial_states["state1"]: 1.0
})
def _testStateSaverFailsIfCapacityTooSmall(self, batch_size):
with self.cached_session() as sess:
num_unroll = 2
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None,))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
capacity=10)
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
sequences["seq2"]: np.random.rand(num_unroll),
initial_states["state1"]: 1.0
})
def testStateSaverFailsIfCapacityTooSmallTensor(self):
batch_size_value = 32
batch_size = constant_op.constant(batch_size_value)
with self.assertRaisesOpError(
".*capacity needs to be >= batch_size.*"):
self._testStateSaverFailsIfCapacityTooSmall(batch_size)
def testStateSaverFailsIfCapacityTooSmallInt(self):
batch_size = 32
with self.assertRaisesRegexp(
ValueError,
"capacity %d needs to be >= batch_size %d" % (10, batch_size)):
self._testStateSaverFailsIfCapacityTooSmall(batch_size)
def testStateSaverFailsIfInconsistentPaddedLength(self):
with self.cached_session() as sess:
batch_size = constant_op.constant(32)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None,))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
with self.assertRaisesOpError(
"Dimension 0 of tensor labeled sorted_sequences_seq2 "
"should be: %d, shape received: %d" % (num_unroll, 2 * num_unroll)):
sess.run([state_saver.prefetch_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
sequences["seq2"]: np.random.rand(2 * num_unroll),
initial_states["state1"]: 1.0
})
def testStateSaverFailsIfInconsistentWriteState(self):
# TODO(b/26910386): Identify why this infrequently causes timeouts.
with self.cached_session() as sess:
batch_size = constant_op.constant(1)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
with self.assertRaisesRegexp(KeyError, "state was not declared: state2"):
save_op = next_batch.save_state("state2", None)
with self.assertRaisesRegexp(ValueError, "Rank check failed for.*state1"):
save_op = next_batch.save_state("state1", np.random.rand(1, 1))
with self.assertRaisesOpError(
r"convert_state1:0 should be: 1, shape received:\] \[1 1\]"):
state_input = array_ops.placeholder(dtypes.float32)
with ops.control_dependencies([state_saver.prefetch_op]):
save_op = next_batch.save_state("state1", state_input)
sess.run([save_op],
feed_dict={
length: 1,
key: "key",
sequences["seq1"]: np.random.rand(num_unroll, 5),
initial_states["state1"]: 1.0,
state_input: np.random.rand(1, 1)
})
def testStateSaverWithManyInputsReadWriteThread(self):
batch_size_value = 32
num_proc_threads = 100
with self.cached_session() as sess:
batch_size = constant_op.constant(batch_size_value)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5)),
"seq2": array_ops.placeholder(
dtypes.float32, shape=(None, 4, 2)),
"seq3": array_ops.placeholder(
dtypes.float64, shape=(None,))
}
context = {
"context1": array_ops.placeholder(
dtypes.string, shape=(3, 4)),
"context2": array_ops.placeholder(
dtypes.int64, shape=())
}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=(6, 7)),
"state2": array_ops.placeholder(
dtypes.int32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
cancel_op = state_saver.close(cancel_pending_enqueues=True)
update_1 = next_batch.save_state("state1", 1 + next_batch.state("state1"))
update_2 = next_batch.save_state("state2",
-1 + next_batch.state("state2"))
original_values = {}
def insert(which):
for i in range(20):
# Insert varying length inputs
pad_i = num_unroll * (1 + (i % 10))
length_i = int(np.random.rand() * pad_i)
key_value = "key_%02d_%04d" % (which, i)
stored_state = {
"length": length_i,
"seq1": np.random.rand(pad_i, 5),
"seq2": np.random.rand(pad_i, 4, 2),
"seq3": np.random.rand(pad_i),
"context1": np.random.rand(3, 4).astype(np.str),
"context2": np.asarray(
100 * np.random.rand(), dtype=np.int32),
"state1": np.random.rand(6, 7),
"state2": np.asarray(
100 * np.random.rand(), dtype=np.int32)
}
original_values[key_value] = stored_state
sess.run([state_saver.prefetch_op],
feed_dict={
length: stored_state["length"],
key: key_value,
sequences["seq1"]: stored_state["seq1"],
sequences["seq2"]: stored_state["seq2"],
sequences["seq3"]: stored_state["seq3"],
context["context1"]: stored_state["context1"],
context["context2"]: stored_state["context2"],
initial_states["state1"]: stored_state["state1"],
initial_states["state2"]: stored_state["state2"]
})
processed_count = [0]
def process_and_check_state():
next_batch = state_saver.next_batch
while True:
try:
(got_key, next_key, length, total_length, sequence, sequence_count,
context1, context2, seq1, seq2, seq3, state1, state2, _,
_) = (sess.run([
next_batch.key, next_batch.next_key, next_batch.length,
next_batch.total_length, next_batch.sequence,
next_batch.sequence_count, next_batch.context["context1"],
next_batch.context["context2"], next_batch.sequences["seq1"],
next_batch.sequences["seq2"], next_batch.sequences["seq3"],
next_batch.state("state1"), next_batch.state("state2"),
update_1, update_2
]))
except errors_impl.OutOfRangeError:
# SQSS has been closed
break
self.assertEqual(len(got_key), batch_size_value)
processed_count[0] += len(got_key)
for i in range(batch_size_value):
key_name = got_key[i].decode("ascii").split(":")[1]
          # Verify this key corresponds to an entry we actually inserted
self.assertTrue(key_name in original_values)
# The unique key matches next_key
self.assertEqual(key_name,
next_key[i].decode("ascii").split(":")[1])
# Pull out the random values we used to create this example
stored_state = original_values[key_name]
self.assertEqual(total_length[i], stored_state["length"])
self.assertEqual("%05d_of_%05d:%s" %
(sequence[i], sequence_count[i], key_name),
got_key[i].decode("ascii"))
expected_length = max(
0,
min(num_unroll,
stored_state["length"] - sequence[i] * num_unroll))
self.assertEqual(length[i], expected_length)
expected_state1 = stored_state["state1"] + sequence[i]
expected_state2 = stored_state["state2"] - sequence[i]
expected_sequence1 = stored_state["seq1"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
expected_sequence2 = stored_state["seq2"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
expected_sequence3 = stored_state["seq3"][sequence[i] * num_unroll:(
sequence[i] + 1) * num_unroll]
self.assertAllClose(state1[i], expected_state1)
self.assertAllEqual(state2[i], expected_state2)
# context1 is strings, which come back as bytes
self.assertAllEqual(context1[i].astype(np.str),
stored_state["context1"])
self.assertAllEqual(context2[i], stored_state["context2"])
self.assertAllClose(seq1[i], expected_sequence1)
self.assertAllClose(seq2[i], expected_sequence2)
self.assertAllClose(seq3[i], expected_sequence3)
# Total number of inserts will be a multiple of batch_size
insert_threads = [
self.checkedThread(
insert, args=(which,)) for which in range(batch_size_value)
]
process_threads = [
self.checkedThread(process_and_check_state)
for _ in range(num_proc_threads)
]
for t in insert_threads:
t.start()
for t in process_threads:
t.start()
for t in insert_threads:
t.join()
time.sleep(3) # Allow the threads to run and process for a while
cancel_op.run()
for t in process_threads:
t.join()
      # On average, each inserted entry was split into more than two
      # processed sequence segments
self.assertGreater(processed_count[0], 2 * 20 * batch_size_value)
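
  # The barrier hands batches out in strict insertion order: both the decoded
  # keys and the raw insertion indices must advance one full batch per step.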
def testStateSaverProcessesExamplesInOrder(self):
with self.cached_session() as sess:
batch_size_value = 32
batch_size = constant_op.constant(batch_size_value)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
update = next_batch.save_state("state1", 1 + next_batch.state("state1"))
get_ready_size = state_saver.barrier.ready_size()
get_incomplete_size = state_saver.barrier.incomplete_size()
global_insert_key = [0]
def insert(insert_key):
# Insert varying length inputs
sess.run([state_saver.prefetch_op],
feed_dict={
length: np.random.randint(2 * num_unroll),
key: "%05d" % insert_key[0],
sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
context["context1"]: np.random.rand(3, 4).astype(np.str),
initial_states["state1"]: 0.0
})
insert_key[0] += 1
for _ in range(batch_size_value * 100):
insert(global_insert_key)
def process_and_validate(check_key):
true_step = int(check_key[0] / 2) # Each entry has two slices
check_key[0] += 1
got_keys, input_index, _ = sess.run(
[next_batch.key, next_batch.insertion_index, update])
decoded_keys = [int(x.decode("ascii").split(":")[-1]) for x in got_keys]
min_key = min(decoded_keys)
min_index = int(min(input_index)) # numpy scalar
max_key = max(decoded_keys)
max_index = int(max(input_index)) # numpy scalar
      # Keys must cover exactly this step's slice of the insertion order
self.assertEqual(min_key, true_step * batch_size_value)
self.assertEqual(max_key, (true_step + 1) * batch_size_value - 1)
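      # Barrier insertion indices start at -2**63 (the smallest int64), so
      # adding 2**63 recovers the zero-based insertion order.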
self.assertEqual(2**63 + min_index, true_step * batch_size_value)
self.assertEqual(2**63 + max_index,
(true_step + 1) * batch_size_value - 1)
# There are now (batch_size * 100 * 2) / batch_size = 200 full steps
global_step_key = [0]
for _ in range(200):
process_and_validate(global_step_key)
# Processed everything in the queue
self.assertEqual(get_incomplete_size.eval(), 0)
self.assertEqual(get_ready_size.eval(), 0)
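
  # batch_size may itself be a placeholder, so a different batch size can be
  # requested on every run call.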
def testStateSaverCanHandleVariableBatchsize(self):
with self.cached_session() as sess:
batch_size = array_ops.placeholder(dtypes.int32)
num_unroll = 17
length = array_ops.placeholder(dtypes.int32)
key = array_ops.placeholder(dtypes.string)
sequences = {
"seq1": array_ops.placeholder(
dtypes.float32, shape=(None, 5))
}
context = {"context1": array_ops.placeholder(dtypes.string, shape=(3, 4))}
initial_states = {
"state1": array_ops.placeholder(
dtypes.float32, shape=())
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states)
next_batch = state_saver.next_batch
update = next_batch.save_state("state1", 1 + next_batch.state("state1"))
for insert_key in range(128):
# Insert varying length inputs
sess.run([state_saver.prefetch_op],
feed_dict={
length: np.random.randint(2 * num_unroll),
key: "%05d" % insert_key,
sequences["seq1"]: np.random.rand(2 * num_unroll, 5),
context["context1"]: np.random.rand(3, 4).astype(np.str),
initial_states["state1"]: 0.0
})
all_received_indices = []
# Pull out and validate batch sizes 0, 1, ..., 7
for batch_size_value in range(8):
got_keys, input_index, context1, seq1, state1, _ = sess.run(
[
next_batch.key, next_batch.insertion_index,
next_batch.context["context1"], next_batch.sequences["seq1"],
next_batch.state("state1"), update
],
feed_dict={batch_size: batch_size_value})
# Indices may have come in out of order within the batch
all_received_indices.append(input_index.tolist())
self.assertEqual(got_keys.size, batch_size_value)
self.assertEqual(input_index.size, batch_size_value)
self.assertEqual(context1.shape, (batch_size_value, 3, 4))
self.assertEqual(seq1.shape, (batch_size_value, num_unroll, 5))
self.assertEqual(state1.shape, (batch_size_value,))
# Each input was split into 2 iterations (sequences size == 2*num_unroll)
expected_indices = [[], [0], [0, 1], [1, 2, 3], [2, 3, 4, 5],
[4, 5, 6, 7, 8], [6, 7, 8, 9, 10, 11],
[9, 10, 11, 12, 13, 14, 15]]
self.assertEqual(len(all_received_indices), len(expected_indices))
for received, expected in zip(all_received_indices, expected_indices):
self.assertAllEqual([x + 2**63 for x in received], expected)
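
  # Every op created by the state saver should live under the name scope
  # passed via the `name=` argument.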
def testStateSaverScopeNames(self):
batch_size = constant_op.constant(2)
sqss_scope_name = "unique_scope_name_for_sqss"
num_unroll = 2
length = 3
key = string_ops.string_join([
"key_", string_ops.as_string(
math_ops.cast(10000 * random_ops.random_uniform(()), dtypes.int32))
])
padded_length = 4
sequences = {
"seq1": np.random.rand(padded_length, 5),
"seq2": np.random.rand(padded_length, 4, 2)
}
context = {"context1": [3, 4]}
initial_states = {
"state1": np.random.rand(6, 7),
"state2": np.random.rand(8)
}
state_saver = sqss.SequenceQueueingStateSaver(
batch_size=batch_size,
num_unroll=num_unroll,
input_length=length,
input_key=key,
input_sequences=sequences,
input_context=context,
initial_states=initial_states,
name=sqss_scope_name)
prefetch_op = state_saver.prefetch_op
next_batch = state_saver.next_batch
self.assertTrue(
state_saver.barrier.barrier_ref.name.startswith("%s/" %
sqss_scope_name))
self.assertTrue(prefetch_op.name.startswith("%s/" % sqss_scope_name))
self.assertTrue(next_batch.key.name.startswith("%s/" % sqss_scope_name))
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
1a6f968edf5fdb4c61c2389a23c364c8b3fffc69 | c11c337d4f2a609326fe8545c70dafb918ad8110 | /maintenance/mtrack/scripts.py | 757f45ca27e71c689cf94262e0725a2cd3a0d47e | [
"MIT",
"BSD-2-Clause"
] | permissive | summertriangle-dev/arposandra | 7b7f62b63cebe07c6b3b24321a0d01623dfed2b3 | d4fcbec32e86a96c7d810d3d146695eb0b384889 | refs/heads/master | 2023-07-25T02:55:37.534890 | 2023-07-07T01:05:12 | 2023-07-07T01:18:02 | 213,795,406 | 19 | 4 | NOASSERTION | 2023-03-04T05:48:36 | 2019-10-09T01:48:47 | Python | UTF-8 | Python | false | false | 3,905 | py | # TODO: I haven't looked very carefully at optimizing these queries.
# May want to come back after a couple years and see how they're doing.
# We sort based on latest release.
def update_set_sort_table():
return f"""
INSERT INTO card_p_set_index_v2__sort_dates
(SELECT representative, server_id, MAX(date) FROM card_index_v1__release_dates
INNER JOIN card_p_set_index_v2__card_ids ON (id = card_ids)
GROUP BY (representative, server_id))
ON CONFLICT (representative, server_id) DO UPDATE SET
date = excluded.date;
WITH rd AS (
SELECT representative, (CASE WHEN MIN(date) < '2020-08-05 08:00:00'::timestamp THEN 0 ELSE 1 END) AS have_shio
FROM card_index_v1__release_dates
INNER JOIN card_p_set_index_v2__card_ids ON (id = card_ids)
WHERE server_id = 'jp'
GROUP BY (representative)
)
UPDATE card_p_set_index_v2 SET nijigasaki_member_state =
(SELECT have_shio FROM rd WHERE rd.representative = card_p_set_index_v2.representative)
WHERE nijigasaki_member_state IS NULL;
    -- Second pass: default any set that has no recorded release date.
UPDATE card_p_set_index_v2 SET nijigasaki_member_state = 0
WHERE nijigasaki_member_state IS NULL
"""
# Tries to set the release date based on feature list from newly added history
# records. If a card was released without a feature and featured later, the
# date will be set wrong. This won't happen though. In theory...
def update_card_release_dates(prefix):
return f"""
WITH rdates AS (
SELECT DISTINCT ON (card_id, {prefix}history_v5__dates.serverid)
card_id, {prefix}history_v5__dates.serverid, {prefix}history_v5__dates.date
FROM {prefix}history_v5__card_ids
INNER JOIN {prefix}history_v5__dates ON (
{prefix}history_v5__dates.id = {prefix}history_v5__card_ids.id
AND {prefix}history_v5__card_ids.serverid = {prefix}history_v5__dates.serverid
AND type = (CASE
WHEN what = 2 THEN 1
WHEN what = 3 THEN 2
WHEN what = 4 THEN 2
ELSE 2
END)
)
ORDER BY card_id, {prefix}history_v5__dates.serverid, date
)
INSERT INTO card_index_v1__release_dates (
(SELECT card_id, serverid, date FROM rdates)
) ON CONFLICT DO NOTHING;
-- First try the entire history table, because we want the oldest source, but restrict to cards that appeared in the partial update.
UPDATE card_index_v1 SET
source = (SELECT history_v5__card_ids.what FROM history_v5__card_ids
INNER JOIN history_v5 USING (id, serverid) WHERE card_id = card_index_v1.id
ORDER BY sort_date LIMIT 1)
WHERE (SELECT what FROM {prefix}history_v5__card_ids WHERE card_id = card_index_v1.id LIMIT 1) IS NOT NULL;
-- If still null it wasn't featured before, so go ahead and use the new hist list
UPDATE card_index_v1 SET
source = (SELECT what FROM {prefix}history_v5__card_ids WHERE card_id = card_index_v1.id LIMIT 1)
WHERE source IS NULL
"""
def update_hist_event_link():
return """
WITH event_match AS (
SELECT event_v2.serverid AS sid, event_id, history_v5__dates.id AS hid FROM history_v5__dates
INNER JOIN event_v2 ON (history_v5__dates.serverid=event_v2.serverid
AND EXTRACT(epoch FROM history_v5__dates.date - event_v2.start_t) = 0)
WHERE type = 1
)
INSERT INTO history_v5__dates (
(SELECT hid, sid, 7, NULL, event_id FROM event_match)
) ON CONFLICT DO NOTHING;
"""
| [
"[email protected]"
] | |
7c66f962600a93eac899ce8e78b47514877212f8 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/numpy/core/_add_newdocs.py | aa3210020cc4228d0e9d702b9be0b9074a6b7b7f | [] | no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-numpy-1.18.3-c5tvrr2q5vwgtvc6f3ld57v6y4ahvr2h/lib/python3.7/site-packages/numpy/core/_add_newdocs.py | [
"[email protected]"
] | |
8e547b29dfdd757e9da010a8fcb2e0a74ff18ac0 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-web/azure/mgmt/web/models/hosting_environment_profile.py | de68d8bc558ed823419c5846beb0d775e9a116d2 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,438 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HostingEnvironmentProfile(Model):
"""Specification for an App Service Environment to use for this resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID of the App Service Environment.
:type id: str
:ivar name: Name of the App Service Environment.
:vartype name: str
:ivar type: Resource type of the App Service Environment.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(HostingEnvironmentProfile, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = None
self.type = None
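
# Usage sketch (not part of the generated file): only ``id`` is settable;
# ``name`` and ``type`` are read-only and populated by the service. The
# resource ID below is a made-up placeholder.
#
#   profile = HostingEnvironmentProfile(
#       id='/subscriptions/<sub>/resourceGroups/<rg>/providers'
#          '/Microsoft.Web/hostingEnvironments/<ase-name>')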
| [
"[email protected]"
] | |
e8d197368d8a83bbf36c5e39a424a7e7a44b5b7c | 632dcb4e37cadd87cb7ff8715b0048df5cd0d11b | /CompuCell3D/core/Demos/SBMLSolverExamples/SBMLSolverAntimony/SBMLSolverAntimony2/Simulation/SBMLSolverAntimony2Steppables.py | 007262772d5e9b8242557c617ed99a115ce20b47 | [
"MIT"
] | permissive | CompuCell3D/CompuCell3D | df638e3bdc96f84b273978fb479842d071de4a83 | 65a65eaa693a6d2b3aab303f9b41e71819f4eed4 | refs/heads/master | 2023-08-26T05:22:52.183485 | 2023-08-19T17:13:19 | 2023-08-19T17:13:19 | 12,253,945 | 51 | 41 | null | 2023-08-27T16:36:14 | 2013-08-20T20:53:07 | C++ | UTF-8 | Python | false | false | 4,920 | py | from cc3d.core.PySteppables import *
class SBMLSolverSteppable(SteppableBasePy):
def __init__(self, frequency=1):
SteppableBasePy.__init__(self, frequency)
def start(self):
# Antimony model string: cell type 1
model_string_type1 = """model type1()
# Model
S1 => S2; k1*S1
# Initial conditions
S1 = 0
S2 = 1
k1 = 1
end"""
# Antimony model string: cell type 2
model_string_type2 = """model type2()
# Model
S2 => S1; k2*S2
# Initial conditions
S1 = 0
S2 = 0
k2 = 1
end"""
# adding options that setup SBML solver integrator
# these are optional but useful when encountering integration instabilities
options = {'relative': 1e-10, 'absolute': 1e-12}
self.set_sbml_global_options(options)
step_size = 0.001
# Apply model strings to cell types
self.add_antimony_to_cell_types(model_string=model_string_type1, model_name='dpType1', cell_types=[self.TYPE1],
step_size=step_size)
self.add_antimony_to_cell_types(model_string=model_string_type2, model_name='dpType2', cell_types=[self.TYPE2],
step_size=step_size)
def step(self, mcs):
self.timestep_sbml()
def finish(self):
# this function may be called at the end of simulation - used very infrequently though
return
class SecretionSteppable(SecretionBasePy):
    def __init__(self, frequency=1):
SecretionBasePy.__init__(self, frequency)
def step(self, mcs):
consume_s1 = 1
consume_s2 = 1
secrete_s1 = 1
secrete_s2 = 1
field1 = self.field.Field1
field2 = self.field.Field2
for cell in self.cell_list_by_type(self.TYPE1):
this_cell_s1 = cell.sbml.dpType1['S1']
this_cell_s2 = cell.sbml.dpType1['S2']
cell_volume = cell.volume
if this_cell_s2 > 0.75:
this_secrete_s2 = secrete_s2
else:
this_secrete_s2 = 0
pixel_list = CellPixelList(self.pixelTrackerPlugin, cell)
sbml_values = cell.sbml.dpType1.values()
s1_consumed = 0
for pixel_data in pixel_list:
pt = pixel_data.pixel
field_value = field1.get(pt)
s1_consumed += field_value * consume_s1
s2_secreted = this_cell_s2 * this_secrete_s2
cell.sbml.dpType1['S1'] = this_cell_s1 + s1_consumed
cell.sbml.dpType1['S2'] = this_cell_s2 - s2_secreted
for pixel_data in pixel_list:
pt = pixel_data.pixel
field1_val = field1.get(pt) - s1_consumed / cell_volume
field2_val = field2.get(pt) + s2_secreted / cell_volume
field1.set(pt, field1_val)
field2.set(pt, field2_val)
for cell in self.cell_list_by_type(self.TYPE2):
this_cell_s1 = cell.sbml.dpType2['S1']
this_cell_s2 = cell.sbml.dpType2['S2']
cell_volume = cell.volume
if this_cell_s1 > 0.75:
this_secrete_s1 = secrete_s1
else:
this_secrete_s1 = 0
pixel_list = CellPixelList(self.pixelTrackerPlugin, cell)
s2_consumed = 0
for pixel_data in pixel_list:
pt = pixel_data.pixel
field_value = field2.get(pt)
s2_consumed += field_value * consume_s2
S1_secreted = this_cell_s1 * this_secrete_s1
cell.sbml.dpType2['S1'] = this_cell_s1 - S1_secreted
cell.sbml.dpType2['S2'] = this_cell_s2 + s2_consumed
for pixel_data in pixel_list:
pt = pixel_data.pixel
field1_val = field1.get(pt) + S1_secreted / cell_volume
field2_val = field2.get(pt) - s2_consumed / cell_volume
field1.set(pt, field1_val)
field2.set(pt, field2_val)
# Demo: accessing SBML values for further manipulation/coupling with other components
class IdFieldVisualizationSteppable(SteppableBasePy):
def __init__(self, frequency=1):
SteppableBasePy.__init__(self, frequency)
self.create_scalar_field_cell_level_py("IdFieldS1")
self.create_scalar_field_cell_level_py("IdFieldS2")
def step(self, mcs):
id_field_s1 = self.field.IdFieldS1
id_field_s2 = self.field.IdFieldS2
for cell in self.cell_list_by_type(self.TYPE1):
id_field_s1[cell] = cell.sbml.dpType1['S1']
id_field_s2[cell] = cell.sbml.dpType1['S2']
for cell in self.cell_list_by_type(self.TYPE2):
id_field_s1[cell] = cell.sbml.dpType2['S1']
id_field_s2[cell] = cell.sbml.dpType2['S2']
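
# Registration sketch (illustrative only; in a CC3D project this normally
# lives in the simulation's main script, not in the steppables module):
#
#   from cc3d import CompuCellSetup
#   CompuCellSetup.register_steppable(steppable=SBMLSolverSteppable(frequency=1))
#   CompuCellSetup.register_steppable(steppable=SecretionSteppable(frequency=1))
#   CompuCellSetup.register_steppable(steppable=IdFieldVisualizationSteppable(frequency=1))
#   CompuCellSetup.run()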
| [
"[email protected]"
] | |
d09732d24ed5c663b058fda1ea1a9f991a6ba5c1 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/5823ce64bfec97cdfa0a781253795a6945d469f3-<diff>-bug.py | a06396edf7b6ec24911dd1c5288c295e62b8976d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | @set_module('mxnet.symbol.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None):
    """
    Calculate the n-th discrete difference along the given axis.

    Parameters
    ----------
    a : ndarray
        Input array
    n : int, optional
        The number of times values are differenced. If zero, the input is
        returned as-is.
    axis : int, optional
        The axis along which the difference is taken, default is the last axis.
    prepend, append : ndarray, optional
        Not supported yet

    Returns
    -------
    diff : ndarray
        The n-th differences.
        The shape of the output is the same as a except along axis where the
        dimension is smaller by n.
        The type of the output is the same as the type of the difference
        between any two elements of a. This is the same as the type of a in
        most cases.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])

    >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
    >>> np.diff(x)
    array([[2, 3, 4],
           [5, 1, 2]])
    >>> np.diff(x, axis=0)
    array([[-1, 2, 0, -2]])

    Notes
    -----
    Optional inputs `prepend` and `append` are not supported yet
    """
    if prepend or append:
        raise NotImplementedError('prepend and append options are not supported yet')
    return _npi.diff(a, n=n, axis=axis) | [
"[email protected]"
] | |
95d5c5cadc9fb00f3c1f71d28ec0233c15f404b7 | 5a1e1756025bacae88b619d388ebf61b330001ab | /1.Class/Language_Python-master/Language_Python-master/LC4_HW3.py | 7acbc1e8e531e1ec64cb6af03598dee0507db0cb | [] | no_license | reshmaladi/Python | d1953497703aa15e163cd8ac27be23e3e5c3e947 | 8e9092af63476fef35d221e20acf418983957e53 | refs/heads/master | 2021-10-15T00:55:08.136039 | 2021-10-01T14:32:16 | 2021-10-01T14:32:16 | 165,836,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | x = input("Enter a 1st string \t")
y = input("Enter a 2nd string \t")
print("Swap : \n" + y[0:2] +x[2:] + "\n" + x[0:2] + y[2:]) | [
"[email protected]"
] | |
765503c6c7b8f463814da29afd3332f8f03d5e40 | a053d60e2c84750cf1c51142bfdf6dec5048bf25 | /demo.py | 4cfc291b204313593868375ac5df2099451fc16d | [] | no_license | Sharpiless/paddlex-driver-state-recognition | ed57e58bebcdccc19302dcb49e950dd66be9ed45 | 81f81f72e9b893c8adca8f9aaba3615dc7aff7c7 | refs/heads/master | 2023-03-18T23:02:15.255664 | 2020-06-02T15:42:38 | 2020-06-02T15:42:38 | 268,839,488 | 2 | 3 | null | 2021-03-07T13:43:21 | 2020-06-02T15:32:52 | Java | UTF-8 | Python | false | false | 1,480 | py | import matplotlib
import paddlex as pdx
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
from facedet import FaceDet
fontC = ImageFont.truetype('./platech.ttf', 20, 0)
def drawText(img, addText, x1, y1):
    # Render the label text onto a PIL image and return it as a numpy array.
    color = (20, 255, 20)
    draw = ImageDraw.Draw(img)
    draw.text((x1, y1), addText, color, font=fontC)
    return np.array(img)
save_dir = './best_model'
model = pdx.load_model(save_dir)
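# The c0-c9 labels below follow the State Farm Distracted Driver Detection
# class scheme.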
classes = {'c0': 'normal driving',
'c1': 'texting-right',
'c2': 'talking on the phone-right',
'c3': 'texting-left',
'c4': 'talking on the phone-left',
'c5': 'operating the radio',
'c6': 'drinking',
'c7': 'reaching behind',
'c8': 'hair and makeup',
'c9': 'talking to passenger'}
base = './test_images'
det = FaceDet(thread=0.1)
for im in os.listdir(base):
pt = os.path.join(base, im)
result = model.predict(pt)
print(result)
lbl = classes[result[0]['category']]+' '+str(result[0]['score'])
image = cv2.imread(pt)
image = det.detect(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = drawText(image, lbl, 0, 10)
plt.imshow(image)
plt.show() | [
"[email protected]"
] | |
7730f23c1fe157a139eaf71edadb4982a38877c1 | 0d39e91482abe7f40523e9e225ede5464295888f | /mitogen/unix.py | 1af1c0ec6b66522ccdaa603778a48f45502f81cc | [
"BSD-3-Clause"
] | permissive | eamanu/python-mitogen | bdccdd7ceca4f1b114bf3e28556eb0d959b008e8 | e93c7aae83b130abe1ef2dcf829d32e40f9fe8b1 | refs/heads/master | 2022-04-29T17:01:32.451975 | 2019-10-24T00:30:20 | 2019-10-24T00:45:18 | 217,181,829 | 1 | 0 | BSD-3-Clause | 2022-03-29T21:58:20 | 2019-10-24T01:02:03 | Python | UTF-8 | Python | false | false | 7,133 | py | # Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# !mitogen: minify_safe
"""
Permit connection of additional contexts that may act with the authority of
this context. For now, the UNIX socket is always mode 0600, i.e. can only be
accessed by root or the same UID. Therefore we can always trust connections to
have the same privilege (auth_id) as the current process.
"""
import errno
import logging
import os
import socket
import struct
import sys
import tempfile
import mitogen.core
import mitogen.master
LOG = logging.getLogger(__name__)
class Error(mitogen.core.Error):
"""
Base for errors raised by :mod:`mitogen.unix`.
"""
pass
class ConnectError(Error):
"""
Raised when :func:`mitogen.unix.connect` fails to connect to the listening
socket.
"""
#: UNIX error number reported by underlying exception.
errno = None
def is_path_dead(path):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
try:
s.connect(path)
except socket.error:
e = sys.exc_info()[1]
return e.args[0] in (errno.ECONNREFUSED, errno.ENOENT)
finally:
s.close()
return False
def make_socket_path():
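    # mktemp only picks an unused name here; the socket itself is created by
    # Listener.build_stream(), which binds the path and chmods it to 0600.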
return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock')
class ListenerStream(mitogen.core.Stream):
def on_receive(self, broker):
sock, _ = self.receive_side.fp.accept()
try:
self.protocol.on_accept_client(sock)
except:
sock.close()
raise
class Listener(mitogen.core.Protocol):
stream_class = ListenerStream
keep_alive = True
@classmethod
def build_stream(cls, router, path=None, backlog=100):
if not path:
path = make_socket_path()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(path) and is_path_dead(path):
LOG.debug('%r: deleting stale %r', cls.__name__, path)
os.unlink(path)
sock.bind(path)
os.chmod(path, int('0600', 8))
sock.listen(backlog)
stream = super(Listener, cls).build_stream(router, path)
stream.accept(sock, sock)
router.broker.start_receive(stream)
return stream
def __repr__(self):
return '%s.%s(%r)' % (
__name__,
self.__class__.__name__,
self.path,
)
def __init__(self, router, path):
self._router = router
self.path = path
def _unlink_socket(self):
try:
os.unlink(self.path)
except OSError:
e = sys.exc_info()[1]
# Prevent a shutdown race with the parent process.
if e.args[0] != errno.ENOENT:
raise
def on_shutdown(self, broker):
broker.stop_receive(self.stream)
self._unlink_socket()
self.stream.receive_side.close()
def on_accept_client(self, sock):
sock.setblocking(True)
try:
pid, = struct.unpack('>L', sock.recv(4))
except (struct.error, socket.error):
LOG.error('listener: failed to read remote identity: %s',
sys.exc_info()[1])
return
context_id = self._router.id_allocator.allocate()
try:
sock.send(struct.pack('>LLL', context_id, mitogen.context_id,
os.getpid()))
except socket.error:
LOG.error('listener: failed to assign identity to PID %d: %s',
pid, sys.exc_info()[1])
return
context = mitogen.parent.Context(self._router, context_id)
stream = mitogen.core.MitogenProtocol.build_stream(
router=self._router,
remote_id=context_id,
auth_id=mitogen.context_id,
)
stream.name = u'unix_client.%d' % (pid,)
stream.accept(sock, sock)
LOG.debug('listener: accepted connection from PID %d: %s',
pid, stream.name)
self._router.register(context, stream)
def _connect(path, broker, sock):
try:
# ENOENT, ECONNREFUSED
sock.connect(path)
# ECONNRESET
sock.send(struct.pack('>L', os.getpid()))
mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12))
except socket.error:
e = sys.exc_info()[1]
ce = ConnectError('could not connect to %s: %s', path, e.args[1])
ce.errno = e.args[0]
raise ce
mitogen.parent_id = remote_id
mitogen.parent_ids = [remote_id]
LOG.debug('client: local ID is %r, remote is %r',
mitogen.context_id, remote_id)
router = mitogen.master.Router(broker=broker)
stream = mitogen.core.MitogenProtocol.build_stream(router, remote_id)
stream.accept(sock, sock)
stream.name = u'unix_listener.%d' % (pid,)
mitogen.core.listen(stream, 'disconnect', _cleanup)
mitogen.core.listen(router.broker, 'shutdown',
lambda: router.disconnect_stream(stream))
context = mitogen.parent.Context(router, remote_id)
router.register(context, stream)
return router, context
def connect(path, broker=None):
LOG.debug('client: connecting to %s', path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
return _connect(path, broker, sock)
except:
sock.close()
raise
def _cleanup():
"""
Reset mitogen.context_id and friends when our connection to the parent is
lost. Per comments on #91, these globals need to move to the Router so
fix-ups like this become unnecessary.
"""
mitogen.context_id = 0
mitogen.parent_id = None
mitogen.parent_ids = []
| [
"[email protected]"
] | |
0d568e564026f66f38a7a55aeaa4e39b6c3b6cff | 80ea4c1ce04ee8e0ecd85ee71f8bffdbcbd368aa | /iupick/settings/testing.py | 12021d836718a468cf6d733409edc755763667f5 | [
"MIT"
] | permissive | Oswaldinho24k/geo-csv | 659ad24f5e8bcecc869143a61e58b38260cc1901 | 0100435c5d5a5fd12133b376b305e8fa79ddb8f0 | refs/heads/master | 2020-03-15T21:20:34.095967 | 2018-05-06T15:45:34 | 2018-05-06T15:45:34 | 132,353,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | # -*- coding: utf-8 -*-
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
AUTH_SECRET_PREFIX = 'sk_test_' | [
"[email protected]"
] | |
6dc0ac7d042b1950915b2898b7c5223a44ba9af5 | 86d884eb096ed599c6069e2844985aa6ec30cb6b | /finite_difference/diffusion_coefficient/analyse_AHL.py | 90363c09ea189ea4f7c21ba98bc5b006d7a2c5cf | [] | no_license | zcqsntr/synbiobrain | 46e770471dcfbc5082f271c4e1e5d8b694155780 | 66758554774c087b8c19c6d50fca5ea733b607f4 | refs/heads/master | 2022-11-10T16:28:45.888929 | 2022-10-11T09:07:53 | 2022-10-11T09:07:53 | 183,600,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py |
import sys
import matplotlib.backends.backend_pdf
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
sys.path.append('/home/neythen/Desktop/Projects/synbiobrain/')
from diffusion_sim import *
os.environ['PYOPENCL_CTX'] = '0'
nx = 300
ny = 300
node_radius = 20/40
node_dim = np.array([10, 10])
grid_corners = np.array([[-10, 10], [-10, 10]])
grid = SynBioBrainFD(grid_corners, nx, ny, 'float32')
vertex_positions = np.array([grid.get_node_position(i) for i in range(grid.n_nodes)])
barriers = ['1', '0.8', '0.6', '0.4', '0.2', '0.15', '0.1', '0.05', '0.01']
all_cohesive_ts = []
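# Each entry in `barriers` selects a precomputed diffusion-factor run; load its
# GFP activation time series and count the spatially cohesive nodes over time.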
for barrier in barriers:
print(barrier)
activated_ts = np.load('/home/neythen/Desktop/Projects/synbiobrain/finite_difference/results/diffusion_factor/'+ barrier +'_barrier/output/GFP_ts.npy')
cohesive_ts = count_cohesive_nodes_FD(activated_ts, vertex_positions, node_dim, node_radius, grid_corners)
all_cohesive_ts.append(cohesive_ts)
all_cohesive_ts = np.array(all_cohesive_ts)
np.save('all_cohesive_ts.npy', all_cohesive_ts)
| [
"[email protected]"
] | |
f6dc05455cd47ae55195c50ba74336f3c0fbbd8c | 9a5505ebc6a4a9f7d710e1ef8ce488b578b63c6e | /pycon/sponsorship/migrations/0008_remove_obsolete_benefit_records.py | e8122e5fb061226ee878cdeaa3743c783bc26e75 | [
"BSD-3-Clause"
] | permissive | arpitjainn189/pycon | 9dabbfd6119a1b2a957469d40e223d063bb91494 | 492c47820d6dc546e79c707180b3c7b3925e8e72 | refs/heads/master | 2022-12-23T15:53:53.365038 | 2020-10-01T09:57:08 | 2020-10-01T09:57:08 | 300,229,565 | 0 | 0 | BSD-3-Clause | 2020-10-01T09:54:30 | 2020-10-01T09:54:29 | null | UTF-8 | Python | false | false | 1,390 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
OBSOLETE_BENEFITS = [
{'name': 'Company URL',
'type': 'simple',
},
{'name': 'Company Description',
'type': 'text',
},
{'name': 'Web logo',
'type': 'weblogo',
}
]
def forward(apps, schema_editor):
Benefit = apps.get_model('sponsorship', 'Benefit')
BenefitLevel = apps.get_model('sponsorship', 'BenefitLevel')
SponsorBenefit = apps.get_model('sponsorship', 'SponsorBenefit')
db_alias = schema_editor.connection.alias
names = [b['name'] for b in OBSOLETE_BENEFITS]
# Clean up other records that use these first
BenefitLevel.objects.using(db_alias).filter(benefit__name__in=names).delete()
SponsorBenefit.objects.using(db_alias).filter(benefit__name__in=names).delete()
# Now we can remove the Benefit records themselves
Benefit.objects.using(db_alias).filter(name__in=names).delete()
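
# Note: back() recreates the Benefit rows, but the BenefitLevel and
# SponsorBenefit rows deleted in forward() are not restored.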
def back(apps, schema_editor):
Benefit = apps.get_model('sponsorship', 'Benefit')
db_alias = schema_editor.connection.alias
for ben in OBSOLETE_BENEFITS:
Benefit.objects.using(db_alias).get_or_create(**ben)
class Migration(migrations.Migration):
dependencies = [
('sponsorship', '0007_auto_20150721_1533'),
]
operations = [
migrations.RunPython(forward, back),
]
| [
"[email protected]"
] |