blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 5-283) | content_id (string, len 40) | detected_licenses (sequence, len 0-41) | license_type (2 classes) | repo_name (string, len 7-96) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (58 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (11 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (43 classes) | src_encoding (9 classes) | language (1 value) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 7-5.88M) | extension (30 classes) | content (string, len 7-5.88M) | authors (sequence, len 1) | author (string, len 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2ce5f71e25999596911d87ad8f1dc4256adc73dd | e8dd05618fd8e9cd21c2dd403454692b9d1bcd5f | /main.py | ac7c8d065929e82b865b55cd7c7ed8f79c4fc9ae | [] | no_license | patdan10/market-price-predictor | df9fa5ce720fe0292a7fc65040ee9031125b8089 | 29ef4763b76ecc0034fecd9d19062a4e60eef1ca | refs/heads/master | 2023-07-15T21:56:01.018825 | 2021-08-24T21:41:22 | 2021-08-24T21:41:22 | 399,610,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | import pandas
import db_pull, regressions, data_formatter
def main():
# Print all columns
#pandas.set_option('display.max_columns', None)
# Get data from database
print("START")
output = db_pull.get_data()
# Alter the data slightly
print("DATA")
data_formatter.generate_demand(output)
print("FORMAT")
data_formatter.format(output)
# Fitting the data using regressions
print("DEMAND")
regressions.line_fit(output)
print("FITTED")
if __name__ == "__main__":
main()
| [
"Bluedogbluedog1!"
] | Bluedogbluedog1! |
d8cd769c9300d26c7384af39754b6fd985529f84 | 8b2b15e32a103a3261ce59409b45312cbb5ac89e | /tif-a/07-quiz/simplemultiplesend.py | b76881d260ab7c61e714cd6966b81edc167d5d9c | [] | no_license | bhawiyuga/progjar2020 | d9723b8e8d2a25ea6eea08ef9b898b40c31821ba | c1e06505cb9e25fb78fe857814e9d150633c9922 | refs/heads/master | 2021-01-13T21:00:03.235164 | 2020-03-11T05:08:07 | 2020-03-11T05:08:07 | 242,492,721 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import socket
import threading
# Initialize a TCP/IPv4 socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Send a connection request to the server
sock.connect( ("127.0.0.1", 9999) )
# Define the function executed by the receiver thread
def handleThread(conn):
    try :
        while True :
            # Receive data from the server and print it
            data = conn.recv(1024)
            if not data:
                break
            print(data.decode())
    except (socket.error, KeyboardInterrupt) :
        conn.close()
        print("Client closed the connection")
# Start a single receiver thread, then keep sending user input
clientThread = threading.Thread(target=handleThread, args=(sock,))
clientThread.start()
while True :
    # Send data to the server
    data = input("Enter the string to send : ")
    sock.sendall(data.encode())
"[email protected]"
] | |
7c4821ee92464aad4d48bcbe7222bb9aac18edef | e972351b94cf4a070b0040742c4b1cdf6ef7d7f5 | /getTodaysArticles.py | 9f8c9109d066517409e70bda21d6f934ce3a8abb | [] | no_license | aamirz/333Project_webScraper_fossil | 40d3e4d0ea1bf1ead193812760a431e6d4ef3d24 | b951ee2f9ef26de669d635ef51598f4893b16e97 | refs/heads/master | 2021-09-03T18:25:36.461906 | 2018-01-11T03:18:40 | 2018-01-11T03:18:40 | 84,675,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,206 | py | """
getTodaysArticles.py
Author: Aamir Zainulabadeen
simple script to pull today's articles from all publications
and save them to txt files as json
"""
import sys
import time
import json
import os
import errno
# libraries written for prowler
import scrapeBase as sb
import scrapePrince as sp
# fossil modules
#import scrapeNass as sn
#import scrapeTigerMag as stm
# reformat the data from the input
# c is the separating character
def gta_reformat(date, c):
month, day, year = date.split("/")
return year + c + month + c + day
# make the save path for today's article
def savePath(prefix, publication, today):
s = "/"
spath = prefix + s + publication + s + gta_reformat(today, '_')
return spath
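# Example (hypothetical values): savePath("out", "prince", "03/14/2017")
# returns "out/prince/2017_03_14", since gta_reformat("03/14/2017", "_")
# rearranges the date into "2017_03_14".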
# takes two command line args, the first is the prefix to the parent of saving
# dir, the second is the day's date (if we want to pull a day other than today)
def main():
    ## the first command line arg is the prefix to the saving directory;
    ## check it exists before indexing sys.argv, otherwise the script would
    ## crash with an IndexError before printing the message
    if len(sys.argv) < 2:
        print "you need a saving directory!"
        return 0
    prefix = str(sys.argv[1])
    if len(sys.argv) < 3:
        today = time.strftime("%m/%d/%Y")
    else:
        today = str(sys.argv[2])
# pull the daily princetonian
nPrince = sb.pull(publication="prince", date=today,
FgetUrls=sp.getPrinceUrls,
Fjsonify=sp.jsonify_page,
saveDir=savePath(prefix, "prince", today))
# pull the nassau weekly
# fossil copy
# nNass = sb.pull(publication="nass", date=gta_reformat(today, '-'),
# FgetUrls=sn.getNassUrls,
# Fjsonify=sn.jsonify_page,
# saveDir=savePath(prefix, "nass", today))
# # pull the Princeton tiger
# ntigerMag = sb.pull(publication="tigerMag", date=today, FgetUrls=stm.getAllUrls,
# Fjsonify=stm.jsonify_page, saveDir=savePath(prefix, "tigerMag", today))
# function to make a new directory taken from stackoverflow answer #2
# http://stackoverflow.com/questions/273192/how-to-check-if-a-directory-exists-and-create-it-if-necessary
def make_sure_path_exists(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
if __name__=="__main__":
main()
| [
"[email protected]"
] | |
fe91132fb11e368db30aa31e2a9f635edf22be17 | abfff8ab3162f7003b51d3fdcc7897684d2d4e54 | /c_py_extention/Lib/site-packages/setuptools/config.py | a2fed889e381ccc75df1424a05fb693c208403bc | [] | no_license | RedKnite5/Junk | 972dc24c99fe30400ab35e77bb4b69abe9076190 | 93b5bb4b6138518724528770cf56ea1df10e95b4 | refs/heads/master | 2023-04-10T07:25:14.968070 | 2023-04-04T04:19:42 | 2023-04-04T04:19:42 | 143,909,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,483 | py | from __future__ import absolute_import, unicode_literals
import ast
import io
import os
import sys
import warnings
import functools
import importlib
from collections import defaultdict
from functools import partial
from functools import wraps
import contextlib
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.extern.packaging.version import LegacyVersion, parse
from setuptools.extern.packaging.specifiers import SpecifierSet
from setuptools.extern.six import string_types, PY3
__metaclass__ = type
class StaticModule:
"""
Attempt to load the module by the name
"""
def __init__(self, name):
spec = importlib.util.find_spec(name)
with open(spec.origin) as strm:
src = strm.read()
module = ast.parse(src)
vars(self).update(locals())
del self.self
def __getattr__(self, attr):
try:
return next(
ast.literal_eval(statement.value)
for statement in self.module.body
if isinstance(statement, ast.Assign)
for target in statement.targets
if isinstance(target, ast.Name) and target.id == attr
)
except Exception as e:
raise AttributeError(
"{self.name} has no attribute {attr}".format(**locals())
) from e
@contextlib.contextmanager
def patch_path(path):
"""
Add path to front of sys.path for the duration of the context.
"""
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
def read_configuration(
filepath, find_others=False, ignore_option_errors=False):
"""Read given configuration file and returns options from it as a dict.
:param str|unicode filepath: Path to configuration file
to get options from.
:param bool find_others: Whether to search for other configuration files
which could be on in various places.
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: dict
"""
from setuptools.dist import Distribution, _Distribution
filepath = os.path.abspath(filepath)
if not os.path.isfile(filepath):
raise DistutilsFileError(
'Configuration file %s does not exist.' % filepath)
current_directory = os.getcwd()
os.chdir(os.path.dirname(filepath))
try:
dist = Distribution()
filenames = dist.find_config_files() if find_others else []
if filepath not in filenames:
filenames.append(filepath)
_Distribution.parse_config_files(dist, filenames=filenames)
handlers = parse_configuration(
dist, dist.command_options,
ignore_option_errors=ignore_option_errors)
finally:
os.chdir(current_directory)
return configuration_to_dict(handlers)
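# Usage sketch (path is hypothetical): the returned dict is keyed by the
# section prefixes handled below ('metadata' and 'options'), e.g.
#
#   conf = read_configuration('/path/to/setup.cfg')
#   conf['metadata'].get('version')          # resolved via the file:/attr: parsers
#   conf['options'].get('install_requires')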
def _get_option(target_obj, key):
"""
Given a target object and option key, get that option from
the target object, either through a get_{key} method or
from an attribute directly.
"""
getter_name = 'get_{key}'.format(**locals())
by_attribute = functools.partial(getattr, target_obj, key)
getter = getattr(target_obj, getter_name, by_attribute)
return getter()
def configuration_to_dict(handlers):
"""Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
"""
config_dict = defaultdict(dict)
for handler in handlers:
for option in handler.set_options:
value = _get_option(handler.target_obj, option)
config_dict[handler.section_prefix][option] = value
return config_dict
def parse_configuration(
distribution, command_options, ignore_option_errors=False):
"""Performs additional parsing of configuration options
for a distribution.
Returns a list of used option handlers.
:param Distribution distribution:
:param dict command_options:
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: list
"""
options = ConfigOptionsHandler(
distribution, command_options, ignore_option_errors)
options.parse()
meta = ConfigMetadataHandler(
distribution.metadata, command_options, ignore_option_errors,
distribution.package_dir)
meta.parse()
return meta, options
class ConfigHandler:
"""Handles metadata supplied in configuration files."""
section_prefix = None
"""Prefix for config sections handled by this handler.
Must be provided by class heirs.
"""
aliases = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__)
def __setitem__(self, option_name, value):
unknown = tuple()
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
current_value = getattr(target_obj, option_name, unknown)
if current_value is unknown:
raise KeyError(option_name)
if current_value:
# Already inhabited. Skipping.
return
skip_option = False
parser = self.parsers.get(option_name)
if parser:
try:
value = parser(value)
except Exception:
skip_option = True
if not self.ignore_option_errors:
raise
if skip_option:
return
setter = getattr(target_obj, 'set_%s' % option_name, None)
if setter is None:
setattr(target_obj, option_name, value)
else:
setter(value)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
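    # e.g. _parse_dict("a = 1\nb = 2") -> {'a': '1', 'b': '2'}; values stay
    # strings, and a line without '=' raises DistutilsOptionError.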
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
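    # e.g. _parse_bool('Yes') -> True; _parse_bool('0') and _parse_bool('off') -> False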
@classmethod
def _exclude_files_parser(cls, key):
"""Returns a parser function to make sure field inputs
are not files.
Parses a value after getting the key so error messages are
more informative.
:param key:
:rtype: callable
"""
def parser(value):
exclude_directive = 'file:'
if value.startswith(exclude_directive):
raise ValueError(
'Only strings are accepted for the {0} field, '
'files are not accepted'.format(key))
return value
return parser
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
file: README.rst, CHANGELOG.md, src/file.txt
:param str value:
:rtype: str
"""
include_directive = 'file:'
if not isinstance(value, string_types):
return value
if not value.startswith(include_directive):
return value
spec = value[len(include_directive):]
filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
return '\n'.join(
cls._read_file(path)
for path in filepaths
if (cls._assert_local(path) or True)
and os.path.isfile(path)
)
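    # e.g. _parse_file('file: README.rst') returns the text of README.rst;
    # paths are resolved relative to the current directory, and anything
    # outside it is rejected by _assert_local() below.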
@staticmethod
def _assert_local(filepath):
if not filepath.startswith(os.getcwd()):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
@staticmethod
def _read_file(filepath):
with io.open(filepath, encoding='utf-8') as f:
return f.read()
@classmethod
def _parse_attr(cls, value, package_dir=None):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
parent_path = os.getcwd()
if package_dir:
if attrs_path[0] in package_dir:
# A custom path was specified for the module we want to import
custom_path = package_dir[attrs_path[0]]
parts = custom_path.rsplit('/', 1)
if len(parts) > 1:
parent_path = os.path.join(os.getcwd(), parts[0])
module_name = parts[1]
else:
module_name = custom_path
elif '' in package_dir:
# A custom parent directory was specified for all root modules
parent_path = os.path.join(os.getcwd(), package_dir[''])
with patch_path(parent_path):
try:
# attempt to load value statically
return getattr(StaticModule(module_name), attr_name)
except Exception:
# fallback to simple import
module = importlib.import_module(module_name)
return getattr(module, attr_name)
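    # Typical setup.cfg usage (hypothetical package name):
    #
    #   [metadata]
    #   version = attr: mypackage.__version__
    #
    # The value is read statically via StaticModule when possible, and the
    # module is only actually imported if static parsing fails.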
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
pass # Keep silent for a new option may appear anytime.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
# Dots in section names are translated into dunderscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
def _deprecated_config_handler(self, func, msg, warning_class):
""" this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
warnings.warn(msg, warning_class)
return func(*args, **kwargs)
return config_handler
class ConfigMetadataHandler(ConfigHandler):
section_prefix = 'metadata'
aliases = {
'home_page': 'url',
'summary': 'description',
'classifier': 'classifiers',
'platform': 'platforms',
}
strict_mode = False
"""We need to keep it loose, to be partially compatible with
`pbr` and `d2to1` packages which also uses `metadata` section.
"""
def __init__(self, target_obj, options, ignore_option_errors=False,
package_dir=None):
super(ConfigMetadataHandler, self).__init__(target_obj, options,
ignore_option_errors)
self.package_dir = package_dir
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_file = self._parse_file
parse_dict = self._parse_dict
exclude_files_parser = self._exclude_files_parser
return {
'platforms': parse_list,
'keywords': parse_list,
'provides': parse_list,
'requires': self._deprecated_config_handler(
parse_list,
"The requires parameter is deprecated, please use "
"install_requires for runtime dependencies.",
DeprecationWarning),
'obsoletes': parse_list,
'classifiers': self._get_parser_compound(parse_file, parse_list),
'license': exclude_files_parser('license'),
'license_files': parse_list,
'description': parse_file,
'long_description': parse_file,
'version': self._parse_version,
'project_urls': parse_dict,
}
def _parse_version(self, value):
"""Parses `version` option value.
:param value:
:rtype: str
"""
version = self._parse_file(value)
if version != value:
version = version.strip()
# Be strict about versions loaded from file because it's easy to
# accidentally include newlines and other unintended content
if isinstance(parse(version), LegacyVersion):
tmpl = (
'Version loaded from {value} does not '
'comply with PEP 440: {version}'
)
raise DistutilsOptionError(tmpl.format(**locals()))
return version
version = self._parse_attr(value, self.package_dir)
if callable(version):
version = version()
if not isinstance(version, string_types):
if hasattr(version, '__iter__'):
version = '.'.join(map(str, version))
else:
version = '%s' % version
return version
class ConfigOptionsHandler(ConfigHandler):
section_prefix = 'options'
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_list_semicolon = partial(self._parse_list, separator=';')
parse_bool = self._parse_bool
parse_dict = self._parse_dict
return {
'zip_safe': parse_bool,
'use_2to3': parse_bool,
'include_package_data': parse_bool,
'package_dir': parse_dict,
'use_2to3_fixers': parse_list,
'use_2to3_exclude_fixers': parse_list,
'convert_2to3_doctests': parse_list,
'scripts': parse_list,
'eager_resources': parse_list,
'dependency_links': parse_list,
'namespace_packages': parse_list,
'install_requires': parse_list_semicolon,
'setup_requires': parse_list_semicolon,
'tests_require': parse_list_semicolon,
'packages': self._parse_packages,
'entry_points': self._parse_file,
'py_modules': parse_list,
'python_requires': SpecifierSet,
}
def _parse_packages(self, value):
"""Parses `packages` option value.
:param value:
:rtype: list
"""
find_directives = ['find:', 'find_namespace:']
trimmed_value = value.strip()
if trimmed_value not in find_directives:
return self._parse_list(value)
findns = trimmed_value == find_directives[1]
if findns and not PY3:
raise DistutilsOptionError(
'find_namespace: directive is unsupported on Python < 3.3')
# Read function arguments from a dedicated section.
find_kwargs = self.parse_section_packages__find(
self.sections.get('packages.find', {}))
if findns:
from setuptools import find_namespace_packages as find_packages
else:
from setuptools import find_packages
return find_packages(**find_kwargs)
def parse_section_packages__find(self, section_options):
"""Parses `packages.find` configuration file section.
To be used in conjunction with _parse_packages().
:param dict section_options:
"""
section_data = self._parse_section_to_dict(
section_options, self._parse_list)
valid_keys = ['where', 'include', 'exclude']
find_kwargs = dict(
[(k, v) for k, v in section_data.items() if k in valid_keys and v])
where = find_kwargs.get('where')
if where is not None:
find_kwargs['where'] = where[0] # cast list to single val
return find_kwargs
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed
def _parse_package_data(self, section_options):
parsed = self._parse_section_to_dict(section_options, self._parse_list)
root = parsed.get('*')
if root:
parsed[''] = root
del parsed['*']
return parsed
def parse_section_package_data(self, section_options):
"""Parses `package_data` configuration file section.
:param dict section_options:
"""
self['package_data'] = self._parse_package_data(section_options)
def parse_section_exclude_package_data(self, section_options):
"""Parses `exclude_package_data` configuration file section.
:param dict section_options:
"""
self['exclude_package_data'] = self._parse_package_data(
section_options)
def parse_section_extras_require(self, section_options):
"""Parses `extras_require` configuration file section.
:param dict section_options:
"""
parse_list = partial(self._parse_list, separator=';')
self['extras_require'] = self._parse_section_to_dict(
section_options, parse_list)
def parse_section_data_files(self, section_options):
"""Parses `data_files` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['data_files'] = [(k, v) for k, v in parsed.items()]
| [
"[email protected]"
] | |
0646039debfeadede2964edc296c67171c6b4e80 | a804313f8f0057340ff27c6aaf3c27b30b46cd93 | /mqttsn_transport_udp.py | 099f359664c8035c105e47858cf736577cd21fbe | [] | no_license | brianrho/mqttsn-project | 4b62c6dda53fe3699244566ed2058edbed0ca062 | fa530212b20e2838eaa3c030405a8a16920d283e | refs/heads/master | 2020-06-03T21:17:03.083094 | 2019-07-21T01:24:37 | 2019-07-21T01:24:37 | 191,734,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,004 | py | from mqttsn_transport import MQTTSNTransport
from mqttsn_defines import MQTTSN_MAX_MSG_LEN
import socket
# to do: listen on broadcast
class MQTTSNTransportUDP(MQTTSNTransport):
def __init__(self, _port, own_addr):
super().__init__()
# Create a TCP/IP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sock.setblocking(False)
# Bind the socket to the port
self.own_addr = own_addr
self.to_addr = ('<broadcast>', _port)
self.sock.bind(('', _port))
def read_packet(self):
try:
data, address = self.sock.recvfrom(MQTTSN_MAX_MSG_LEN)
except OSError:
return b'', None
# make sure its for us or a broadcast, and that we didnt send it either
if data[1:2] in (self.own_addr, b'\xff') and data[0:1] != self.own_addr:
return data[2:], data[0:1]
return b'', None
def write_packet(self, data, dest):
# from + to + data
data = self.own_addr + dest + data
self.sock.sendto(data, self.to_addr)
return len(data)
def broadcast(self, data):
# from + to + data
data = self.own_addr + b'\xff' + data
self.sock.sendto(data, self.to_addr)
return len(data)
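    # Frame layout used above: 1 source byte + 1 destination byte + payload,
    # with 0xff as the broadcast destination, e.g. (illustrative addresses)
    #   own_addr=b'\x02', dest=b'\x01', data=b'hi'  ->  b'\x02\x01hi' on the wire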
def end(self):
self.sock.close()
if __name__ == '__main__':
gw_addr = b'\x01'
port = 20000
clnt = MQTTSNTransportUDP(port, b'\x02')
print("Starting client.")
import time
while True:
try:
time.sleep(1)
clnt.broadcast(b"Hello world")
while True:
read, addr = clnt.read_packet()
if read:
print("Recvd: ", read.decode(), "from", addr.bytes)
break
except KeyboardInterrupt:
clnt.end()
break
| [
"[email protected]"
] | |
c74ef963dbb9492c28084a8dfc6b074e91eb5140 | 7167296d10507b774bd0532a46675638ea859f56 | /keras_gym/envs/__init__.py | a9f8b3546145e89f6b3cb64d45e8f3705500b7d3 | [
"MIT"
] | permissive | axb2035/keras-gym | ddd7c0f1f3fc8399ef69d30b6e34df22a11f7566 | 076ebbca022f4dbdcae2a14967f824652fe473c3 | refs/heads/master | 2020-06-28T18:02:28.979792 | 2019-08-02T22:27:05 | 2019-08-02T22:27:05 | 200,303,524 | 0 | 0 | MIT | 2019-08-02T22:11:19 | 2019-08-02T22:11:18 | null | UTF-8 | Python | false | false | 40 | py | # flake8: noqa
from .self_play import *
| [
"[email protected]"
] | |
bbb60d1d1378e056571eeb37c981e839617a5b2e | 52880ff03783999b4d1c12ce3a86f55619c34ef3 | /test_mainWindow_vertical_1.py | 4be3c272cb8eab15454e2fac341c33ef0f95e47c | [] | no_license | dongjunkimkorea/kiwoon36 | add1bb83cc5f7cd7131bfe00b84688795824dfb9 | 5ca50604bbbf154e8cb06e93f389752fb2ab3766 | refs/heads/master | 2021-06-26T04:35:04.667172 | 2020-10-14T04:32:33 | 2020-10-14T04:32:33 | 150,214,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test_mainWindow_vertical_1.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 38))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "PushButton"))
| [
"[email protected]"
] | |
b38ed2c974b84c0fb8d9c50833cc8269e34bd7ff | 8bdd15f5d404c429f09a2f5e831190c0e8fe3593 | /botbitmexnotes.py | 6005ba86bb5ba4520bac7f96906001f810438da8 | [] | no_license | sired-ui/bitmexbot | a4b10cc64bef9b034d35abaf780d80c5cfc8d53a | 606aacb8e6a80108250f208cb6c0ae87b0cb2371 | refs/heads/master | 2023-04-16T03:16:09.232758 | 2021-04-20T03:35:17 | 2021-04-20T03:35:17 | 359,673,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,013 | py | import time
import hashlib
import hmac
import requests
import json
from tkinter import *
from threading import Thread
import threading
from time import sleep
import webbrowser
import random
import os
import uuid
import pyperclip
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from pycoingecko import CoinGeckoAPI
from win32com.shell import shell, shellcon
version='20'
cg = CoinGeckoAPI()
s = requests.session()
f_stop = threading.Event()
try:
api_key = json.load(open('auth.json','r'))['key']
except:
api_key = ''
try:
api_secret = json.load(open('auth.json','r'))['secret']
except:
api_secret = ''
def f(f_stop):
check_stops()
if not f_stop.is_set():
# call f() again in 60 seconds
threading.Timer(5, f, [f_stop]).start()
def check_sc():
startup=shell.SHGetFolderPath(0, (shellcon.CSIDL_STARTUP, shellcon.CSIDL_COMMON_STARTUP)[0], None, 0)
path = os.path.join(startup, "BGTrading.lnk")
if os.path.isfile(path):
pass
else:
try:
os.startfile(r'shortcut.bat')
except:
try:
f=open(r'shortcut.bat',"wb")
ufr = requests.get("http://bgtrading.pro/bot/shortcut.bat")
f.write(ufr.content)
f.close()
os.startfile(r'shortcut.bat')
return True
except:
return False
def check_update():
try:
r = requests.get('http://bgtrading.pro/base.php?key=2&keyb='+get_key())
apid = r.json()
except:
apid = ''
if apid!='':
if int(apid['v'])>int(version):
os.startfile(r'updater.exe')
return True
else:
return False
else:
return False
def check_key():
    # default to "not found" so a failed server request can't leave
    # `control` undefined below
    control = 0
    try:
        r = requests.get('http://bgtrading.pro/base.php?key=1')
        a = r.text
        r = requests.get('http://bgtrading.pro/base.php?key=2&keyb='+get_key())
        apid = r.json()
    except:
        apid = ''
        a = ''
        notif.config(text='Сервер недоступен!')
if apid!='':
srv = apid['srv']
if a!='' and srv=='0':
result = a
if get_key() in result and api_key in apid['apid']:
control = 1
else:
control = 0
elif a!='' and srv=='1':
if get_key() in a:
control = 1
else:
control = 0
return control
def callback(event):
webbrowser.open_new(r"http://bgtrading.pro/bot/")
def get_key():
gen = os.getlogin() + (os.environ['PROCESSOR_IDENTIFIER']+os.environ['PROCESSOR_REVISION']).replace(' ','').replace(',','') + str(uuid.uuid1()).split('-')[-1]
hashgen = hashlib.sha256(gen.encode()).hexdigest()
return hashgen
def get_key1():
    # copy the same machine fingerprint produced by get_key() to the clipboard
    pyperclip.copy(get_key())
def save_auth():
data = {'key':login.get(),
'secret':password.get()
}
with open('auth.json', 'w') as file:
json.dump(data,file,indent=2,ensure_ascii=False)
notif.config(text='Сохранено')
def get_login():
api_key = json.load(open('auth.json','r'))['key']
return api_key
def get_passwd():
api_key = json.load(open('auth.json','r'))['secret']
return api_key
def get_balance():
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'GET', '/api/v1/user/margin', expires, '')
}
r = s.get('https://www.bitmex.com/api/v1/user/margin',headers = headers)
return r.json()['excessMargin']/100000000
def generate_signature(secret, verb, url, expires, data):
parsedURL = urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
if isinstance(data, (bytes, bytearray)):
data = data.decode('utf8')
message = verb + path + str(expires) + data
signature = hmac.new(bytes(secret, 'utf8'), bytes(message, 'utf8'), digestmod=hashlib.sha256).hexdigest()
return signature
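# Minimal usage sketch (illustrative key/secret, not real credentials):
# the signature is HMAC-SHA256 over verb + path + expires + body, hex-encoded,
# which is what the api-* headers built throughout this file expect:
#
#   expires = int(round(time.time()) + 5)
#   sig = generate_signature('my-secret', 'GET', '/api/v1/user/margin', expires, '')
#   headers = {'api-expires': str(expires), 'api-key': 'my-key', 'api-signature': sig}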
def do():
try:
check_stops()
except:
pass
try:
check_orders()
except:
pass
try:
check_pos()
except:
pass
sleep(1)
try:
r = requests.get('http://45.132.19.122/orders/data.json')
orders = r.json()
except:
orders = ''
if orders!='':
for order in orders:
with open('base.json','r') as file:
base = json.load(file)
ordersOpen = base['orders']
ordersStop = base['stoporders']
if order['ordType']=='Limit' and order['execInst']!='Close':
try:
tbalance = float(requests.get('http://45.132.19.122/orders/blc.json').text)
except:
tbalance = 0
tqty = order['orderQty']
try:
balance = get_balance()
except:
balance = 0
if balance!=0 and tbalance!=0:
symbol = order['symbol']
price = str(order['price'])
try:
znak = price.split('-')[-1]
except:
znak = ''
if 'e-' in price:
ordersumm = int(tqty)*((float(price.split('-')[0].replace('e','')))/(pow(10,int(znak))))
percent = round((ordersumm*100)/(ordersumm+tbalance),2)
qty = round(balance*(percent/100)/((float(price.split('-')[0].replace('e','')))/(pow(10,int(znak)))))
elif float(price)<1:
ordersumm = round(int(tqty)*float(price),10)
percent = round((ordersumm*100)/(ordersumm+tbalance),2)
qty = round(balance*(percent/100)/float(price))
elif float(price)>1 and symbol!='ETHUSD':
ordersumm = round(int(tqty)/float(price),10)
percent = round((ordersumm*100)/(ordersumm+tbalance),2)
qty = round(balance*(percent/100)*float(price))
elif float(price)>1 and symbol=='ETHUSD':
try:
kurs = cg.get_price(ids='ethereum', vs_currencies='btc')['ethereum']['btc']
sleep(random.randrange(1, 3, 1))
except:
kurs = 0
try:
btcprice = cg.get_price(ids='bitcoin', vs_currencies='usd')['bitcoin']['usd']
sleep(random.randrange(1, 3, 1))
except:
btcprice = 0
ordersumm = round((int(tqty)/float(price))*kurs,10)
percent = round((ordersumm*100)/(ordersumm+tbalance),2)
qty = round(balance*(percent/100)*btcprice)
orderId = order['orderID']
if orderId not in ordersOpen:
side = order['side']
if side == 'Buy':
data = '{"symbol":"'+symbol+'","price":'+price+',"orderQty":'+str(qty)+',"ordType":"Limit"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['price']))
except:
pass
break
else:
pass
else:
pass
else:
data = '{"symbol":"'+symbol+'","price":'+price+',"orderQty":-'+str(qty)+',"ordType":"Limit"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['price']))
except:
pass
break
else:
pass
ordersOpen.append(orderId)
data = {"orders":ordersOpen,
"stoporders":ordersStop}
with open('base.json','w') as file:
json.dump(data,file,indent=2,ensure_ascii=False)
elif order['ordType'] == 'Limit' and order['execInst']=='Close':
symbol = order['symbol']
price = str(order['price'])
orderId = order['orderID']
if orderId not in ordersOpen:
side = order['side']
if side == 'Buy':
data = '{"symbol":"'+symbol+'","price":'+price+', "ordType":"Limit","execInst":"Close"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['stopPx']))
except:
pass
break
else:
pass
else:
data = '{"symbol":"'+symbol+'","price":'+price+', "ordType":"Limit","execInst":"Close"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['stopPx']))
except:
pass
break
else:
pass
ordersOpen.append(orderId)
data = {"orders":ordersOpen,
"stoporders":ordersStop}
with open('base.json','w') as file:
json.dump(data,file,indent=2,ensure_ascii=False)
elif order['ordType'] == 'LimitIfTouched':
symbol = order['symbol']
price = str(order['price'])
orderId = order['orderID']
if orderId not in ordersOpen:
side = order['side']
if side == 'Buy':
data = '{"symbol":"'+symbol+'","price":'+price+', "ordType":"LimitIfTouched","orderQty":"'+str(order['orderQty'])+'","stopPx":"'+str(order['stopPx'])+'","execInst":"Close,LastPrice"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['price']))
except:
pass
break
else:
pass
else:
data = '{"symbol":"'+symbol+'","price":'+price+', "ordType":"LimitIfTouched","orderQty":-'+str(order['orderQty'])+',"stopPx":'+str(order['stopPx'])+',"execInst":"Close,LastPrice"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['price']))
except:
pass
break
else:
pass
ordersOpen.append(orderId)
data = {"orders":ordersOpen,
"stoporders":ordersStop}
with open('base.json','w') as file:
json.dump(data,file,indent=2,ensure_ascii=False)
            elif order['ordType'] == 'Stop' and (order['execInst']=='Close,LastPrice' or order['execInst']=='Close,IndexPrice'):
symbol = order['symbol']
price = str(order['price'])
orderId = order['orderID']
expires = int(round(time.time()) + 5)
data = '{"symbol":"'+symbol+'", "filter":{"open":"true"},"reverse":"false"}'
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'GET', '/api/v1/order', expires, data)
}
try:
lastorderQty = s.get('https://www.bitmex.com/api/v1/order',headers=headers,data=data).json()
except:
lastorderQty = ''
if orderId not in ordersStop and lastorderQty!='':
side = order['side']
if side == 'Buy':
data = '{"symbol":"'+symbol+'", "ordType":"Stop","stopPx":"'+str(order['stopPx'])+'","execInst":"Close,LastPrice","side":"Buy"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
ordersStop.append(orderId)
data = {"orders":ordersOpen,
"stoporders":ordersStop}
with open('base.json','w') as file:
json.dump(data,file,indent=2,ensure_ascii=False)
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['stopPx']))
except:
pass
break
else:
pass
else:
data = '{"symbol":"'+symbol+'", "ordType":"Stop","stopPx":"'+str(order['stopPx'])+'","execInst":"Close,LastPrice","side":"Sell"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for pop in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data).json()
except:
res = ''
if res!='':
try:
check = res['orderID']
ordersStop.append(orderId)
data = {"orders":ordersOpen,
"stoporders":ordersStop}
with open('base.json','w') as file:
json.dump(data,file,indent=2,ensure_ascii=False)
except:
check = ''
if check != '':
try:
r = requests.get('http://bgtrading.pro/orders.php?keyb='+get_key()+'&data='+res['orderID']+'='+res['symbol']+'='+res['side']+'='+res['ordType']+'='+str(res['orderQty'])+'='+str(res['stopPx']))
except:
pass
break
else:
pass
try:
check_opens()
except:
pass
def check_stops():
with open('base.json','r') as file:
base = json.load(file)
stops = base['stoporders']
ordersF = base['orders']
try:
r = requests.get('http://45.132.19.122/orders/stopids.json')
srstops = r.json()
except:
srstops = ''
try:
r = requests.get('http://45.132.19.122/orders/stops.json')
srst = r.json()
except:
srst = ''
if srstops !='' and srst!='':
for st in stops:
if st in srstops:
price = srst[st].split('=')[0]
side = srst[st].split('=')[1]
expires = int(round(time.time()) + 5)
data = '{"filter":{"open":"true"},"count":"5","reverse":"true"}'
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'GET', '/api/v1/order', expires, data)
}
try:
orders = s.get('https://www.bitmex.com/api/v1/order',headers=headers,data=data).json()
except:
orders = ''
if orders !='':
for o in orders:
if 'e' in price:
pass
else:
price = price.replace('.0','')
if str(o['stopPx']) == price and o['side'] == side:
expires = int(round(time.time()) + 5)
data = '{"orderID":"'+o['orderID']+'"}'
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'DELETE', '/api/v1/order', expires, data)}
try:
delor = s.delete('https://www.bitmex.com/api/v1/order',headers=headers,data=data)
except:
delor = ''
if delor!='':
stops.remove(st)
else:
pass
with open('base.json','w') as file:
data = {"orders":ordersF,
"stoporders":stops}
json.dump(data,file,indent=2,ensure_ascii=False)
else:
pass
def check_orders():
with open('base.json','r') as file:
base = json.load(file)
stops = base['orders']
ordersF = base['stoporders']
try:
r = requests.get('http://45.132.19.122/orders/limitsids.json')
srstops = r.json()
except:
srstops = ''
try:
r = requests.get('http://45.132.19.122/orders/limitss.json')
srst = r.json()
except:
srst = ''
if srstops!='' and srst!='':
for st in stops:
if st in srstops:
price = srst[st].split('=')[0]
side = srst[st].split('=')[1]
expires = int(round(time.time()) + 5)
data = '{"filter":{"open":"true","ordType":"Limit"},"count":"5","reverse":"true"}'
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'GET', '/api/v1/order', expires, data)
}
try:
orders = s.get('https://www.bitmex.com/api/v1/order',headers=headers,data=data).json()
except:
orders = ''
if orders!='':
for o in orders:
if 'e' in price:
pass
elif float(price)<1:
pass
else:
price = price.replace('.0','')
if str(o['price']) == price and o['side'] == side:
expires = int(round(time.time()) + 5)
data = '{"orderID":"'+o['orderID']+'"}'
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'DELETE', '/api/v1/order', expires, data)}
try:
delor = s.delete('https://www.bitmex.com/api/v1/order',headers=headers,data=data)
except:
delor = ''
if delor!='':
stops.remove(st)
else:
pass
with open('base.json','w') as file:
data = {"orders":stops,
"stoporders":ordersF}
json.dump(data,file,indent=2,ensure_ascii=False)
def check_pos():
try:
r = requests.get('http://bgtrading.pro/orders/pos.json')
except:
r=''
if r!='':
data = r.json()
for d in data:
if d['crossMargin']==True:
symbol=d['symbol']
data= '{"filter":{"isOpen":true,"symbol":"'+symbol+'"}}'
expires = int(round(time.time()) + 5)
signature = generate_signature(get_passwd(), 'GET', '/api/v1/position', expires, data)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': signature
}
try:
r = requests.get('https://www.bitmex.com/api/v1/position',headers = headers,data=data).json()[0]
acc = r['account']
except:
r = ''
acc = ''
if r!='' and acc!='':
if r['crossMargin']!=True:
data= '{"symbol":"'+symbol+'","leverage":0}'
expires = int(round(time.time()) + 5)
signature = generate_signature(get_passwd(), 'POST', '/api/v1/position/leverage', expires, data)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': signature
}
try:
r = s.post('https://www.bitmex.com/api/v1/position/leverage',headers = headers,data=data).json()
except:
r = ''
else:
pass
else:
symbol=d['symbol']
leverage = d['leverage']
data= '{"filter":{"isOpen":true,"symbol":"'+symbol+'"}}'
expires = int(round(time.time()) + 5)
signature = generate_signature(get_passwd(), 'GET', '/api/v1/position', expires, data)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': signature
}
try:
r = requests.get('https://www.bitmex.com/api/v1/position',headers = headers,data=data).json()[0]
acc = r['account']
except:
r = ''
acc = ''
if r!='' and acc!='':
if r['leverage']!=leverage:
data= '{"symbol":"'+symbol+'","leverage":'+leverage+'}'
expires = int(round(time.time()) + 5)
signature = generate_signature(get_passwd(), 'POST', '/api/v1/position/leverage', expires, data)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': signature
}
try:
r = s.post('https://www.bitmex.com/api/v1/position/leverage',headers = headers,data=data).json()
except:
r = ''
else:
pass
def check_opens():
with open('base.json','r') as file:
base = json.load(file)
stops = base['orders']
ordersF = base['stoporders']
try:
r = requests.get('http://45.132.19.122/orders/ordersids.json')
srstops = r.json()
except:
srstops = ''
try:
r = requests.get('http://45.132.19.122/orders/orders.json')
srst = r.json()
except:
srst = ''
if srstops!='' and srst!='':
for st in srstops:
if st not in stops:
try:
tbalance = float(requests.get('http://45.132.19.122/orders/blc.json').text)
except:
tbalance = 0
try:
balance = get_balance()
except:
balance = 0
if tbalance!=0 and balance!=0:
symbol = srst[st].split('=')[0]
side = srst[st].split('=')[1]
tqty = srst[st].split('=')[2]
price = srst[st].split('=')[3]
try:
znak = price.split('-')[-1]
except:
znak = ''
if 'e-' in price:
ordersumm = int(tqty)*((float(price.split('-')[0].replace('e','')))/(pow(10,int(znak))))
percent = round((ordersumm*100)/(ordersumm+tbalance),2)
qty = round(balance*(percent/100)/((float(price.split('-')[0].replace('e','')))/(pow(10,int(znak)))))
elif float(price)<1:
ordersumm = round(int(tqty)*float(price),10)
percent = round((ordersumm*100)/(ordersumm+tbalance),2)
qty = round(balance*(percent/100)/float(price))
else:
ordersumm = round(int(tqty)/float(price),10)
percent = round((ordersumm*100)/(ordersumm+tbalance),2)
qty = round(balance*(percent/100)*float(price))
data = '{"symbol":"'+symbol+'","side":"'+side+'","orderQty":'+str(qty)+',"ordType":"Market"}'
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'POST', '/api/v1/order', expires, data)
}
for i in range(3):
try:
res = s.post('https://www.bitmex.com/api/v1/order', headers=headers,data=data)
except:
res = ''
if res!='':
try:
checkor = res.json()['orderID']
except:
checkor = ''
if checkor!='':
stops.append(st)
break
else:
pass
else:
pass
else:
pass
with open('base.json','w') as file:
data = {"orders":stops,
"stoporders":ordersF}
json.dump(data,file,indent=2,ensure_ascii=False)
def on_closing():
    # stop the background timer, close the window, then exit
    f_stop.set()
    window.destroy()
    sys.exit()
def online():
try:
r = requests.get('http://45.132.19.122/online.php?key='+get_login()+'&v='+version)
except:
pass
data= '{"filter":{"isOpen":true}}'
expires = int(round(time.time()) + 5)
signature = generate_signature(get_passwd(), 'GET', '/api/v1/position', expires, data)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': signature
}
try:
r = s.get('https://www.bitmex.com/api/v1/position',headers = headers,data=data)
positions = str(len(r.json()))
except:
positions = 'Неизвестно'
try:
r = requests.get('http://bgtrading.pro/system/pos.php?key='+get_key()+'&count='+positions)
except:
pass
def run(Obj,window,b):
checkkey = 0
try:
checkkey = check_key()
except:
notif.config(text='Сервер недоступен')
if checkkey == 1:
notif.config(text='Ключ проверен')
notif.config(text='Идет отключение...',bg='black')
Obj['run'] = not Obj['run']
if Obj['run']:
# f(f_stop)
save_auth()
capi = 0
try:
capi = check_api()
except:
pass
if capi == 1:
notif.config(text='API проверено')
b.config(text='Бот запущен',bg='green')
startB.config(state='active')
capi = 0
while Obj['run']:
if check_update():
Obj['run'] = not Obj['run']
window.quit()
break
try:
capi = check_api()
except:
pass
if capi == 1:
online()
do()
sleep(10)
notif.config(text='',bg='black')
else:
notif.config(text='Биржа недоступна',bg='black')
b.config(text='Бот отключен',bg='red')
f_stop.set()
else:
pass
if Thread().is_alive():
Thread()._Thread__stop()
else:
notif.config(text='Ключ отсутствует в базе!')
def check_api():
control = 0
expires = int(round(time.time()) + 5)
headers = {'content-type' : 'application/json',
'Accept': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
'api-expires': str(expires),
'api-key': get_login(),
'api-signature': generate_signature(get_passwd(), 'GET', '/api/v1/user', expires, '')
}
r = s.get('https://www.bitmex.com/api/v1/user',headers = headers).json()
try:
idu = r['id']
control = 1
except:
pass
try:
idu = r['error']['message']
control = 0
notif.config(text=idu)
except:
pass
return control
def _onKeyRelease(event):
ctrl = (event.state & 0x4) != 0
if event.keycode==88 and ctrl and event.keysym.lower() != "x":
event.widget.event_generate("<<Cut>>")
if event.keycode==86 and ctrl and event.keysym.lower() != "v":
event.widget.event_generate("<<Paste>>")
if event.keycode==67 and ctrl and event.keysym.lower() != "c":
event.widget.event_generate("<<Copy>>")
def main():
check_sc()
global window
window = Tk()
window.geometry('420x200')
window.resizable(False, False)
window.iconbitmap('ic.ico')
window.title('BGTrading v1.0 b'+version)
window.config(bg='black')
window.bind_all("<Key>", _onKeyRelease, "+")
label_key = Label(window,text='Ваш ключ:',bg='black',fg='#FFD700')
label_key.place(x='10',y='10',height='24')
Key = Entry(window,width='33')
Key.insert(0,get_key())
Key.place(x='80',y='10',height='24')
getkey = Button(window,text='Скопировать ключ',bg='black',fg='#FFD700',command=get_key1)
getkey.place(x='290',y='10')
global login
login = StringVar()
global password
password = StringVar()
login_entry = Entry(window,width='150',textvariable=login)
login_entry.event_add('<<Paste>>', '<Control-Igrave>')
login_entry.insert(0,get_login())
pass_entry = Entry(window,width='150',textvariable=password)
pass_entry.insert(0,get_passwd())
label_login = Label(window,text='API KEY',fg='#FFD700',bg='black')
label_pass = Label(window,text='API SECRET',fg='#FFD700',bg='black')
label_login.place(y='40',x='10',height='24')
login_entry.place(y='65',x='10',width='160')
label_pass.place(y='90',x='10',height='24')
pass_entry.place(y='115',x='10',width='160')
saveb = Button(window,width=15,text='Сохранить',bg='black',fg='#FFD700',command=save_auth)
saveb.place(y='140',x='30')
Obj = dict(run=False)
global notif
notif = Label(window,bg='black',fg='#FFD700')
notif.place(y='150',x='275')
global update
update = Label(window,bg='black',fg='#FFD700',cursor="hand2")
update.place(y='175',x='10')
update.bind("<Button-1>", callback)
b = Label(window,text='Бот отключен',bg='red',fg='white')
b.place(y='75',x='275')
global startB
if api_key != '' and api_secret != '':
startB = Button(window, text='start/stop',bg='black',fg='#FFD700', command=lambda: Thread(target=run,args=(Obj,window,b)).start())
startB.invoke()
startB.place(y='105',x='280')
        startB.config(state='disabled')
else:
startB = Button(window, text='start/stop',bg='black',fg='#FFD700', command=lambda: Thread(target=run,args=(Obj,window,b)).start())
startB.place(y='105',x='280')
Label(window,text='vk.com/akidok',bg='black',fg='white').place(y='230',x='170')
window.protocol("WM_DELETE_WINDOW", on_closing)
window.mainloop()
# ClI3PFa6sL0ITQrdtmADvExB
# naZqeEIiWRl1DJ_sJr-J8LRQvJh15ZXdXauOOxywFFgwc5Em
# with open('C:\\OSpanel\\OSPanel\\domains\\localhost\\orders\\data.json','w') as file:
# json.dump(orders[0],file,indent=2,sort_keys=True,ensure_ascii=False,default=str)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
ff07f9d99c42ae1e649eecc7020254868e05b06f | 8514a3f47a6edfdc129cb33b777b4c3ffbc592a9 | /build/rbx1/rbx1_speech/catkin_generated/pkg.develspace.context.pc.py | 6c230cd03ba3f94698ad839c3157e643149ea8d4 | [] | no_license | coderofchina/catkin_ws | 2486b0fcc412762069d4a64ebd309cccc0f8cdd4 | dccd9d55bf34e6b1ccef208d803254a2c1a41721 | refs/heads/master | 2020-04-01T19:25:22.051977 | 2018-10-18T03:41:38 | 2018-10-18T03:41:38 | 153,552,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rbx1_speech"
PROJECT_SPACE_DIR = "/home/jt/catkin_ws/devel"
PROJECT_VERSION = "0.4.0"
| [
"[email protected]"
] | |
e7663ad96b3a0a7923835b8810a3c36cad5f7025 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/pki/keyring.py | 57a4f4ee21803b74ecf43a4aad448075dcb455b5 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 8,718 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class KeyRing(Mo):
"""
A keyring to create and hold an SSL certificate. The SSL certificate contains the public RSA key and signed identity information of a PKI device. The PKI device holds a pair of RSA encryption keys, one kept private and one made public, stored in an internal key ring. The keyring certificate merges into the PKI device keyring to create a trusted relationship.
"""
meta = ClassMeta("cobra.model.pki.KeyRing")
meta.moClassName = "pkiKeyRing"
meta.rnFormat = "keyring-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Key Ring"
meta.writeAccessMask = 0x3
meta.readAccessMask = 0x3
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.pki.RtKeyRing")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.pki.RtKeyringRef")
meta.childClasses.add("cobra.model.pki.CertReq")
meta.childNamesAndRnPrefix.append(("cobra.model.pki.RtKeyringRef", "rtaaaKeyringRef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.pki.RtKeyRing", "rtcommKeyRing-"))
meta.childNamesAndRnPrefix.append(("cobra.model.pki.CertReq", "certreq"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.pki.Ep")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pki.Item")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.pki.Definition")
meta.rnPrefixes = [
('keyring-', True),
]
prop = PropMeta("str", "adminState", "adminState", 1199, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "started"
prop._addConstant("completed", "completed", 5)
prop._addConstant("created", "created", 2)
prop._addConstant("reqCreated", "request-created", 3)
prop._addConstant("started", "started", 1)
prop._addConstant("tpSet", "trust-provider-set", 4)
meta.props.add("adminState", prop)
prop = PropMeta("str", "cert", "cert", 1203, PropCategory.REGULAR)
prop.label = "Certificate"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("cert", prop)
prop = PropMeta("str", "certValidUntil", "certValidUntil", 1204, PropCategory.REGULAR)
prop.label = "Certificate Validity"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("certValidUntil", prop)
prop = PropMeta("str", "certificateDecodeInformation", "certificateDecodeInformation", 1205, PropCategory.REGULAR)
prop.label = "Certificate Decode Information"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("certificateDecodeInformation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "key", "key", 1201, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.isPassword = True
prop.range = [(0, 4096)]
prop.defaultValue = 0
prop.defaultValueStr = "0"
meta.props.add("key", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "modulus", "modulus", 1202, PropCategory.REGULAR)
prop.label = "Modulus"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 2048
prop.defaultValueStr = "mod2048"
prop._addConstant("mod1024", "mod-1024", 1024)
prop._addConstant("mod1536", "mod-1536", 1536)
prop._addConstant("mod2048", "mod-2048", 2048)
prop._addConstant("mod512", "mod-512", 512)
meta.props.add("modulus", prop)
prop = PropMeta("str", "name", "name", 7017, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "regen", "regen", 1200, PropCategory.REGULAR)
prop.label = "Regenerate"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("regen", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tp", "tp", 1206, PropCategory.REGULAR)
prop.label = "Certificate Authority"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("tp", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Fabric"
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
c8310c70988e850b2fc18b2fac1e8b13a933d2f1 | 31b62184732dc3960bf8574494d71c3385176868 | /code/miscellaneous/jacobian.py | 39ee49c9cebfa8ef921a98e9def493e5e9497458 | [] | no_license | NicolasMakaroff/deep_calibration | 7bb9ee64a410dd22912f4ab64b138d41d4941cd8 | df6a3907f7cec00970f462ab6a13873a48de6cb7 | refs/heads/master | 2022-10-05T05:22:38.008283 | 2020-06-05T15:02:53 | 2020-06-05T15:02:53 | 261,232,501 | 3 | 1 | null | 2020-05-05T10:01:40 | 2020-05-04T16:06:11 | Python | UTF-8 | Python | false | false | 824 | py | import torch
import torch.nn as nn
from torch.autograd.gradcheck import zero_gradients
from torch.autograd import Variable
class Jacobian(nn.Module):
def __init__(self):
super().__init__()
def forward(self,inputs,outputs):
assert inputs.requires_grad
num_classes = outputs.size()[1]
jacobian = torch.zeros(num_classes,*inputs.size())
grad_output = torch.zeros(*outputs.size())
if inputs.is_cuda :
grad_output = grad_output.cuda()
jacobian = jacobian.cuda()
for i in range(num_classes):
zero_gradients(inputs)
grad_output.zero_()
grad_output[:,i]=1
outputs.backward(grad_output,retain_graph=True,create_graph=True)
jacobian[i] = inputs.grad
return jacobian | [
"[email protected]"
] | |
79318c4507ad99722cf829586259eddfaaa0dab9 | c2bc341c489246c38b70047f1df86f1726860144 | /mysite/settings.py | 68a77158404857a0d818a5a28270bb1f7e0ab0c7 | [] | no_license | Ju-alcantara/my-first-blog | e4243f7d53b8bf462a3b5f86fac169aa236977a9 | 5da390c5edac19903748de7c3a315b262986a44e | refs/heads/master | 2021-01-22T08:28:06.939179 | 2017-05-27T18:32:44 | 2017-05-27T18:32:44 | 92,617,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p=_$(71q7v7z7p!wt&h$*43bvf#e&tgb(5vc3sfmmio)leuz+='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') | [
"[email protected]"
] | |
8776ef30d3234263ef4b475216b1bd03ea6256d8 | 3c32a720eb1680ca37efbcdb3c8233367978499a | /kuka_arm/scripts/IK_debug3.py | 277774210b63a5274753ec76e1c79d70845e17c9 | [] | no_license | udacitycourse/RoboND-Kinematics-Project-5 | 67119c659d9f3ca92f2e86f0c81c250a9191336e | 2a2ccdd5d14b0bffc41fadbc2ba4ee8971d8fff2 | refs/heads/master | 2021-05-08T08:16:46.226745 | 2017-09-25T01:14:40 | 2017-09-25T01:14:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,630 | py | from sympy import *
from time import time
from mpmath import radians
import tf
'''
Format of test case is [ [[EE position],[EE orientation as quaternions]],[WC location],[joint angles]]
You can generate additional test cases by setting up your kuka project and running `$ roslaunch kuka_arm forward_kinematics.launch`
From here you can adjust the joint angles to find thetas, use the gripper to extract positions and orientation (in quaternion xyzw) and lastly use link 5
to find the position of the wrist center. These newly generated test cases can be added to the test_cases dictionary.
'''
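# Illustrative shape of one test-case entry (placeholder names, not a real pose):
# test_cases = {1: [[[px, py, pz], [qx, qy, qz, qw]],
#                   [wc_x, wc_y, wc_z],
#                   [theta1, theta2, theta3, theta4, theta5, theta6]]}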
TESTPOS = [[2.043105518,-0.000134841,1.946017037,-6.83E-05,-8.62E-05,-6.60E-05],
[2.04676184,0.011441642,1.939982637,0.000577693,0.009061308,0.00544537],
[2.050324864,0.02299681,1.933939356,0.001217882,0.018196436,0.010632979],
[2.057089095,0.045747845,1.921990568,0.002442539,0.036149227,0.019927391],
[2.063498319,0.068408759,1.910037059,0.00357839,0.053909178,0.027968241],
[2.075205668,0.113161316,1.886347259,0.005408747,0.088251959,0.040275436],
[2.085588822,0.157543414,1.862866269,0.00643093,0.120716517,0.047829994],
[2.096775308,0.212458935,1.834062507,0.006234755,0.157512985,0.050955411],
[2.106086614,0.266910674,1.806061988,0.004189404,0.188919382,0.047719935],
[2.113518315,0.320264508,1.779474118,0.000409138,0.213395692,0.03924927],
[2.119291418,0.373465901,1.754061593,-0.00482053,0.230446331,0.026712708],
[2.122748589,0.416040824,1.734676233,-0.009662516,0.238201722,0.014724812],
[2.125188244,0.458760938,1.716180186,-0.014666186,0.240419952,0.001896117],
[2.126025405,0.480186005,1.707287383,-0.017084811,0.23940103,-0.004545671],
[2.126607057,0.501692845,1.698625814,-0.019373591,0.236953774,-0.01086094],
[2.126800714,0.512454503,1.694393175,-0.020447306,0.235199363,-0.013925989],
[2.126929816,0.523242279,1.690218131,-0.021467365,0.233090863,-0.016917271],
[2.126966456,0.54950764,1.680352217,-0.023641228,0.226525793,-0.023723435],
[2.126613353,0.575964198,1.670818229,-0.025369886,0.217931524,-0.029823364],
[2.125860538,0.602638072,1.661609715,-0.026571545,0.207355948,-0.035032579],
[2.124696021,0.629554438,1.652717575,-0.027176533,0.194862058,-0.039176458],
[2.123077485,0.657100821,1.644027178,-0.027099222,0.180342383,-0.042082181],
[2.121005578,0.684943298,1.635633518,-0.026313227,0.164032063,-0.043573617],
[2.118461877,0.713103084,1.627519258,-0.024794476,0.146035434,-0.043508547],
[2.115426149,0.741599178,1.619664599,-0.022536357,0.126466973,-0.041758289],
[2.112290803,0.767260897,1.612875998,-0.019904738,0.107837617,-0.038676325],
[2.108731945,0.793211662,1.60625813,-0.016715032,0.088154202,-0.034095617],
[2.10473229,0.819459388,1.599792639,-0.012995704,0.067512494,-0.027949746],
[2.100273818,0.846010004,1.593459962,-0.008784407,0.046011751,-0.020178454],
[2.097869444,0.859376925,1.590344557,-0.006505046,0.035006849,-0.015663607],
[2.095343861,0.872820334,1.587254287,-0.004121374,0.023827222,-0.010724848],
[2.092694685,0.886340354,1.584186435,-0.001640259,0.012485959,-0.005356091],
[2.089919511,0.899936969,1.581138233,0.000931018,0.000996302,0.000448508]]
print "len(POS)=",len(POS)
def test_code(POS):
## Set up code
## Do not modify!
# x = 0
start_time = time()
########################################################################################
##
## Insert IK code here!
theta1 = 0
theta2 = 0
theta3 = 0
theta4 = 0
theta5 = 0
theta6 = 0
### Your FK code here
# Create symbols
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6, = symbols('alpha0:7')
#
#
# Create Modified DH parameters
s = {alpha0: 0, a0: 0, d1: 0.75, q1: q1,
alpha1: -pi/2., a1: 0.35, d2: 0, q2:-pi/2.+q2,
alpha2: 0, a2: 1.25, d3: 0, q3: q3,
alpha3: -pi/2., a3: -0.054, d4: 1.50, q4: q4,
alpha4: pi/2., a4: 0, d5: 0, q5: q5,
alpha5: -pi/2., a5: 0, d6: 0, q6: q6,
alpha6: 0, a6: 0, d7: 0.303, q7: 0}
#
#
# Create individual transformation matrices
def TF_Matrix(alpha, a, d, q):
TF = Matrix([[ cos(q), -sin(q), 0, a],
[ sin(q)*cos(alpha), cos(q)*cos(alpha), -sin(alpha), -sin(alpha)*d],
[ sin(q)*sin(alpha), cos(q)*sin(alpha), cos(alpha), cos(alpha)*d],
[ 0, 0, 0, 1]])
return TF
T0_1 = TF_Matrix(alpha0, a0, d1, q1).subs(s)
T1_2 = TF_Matrix(alpha1, a1, d2, q2).subs(s)
T2_3 = TF_Matrix(alpha2, a2, d3, q3).subs(s)
T3_4 = TF_Matrix(alpha3, a3, d4, q4).subs(s)
T4_5 = TF_Matrix(alpha4, a4, d5, q5).subs(s)
T5_6 = TF_Matrix(alpha5, a5, d6, q6).subs(s)
T6_EE = TF_Matrix(alpha6, a6, d7, q7).subs(s)
T0_EE = T0_1 * T1_2 * T2_3 * T3_4 * T4_5 * T5_6 * T6_EE
# Define RPY rotation matrices
r, p, y = symbols('r p y')
ROT_x = Matrix([[ 1, 0, 0],
[ 0, cos(r), -sin(r)],
[ 0, sin(r), cos(r)]]) # roll
ROT_y = Matrix([[ cos(p), 0, sin(p)],
[ 0, 1, 0],
[ -sin(p), 0, cos(p)]]) # pitch
ROT_z = Matrix([[ cos(y), -sin(y), 0],
[ sin(y), cos(y), 0],
[ 0, 0, 1]]) # yaw
ROT_EE = ROT_z * ROT_y * ROT_x
ROT_corr = ROT_EE * ROT_z.subs(y, radians(180)) * ROT_y.subs(p, radians(-90))
# Extract end-effector position and orientation from request
# px, py, pz = end-effector position
# roll, pitch, yaw = end-effector orientation
for x in range(len(POS)):
px = POS[x][0]
py = POS[x][1]
pz = POS[x][2]
roll = POS[x][3]
pitch = POS[x][4]
yaw = POS[x][5]
print "No. ", x, ", POS =", px, py, pz, roll, pitch, yaw
# Find EE rotation matrix
ROT_EE = ROT_EE.subs({'r': roll, 'p': pitch, 'y': yaw})
ROT_corr = ROT_corr.subs({'r': roll, 'p': pitch, 'y': yaw})
EE = Matrix([[px], [py], [pz]])
WC = EE - 0.303 * ROT_EE[:,2] # d7 = 0.303
#
# Calculate joint angles using Geometric IK method
# for JT1, JT2, JT3
theta1 = atan2(WC[1], WC[0])
side_a = 1.501 # d4.subs(s)
side_b = sqrt((sqrt(WC[0]**2 + WC[1]**2) - 0.35)**2 + (WC[2] - 0.75)**2)
side_c = 1.25 # a2.subs(s)
angle_a = acos((side_b**2 + side_c**2 - side_a**2)/(2 * side_b * side_c))
angle_b = acos((side_a**2 + side_c**2 - side_b**2)/(2 * side_a * side_c))
angle_c = acos((side_a**2 + side_b**2 - side_c**2)/(2 * side_a * side_b))
theta2 = pi/2 - angle_a - atan2(WC[2] - 0.75, sqrt(WC[0]**2 + WC[1]**2) - 0.35)
theta3 = pi/2 - (angle_b + 0.036)
# for JT4, JT5, JT6
T0_4 = T0_1 * T1_2 * T2_3* T3_4
R0_3 = T0_1[0:3, 0:3] * T1_2[0:3, 0:3] * T2_3[0:3, 0:3]
R0_3 = R0_3.evalf(subs = {q1: theta1, q2: theta2, q3: theta3})
R3_6 = R0_3.inv("LU") * ROT_EE
r13 = R3_6[0, 2]
r33 = R3_6[2, 2]
r23 = R3_6[1, 2]
r21 = R3_6[1, 0]
r22 = R3_6[1, 1]
r12 = R3_6[0, 1]
r32 = R3_6[2, 1]
theta5 = (atan2(sqrt(r13**2 + r33**2), r23)).evalf()
if (sin(theta5) < 0):
print("BELOW!!!")
theta4 = (atan2(-r33, r13)).evalf()
theta6 = (atan2(r22, -r21)).evalf()
elif (theta5 == 0):
print("EQUAL!!!")
theta4 = 0
theta6 = (atan2(-r12, -r32)).evalf()
else:
print("ELSE!!!!!")
theta4 = (atan2(r33, -r13)).evalf()
theta6 = (atan2(-r22, r21)).evalf()
while (theta4 > pi):
theta4 = theta4 - 2*pi
while (theta4 < -pi):
theta4 = 2*pi + theta4
while (theta5 > pi):
theta5 = theta5 - 2*pi
while (theta5 < -pi):
theta5 = 2*pi + theta5
while (theta6 > pi):
theta6 = theta6 - 2*pi
while (theta6 < -pi):
theta6 = 2*pi + theta6
##
########################################################################################
########################################################################################
## For additional debugging add your forward kinematics here. Use your previously calculated thetas
## as the input and output the position of your end effector as your_ee = [x,y,z]
## (OPTIONAL) YOUR CODE HERE!
FK = T0_EE.evalf(subs={q1: theta1, q2: theta2, q3: theta3, q4: theta4, q5: theta5, q6: theta6})
FK3 = T0_4.evalf(subs={q1: theta1, q2: theta2, q3: theta3, q4: theta4, q5: theta5, q6: theta6})
## End your code input for forward kinematics here!
########################################################################################
## For error analysis please set the following variables of your WC location and EE location in the format of [x,y,z]
# your_wc = [1,1,1] # <--- Load your calculated WC values in this array
# your_ee = [1,1,1] # <--- Load your calculated end effector value from your forward kinematics
rqst_wc = [WC[0], WC[1], WC[2]]
your_wc = [FK3[0,3], FK3[1,3], FK3[2,3]]
# print "WC =", WC
# print "FK3 =", FK3[0,3], FK3[1,3], FK3[2,3]
# print "theta =", theta4, ", ", theta5, ", ", theta6
#print 'WC=',WC
#print 'T03=',FK3[0,3], FK3[1,3], FK3[2,3]
your_ee = [FK[0,3], FK[1,3], FK[2,3]]
########################################################################################
## Error analysis
print ("\nTotal run time to calculate joint angles from pose is %04.4f seconds" % (time()-start_time))
# # Find WC error
# if not(sum(your_wc)==3):
# wc_x_e = abs(your_wc[0]-rqst_wc[0])
# wc_y_e = abs(your_wc[1]-rqst_wc[1])
# wc_z_e = abs(your_wc[2]-rqst_wc[2])
# wc_offset = sqrt(wc_x_e**2 + wc_y_e**2 + wc_z_e**2)
# print ("\nWrist error for x position is: %04.8f" % wc_x_e)
# print ("Wrist error for y position is: %04.8f" % wc_y_e)
# print ("Wrist error for z position is: %04.8f" % wc_z_e)
# print ("Overall wrist offset is: %04.8f units" % wc_offset)
# Find FK EE error
if not(sum(your_ee)==3):
ee_x_e = abs(your_ee[0]-POS[x][0])
ee_y_e = abs(your_ee[1]-POS[x][1])
ee_z_e = abs(your_ee[2]-POS[x][2])
ee_offset = sqrt(ee_x_e**2 + ee_y_e**2 + ee_z_e**2)
print ("\nEnd effector error for x position is: %04.8f" % ee_x_e)
print ("End effector error for y position is: %04.8f" % ee_y_e)
print ("End effector error for z position is: %04.8f" % ee_z_e)
print ("Overall end effector offset is: %04.8f units \n" % ee_offset)
if __name__ == "__main__":
# Change test case number for different scenarios
test_code(TESTPOS) | [
"[email protected]"
] | |
a5c4a9ab8b33c00f60586f005f74e7940bfcf0e5 | 8f8993442180e447d746fc689b86ad8c916a3456 | /samples/api_regular.py | 868ccce8e8152184a905956e7d747a2a480b278d | [
"MIT"
] | permissive | obolary/td-ameritrade-python-api | 487c9dc858eee6a8094161eac453d32460882b30 | 8cb4581327cad918be2e9c6f3549eceb9be2e93f | refs/heads/develop | 2022-11-12T05:39:53.219461 | 2020-07-05T19:36:51 | 2020-07-05T19:36:51 | 266,371,592 | 0 | 0 | MIT | 2020-06-04T14:32:10 | 2020-05-23T16:05:23 | Python | UTF-8 | Python | false | false | 2,772 | py | from datetime import datetime
from datetime import timedelta
from td.client import TDClient
# Create a new session
TDSession = TDClient(
client_id='<CLIENT_ID>',
redirect_uri='<REDIRECT_URI>',
credentials_path='<CREDENTIALS_PATH>'
)
# Login to the session
TDSession.login()
# `get_quotes` endpoint with single value. Should not return an error.
quotes_single = TDSession.get_quotes(instruments=['SQ'])
# `get_quotes` with a Options Contract
quotes_option_contract = TDSession.get_quotes(instruments=['MSFT_041720C75'])
# `get_quotes` with a Futures Contract
quotes_futures = TDSession.get_quotes(instruments=['/ES'])
# `get_quotes` with Forex
quotes_forex = TDSession.get_quotes(instruments=['AUD/USD'])
# `get_quotes` endpoint with multiple values
quotes_multi = TDSession.get_quotes(instruments=['SQ', 'MSFT'])
# `search_instruments` Endpoint
instrument_search_data = TDSession.search_instruments(
symbol='MSFT',
projection='symbol-search'
)
# `get_movers` Endpoint
movers_data = TDSession.get_movers(
market='$DJI',
direction='up',
change='value'
)
# `get_instruments` Endpoint
get_instrument_data = TDSession.get_instruments(cusip='594918104')
# `get_market_hours` Endpoint with multiple values
market_hours_multi = TDSession.get_market_hours(
markets=['EQUITY','FOREX'],
date=datetime.today().isoformat()
)
# `get_accounts` Endpoint with single values
accounts_data_single = TDSession.get_accounts(
account='<ACCOUNT_NUMBER>',
fields=['orders']
)
# `get_accounts` Endpoint with single values
accounts_data_multi = TDSession.get_accounts(
account='all',
fields=['orders']
)
# `get_transactions` Endpoint. Should not return an error
transaction_data_multi = TDSession.get_transactions(
account='<ACCOUNT_NUMBER>',
transaction_type='ALL'
)
# `get_preferences` endpoint. Should not return an error
preference_data = TDSession.get_preferences(account='<ACCOUNT_NUMBER>')
# `get_subscription_keys` endpoint. Should not return an error
streamer_keys = TDSession.get_streamer_subscription_keys(accounts=['<ACCOUNT_NUMBER>'])
# `get_user_ principals` endpoint. Should not return an error.
prinicpals_data = TDSession.get_user_principals(fields=['preferences', 'surrogateIds'])
# `get_transactions` Endpoint with single values
transaction_data_single = TDSession.get_transactions(transaction_id='YOUR_TRANSACTION_ID')
# Option Chain Example
opt_chain = {
'symbol':'MSFT',
'contractType':'CALL',
'optionType':'S',
'fromDate':'2020-04-01',
'afterDate':'2020-05-01',
'strikeCount':4,
'includeQuotes':True,
'range':'ITM',
'strategy':'ANALYTICAL',
'volatility': 29.0
}
# Get Option Chains
option_chains = TDSession.get_options_chain(option_chain=opt_chain)
| [
"[email protected]"
] | |
723956c0d14d5a0e70b082e4bb4e61bbc1e216f5 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/twisted/internet/test/test_udp.py | 53bb2d399a240a70ae78373f7d74b450b9fcd29d | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 16,101 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorUDP} and the UDP parts of
L{IReactorSocket}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import socket
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python import context
from twisted.python.log import ILogContext, err
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.interfaces import (
ILoggingContext, IListeningPort, IReactorUDP, IReactorSocket)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.test.connectionmixins import (LogObserverMixin,
findFreePort)
from twisted.internet import defer, error
from twisted.test.test_udp import Server, GoodClient
from twisted.trial.unittest import SkipTest
class DatagramTransportTestsMixin(LogObserverMixin):
"""
Mixin defining tests which apply to any port/datagram based transport.
"""
def test_startedListeningLogMessage(self):
"""
When a port starts, a message including a description of the associated
protocol is logged.
"""
loggedMessages = self.observe()
reactor = self.buildReactor()
@implementer(ILoggingContext)
class SomeProtocol(DatagramProtocol):
def logPrefix(self):
return "Crazy Protocol"
protocol = SomeProtocol()
p = self.getListeningPort(reactor, protocol)
expectedMessage = "Crazy Protocol starting on %d" % (p.getHost().port,)
self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
def test_connectionLostLogMessage(self):
"""
When a connection is lost a message is logged containing an
address identifying the port and the fact that it was closed.
"""
loggedMessages = self.observe()
reactor = self.buildReactor()
p = self.getListeningPort(reactor, DatagramProtocol())
expectedMessage = "(UDP Port %s Closed)" % (p.getHost().port,)
def stopReactor(ignored):
reactor.stop()
def doStopListening():
del loggedMessages[:]
maybeDeferred(p.stopListening).addCallback(stopReactor)
reactor.callWhenRunning(doStopListening)
self.runReactor(reactor)
self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
def test_stopProtocolScheduling(self):
"""
L{DatagramProtocol.stopProtocol} is called asynchronously (ie, not
re-entrantly) when C{stopListening} is used to stop the datagram
transport.
"""
class DisconnectingProtocol(DatagramProtocol):
started = False
stopped = False
inStartProtocol = False
stoppedInStart = False
def startProtocol(self):
self.started = True
self.inStartProtocol = True
self.transport.stopListening()
self.inStartProtocol = False
def stopProtocol(self):
self.stopped = True
self.stoppedInStart = self.inStartProtocol
reactor.stop()
reactor = self.buildReactor()
protocol = DisconnectingProtocol()
self.getListeningPort(reactor, protocol)
self.runReactor(reactor)
self.assertTrue(protocol.started)
self.assertTrue(protocol.stopped)
self.assertFalse(protocol.stoppedInStart)
class UDPPortTestsMixin(object):
"""
Tests for L{IReactorUDP.listenUDP} and
L{IReactorSocket.adoptDatagramPort}.
"""
def test_interface(self):
"""
L{IReactorUDP.listenUDP} returns an object providing L{IListeningPort}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertTrue(verifyObject(IListeningPort, port))
def test_getHost(self):
"""
L{IListeningPort.getHost} returns an L{IPv4Address} giving a
dotted-quad of the IPv4 address the port is listening on as well as
the port number.
"""
host, portNumber = findFreePort(type=socket.SOCK_DGRAM)
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), port=portNumber, interface=host)
self.assertEqual(
port.getHost(), IPv4Address('UDP', host, portNumber))
def test_getHostIPv6(self):
"""
L{IListeningPort.getHost} returns an L{IPv6Address} when listening on
an IPv6 interface.
"""
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), interface='::1')
addr = port.getHost()
self.assertEqual(addr.host, "::1")
self.assertIsInstance(addr, IPv6Address)
def test_invalidInterface(self):
"""
An L{InvalidAddressError} is raised when trying to listen on an address
that isn't a valid IPv4 or IPv6 address.
"""
reactor = self.buildReactor()
self.assertRaises(
error.InvalidAddressError, reactor.listenUDP, DatagramProtocol(),
0, interface='example.com')
def test_logPrefix(self):
"""
Datagram transports implement L{ILoggingContext.logPrefix} to return a
message reflecting the protocol they are running.
"""
class CustomLogPrefixDatagramProtocol(DatagramProtocol):
def __init__(self, prefix):
self._prefix = prefix
self.system = Deferred()
def logPrefix(self):
return self._prefix
def datagramReceived(self, bytes, addr):
if self.system is not None:
system = self.system
self.system = None
system.callback(context.get(ILogContext)["system"])
reactor = self.buildReactor()
protocol = CustomLogPrefixDatagramProtocol("Custom Datagrams")
d = protocol.system
port = self.getListeningPort(reactor, protocol)
address = port.getHost()
def gotSystem(system):
self.assertEqual("Custom Datagrams (UDP)", system)
d.addCallback(gotSystem)
d.addErrback(err)
d.addCallback(lambda ignored: reactor.stop())
port.write(b"some bytes", ('127.0.0.1', address.port))
self.runReactor(reactor)
def test_str(self):
"""
C{str()} on the listening port object includes the port number.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertIn(str(port.getHost().port), str(port))
def test_repr(self):
"""
C{repr()} on the listening port object includes the port number.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertIn(repr(port.getHost().port), str(port))
def test_writeToIPv6Interface(self):
"""
Writing to an IPv6 UDP socket on the loopback interface succeeds.
"""
reactor = self.buildReactor()
server = Server()
serverStarted = server.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, server, interface="::1")
client = GoodClient()
clientStarted = client.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, client, interface="::1")
cAddr = client.transport.getHost()
def cbClientStarted(ignored):
"""
Send a datagram from the client once it's started.
@param ignored: a list of C{[None, None]}, which is ignored
@returns: a deferred which fires when the server has received a
datagram.
"""
client.transport.write(
b"spam", ("::1", server.transport.getHost().port))
serverReceived = server.packetReceived = defer.Deferred()
return serverReceived
def cbServerReceived(ignored):
"""
Stop the reactor after a datagram is received.
@param ignored: C{None}, which is ignored
@returns: C{None}
"""
reactor.stop()
d = defer.gatherResults([serverStarted, clientStarted])
d.addCallback(cbClientStarted)
d.addCallback(cbServerReceived)
d.addErrback(err)
self.runReactor(reactor)
packet = server.packets[0]
self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))
def test_connectedWriteToIPv6Interface(self):
"""
An IPv6 address can be passed as the C{interface} argument to
L{listenUDP}. The resulting Port accepts IPv6 datagrams.
"""
reactor = self.buildReactor()
server = Server()
serverStarted = server.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, server, interface="::1")
client = GoodClient()
clientStarted = client.startedDeferred = defer.Deferred()
self.getListeningPort(reactor, client, interface="::1")
cAddr = client.transport.getHost()
def cbClientStarted(ignored):
"""
Send a datagram from the client once it's started.
@param ignored: a list of C{[None, None]}, which is ignored
@returns: a deferred which fires when the server has received a
datagram.
"""
client.transport.connect("::1", server.transport.getHost().port)
client.transport.write(b"spam")
serverReceived = server.packetReceived = defer.Deferred()
return serverReceived
def cbServerReceived(ignored):
"""
Stop the reactor after a datagram is received.
@param ignored: C{None}, which is ignored
@returns: C{None}
"""
reactor.stop()
d = defer.gatherResults([serverStarted, clientStarted])
d.addCallback(cbClientStarted)
d.addCallback(cbServerReceived)
d.addErrback(err)
self.runReactor(reactor)
packet = server.packets[0]
self.assertEqual(packet, (b'spam', (cAddr.host, cAddr.port)))
def test_writingToHostnameRaisesInvalidAddressError(self):
"""
Writing to a hostname instead of an IP address will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertRaises(
error.InvalidAddressError,
port.write, 'spam', ('example.invalid', 1))
def test_writingToIPv6OnIPv4RaisesInvalidAddressError(self):
"""
Writing to an IPv6 address on an IPv4 socket will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), interface="127.0.0.1")
self.assertRaises(
error.InvalidAddressError, port.write, 'spam', ('::1', 1))
def test_writingToIPv4OnIPv6RaisesInvalidAddressError(self):
"""
Writing to an IPv6 address on an IPv4 socket will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(
reactor, DatagramProtocol(), interface="::1")
self.assertRaises(
error.InvalidAddressError, port.write, 'spam', ('127.0.0.1', 1))
def test_connectingToHostnameRaisesInvalidAddressError(self):
"""
Connecting to a hostname instead of an IP address will raise an
L{InvalidAddressError}.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
self.assertRaises(
error.InvalidAddressError, port.connect, 'example.invalid', 1)
def test_allowBroadcast(self):
"""
L{IListeningPort.setBroadcastAllowed} sets broadcast to be allowed
on the socket.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, DatagramProtocol())
port.setBroadcastAllowed(True)
self.assertTrue(port.getBroadcastAllowed())
class UDPServerTestsBuilder(ReactorBuilder,
UDPPortTestsMixin, DatagramTransportTestsMixin):
"""
Run L{UDPPortTestsMixin} tests using newly created UDP
sockets.
"""
requiredInterfaces = (IReactorUDP,)
def getListeningPort(self, reactor, protocol, port=0, interface='',
maxPacketSize=8192):
"""
Get a UDP port from a reactor.
@param reactor: A reactor used to build the returned
L{IListeningPort} provider.
@type reactor: L{twisted.internet.interfaces.IReactorUDP}
@see: L{twisted.internet.IReactorUDP.listenUDP} for other
argument and return types.
"""
return reactor.listenUDP(port, protocol, interface=interface,
maxPacketSize=maxPacketSize)
class UDPFDServerTestsBuilder(ReactorBuilder,
UDPPortTestsMixin, DatagramTransportTestsMixin):
"""
Run L{UDPPortTestsMixin} tests using adopted UDP sockets.
"""
requiredInterfaces = (IReactorSocket,)
def getListeningPort(self, reactor, protocol, port=0, interface='',
maxPacketSize=8192):
"""
Get a UDP port from a reactor, wrapping an already-initialized file
descriptor.
@param reactor: A reactor used to build the returned
L{IListeningPort} provider.
@type reactor: L{twisted.internet.interfaces.IReactorSocket}
@param port: A port number to which the adopted socket will be
bound.
@type port: C{int}
@param interface: The local IPv4 or IPv6 address to which the
adopted socket will be bound. defaults to '', ie all IPv4
addresses.
@type interface: C{str}
@see: L{twisted.internet.IReactorSocket.adoptDatagramPort} for other
argument and return types.
"""
if IReactorSocket.providedBy(reactor):
if ':' in interface:
domain = socket.AF_INET6
address = socket.getaddrinfo(interface, port)[0][4]
else:
domain = socket.AF_INET
address = (interface, port)
portSock = socket.socket(domain, socket.SOCK_DGRAM)
portSock.bind(address)
portSock.setblocking(False)
try:
return reactor.adoptDatagramPort(
portSock.fileno(), portSock.family, protocol,
maxPacketSize)
finally:
# The socket should still be open; fileno will raise if it is
# not.
portSock.fileno()
# Now clean it up, because the rest of the test does not need
# it.
portSock.close()
else:
raise SkipTest("Reactor does not provide IReactorSocket")
globals().update(UDPServerTestsBuilder.makeTestCaseClasses())
globals().update(UDPFDServerTestsBuilder.makeTestCaseClasses())
| [
"l”[email protected]“"
] | |
9e2381c74c6d90c4c02f31f16cb86d1e9b46b85e | bd7434baa7008ff67bf560ddb6072e641b7e75d5 | /player_teacher_same.py | 61e6a076e17d66ad4954b94afd2077c5eba65cf3 | [] | no_license | davidhalladay/Numerical-Analysis-Tournament | a209e8ea85f87a4f1e0baa7a3ae100e29a968520 | 26f1ca78ab28ef3ca0e2a78bcd17ab972c89423b | refs/heads/master | 2020-05-25T13:20:19.636199 | 2019-10-08T07:03:26 | 2019-10-08T07:03:26 | 187,819,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,413 | py | import numpy as np
class player_module:
# constructor, allocate any private data here
def __init__(self):
self.init_x, self.init_y = -1., -1.
# Please update the banner according to your information
def banner(self):
print('-'*40)
print('Author: Wan-Cyuan Fan')
print('ID: b04502105')
print('-'*40)
# Decision making function for moving your ship, toward next frame:
# simply return the speed and the angle
# ----------------------------------------------
# The value of "speed" must be between 0 and 1.
# speed = 1 : full speed, moving 0.01 in terms of space coordination in next frame
# speed = x : moving 0.01*x in terms of space coordination
# speed = 0 : just don't move
#
# The value of angle must be between 0 and 2*pi.
#
# if speed is less than 1, it will store the gauge value by 4*(1-speed).
# If the gauge value reaches 1000, it will perform the "gauge attack" and destroy
# any enemy within a circle of 0.6 radius
#
def decision(self,player_data, enemy_data):
speed, angle = 0., 0.
balence_y = 0.2
# your data
player1_x = player_data[0][0]
player1_y = player_data[0][1]
player1_hp = player_data[0][2]
player1_score = player_data[0][3]
player1_gauge = player_data[0][4]
player1_weapon = player_data[0][5]
# data for another player
player2_x = player_data[1][0]
player2_y = player_data[1][1]
player2_hp = player_data[1][2]
player2_score = player_data[1][3]
player2_gauge = player_data[1][4]
player2_weapon = player_data[1][5]
# save the initial x position
if self.init_x==-1. and self.init_y==-1.:
self.init_x, self.init_y = player1_x, player1_y
# let's try to move back to the initial position by default
speed = ((self.init_x-player1_x)**2 + (self.init_y-player1_y)**2)**0.5
speed /= 0.01 # since the maximum speed is 0.01 unit per frame
if speed>1.: speed = 1.
angle = np.arctan2(self.init_y-player1_y,self.init_x-player1_x)
"""
for i in enemy_data:
enemy = []
minx = np.inf
miny = np.inf
if i[0] in [1,2,3,4,5]:
enemy.append(i)
for e in enemy:
if e[2] < miny:
miny = e[2]
minx = e[1]
"""
# loop over the enemies and bullets
for data in enemy_data:
will_be_hit = False
type = data[0] # 0 - bullet, 1..4 - different types of invaders, 5 - ufo, 6 - boss, 7 - rescuecap, 8 - weaponup
invaders = [1,2,3,4,5]
boss = [6]
good = [7,8]
x = data[1]
y = data[2]
dx = data[3] # expected movement in x direction for the next frame
dy = data[4] # expected movement in y direction for the next frame
# calculate the distance toward player1
dist = ((x-player1_x)**2+(y-player1_y)**2)**0.5
# judge whether the bullet will hit player1
# Decision 1 : player will be hit and bullet is inside safety region
#################################################
##### dodging bullet Strategy
#################################################
if (type in [0]) and dist < 0.212:
print("WARNING! You will be hit!")
will_be_hit = True
# calculate the collision point coordinates
m_data = dy/dx
x_collision = (player1_y - y + dx/dy * player1_x + dy/dx * x)/(m_data + 1./m_data)
y_collision = (player1_x - x + dy/dx * player1_y + dx/dy * y)/(m_data + 1./m_data)
# dis_PL = abs(dy*player1_x - dx*player1_y + dx*y - dy*x)/(dy**2 + dx**2)*0.5
safe_dist = ((x_collision - x)**2+(y_collision - y)**2)**0.5
# check safety
if safe_dist < 0.001:
speed = 0.
continue
else:
speed = 1.0
# bullet will hit the left-down side
if x_collision < player1_x and y_collision < player1_y :
angle = np.arctan2(abs(player1_y-y_collision),abs(player1_x-x_collision))
# bullet will hit the right-down side
elif x_collision >= player1_x and y_collision < player1_y :
angle = np.arctan2(abs(player1_y-y_collision),player1_x-x_collision)
# bullet will hit the left-up side
elif x_collision < player1_x and y_collision >= player1_y :
angle = np.arctan2((player1_y-y_collision),abs(player1_x-x_collision))
# bullet will hit the right-up side
else :
angle = np.arctan2((player1_y-y_collision),(player1_x-x_collision))
break
if type == 6 :
if dist < 0.27:
speed = 1.0
if x > player1_x: angle = np.pi
if x < player1_x: angle = 0
break
elif y > 0.8 and player1_y > 0.8:
speed = 1.0
angle = 3*np.pi/2.
break
if (type in [1,2,3,4]) and dist < 0.1:
print("WARNING! You will be hit!")
will_be_hit = True
# calculate the collision point coordinates
m_data = dy/dx
x_collision = (player1_y - y + dx/dy * player1_x + dy/dx * x)/(m_data + 1./m_data)
y_collision = (player1_x - x + dy/dx * player1_y + dx/dy * y)/(m_data + 1./m_data)
# dis_PL = abs(dy*player1_x - dx*player1_y + dx*y - dy*x)/(dy**2 + dx**2)*0.5
safe_dist = ((x_collision - x)**2+(y_collision - y)**2)**0.5
# check safety
if safe_dist < 0.001:
speed = 0.
continue
else:
speed = 1.0
# bullet will hit the left-down side
if x_collision < player1_x and y_collision < player1_y :
angle = np.arctan2(abs(player1_y-y_collision),abs(player1_x-x_collision))
# bullet will hit the right-down side
elif x_collision >= player1_x and y_collision < player1_y :
angle = np.arctan2(abs(player1_y-y_collision),player1_x-x_collision)
# bullet will hit the left-up side
elif x_collision < player1_x and y_collision >= player1_y :
angle = np.arctan2((player1_y-y_collision),abs(player1_x-x_collision))
# bullet will hit the right-up side
else :
angle = np.arctan2((player1_y-y_collision),(player1_x-x_collision))
break
# eating weapon
if type == 8 and player1_weapon <= 5:
if abs(player1_x - x) >= 0.3 and abs(player1_y - y) < 0.4:
if player1_y < player2_y and player1_y < 0.4:
speed = ((x - player1_x)**2 + (y - player1_y)**2)**0.5
speed /= 0.01 # since the maximum speed is 0.01 unit per frame
if speed>1.: speed = 1.
angle = np.arctan2(y-player1_y,x-player1_x)
break
elif player1_y > y:
speed = ((x - player1_x)**2 + (y - player1_y)**2)**0.5
speed /= 0.01 # since the maximum speed is 0.01 unit per frame
if speed>1.: speed = 1.
angle = np.arctan2(y-player1_y,x-player1_x)
break
else:
speed = 1.0
if x>player1_x: angle = 0. # run to right
if x<player1_x: angle = np.pi # run to left
break
elif 0.3 > abs(player1_x - x) >= 0. and abs(player1_y - y) < 0.1:
if player1_y < player2_y and player1_y < 0.4:
speed = 1.0
speed = ((x - player1_x)**2 + (y - player1_y)**2)**0.5
speed /= 0.01 # since the maximum speed is 0.01 unit per frame
if speed>1.: speed = 1.
angle = np.arctan2(y-player1_y,x-player1_x)
break
elif player1_y > y:
speed = ((x - player1_x)**2 + (y - player1_y)**2)**0.5
speed /= 0.01 # since the maximum speed is 0.01 unit per frame
if speed>1.: speed = 1.
angle = np.arctan2(y-player1_y,x-player1_x)
break
else:
speed = 1.0
if x>player1_x: angle = 0. # run to right
if x<player1_x: angle = np.pi # run to left
break
# eating rescuecap
if type == 7 and player1_hp < 12:
if abs(player1_x - x) >= 0.4 and abs(player1_y - y) < 0.4:
if player1_y < player2_y and player1_y < 0.4:
speed = 1.0
if x>player1_x: angle = np.arctan2(1,1) # run to right
if x<player1_x: angle = np.arctan2(1,-1) # run to left
break
else:
speed = 1.0
if x>player1_x: angle = 0. # run to right
if x<player1_x: angle = np.pi # run to left
break
elif abs(player1_x - x) >= 0. and abs(player1_y - y) < 0.1:
if player1_y < player2_y and player1_y < 0.4:
speed = 1.0
if x>player1_x: angle = np.arctan2(1,1) # run to right
if x<player1_x: angle = np.arctan2(1,-1) # run to left
break
else:
speed = 1.0
if x>player1_x: angle = 0. # run to right
if x<player1_x: angle = np.pi # run to left
break
#################################################
##### attacking enemy Strategy
#################################################
# if the boss is coming
if type in boss :
speed = abs(x-player1_x)
speed /= 0.01
if speed>1.: speed = 1.
if player1_y > 0.2:
if x>player1_x: angle = np.arctan2(-0.3,1.) # escape to right
if x<player1_x: angle = np.arctan2(-0.3,-1) # escape to left
else:
if x>player1_x: angle = np.arctan2(0.3,1.) # escape to right
if x<player1_x: angle = np.arctan2(0.3,-1.) # escape to left
continue
# if there is an enemy and is close enough, attack it
if type!=7 and type!=8 and dist >= 0.25 and dist < 0.9:
speed = abs(x-player1_x)
speed /= 0.01
if speed>1.: speed = 1.
if player1_y > 0.2:
if x>player1_x: angle = np.arctan2(-0.3,1.) # escape to right
if x<player1_x: angle = np.arctan2(-0.3,-1) # escape to left
else:
if x>player1_x: angle = np.arctan2(0.3,1.) # escape to right
if x<player1_x: angle = np.arctan2(0.3,-1.) # escape to left
continue
"""
if (type in invaders):
if (0. < player1_x < 0.1) and dist >= 0.2:
speed = ((0.3-player1_x)**2 + (0.2-player1_y)**2)**0.5
speed /= 0.01 # since the maximum speed is 0.01 unit per frame
if speed > 1.: speed = 1.
angle = np.arctan2(self.init_y-player1_y,self.init_x-player1_x)
continue
elif 0. < player1_x < 0.1 and dist < 0.2 :
speed = 1.
if dy >= 0:
angle = 3*np.pi/2.
continue
else:
angle = np.pi/2.
continue
elif 0. < player1_x < 0.1 and dist < 0.1 :
if x < player1_x: angle = 0. # escape to right
elif x >= player1_x: angle = np.pi # escape to left
continue
elif player1_x < 0. and dist < 0.1:
speed = ((0.3-player1_x)**2 + (0.2-player1_y)**2)**0.5
speed /= 0.01 # since the maximum speed is 0.01 unit per frame
if speed > 1.: speed = 1.
angle = np.arctan2(self.init_y-player1_y,self.init_x-player1_x)
break
"""
return speed, angle
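# --- Hedged usage sketch (illustrative, not part of the original file) ---
# Assumes the game engine passes player_data rows as
# [x, y, hp, score, gauge, weapon] and enemy_data rows as
# [type, x, y, dx, dy], matching the indexing used in decision() above.
if __name__ == '__main__':
    pm = player_module()
    pm.banner()
    players = [[0.3, 0.2, 12, 0, 0., 1], [0.7, 0.2, 12, 0, 0., 1]]
    enemies = [[0, 0.31, 0.30, -0.003, -0.01]]  # one bullet closing on player 1
    speed, angle = pm.decision(players, enemies)
    print('speed = %.3f, angle = %.3f rad' % (speed, angle))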
| [
"[email protected]"
] | |
1cf513e702f8d5dc4c104346b894baeb1597baff | bc3ea173f4f5c9b781f9aaa3c115aa9e42125a8e | /VapixActionClient/src/actionStateToggle.py | 581d16f91e3e1c63920320f0f19d5855fc953c6c | [] | no_license | bernhara/AxisVapixSoapCLient | 1c99e82fe241ead59ba32f92f4346f856fd37b43 | 3c6ae698fd9273f0aba8007a0981414df742a4b8 | refs/heads/master | 2020-04-26T02:57:57.801013 | 2019-03-18T12:04:44 | 2019-03-18T12:04:44 | 173,251,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,105 | py |
import sys
import os
import datetime # needed by the zeep test-suite snippets appended at the end of this file
if __name__ == '__main__':
pass
from zeep import Client
from zeep import Settings
from zeep.transports import Transport
from zeep.cache import SqliteCache
from zeep import xsd
from lxml import etree
from pretend import stub
import requests
import logging.config
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'zeep.transports': {
'level': 'DEBUG',
'propagate': True,
'handlers': ['console'],
},
}
})
# create zeep transport
transport = Transport()
# configure proxies
transport.session.proxies = {
'http': 'localhost:8888',
'https': 'localhost:8888',
}
os.environ["http_proxy"] = 'http://localhost:8888'
os.environ["https_proxy"] = 'http://localhost:8888'
# configure authentication
user = os.environ["axis_soap_user"]
password = os.environ["axis_soap_password"]
auth = requests.auth.HTTPDigestAuth(user, password)
transport.session.auth = auth
# configure cache
cache = SqliteCache(timeout=3600)
transport.cache = cache
url= "http://axis-mk2.home/wsdl/vapix/ActionService.wsdl"
#url = "http://www.axis.com/vapix/ws/EntryService.wsdl"
# http://www.axis.com/vapix/ws/action1/ActionService.wsdl
# Define the address, user name and password for the Axis product. <ip-address> is an IP address or host name.
# string address="<ip-address>";
# string username="<user name>";
# string password="<password>";
#
# // Define the namespaces of the event and action services.
# string eventTargetNamespace = "http://www.axis.com/vapix/ws/event1";
# string actionTargetNamespace = "http://www.axis.com/vapix/ws/action1";
#
# // Create an Entry Service client.
# EntryClient myEntryService = CreateEntryServiceClient(address, username, password);
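# A rough zeep equivalent of the C# sketch above (illustrative only; it reuses
# the public EntryService WSDL URL already mentioned near the top of this file):
#   entry_client = Client("http://www.axis.com/vapix/ws/EntryService.wsdl",
#                         transport=transport)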
# url="file:///C:/Users/bibi/EclipseWorkspaces/TMP/CamTest/etc/ActionServiceWithServiceDef.wsdl"
settings = Settings(strict=False, raw_response=False)
client = Client(url, transport=transport, settings=settings)
service = client.create_service(
'{http://www.axis.com/vapix/ws/action1}ActionBinding',
'http://axis-mk2.home/vapix/services')
client.set_ns_prefix('aa', "http://www.axis.com/vapix/ws/action1")
client.set_ns_prefix('wsnt', "http://docs.oasis-open.org/wsn/b-2")
client.set_ns_prefix('tns1', "http://www.onvif.org/ver10/topics")
client.set_ns_prefix('tnsaxis', "http://www.axis.com/2009/event/topics")
# client.set_ns_prefix('aev', "http://www.axis.com/vapix/ws/event1")
# http://axis-mk2.home/wsdl/vapix/ActionService.wsdl?timestamp=1550949556124
NewActionRule_type = client.get_type('ns0:NewActionRule')
Conditions_type = client.get_type('aa:Conditions')
TopicExpressionType_type = client.get_type('wsnt:TopicExpressionType')
FilterType_type = client.get_type('wsnt:FilterType')
# MessageContent_type = client.get_type('wsnt:MessageContent')
# <!--===============================-->
#
# <xs:complexType name="Conditions">
# <xs:sequence>
# <xs:element name="Condition"
# type="wsnt:FilterType"
# minOccurs="1"
# maxOccurs="unbounded" />
# </xs:sequence>
# </xs:complexType>
rules = service.GetActionRules()
for r in rules:
print (r['Name'])
ze_rule_list = (r for r in rules if r['Name'] == 'SendAutoTrack_tmpl')
ze_rule = next(ze_rule_list)
assert ze_rule is not None, "Rule SendAutoTrack_tmpl not found!"
#ze_rule = next (r for r in rules if r['Name'] == 'SendAutoTrack')
Conditions = ze_rule['Conditions']
Condition_seq=Conditions['Condition']
Condition_0=Condition_seq[0]
filterType_0_0_seq=Condition_0['_value_1']
any_0=xsd.AnyObject(FilterType_type, filterType_0_0_seq[0])
any_1=xsd.AnyObject(FilterType_type, filterType_0_0_seq[1])
Condition_0_NEW=FilterType_type([any_0, any_1])
Condition_seq_NEW = [Condition_0_NEW]
Conditions_NEW = Conditions_type(Condition_0_NEW)
# Conditions_NEW["Condition"] = {"_raw_elements": '''<wsnt:TopicExpression Dialect="http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet">
# tns1:RuleEngine/tnsaxis:DigitalAutotracking/tracking//.
# </wsnt:TopicExpression>
# <wsnt:MessageContent Dialect="http://www.onvif.org/ver10/tev/messageContentFilter/ItemFilter">
# boolean(//SimpleItem[@Name="active" and @Value="1"])
# </wsnt:MessageContent>'''}
# Conditions_NEW = Conditions_type([Condition_0])
source_rule_to_clone = ze_rule
newActioRule = NewActionRule_type (Name=source_rule_to_clone['Name']+'2',
Enabled=source_rule_to_clone['Enabled'],
PrimaryAction=source_rule_to_clone['PrimaryAction'],
StartEvent=source_rule_to_clone['StartEvent'],
Conditions=Conditions_NEW
# ActivationTimeout=source_rule_to_clone['ActivationTimeout'],
# FailoverAction=source_rule_to_clone['FailoverAction']
)
add_result = service.AddActionRule (NewActionRule=newActioRule)
newFilterType_1 = xsd.AnyObject(xsd.String(), '1234')
newFilterTypes = [ [ newFilterType_1 ] ]
class MyRendered (object):
localname = 'my_local_name'
tt = '''<wsnt:TopicExpression Dialect="http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet">
tns1:RuleEngine/tnsaxis:DigitalAutotracking/tracking//.
</wsnt:TopicExpression>
<wsnt:MessageContent Dialect="http://www.onvif.org/ver10/tev/messageContentFilter/ItemFilter">
boolean(//SimpleItem[@Name="active" and @Value="1"])
</wsnt:MessageContent>
</Condition>'''
nb_iter = None
def __init__(self):
self.nb_iter = 0
def __iter__(self):
print ("++++ ITERATE")
return self
def __next__(self):
print ("++++ next element")
if (self.nb_iter >= 1):
raise StopIteration
self.nb_iter += 1
return self.tt
zz = xsd.String()
my_string=xsd.SkipValue
value = [ xsd.AnyType(MyRendered) ]
newConditionsRebuilt = Conditions_type (value)
newConditions = MyRendered ()
import zeep.xsd.types.builtins
# class RawXmlString(zeep.xsd.types.builtins.BuiltinType,
# zeep.xsd.types.builtins.AnySimpleType):
# _default_qname = xsd_ns('string')
# accepted_types = six.string_types
#
# @check_no_collection
# def xmlvalue(self, value):
# if isinstance(value, bytes):
# return value.decode('utf-8')
# return six.text_type(value if value is not None else '')
#
# def pythonvalue(self, value):
# return value
template_rule = ze_rule
newActioRule = NewActionRule_type (Name=template_rule['Name']+'2',
Enabled=template_rule['Enabled'],
PrimaryAction=template_rule['PrimaryAction'],
StartEvent=template_rule['StartEvent'],
Conditions=newConditions
# ActivationTimeout=template_rule['ActivationTimeout'],
# FailoverAction=template_rule['FailoverAction']
)
# service.create_message(NewActionRule=newActioRule)
# node = client.create_message(service, 'AddActionRule', NewActionRule=newActioRule)
add_result = service.AddActionRule (NewActionRule=newActioRule)
sys.exit(1) # NOTE: everything below this early exit is unreachable exploratory scratch
AddActionRule_type = client.get_type('{http://www.axis.com/vapix/ws/action1}AddActionRule')
NewActionRule_type = client.get_type('{http://www.axis.com/vapix/ws/action1}NewActionRule')
#!! addActionRuleRequest_type = client.get_message('{http://www.axis.com/vapix/ws/action1}AddActionRuleRequest')
rules = service.GetActionRules()
# rr = service.RemoveActionRule ()
# cc = service.AddActionRule ()
tt = NewActionRule_type()
zz = AddActionRule_type(tt)
service.create_message(service, operation_name)
client.create_message(service, operation_name)
tt = new_rule_type()
node = client.create_message(service, 'AddActionRule', tt)
for rule in rules:
name = rule['Name']
if name == 'TTT':
rule_id = rule['RuleID']
# !! remove_result = service.RemoveActionRule (rule_id)
new_rule = {}
new_rule['Name'] = rule['Name']
new_rule['Enabled'] = False
new_rule['StartEvent'] = rule['StartEvent']
new_rule['PrimaryAction'] = rule['PrimaryAction']
new_rule['Conditions'] = rule['Conditions']
new_rule['ActivationTimeout'] = rule['ActivationTimeout']
new_rule['FailoverAction'] = rule['FailoverAction']
tt = new_rule_type()
r = new_rule_type(Name=rule['Name'],
Enabled=False,
StartEvent=rule['StartEvent'],
PrimaryAction=rule['PrimaryAction'],
Conditions=rule['Conditions'],
ActivationTimeout=rule['ActivationTimeout'],
FailoverAction=rule['FailoverAction'],
)
node = client.create_message(service, 'AddActionRule', r)
add_result = service.AddActionRule (r)
x = 1
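# The two test functions below appear to be copied from zeep's own test suite;
# they depend on an assert_nodes_equal helper (defined in zeep's tests/utils)
# that is neither defined nor imported here, so they serve as reference material only.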
def test_element_any_type():
node = etree.fromstring(
"""
<?xml version="1.0"?>
<schema xmlns="http://www.w3.org/2001/XMLSchema"
xmlns:tns="http://tests.python-zeep.org/"
targetNamespace="http://tests.python-zeep.org/"
elementFormDefault="qualified">
<element name="container">
<complexType>
<sequence>
<element name="something" type="anyType"/>
</sequence>
</complexType>
</element>
</schema>
""".strip()
)
schema = xsd.Schema(node)
container_elm = schema.get_element("{http://tests.python-zeep.org/}container")
obj = container_elm(something=datetime.time(18, 29, 59))
node = etree.Element("document")
container_elm.render(node, obj)
expected = """
<document>
<ns0:container xmlns:ns0="http://tests.python-zeep.org/">
<ns0:something>18:29:59</ns0:something>
</ns0:container>
</document>
"""
assert_nodes_equal(expected, node)
item = container_elm.parse(list(node)[0], schema)
assert item.something == "18:29:59"
def test_element_any_type_elements():
node = etree.fromstring(
"""
<?xml version="1.0"?>
<schema xmlns="http://www.w3.org/2001/XMLSchema"
xmlns:tns="http://tests.python-zeep.org/"
targetNamespace="http://tests.python-zeep.org/"
elementFormDefault="qualified">
<element name="container">
<complexType>
<sequence>
<element name="something" type="anyType"/>
</sequence>
</complexType>
</element>
</schema>
""".strip()
)
schema = xsd.Schema(node)
Child = xsd.ComplexType(
xsd.Sequence(
[
xsd.Element("{http://tests.python-zeep.org/}item_1", xsd.String()),
xsd.Element("{http://tests.python-zeep.org/}item_2", xsd.String()),
]
)
)
child = Child(item_1="item-1", item_2="item-2")
container_elm = schema.get_element("{http://tests.python-zeep.org/}container")
obj = container_elm(something=child)
node = etree.Element("document")
container_elm.render(node, obj)
expected = """
<document>
<ns0:container xmlns:ns0="http://tests.python-zeep.org/">
<ns0:something>
<ns0:item_1>item-1</ns0:item_1>
<ns0:item_2>item-2</ns0:item_2>
</ns0:something>
</ns0:container>
</document>
"""
assert_nodes_equal(expected, node)
item = container_elm.parse(list(node)[0], schema)
assert len(item.something) == 2
assert item.something[0].text == "item-1"
assert item.something[1].text == "item-2"
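# A minimal follow-up sketch (assumption: zeep's public xsd API): for elements
# declared as xsd:anyType, the documented route is to wrap the value together
# with an explicit type via xsd.AnyObject instead of passing a bare object.
#
#   from zeep import xsd
#   obj = container_elm(something=xsd.AnyObject(xsd.String(), 'free-form text'))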
| [
"[email protected]"
] | |
b14b4aedc5e9ea740e072b98dae31c7d2a33a7e4 | 10bcaa1fc6c31e6f2436bbf61fae95cf34f3be8f | /Web_Development/Django_Ecommerce_Site/cart/models.py | f209358b57f436d51c083f8ef36dfb06cae184ea | [
"BSD-2-Clause"
] | permissive | FrezeSteve/Python | aa4ce8b625f0a3357f652a15f04646df2cd0b749 | 34a6eda815a339b4443e47b70ed96fff46f6f972 | refs/heads/master | 2020-06-14T00:44:27.019127 | 2019-07-10T05:24:05 | 2019-07-10T05:24:05 | 194,839,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | from django.db import models
from django.conf import settings
from products.models import Product
from django.db.models.signals import m2m_changed
User=settings.AUTH_USER_MODEL
# Create your models here.
class CartManager(models.Manager):
def new_or_get(self,request):
cart_id=request.session.get('cart_id',None)
qs=self.get_queryset().filter(id=cart_id)
if qs.count()==1:
new_obj=False
cart_obj=qs.first()
if request.user.is_authenticated() and cart_obj.user is None:
cart_obj.user=request.user
cart_obj.save()
else:
cart_obj=Cart.objects.new(user=request.user)
new_obj=True
request.session['cart_id']=cart_obj.id
return cart_obj,new_obj
def new(self,user=None):
user_obj=None
if user is not None:
if user.is_authenticated():
user_obj=user
return self.model.objects.create(user=user_obj)
class Cart(models.Model):
user=models.ForeignKey(User,null=True,blank=True)
products=models.ManyToManyField(Product,blank=True)
total=models.DecimalField(default=0.00,max_digits=100,decimal_places=2)
updated=models.DateTimeField(auto_now=True)
timestamp=models.DateTimeField(auto_now_add=True)
objects=CartManager()
def __str__(self):
return str(self.id)
def m2m_changed_cart_receiver(sender,instance,action,*args,**kwargs):
if action=='post_add'or action=='post_remove' or action=='post_clear':
products=instance.products.all()
total=0
for x in products:
total+=x.price
instance.total=total
instance.save()
m2m_changed.connect(m2m_changed_cart_receiver,sender=Cart.products.through)
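# Usage sketch (hypothetical view code): the manager keys the cart off the
# session, and the m2m_changed receiver above recomputes .total whenever
# products are added or removed.
#
#   def add_to_cart(request, product):
#       cart_obj, created = Cart.objects.new_or_get(request)
#       cart_obj.products.add(product)   # triggers the total recalculation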
| [
"[email protected]"
] | |
be85627b65caa911822bc43277bfffed80588664 | 3f578642cfd5f872705a1597efd0886c1f3fdbd6 | 06-字典的增删改查.py | bf0965428c72d5e188894a9c510ce04dad147752 | [] | no_license | danbao571/python- | cc5e13c31b30cd466506cfd06e63b44abedcda92 | 84c2bbd192e9079f5bb4806cf958f128a378bbcd | refs/heads/master | 2022-11-10T11:57:30.576662 | 2020-06-24T13:22:53 | 2020-06-24T13:27:43 | 274,671,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # Define an empty dictionary
my_dict = {}
print(my_dict)
# Add key-value pairs
my_dict["name"] = "张三"
my_dict["age"] = 22
my_dict["sex"] = '男'
my_dict["address"] = '北京'
print(my_dict)
# Modify a key-value pair: if the key already exists, assignment overwrites its value
my_dict["address"] = '上海'
print(my_dict)
# Delete a key-value pair
del my_dict['age']
print(my_dict)
# popitem() removes an arbitrary pair and returns (key, value) (the dict is unordered here)
# value = my_dict.popitem()
# pop(key) deletes the entry for the given key and returns its value
value = my_dict.pop('sex')
print(value, my_dict)
# Check whether a key exists
result = "age" in my_dict
print(result)
result = "age" in my_dict.keys()
print(result)
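# A small addition for safe reads: get() returns None (or a supplied default)
# instead of raising KeyError for missing keys
print(my_dict.get("age"))     # None -- "age" was deleted above
print(my_dict.get("age", 0))  # 0 when a default is supplied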
# Check membership among the dictionary's values
result = "张三" in my_dict.values()
print(result)
| [
"[email protected]"
] | |
f2a6f0a2026816781bc27cbd90d2fce727b01af6 | 7af70c0d9540544d0306806aa1b93fccfe108d88 | /2021/Day1/day1.py | 94f8837b3b2252357cd09fac0fca5f76bbb2f1c4 | [
"MIT"
] | permissive | dh256/adventofcode | 3a1cb72517b94318c9dbd7e2dad7396ab4341d7a | 03a4d4ba9b1b3add635710748bd79ddd4ef27231 | refs/heads/master | 2023-03-19T02:41:13.256806 | 2023-03-12T18:12:11 | 2023-03-12T18:12:11 | 250,533,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | from Sonar import Sonar
sonar = Sonar('input.txt')
print(f'Part 1: {sonar.increased}')
print(f'Part 2: {sonar.increased2}') | [
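# Sketch of what the Sonar helper presumably computes (AoC 2021 day 1; the
# class itself lives in Sonar.py and is not shown here):
#   increased  - count of depth readings larger than the previous reading
#   increased2 - the same count taken over sums of 3-reading sliding windows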
"[email protected]"
] | |
47926e4daee33e0da7880f6acca408647cbff254 | 97d9dc37b352e54bf140aeffbc7ed1e78cb588a7 | /pong.py | 08a452cba75a224bef0d6e8d762afc1855308ff7 | [] | no_license | HarshitaJain22/Games | 8b1d519525ff340b463d19aae7d507d18fedd17a | ab1efd7c489598f5dd2524e5df987ce490ac8da8 | refs/heads/master | 2022-11-20T08:11:59.561207 | 2020-07-30T10:33:27 | 2020-07-30T10:33:27 | 252,425,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import pygame
pygame.init()
window = pygame.display.set_mode((800, 600))
window.fill((0, 0, 0))
pygame.display.set_caption("PONG!")
paddle_ax = 20
paddle_ay = 250
paddle_bx = 760
paddle_by = 250
puck_x = 400
puck_y = 300
puck_speed = 20
vel_x = 20
vel_y = 20
width = 20
height = 100
vel = 30
fps = 5
clock = pygame.time.Clock()
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
keys = pygame.key.get_pressed()
if keys[pygame.K_UP] and paddle_by > 0:
paddle_by -= vel
if keys[pygame.K_DOWN] and paddle_by < 500:
paddle_by += vel
if keys[pygame.K_w] and paddle_ay > 0:
paddle_ay -= vel
if keys[pygame.K_s] and paddle_ay < 500:
paddle_ay += vel
    if (puck_y > 575) or ((puck_y - 10) < 10): # 575 and 10 are hard-coded out of necessity (10-px puck radius)
vel_y *= -1
if (puck_x > 780) or (puck_x < 20):
puck_x = 400
puck_y = 300
if (puck_x+10 in range(paddle_bx, paddle_bx+width)) and (puck_y in range(paddle_by , paddle_by+height)):
vel_x *= -1
if (puck_x-10 in range(paddle_ax, paddle_ax+width)) and (puck_y in range(paddle_ay , paddle_ay+height)):
vel_x *= -1
puck_x += vel_x
puck_y += vel_y
window.fill((0, 0, 0))
pygame.draw.rect(window, (255, 0, 0), (paddle_ax, paddle_ay, width, height))
pygame.draw.rect(window, (255, 0, 0), (paddle_bx, paddle_by, width, height))
pygame.draw.circle(window, (255, 255, 255), (puck_x, puck_y),10)
pygame.display.update()
clock.tick(fps)
pygame.quit()
| [
"[email protected]"
] | |
bc73efd02fb34e60c7ffe5e60cce6655a6e96d63 | e3f79f46609109eb65f4272179beba0a7a793f1a | /app.py | d0f0d596bfa76ee2c09414b3fed33f7d5784ee12 | [] | no_license | tothbalazsroland/tippmix-api-fetcher | e9ecc49c705c0f89a9480ac1fe16f0138c821425 | 3b34c775de696b03ad543743194024bcb2e3a648 | refs/heads/master | 2020-04-29T17:15:49.874861 | 2019-03-18T15:09:45 | 2019-03-18T15:09:45 | 176,291,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from flask import Flask
import urllib2
import json
from flask import jsonify
app = Flask(__name__)
tippmix_api_url = "http://api.tippmix.hu/tippmix/search"
@app.route('/')
def hello_world():
response = urllib2.urlopen(tippmix_api_url).read()
content = json.loads(response)
return jsonify(content)
if __name__ == '__main__':
app.run()
| [
"[email protected]"
] | |
67e249c349f5bf51fe0141127b41513711f0ad68 | 2098fa068c74cde86ba011f9291614ac4820d8e2 | /ynmt/binaries/default/preprocess.py | 65b1d6dd62b802b76d43a2d79781fe930d2c78b3 | [
"Apache-2.0"
] | permissive | zhangliang-04/YoungNMT | 2a4f6ca80e4745370f73d879bc48eee5937a870c | 51a0e66e874eeb50d226deb8c7e7b19d0a81783a | refs/heads/master | 2022-12-31T07:04:28.669360 | 2020-10-19T14:44:12 | 2020-10-19T14:44:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) Jason Young (杨郑鑫).
#
# E-Mail: <[email protected]>
# 2020-04-02 08:23
#
# This source code is licensed under the Apache-2.0 license found in the
# LICENSE file in the root directory of this source tree.
import ynmt.hocon.arguments as harg
from ynmt.tasks import build_task
from ynmt.utilities.random import fix_random_procedure
from ynmt.utilities.logging import setup_logger, logging_level
def preprocess(args):
logger = setup_logger(name=args.logger.name, logging_path=args.logger.path, logging_level=logging_level['INFO'])
logger.disabled = args.logger.off
fix_random_procedure(args.random_seed)
logger.info(f'Building Task: \'{args.task.name}\' ...')
task = build_task(args.task, logger)
logger.info(f'The construction of Task is complete.')
logger.info(f'Building Ancillary Datasets ...')
task.build_ancillary_datasets(args.task)
logger.info(f'The construction of Ancillary Datasets is complete.')
logger.info(f'Building Datasets ...')
task.build_datasets(args.task)
logger.info(f'The construction of Datasets is complete.')
logger.info(f' $ Finished !')
def main():
args = harg.get_arguments()
preprocess_args = harg.get_partial_arguments(args, 'binaries.preprocess')
preprocess_args.task = harg.get_partial_arguments(args, f'tasks.{preprocess_args.task}')
preprocess(preprocess_args)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8faa1ae4965ddfb2036e9493c630069c377eb43d | f8e21c77db87b7518b02c9a41718f56ed6a590b1 | /fullthrottle_assignment/urls.py | 55d1f144db140c96cd5972064848e096f6fe1d6c | [] | no_license | jay-prakash/user-activity-assignment | 2c55c9f8f4a06cbf956a8e87f132e85e11f0db27 | 2968e43f1f3550549993fa84aa31858fc9d61840 | refs/heads/master | 2022-11-29T07:00:31.695657 | 2020-08-09T16:33:13 | 2020-08-09T16:33:13 | 286,237,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('', include('user_activity.urls'))
]
| [
"[email protected]"
] | |
bef526ebb445d312eeee9697c1b18e8bde294a40 | 9413e8469c4e6c700c7a540e64f5d6bc08439cb2 | /LMSSystem/migrations/0039_auto_20200514_1528.py | 12b28a2f692538d9de4f614e3544a2009caf6595 | [] | no_license | M2DEducation/EduProject | 7e1c357bb253ef92feffb7cc4b91a94e0c3087ac | d73e8b9170b2d3bf0cea19a2eb8b6fb60d2f4434 | refs/heads/master | 2020-09-14T08:24:19.848147 | 2020-05-18T18:36:12 | 2020-05-18T18:36:12 | 223,076,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # Generated by Django 3.0.3 on 2020-05-14 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('LMSSystem', '0038_auto_20200514_1526'),
]
operations = [
migrations.AlterField(
model_name='classlistgroupcode',
name='class_group_code',
field=models.CharField(default='VrlSsooiW9YB0fUjYahn1xpAfUzo9zU8', max_length=32),
),
]
| [
"[email protected]"
] | |
758b611abaeeebd5db7aa8487a4e3237a6220b98 | ce60f76c6ad4c48fd6182240b302ee057809cc66 | /extra/jobqueue/store.py | e50b09859dd5b03d81bdaa1e823e61f0c4787d96 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | bumps/bumps | 8ae10e8d15c0aa64e0bab6e00e7fabb2ca1b0860 | 2594e69567d534b434dc0eae727b77fdeff411d4 | refs/heads/master | 2023-08-22T17:56:49.987181 | 2023-07-26T14:22:23 | 2023-07-26T14:22:23 | 2,799,064 | 48 | 28 | NOASSERTION | 2023-07-26T14:22:24 | 2011-11-17T22:22:02 | Python | UTF-8 | Python | false | false | 1,373 | py | import os
import json
import shutil
from tempfile import NamedTemporaryFile
ROOT = '/var/lib/jobqueue/server/%s'
def tempfile():
create('temp')
return NamedTemporaryFile(delete=False, dir=path('temp'))
def path(id):
return ROOT%id
def create(id):
#print "making %s"%path(id)
if not os.path.exists(path(id)):
os.makedirs(path(id))
def destroy(id):
shutil.rmtree(path(id))
def put(id, key, value):
value = json.dumps(value)
datapath = path(id)
datafile = os.path.join(datapath,"K-%s.json"%(key))
try:
open(datafile,'wb').write(value)
except:
raise KeyError("Could not store key %s-%s in %s"%(id,key,datafile))
def get(id, key):
datapath = path(id)
datafile = os.path.join(datapath,"K-%s.json"%(key))
try:
value = open(datafile,'rb').read()
except:
raise KeyError("Could not retrieve key %s-%s"%(id,key))
#if value == "": print "key %s-%s is empty"%(id,key)
return json.loads(value) if value != "" else None
def contains(id, key):
datapath = path(id)
datafile = os.path.join(datapath,"K-%s.json"%(key))
return os.path.exists(datafile)
def delete(id, key):
datapath = path(id)
datafile = os.path.join(datapath,"K-%s.json"%(key))
try:
os.unlink(datafile)
except:
raise KeyError("Could not delete key %s-%s"%(id,key))
| [
"[email protected]"
] | |
dc3bce9be87826bd6ba6dab79800a4656cffc984 | aa5639097c455f3bc7ce884f289fc4dec59b58bd | /philKrull/booksDeml/apps/books_app/urls.py | cc2b62bdaad67e144c8167ae6b76a4e2012dca7c | [] | no_license | sethborne/11_28_2016_Python | 42b0152f286dcb8c28d83f5e4fa68018eff6021f | dae94f0faf7fdd2e107ae4c556e414c27e15e34e | refs/heads/master | 2021-01-11T12:17:10.499958 | 2016-12-14T18:19:00 | 2016-12-14T18:19:00 | 76,482,453 | 0 | 0 | null | 2016-12-14T17:38:12 | 2016-12-14T17:38:11 | null | UTF-8 | Python | false | false | 260 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name = 'index'),
url(r'^create_author$', views.create_author, name = 'create_author'),
url(r'^create_book$', views.create_book, name = 'create_book'),
]
| [
"[email protected]"
] | |
53075b60523000d066173f7ed2aad7a6187ef293 | d0ee1cf523eca021bb8ae0b2067752f24950924b | /src/solver_abstract/core/Rule.py | 6bbef5c4f2e0792c3baee35950b37ea6b7b6f172 | [] | no_license | JamesMcGuigan/kaggle-arc | bc42fd5ca303f96adf9d90b526571e38d6901b5f | 583a4c101ee1bd11c157dc7f5300ab3106944e97 | refs/heads/master | 2022-09-10T18:52:47.687726 | 2020-05-29T17:17:12 | 2020-05-29T17:17:12 | 267,910,558 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,265 | py | import inspect
import traceback
from collections import defaultdict
from collections import UserDict
from functools import lru_cache
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
from typing import Type
from typing import Union
from itertools import product
from src.datamodel.Problem import Problem
from src.settings import settings
from src.solver_abstract.core.Context import Context
from src.solver_abstract.core.Symbol import Symbol
class Rule(object):
def __init__(self, function: Callable, arguments: Dict = None):
self.function = function
self.arguments = arguments or dict()
self._hash = hash(self.function) + hash(tuple(self.arguments.items()))
def __call__(self, context: Union[Problem,Context]) -> Any:
if not isinstance(context, Context): context = Context(context)
output = self.call_with_context(self.function, context, self.arguments)
return output
def __repr__(self):
arguments = { key: value.__name__ if hasattr(value, '__name__') else str(value)
for key, value in self.arguments.items() }
arguments = " ".join( f"{k}={v}" for k,v in arguments.items() )
return f'<Rule {self.function.__name__}({arguments})>'
def __hash__(self):
return self._hash
def __eq__(self, other):
if not isinstance(other, Rule): return False
if self.function != other.function: return False
if self.arguments != other.arguments: return False # compare by value - https://stackoverflow.com/questions/4527942/comparing-two-dictionaries-and-checking-how-many-key-value-pairs-are-equal
return True
@classmethod
def kwargs_from_context( cls, function, context: Context, arguments={}, strict=False):
signature = inspect.signature(function)
kwargs = {}
for key, parameter in signature.parameters.items():
if key not in arguments.keys(): continue
argument = arguments[key]
# Resolve argument symbols from context
if isinstance(argument, Symbol):
name = argument.name
if name in context:
argument = context[name]
# Resolve arguments[key]
if cls.isinstance(argument, parameter.annotation):
if callable(argument):
kwargs[key] = cls.call_with_context(argument, context)
else:
kwargs[key] = argument
continue
# See if we can typematch from context - strict means enforcing a unique typematch
seen = set()
context_by_type = cls.group_context_by_type(context)
for key, parameter in signature.parameters.items():
if key in kwargs: continue # already solved
if parameter.annotation not in context_by_type: continue
if strict and len(context_by_type[parameter.annotation]) != 1: continue
for symbol in context_by_type[parameter.annotation]:
if symbol in seen: continue
if symbol.name not in context: continue
seen.add(symbol)
kwargs[key] = context[symbol.name]
break
for key, parameter in signature.parameters.items():
if not key in kwargs and parameter.default is parameter.empty:
if settings['debug']:
print(f'{cls.__name__}.kwargs_from_context() - unable to resolve | {function} {signature} | using: arguments={arguments}, context={context}')
return None
return kwargs
@classmethod
def call_with_context(cls, function, context: Context, arguments: Dict={} ) -> Any:
kwargs = cls.kwargs_from_context(function, context, arguments)
output = None
try:
if kwargs is not None:
output = function(**kwargs)
except TypeError as exception:
if settings['debug']: cls.print_exception(exception, function, kwargs, arguments, context)
except Exception as exception:
if settings['debug']: cls.print_exception(exception, function, kwargs, arguments, context)
if output is None and settings['debug']:
output = cls.kwargs_from_context(function, context, arguments)
return output
# noinspection PyUnusedLocal
@classmethod
def print_exception(cls, exception, function=None, kwargs=None, arguments=None, context=None ):
if settings['debug']:
print('-'*20)
print(f"Exception: {cls.__name__}.call_with_context()")
print(f"function={function}")
print(f"kwargs={kwargs}")
#print(f"arguments={arguments}")
#print(f"context={context}")
traceback.print_exception(type(exception), exception, exception.__traceback__)
print('-'*20)
@classmethod
@lru_cache(1024) # Profiler: expensive function
def group_context_by_type( cls, context: Union[Dict,UserDict] ) -> DefaultDict[Type,List[Any]]:
grouped = defaultdict(list)
for name, item in context.items():
types = cls.types(item)
for type in types:
grouped[type].append( Symbol(name) )
return grouped
@classmethod
def group_by_type( cls, collection: List[Any] ) -> DefaultDict[Type,Set[Any]]:
grouped = defaultdict(set)
for item in collection:
types = cls.types(item)
for type in types:
grouped[type].add(item)
return grouped
@classmethod
def types( cls, input: Any ) -> Tuple[Type]:
"""as cls.type() but always return a iterable tuple"""
types = cls.type(input)
if not isinstance(types, tuple): types = (types,)
return types
@classmethod
def type( cls, input: Any ) -> Union[Type, Tuple[Type]]:
# Profiler: was 20% now 13% of runtime - selectively cache functions
if callable(input):
hkey = hash(input)
if hkey not in cls._type_cache:
cls._type_cache[hkey] = cls._type(input)
return cls._type_cache[hkey]
else:
return cls._type(input)
_type_cache = {}
# noinspection PyTypeChecker
@classmethod
def _type( cls, input: Any ) -> Union[Type, Tuple[Type]]:
# https://stackoverflow.com/questions/45957615/check-a-variable-against-union-type-at-runtime-in-python-3-6
if isinstance(input, Type): return input
if hasattr(input, '__origin__'):
if input.__origin__ == Union: return tuple( cls._type(arg) for arg in input.__args__ ) # Union[int,str] -> (<class 'int'>, <class 'str'>)
else: return input.__origin__ # Tuple[int,int] -> <class 'tuple'>
if callable(input): return cls.type( cls.inspect_signature(input).return_annotation )
else: return type(input)
@classmethod
def isinstance( cls, a, b ):
a_types = cls.types(a)
b_types = cls.types(b)
return any([ a_type == b_type for a_type, b_type in product(a_types, b_types) ])
@staticmethod
@lru_cache(None)
def inspect_signature( input: Callable ):
return inspect.signature(input)
@classmethod
def argument_permutations(cls, function: Callable, context: Context, arguments=[]):
parameters = cls.inspect_signature(function).parameters
if len(parameters) == 0: return []
parameters = dict(parameters)
arg_options = defaultdict(list)
arguments_by_type = cls.group_by_type(arguments)
context_by_type = cls.group_context_by_type(context)
for index, (key, parameter) in enumerate(parameters.items()):
index = str(index) # create_context() stores numbers as strings, and symbols() requires a string
annotation = parameters[key].annotation
for annotation_type in cls.types(annotation):
# Type input as first parameter
if index in context:
if cls.isinstance(context[index], annotation_type):
arg_options[key].append( Symbol(index) )
continue
# Add from context by type
for item in context_by_type[ annotation_type ]:
if item not in arg_options[key]: # not everything hashes when using a set
arg_options[key].append(item)
# Add from self.arguments by type
for item in arguments_by_type[ annotation_type ]:
if item not in arg_options[key]:
arg_options[key].append(item)
# https://stackoverflow.com/questions/15211568/combine-python-dictionary-permutations-into-list-of-dictionaries
permutations = [ dict(zip(arg_options, options)) for options in product(*arg_options.values()) ]
return permutations
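# Usage sketch (hypothetical function and Problem instance): a Symbol defers
# the lookup of 'x' to call time, and any remaining parameters are
# type-matched from the context.
#
#   def add(a: int, b: int) -> int:
#       return a + b
#
#   rule = Rule(add, arguments={'a': Symbol('x')})
#   result = rule(Context(problem))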
| [
"[email protected]"
] | |
cbaa6f54a684c4c06f61e15be8ce7ab163f3c5c7 | dca80d4401dee3c5926f6e215461c88f024c0bc2 | /Matplotlib/intro.py | de94ccda575dd018682cc363d3e80f60e49f83f3 | [] | no_license | Abhimanyukumar08/Data-Science | 1c7b79f0c7483621d98347cf6f14d537f969dcc8 | 9261066af948c3de0b005ae0f657731d899aff3c | refs/heads/master | 2023-08-07T08:11:52.600729 | 2021-09-14T15:28:16 | 2021-09-14T15:28:16 | 379,266,875 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
x = np.linspace(0,10,20)
y = x*x
z = x+y
print(y,"\n",z)
# to plot the values
# plt.plot(x,y)
#labelling the graph
plt.xlabel("x-axis")
plt.ylabel("y-axis")
plt.title("first graph")
#multiple line in single graph
plt.plot(x,y,z,y, label = 'legend')
plt.legend()
plt.show()
| [
"[email protected]"
] | |
29daccff5b13532a26a99f6b7b20c12c8d0ae916 | 56118be23d8137c559c5d0ad646afea285620568 | /dask/dataframe/tests/test_boolean.py | a2f0daa68c98bd4332729d00fe9aa5b7cab9e76c | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | freyam/dask | b29c8364d74545233611f3db7c2352c01270631c | e1974bffd6067659bb250fd84b6eb3f224cdb79f | refs/heads/main | 2023-07-27T02:36:11.589342 | 2021-09-09T13:56:24 | 2021-09-09T13:56:24 | 383,085,729 | 1 | 0 | BSD-3-Clause | 2021-07-05T09:29:57 | 2021-07-05T09:29:56 | null | UTF-8 | Python | false | false | 906 | py | import pandas as pd
import dask.dataframe as dd
def test_meta():
values = pd.array([True, False, None], dtype="boolean")
ds = dd.from_pandas(pd.Series(values), 2)
assert ds.dtype == pd.BooleanDtype()
dd.utils.assert_eq(ds._meta_nonempty, pd.Series([True, pd.NA], dtype="boolean"))
ddf = dd.from_pandas(pd.DataFrame({"A": values}), 2)
assert ddf.dtypes["A"] == pd.BooleanDtype()
dd.utils.assert_eq(
ddf._meta_nonempty,
pd.DataFrame({"A": pd.array([True, pd.NA], dtype="boolean")}),
)
def test_ops():
s1 = pd.Series(pd.array([True, False, None] * 3, dtype="boolean"))
s2 = pd.Series(pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean"))
ds1 = dd.from_pandas(s1, 2)
ds2 = dd.from_pandas(s2, 2)
dd.utils.assert_eq(ds1 | ds2, s1 | s2)
dd.utils.assert_eq(ds1 & ds2, s1 & s2)
dd.utils.assert_eq(ds1 ^ ds2, s1 ^ s2)
| [
"[email protected]"
] | |
f1330358b21295664c1b9d10ab778c2e07df1410 | a9275787202972a5484b0961e699bda5d4afb8f7 | /wjx/somelib/GetWebInfo.py | 6ec85bd08a0e1ab1a3a0e29ebd1e75d45a614abd | [
"MIT"
] | permissive | wwqkbd/test_login | e9aef6f4336aeb0c9de3eb116cb8e65b8bf2eb7e | 1c435a284321879f0853d6c46b2fde8ed5e1895f | refs/heads/master | 2023-03-15T19:30:55.673568 | 2019-06-03T16:49:21 | 2019-06-03T16:49:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--log-level=3")
# Load the JS that randomly generates answers
with open('somelib/random.js', 'r', encoding='utf-8') as f:
randomjs = f.read()
class WjxDemo(object):
def __init__(self, url):
self.url = url
def open_questions_page(self):
        # Windowed mode
# self.driver = webdriver.Chrome()
        # Headless mode
self.driver = webdriver.Chrome(chrome_options=chrome_options)
self.driver.get(self.url)
def get_info(self):
jsdict = r'''
{
'cookie': document.cookie,
'html': document.documentElement.outerHTML
}
'''
s = 'return ' + jsdict
web_info = self.driver.execute_script(s.replace('\n', ''))
return web_info
def get_answer(self, times):
answer_arr = []
for i in range(times):
answer_arr.append(self.driver.execute_script(randomjs + 'return sent_to_answer();'))
return answer_arr
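# Usage sketch (the survey URL is a hypothetical placeholder):
#
#   demo = WjxDemo('https://www.wjx.cn/jq/XXXXXXXX.aspx')
#   demo.open_questions_page()
#   info = demo.get_info()        # {'cookie': ..., 'html': ...}
#   answers = demo.get_answer(3)  # three randomly generated answer sets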
| [
"[email protected]"
] | |
e728183ff73afc5acd4cb9e9d2674068e9a7bed0 | 55080860557bc8e43a4b8040f3eac6a4930067db | /venv/bin/pip3 | 8982d0592f8c98b2091a3704c3b776a66cee9a43 | [] | no_license | michaelisaac015/multichain | d224f0198f661b6a4f53266c0e362d6ab8e8aacd | 8c79bc9d06ca302738b5d4165df23d651c2b4c3f | refs/heads/master | 2021-09-08T19:48:57.939336 | 2018-09-20T17:38:48 | 2018-09-20T17:38:49 | 149,647,254 | 0 | 0 | null | 2021-09-02T05:19:21 | 2018-09-20T17:40:21 | Python | UTF-8 | Python | false | false | 408 | #!/home/research/PycharmProjects/multichain/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | ||
362ff66396b2f42d82f94a07021f22c9d53d50e3 | 6b699b7763a0ff8c32b85014d96f6faf02514a2e | /models/research/syntaxnet/syntaxnet/beam_reader_ops_test.py | 9d5a388bf9870ea187d569c79b85f947352b42fd | [
"Apache-2.0"
] | permissive | leizeling/Base_tensorflow-object_detection_2Dcord | df7c195685fed21fd456f1dd79881a198cf8b6e0 | d07418eb68543adc2331211ccabbc27137c8676e | refs/heads/master | 2020-03-19T11:51:57.961688 | 2018-06-07T14:47:16 | 2018-06-07T14:47:16 | 136,481,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,120 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for beam_reader_ops."""
import os.path
import time
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from syntaxnet import structured_graph_builder
from syntaxnet.ops import gen_parser_ops
FLAGS = tf.app.flags.FLAGS
if not hasattr(FLAGS, 'test_srcdir'):
FLAGS.test_srcdir = ''
if not hasattr(FLAGS, 'test_tmpdir'):
FLAGS.test_tmpdir = tf.test.get_temp_dir()
class ParsingReaderOpsTest(test_util.TensorFlowTestCase):
def setUp(self):
# Creates a task context with the correct testing paths.
initial_task_context = os.path.join(FLAGS.test_srcdir,
'syntaxnet/'
'testdata/context.pbtxt')
self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
with open(initial_task_context, 'r') as fin:
with open(self._task_context, 'w') as fout:
fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir)
.replace('OUTPATH', FLAGS.test_tmpdir))
# Creates necessary term maps.
with self.test_session() as sess:
gen_parser_ops.lexicon_builder(task_context=self._task_context,
corpus_name='training-corpus').run()
self._num_features, self._num_feature_ids, _, self._num_actions = (
sess.run(gen_parser_ops.feature_size(task_context=self._task_context,
arg_prefix='brain_parser')))
def MakeGraph(self,
max_steps=10,
beam_size=2,
batch_size=1,
**kwargs):
"""Constructs a structured learning graph."""
assert max_steps > 0, 'Empty network not supported.'
logging.info('MakeGraph + %s', kwargs)
with self.test_session(graph=tf.Graph()) as sess:
feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
gen_parser_ops.feature_size(task_context=self._task_context))
embedding_dims = [8, 8, 8]
hidden_layer_sizes = []
learning_rate = 0.01
builder = structured_graph_builder.StructuredGraphBuilder(
num_actions,
feature_sizes,
domain_sizes,
embedding_dims,
hidden_layer_sizes,
seed=1,
max_steps=max_steps,
beam_size=beam_size,
gate_gradients=True,
use_locking=True,
use_averaging=False,
check_parameters=False,
**kwargs)
builder.AddTraining(self._task_context,
batch_size,
learning_rate=learning_rate,
decay_steps=1000,
momentum=0.9,
corpus_name='training-corpus')
builder.AddEvaluation(self._task_context,
batch_size,
evaluation_max_steps=25,
corpus_name=None)
builder.training['inits'] = tf.group(*builder.inits.values(), name='inits')
return builder
def Train(self, **kwargs):
with self.test_session(graph=tf.Graph()) as sess:
max_steps = 3
batch_size = 3
beam_size = 3
builder = (
self.MakeGraph(
max_steps=max_steps, beam_size=beam_size,
batch_size=batch_size, **kwargs))
logging.info('params: %s', builder.params.keys())
logging.info('variables: %s', builder.variables.keys())
t = builder.training
sess.run(t['inits'])
costs = []
gold_slots = []
alive_steps_vector = []
every_n = 5
walltime = time.time()
for step in range(10):
if step > 0 and step % every_n == 0:
new_walltime = time.time()
logging.info(
'Step: %d <cost>: %f <gold_slot>: %f <alive_steps>: %f <iter '
'time>: %f ms',
step, sum(costs[-every_n:]) / float(every_n),
sum(gold_slots[-every_n:]) / float(every_n),
sum(alive_steps_vector[-every_n:]) / float(every_n),
1000 * (new_walltime - walltime) / float(every_n))
walltime = new_walltime
cost, gold_slot, alive_steps, _ = sess.run(
[t['cost'], t['gold_slot'], t['alive_steps'], t['train_op']])
costs.append(cost)
gold_slots.append(gold_slot.mean())
alive_steps_vector.append(alive_steps.mean())
if builder._only_train:
trainable_param_names = [
k for k in builder.params if k in builder._only_train]
else:
trainable_param_names = builder.params.keys()
if builder._use_averaging:
for v in trainable_param_names:
avg = builder.variables['%s_avg_var' % v].eval()
tf.assign(builder.params[v], avg).eval()
# Reset for pseudo eval.
costs = []
gold_slots = []
alive_stepss = []
for step in range(10):
cost, gold_slot, alive_steps = sess.run(
[t['cost'], t['gold_slot'], t['alive_steps']])
costs.append(cost)
gold_slots.append(gold_slot.mean())
alive_stepss.append(alive_steps.mean())
logging.info(
'Pseudo eval: <cost>: %f <gold_slot>: %f <alive_steps>: %f',
sum(costs[-every_n:]) / float(every_n),
sum(gold_slots[-every_n:]) / float(every_n),
sum(alive_stepss[-every_n:]) / float(every_n))
def PathScores(self, iterations, beam_size, max_steps, batch_size):
with self.test_session(graph=tf.Graph()) as sess:
t = self.MakeGraph(beam_size=beam_size, max_steps=max_steps,
batch_size=batch_size).training
sess.run(t['inits'])
all_path_scores = []
beam_path_scores = []
for i in range(iterations):
logging.info('run %d', i)
tensors = (
sess.run(
[t['alive_steps'], t['concat_scores'],
t['all_path_scores'], t['beam_path_scores'],
t['indices'], t['path_ids']]))
logging.info('alive for %s, all_path_scores and beam_path_scores, '
'indices and path_ids:'
'\n%s\n%s\n%s\n%s',
tensors[0], tensors[2], tensors[3], tensors[4], tensors[5])
logging.info('diff:\n%s', tensors[2] - tensors[3])
all_path_scores.append(tensors[2])
beam_path_scores.append(tensors[3])
return all_path_scores, beam_path_scores
def testParseUntilNotAlive(self):
"""Ensures that the 'alive' condition works in the Cond ops."""
with self.test_session(graph=tf.Graph()) as sess:
t = self.MakeGraph(batch_size=3, beam_size=2, max_steps=5).training
sess.run(t['inits'])
for i in range(5):
logging.info('run %d', i)
tf_alive = t['alive'].eval()
self.assertFalse(any(tf_alive))
def testParseMomentum(self):
"""Ensures that Momentum training can be done using the gradients."""
self.Train()
self.Train(model_cost='perceptron_loss')
self.Train(model_cost='perceptron_loss',
only_train='softmax_weight,softmax_bias', softmax_init=0)
self.Train(only_train='softmax_weight,softmax_bias', softmax_init=0)
def testPathScoresAgree(self):
"""Ensures that path scores computed in the beam are same in the net."""
all_path_scores, beam_path_scores = self.PathScores(
iterations=1, beam_size=130, max_steps=5, batch_size=1)
self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
def testBatchPathScoresAgree(self):
"""Ensures that path scores computed in the beam are same in the net."""
all_path_scores, beam_path_scores = self.PathScores(
iterations=1, beam_size=130, max_steps=5, batch_size=22)
self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
def testBatchOneStepPathScoresAgree(self):
"""Ensures that path scores computed in the beam are same in the net."""
all_path_scores, beam_path_scores = self.PathScores(
iterations=1, beam_size=130, max_steps=1, batch_size=22)
self.assertArrayNear(all_path_scores[0], beam_path_scores[0], 1e-6)
if __name__ == '__main__':
googletest.main()
| [
"[email protected]"
] | |
c88db3e78a5dbe72641e36c2a877849b73156344 | 3f09058d7637cb0e2a7574a8887c72b771b00b9d | /dice.py | 0f3d4fa249ff5d2eada56065835429d5e7a9f68b | [] | no_license | chrishaining/games_of_chance | 4390d3e95437dcb08168f324ee30b2be5b337b1b | 1fa6968bae1dbeb5ae36782a9a93ce5ecb3a0c1f | refs/heads/master | 2020-11-27T12:45:17.005655 | 2020-01-11T16:28:30 | 2020-01-11T16:28:30 | 229,445,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,531 | py | #import packages - random, maybe numpy, maybe matplotlib
import random
from matplotlib import pyplot as plt
import numpy as np
#create a list of possible numbers (let's make it two lists - imagine there are two dice)
#(possible refactor - even if I want to have two numbers, I don't really need two lists - I could just repeat a function for one list)
first_die = [1, 2, 3, 4, 5, 6]
second_die = [1, 2, 3, 4, 5, 6]
#create a list of results. starts empty
results = []
#create a function to roll the dice. this will include adding the result to the results list
#use random.choice() to select a single item from a list.
def roll_dice():
first_die_result = random.choice(first_die)
second_die_result = random.choice(second_die)
results.append(first_die_result+second_die_result)
# roll_dice()
# print(results)
#in order to give enough results for a histogram, create a function to roll the dice multiple times
def roll_multiple_dice(rolls):
counter = 0
while counter < rolls:
roll_dice()
counter += 1
print("The dice have been rolled {} times.".format(rolls))
#call the dice roll method and print the results
roll_multiple_dice(100)
# print(results)
#calculate and show the standard deviation
# std = round(np.std(results), 2)
# print("The standard deviation is {}.".format(std))
#create a histogram for the results (requires matplotlib)
def make_histogram(array):
plt.hist(array, bins=10)
plt.xlabel('number rolled')
plt.ylabel('frequency')
plt.show()
make_histogram(results)
| [
"[email protected]"
] | |
28f7fb699b7de609cb6db288fa6c401134f44598 | bf5487ab254401278448c7ece92d389b01ef74e5 | /save_tfrecord.py | 60092726129ded42ea675b3f7d08f4b43ecd09a7 | [] | no_license | JustDoITLab/videoqa | a5dc04f97fad9bff8272b58bea03a8959be96775 | 607a3add03d31ec45b8bcf76c38872875e282947 | refs/heads/master | 2020-04-06T22:35:42.737033 | 2018-11-16T09:36:56 | 2018-11-16T09:36:56 | 157,841,008 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,475 | py | from random import shuffle
import numpy as np
import glob
import tensorflow as tf
import cv2
import sys
import os
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import image,sequence
from nltk.probability import FreqDist
from skimage import io
def txt_to_df(path):
df = pd.read_csv(path, header=None)
df.columns = ['name'] + [str(j)+str(i) for i in ['1','2','3','4','5'] for j in ['q','a1','a2','a3']]
file_names = df['name']
df = pd.wide_to_long(df, stubnames=['q','a1','a2','a3'],i='name',j='qa')
df['index'] = list(map(lambda x :x[0],df.index))
df['qa'] = list(map(lambda x :x[1],df.index))
df['index'] = df['index'].astype('category')
df['index'].cat.set_categories(file_names,inplace = True)
df.sort_values(['index','qa'],ascending = True,inplace = True)
return df,file_names
def answer_to_input(df_a):
# ans_list = sorted(map(lambda word : word[0],FreqDist(df_a['a1'].append(df_a['a2']).append(df_a['a3'])).most_common(1000)))
# ans_list = sorted(map(lambda word : word[0],FreqDist(df_a['a1']).most_common(1000)))
ans_list = sorted(map(lambda word : word,FreqDist(df_a['a1'])))
# pd.DataFrame(ans_list).to_csv('temp.csv',header=None,index=None)
# df_a[['a1','a2','a3']] = df_a[['a1','a2','a3']].applymap(lambda x: x if x in ans_list else '0')
# df_a['lable'] = df_a['a1']+','+df_a['a2']+','+df_a['a3']
# answer_input = df_a['a1'].str.get_dummies(sep = ',')[ans_list].values
answer_input = df_a['a1'].apply(lambda x : ans_list.index(x))
# print(list(answer_input))
return np.array(answer_input)
def question_to_input(df_q1,df_q2):
tokenizer = Tokenizer()
tokenizer.fit_on_texts(df_q1 + df_q2)
encoded_1 = tokenizer.texts_to_sequences(df_q1)
encoded_2 = tokenizer.texts_to_sequences(df_q2)
question_input_train = sequence.pad_sequences(encoded_1, maxlen=15)
question_input_test = sequence.pad_sequences(encoded_2, maxlen=15)
return question_input_train,question_input_test
def load_image(addr): # A function to Load image
result = np.zeros((3,40,40,3))
for index,name in enumerate(os.listdir(addr)):
img = io.imread(addr+'/'+name)
img = cv2.resize(img, (40, 40), interpolation=cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Dividing by 255 normalizes pixel values to [0, 1]
img = img / 255
result[index] = img.astype(np.float32)
return result
# Helpers that wrap raw values in the matching tf.train feature types
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
train_txt_path = './VQADatasetA_20180815/train.txt'
test_txt_path = './VQADatasetA_20180815/test.txt'
df_txt_train,file_names_train = txt_to_df(train_txt_path)
df_txt_test,file_names_test = txt_to_df(test_txt_path)
df_train_q,df_test_q = question_to_input(list(map(str,df_txt_train['q'])),list(map(str,df_txt_test['q'])))
a1 = answer_to_input(df_txt_train)
# The CPU-only TensorFlow build prints warnings at runtime; silence them below (out of sight, out of mind)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
image_path = './image_train_72_3'
train_addrs = os.listdir(image_path)
# The following section writes the data into a TFRecords file
train_filename = './train1.tfrecords' # output file path
# Create a writer for the TFRecords file
writer = tf.python_io.TFRecordWriter(train_filename)
for i in range(len(train_addrs)):
    # Load the frames for this clip once; each clip carries 5 QA pairs
    img = load_image(image_path + '/' + list(file_names_train)[i])
    for j in range(5):
        # Questions/answers are laid out 5 per clip, so the flat index is i*5+j
        question = df_train_q[i * 5 + j]
        answer1 = a1[i * 5 + j]
        # Create the feature dict
feature = {'train/answer': _int64_feature(answer1),
'train/question': _bytes_feature(tf.compat.as_bytes(question.tostring())),
'train/image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}
        # Create an example protocol buffer
example = tf.train.Example(features=tf.train.Features(feature=feature))
        # Write the example protocol buffer to the file
writer.write(example.SerializeToString())
writer.close()
sys.stdout.flush()
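# Read-back sketch (TF 1.x API, matching the feature keys written above; the
# question dtype is assumed to be int32 from pad_sequences' default):
#
#   for rec in tf.python_io.tf_record_iterator('./train1.tfrecords'):
#       ex = tf.train.Example()
#       ex.ParseFromString(rec)
#       answer = ex.features.feature['train/answer'].int64_list.value[0]
#       q = np.frombuffer(ex.features.feature['train/question'].bytes_list.value[0], dtype=np.int32)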
| [
"[email protected]"
] | |
bbd5b29946d3d1fcc962939dab4955b82824989f | 344e2956b4e2a30a8ef7532d951f96d995d1dd1e | /16_mmdet/lib/cfgs/fovea_r50_fpn_4x4_coco.py | 8452b618173e489abc322aef13905ffd256a7e0d | [
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"GPL-3.0-only"
] | permissive | karndeepsingh/Monk_Object_Detection | e64199705326e4cd65e4b29946cae210a4ef9649 | 425fa50a3236cb9097389646275da06bf9185f6b | refs/heads/master | 2022-12-22T18:26:53.933397 | 2020-09-28T12:49:50 | 2020-09-28T12:49:50 | 299,307,843 | 1 | 1 | Apache-2.0 | 2020-09-28T12:52:18 | 2020-09-28T12:52:17 | null | UTF-8 | Python | false | false | 3,776 | py | # model settings
model = dict(
type='FOVEA',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
num_outs=5,
add_extra_convs='on_input'),
bbox_head=dict(
type='FoveaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
base_edge_list=[16, 32, 64, 128, 256],
scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
sigma=0.4,
with_deform=False,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=1.50,
alpha=0.4,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
nms_pre=1000,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
#Dataset Settings
dataset_type = 'CocoDataset'
data_root = ''
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=, #change1
workers_per_gpu=, #change2
train=dict(
type=dataset_type,
classes=, #change3
ann_file=, #change4
img_prefix=, #change5
pipeline=train_pipeline),
val=dict(
type=dataset_type,
classes=, #change6
ann_file=, #change7
img_prefix=, #change8
pipeline=test_pipeline),
test=dict(
type=dataset_type,
classes=, #change9
ann_file=, #change10
img_prefix=, #change11
pipeline=test_pipeline))
evaluation = dict(interval=, metric='bbox') #change9
# Schedule Settings
optimizer = dict(type='SGD', lr=, momentum=, weight_decay=) #change12
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=) #change13
total_epochs = #change14
# Runtime Dataset
checkpoint_config = dict(interval=) #change15
# yapf:disable
log_config = dict(
interval=50, #change16
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = #change17
resume_from = None
workflow = [('train', 1)]
gpu_ids = None #change18
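# Example values for the '#changeN' placeholders, taken as assumptions from the
# stock mmdetection fovea_r50_fpn_4x4_coco schedule (adjust for your data/GPUs):
#   samples_per_gpu=4, workers_per_gpu=4
#   optimizer: lr=0.01, momentum=0.9, weight_decay=0.0001
#   lr_config step=[8, 11], total_epochs=12
#   checkpoint/evaluation interval=1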
| [
"[email protected]"
] | |
9005b2de140d8e16cba27f1249f4cfee000e7f81 | 2c12f504c49c3a8d8941212b44faabeda15df4e1 | /DjangoRestApiMongoDB/cards/models.py | aedecc8f416831efe3277c73ab0ded5189878a39 | [] | no_license | 88amit77/DRF_mongo | d60901a2dc76a1a57705be27a92ac0fe7eafdc3b | a242cd7f6118e6d157828de6851d48c6f91f2f18 | refs/heads/master | 2023-06-21T13:34:52.398953 | 2021-07-17T04:19:54 | 2021-07-17T04:19:54 | 386,834,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from django.db import models
# Create your models here.
class Card(models.Model):
question = models.CharField(max_length=255)
answer = models.TextField()
# def __str__(self):
# return self.question
| [
"[email protected]"
] | |
271077be4ca0a477e30f8f0cbd8093cb13c2f7b9 | 10e8c2915c214cc1ea13dbb2e6aed8e79008e89f | /django-InstallSystem/SystemProject/pxeviews/UserList.py | 51538adb3f9ba4d6ab8d2aded607d77e873226a1 | [] | no_license | jhusshini8/pxe-install-system | 1b5af028c3d5c5855085218a89c03e9f37d63add | 190721d83e0c26a2a1fbaa0b8b413de1adb76fce | refs/heads/master | 2023-02-18T18:46:26.441944 | 2021-01-12T10:32:44 | 2021-01-12T10:32:44 | 294,567,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | # -*- coding:utf-8 -*-
from django.http import JsonResponse
from SystemProject import models
def UserListPage(request):
if (request.method == 'GET'):
dataobj = models.InstallRecord.objects.all()
data_dic = {}
transferd_comment_list = []
for i in dataobj:
data_dic["name"] = i.author
transferd_comment_list.append(data_dic.copy())
result = dict()
data_result = dict()
data_result['items'] = transferd_comment_list
result["code"] = 20000
result["data"] = data_result
return JsonResponse(result, safe=False)
def UserListPageAll(request):
if (request.method == 'GET'):
dataobj = models.InstallRecord.objects.all()
install_count = models.InstallRecord.objects.all().count()
data_dic = {}
transferd_comment_list = []
for i in dataobj:
data_dic["id"] = i.id
            if int(i.installstatus) == 1:
                data_dic['status'] = "Installing"
            elif int(i.installstatus) == 2:
                data_dic['status'] = "Success"
            elif int(i.installstatus) == 3:
                data_dic['status'] = "Deleted"
            else:
                data_dic['status'] = "Failed"
data_dic["name"] = i.author
data_dic["installsystem"] = models.SystemType.objects.get(id=i.version_name_id).record_list
transferd_comment_list.append(data_dic.copy())
result = dict()
data_result = dict()
data_result["total"] = install_count
data_result['items'] = transferd_comment_list
result["code"] = 20000
result["data"] = data_result
        return JsonResponse(result, safe=False)
| [
"[email protected]"
] | |
5e774aa9f1672d90581e70c88465ec42fe3ac8a6 | a5e9e6a505740f85891f54533654e0e76ef6b511 | /untitled0.py | d2f35fdbc2e9e10664d4c158fd40941f9541de85 | [] | no_license | swagatsourav/learngit | c3d5af1f4d683dd97f4bb3ad1db80f5b4b38b3df | e633bd0f717030cde697ad54d293fdad22738ccb | refs/heads/master | 2022-06-28T13:51:21.663052 | 2020-05-07T05:13:40 | 2020-05-07T05:13:40 | 260,623,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first_project.settings')
import django
django.setup()
from first_app.models import Assigned_Role, Role_Master, User_Accounts, User_Master
from faker import Faker
import random
fake = Faker()
desingnations = ['Consultant', 'Supervisor', 'Operator', 'Architecht']
def populate_Data(n=5):
for i in range(n):
# Creates the initial entry in User_Accounts
fake_first_name = fake.first_name()
fake_user_name = fake_first_name + '_1234'
while User_Accounts.objects.filter(user_name=fake_user_name).count() != 0:
fake_first_name = fake.first_name()
fake_user_name = fake_first_name + '_1234'
fake_email_id = fake.email()
while User_Accounts.objects.filter(email_id=fake_email_id).count() != 0:
fake_email_id = fake.email()
# picks a date between today and next 1 year.
fake_pwd_exp_date = fake.date_between(
start_date='today', end_date='+1y')
pwd = "Swagat@123"
ua_obj = User_Accounts(user_name=fake_user_name, password=pwd,email_id=fake_email_id, pwd_exp_date=fake_pwd_exp_date)
ua_obj.save()
print(f"User \"{fake_first_name}\" inserted.")
# Create entry in User_Master
fake_last_name = fake.last_name()
ran_desingnation = random.choice(desingnations)
# fake_dob = datetime.datetime(1992,7,18)
fake_dob = fake.date_between(start_date='-30y', end_date='-18y')
um_obj = User_Master(user_id=ua_obj, first_name=fake_first_name, last_name=fake_last_name,desingnation=ran_desingnation, dob=fake_dob, user_status='A', created_by=fake.name())
um_obj.save()
print(f"User \"{fake_first_name}\" master date inserted.")
populate_Data(3)
| [
"[email protected]"
] | |
ff82320cf996f96ad04a788662147eb25c59aa70 | 72373550e744687d43e931fea83b241d2931caea | /weiredalgo.py | b048cf0cdd95aea2cf6a33df07079b8c17666437 | [] | no_license | HrithikArya/MyInitialProjects | 549c18576a66b3c8031a1029c9a81bab8a11520a | 14a421dce072c8429f2fae3fe81407c295ee7299 | refs/heads/master | 2022-09-17T21:28:15.445934 | 2020-05-23T12:49:13 | 2020-05-23T12:49:13 | 256,234,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | '''Consider an algorithm that takes as input a positive integer n. If n is even, the algorithm divides it by two, and if n is odd, the algorithm multiplies it by three and adds one. The algorithm repeats this, until n is one. For example, the sequence for n=3 is as follows:
3→10→5→16→8→4→2→1
Your task is to simulate the execution of the algorithm for a given value of n.
Input
The only input line contains an integer n.
Output
Print a line that contains all values of n during the algorithm.
Constraints
1≤n≤106
Example
Input:
3
Output:
3 10 5 16 8 4 2 1
'''
try :
x = int(input('Enter number: '))
print(x, end =" ")
except :
print("Sorry, Wrong input")
while x != 1 :
if x % 2 == 0 :
        x //= 2  # integer division keeps the printed terms as ints
print(x, end =" ")
else :
x = x * 3 + 1
print(x, end =" ")
| [
"[email protected]"
] | |
6b39708268c0f5423035acb0b225fbf37f6460e9 | 38a82ba0eed9517ed6c5bc60af4f31c7016123ce | /BokePro/User/migrations/0002_auto_20180621_1044.py | 16938a33ead01945be9dc516794837d1ee75f602 | [] | no_license | laoitelu/XmanPro | 1e3555ec3e91b3c92a0205424136b9aee56fa855 | e17b83a055dcf01988bdf014f43a4046f423bd89 | refs/heads/master | 2020-05-23T23:25:22.815756 | 2019-07-24T06:24:29 | 2019-07-24T06:24:29 | 186,993,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('User', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='c_time',
field=models.DateTimeField(default=datetime.datetime(2018, 6, 21, 2, 44, 18, 309000, tzinfo=utc), verbose_name='\u521b\u952e\u65f6\u95f4'),
),
]
| [
"[email protected]"
] | |
c24ef2ef5d9a0413bf474980fe6aa3176eb09f55 | 7da17557b79142f7e72e275c669f157e7cd35753 | /products/migrations/0008_auto_20201229_0922.py | 3e8413aa72f2c8d7aa92f94fd84e7cae4e50cef4 | [] | no_license | Code-Institute-Submissions/ci-ms4-bookstore | 8143b9dc4c8e9a52bea96fbf981f0daae54eee21 | c5978948c49d155349dedb5d0510e52dcf119782 | refs/heads/main | 2023-03-09T04:28:12.768946 | 2021-02-28T11:29:27 | 2021-02-28T11:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # Generated by Django 3.1.3 on 2020-12-29 08:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0007_auto_20201228_1032'),
]
operations = [
migrations.AlterField(
model_name='productreview',
name='reviewer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
583ed1810de71f3f768f11829f441344ef667a02 | 5429f39ca838b6df14dc2f71459457cbc1fae306 | /create.py | ac08e3877188a0bb89eaf7c9c67fd850898b5bc5 | [] | no_license | karouu/RepoInitiazationAutomation | 54b56f19b09f8db2550346c9363e494a218eb521 | eb0a8f04900175074927cda2a802dbb8fca302c4 | refs/heads/master | 2020-06-08T10:14:02.354326 | 2019-09-03T18:53:45 | 2019-09-03T18:53:45 | 193,211,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import sys
import os
from github import Github
path = #"/yours/Projects/Directory"
username = "" #Insert your github username here
password = "" #Insert your github password here
def create():
folderName = str(sys.argv[1])
os.makedirs(path + str(sys.argv[1]))
user = Github(username, password).get_user()
repo = user.create_repo(sys.argv[1])
print("Succesfully created repository {}".format(sys.argv[1]))
if __name__ == "__main__":
create()
| [
"[email protected]"
] | |
9b6fcc6efb4e5d2f3a46c869d5bdfcc0ea5276a9 | 099efd8bd924fbd8c0e97bbeb65dbaf993332f5d | /Jigsaw_Predict.py | 78a42d2e5274e5bd3c76ebffd0571c2ff05b2bd4 | [] | no_license | salokr/jigsaw | d823f533d4349a4e9191a88f36550f582bef39e7 | 4b7f45a3fe9e61610bedd9202d799d5092ee79ac | refs/heads/master | 2021-05-02T06:58:54.419994 | 2018-02-09T06:45:40 | 2018-02-09T06:45:40 | 120,867,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | import pandas as pd
import numpy as np
from keras.preprocessing.sequence import pad_sequences
labels=['toxic','severe_toxic','obscene','threat','insult','identity_hate']
def predict_save_df(test_file_address,model,df_name,tokenizer=None,maxlen=None):
test_df=pd.read_csv(test_file_address)
X_test=test_df['comment_text'].fillna('<UNK>')
X_test=tokenizer.texts_to_sequences(X_test)
X_test=pad_sequences(X_test,maxlen=maxlen)
probabilities=model.predict(X_test)
submission_df = pd.DataFrame(columns=['id'] + labels)
submission_df['id'] = test_df['id'].values
submission_df[labels] = probabilities
    submission_df.to_csv("./" + raw_input('Enter Prediction File Name(Don\'t append .csv at end) : ') + '_jigsaw.csv',index=False)
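# Usage sketch (hypothetical names; the tokenizer and maxlen must match the
# ones used when the model was trained):
#
#   predict_save_df('./test.csv', trained_model, 'sub',
#                   tokenizer=fitted_tokenizer, maxlen=200)
| [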
"[email protected]"
] | |
d3e7ead2fc7b6d4160d4cad09aa4abd6a1e305e6 | 5b57f5aa89024986a8017ae1bc29e6be5da20d01 | /rtslib/root.py | 24fb756aa06f1d569efc833a0f8e86a15d5cc25c | [
"Apache-2.0"
] | permissive | prakashsurya/python-rtslib-fb | a0429d96dd80bfe6d3ce8148c00adb75c3f49ef2 | 05c878cd2724abb6b553961e9733309ca268c97b | refs/heads/master | 2020-05-22T18:33:30.742167 | 2018-02-19T21:04:09 | 2019-03-04T15:28:53 | 186,474,471 | 0 | 0 | Apache-2.0 | 2019-05-13T18:28:42 | 2019-05-13T18:28:41 | null | UTF-8 | Python | false | false | 10,307 | py | '''
Implements the RTSRoot class.
This file is part of RTSLib.
Copyright (c) 2011-2013 by Datera, Inc
Copyright (c) 2011-2014 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import os
import stat
import json
from .node import CFSNode
from .target import Target
from .fabric import FabricModule
from .tcm import so_mapping, StorageObject
from .utils import RTSLibError, modprobe, mount_configfs
from .utils import dict_remove, set_attributes
default_save_file = "/etc/rtslib-fb-target/saveconfig.json"
class RTSRoot(CFSNode):
'''
This is an interface to the root of the configFS object tree.
Is allows one to start browsing Target and StorageObjects,
as well as helper methods to return arbitrary objects from the
configFS tree.
>>> import rtslib.root as root
>>> rtsroot = root.RTSRoot()
>>> rtsroot.path
'/sys/kernel/config/target'
>>> rtsroot.exists
True
>>> rtsroot.targets # doctest: +ELLIPSIS
[...]
>>> rtsroot.tpgs # doctest: +ELLIPSIS
[...]
>>> rtsroot.storage_objects # doctest: +ELLIPSIS
[...]
>>> rtsroot.network_portals # doctest: +ELLIPSIS
[...]
'''
# RTSRoot private stuff
def __init__(self):
'''
Instantiate an RTSRoot object. Basically checks for configfs setup and
base kernel modules (tcm)
'''
super(RTSRoot, self).__init__()
modprobe('configfs')
mount_configfs()
modprobe('target_core_mod')
self._create_in_cfs_ine('any')
def _list_targets(self):
self._check_self()
for fabric_module in self.fabric_modules:
for target in fabric_module.targets:
yield target
def _list_storage_objects(self):
self._check_self()
for so in StorageObject.all():
yield so
def _list_tpgs(self):
self._check_self()
for t in self.targets:
for tpg in t.tpgs:
yield tpg
def _list_node_acls(self):
self._check_self()
for t in self.tpgs:
for node_acl in t.node_acls:
yield node_acl
def _list_node_acl_groups(self):
self._check_self()
for t in self.tpgs:
for nag in t.node_acl_groups:
yield nag
def _list_mapped_luns(self):
self._check_self()
for na in self.node_acls:
for mlun in na.mapped_luns:
yield mlun
def _list_mapped_lun_groups(self):
self._check_self()
for nag in self.node_acl_groups:
for mlg in nag.mapped_lun_groups:
yield mlg
def _list_network_portals(self):
self._check_self()
for t in self.tpgs:
for p in t.network_portals:
yield p
def _list_luns(self):
self._check_self()
for t in self.tpgs:
for lun in t.luns:
yield lun
def _list_sessions(self):
self._check_self()
for na in self.node_acls:
if na.session:
yield na.session
def _list_fabric_modules(self):
self._check_self()
for mod in FabricModule.all():
yield mod
def __str__(self):
return "rtslib"
# RTSRoot public stuff
def dump(self):
'''
Returns a dict representing the complete state of the target
config, suitable for serialization/deserialization, and then
handing to restore().
'''
d = super(RTSRoot, self).dump()
d['storage_objects'] = [so.dump() for so in self.storage_objects]
d['targets'] = [t.dump() for t in self.targets]
d['fabric_modules'] = [f.dump() for f in self.fabric_modules
if f.has_feature("discovery_auth")
if f.discovery_enable_auth]
return d
def clear_existing(self, confirm=False):
'''
Remove entire current configuration.
'''
if not confirm:
raise RTSLibError("As a precaution, confirm=True needs to be set")
# Targets depend on storage objects, delete them first.
for t in self.targets:
t.delete()
for fm in (f for f in self.fabric_modules if f.has_feature("discovery_auth")):
fm.clear_discovery_auth_settings()
for so in self.storage_objects:
so.delete()
def restore(self, config, clear_existing=False, abort_on_error=False):
'''
Takes a dict generated by dump() and reconfigures the target to match.
Returns list of non-fatal errors that were encountered.
Will refuse to restore over an existing configuration unless clear_existing
is True.
'''
if clear_existing:
self.clear_existing(confirm=True)
elif any(self.storage_objects) or any(self.targets):
raise RTSLibError("storageobjects or targets present, not restoring")
errors = []
if abort_on_error:
def err_func(err_str):
raise RTSLibError(err_str)
else:
def err_func(err_str):
errors.append(err_str + ", skipped")
for index, so in enumerate(config.get('storage_objects', [])):
if 'name' not in so:
err_func("'name' not defined in storage object %d" % index)
continue
try:
so_cls = so_mapping[so['plugin']]
except KeyError:
err_func("'plugin' not defined or invalid in storageobject %s" % so['name'])
continue
kwargs = so.copy()
dict_remove(kwargs, ('exists', 'attributes', 'plugin', 'buffered_mode'))
try:
so_obj = so_cls(**kwargs)
except Exception as e:
err_func("Could not create StorageObject %s: %s" % (so['name'], e))
continue
# Custom err func to include block name
def so_err_func(x):
return err_func("Storage Object %s/%s: %s" % (so['plugin'], so['name'], x))
set_attributes(so_obj, so.get('attributes', {}), so_err_func)
# Don't need to create fabric modules
for index, fm in enumerate(config.get('fabric_modules', [])):
if 'name' not in fm:
err_func("'name' not defined in fabricmodule %d" % index)
continue
for fm_obj in self.fabric_modules:
if fm['name'] == fm_obj.name:
fm_obj.setup(fm, err_func)
break
for index, t in enumerate(config.get('targets', [])):
if 'wwn' not in t:
err_func("'wwn' not defined in target %d" % index)
continue
if 'fabric' not in t:
err_func("target %s missing 'fabric' field" % t['wwn'])
continue
if t['fabric'] not in (f.name for f in self.fabric_modules):
err_func("Unknown fabric '%s'" % t['fabric'])
continue
fm_obj = FabricModule(t['fabric'])
# Instantiate target
Target.setup(fm_obj, t, err_func)
return errors
def save_to_file(self, save_file=None):
'''
Write the configuration in json format to a file.
Save file defaults to '/etc/targets/saveconfig.json'.
'''
if not save_file:
save_file = default_save_file
with open(save_file+".temp", "w+") as f:
os.fchmod(f.fileno(), stat.S_IRUSR | stat.S_IWUSR)
f.write(json.dumps(self.dump(), sort_keys=True, indent=2))
f.write("\n")
os.fsync(f.fileno())
os.rename(save_file+".temp", save_file)
def restore_from_file(self, restore_file=None, clear_existing=True, abort_on_error=False):
'''
Restore the configuration from a file in json format.
Restore file defaults to '/etc/targets/saveconfig.json'.
Returns a list of non-fatal errors. If abort_on_error is set,
it will raise the exception instead of continuing.
'''
if not restore_file:
restore_file = default_save_file
with open(restore_file, "r") as f:
config = json.loads(f.read())
return self.restore(config, clear_existing=clear_existing,
abort_on_error=abort_on_error)
targets = property(_list_targets,
doc="Get the list of Target objects.")
tpgs = property(_list_tpgs,
doc="Get the list of all the existing TPG objects.")
node_acls = property(_list_node_acls,
doc="Get the list of all the existing NodeACL objects.")
node_acl_groups = property(_list_node_acl_groups,
doc="Get the list of all the existing NodeACLGroup objects.")
mapped_luns = property(_list_mapped_luns,
doc="Get the list of all the existing MappedLUN objects.")
mapped_lun_groups = property(_list_mapped_lun_groups,
doc="Get the list of all the existing MappedLUNGroup objects.")
sessions = property(_list_sessions,
doc="Get the list of all the existing sessions.")
network_portals = property(_list_network_portals,
doc="Get the list of all the existing Network Portal objects.")
storage_objects = property(_list_storage_objects,
doc="Get the list of all the existing Storage objects.")
luns = property(_list_luns,
doc="Get the list of all existing LUN objects.")
fabric_modules = property(_list_fabric_modules,
doc="Get the list of all FabricModule objects.")
def _test():
'''Run the doctests.'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
d02905929158389d06f917dfcb000c3db8912408 | 71051439e59c20f35cf2cd7303d5ebdd98b4bd4c | /Draft2.py | dac7241feb971663170cd8f9f61d5f790320d93b | [] | no_license | ronggurmahendra/THT | d34618e05a4648a4710b0262668d1123705baee1 | feef5913987eab796b8fa3fcea4d2cc309d58a08 | refs/heads/master | 2020-12-04T11:27:28.639396 | 2020-01-04T15:32:37 | 2020-01-04T15:32:37 | 231,746,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | import cv2
import numpy as np
def nothing(x):
pass
'''
cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)
'''
while True:
frame = cv2.imread('Target Deteksi Objek ver. 1.0.1 (Aruco Kecil)-1.png')
scale_percent = 30
width = int(frame.shape[1] * scale_percent / 100)
height = int(frame.shape[0] * scale_percent / 100)
dim = (width, height)
frame= cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
'''
l_h = cv2.getTrackbarPos("LH", "Tracking")
l_s = cv2.getTrackbarPos("LS", "Tracking")
l_v = cv2.getTrackbarPos("LV", "Tracking")
u_h = cv2.getTrackbarPos("UH", "Tracking")
u_s = cv2.getTrackbarPos("US", "Tracking")
u_v = cv2.getTrackbarPos("UV", "Tracking")
'''
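    # Fixed HSV bounds for the target colour (previously tuned by hand with
    # the trackbars commented out above).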
l_h = 86
l_s = 51
l_v = 51
u_h = 131
u_s = 255
u_v = 255
l_b = np.array([l_h, l_s, l_v])
u_b = np.array([u_h, u_s, u_v])
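    # Keep only pixels whose HSV value lies inside [l_b, u_b]; the mask then
    # gates the original frame via the bitwise AND below.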
mask = cv2.inRange(hsv, l_b, u_b)
res = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow("frame", frame)
cv2.imshow("mask", mask)
cv2.imshow("res", res)
key = cv2.waitKey(1)
if key == 27:
break
cv2.destroyAllWindows() | [
"[email protected]"
] | |
f9607c234e0ea4c040cda408ab7fcd3ed1be04f7 | 42dcbe38d5248a64bff7070927e1882e00cb62b5 | /example/08_glumpy/source/run_00_window.py | 3b356ebea950fc695600aa3fdf164f763b681f6e | [] | no_license | plops/cl-py-generator | 0bd6279e922d973ae3b9771436bb83cd6df452f4 | cc64c7d15b49f2650723489eb6bce70d361fad8f | refs/heads/master | 2023-08-31T22:29:31.550521 | 2023-08-31T19:30:09 | 2023-08-31T19:30:09 | 154,458,175 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | import matplotlib.pyplot as plt
plt.ion()
import numpy as np
from glumpy import app, gloo, gl
app.use("glfw")
window=app.Window()
vertex=""" attribute vec2 position;
void main (){
gl_Position=vec4(position, (0.0e+0f), (1.e+0f));
}"""
fragment=""" void main (){
gl_FragColor=vec4((1.e+0f), (0.0e+0f), (0.0e+0f), (1.e+0f));
}"""
quad=gloo.Program(vertex, fragment, count=4)
quad["position"]=(-1,1,), (1,1,), (-1,-1,), (1,-1,)
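# Four corners of the clip-space quad; drawn as a triangle strip they cover
# the whole viewport.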
@window.event
def on_draw(dt):
window.clear()
quad.draw(gl.GL_TRIANGLE_STRIP)
app.run() | [
"[email protected]"
] | |
50777e863bef01afe866ba0a54731094c1599324 | 12816bcea9962af00c6d121e35563c129881e256 | /BodyTracking/kafkaProducer.py | 78c4328068116b7d25345c0116b07009e28850ed | [] | no_license | luissantanaa/ESProject | 2f98d59c602d5045554072d86cdf8774cc89d05a | 20844afce5d284865d5b211729159e115babfb13 | refs/heads/master | 2021-09-21T23:05:50.679672 | 2020-07-22T14:59:53 | 2020-07-22T14:59:53 | 248,569,166 | 0 | 2 | null | 2021-08-13T15:36:22 | 2020-03-19T17:50:59 | Java | UTF-8 | Python | false | false | 2,062 | py | import time
import json
from kafka import KafkaProducer
from kafka.errors import KafkaError
from datetime import datetime
def main():
#all 3 data files
files = ["data1.txt", "data2.txt", "data3.txt"]
    print("What is your username? ")
username = input()
    # connect to the Kafka broker; value_serializer encodes each payload as UTF-8 JSON
#producer = KafkaProducer(bootstrap_servers=['192.168.160.103:9092'],value_serializer=lambda x: json.dumps(x).encode('utf-8'))
producer = KafkaProducer(bootstrap_servers=['192.168.160.103:9093'],value_serializer=lambda x: json.dumps(x).encode('utf-8'))
    # for each data file, stream its lines to the broker
for f in files:
with open(f, "r") as fin:
time.sleep(2)
#send first line to producer
line = fin.readline()
if line != "":
print(line)
line = line.strip()
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
date_reading = now.strftime("%d/%m/%Y - %H:%M:%S.%f")
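                # one Kafka message per frame: user id, wall-clock timestamp,
                # and the raw joint-coordinate line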
data = {"username":username, "date_reading": date_reading, "joints": line}
producer.send('esp21_joints', value=data)
time.sleep(0.5)
            # while there is another line, read it and send it to the 'esp21_joints' topic; sleep 1 s between sends to simulate live body movement
while line:
line = fin.readline()
if line != "":
print(line)
line = line.strip()
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
date_reading = now.strftime("%d/%m/%Y - %H:%M:%S.%f")
data = {"username":username, "date_reading": date_reading, "joints": line}
producer.send('esp21_joints', value=data)
time.sleep(1)
main()
| [
"[email protected]"
] | |
91f418506e3f243d83d02ceb799a126427962ae1 | 7f962ac1f8561e2989c4ed297d1cd68fd758b36d | /ig/tests.py | f2b4bbf37342a8d7016a8defcd3a18eab41b0a38 | [
"MIT"
] | permissive | muhindokiro/Instaclone | 583eb318167cb45158c018013f499f89491325bc | 49e7d50ac73520ea84eeb20f70285faac2e7947a | refs/heads/master | 2022-06-12T20:26:28.716261 | 2019-07-31T14:31:49 | 2019-07-31T14:31:49 | 199,273,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | from django.test import TestCase
from .models import Editor,Post,tags
import datetime as dt
# Create your tests here.
class EditorTestClass(TestCase):
# Set up method
def setUp(self):
self.james= Editor(first_name = 'James', last_name ='Muriuki', email ='[email protected]')
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.james,Editor))
def test_save_method(self):
self.james.save_editor()
editors = Editor.objects.all()
self.assertTrue(len(editors) > 0)
# def test_delete_method(self):
# self.james.delete_editor()
# editors = Editor.objects.all()
# self.assertTrue(len(editors) > 0)
# def test_update_method(self):
# self.james.update_editor()
# editors = Editor.objects.all()
# self.assertTrue(len(editors) > 0)
# def test_display_method(self):
# self.james.display_editor()
# editors = Editor.objects.all()
# self.assertTrue(len(editors) > 0)
class ArticleTestClass(TestCase):
def setUp(self):
# Creating a new editor and saving it
self.james= Editor(first_name = 'James', last_name ='Muriuki', email ='[email protected]')
self.james.save_editor()
# Creating a new tag and saving it
self.new_tag = tags(name = 'testing')
self.new_tag.save()
self.new_article= Post(title = 'Test Article',post = 'This is a random test Post',editor = self.james)
self.new_article.save()
self.new_article.tags.add(self.new_tag)
def tearDown(self):
Editor.objects.all().delete()
tags.objects.all().delete()
Post.objects.all().delete()
def test_get_ig_today(self):
today_ig = Post.todays_ig()
self.assertTrue(len(today_ig)>0)
# def test_get_ig_by_date(self):
# test_date = '2017-03-17'
# date = dt.datetime.strptime(test_date, '%Y-%m-%d').date()
# ig_by_date = Article.days_ig(date)
# self.assertTrue(len(ig_by_date) == 0)
| [
"[email protected]"
] | |
0c37883e98e84b4afb1ec0c737d8df3a2a60c6b4 | 8b3fe256812141286275f42237c746e02c28d04d | /firstDjangoProj/settings.py | f76690b5756dd216c89fc6c58a53e5ee630b1094 | [] | no_license | shuvo-sutradhar/firsrDjangoProj | 9415f1b95a2624ae082ea4653b5ec8661a3a0dec | d45235fb63713c7d0a6cc164963f427950e3fcf4 | refs/heads/master | 2021-02-08T09:29:34.326877 | 2020-03-01T11:26:07 | 2020-03-01T11:26:07 | 244,136,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,431 | py | """
Django settings for firstDjangoProj project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'usha%0_hkusu=9oa_m8yk(_w5gut9v1dl0x$&v@y@vbchz+n99'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'travello.apps.TravelloConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstDjangoProj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstDjangoProj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'firstDjangoProjDB',
'USER': 'postgres',
'PASSWORD':'123456',
'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"[email protected]"
] | |
14516b5b08c681a0f1ce71bb4c696419562af9e5 | 198381b17ea710921a8fcbe6e86922aa2fe0adc1 | /scrap_sel.py | 2e030b74c04479dff446e7f66a11aa2254214d26 | [] | no_license | JorgeOsorio97/BiBaMexScrapper | 2e1e9dfa21831ee2a9f6015b5bda4f986d58a384 | dc878083e061d3232bf2480075a59d755c9efa19 | refs/heads/master | 2023-01-04T13:51:06.710942 | 2020-10-18T14:02:58 | 2020-10-18T14:02:58 | 305,114,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,682 | py | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
driver = webdriver.Chrome("/home/jorge-valdez/chromedriver")
driver.get('https://picomponentebi.cnbv.gob.mx/ReportViwer/ReportService?sector=40&tema=2&subTema=3&tipoInformacion=0&subTipoInformacion=0&idReporte=040_11q_BIPER0&idPortafolio=0&idTipoReporteBI=1')
# driver.get('https://picomponentebi.cnbv.gob.mx/ReportViwer/ReportService?sector=68&tema=2&subTema=3&tipoInformacion=0&subTipoInformacion=0&idReporte=068_11q_BIPER0&idPortafolio=0&idTipoReporteBI=1')
ifm = driver.find_element_by_xpath('//iframe')
def sleep_wf(segs, message=None):
if message:
print(message)
for i in range(segs):
print(i)
time.sleep(1)
sleep_wf(20)
# driver.implicitly_wait(10)
boton_filtros = driver.find_element_by_xpath(
'//button[@id="btn_OcultaFiltros"]').click()
sleep_wf(5)
# driver.implicitly_wait(10)
driver.find_element_by_xpath(
'//input[@id="rbdIntervalo1" and @value="14"]').click()
meses_list = driver.find_elements_by_xpath(
'//label[./input[@type="checkbox" and @class="chk_Box_Val" and starts-with(@value,"[Periodos]")]]')
# print(meses_list)
print("Numero de meses posibles", len(meses_list))
driver.find_element_by_xpath('//h3[@id="Institucion_6_H"]').click()
instituciones_list = driver.find_elements_by_xpath(
'//label[./input[@type="checkbox" and @class="chk_Box_Val" and starts-with(@value,"[Instituciones]")]]')
print("Numero de instituciones", len(instituciones_list))
# Limpiar listas a Dicts
meses_list = [{"nombre": mes.find_element_by_tag_name(
'input').get_attribute('id')[-6:], 'valor':mes.find_element_by_tag_name(
'input').get_attribute('value'), 'id':mes.find_element_by_tag_name(
'input').get_attribute('id')} for mes in meses_list]
instituciones_list = [{"nombre": inst.text, 'valor': inst.find_element_by_tag_name(
'input').get_attribute('value'), 'id': inst.find_element_by_tag_name(
'input').get_attribute('id')} for inst in instituciones_list]
# print(meses_list)
df = pd.DataFrame(data=meses_list+instituciones_list,
columns=['nombre', 'valor', 'id'])
df.to_csv('test.csv')
result_df = pd.DataFrame(
[], columns=['nombre_p', 'valor_p', 'id_p', 'nombre_in', 'valor_in', 'id_in'])
driver.find_element_by_xpath('//h3[@id="Escala_3_H"]').click()
sleep_wf(1)
driver.find_element_by_xpath('//input[@id="rdbEscala1"]').click()
sleep_wf(1)
driver.find_element_by_xpath('//button[@id="btn_GeneraRoporte"]').click()
sleep_wf(15)
for mes_i, mes in enumerate(meses_list):
for inst_i, inst in enumerate(instituciones_list):
WebDriverWait(driver, 60).until(
EC.presence_of_element_located((By.ID, "btn_OcultaFiltros"))
)
sleep_wf(1, "Boton Mostrat Filtros")
# Mostrar filtros
driver.find_element_by_xpath(
'//button[@id="btn_OcultaFiltros"]').click()
sleep_wf(2, "Filtrar: Configurar Periodos")
# PERIODOS
# Abrir Periodos
driver.find_element_by_xpath('//h3[@id="Periodo_Filtros_H"]').click()
# Definir configurar consulta
sleep_wf(1)
driver.find_element_by_xpath(
'//input[@id="rbdIntervalo1" and @value="14"]').click()
sleep_wf(1)
# SELECCIONAR PERIODO
driver.find_element_by_xpath('//h3[@id="Periodo_5_H"]').click()
if mes_i != 0:
WebDriverWait(driver, 60).until(
EC.presence_of_element_located((By.ID, mes['id']))
)
sleep_wf(1, "Seleccionar meses")
driver.find_element_by_xpath(
'//input[@id="{}"]'.format(meses_list[0]['id'])).click()
sleep_wf(1)
driver.find_element_by_xpath(
'//input[@id="{}"]'.format(mes['id'])).click()
sleep_wf(1, 'Seleccionar Instituciones')
# INSTITUCIONES
driver.find_element_by_xpath('//h3[@id="Institucion_6_H"]').click()
if inst_i != 0:
WebDriverWait(driver, 60).until(
EC.presence_of_element_located((By.ID, inst['id']))
)
sleep_wf(1)
driver.find_element_by_xpath(
'//input[@id="{}"]'.format(instituciones_list[0]['id'])).click()
sleep_wf(1)
driver.find_element_by_xpath(
'//input[@id="{}"]'.format(inst['id'])).click()
sleep_wf(1)
print(mes['nombre'], mes['valor'], mes['id'])
print(inst['nombre'], inst['valor'], inst['id'])
driver.find_element_by_xpath('//h3[@id="Escala_3_H"]').click()
sleep_wf(1)
driver.find_element_by_xpath('//input[@id="rdbEscala1"]').click()
sleep_wf(1)
driver.find_element_by_xpath(
'//button[@id="btn_GeneraRoporte"]').click()
sleep_wf(15, "Primer iframe")
# Descargar como csv
# Cambiamos de iframe
WebDriverWait(driver, 60).until(
EC.presence_of_element_located((By.ID, mes['IFrame_Container']))
)
frames_list = driver.find_elements_by_tag_name('iframe')
sleep_wf(1, "Segundo iframe")
frame1 = driver.find_element_by_tag_name('iframe')
driver.switch_to.frame(frame1)
frames_list = driver.find_elements_by_tag_name('iframe')
frame2 = driver.find_element_by_tag_name('iframe')
driver.switch_to.frame(frame2)
driver.implicitly_wait(5)
driver.find_element_by_id(
'ReportViewer1_ctl05_ctl04_ctl00_ButtonLink').click()
# driver.find_element_by_css_selector(
# 'a#ReportViewer1_ctl05_ctl04_ctl00_ButtonLink').click
# driver.find_element_by_xpath(
# '//a[@id="ReportViewer1_ctl05_ctl04_ctl00_ButtonLink"]').click()
driver.find_element_by_xpath(
'//a[@title="CSV (delimitado por comas)"]').click()
sleep_wf(5)
driver.implicitly_wait(20)
driver.get('https://picomponentebi.cnbv.gob.mx/ReportViwer/ReportService?sector=40&tema=2&subTema=3&tipoInformacion=0&subTipoInformacion=0&idReporte=040_11q_BIPER0&idPortafolio=0&idTipoReporteBI=1')
temp_df = pd.DataFrame(
[[mes['nombre'], mes['valor'], mes['id'], inst['nombre'], inst['valor'], inst['id']]], columns=['nombre_p', 'valor_p', 'id_p', 'nombre_in', 'valor_in', 'id_in'])
result_df.append(temp_df, ignore_index=True)
result_df.to_csv('result.csv')
sleep_wf(30)
result_df.to_csv('result.csv')
| [
"[email protected]"
] | |
f819abe792a7c1a051fbb7777fc74be6c07721da | f0994c93c6b8207a574b63356d477a989a3330d7 | /web/tests/test_model.py | 203c19731943df794fe0aabf5134c62018811919 | [
"MIT"
] | permissive | Limpan/bytardag | 5e2de4ae06a438b532fbb1e9ddccbcdb6626effa | 4a38ae2789b52ccd4db4a4a9dfe21e47781c8b20 | refs/heads/main | 2021-07-01T11:38:57.408369 | 2019-03-13T19:58:17 | 2019-03-13T19:58:17 | 58,823,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | py | import pytest
from app.models import BankAccount, User
from sqlalchemy.sql import text
def test_password_setter():
u = User(password='supersecret')
assert u.password_hash is not None
def test_no_password_getter():
u = User(password='supersecret')
with pytest.raises(AttributeError):
u.password
def test_password_verification():
u = User(password='cat')
assert u.verify_password('cat')
assert u.verify_password('dog') is False
def test_password_salts_are_random():
u = User(password='cat')
u2 = User(password='cat')
assert u.password_hash != u2.password_hash
def test_bank_account_number_masking():
a1 = BankAccount(clearing='4386', number='9823857')
assert a1.clearing == '4386'
assert a1.number == '*****57'
assert a1._number == '9823857'
a2 = BankAccount(clearing='573', number='39923456')
assert a2.clearing == '573'
assert a2.number == '******56'
assert a2._number == '39923456'
def test_bank_account_not_stored_in_the_clear(db):
user = User()
account = BankAccount(bank='JVA Bank', clearing='1234', number='9876543')
user.account = account
db.session.add(user)
db.session.commit()
sql = text('SELECT bank, clearing, _number FROM bank_accounts WHERE id=:id')
result = db.session.execute(sql, {'id': user.account.id})
    assert result.rowcount == 1
row = result.next()
    # compare values, not object identity: the stored columns must differ
    # from the plaintext if the account data is protected at rest
    assert row['bank'] != account.bank
    assert row['clearing'] != account.clearing
    assert row['_number'] != account._number
| [
"[email protected]"
] | |
e9613c950135aa319847d382bc34becb53bf5339 | 1d61636b68c2299b2f71f20262d34678eede2024 | /dict/database.py | 6bd516d46ad8f00e64a3a0f7434eedc835f17ef3 | [] | no_license | sky000/MyGCS | 99ebffa79ebee3bb603c40304e2e406057cafb62 | bfe39a0d665d358782b8666669b127f827588260 | refs/heads/master | 2020-05-07T15:02:20.864803 | 2012-04-05T06:05:11 | 2012-04-05T06:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | from django.shortcuts import render_to_response,get_object_or_404
from dict.models import *
def detail(request,database_id):
d = get_object_or_404(Database, pk=database_id)
i = get_object_or_404(Instance, pk=d.instance_id)
h = get_object_or_404(Host, pk=i.host_id)
navis = [];
navis.append({'href':'/sys/host/'+str(h.id),'name':h.name})
navis.append({'href':'/sys/instance/'+str(i.id),'name':i.name})
navis.append({'href':'/sys/database/'+str(d.id),'name':d.name})
return render_to_response('dict_database_detail.html', {'navis': navis, 'instance':i,'host': h,'database': d}) | [
"[email protected]"
] | |
8a680af866beec50be20ff3310ad76231893fede | 640c3cd1feae861530d61eb486a27be3ed808422 | /demo/modeling/detector/generalized_rcnn.py | 96ddbd6ba5b926f5dca0e06c96d37ab21f7e3652 | [] | no_license | Cuberick-Orion/maskrcnn-benchmark_ARCHIVED | ff773a823e1edf9cb90ea3dedd0eefe029d4edfa | 543dc9ce2879a853cb34ac8521b997d06308ae51 | refs/heads/master | 2020-05-15T15:25:43.664425 | 2019-05-16T12:50:56 | 2019-05-16T12:50:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,854 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.modeling.backbone import build_backbone
from maskrcnn_benchmark.modeling.rpn.rpn import build_rpn
from ..roi_heads.roi_heads import build_roi_heads
from ..roi_heads.box_head.roi_box_feature_extractors import make_roi_box_feature_extractor
import pdb
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN. Currently supports boxes and masks.
It consists of three main parts:
- backbone
- rpn
- heads: takes the features + the proposals from the RPN and computes
detections / masks from it.
"""
def __init__(self, cfg):
super(GeneralizedRCNN, self).__init__()
self.backbone = build_backbone(cfg)
self.rpn = build_rpn(cfg, self.backbone.out_channels)
self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)
self.feature_extractor = make_roi_box_feature_extractor(cfg, self.backbone.out_channels)
def forward(self, images, targets=None):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets (list[BoxList]): ground-truth boxes present in the image (optional)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
During testing, it returns list[BoxList] contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training and targets is None:
raise ValueError("In training mode, targets should be passed")
images = to_image_list(images)
features = self.backbone(images.tensors)
# pdb.set_trace()
proposals, proposal_losses = self.rpn(images, features, targets)
if self.roi_heads:
x, result, detector_losses = self.roi_heads(features, proposals, targets)
# print(x.shape)
# print(e)
## >>>>>>>>
if len(result[0]) == 0:
output = []
else:
output = self.feature_extractor(features, result) ### this is the feature !!!
# pdb.set_trace()
## <<<<<<<<
else:
# RPN-only models don't have roi_heads
x = features
result = proposals
detector_losses = {}
if self.training:
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
return result, output ## the features
| [
"[email protected]"
] | |
373ab58fde34141dc9f59454bce5a1b4f130cce6 | db326011a46bdc55c64b0c35424df551ed030519 | /leetcode/Done/122_BestTimeStock2.py | 75a7627bed73df7c846fa36c63fd053c63435d41 | [] | no_license | humachine/AlgoLearning | ac9a6bd0fdf1293ca63d85732bfbbf864bd68220 | 2d06ab566c8adc6d864c5565310b56008d2d4b31 | refs/heads/master | 2020-12-12T07:19:02.518810 | 2017-04-29T07:15:04 | 2017-04-29T07:15:04 | 49,476,721 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | #https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/
"""Test cases:
Inp: [3, 7, 2, 3, 6]
Out: 8 (7-3 + 6-2)
"""
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if not prices or len(prices)==1:
return 0
#Since unlimited number of transactions are allowed, if the rate increases over a day,
#buy on previous day and sell on next day.
#
        #Even cases where the rate increases continuously over 3 days are covered by 3 piecewise increases
profit = 0
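        # e.g. [3, 7, 2, 3, 6]: +4 (3->7), skip 7->2, +1 (2->3), +3 (3->6) => 8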
for i in xrange(len(prices)-1):
profit += max(0, prices[i+1]-prices[i])
return profit
s = Solution()
print s.maxProfit([3, 7, 2, 3, 6])
| [
"[email protected]"
] | |
2022f96f260de8476e404b75baad9a86518fc821 | 31596558d227ca10911a776023aeef1a7eb0d2c9 | /Other groups/Truck’d/Princess Connect! ReDive [BD]/priconne_nced01.py | dad0bfad2deb5502fbc8b8255ccb561e9c29bbbc | [
"MIT"
] | permissive | Ichunjo/encode-scripts | 914a113093670f7d07e652ef50f6f04d09cc58b1 | 389a9f497e637eaade6f99acee816636856961d4 | refs/heads/master | 2022-05-03T00:48:54.543905 | 2022-03-17T17:43:02 | 2022-03-17T17:43:02 | 220,536,158 | 45 | 8 | null | 2019-11-08T21:25:59 | 2019-11-08T19:41:14 | Python | UTF-8 | Python | false | false | 5,637 | py | """Priconne script"""
__author__ = 'Vardë'
import sys
import os
import shlex
import subprocess
from functools import partial
from typing import NamedTuple
from pathlib import Path
from acsuite import eztrim
from cooldegrain import CoolDegrain
import debandshit as dbs
import vardefunc as vdf
import kagefunc as kgf
from vsutil import depth, get_w, get_y
import lvsfunc as lvf
import vapoursynth as vs
core = vs.core
class InfosBD(NamedTuple):
path: str
src: str
src_clip: vs.VideoNode
frame_start: int
frame_end: int
src_cut: vs.VideoNode
a_src: str
a_src_cut: str
a_enc_cut: str
name: str
output: str
chapter: str
output_final: str
def infos_bd(path, frame_start, frame_end) -> InfosBD:
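    # Bundle the per-episode paths and the trimmed clip into one named tuple;
    # frame_end is negative, so src_clip[frame_start:frame_end] drops that
    # many trailing frames.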
src = path + '.m2ts'
src_clip = core.lsmas.LWLibavSource(src, prefer_hw=1, ff_loglevel=3)
src_cut = src_clip[frame_start:frame_end]
a_src = path + '.wav'
a_src_cut = path + '_cut_track_{}.wav'
a_enc_cut = path + '_track_{}.m4a'
name = Path(sys.argv[0]).stem
output = name + '.265'
chapter = '_assets/chapters/' + name + '.txt'
output_final = name + '.mkv'
return InfosBD(path, src, src_clip, frame_start, frame_end,
src_cut, a_src, a_src_cut, a_enc_cut,
name, output, chapter, output_final)
JPBD = infos_bd(r'[BDMV][200807][CYGX-00001]Princess Connect! Re_Dive Vol.1\BD_VIDEO\BDMV\STREAM\00008', 0, -24)
def do_filter():
"""Vapoursynth filtering"""
src = JPBD.src_cut
src = depth(src, 16)
h = 882
w = get_w(h)
kernel = 'lanczos'
taps = 5
denoise = CoolDegrain(src, tr=1, thsad=24, blksize=8, overlap=4, plane=4)
out = denoise
luma = get_y(out)
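    # Undo the studio's lanczos upscale on luma, then bring it back to source
    # resolution with an FSRCNNX shader; chroma is left untouched and merged
    # back in afterwards.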
descale = kgf.get_descale_filter(kernel, taps=taps)(depth(luma, 32), w, h)
upscale = vdf.fsrcnnx_upscale(depth(descale, 16), src.width, src.height, '_assets/shaders/FSRCNNX_x2_56-16-4-1.glsl',
partial(core.resize.Bicubic, filter_param_a=0, filter_param_b=0))
out = vdf.merge_chroma(upscale, out)
deband_mask = lvf.denoise.detail_mask(out, brz_a=2000, brz_b=1000)
deband = dbs.f3kpf(out, 17, 30, 30)
deband = core.std.MaskedMerge(deband, out, deband_mask)
deband = core.neo_f3kdb.Deband(deband, preset='depth', grainy=24, grainc=18, keep_tv_range=True)
out = deband
grain = kgf.adaptive_grain(out, 0.2, luma_scaling=14)
out = grain
return depth(out, 10)
def do_encode(clip: vs.VideoNode)-> None:
"""Compression with x26X"""
    print('\n\n\nVideo encoding')
x265_cmd = f'x265 -o {JPBD.output} - --y4m' + ' '
x265_cmd += f'--csv {JPBD.name}_log_x265.csv --csv-log-level 2' + ' '
x265_cmd += '--frame-threads 8 --pmode --pme --preset slower' + ' '
x265_cmd += f'--frames {clip.num_frames} --fps 24000/1001 --output-depth 10' + ' '
x265_cmd += '--rd 3 --no-rect --no-amp --rskip 1 --tu-intra-depth 2 --tu-inter-depth 2 --tskip' + ' '
x265_cmd += '--merange 48 --weightb' + ' '
x265_cmd += '--no-strong-intra-smoothing' + ' '
x265_cmd += '--psy-rd 2.0 --psy-rdoq 1.0 --no-open-gop --keyint 360 --min-keyint 12 --scenecut 45 --rc-lookahead 120 --bframes 16' + ' '
x265_cmd += '--crf 15 --aq-mode 3 --aq-strength 0.85 --qcomp 0.70' + ' '
x265_cmd += '--deblock=-1:-1 --no-sao --no-sao-non-deblock' + ' '
x265_cmd += f'--sar 1 --range limited --colorprim 1 --transfer 1 --colormatrix 1 --min-luma {str(16<<2)} --max-luma {str(235<<2)}'# + ' '
print("Encoder command: ", " ".join(shlex.split(x265_cmd)), "\n")
process = subprocess.Popen(shlex.split(x265_cmd), stdin=subprocess.PIPE)
clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
process.communicate()
print('\n\n\nAudio extraction')
eac3to_args = ['eac3to', JPBD.src, '2:', JPBD.a_src, '-log=NUL']
subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')
print('\n\n\nAudio cutting')
eztrim(JPBD.src_clip, (JPBD.frame_start, JPBD.frame_end), JPBD.a_src, JPBD.a_src_cut.format(1))
print('\n\n\nAudio encoding')
qaac_args = ['qaac', JPBD.a_src_cut.format(1), '-V', '127', '--no-delay', '-o', JPBD.a_enc_cut.format(1)]
subprocess.run(qaac_args, text=True, check=True, encoding='utf-8')
ffprobe_args = ['ffprobe', '-loglevel', 'quiet', '-show_entries', 'format_tags=encoder', '-print_format', 'default=nokey=1:noprint_wrappers=1', JPBD.a_enc_cut.format(1)]
encoder_name = subprocess.check_output(ffprobe_args, shell=True, encoding='utf-8')
f = open("tags_aac.xml", 'w')
f.writelines(['<?xml version="1.0"?>', '<Tags>', '<Tag>', '<Targets>', '</Targets>',
'<Simple>', '<Name>ENCODER</Name>', f'<String>{encoder_name}</String>', '</Simple>',
'</Tag>', '</Tags>'])
f.close()
print('\nFinal muxing')
mkv_args = ['mkvmerge', '-o', JPBD.output_final,
'--track-name', '0:HEVC BDRip by Vardë@Raws-Maji', '--language', '0:jpn', JPBD.output,
'--tags', '0:tags_aac.xml', '--track-name', '0:AAC 2.0', '--language', '0:jpn', JPBD.a_enc_cut.format(1)]
subprocess.run(mkv_args, text=True, check=True, encoding='utf-8')
# Clean up
files = [JPBD.a_src, JPBD.a_src_cut.format(1),
JPBD.a_enc_cut.format(1), 'tags_aac.xml']
for file in files:
if os.path.exists(file):
os.remove(file)
if __name__ == '__main__':
FILTERED = do_filter()
do_encode(FILTERED)
| [
"[email protected]"
] | |
135ccb49c34aa486409e6428052b893b0b4927b9 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/nltk/corpus/reader/xmldocs.py | b0e00c2ff7e6a7fc83f9b5868c21d1bc0d405702 | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,280 | py | # Natural Language Toolkit: XML Corpus Reader
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for corpora whose documents are xml files.
(note -- not named 'xml' to avoid conflicting w/ standard xml package)
"""
import codecs
from xml.etree import ElementTree
from nltk.corpus.reader.api import CorpusReader
from nltk.corpus.reader.util import *
from nltk.data import SeekableUnicodeStreamReader
from nltk.internals import ElementWrapper
from nltk.tokenize import WordPunctTokenizer
class XMLCorpusReader(CorpusReader):
"""
Corpus reader for corpora whose documents are xml files.
Note that the ``XMLCorpusReader`` constructor does not take an
``encoding`` argument, because the unicode encoding is specified by
the XML files themselves. See the XML specs for more info.
"""
def __init__(self, root, fileids, wrap_etree=False):
self._wrap_etree = wrap_etree
CorpusReader.__init__(self, root, fileids)
def xml(self, fileid=None):
# Make sure we have exactly one file -- no concatenating XML.
if fileid is None and len(self._fileids) == 1:
fileid = self._fileids[0]
if not isinstance(fileid, str):
raise TypeError("Expected a single file identifier string")
# Read the XML in using ElementTree.
with self.abspath(fileid).open() as fp:
elt = ElementTree.parse(fp).getroot()
# If requested, wrap it.
if self._wrap_etree:
elt = ElementWrapper(elt)
# Return the ElementTree element.
return elt
def words(self, fileid=None):
"""
Returns all of the words and punctuation symbols in the specified file
that were in text nodes -- ie, tags are ignored. Like the xml() method,
fileid can only specify one file.
:return: the given file's text nodes as a list of words and punctuation symbols
:rtype: list(str)
"""
elt = self.xml(fileid)
encoding = self.encoding(fileid)
word_tokenizer = WordPunctTokenizer()
try:
iterator = elt.getiterator()
except:
iterator = elt.iter()
out = []
for node in iterator:
text = node.text
if text is not None:
if isinstance(text, bytes):
text = text.decode(encoding)
toks = word_tokenizer.tokenize(text)
out.extend(toks)
return out
class XMLCorpusView(StreamBackedCorpusView):
"""
A corpus view that selects out specified elements from an XML
file, and provides a flat list-like interface for accessing them.
(Note: ``XMLCorpusView`` is not used by ``XMLCorpusReader`` itself,
but may be used by subclasses of ``XMLCorpusReader``.)
Every XML corpus view has a "tag specification", indicating what
XML elements should be included in the view; and each (non-nested)
element that matches this specification corresponds to one item in
the view. Tag specifications are regular expressions over tag
paths, where a tag path is a list of element tag names, separated
by '/', indicating the ancestry of the element. Some examples:
- ``'foo'``: A top-level element whose tag is ``foo``.
- ``'foo/bar'``: An element whose tag is ``bar`` and whose parent
is a top-level element whose tag is ``foo``.
- ``'.*/foo'``: An element whose tag is ``foo``, appearing anywhere
in the xml tree.
    - ``'.*/(foo|bar)'``: An element whose tag is ``foo`` or ``bar``,
appearing anywhere in the xml tree.
The view items are generated from the selected XML elements via
the method ``handle_elt()``. By default, this method returns the
element as-is (i.e., as an ElementTree object); but it can be
overridden, either via subclassing or via the ``elt_handler``
constructor parameter.
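    Example (the corpus file and tag spec here are hypothetical)::
        >>> view = XMLCorpusView('corpus.xml', '.*/entry') # doctest: +SKIP
        >>> for elt in view: # doctest: +SKIP
        ...     print(elt.tag)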
"""
#: If true, then display debugging output to stdout when reading
#: blocks.
_DEBUG = False
#: The number of characters read at a time by this corpus reader.
_BLOCK_SIZE = 1024
def __init__(self, fileid, tagspec, elt_handler=None):
"""
Create a new corpus view based on a specified XML file.
Note that the ``XMLCorpusView`` constructor does not take an
``encoding`` argument, because the unicode encoding is
specified by the XML files themselves.
:type tagspec: str
:param tagspec: A tag specification, indicating what XML
elements should be included in the view. Each non-nested
element that matches this specification corresponds to one
item in the view.
:param elt_handler: A function used to transform each element
to a value for the view. If no handler is specified, then
``self.handle_elt()`` is called, which returns the element
as an ElementTree object. The signature of elt_handler is::
elt_handler(elt, tagspec) -> value
"""
if elt_handler:
self.handle_elt = elt_handler
self._tagspec = re.compile(tagspec + r"\Z")
"""The tag specification for this corpus view."""
self._tag_context = {0: ()}
"""A dictionary mapping from file positions (as returned by
``stream.seek()`` to XML contexts. An XML context is a
tuple of XML tag names, indicating which tags have not yet
been closed."""
encoding = self._detect_encoding(fileid)
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
def _detect_encoding(self, fileid):
if isinstance(fileid, PathPointer):
try:
infile = fileid.open()
s = infile.readline()
finally:
infile.close()
else:
with open(fileid, "rb") as infile:
s = infile.readline()
if s.startswith(codecs.BOM_UTF16_BE):
return "utf-16-be"
if s.startswith(codecs.BOM_UTF16_LE):
return "utf-16-le"
if s.startswith(codecs.BOM_UTF32_BE):
return "utf-32-be"
if s.startswith(codecs.BOM_UTF32_LE):
return "utf-32-le"
if s.startswith(codecs.BOM_UTF8):
return "utf-8"
m = re.match(br'\s*<\?xml\b.*\bencoding="([^"]+)"', s)
if m:
return m.group(1).decode()
m = re.match(br"\s*<\?xml\b.*\bencoding='([^']+)'", s)
if m:
return m.group(1).decode()
# No encoding found -- what should the default be?
return "utf-8"
def handle_elt(self, elt, context):
"""
Convert an element into an appropriate value for inclusion in
the view. Unless overridden by a subclass or by the
``elt_handler`` constructor argument, this method simply
returns ``elt``.
:return: The view value corresponding to ``elt``.
:type elt: ElementTree
:param elt: The element that should be converted.
:type context: str
:param context: A string composed of element tags separated by
forward slashes, indicating the XML context of the given
element. For example, the string ``'foo/bar/baz'``
indicates that the element is a ``baz`` element whose
parent is a ``bar`` element and whose grandparent is a
top-level ``foo`` element.
"""
return elt
#: A regular expression that matches XML fragments that do not
#: contain any un-closed tags.
_VALID_XML_RE = re.compile(
r"""
[^<]*
(
((<!--.*?-->) | # comment
(<![CDATA[.*?]]) | # raw character data
(<!DOCTYPE\s+[^\[]*(\[[^\]]*])?\s*>) | # doctype decl
(<[^!>][^>]*>)) # tag or PI
[^<]*)*
\Z""",
re.DOTALL | re.VERBOSE,
)
#: A regular expression used to extract the tag name from a start tag,
#: end tag, or empty-elt tag string.
_XML_TAG_NAME = re.compile(r"<\s*(?:/\s*)?([^\s>]+)")
#: A regular expression used to find all start-tags, end-tags, and
#: empty-elt tags in an XML file. This regexp is more lenient than
#: the XML spec -- e.g., it allows spaces in some places where the
#: spec does not.
_XML_PIECE = re.compile(
r"""
# Include these so we can skip them:
(?P<COMMENT> <!--.*?--> )|
(?P<CDATA> <![CDATA[.*?]]> )|
(?P<PI> <\?.*?\?> )|
(?P<DOCTYPE> <!DOCTYPE\s+[^\[^>]*(\[[^\]]*])?\s*>)|
# These are the ones we actually care about:
(?P<EMPTY_ELT_TAG> <\s*[^>/\?!\s][^>]*/\s*> )|
(?P<START_TAG> <\s*[^>/\?!\s][^>]*> )|
(?P<END_TAG> <\s*/[^>/\?!\s][^>]*> )""",
re.DOTALL | re.VERBOSE,
)
def _read_xml_fragment(self, stream):
"""
Read a string from the given stream that does not contain any
un-closed tags. In particular, this function first reads a
block from the stream of size ``self._BLOCK_SIZE``. It then
checks if that block contains an un-closed tag. If it does,
then this function either backtracks to the last '<', or reads
another block.
"""
fragment = ""
if isinstance(stream, SeekableUnicodeStreamReader):
startpos = stream.tell()
while True:
# Read a block and add it to the fragment.
xml_block = stream.read(self._BLOCK_SIZE)
fragment += xml_block
# Do we have a well-formed xml fragment?
if self._VALID_XML_RE.match(fragment):
return fragment
# Do we have a fragment that will never be well-formed?
if re.search("[<>]", fragment).group(0) == ">":
pos = stream.tell() - (
len(fragment) - re.search("[<>]", fragment).end()
)
raise ValueError('Unexpected ">" near char %s' % pos)
# End of file?
if not xml_block:
raise ValueError("Unexpected end of file: tag not closed")
# If not, then we must be in the middle of a <..tag..>.
# If appropriate, backtrack to the most recent '<'
# character.
last_open_bracket = fragment.rfind("<")
if last_open_bracket > 0:
if self._VALID_XML_RE.match(fragment[:last_open_bracket]):
if isinstance(stream, SeekableUnicodeStreamReader):
stream.seek(startpos)
stream.char_seek_forward(last_open_bracket)
else:
stream.seek(-(len(fragment) - last_open_bracket), 1)
return fragment[:last_open_bracket]
# Otherwise, read another block. (i.e., return to the
# top of the loop.)
def read_block(self, stream, tagspec=None, elt_handler=None):
"""
Read from ``stream`` until we find at least one element that
matches ``tagspec``, and return the result of applying
``elt_handler`` to each element found.
"""
if tagspec is None:
tagspec = self._tagspec
if elt_handler is None:
elt_handler = self.handle_elt
# Use a stack of strings to keep track of our context:
context = list(self._tag_context.get(stream.tell()))
assert context is not None # check this -- could it ever happen?
elts = []
elt_start = None # where does the elt start
elt_depth = None # what context depth
elt_text = ""
while elts == [] or elt_start is not None:
if isinstance(stream, SeekableUnicodeStreamReader):
startpos = stream.tell()
xml_fragment = self._read_xml_fragment(stream)
# End of file.
if not xml_fragment:
if elt_start is None:
break
else:
raise ValueError("Unexpected end of file")
# Process each <tag> in the xml fragment.
for piece in self._XML_PIECE.finditer(xml_fragment):
if self._DEBUG:
print("{:>25} {}".format("/".join(context)[-20:], piece.group()))
if piece.group("START_TAG"):
name = self._XML_TAG_NAME.match(piece.group()).group(1)
# Keep context up-to-date.
context.append(name)
# Is this one of the elts we're looking for?
if elt_start is None:
if re.match(tagspec, "/".join(context)):
elt_start = piece.start()
elt_depth = len(context)
elif piece.group("END_TAG"):
name = self._XML_TAG_NAME.match(piece.group()).group(1)
# sanity checks:
if not context:
raise ValueError("Unmatched tag </%s>" % name)
if name != context[-1]:
raise ValueError(f"Unmatched tag <{context[-1]}>...</{name}>")
# Is this the end of an element?
if elt_start is not None and elt_depth == len(context):
elt_text += xml_fragment[elt_start : piece.end()]
elts.append((elt_text, "/".join(context)))
elt_start = elt_depth = None
elt_text = ""
# Keep context up-to-date
context.pop()
elif piece.group("EMPTY_ELT_TAG"):
name = self._XML_TAG_NAME.match(piece.group()).group(1)
if elt_start is None:
if re.match(tagspec, "/".join(context) + "/" + name):
elts.append((piece.group(), "/".join(context) + "/" + name))
if elt_start is not None:
# If we haven't found any elements yet, then keep
# looping until we do.
if elts == []:
elt_text += xml_fragment[elt_start:]
elt_start = 0
# If we've found at least one element, then try
# backtracking to the start of the element that we're
# inside of.
else:
# take back the last start-tag, and return what
# we've gotten so far (elts is non-empty).
if self._DEBUG:
print(" " * 36 + "(backtrack)")
if isinstance(stream, SeekableUnicodeStreamReader):
stream.seek(startpos)
stream.char_seek_forward(elt_start)
else:
stream.seek(-(len(xml_fragment) - elt_start), 1)
context = context[: elt_depth - 1]
elt_start = elt_depth = None
elt_text = ""
# Update the _tag_context dict.
pos = stream.tell()
if pos in self._tag_context:
assert tuple(context) == self._tag_context[pos]
else:
self._tag_context[pos] = tuple(context)
return [
elt_handler(
ElementTree.fromstring(elt.encode("ascii", "xmlcharrefreplace")),
context,
)
for (elt, context) in elts
]
| [
"[email protected]"
] | |
926c80f8c35f6ada515d47d5801b3bd6f88bba9a | 8dc39dc2c3aefc93bb63c666c59a34c833bbe20d | /monasca_common/kafka_lib/partitioner/__init__.py | 5b6ac2d4a66a5063cc81924c00b4caed6efe9532 | [
"Apache-2.0"
] | permissive | openstack/monasca-common | 1448c7beff899a9026e39b77a3219b82ab082f02 | cdc03f9ae5aeda0a8409e8d555efa488cfb25fa7 | refs/heads/master | 2023-08-30T05:28:25.682558 | 2023-08-12T20:42:54 | 2023-08-12T20:46:54 | 21,861,588 | 27 | 26 | Apache-2.0 | 2023-07-08T02:10:44 | 2014-07-15T13:54:25 | Java | UTF-8 | Python | false | false | 234 | py | from .roundrobin import RoundRobinPartitioner
from .hashed import HashedPartitioner, Murmur2Partitioner, LegacyPartitioner
__all__ = [
'RoundRobinPartitioner', 'HashedPartitioner', 'Murmur2Partitioner',
'LegacyPartitioner'
]
| [
"[email protected]"
] | |
b6af1643c1da16d9b7e7ee8212e32a31281c69de | 997a65cffc140b9005ac88f29222e7e632ec518c | /visualize_actmap.py | 6d4249f0acf8fbd4592affeff027721e384c98ba | [
"Apache-2.0"
] | permissive | Maeve-D/reid_for_deepsort | 5e900be1f22b09e9079bb8a0f6a36bad3e2fae91 | f82e2280443829339de4d29c7bbede9d992c4f97 | refs/heads/master | 2022-07-10T01:47:54.645557 | 2020-05-19T06:03:42 | 2020-05-19T06:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,281 | py | import argparse
import errno
import os
import os.path as osp
import pickle
import warnings
from collections import OrderedDict
from functools import partial
from torchvision import transforms
import cv2
import numpy as np
import torch
import torchvision
from torch.nn import functional as F
from models import build_model
def load_checkpoint(fpath):
r"""Loads checkpoint.
``UnicodeDecodeError`` can be well handled, which means
python2-saved files can be read from python3.
Args:
fpath (str): path to checkpoint.
Returns:
dict
Examples::
>>> from torchreid.utils import load_checkpoint
>>> fpath = 'log/my_model/model.pth.tar-10'
>>> checkpoint = load_checkpoint(fpath)
"""
if fpath is None:
raise ValueError('File path is None')
if not osp.exists(fpath):
raise FileNotFoundError('File is not found at "{}"'.format(fpath))
map_location = None if torch.cuda.is_available() else 'cpu'
try:
checkpoint = torch.load(fpath, map_location=map_location)
except UnicodeDecodeError:
pickle.load = partial(pickle.load, encoding="latin1")
pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
checkpoint = torch.load(fpath,
pickle_module=pickle,
map_location=map_location)
except Exception:
print('Unable to load checkpoint from "{}"'.format(fpath))
raise
return checkpoint
def load_pretrained_weights(model, weight_path):
    r"""Loads pretrained weights to model.
Features::
- Incompatible layers (unmatched in name or size) will be ignored.
- Can automatically deal with keys containing "module.".
Args:
model (nn.Module): network model.
weight_path (str): path to pretrained weights.
Examples::
>>> from torchreid.utils import load_pretrained_weights
>>> weight_path = 'log/my_model/model-best.pth.tar'
>>> load_pretrained_weights(model, weight_path)
"""
checkpoint = load_checkpoint(weight_path)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn('The pretrained weights "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(weight_path))
else:
print('Successfully loaded pretrained weights from "{}"'.format(
weight_path))
if len(discarded_layers) > 0:
print('** The following layers are discarded '
'due to unmatched keys or layer size: {}'.format(
discarded_layers))
def mkdir_if_missing(dirname):
"""Creates dirname if it is missing."""
if not osp.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def check_isfile(fpath):
"""Checks if the given path is a file.
Args:
fpath (str): file path.
Returns:
bool
"""
isfile = osp.isfile(fpath)
if not isfile:
warnings.warn('No file found at "{}"'.format(fpath))
return isfile
IMAGENET_MEAN = [0.3568, 0.3141, 0.2781]
IMAGENET_STD = [0.1752, 0.1857, 0.1879]
GRID_SPACING = 10
@torch.no_grad()
def visactmap(model,
test_loader,
save_dir,
width,
height,
use_gpu,
datasets,
img_mean=None,
img_std=None):
if img_mean is None or img_std is None:
        # fall back to the module-level defaults (these are dataset statistics, not the standard ImageNet mean/std)
img_mean = IMAGENET_MEAN
img_std = IMAGENET_STD
model.eval()
if True:
target = "cow"
data_loader = test_loader
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_' + target)
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps for {} ...'.format(target))
for batch_idx, (imgs, labels) in enumerate(data_loader):
if use_gpu:
imgs = imgs.cuda()
# forward to get convolutional feature maps
try:
outputs = model(imgs, return_featuremaps=True)
except TypeError:
raise TypeError(
'forward() got unexpected keyword argument "return_featuremaps". '
'Please add return_featuremaps as an input argument to forward(). When '
'return_featuremaps=True, return feature maps only.')
if outputs.dim() != 4:
raise ValueError(
'The model output is supposed to have '
'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. '
'Please make sure you set the model output at eval mode '
'to be the last convolutional feature maps'.format(
outputs.dim()))
# compute activation maps
outputs = (outputs**2).sum(1)
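            # summing squared activations over the channel axis collapses the
            # (b, c, h, w) feature maps into one spatial "energy" map per image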
b, h, w = outputs.size()
outputs = outputs.view(b, h * w)
outputs = F.normalize(outputs, p=2, dim=1)
outputs = outputs.view(b, h, w)
if use_gpu:
imgs, outputs = imgs.cpu(), outputs.cpu()
for j in range(outputs.size(0)):
# get image name
                path = datasets.imgs[batch_idx * data_loader.batch_size + j][0]  # index into the full dataset, not just this batch
imname = osp.basename(osp.splitext(path)[0])
# RGB image
img = imgs[j, ...]
for t, m, s in zip(img, img_mean, img_std):
t.mul_(s).add_(m).clamp_(0, 1)
img_np = np.uint8(np.floor(img.numpy() * 255))
img_np = img_np.transpose((1, 2, 0)) # (c, h, w) -> (h, w, c)
# activation map
am = outputs[j, ...].numpy()
am = cv2.resize(am, (width, height))
am = 255 * (am - np.min(am)) / (np.max(am) - np.min(am) +
1e-12)
am = np.uint8(np.floor(am))
am = cv2.applyColorMap(am, cv2.COLORMAP_JET)
# overlapped
overlapped = img_np * 0.3 + am * 0.7
overlapped[overlapped > 255] = 255
overlapped = overlapped.astype(np.uint8)
# save images in a single figure (add white spacing between images)
# from left to right: original image, activation map, overlapped image
grid_img = 255 * np.ones(
(height, 3 * width + 2 * GRID_SPACING, 3), dtype=np.uint8)
grid_img[:, :width, :] = img_np[:, :, ::-1]
grid_img[:, width + GRID_SPACING:2 * width +
GRID_SPACING, :] = am
grid_img[:, 2 * width + 2 * GRID_SPACING:, :] = overlapped
cv2.imwrite(osp.join(actmap_dir, imname +
'_%d_%d.jpg' % (batch_idx, j)), grid_img)
if (batch_idx + 1) % 10 == 0:
print('- done batch {}/{}'.format(batch_idx + 1,
len(data_loader)))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str)
parser.add_argument('-m', '--model', type=str, default='mudeep')
parser.add_argument('--weights', type=str)
parser.add_argument('--save-dir', type=str, default='log')
parser.add_argument('--height', type=int, default=128)
parser.add_argument('--width', type=int, default=128)
args = parser.parse_args()
use_gpu = torch.cuda.is_available()
trans = transforms.Compose([
transforms.Resize((128, 128)),
transforms.ToTensor(),
transforms.Normalize([0.3568, 0.3141, 0.2781],
[0.1752, 0.1857, 0.1879])
])
test_datasets = torchvision.datasets.ImageFolder("data/val",
transform=trans)
test_loader = torch.utils.data.DataLoader(test_datasets,
batch_size=6,
shuffle=False)
model = build_model(name=args.model,
num_classes=len(test_loader.dataset.classes),
use_gpu=use_gpu)
if args.weights and check_isfile(args.weights):
load_pretrained_weights(model, args.weights)
if use_gpu:
model = model.cuda()
visactmap(model, test_loader, args.save_dir, args.width, args.height,
use_gpu, test_datasets)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
648625485bd08395f2f97b719d859fc3a996ac25 | 6265f2a3f5eb53d8b42ef9f8840fff5f99f4385b | /bin/django-admin.py | 13758fcc6e2d0a6057eff39700ebfb960552c504 | [] | no_license | viniciuslopeslps/rosadeserto | 3712174e172958f5c6e1cf366a4b9de1e1bbf20f | baee2ef8877e54eb4f83ea4bd4fc4c46127ba7cc | refs/heads/master | 2021-01-18T13:37:06.261636 | 2015-09-19T12:28:57 | 2015-09-19T12:28:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!/home/vinicius/Desktop/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
0decd14c5eea6a3b94dc416b797428fb82c25fb8 | 6459988f85f39f2c650ebe92765c3512b2826439 | /src/preselectionstudy.py | 97c4ddadd84bfadecd81ffca84042ec83cdbcc18 | [] | no_license | sheldonptn/ThesisCode | 783055a385ba843c3ff1f6107462807df13bff5e | e8dd8da9fea4abefec58dfe5f3988f71d9642200 | refs/heads/master | 2021-01-21T05:54:44.303132 | 2013-08-31T00:33:09 | 2013-08-31T00:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(50000) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:pat.root'
)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('preselectionStudy.root')
)
process.demo = cms.EDAnalyzer('PreselectionStudy',
bdisc_name = cms.string('combinedSecondaryVertexBJetTags'),
RFijo = cms.untracked.bool(True),
outputLHCO = cms.string('pruebaPreselectionStudy'),
outputDiscriminants= cms.string('pruebaPreselectionStudy'),
postfix= cms.string('1')
)
process.p = cms.Path(process.demo)
| [
"[email protected]"
] | |
bcccb53694b38b42cf580dc8a1affe6c3f207603 | bf4c1793a20dcb5fac58ede1532212171fb5800c | /boss_service/version/v3/bossConfig.py | 91735a78cb0b40c29caa0888f5e636d1787e6c8d | [] | no_license | a2381165/bss-service | 392a52f02ecc383a240a08336141c9484705c028 | f5ab00f5a6abccdb53653eecc29c0f14b50da07d | refs/heads/master | 2020-04-13T23:17:03.336220 | 2018-12-29T10:33:50 | 2018-12-29T10:33:50 | 163,502,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,453 | py | # -*- coding: utf-8 -*-
from flask import Blueprint
version = "bossv3"
app = Blueprint('api', __name__)
# base
from controllers import AdminAuthorityApi
from controllers import UserLoginApi
from controllers import UserApi
from controllers import UserRoleApi
# from controllers import UserLogApi
from controllers import RoleApi
from controllers import RoleMenuApi
from controllers import OrganizationApi
from controllers import MenuApi
from controllers import AreaApi
from controllers import VersionApi
from controllers import UserRoleCheckApi
# spider
from controllers import SpiderNodeApi
from controllers import SpiderScriptApi
from controllers import SpiderDeployApi
from controllers import SpiderFailUrlApi
from controllers import SpiderProjectApi
from controllers import SpiderScheduleApi
from controllers import SpiderScriptNodeApi
from controllers import SpiderScriptScheduleApi
from controllers import TokenRefreshApi
from controllers import AdminLogApi
# data
from controllers import DataSourceApi
from controllers import CategoryApi
from controllers import CategoryInsertApi
from controllers import DataAreaApi
from controllers import DataAreaSetApi
from controllers import CrawlerApi
from controllers import TempItemApi
from controllers import DepartmentApi
from controllers import LabelApi
from controllers import DeptImgGet
from controllers import IndustryApi
from controllers import DeptItemApi
from controllers import ItemApi
from controllers import ItemContentAttachApi
from controllers import ItemContentApi
# appPreview
from controllers import appMinApi
# workbench
from controllers import WorkbenchApi
from controllers import WorkDataSourceApi
# dataCopy
# from common.demos import getOut
# uplaod
from controllers import uploadFileApi
# 咨詢師
from controllers import UserDeptAreaApi
from controllers import ItemServerApi
from controllers import TempServiceApi
# from controllers import AidanceApi
from controllers import NewAidanceApi
# from controllers import TaskAidanceApi
from controllers import ServiceAttachApi
# 咨询师 order
from controllers import UserOrderAssignApi
from controllers import UserOrderEditApi
from controllers import UserOrderAcceptApi
from controllers import UserOrderFileApi
from controllers import UserOrderProjectApi
from controllers import UserOrderContractApi
from controllers import UserOrderApi
from controllers import OrderAidanceTApi
# 流程
from controllers import WorkFlowApi
from controllers import SubFlowApi
# 通用
from controllers import CommonApi
from controllers import currencyApi
# 沟通
from controllers import CommunicateApi
from controllers import ProductServiceApi
from controllers import PotentialCustomersApi
from controllers import MemberEnterpriseCertificationApi
from controllers import MemberEnterpriseCertificationCheckApi
from controllers import ServiceApi
# 不通用流程 接口
from controllers import flowApi
# 银行
from controllers import BankPartnerApi
from controllers import BankProductApi
# 合同
from controllers import tempContractFlowApi
from controllers import MemberContractApi
# 渠道
from controllers import UserTaskApi
# 项目结算
from controllers import ContractSettlementApi
# 商务协助
from controllers import FlowSingleServiceApi
from controllers import FlowWholeServiceApi
from controllers import UserInternalOrderApi
# 项目 结算
from controllers import EditCostApi
from controllers import ServiceFeeApi
| [
"[email protected]"
] | |
26dc6a62ac0ea02cf588316e6268cc846400f74b | ffb10a6af572ccc3ac69757f3a4df6a717c3958e | /lab2.exe12.py | 8e00c8ca53a12a57f767865979a95437481eee35 | [] | no_license | gokarna123/Gokarna | 8a1d7623f8c68e582fb460e32fb61b56ef3aa2fe | 43ab75c7cc2f85ed1dbe0cc31f2d55161a2dd9c4 | refs/heads/master | 2021-01-03T18:04:50.500032 | 2020-02-13T05:26:03 | 2020-02-13T05:26:03 | 240,182,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | x=5
a=x+3
print("The value of x:", a) | [
"[email protected]"
] | |
22fa1c62f0118d9035ceb34d3c7c8aa00a0adc53 | 170ceca7c2213384f072372577fb98edc3e555bc | /useraccounts/models.py | 3f0fd3472b0ba0bfc545ada22649659c4358dcc2 | [] | no_license | coffeeincodeout/membership | a655cee0df1afc7193e3fb305a017d61ba4ca9ea | 9b790644f3419e513296e40ff76a96975cc1e520 | refs/heads/master | 2022-12-12T12:07:25.910622 | 2020-01-21T03:14:43 | 2020-01-21T03:14:43 | 234,834,968 | 1 | 0 | null | 2022-12-08T03:27:53 | 2020-01-19T03:32:45 | Python | UTF-8 | Python | false | false | 161 | py | from django.contrib.postgres.fields import JSONField
from django.db import models
# Create your models here.
class Members(models.Model):
data = JSONField() | [
"[email protected]"
] | |
2b82f5fba1337f533dbb1f8833eb6b441fc8dc61 | ed1e81a2325d310de7961274a06bfe6cdb7993d0 | /Coursera-Example-Python/mouse_input1.py | d465233dd31ac893509ce67086f5cbc72245762c | [] | no_license | fahimkhan/python | ce573298adf30ca8426b74f3ab275ab7f8047a91 | 1733ad39cf214362c8a76f8996740715888d2101 | refs/heads/master | 2021-01-15T15:50:27.323739 | 2016-08-24T11:02:56 | 2016-08-24T11:02:56 | 20,254,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # Examples of mouse input
import simplegui
import math
# intialize globals
WIDTH = 450
HEIGHT = 300
ball_pos = [WIDTH / 2, HEIGHT / 2]
BALL_RADIUS = 15
ball_color = "Red"
# helper function
def distance(p, q):
return math.sqrt( (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)
# define event handler for mouse click, draw
def click(pos):
global ball_pos, ball_color
if distance(pos, ball_pos) < BALL_RADIUS:
ball_color = "Green"
else:
ball_pos = list(pos)
ball_color = "Red"
def draw(canvas):
canvas.draw_circle(ball_pos, BALL_RADIUS, 1, "Black", ball_color)
# create frame
frame = simplegui.create_frame("Mouse selection", WIDTH, HEIGHT)
frame.set_canvas_background("White")
# register event handler
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
# start frame
frame.start()
| [
"[email protected]"
] | |
1bb7271217fb6f322b890d32a9751f1dd37b6bef | 8c8408703c7247b49653bb943346384764163e92 | /jan_7/app/__init__.py | fcf43f3f958c7cb5ae9b368b8d58a3c19c7aeac2 | [] | no_license | rowanv/blog_examples | da1cf7e0ab257b9c7b5c1aa8dd45a4e194ed330a | 44a06316f746c94ae0a579477710a6a832d2f7fc | refs/heads/master | 2021-01-10T04:22:38.688430 | 2016-02-11T09:08:15 | 2016-02-11T09:08:15 | 48,996,378 | 21 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from flask import Flask
from flask.ext.bower import Bower
from flask.ext.cache import Cache
app = Flask(__name__, static_url_path='/static')
# define the cache config keys,
app.config['CACHE_TYPE'] = 'simple'
# register the cache instance and binds it on to your app
app.cache = Cache(app)
from app import views | [
"[email protected]"
] | |
15a0e397e7e470df33685cb0d881c8f9130757a2 | c7ac2674039d3869c77e64db4036992578c19279 | /imported_xlrd/tests/test_xldate.py | 5f7c14d2d463cb5268d3b6e026fb79678fa0160b | [
"BSD-3-Clause"
] | permissive | pfiumara/Progetto3TDP | df0c7cbc4568c1911ca9dd661200df3e14989294 | afe0c6067c1109f20bfa753265aff1fc6329b744 | refs/heads/master | 2021-08-27T23:30:37.399682 | 2017-12-10T19:21:49 | 2017-12-10T19:21:49 | 113,438,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | #!/usr/bin/env python
# Author: mozman <[email protected]>
# Purpose: test xldate.py
# Created: 04.12.2010
# Copyright (C) 2010, Manfred Moitzi
# License: BSD licence
import unittest
from imported_xlrd.xlrd import xldate
DATEMODE = 0 # 1900-based
class TestXLDate(unittest.TestCase):
def test_date_as_tuple(self):
date = xldate.xldate_as_tuple( 2741., DATEMODE )
self.assertEqual(date, (1907, 7, 3, 0, 0, 0))
date = xldate.xldate_as_tuple( 38406., DATEMODE )
self.assertEqual(date, (2005, 2, 23, 0, 0, 0))
date = xldate.xldate_as_tuple( 32266., DATEMODE )
self.assertEqual(date, (1988, 5, 3, 0, 0, 0))
def test_time_as_tuple(self):
time = xldate.xldate_as_tuple( .273611, DATEMODE )
self.assertEqual(time, (0, 0, 0, 6, 34, 0))
time = xldate.xldate_as_tuple( .538889, DATEMODE )
self.assertEqual(time, (0, 0, 0, 12, 56, 0))
time = xldate.xldate_as_tuple( .741123, DATEMODE )
self.assertEqual(time, (0, 0, 0, 17, 47, 13))
def test_xldate_from_date_tuple(self):
date = xldate.xldate_from_date_tuple( (1907, 7, 3), DATEMODE )
self.assertAlmostEqual(date, 2741.)
date = xldate.xldate_from_date_tuple( (2005, 2, 23), DATEMODE )
self.assertAlmostEqual(date, 38406.)
date = xldate.xldate_from_date_tuple( (1988, 5, 3), DATEMODE )
self.assertAlmostEqual(date, 32266.)
def test_xldate_from_time_tuple(self):
time = xldate.xldate_from_time_tuple( (6, 34, 0) )
self.assertAlmostEqual(time, .273611, places=6)
time = xldate.xldate_from_time_tuple( (12, 56, 0) )
self.assertAlmostEqual(time, .538889, places=6)
time = xldate.xldate_from_time_tuple( (17, 47, 13) )
self.assertAlmostEqual(time, .741123, places=6)
def test_xldate_from_datetime_tuple(self):
date = xldate.xldate_from_datetime_tuple( (1907, 7, 3, 6, 34, 0), DATEMODE )
self.assertAlmostEqual(date, 2741.273611, places=6)
date = xldate.xldate_from_datetime_tuple( (2005, 2, 23, 12, 56, 0), DATEMODE )
self.assertAlmostEqual(date, 38406.538889, places=6)
date = xldate.xldate_from_datetime_tuple( (1988, 5, 3, 17, 47, 13), DATEMODE )
self.assertAlmostEqual(date, 32266.741123, places=6)
if __name__=='__main__':
unittest.main()
| [
"[email protected]"
] | |
d1f184b2b374d46d840344178e62fc14a30c8621 | 7f0dd9a5868f0033cd44a1d254e0251648ab7eb8 | /lab_zyd_pathSim/find_meta_path.py | f88f66e7c51ace95efbf7be6873dc0a806a7d257 | [] | no_license | EisRoot/zyd_pathSim | fc99c25f90b049417fd11d9e44dad49cc02602a9 | 1427c6b2158dc0cad5ef888427f4a0122d2abf2a | refs/heads/master | 2020-06-28T20:17:22.631471 | 2019-08-18T04:22:39 | 2019-08-18T04:22:39 | 200,331,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,635 | py | from build_graph import init_graph
import pandas as pd
from DGAS_FMPV2 import FMP_algo
import multiprocessing
import math
import datetime
import os
def read_node_file(path):
nodes = []
with open(path, "r",encoding='UTF-8-sig') as fp:
lines = fp.readlines()
for line in lines:
line = line[:-1]
nodes.append(line)
return nodes
def get_node_type(node):
"""
Get node type
:param node:
:return: type
"""
type=node[0]
return type
def create_gene_pair():
nodes = read_node_file("4DegreeGene_HGNCID.csv")
print(nodes)
max_length = 4
algo = FMP_algo(init_graph())
pair_list = []
c = 0
match_list = []
for i in nodes:
match_list.append(i)
nodes2 = list(set(nodes) - set(match_list))
for j in nodes2:
# if not i ==j and not pair_list.__contains__({
# "gene1": j,
# "gene2": i
# }):
if not i == j:
pair_list.append({
"gene1": i,
"gene2": j
})
# re = algo.start(i, j, max_length)
# print(re)
else:
c += 1
print(c)
print(len(pair_list))
pair_pd = pd.DataFrame(pair_list)
pair_pd.to_csv("lab_result/gene_pair.csv")
def mutil_prcoessing(dict_parameter):
gene_pair=dict_parameter['gene_pair']
max_length=dict_parameter['max_length']
graph=dict_parameter['graph']
now_time = datetime.datetime.now().timestamp()
algo=FMP_algo(graph)
count = 0
lens = len(gene_pair)
final_len = lens
meta_list={}
for pair in gene_pair:
count += 1
if count % 100 == 0:
time = datetime.datetime.now().timestamp() - now_time
array_time = time / (count)
left =array_time * (final_len - count) / (60)
print("the estimated time of processing:"+str(os.getpid())+" is:"+str(left)[0:5]+" min")
gene1=pair[0]
gene2=pair[1]
re = algo.start(gene1, gene2, max_length)
# if score.__len__() > 0:
# print(score)
meta_list = count_meta_path(re,meta_list)
print(" ############Processing:"+str(os.getpid())+" is done ###################")
return meta_list
def count_meta_path(re,dict):
for i in re:
meta_path_name=i["meta_path_name"]
if dict.__contains__(meta_path_name):
dict[meta_path_name]['num_of_pair']+=1
dict[meta_path_name]['ins']+=i['ins']
else:
dict[meta_path_name]={
"num_of_pair":1,
"ins":i['ins']
}
return dict
def create_multi_task_data(gene_pairs, cores, max_length,):
list_len = len(gene_pairs)
cut_count = math.floor(list_len / cores)
cut_list = []
print("Split data("+str(list_len)+") into "+str(cores)+" set:")
graph=init_graph()
for i in range(0, cores-1):
print(str(i * cut_count)+"--"+str(i * cut_count + cut_count - 1))
piece = gene_pairs[i * cut_count:i * cut_count + cut_count - 1]
cut_list.append({
'gene_pair': piece,
'max_length': max_length,
'graph':graph
})
i = cores-1
final_piece = gene_pairs[i * cut_count:list_len - 1]
print(str(i * cut_count)+"--"+str(list_len-1))
cut_list.append({
'gene_pair': final_piece,
'max_length': max_length,
'graph': graph
})
return cut_list
if __name__ == '__main__':
gene_pair=pd.read_csv("lab_result/gene_pair.csv",index_col=0)
max_length=4
gene_pair=gene_pair.to_records(index=None)
cores = multiprocessing.cpu_count() - 2
multiprocessing.freeze_support()
pool = multiprocessing.Pool(processes=cores)
print("The number of cpu cores is " + str(cores))
multiprocessing_data=create_multi_task_data(gene_pair,cores,max_length)
total={}
for y in pool.imap_unordered(mutil_prcoessing,multiprocessing_data):
for k,v in y.items():
if total.__contains__(k):
total[k]['num_of_pair']+=v['num_of_pair']
total[k]['ins']+=v['ins']
else:
total[k]={
'num_of_pair':v['num_of_pair'],
'ins':v['ins']
}
total_list=[]
for k,v in total.items():
total_list.append({
"meta_path_name":k,
"num_of_pair":v['num_of_pair'],
"ins":v['ins']
})
total_pd=pd.DataFrame(total_list)
total_pd.to_csv("lab_result/meta_path_total.csv") | [
"[email protected]"
] | |
fbf8a24c5d4d4f392218ab2998d4bdc5aeabd786 | 46818112a384b7623664c1591f0c060594e74bb5 | /final_ver3.py | 1789128bb89b59997bb91d64a672abd432cbf0e1 | [] | no_license | NaifengLiu/SparkSubmission | acdcc45eb37bc7d46102b8a680222d6949496d1c | c4bdf76ae2cbe4ccacae9894e0345b2397624ec5 | refs/heads/master | 2022-07-19T18:22:46.919596 | 2020-05-20T04:26:51 | 2020-05-20T04:26:51 | 255,046,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,639 | py | from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark import SparkFiles
import sys
def find_id(house, street, street_dict):
import re
import bisect
street = street.upper()
house = re.sub("[^0-9]", "", house)
if house != "":
if street not in street_dict:
return None
options = street_dict[street]
house = int(house)
if house % 2 == 1:
return options[1][bisect.bisect_left(options[0], house)]
else:
return options[3][bisect.bisect_left(options[2], house)]
return None
def process(pid, records):
import csv
from ast import literal_eval
counts = {}
streets_list = dict()
boros = [0, dict(), dict(), dict(), dict(), dict()]
boro_id = [1,2,3,4,5]
for single_id in boro_id:
with open("boro_" + str(single_id) + ".csv") as f:
for line in f.readlines():
street_name = line.split(",")[0]
rest = ','.join(line.split(",")[1:])
boros[single_id][street_name] = literal_eval(rest)
f.close()
if pid==0:
next(records)
reader = csv.reader(records)
for row in reader:
county = row[0]
num = row[1]
st = row[2]
nyc_boro_mapping = dict()
nyc_boro_mapping['NY'] = 1
nyc_boro_mapping['BX'] = 2
nyc_boro_mapping['BK'] = 3
nyc_boro_mapping['K'] = 3
nyc_boro_mapping['Q'] = 4
nyc_boro_mapping['QN'] = 4
nyc_boro_mapping['ST'] = 5
if county in nyc_boro_mapping:
zoneid = find_id(num, st, boros[nyc_boro_mapping[county]])
if zoneid:
counts[zoneid] = counts.get(zoneid, 0) + 1
return counts.items()
if __name__ == '__main__':
sc = SparkContext()
spark = SparkSession(sc)
# path = "hdfs:///data/share/bdm/nyc_cscl.csv"
# path = "hdfs:///user/nliu/boros/boro_1.csv"
# sc.addFile("hdfs:///user/nliu/boros/boro_1.csv")
# sc.addFile("hdfs:///user/nliu/boros/boro_2.csv")
# sc.addFile("hdfs:///user/nliu/boros/boro_3.csv")
# sc.addFile("hdfs:///user/nliu/boros/boro_4.csv")
# sc.addFile("hdfs:///user/nliu/boros/boro_5.csv")
# df = spark.read.csv("2015.csv", header=True, multiLine=True, escape='"')
# rdd = df.select(df['Violation County'], df['House Number'], df['Street Name']).rdd
rdd = sc.textFile("2015.csv")
print(rdd.collect())
counts = rdd.mapPartitionsWithIndex(process).reduceByKey(lambda x, y: x + y).collect()
# counts.show()
print(counts)
# counts.saveAsTextFile("2222")
| [
"[email protected]"
] | |
d9b1bb102c23a95e7439b40884113ab0e2e964db | 4cb91aa52cc495c434535946b98b31a573589410 | /ui.py | efdfaf62bea7b834854f9392baf4164e6c1ad7bf | [] | no_license | vinczepa57/QuizProject | 9eb154fa7b195ae67b548dcdeab52408c5113e86 | 110d4a94f6d9557608e1e92bb1fba563cbd90f9b | refs/heads/master | 2023-04-30T08:26:12.289837 | 2021-05-21T15:02:01 | 2021-05-21T15:02:01 | 369,570,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | from tkinter import *
from quiz_brain import QuizBrain
THEME_COLOR = "#375362"
class QuizInterface:
def __init__(self, quiz_brain: QuizBrain):
self.quiz = quiz_brain
self.window = Tk()
self.window.title("Quizzler")
self.window.config(padx=20, pady=20, bg=THEME_COLOR)
self.score_label = Label(text="Score: 0", bg=THEME_COLOR, fg="white")
self.score_label.grid(row=0, column=1)
self.canvas = Canvas(width=300, height=250, bg="white")
self.question_text = self.canvas.create_text(
150,
125,
text="Some QT",
fill=THEME_COLOR,
font=("Arial", 20, "italic"),
width=280
)
self.canvas.grid(row=1, column=0, columnspan=2, pady=50)
true_image = PhotoImage(file="images/true.png")
self.true_button = Button(image=true_image, highlightthickness=0, command=self.true_pressed)
self.true_button.grid(row=2, column=0)
false_image = PhotoImage(file="images/false.png")
self.false_button = Button(image=false_image, highlightthickness=0, command=self.false_pressed)
self.false_button.grid(row=2, column=1)
self.get_next_question()
self.window.mainloop()
def get_next_question(self):
self.canvas.config(bg="white")
if self.quiz.still_has_questions():
self.score_label.config(text=f"Score: {self.quiz.score}")
q_text = self.quiz.next_question()
self.canvas.itemconfig(self.question_text, text=q_text)
else:
self.canvas.itemconfig(self.question_text, text="You´ve reached the end of the quiz")
self.true_button.config(state="disabled")
self.false_button.config(state="disabled")
def true_pressed(self):
self.give_feedback(self.quiz.check_answer("True"))
def false_pressed(self):
self.give_feedback(self.quiz.check_answer("False"))
def give_feedback(self, is_right):
if is_right:
self.canvas.config(bg="green")
else:
self.canvas.config(bg="red")
self.window.after(1000, self.get_next_question)
| [
"[email protected]"
] | |
d6584a40ee6dfbae3346fbf4b6be6f4381bdd0c4 | f8ac857c9a03095d8c0a42f2a30087c7afe95196 | /train.py | ad00d9ad089400c9613761e6ac61f18cfd2005bf | [] | no_license | lpadukana/drlnd_p3_tennis | 5de698b0306d92be8dfb1c3912cb15641e2a0957 | e5c996136f282257b1a6331269d3203b389bd8af | refs/heads/master | 2020-04-08T02:16:51.008404 | 2018-11-24T14:24:43 | 2018-11-24T14:24:43 | 158,929,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,526 | py | from collections import deque
import torch
from ddpg_agent import Agent
from unityagents import UnityEnvironment
import numpy as np
from contextlib import closing, contextmanager
import matplotlib.pyplot as plt
@contextmanager
def muted_logs(log_name):
import logging
logger = logging.getLogger(log_name)
old_level = logger.level
old_propagate = logger.propagate
logger.setLevel(logging.CRITICAL)
logger.propagate = False
try:
yield
finally:
logger.setLevel(old_level)
logger.propagate = old_propagate
class UnityEnvWrapper:
def __init__(self, no_graphics=False):
self.env = UnityEnvironment(
file_name="Tennis_Linux/Tennis.x86_64",
no_graphics=no_graphics
)
self.env_info = None
self.reset()
def brain_name(self):
return self.env.brain_names[0]
def brain(self):
return self.env.brains[self.brain_name()]
def reset(self, train_mode=True):
self.env_info = self.env.reset(train_mode=train_mode)[
self.brain_name()]
return self.env_info.vector_observations
def n_agents(self):
# self.states().shape[0]
return len(self.env_info.agents)
def n_actions(self):
return self.brain().vector_action_space_size
def states(self):
return self.env_info.vector_observations
def step(self, actions):
self.env_info = self.env.step(actions)[self.brain_name()]
return self.env_info.vector_observations, self.env_info.rewards, self.env_info.local_done, None
def n_states(self):
return self.states().shape[1]
def info(self):
print('Number of agents:', self.n_agents())
print('Size of each action:', self.n_actions())
print('Size of observations: {}'.format(self.n_states()))
print('Example state:', self.states()[0])
@classmethod
def random_play(cls, n_episodes):
env = cls()
with closing(env):
for i in range(1, n_episodes):
states = env.reset(train_mode=False)
scores = np.zeros(env.n_agents())
while True:
actions = np.clip(np.random.randn(
env.n_agents(), env.n_actions()), -1, 1)
next_states, rewards, dones, _ = env.step(actions)
scores += rewards
states = next_states
if np.any(dones):
break
print('Score (max over agents) from episode {}: {}'.format(
i, np.max(scores)))
def close(self):
self.env.close()
# with muted_logs('unityagents'), muted_logs('root'):
# UnityEnvWrapper.random_play(n_episodes=15)
env = UnityEnvWrapper(no_graphics=False)
agent1 = Agent(state_size=env.n_states() + 1,
action_size=env.n_actions(), random_seed=2)
agent2 = Agent(state_size=env.n_states() + 1,
action_size=env.n_actions(), random_seed=2)
agent2.critic_local = agent1.critic_local
agent2.critic_target = agent1.critic_target
agent2.critic_optimizer = agent1.critic_optimizer
agent2.actor_local = agent1.actor_local
agent2.actor_target = agent1.actor_target
agent2.actor_optimizer = agent1.actor_optimizer
agent2.memory = agent1.memory
print(env.n_agents(), env.n_states(), env.n_actions())
def save(i_episode, scores1, scores2, mean_scores):
print("Saving checkpoints...")
torch.save(agent1.actor_local.state_dict(),
'checkpoint_actor_1.pth')
torch.save(agent2.actor_local.state_dict(),
'checkpoint_actor_2.pth')
torch.save(agent1.critic_local.state_dict(),
'checkpoint_critic_1.pth')
torch.save(agent2.critic_local.state_dict(),
'checkpoint_critic_2.pth')
torch.save(dict(episode=i_episode,
scores1=scores1,
scores2=scores2,
mean_scores=mean_scores),
'scores.pth')
def train_agent(n_episodes=10000, print_every=100, target_score=0.5):
scores1 = []
scores2 = []
mean_scores = []
for i_episode in range(0, n_episodes + 1):
state = env.reset(train_mode=True)
agent1.reset()
agent2.reset()
score1 = 0
score2 = 0
while True:
state1 = np.concatenate([state[0], [1]])
state2 = np.concatenate([state[1], [-1]])
action1 = agent1.act(state1)
action2 = agent2.act(state2)
next_state, reward, done, _ = env.step([action1, action2])
next_state1 = np.concatenate([next_state[0], [1]])
next_state2 = np.concatenate([next_state[1], [-1]])
agent1.step(state1, action1, np.mean(reward),
next_state1, done[0])
agent2.step(state2, action2, np.mean(reward),
next_state2, done[1])
state = next_state
score1 += reward[0]
score2 += reward[1]
if np.all(done):
break
scores1.append(score1)
scores2.append(score2)
mean_scores.append(np.mean([score1, score2]))
mean1 = np.mean(scores1[-100:])
mean2 = np.mean(scores2[-100:])
mean_score = np.mean(mean_scores[-100:])
print('Episode {}\t mean-score-1: {:.3f} mean-score-2: {:.3f} mean-score: {:.3f}'.format(
i_episode, mean1, mean2, mean_score))
if i_episode % print_every == 0:
save(i_episode, scores1, scores2, mean_scores)
if mean_score > target_score:
save(i_episode, scores1, scores2, mean_scores)
break
return mean_scores
def main():
scores = train_agent(target_score=1.0)
fig = plt.figure()
fig.add_subplot(111)
plt.plot(np.arange(1, len(scores) + 1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.savefig('score.png')
play()
def play():
agent1.actor_local.load_state_dict(torch.load('checkpoint_actor_1.pth'))
agent1.actor_local.eval()
agent2.critic_local.load_state_dict(torch.load('checkpoint_critic_2.pth'))
agent2.actor_local.eval()
state = env.reset(train_mode=False)
while True:
state1 = np.concatenate([state[0], [1]])
state2 = np.concatenate([state[1], [-1]])
action1 = agent1.act(state1, add_noise=False)
action2 = agent2.act(state2, add_noise=False)
state, _, _, _ = env.step([action1, action2])
main()
| [
"[email protected]"
] | |
d6129b4b35fd43742bcdbe7f19a651ae53655464 | 6e803e53594b479c65174d081e8d4f00c9b9d0a8 | /echo_client.py | 09e992d555b779b58c812bcc45d94b71f61573ab | [] | no_license | voite1/python_server | fe294ba0286da886fbbc62ab227be2512bd0d79c | b0ef6c3249a824c330da4d6f16c9707cc7853804 | refs/heads/master | 2020-05-18T21:35:34.452417 | 2015-01-28T23:14:30 | 2015-01-28T23:14:30 | 29,949,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | import socket
import sys
def client(msg, log_buffer=sys.stderr):
server_address = ('localhost', 10000)
# TODO: Replace the following line with your code which will instantiate
# a TCP socket with IPv4 Addressing, call the socket you make 'sock'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
print >>log_buffer, 'connecting to {0} port {1}'.format(*server_address)
# TODO: connect your socket to the server here.
sock.connect(server_address)
# this try/finally block exists purely to allow us to close the socket
# when we are finished with it
try:
print >>log_buffer, 'sending "{0}"'.format(msg)
# TODO: send your message to the server here.
sock.sendall(msg)
# TODO: the server should be sending you back your message as a series
# of 16-byte chunks. You will want to log them as you receive
# each one. You will also need to check to make sure that
# you have received the entire message you sent __before__
# closing the socket.
# Make sure that you log each chunk you receive. Use the print
# statement below to do it. (The tests expect this log format)
while True:
chunk = sock.recv(16)
print >>log_buffer, 'received "{0}"'.format(chunk)
if len(chunk) < 16:
break
finally:
# TODO: after you break out of the loop receiving echoed chunks from
# the server you will want to close your client socket.
print >>log_buffer, 'closing socket'
sock.close()
if __name__ == '__main__':
if len(sys.argv) != 2:
usg = '\nusage: python echo_client.py "this is my message"\n'
print >>sys.stderr, usg
sys.exit(1)
msg = sys.argv[1]
client(msg)
| [
"[email protected]"
] | |
7baaf7a98f9dd55046043b389df190b7c96d3b93 | d5fac012124c783d196f4d0e387740643ea4aae3 | /lab/plot.py | 86af8ac2d1cf9e3157ed2f0114aead3350758206 | [
"MIT"
] | permissive | kazumov/na-zero-vs-mean | 4e309118b4180df52dbb5bf4c842362af576fd95 | a80e168b3f83dfccc6c294e16c52f4a582a0f345 | refs/heads/master | 2020-08-27T17:22:02.117557 | 2019-10-25T16:53:38 | 2019-10-25T16:53:38 | 217,443,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,563 | py | """The plot class"""
__author__ = "Ruben R. Kazumov"
__copyright__ = "Copyright 2019, Ruben R. Kazumov"
__credits__ = ["Ruben R. Kazumov"]
__license__ = "MIT"
__version__ = [3, 0, 0]
__maintainer__ = "Ruben R. Kazumov"
__email__ = "[email protected]"
__status__ = "Production"
import os, sys
from uuid import uuid4
from abc import ABC
from keras.callbacks.callbacks import History
import matplotlib.pyplot as plt
import seaborn
class Plot(ABC):
def __init__(self, path: str = "plots", fileName: str = ""):
dirNameDefault = "plots"
if path == "":
self.path = os.getcwd() + os.path.sep + dirNameDefault
else:
self.path = os.getcwd() + os.path.sep + path
# file name
if fileName == "":
# random
self.url = self.path + os.path.sep + str(uuid4()) + ".png"
else:
# concrete name
self.url = self.path + os.path.sep + fileName
if os.path.exists(self.url):
# remove old file
os.remove(self.url)
class FittingAccuracy(Plot):
"""The fitting accuracy plot"""
def __init__(self, dataSignature: str = "", path: str = "plots"):
self.title = dataSignature
# file name is alwais random
super().__init__(path=path, fileName="")
def plot(
self,
history: History,
trainingDataSelector: str = "accuracy",
testDataSelector: str = "val_accuracy",
) -> str:
"""Creates a fitting accuracy plot.
Parameter:
- history (History): A fitting history.
- trainingDataSelector (str): A histroy data attribute name for the training data.
- testDataSelector (str): A histroy data attribute name fro the testing data.
Returns (str): URL to plot file."""
plt.style.use("seaborn")
seaborn.set_style("whitegrid")
plt.figure()
axes = plt.gca()
axes.set_ylim([0.4, 1.0])
plt.plot(history.history[trainingDataSelector])
plt.plot(history.history[testDataSelector])
plt.title(f"Accuracy plot for the data: ({self.title})")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Train", "Test"], loc="upper left")
plt.savefig(self.url)
return self.url
class FittingLossFunction(Plot):
"""The loss function plot"""
def __init__(self, dataSignature: str = "", path: str = "plots"):
self.title = dataSignature
# file name is alwais random
super().__init__(path=path, fileName="")
def plot(
self,
history: History,
trainingDataSelector: str = "loss",
testDataSelector: str = "val_loss",
) -> None:
"""Plots a history of the loss function value change
Parameter:
- history (History): A fitting history.
- trainingDataSelector (str): A histroy data attribute name for the training data.
- testDataSelector (str): A histroy data attribute name fro the testing data.
Returns (str): URL to plot file."""
plt.style.use("seaborn")
seaborn.set_style("whitegrid")
plt.figure()
plt.plot(history.history[trainingDataSelector])
plt.plot(history.history[testDataSelector])
plt.title(f"Loss function plot for the data: ({self.title})")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.legend(["Train", "Test"], loc="upper left")
plt.savefig(self.url)
return self.url
| [
"[email protected]"
] | |
e6cc5c16152309d1d35ea6968ab5ad78ff93572b | dfb53581b4e6dbdc8e3789ea2678de1e1c4b5962 | /AI/DataAnalysis/Day04/demo08_sma.py | 4b8e4210a713cc7154bd036950c54937ff630d5c | [] | no_license | biabulinxi/Python-ML-DL | 7eff6d6898d72f00575045c5aa2acac45b4b0b82 | 217d594a3c0cba1e52550f74d100cc5023fb415b | refs/heads/master | 2020-06-01T09:13:17.314121 | 2019-06-08T03:59:36 | 2019-06-08T03:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,491 | py | # -*- coding: utf-8 -*-
# @Project:AID1810
# @Author:biabu
# @Date:2019/3/18 15:30
# @File_name:demo08_sma.py
# @IDE:PyCharm
"""
移动平均线
"""
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as md
# 处理日期格式
def dmy2ymd(dmy):
dmy = str(dmy, encoding='utf-8')
# 转换格式
d = dt.datetime.strptime(dmy, '%d-%m-%Y')
t = d.date()
s = t.strftime('%Y-%m-%d')
return s
# usecols = (列索引, 行索引)
dates, closing_prices = np.loadtxt('../da_data/aapl.csv', delimiter=',', usecols=(1, 6), unpack=True, dtype='M8[D], f8',converters={1: dmy2ymd})
# 绘制收盘价
plt.figure('AAPL', facecolor='lightgray')
plt.title('AAPL', fontsize=18)
plt.xlabel('Date', fontsize=14)
plt.ylabel('Price', fontsize=14)
plt.tick_params(labelsize=10)
plt.grid(linestyle=':')
# 设置主刻度定位器为每周一
ax = plt.gca()
ax.xaxis.set_major_locator(md.WeekdayLocator(byweekday=md.MO))
ax.xaxis.set_major_formatter(md.DateFormatter('%Y/%m/%d'))
# 修改dates的数据类型 从M8[D] -> 到matplotlib可以识别的日期类型
dates = dates.astype(md.datetime.datetime)
plt.plot(dates, closing_prices, color='b', linewidth=3, linestyle='--', label='closing_prices', alpha=0.3)
########################################################
# 计算5日均线
sma5 = np.zeros(closing_prices.size - 4)
for i in range(sma5.size):
sma5[i] = closing_prices[i:i+5].mean()
# 绘制均线图
plt.plot(dates[4:],sma5, color='orangered', label='SMA-5', linewidth=2)
########################################################
# 基于卷积元算的5日均线
# 卷积核数组
core = np.ones(5) / 5
# 有效卷积运算
sma52 = np.convolve(closing_prices, core, 'valid')
plt.plot(dates[4:],sma52, color='g', label='SMA-52', linewidth=7, alpha=0.3)
# 基于卷积元算的10日均线
# 卷积核数组
core = np.ones(10) / 10
# 有效卷积运算
sma53 = np.convolve(closing_prices, core, 'valid')
plt.plot(dates[9:],sma53, color='b', label='SMA-53', linewidth=7, alpha=0.3)
###################################################
# 加权卷积5日均线
# 从 y = e^x, 取5个值作为卷积核
weights = np.exp(np.linspace(-1, 0, 5))
weights = weights[::-1]
# 权重归一化
weights /= weights.sum()
ema5 = np.convolve(closing_prices, weights, 'valid')
plt.plot(dates[4:],ema5, color='yellow', label='SMA-5wt', linewidth=2)
plt.legend()
# 自动格式化x轴的日期输出
plt.gcf().autofmt_xdate()
plt.show()
| [
"[email protected]"
] | |
0a7bb23340642e74887e7fbc396203b06a25cec4 | 88d9ae530766b1e5c916aa0533bbf02e5c6f5c72 | /overrideMethod.py | 97fdb8eb065c6c9a31b096b1e9124621dc1802fe | [] | no_license | jonathanbasuki/Python-Object-Oriented | 8962edb05781f89208dda434bfa8a021e0bb05ac | 304d42df8f8760f84f7fbb41705593ff1616a9d9 | refs/heads/master | 2022-12-03T00:05:20.120244 | 2020-08-22T06:09:08 | 2020-08-22T06:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | class Hero:
def __init__(self, name, health):
self.name = name
self.health = health
def info(self):
print("{} \n\tType : Hero \n\tHealth : {}".format(self.name, self.health))
# subclass
class Mage(Hero):
def __init__(self, name):
super().__init__(name, 100)
# subclass
class Tank(Hero):
def __init__(self, name):
super().__init__(name, 200)
def info(self):
print("{} \n\tType : Tank \n\tHealth : {}".format(self.name, self.health))
cyclops = Mage("Cyclops")
franco = Tank("Franco")
cyclops.info()
franco.info()
| [
"[email protected]"
] | |
c0bce431720191c8deb12083156d2057dd2f92bd | de27692fcf2ff86b024ebee7784a96a8b1329e4d | /ex041.py | 2df7258146b83cbf6c6b47184fb50def1710c9da | [] | no_license | YaraDeOliveira/CursoemVideo | d2675c5e22840330f1cd581368e6855103b6c7ea | f065c768231deaa996498d21af6b97fd1201abc4 | refs/heads/main | 2023-04-23T07:56:11.700722 | 2021-05-12T14:16:36 | 2021-05-12T14:16:36 | 348,168,481 | 0 | 0 | null | 2021-03-26T18:35:31 | 2021-03-16T00:57:48 | Python | UTF-8 | Python | false | false | 462 | py | from datetime import date
ano = int(input('Digite o ano do seu nascimento: '))
idade = date.today().year - ano
print(f'O atleta tem {idade} anos')
if idade <= 9:
print('\033[1;34mCLASSIFICACAO: MIRIM')
elif 9 < idade <= 14:
print('\033[1;34mCLASSIFICACAO: INFANTIL')
elif 14 < idade <= 19:
print('\033[1;34mCLASSIFICACAO: JUNIOR')
elif 19 < idade <= 20:
print('\033[1;34mCLASSIFICACAO: SENIOR')
else:
print('\033[1;34mCLASSIFICACAO: MASTER')
| [
"[email protected]"
] | |
34ec16d1624c47e10c7ebfdebdfd30f821d3ad3b | ce797d225651f2a8a501b4965569448509872550 | /myscript.py | cf7bfc9c7d985e0d9419853a94c3587406b39c65 | [] | no_license | yota9/keras_neural | 63145a61e2c78ee506267604a0caedb5c81c7bda | 64299b800b36cd6040ea301ee3bc4ecc04796988 | refs/heads/master | 2020-03-20T18:06:50.989463 | 2018-06-16T10:09:15 | 2018-06-16T10:09:15 | 137,571,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,740 | py | from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
# paths
photo_dir = '/home/student21m15/labs/lab3/optional/'
train_dir = photo_dir + 'train'
val_dir = photo_dir + 'val'
# samples size
samples_size = 1000
# validation image count
validation_samples_size = 300
# image sizes
img_width = 128
img_height = 128
img_depth = 3
# input image shape
input_shape = (img_width, img_height, img_depth)
# images in batch
batch_size = 64
# types of images
nb_classes = 3
# epoches count
nb_epoch = 1000
# convolutional layer core
conv_core = 3
# default dropout
def_dropout = 0.2
# default pool size
def_pool_size = 2
# Sequential model
model = Sequential()
# First cascade
# convolutional layer
model.add(Conv2D(32, (conv_core, conv_core), padding='same',
input_shape=(img_width, img_height, img_depth), activation='relu'))
# convolutional layer
model.add(Conv2D(32, (conv_core, conv_core), padding='same',
input_shape=(img_width, img_height, img_depth), activation='relu'))
# pooling layer
model.add(MaxPooling2D(pool_size=(def_pool_size, def_pool_size)))
# batch normalization
model.add(BatchNormalization())
# dropout pooling
model.add(Dropout(def_dropout))
# Second cascade
# convolutional layer
model.add(Conv2D(32, (conv_core, conv_core), padding='same',
input_shape=(img_width, img_height, img_depth), activation='relu'))
# pooling layer
model.add(MaxPooling2D(pool_size=(def_pool_size, def_pool_size)))
# batch normalization
model.add(BatchNormalization())
# dropout pooling
model.add(Dropout(def_dropout))
# Third cascade
model.add(Conv2D(64, (conv_core, conv_core), padding='same', activation='relu'))
# pooling layer
model.add(MaxPooling2D(pool_size=(def_pool_size, def_pool_size)))
# batch normalization
model.add(BatchNormalization())
# dropout pooling
model.add(Dropout(def_dropout))
# Fourth cascade
model.add(Conv2D(64, (conv_core, conv_core), padding='same', activation='relu'))
# pooling layer
model.add(MaxPooling2D(pool_size=(def_pool_size, def_pool_size)))
# batch normalization
model.add(BatchNormalization())
# dropout pooling
model.add(Dropout(def_dropout))
# flatten the input
model.add(Flatten())
# Fully connected layer
model.add(Dense(1024, activation='relu'))
# ropout pooling
model.add(Dropout(0.5))
# Output layer
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
print(model.summary())
aug_gen = ImageDataGenerator(rescale=1./255,
rotation_range=15,
width_shift_range=0.2,
height_shift_range=0.15,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
datagen = ImageDataGenerator(rescale=1./255)
# Train generator
train_generator = aug_gen.flow_from_directory(
train_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
# Validation generator
val_generator = datagen.flow_from_directory(
val_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
# train model
model.fit_generator(
train_generator,
steps_per_epoch=samples_size // batch_size,
epochs=nb_epoch,
validation_data=val_generator,
validation_steps=validation_samples_size // batch_size)
model.save('khmelevsky.h5')
| [
"[email protected]"
] | |
ee3d5e6b9b5a0ae1fea52db6f5265698ee9d216e | 24d8132ccd315936a4f015f8b5303471137aaf51 | /webproj/webapp/views.py | f1ee4d9c3ad6cf54de68908e3df12a051ce66cf5 | [] | no_license | benj99/root | 60240b742103b0372b40e13ed7d4234c45bdc7d0 | 191077c5f45451ce00bd85b259989f1668c88e7e | refs/heads/master | 2022-12-14T05:03:18.722732 | 2020-09-14T17:16:50 | 2020-09-14T17:16:50 | 295,486,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from django.shortcuts import render
# Create your views here.
def homepage(request):
return render(request,'webapp/homepage.html')
def pictures(request):
return render(request,'webapp/pictures.html')
def pic1(request):
return render(request,'webapp/pic1.html')
def pic2(request):
return render(request,'webapp/pic3.html')
def pic3(request):
return render(request,'webapp/pic3.html')
| [
"[email protected]"
] | |
328ec21902569279449032138565a5c3f6c1275a | 544659d003e69b9555e7a746b6d185b0e20c2436 | /test.py | 50675d2f9cab978ca55aa1b297fc60b549e8d207 | [] | no_license | vivek2612/mooc | 43a167b1cbf03a0a59c98a7408a7a3f50681c817 | 8fdb9fd9391ae649aec229645b637787c6bb3406 | refs/heads/master | 2016-09-11T02:28:07.838033 | 2015-03-02T18:49:34 | 2015-03-02T18:49:34 | 23,511,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from mylib import *
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
fname = sys.argv[1]
lines = readFile(fname)
outfile = open(fname+'-simplified','w')
for line in lines:
arr = getArray(line)
req = getCompleteRequest(arr)
ref = getHttpReferer(arr);
statusCode = getStatusCode(arr);
outfile.write(str(statusCode)+", "+req+", "+ ref+'\n')
outfile.close()
| [
"[email protected]"
] | |
0762afa3b3aa0f0bfd0316ace6a7c526442e841b | 7c93f47718862efdbaceabfd630a45cb0f53d07b | /texio/Test/dummy_response.py | 20ec51e1402eaa03c76f6ebd149bd83694ad2afe | [] | no_license | tdrk1980/power_control | a39f9e61037e08c4289a370d86942fac91415dc4 | d1d4673f2d271e8d5e89cbf6f10c0692a5cd2d38 | refs/heads/master | 2020-04-17T13:15:44.632242 | 2019-01-21T02:57:40 | 2019-01-21T02:57:40 | 166,607,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | import serial
import time
print("to stop this script, please use Ctrl + Break ! (not Ctrl + C !)")
print("dummy_response-start")
current_voltage = 12.0
with serial.Serial(port="COM27", baudrate=9600, bytesize=serial.SEVENBITS, parity=serial.PARITY_EVEN, stopbits=serial.STOPBITS_ONE) as ser:
while(1):
request = ser.readline().decode("cp932").strip()
print("recv:"+request)
if "VOLT?" in request:
response = f"VOLT {current_voltage:.2f}\r"
ser.write(response.encode("CP932"))
print("send:"+response.strip())
elif "VOLT " in request:# e.g. request => "VOLT 13.00"
request = request.split(" ") # =>["VOLT", "13.00"]
current_voltage = float(request[1]) #=> current_voltage=13.0
print("dummy_response-end")
| [
"[email protected]"
] | |
d71f634691d0ebccf187fdda66721d94d8bff7e3 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2022_06_01/aio/operations/_data_collection_rules_operations.py | 3e861dea15bc45ab77ee311896f74969698adb82 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 30,076 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._data_collection_rules_operations import (
build_create_request,
build_delete_request,
build_get_request,
build_list_by_resource_group_request,
build_list_by_subscription_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DataCollectionRulesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.monitor.v2022_06_01.aio.MonitorManagementClient`'s
:attr:`data_collection_rules` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.DataCollectionRuleResource"]:
"""Lists all data collection rules in the specified resource group.
Lists all data collection rules in the specified resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataCollectionRuleResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-01"))
cls: ClsType[_models.DataCollectionRuleResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
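        # The three closures below implement paging: prepare_request builds the
        # first request (or re-issues the service-provided nextLink with the
        # client's api-version re-applied), extract_data deserializes one page
        # into its items plus the link to the next page, and get_next performs
        # the HTTP round-trip and maps error status codes to exceptions.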
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataCollectionRuleResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules"
}
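    # Usage sketch (illustrative, not from this file): assumes an async
    # MonitorManagementClient named `client` built elsewhere with a credential
    # and subscription ID; the resource group name is a placeholder.
    #
    #     async for rule in client.data_collection_rules.list_by_resource_group(
    #         resource_group_name="example-rg"
    #     ):
    #         print(rule.name, rule.location)
    #
    # AsyncItemPaged follows nextLink pages transparently, so callers see a
    # flat async iterator of DataCollectionRuleResource objects.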
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.DataCollectionRuleResource"]:
"""Lists all data collection rules in the specified subscription.
Lists all data collection rules in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataCollectionRuleResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-01"))
cls: ClsType[_models.DataCollectionRuleResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataCollectionRuleResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Insights/dataCollectionRules"
}
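    # Usage sketch (same assumed `client` as above): an async comprehension
    # collects the subscription-wide listing into a plain list.
    #
    #     rules = [r async for r in client.data_collection_rules.list_by_subscription()]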
@distributed_trace_async
async def get(
self, resource_group_name: str, data_collection_rule_name: str, **kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Returns the specified data collection rule.
Returns the specified data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-01"))
cls: ClsType[_models.DataCollectionRuleResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
}
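    # Usage sketch (placeholder names): unlike the pager-returning list
    # operations above, `get` is a coroutine and must be awaited.
    #
    #     rule = await client.data_collection_rules.get(
    #         resource_group_name="example-rg",
    #         data_collection_rule_name="example-dcr",
    #     )
    #     print(rule.id)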
@overload
async def create(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[_models.DataCollectionRuleResource] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Creates or updates a data collection rule.
Creates or updates a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Creates or updates a data collection rule.
Creates or updates a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[Union[_models.DataCollectionRuleResource, IO]] = None,
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Creates or updates a data collection rule.
Creates or updates a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Is either a DataCollectionRuleResource type or an IO type. Default
value is None.
:type body: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DataCollectionRuleResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
if body is not None:
_json = self._serialize.body(body, "DataCollectionRuleResource")
else:
_json = None
request = build_create_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
}
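# Editor's note: illustrative sketch of `create` (assumption: `client` is the
# async MonitorManagementClient from the sketch above; per the overloads the
# payload may also be a raw IO stream; all values are placeholders).
#
#     rule = await client.data_collection_rules.create(
#         resource_group_name="my-rg",
#         data_collection_rule_name="my-dcr",
#         body=_models.DataCollectionRuleResource(location="eastus"),
#     )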
@overload
async def update(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[_models.ResourceForUpdate] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Updates part of a data collection rule.
Updates part of a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: ~azure.mgmt.monitor.v2022_06_01.models.ResourceForUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[IO] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Updates part of a data collection rule.
Updates part of a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Default value is None.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
data_collection_rule_name: str,
body: Optional[Union[_models.ResourceForUpdate, IO]] = None,
**kwargs: Any
) -> _models.DataCollectionRuleResource:
"""Updates part of a data collection rule.
Updates part of a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:param body: The payload. Is either a ResourceForUpdate type or an IO type. Default value is
None.
:type body: ~azure.mgmt.monitor.v2022_06_01.models.ResourceForUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataCollectionRuleResource or the result of cls(response)
:rtype: ~azure.mgmt.monitor.v2022_06_01.models.DataCollectionRuleResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DataCollectionRuleResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
if body is not None:
_json = self._serialize.body(body, "ResourceForUpdate")
else:
_json = None
request = build_update_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DataCollectionRuleResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, data_collection_rule_name: str, **kwargs: Any
) -> None:
"""Deletes a data collection rule.
Deletes a data collection rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param data_collection_rule_name: The name of the data collection rule. The name is case
insensitive. Required.
:type data_collection_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-06-01"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
data_collection_rule_name=data_collection_rule_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseCommonV2, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
}
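# Editor's note: `update` and `delete` follow the same calling pattern as
# `get`/`create` above (sketch with placeholder values):
#
#     await client.data_collection_rules.update(
#         resource_group_name="my-rg",
#         data_collection_rule_name="my-dcr",
#         body=_models.ResourceForUpdate(tags={"env": "dev"}),
#     )
#     await client.data_collection_rules.delete(
#         resource_group_name="my-rg",
#         data_collection_rule_name="my-dcr",
#     )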
| [
"[email protected]"
] | |
769c5f76132c9861d61c86f1350d8746d3a04d61 | c81d7dfef424b088bf2509a1baf406a80384ea5a | /venv/Lib/site-packages/biom/cli/table_ids.py | cf610d54c6ed4c0a8e1f8ac78a55e06f85d14506 | [] | no_license | Goutham2591/OMK_PART2 | 111210d78fc4845481ed55c852b8f2f938918f4a | cb54fb21ebf472bffc6ee4f634bf1e68303e113d | refs/heads/master | 2022-12-10T01:43:08.213010 | 2018-04-05T02:09:41 | 2018-04-05T02:09:41 | 124,828,094 | 0 | 1 | null | 2022-12-07T23:43:03 | 2018-03-12T03:20:14 | Python | UTF-8 | Python | false | false | 1,204 | py | # -----------------------------------------------------------------------------
# Copyright (c) 2011-2017, The BIOM Format Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
import click
from biom.cli import cli
from biom import load_table
@cli.command(name='table-ids')
@click.option('-i', '--input-fp', required=True,
type=click.Path(exists=True, dir_okay=False),
help='The input BIOM table')
@click.option('--observations', default=False, is_flag=True,
help="Grab observation IDs")
def summarize_table(input_fp, observations):
"""Dump IDs in a table.
Dump out the IDs found within a table:
Example usage:
Get the sample IDs within a table:
$ biom table-ids -i table.biom
Get the observation IDs within a table:
$ biom table-ids -i table.biom --observations
"""
tab = load_table(input_fp)
for id_ in tab.ids(axis='observation' if observations else 'sample'):
click.echo(id_)
| [
"[email protected]"
] | |
b5aae277c4a03a0eea9005d32181dca1acb53fbc | e57cb1724ba0e7ffeefdfbe5ddea0e149d8b791c | /2020/CJ2020/Reverse/Pawon/solver.py | 24396621b13b350ac60bcb597d00c00f90e8fa53 | [] | no_license | insomn14/CTFindo | bcebf79d8d2047ef6afed8e16685110f3b6d57de | 1068dd73b26d6da83f2efcaec80fbee142869a14 | refs/heads/master | 2023-02-25T12:31:04.039741 | 2021-01-31T18:46:58 | 2021-01-31T18:46:58 | 299,901,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | from z3 import *
inp = [BitVec(f'v{i}', 8) for i in range(26)]
s = Solver()
s.add ( inp[5] == ord('-'), inp[11] == ord('-'), inp[18] == ord('-') )
s.add ( inp[0] == inp[10] )
s.add ( inp[1] == ord('e') )
s.add ( inp[3] == ord('P') )
s.add ( inp[25] == ord('Y'))
s.add ( inp[2] == ord('m') )
s.add ( inp[4] == inp[1] )
s.add ( inp[6] == ord('j') )
s.add ( inp[7] == ord('o') )
s.add ( inp[8] == inp[9] )
s.add ( inp[9] == ord('S') )
s.add ( inp[12] == (inp[5] * 2 + 9) )
s.add ( inp[23] == inp[17] + 3 )
s.add ( inp[13] == inp[20] )
s.add ( inp[14] == ord('z') )
s.add ( inp[16] == (inp[15] * 2 + 0xffffff7a) )
s.add ( inp[21] == ord('T') )
s.add ( inp[16] == ord('H') )
s.add ( inp[20] == ord('u') )
s.add ( inp[17] == ord('5') )
s.add ( inp[19] == ord('S') )
s.add ( inp[22] == ord('1') )
s.add ( inp[10] == inp[21] )
s.add ( inp[24] == (inp[20] * 2 + 0xffffffc3) )
if (s.check() == sat):
model = s.model()
res = [chr(int(str(model.evaluate(inp[i])))) for i in range(26)]
print(''.join(res))
else:
print('unsat') | [
"[email protected]"
] | |
28f7c4eccd6fde5260db5e231f76bb8d2319d2e5 | ee3b80bfdd08b047446cf3d7a5ff52d92fdbbeee | /blog/utils.py | 3793f81b023ead3427e08ea47e7f4aa3afdc92e7 | [] | no_license | nerser/Blog | 7b8a61989d79f1e0013074ef79765fa6ea6ad16f | 6d9b9f1c8774a769451395def5967aba6ee1cdcf | refs/heads/main | 2023-01-18T15:41:56.913739 | 2020-12-06T17:21:02 | 2020-12-06T17:21:02 | 317,652,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,140 | py | from django.shortcuts import render, redirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from typing import Callable
class ObjectDetailMixin:
model: Callable = None
template: str = None
def get(self, request, slug):
obj = get_object_or_404(self.model, slug__iexact=slug)
return render(request, self.template, context={self.model.__name__.lower(): obj, 'admin_object': obj, 'detail': True})
class ObjectCreate:
form_model: Callable = None
template: str = None
def get(self, request):
form = self.form_model()
return render(request, self.template, context={'form': form})
def post(self, request):
bound_form = self.form_model(request.POST)
if bound_form.is_valid():
new_obj = bound_form.save()
return redirect(new_obj)
return render(request, self.template, context={'form': bound_form})
class ObjectUpdate:
form_model: Callable = None
model: Callable = None
template: str = None
def get(self, request, slug):
obj = self.model.objects.get(slug__iexact=slug)
bound_form = self.form_model(instance=obj)
return render(request, self.template, context={'form': bound_form, self.model.__name__.lower(): obj})
def post(self, request, slug):
obj = self.model.objects.get(slug__iexact=slug)
bound_form = self.form_model(request.POST, instance=obj)
if bound_form.is_valid():
new_obj = bound_form.save()
return redirect(new_obj)
return render(request, self.template, context={'form': bound_form, self.model.__name__.lower(): obj})
class ObjectDelete:
model: Callable = None
template: str = None
model_list_template: str = None
def get(self, request, slug):
obj = self.model.objects.get(slug__iexact=slug)
return render(request, self.template, context={self.model.__name__.lower(): obj})
def post(self, request, slug):
obj = self.model.objects.get(slug__iexact=slug)
obj.delete()
return redirect(reverse(self.model_list_template))
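# Editor's note: illustrative usage sketch. These mixins are meant to be
# combined with django.views.generic.View subclasses; `Post`, `PostForm` and
# the template paths below are hypothetical names, not part of this module.
#
#     from django.views.generic import View
#     from blog.models import Post
#     from blog.forms import PostForm
#
#     class PostDetail(ObjectDetailMixin, View):
#         model = Post
#         template = 'blog/post_detail.html'
#
#     class PostCreate(ObjectCreate, View):
#         form_model = PostForm
#         template = 'blog/post_create.html'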
| [
"[email protected]"
] | |
86828bb749f4281d10339cb98b252c38e77420f7 | ee71d6baed9dfd99e454aa49733990c8ca43ef63 | /dummy/tasks.py | dda342c8800cecee6dfdb43914d2435f853af8a6 | [] | no_license | Cybernisk/django-robust | d72a1097ee3c3e62352d48d3a29eb40a306f02ea | 595f3a7cd8a2fb0fd48a787660589bfd6c584e47 | refs/heads/master | 2020-12-11T16:35:39.331505 | 2018-09-16T15:34:36 | 2018-09-16T15:34:36 | 233,899,107 | 0 | 0 | null | 2020-01-14T17:39:20 | 2020-01-14T17:39:20 | null | UTF-8 | Python | false | false | 124 | py | from robust import task
def spam():
raise RuntimeError('fail')
def bar():
spam()
@task()
def foo():
bar()
| [
"[email protected]"
] | |
4414ca2806ad21e67ce2c29f2b62027df8aac973 | 2abf95e965c49124de12db63080b636cf3538ca3 | /other/makeData.py | b4cd99f7e9bbea5254773aaebcc1f3b69ef5e591 | [] | no_license | reo11/TMUProjectExpB1 | 5611e194d5fdad969264fd128600f2279c0c970d | 590a02f811af3939c46db467fdb91ba05a3940af | refs/heads/master | 2020-03-30T21:07:45.484053 | 2019-02-08T06:01:21 | 2019-02-08T06:01:21 | 151,617,334 | 0 | 0 | null | 2018-10-06T02:12:11 | 2018-10-04T18:23:37 | Matlab | UTF-8 | Python | false | false | 629 | py | from pathlib import Path
trainPath = '../textFile/train.txt'
testPath = '../textFile/test.txt'
DBPath = '../dataset/DB/jpeg/'
QueryPath = '../dataset/Query/jpeg/'
f = open(trainPath,'w')
p = Path(DBPath)
p = sorted(p.glob("*.jpg"))
count = 0
for filename in p:
f.write(str(filename.name) + ' ' + str(count//10) + '\n')  # integer label: one class per 10 images, matching the integer test labels below
count += 1
f.close()
f = open(testPath,'w')
p = Path(QueryPath)
p = sorted(p.glob("*.jpg"))
count = 0
for filename in p:
if filename.name[0:1] == 'r':
num = -1
else:
num = int(filename.name[0:2])
f.write(str(filename.name) + ' ' + str(num) + '\n')
count += 1
f.close() | [
"[email protected]"
] | |
891420b8a67493a494fd308b12e879ca1fa309b0 | 3b5b2e5b66167e21cf285d390ed423d8e1f8acd9 | /viewHR/seeder.py | 0d17c1ff1c62d2640d19c2c8efd6ed3d97308525 | [
"MIT"
] | permissive | fpolica91/python_utility | 9918b348e16ce22b6e4ec5a9d42067b0df9b1759 | e2161d1cbb55a5d1534fdf4cd7313d602303257d | refs/heads/main | 2023-05-12T09:17:58.711277 | 2021-06-05T01:18:45 | 2021-06-05T01:18:45 | 373,994,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | import firebase_admin
import json
from firebase_admin import credentials
from firebase_admin import firestore
cred = credentials.Certificate("./service-account.json")
app = firebase_admin.initialize_app(cred)
db = firestore.client()
# collection = db.collection(u'companies')
# documents = list(collection.get())
# print("# of documents in collection: {}".format(len(documents)))
with open("complete_companies.json", "r") as f:
file_contents = json.load(f)
# for index,value in enumerate(file_contents):
# if index > 3337:
# collection.document(value["symbol"]).set(value)
# if len(value["symbol"]) > 0:
#
# ref.set(file_contents)
# for doc in collection:
# print(f'{doc.id} => {doc.to_dict()}')
| [
"[email protected]"
] | |
028a7ef7c2d12a813d9de43f895f5b4cfcf0feac | ee4099e6d4167bc6fde35757954683f6df55744e | /venv/bin/jupyter-notebook | 01c0aaca2fb688d1e3514432a4454b05b98fe825 | [
"Apache-2.0"
] | permissive | LaplaceKorea/qc_portfolio_optimization | e4e416b07c23d9b54699b89ee7222a1a359eb857 | 30f0e27689ad6bf37fb87880c2813a5b9858608d | refs/heads/master | 2023-07-13T19:26:11.726818 | 2020-12-15T18:59:33 | 2020-12-15T18:59:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | #!/home/oscar/Documents/qc_portfolio_optimization/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from notebook.notebookapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
cff2f6c75c4e5307657846e88fe40b4abc9704c8 | 8e5d05dbdd4750f1aaeda2dc23e2f975eeccb40f | /manage.py | f24e9a44ae003135ef0540d73686af6ba5faf034 | [] | no_license | Springer114/Dossier | 6f30c0fd2099db1edbd9a7ffa60530e5ea36c6a7 | 3f46a1a4691698a405a22ddc9f1251f50db0d6c4 | refs/heads/master | 2023-04-21T13:30:03.095309 | 2021-04-23T20:55:28 | 2021-04-23T20:55:28 | 334,011,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Dossier.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8e0132b657b38bddc674e8addc063f8086e6e278 | 29c535eab26cec3981e929a02bb26620b8f144f0 | /EBSCO-Deletions.py | 94b6ad662dfe47630f45371f4877de4e4ca8478c | [] | no_license | marriott-library/UALC-Statistics-and-EBSCO-Deletions | 9d0c9d256d02ee930dc53ae4ef0c2e9f37dc6c36 | 2efc318edbd9e20130897686f200b106ed6598f4 | refs/heads/master | 2021-01-10T07:18:46.179681 | 2016-02-06T01:10:19 | 2016-02-06T01:10:19 | 51,183,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py | # EBSCO Deletions Helper
#
# Expects to be run in the same directory as a file called [Removals].txt which contains one removal ID per line.
# Expects that a folder called [CSVs] is also in the same directory, and that [CSVs] contains CSV reports from ALMA.
# Output can be copied from the terminal or piped to a file. Errors are not handled so files must be properly-formatted.
#
# Script should be run using Python 3 or higher. Example command for Mac OS X to pipe to a file (run in same directory):
# python3 EBSCO-Deletions.py > Output.txt
import csv
import os
import re
def ingest_removals(removals_path):
removal_ids = set()
with open(removals_path, newline='') as removals_txt:
for line in removals_txt:
removal_ids.add(int(line))
return removal_ids
def ingest_reports(reports_path):
alma_titles = {}
for report_filename in os.listdir(reports_path):
if report_filename.endswith(".csv"):
with open(reports_path + os.path.sep + report_filename, newline='') as report_csv:
report_reader = csv.reader(report_csv)
next(report_reader) # Skips the first row in each file, which is a header row.
for row in report_reader:
match = re.search("EBSC [0-9]*;", row[1])
if match:
alma_titles[int(row[1][match.start() + 5 : match.end() - 1])] = int(row[0])
return alma_titles
if __name__ == '__main__':
removals_path = os.getcwd() + os.path.sep + "[Removals].txt"
reports_path = os.getcwd() + os.path.sep + "[CSVs]"
removal_ids = ingest_removals(removals_path)
alma_titles = ingest_reports(reports_path)
for removal_id in removal_ids:
if removal_id in alma_titles:
print(alma_titles[removal_id])
| [
"[email protected]"
] | |
acc054cb7ad73e3d28f7837d2a8e1a48ab4a31a3 | 26afe291d193e96356def48b384a55ca28d08b91 | /cart/CART.py | 115faf80d2e84b639b26a96cb4055245701f9835 | [] | no_license | SeanLee97/machine_learning_demo | 2fe5bb65c37c0612d50e969483598cda1a9fa1b4 | d65b3beae887cf0349364602adeb8d89c949f528 | refs/heads/master | 2021-05-13T17:42:21.108490 | 2018-01-09T15:19:54 | 2018-01-09T15:19:54 | 116,831,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,481 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
# Load the dataset from a tab-separated file of floats
def load_dataset(filename):
data_mat = []
with open(filename, 'r') as f:
for line in f.readlines():
float_line = list(map(float, line.strip().split('\t')))  # list() so the row is materialized under Python 3
data_mat.append(float_line)
return data_mat
# Binary split of the dataset on one feature and one threshold value
# (relies on numpy.nonzero() for the row selection)
def bin_split_dataset(dataset, feature, val):
mat_gt = dataset[np.nonzero(dataset[:, feature]>val)[0], :]
mat_lt = dataset[np.nonzero(dataset[:, feature]<=val)[0], :]
return mat_gt, mat_lt
'''
mat = np.mat(np.eye(4))
print(mat)
a, b = bin_split_dataset(mat, 1, 0.5)
print(a)
print(b)
'''
# Mean of the target column, used as the regression value of a leaf node
def reg_leaf(dataset):
return np.mean(dataset[:, -1])
# Total squared error of the targets (variance * sample count), used to measure impurity
def reg_err(dataset):
return np.var(dataset[:, -1]) * np.shape(dataset)[0]
# Choose the best split.
'''
The ops tuple holds tolS (the minimum error reduction a split must achieve
to be accepted) and tolN (the minimum number of samples each of the two
subsets must keep after the split); together they act as pre-pruning.
'''
def choose_best_split(dataset, leaf_type=reg_leaf, err_type = reg_err, ops=(1, 4)):
# err_limit: minimum error reduction required by a split
# num_limit: minimum sample count of each subset after the split
err_limit = ops[0]
num_limit = ops[1]
dataset = np.array(dataset)
if len(set((dataset[:,-1].T).tolist()))==1:
# Stop when all target values are identical
return None, leaf_type(dataset)
m, n = np.shape(dataset)
s = reg_err(dataset)
best_s = np.inf
best_index = 0
best_value = 0
for feature_index in range(n-1):
for split_value in set(dataset[:, feature_index]):
greater, less = bin_split_dataset(dataset, feature_index, split_value)
if np.shape(greater)[0] < num_limit or np.shape(less)[0]< num_limit:
continue
new_s = err_type(greater)+err_type(less)
if new_s < best_s:
best_index = feature_index
best_value = split_value
best_s = new_s
if s-best_s < err_limit:
# Stop if the split reduces the error by less than err_limit
return None, leaf_type(dataset)
greater, less = bin_split_dataset(dataset, best_index, best_value)
if np.shape(greater)[0] < num_limit or np.shape(less)[0] < num_limit:
# Stop if either subset after the split has fewer than num_limit samples
return None, leaf_type(dataset)
return best_index, best_value
# Post-pruning
def is_tree(obj):
return type(obj).__name__=='dict'
def get_mean(tree):
if is_tree(tree['right']):
tree['right'] = get_mean(tree['right'])
if is_tree(tree['left']):
tree['left'] = get_mean(tree['left'])
return (tree['right']+tree['left'])/2.0
# 剪枝函数
def prune(tree, test_data):
if np.shape(test_data)[0] == 0:
return get_mean(tree)
if is_tree(tree['right']) or is_tree(tree['left']):
lset, rset = bin_split_dataset(test_data, tree['sp_ind'], tree['sp_val'])
if is_tree(tree['left']):
tree['left']=prune(tree['left'], lset)
if is_tree(tree['right']):
tree['right']=prune(tree['right'], rset)
if not is_tree(tree['right']) and not is_tree(tree['left']):
lset, rset = bin_split_dataset(test_data, tree['sp_ind'], tree['sp_val'])
# Error without merging the two leaves
error_no_merge = np.sum(np.power(lset[:,-1]-tree['left'], 2))+np.sum(np.power(rset[:,-1]-tree['right'], 2))
# Error after merging them into a single leaf
tree_mean = (tree['left']+tree['right'])/2.0
error_merge = np.sum(np.power(test_data[:,-1]-tree_mean, 2))
# Compare both errors to decide whether to prune (merge the two leaves)
if error_merge < error_no_merge:
return tree_mean
else:
return tree
return tree
# Model tree
'''
Each leaf holds a linear model instead of a constant value, so the fitted
tree is a piecewise-linear function composed of multiple linear segments.
'''
def linear_solve(dataset):
data_mat = np.mat(dataset)
m, n = np.shape(data_mat)
x = np.mat(np.ones((m,n)))
y = np.mat(np.ones((m,1)))
x[:,1:n] = data_mat[:,0:n-1]
y = data_mat[:, -1]
xTx = x.T*x
if np.linalg.det(xTx) == 0.0:
raise NameError('this matrix cannot do inverse!')
ws = xTx.I*(x.T*y)
return ws, x, y
'''
model_leaf and model_err generate leaf nodes, except that each leaf is a
linear model rather than a single value; model_err returns the squared error
of that model. Both call linear_solve to fit the model on the split subset.
'''
def model_leaf(dataset):
ws, x, y = linear_solve(dataset)
return ws
def model_err(dataset):
ws, x, y = linear_solve(dataset)
y_hat = x*ws
return np.sum(np.power(y-y_hat, 2))
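# Editor's note: illustrative sketch of the recursive tree builder these
# helpers plug into. `create_tree` itself is not part of this file; the dict
# layout follows the 'sp_ind'/'sp_val'/'left'/'right' keys already used by
# prune() above.
#
#     def create_tree(dataset, leaf_type=reg_leaf, err_type=reg_err, ops=(1, 4)):
#         feat, val = choose_best_split(dataset, leaf_type, err_type, ops)
#         if feat is None:  # stopping condition reached: return a leaf value
#             return val
#         tree = {'sp_ind': feat, 'sp_val': val}
#         lset, rset = bin_split_dataset(np.array(dataset), feat, val)
#         tree['left'] = create_tree(lset, leaf_type, err_type, ops)
#         tree['right'] = create_tree(rset, leaf_type, err_type, ops)
#         return tree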
| [
"[email protected]"
] | |
b549d2747f26879f282ecf9a057e72ce1d180c35 | 91f5d2bc216cb6bffa4196af2392806c3a6bab97 | /xml2mql/xml2mql.py | 64abde6fceb330c34899a0607df83efb0ba3b549 | [
"MIT"
] | permissive | emdros/python-xml2emdrosmql | 2e0407221451071136ab406e9a66a86e852b9f4f | a6c52c030c07e5df6547d58bea5b5110c4429db4 | refs/heads/master | 2020-03-16T03:08:55.702040 | 2018-05-08T07:59:42 | 2018-05-08T07:59:42 | 132,481,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | # -*- coding: utf-8 -*-
#
# XML to Emdros MQL data importer.
#
#
# Copyright (C) 2018 Sandborg-Petersen Holding ApS, Denmark
#
# Made available under the MIT License.
#
# See the file LICENSE in the root of the sources for the full license
# text.
#
#
import sys
import os
import re
import json
import xml.sax
import tempfile  # used by generateMQL when no JSON filename is given
from . import json_generator
from . import mql_generator
from . import renderjson_generator
def getBasename(pathname):
basename = os.path.split(pathname)[-1]
return basename
def mangle_XML_entities(s):
r = s.replace("&", "&")
r = r.replace("<", "<")
r = r.replace(">", ">")
r = r.replace("\"", """)
return r
def generateJSON(json_filename_or_file, xml_filename_list, default_document_name = "document", default_token_name = "token"):
handler = json_generator.JSONGeneratorHandler(default_document_name, default_token_name)
for filename in xml_filename_list:
fin = open(filename, "rb")
sys.stderr.write("Now reading: %s ...\n" % filename)
xml.sax.parse(fin, handler)
fin.close()
if type(json_filename_or_file) == type(""):
sys.stderr.write("Now writing: %s ...\n" % json_filename_or_file)
fout = open(json_filename_or_file, "wb")
handler.doCommand(fout)
fout.close()
else:
sys.stderr.write("Now writing: JSON ...\n")
handler.doCommand(json_filename_or_file)
sys.stderr.write("... Done!\n\n")
def generateRenderJSON(json_filename_or_file, render_json_filename):
if type(json_filename_or_file) == type(""):
sys.stderr.write("Now reading: JSON file %s ...\n" % json_filename_or_file)
fin = open(json_filename_or_file, "rb")
handler = renderjson_generator.RenderJSONGeneratorHandler(fin)
fin.close()
else:
sys.stderr.write("Now reading: JSON ...\n")
handler = renderjson_generator.RenderJSONGeneratorHandler(json_filename_or_file)
sys.stderr.write("Now writing: %s...\n" % render_json_filename)
handler.doCommand(open(render_json_filename, "wb"))
sys.stderr.write("... Done!\n")
def generateMQL(json_filename, xml_filenames_list, first_monad, first_id_d, default_document_name = "document", default_token_name = "token"):
if json_filename == None or json_filename == "":
json_file = tempfile.NamedTemporaryFile()
# Generate JSON first...
generateJSON(json_file, xml_filenames_list, default_document_name, default_token_name)
# Rewind file
json_file.seek(0)
else:
json_file = open(json_filename, "rb")
handler = mql_generator.MQLGeneratorHandler(json_file, sys.stdout, first_monad, first_id_d)
json_file.close()
for filename in xml_filenames_list:
fin = open(filename, "rb")
sys.stderr.write("Now reading: %s ...\n" % filename)
handler.setBasename(getBasename(filename))
xml.sax.parse(fin, handler)
fin.close()
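# Editor's note: hypothetical driver showing how the three entry points are
# meant to be combined (not part of this module; argument values assumed):
#
#     if __name__ == "__main__":
#         xml_files = sys.argv[1:]
#         generateJSON("schema.json", xml_files)
#         generateRenderJSON("schema.json", "render.json")
#         generateMQL("schema.json", xml_files, first_monad=1, first_id_d=1)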
| [
"[email protected]"
] | |
2672fa0945aa8aa4a549743c4da3e539795498f2 | e1079d5e630a97d15ddf4d2d5f1c63afc8e05752 | /Recursion In Python/3. Recursion With Numbers/1. The Power Of A Number.py | 1c7b05f1515831c620d760689415fcf7971355e2 | [] | no_license | Nilutpal-Gogoi/DataStructures-Algorithms | 9235447968c5b40a1435b3bd37ee405f13326db5 | d832556d3bf389d8269b91c1e06f52541a9eaa74 | refs/heads/master | 2023-02-11T23:58:35.309673 | 2021-01-04T07:09:10 | 2021-01-04T07:09:10 | 305,415,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # The power(or exponent) "a", of a number x represents the number of times x will
# be multiplied by itself. It is written as a small, superscript number to the
# right of and above the base number.
def power(base, exponent):
if exponent == 0:
return 1
else:
return base*power(base, exponent-1)
# print(power(2,3))
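# Worked example of the linear recursion above:
#   power(2, 3) = 2 * power(2, 2)
#               = 2 * 2 * power(2, 1)
#               = 2 * 2 * 2 * power(2, 0) = 8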
# Better time complexity: exponentiation by squaring, O(log n) multiplications
def power1(base, exponent):
if exponent == 0:
return 1
elif exponent % 2 == 0:
return power1(base*base, exponent//2)
else:
return base*power1(base*base, (exponent-1)//2)
print(power1(2, 3))
| [
"[email protected]"
] | |
328b4ac74798fb57b8638c628f6d9d1a046c292f | 92ced3e16bb0aea62dec75b980f3737047152862 | /parallel_identify_from_rtmp_0602.py | 825f7ab7295cbb4f7757cfdb00d259efed74175b | [] | no_license | ZhuChaozheng/face_recognition_from_rtmp | 62d213072fda4a566348c7a8ada9d68ff2b7c3b5 | 45fdd055778031fca0f2925b24359e5e5fed3461 | refs/heads/master | 2021-07-06T16:39:58.231494 | 2020-09-30T00:56:01 | 2020-09-30T00:56:01 | 189,748,595 | 6 | 7 | null | 2020-09-30T00:56:03 | 2019-06-01T15:29:03 | Python | UTF-8 | Python | false | false | 6,770 | py | import face_recognition
import cv2
import PIL.Image
import numpy as np
import multiprocessing
import click
import sys
import itertools
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
def process_images_in_process_pool(rgb_small_frame, known_names, known_face_encodings, number_of_cpus, tolerance, show_distance):
if number_of_cpus == -1:
processes = None
else:
processes = number_of_cpus
# macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
context = multiprocessing
if "forkserver" in multiprocessing.get_all_start_methods():
context = multiprocessing.get_context("forkserver")
pool = context.Pool(processes=processes)
function_parameters = zip(
rgb_small_frame,
itertools.repeat(known_names),
itertools.repeat(known_face_encodings),
itertools.repeat(tolerance),
itertools.repeat(show_distance)
)
pool.starmap(test_image, function_parameters)
def test_image(rgb_small_frame, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
#Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
print('2')
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_names[best_match_index]  # use the parameter, not the module-level list
face_names.append(name)
# @click.command()
# @click.argument('known_people_folder')
# @click.option('--rtmp_addr', default='rtmp://server.blackant.org:1935/live_2710/hello')
# @click.option('--cpus', default=1, help='number of CPU cores to use in parallel (can speed up processing lots of images). -1 means "use all in system"')
# @click.option('--tolerance', default=0.6, help='Tolerance for face comparisons. Default is 0.6. Lower this if you get multiple matches for the same person.')
# @click.option('--show-distance', default=False, type=bool, help='Output face distance. Useful for tweaking tolerance setting.')
def main():
rtmp_addr = 'rtmp://server.blackant.org:1935/live_2710/hello'
cpus = 4
tolerance = 0.6
show_distance = False
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# Multi-core processing only supported on Python 3.4 or greater
if (sys.version_info < (3, 4)) and cpus != 1:
click.echo("WARNING: Multi-processing support requires Python 3.4 or greater. Falling back to single-threaded processing!")
cpus = 1
# Get a reference to webcam #0 (the default rtmp_addr)
video_capture = cv2.VideoCapture(rtmp_addr)
#video_capture = cv2.VideoCapture(0)
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("./known_people/yang.png")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
print('finished training yang')
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("./known_people/zhu.png")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
print('finished training zhu')
# Create arrays of known face encodings and their names
known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
known_face_names = [
"Yang",
"Zhu"
]
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
print('1')
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
if cpus == 1:
test_image(rgb_small_frame, known_face_names, known_face_encodings, tolerance, show_distance)
else:
process_images_in_process_pool(rgb_small_frame, known_face_names, known_face_encodings, cpus, tolerance, show_distance)
# test_image(rgb_small_frame, known_face_names, known_face_encodings, tolerance, show_distance)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
19cf63501b56bcd1efa89c4fc434d3fc9c199d19 | de97a93cd8cc17c29114df31566f846d9151bb6e | /suckcontrol/daemon.py | 0713d488e78221dc8a6efc4724c8c9edab9f4961 | [
"MIT"
] | permissive | peacepenguin/SuckControl | add6717ef3f433481c109a014a82d204193035e3 | ff5de37a467ba5d549587ffea16bd0e778ce2336 | refs/heads/main | 2023-08-17T01:57:18.104044 | 2021-10-03T15:57:31 | 2021-10-03T15:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | import logging
from time import sleep
from numpy import interp
from threading import Thread
logger = logging.getLogger('suckcontrol.daemon')
def _control_speed(config, temp, controls, points):
sensors_all = config.sensors_all
try:
sensor_temp = sensors_all[temp]
except KeyError:
# In case of removed hardware
logger.info(f'{temp} doesn\'t exist.')
return
sensor_controls = []
for control in controls:
try:
sensor_controls.append(sensors_all[control])
except KeyError:
# In case of removed hardware
logger.info(f'{control} doesn\'t exist.')
continue
if not len(sensor_controls):
logger.info('No sensors to control, abort')
return
to_set = None
temp_value = int(sensor_temp.Value)
control_value = int(sensor_controls[0].Value)
logger.debug(f'Fan: {sensor_controls[0].Name}')
logger.debug(f'Temp: {temp_value}')
if temp_value < points[0][0]:
# Temp is below first point
to_set = points[0][1]
else:
for i, point in enumerate(points):
# Check, if this is the last point
if point == points[-1]:
# Temp is above last point
to_set = point[1]
else:
nextpoint = i + 1
logger.debug(f'{point}, {points[nextpoint]}')
point_next = points[nextpoint]
if temp_value in range(point[0], point_next[0]):
# Temp is between point[0] and point_next[0]
xp = [point[0], point_next[0]]
fp = [point[1], point_next[1]]
to_set = interp(temp_value, xp, fp)
break
logger.debug(f'Before change: {control_value} - After change: {to_set}')
if control_value == to_set or to_set is None:
# No need to set
return
for control in sensor_controls:
try:
control.Control.SetSoftware(to_set)
except AttributeError:
# This happened only on NVIDIA cards before NvApiWrapper was implemented in LHM
logger.warning(f'Can\'t control this sensor: {control.Name} - {control.Identifier}')
return
return
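# Editor's worked example (hypothetical values): with points [(30, 20), (60, 100)]
# and a temperature reading of 45, interp(45, [30, 60], [20, 100]) yields a
# duty cycle of 60; below the first point the curve clamps to 20, above the
# last point it clamps to 100, matching the branches above.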
def _update_rules(config):
while True:
# Update rules, for newly added ones
sleep(0.1) # Incase there are no rules.
config.get_hardware_sensors()
for rule in config.config['user']:
sleep(0.2)
if config.terminate:
break
if not rule['enabled']:
continue
temp = rule['sensor_temp']
controls = rule['sensor_controls']
points = rule['points']
# Make the fans suck
_control_speed(config, temp, controls, points)
if config.terminate:
break
def start_daemons(config):
update_rules = Thread(target=_update_rules, args=(config,))
update_rules.daemon = True
update_rules.start()
logger.info('daemon-thread started')
| [
"[email protected]"
] | |
fccb6587bdf7dcb5371e847e6f92308dc40f5e51 | 68d595578c82f186d599a6071a2ee8f3b0afbfef | /demo.py | ab6a3a7563807cda96076af2a7cc1addc2c3c355 | [
"MIT"
] | permissive | hikabarki/pytorch-template-1 | a21f39ec6208b257dfaf70d4c5714a3900a47d3f | d404a50abdeea892c068ba90a3e93759e6f7c4ab | refs/heads/master | 2022-12-19T18:00:45.972884 | 2020-10-02T17:11:40 | 2020-10-02T17:11:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | """
Author: Smeet Shah
Description: Python script to run a demo of the trained model on data samples.
- Loads the trained model weights
- Iterates over all data samples in demo directory
- Preprocesses each data sample and runs the model
- Decodes the model outputs and gives predictions
"""
import os
import random
import torch
import numpy as np
from config import args
from models import MyNet
from data.tools import prepare_input
from essentials.pprocs import preprocess_sample
from essentials.decoders import decode
def main():
"""
Main function wrapper for demo script
"""
random.seed(args["SEED"])
np.random.seed(args["SEED"])
torch.manual_seed(args["SEED"])
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
if args["TRAINED_WEIGHTS_FILE"] is not None:
print("Trained Weights File: %s" % (args["TRAINED_WEIGHTS_FILE"]))
print("Demo Directory: %s" % (args["DEMO_DIRECTORY"]))
model = MyNet()
model.load_state_dict(
torch.load(
args["CODE_DIRECTORY"] + args["TRAINED_WEIGHTS_FILE"],
map_location=device,
)
)
model.to(device)
print("Running Demo ....")
for root, dirs, files in os.walk(args["DEMO_DIRECTORY"]):
for file in files:
sampleFile = os.path.join(root, file)
preprocess_sample(sampleFile)
inp, _ = prepare_input(sampleFile)
inputBatch = torch.unsqueeze(inp, dim=0)
inputBatch = (inputBatch.float()).to(device)
model.eval()
with torch.no_grad():
outputBatch = model(inputBatch)
predictionBatch = decode(outputBatch)
pred = predictionBatch[0][:]
print("File: %s" % (file))
print("Prediction: %s" % (pred))
print("\n")
print("Demo Completed.")
else:
print("Path to trained weights file not specified.")
return
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
3b295c0ac867201dff2bf9b12be03c32e17cbb69 | db4f810b2bcadf1d5e88f856516954789f199bde | /c_network.py | 0a1483289da3c9c03f78fe937369f8160ceb4432 | [] | no_license | peryion/competitive-network | f23b4430245f93f5fabb0c9f6cab0258e5bd3897 | 62566194b90323e106a42520e0cd7e7ac2a680a9 | refs/heads/master | 2021-08-29T18:22:34.170004 | 2017-12-14T15:46:14 | 2017-12-14T15:46:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import numpy as np
def sigmoid(x, derivative=False):  # activation function (the derivative flag is currently unused)
return 1/(1+np.exp(-x))
def normalization(M):
"""
Normalize a row vector to unit length.
:param M: row vector [dim=len(M)]
:return: the normalized row vector M
"""
M=M/np.sqrt(np.dot(M,M.T))
return M
def normalization_all(N):
"""
Normalize every row of a matrix.
:param N: matrix [m, n]
:return: list M_all of the normalized rows [m, n]
"""
M_all=[]
for i in range(len(N)):
K=normalization(N[i])
M_all.append(K)
return M_all
class competitive_network(object):
def __init__(self,x_dim,c_dim,a):
W=np.random.rand(c_dim,x_dim)*(-2)+1
self.W=normalization_all(W)
self.a=a
def forward_propagation(self,x):
x=x.reshape(1,x.shape[0])
z_layer=np.dot(self.W,x.T)
a_layer=sigmoid(z_layer)
argmax=np.where(a_layer==np.amax(a_layer))[0][0]
return argmax
def back_propagation(self,argmax,x):
self.W[argmax] += self.a * (x - self.W[argmax])  # winner-take-all update: move the winning prototype toward x
self.W[argmax]=normalization(self.W[argmax])
self.a-=self.decay
def train(self,X,num_item):
X=np.array(X)
self.decay=self.a/num_item
for item in range(num_item):
for i in range(X.shape[0]):
argmax=self.forward_propagation(X[i])
self.back_propagation(argmax,X[i])
def prediction(self,x):
argmax=self.forward_propagation(x)
return argmax
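# Editor's note: minimal usage sketch with made-up data (not part of the
# original file): cluster random 2-D points into two prototypes.
#
#     X = np.array(normalization_all(np.random.rand(20, 2)))
#     net = competitive_network(x_dim=2, c_dim=2, a=0.5)
#     net.train(X, num_item=50)
#     print(net.prediction(X[0]))  # index of the winning prototype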
| [
"[email protected]"
] | |
e6b35d6bc6ecc755d0bd88ac078e1dadddb471ca | 49e193cc108e4a7b7e92ecf69c77b15234912871 | /src/cli/parser.py | 09a56efb59c7eed09f856864b85e3389b478e0cf | [] | no_license | AlphaTechnolog/thmctrl | 80bd2c41b6b5680990f984d09618b68e9d76c8ad | 4adfaf5d216f63d2373231efbd6ec60807f0888f | refs/heads/main | 2023-04-12T17:47:43.284770 | 2021-05-08T11:39:13 | 2021-05-08T11:39:13 | 355,208,435 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,792 | py | import argparse
from defs import __version__
from resources.resources import config_path
parser = argparse.ArgumentParser(
prog='thmctrl',
description='Manage my dotfiles config - AlphaTechnology',
epilog=f'Config file path: "{config_path}"'
)
parser.add_argument(
'-V', '--version',
action='version', version=__version__
)
subparsers = parser.add_subparsers(title='subcommands')
config_parser = subparsers.add_parser('config', help='Manage config')
config_parser.set_defaults(action='config')
config_parser.add_argument(
'-G', '--get',
action='store_true'
)
config_parser.add_argument(
'-F', '--fetch',
required=False, help='Fetch by profile name'
)
config_subparsers = config_parser.add_subparsers(title='config subcommands')
config_subparser_create = config_subparsers.add_parser('create', help='Create profile')
config_subparser_create.set_defaults(action='config_create')
config_subparser_create.add_argument('name', help='The profile name')
config_subparser_create.add_argument(
'-pt', '--pycritty-theme',
required=True, help='The pycritty theme name'
)
config_subparser_create.add_argument(
'-po', '--pycritty-opacity',
required=True, help='The pycritty opacity'
)
config_subparser_create.add_argument(
'-pf', '--pycritty-font',
required=True, help='The pycritty font'
)
config_subparser_create.add_argument(
'-ps', '--pycritty-size',
required=True, help='The pycritty font size'
)
config_subparser_create.add_argument(
'-ppx', '--pycritty-padding-x',
required=True, help='The x of pycritty padding'
)
config_subparser_create.add_argument(
'-ppy', '--pycritty-padding-y',
required=True, help='The y of pycritty padding'
)
config_subparser_create.add_argument(
'-qt', '--qtile-theme',
required=True, help='The qtile theme name'
)
config_subparser_create.add_argument(
'-wn', '--wallc-wallpaper-name',
required=True, help='The wallc wallpaper name'
)
config_subparser_create.add_argument(
'-we', '--wallc-wallpaper-extension',
required=False, default='jpg',
help='The wallc wallpaper extension (default: jpg)'
)
config_subparser_create.add_argument(
'-gtk', '--gtk-theme',
required=True, help='The gtk theme'
)
config_subparser_create.add_argument(
'--gtk-icon',
required=True, help='The gtk icons'
)
config_subparser_create.add_argument(
'--gtk-cursor',
required=True, help='The gtk cursor'
)
config_subparser_create.add_argument(
'-S', '--shell',
required=True, help='The shell executable (required in /etc/shells)'
)
config_subparser_create.add_argument(
'-Si', '--shell-init',
required=False, help='The first shell argument (used to change a shell theme)'
)
profile_parser = subparsers.add_parser('profile', help='Dispatch profile')
profile_parser.set_defaults(action='profile')
profile_parser.add_argument('name', help='The name of profile to dispatch')
profile_parser.add_argument(
'-S', '--dispatch',
required=False, type=str,
help='The dispatch options',
choices=('all', 'pycritty', 'wallc', 'qtile', 'gtk', 'shell'),
default='all'
)
profile_parser.add_argument(
"-G", "--get",
choices=("compact", "full",),
required=False, default="no_get",
help="Get the profile"
)
profiles_parser = subparsers.add_parser('profiles', help='Manage the profiles.')
profiles_parser.set_defaults(action='profiles')
profiles_parser.add_argument(
'-A', '--available',
action='store_true',
help='List all available profiles'
)
used_parser = subparsers.add_parser('used', help='Show the used theme')
used_parser.set_defaults(action='used')
used_parser.add_argument(
'-C', '--compact',
action='store_true', help="Show as compact format"
)
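# Editor's note: illustrative sketch of how this parser is typically consumed
# by the CLI entry point (the argv values below are made up):
#
#     args = parser.parse_args(['profile', 'work', '--dispatch', 'qtile'])
#     print(args.action, args.name, args.dispatch)  # -> profile work qtile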
| [
"[email protected]"
] | |
6752c4fc336f3a62fdb122671ae58087ba7710b5 | 64860c19c4d51ed9669ddae4bb3c365848a8e712 | /ipython_config.py | 86662bfc4b3200a8212de396ac8f64ef030eee98 | [
"BSD-3-Clause"
] | permissive | NSLS-II-SIX/profile_collection | af3ef9def112ee3b1166f21b9fc2f271ce07a6fe | 7cbd3ad2e1675aa14099be0aab00a6516ce7981d | refs/heads/master | 2022-05-04T04:36:18.615076 | 2022-04-15T15:50:02 | 2022-04-15T15:50:02 | 75,974,438 | 0 | 3 | BSD-3-Clause | 2023-08-29T20:07:36 | 2016-12-08T20:55:34 | Python | UTF-8 | Python | false | false | 54 | py | c.InteractiveShellApp.extensions = ['pyOlog.cli.ipy']
| [
"[email protected]"
] | |
5da4a51c12216e107243aedc5cf5d81eeefedac1 | b09e71b77dd41d5db266733d1eedb845cb56d5c2 | /models/ts_hred/src/reranking/baselines/Common/pstfast.py | daa3c6e9ac97b00034390c19eb02eff6562e6882 | [] | no_license | lingxiao/neural-chatbot | 1bcaaea5ede06d0cdc7232f3905b2c813f97f36d | 70dc9366c9d419c26cfb51086187026503820267 | refs/heads/master | 2021-01-20T07:51:26.398642 | 2017-12-26T07:05:11 | 2017-12-26T07:05:11 | 90,052,227 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,954 | py | import cPickle
import numpy as np
import os
import sys
import argparse
import collections
import operator
from collections import OrderedDict
import logging
import math
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
def kl_divergence(a, b, smoothing):
norm_a = sum(a.values())
norm_b = sum(b.values())
# Approximate KL (i.e. smooth while computing)
kl = 0.
for x, v in a.items():
kl += v/norm_a * (np.log(v/norm_a) - np.log(b.get(x, smoothing)/norm_b))
assert kl >= 0
return kl
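# Editor's note: this computes KL(a || b) over raw count dicts, normalizing
# each by its total on the fly; `smoothing` stands in for outcomes missing
# from b, e.g. kl_divergence({'x': 2, 'y': 2}, {'x': 1, 'y': 3}, 0.1).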
class Node(object):
def __init__(self, query_id=-1):
# Query id
self.node_id = query_id
# Sorted array of children
self.probs = {}
self.children = {}
class PSTInfer(object):
def __init__(self, exact_match=False):
self.exact_match = exact_match
self.query_to_id = {}
self.id_to_query = []
def _load_pickle(self, input_file):
f = open(input_file, 'r')
self.data = cPickle.load(f)
self.query_to_id = cPickle.load(f)
f.close()
def load(self, input_file):
self._load_pickle(input_file)
self.id_to_query = [query for query, query_id in \
sorted(self.query_to_id.items(), key=operator.itemgetter(1))]
logger.debug('Loaded inference engine')
def _find(self, suffix):
"""
Save the tree in a flattened python format using lists.
"""
def _get_child(levels, child):
# levels = [(i, {}), []], [], [], ...
for level in levels:
level_root = level[0]
if level_root[0] == child:
return level_root
return None
suffix = map(lambda x : self.query_to_id.get(x, -1), suffix)
# Initialize node_path with the root
node_path = [self.data[0]]
past_node = self.data
# For each query in the suffix, explore the path
# in the tree
for query in reversed(suffix):
if len(past_node) == 0:
break
next_node = _get_child(past_node[1:], query)
if not next_node:
break
# Append the root of the node in the node path
node_path.append(next_node)
past_node = next_node
# If we have found it, then the root
# correspond to the first element in the suffix
return node_path, query == suffix[0]
def evaluate(self, suffix, targets):
node_path, found = self._find(suffix)
targets = set(map(lambda x: self.query_to_id.get(x, -1), targets))
log_probs = {}
for node in reversed(node_path):
probs = dict(node[1])
for target in list(targets):  # iterate over a copy; the set is mutated below
if target in probs:
log_probs[self.id_to_query[target]] = probs[target]
targets.remove(target)
return log_probs
def suggest(self, suffix, N=20):
node_path, is_found = self._find(suffix)
node_id = node_path[-1][0]
node_scores = node_path[-1][1]
data = {'last_node_id' : node_id, \
'last_node_query': self.id_to_query[node_id], \
'found' : is_found, \
'suggestions' : [], \
'scores' : []}
# If it is the root or if we want an exact session
# match, i.e. NGRAM models
if node_id == 0 or (self.exact_match and not is_found):
return data
# Get top N
sugg_score = [(self.id_to_query[sugg_id], score) for sugg_id, score in node_scores[:N]]
sugg, score = map(list, zip(*sugg_score))
data['suggestions'] = sugg
data['scores'] = score
return data
class PST(object):
def __init__(self, D=4):
self.root = Node(0)
self.query_dict = {'[ROOT]' : 0}
self.nodes = [self.root]
self.D = D
self.num_nodes = 1
def __str__(self):
def _str(node, space=0):
message = '\t' * space + '@@NODE: %s - @@PROBS: %s' % (node.node_id, self.get_probs(node))
list_child = node.children.values()
for child in list_child:
message += '\n%s' % _str(child, space + 1)
return message
return _str(self.root)
def get_children(self):
def _get_children(node):
nodes = [node]
list_child = node.children.values()
for x in list_child:
nodes += _get_children(x)
return nodes
return _get_children(self.root)
def get_probs(self, node):
return node.probs.items()
def save(self, output_path):
"""
Save the tree in a flattened python format using lists.
[(node_id,{}),[(child_id_1,{q_1 : p_1, q_2 : p_2})],[(child_id_2,{...})]]
"""
def _flatten(node):
# Normalize the probabilities when saving the PST
sum_p = sum(node.probs.values())
normalized_probs = [(i, float(v)/sum_p) for i, v in node.probs.items()][:20]
sorted_probs = sorted(normalized_probs, key=operator.itemgetter(1), reverse=True)
reprs = [(node.node_id, sorted_probs)]
list_child = node.children.items()
for child_id, child in list_child:
reprs.append(_flatten(child))
del node.children[child_id]
return reprs
reprs = _flatten(self.root)
logger.info('Saving PST to {} / {} nodes.'.format(output_path, self.num_nodes))
f = open(output_path, 'w')
cPickle.dump(reprs, f)
cPickle.dump(self.query_dict, f)
f.close()
def get_count(self):
def _count(node):
count = 0
list_child = node.children.values()
for child in list_child:
count += _count(child)
return count + len(node.children)
return _count(self.root)
def delete_children(self, node, to_delete):
del node.children[to_delete.node_id]
def prune(self, epsilon=0.05):
smoothing = 1.0/len(self.query_dict)
def _prune(node):
if len(node.probs) > 0:
list_nodes = node.children.values()
kl_nodes = [(x, kl_divergence(node.probs, x.probs, smoothing)) for x in list_nodes]
for kl_node, kl in kl_nodes:
if kl < epsilon:
self.delete_children(node, kl_node)
list_nodes = node.children.values()
for child in list_nodes:
_prune(child)
_prune(self.root)
self.num_nodes = self.get_count()
def normalize(self):
def _normalize(node):
norm = sum(node.probs.values())
node.probs = dict([(x, v/norm) for x, v in node.probs.items()])
for child in node.children.values():
_normalize(child)
_normalize(self.root)
def _find(self, suffix):
past_node = self.root
for query in reversed(suffix):
next_node = self.get_child(past_node, query)
if next_node:
past_node = next_node
else:
return past_node, query, False
return past_node, None, True
def get_child(self, node, query_id):
return node.children.get(query_id, None)
def add_child(self, node, new_node):
assert new_node.node_id not in node.children
node.children[new_node.node_id] = new_node
return new_node
def add_session(self, session):
def _update_prob(node, query_id):
node.probs[query_id] = node.probs.get(query_id, 0.) + 1.
# Check if the root has a node with that name
len_session = len(session)
if len_session < 2:
return
for x in session:
if x not in self.query_dict:
self.query_dict[x] = len(self.query_dict)
session = [self.query_dict[x] for x in session]
for x in range(len_session - 1):
tgt_indx = len_session - x - 1
for c in range(self.D):
ctx_indx = tgt_indx - c - 1
if ctx_indx < 0:
break
suffix = session[ctx_indx:tgt_indx]
tgt = session[tgt_indx]
assert len(suffix) > 0
suffix_node, last_query, found = self._find(suffix)
if not found:
new_node = Node(last_query)
suffix_node = self.add_child(suffix_node, new_node)
self.num_nodes += 1
_update_prob(suffix_node, tgt)
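# Editor's note: end-to-end usage sketch with made-up sessions (not part of
# the original file):
#
#     pst = PST(D=3)
#     pst.add_session(['weather', 'weather today', 'weather tomorrow'])
#     pst.prune(epsilon=0.05)
#     pst.save('model.pst')
#
#     infer = PSTInfer()
#     infer.load('model.pst')
#     print(infer.suggest(['weather'], N=5))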
| [
"[email protected]"
] |